file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/datasets.py |
import torch
import copy
from torch.utils.data import Dataset
class PPODataset(Dataset):
def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
self.is_rnn = is_rnn
self.seq_len = seq_len
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
self.length = self.batch_size // self.minibatch_size
self.is_discrete = is_discrete
self.is_continuous = not is_discrete
total_games = self.batch_size // self.seq_len
self.num_games_batch = self.minibatch_size // self.seq_len
self.game_indexes = torch.arange(total_games, dtype=torch.long, device=self.device)
self.flat_indexes = torch.arange(total_games * self.seq_len, dtype=torch.long, device=self.device).reshape(total_games, self.seq_len)
self.special_names = ['rnn_states']
def update_values_dict(self, values_dict):
self.values_dict = values_dict
def update_mu_sigma(self, mu, sigma):
start = self.last_range[0]
end = self.last_range[1]
self.values_dict['mu'][start:end] = mu
self.values_dict['sigma'][start:end] = sigma
def __len__(self):
return self.length
def _get_item_rnn(self, idx):
gstart = idx * self.num_games_batch
gend = (idx + 1) * self.num_games_batch
start = gstart * self.seq_len
end = gend * self.seq_len
self.last_range = (start, end)
input_dict = {}
for k,v in self.values_dict.items():
if k not in self.special_names:
                if type(v) is dict:
v_dict = { kd:vd[start:end] for kd, vd in v.items() }
input_dict[k] = v_dict
else:
input_dict[k] = v[start:end]
rnn_states = self.values_dict['rnn_states']
input_dict['rnn_states'] = [s[:,gstart:gend,:] for s in rnn_states]
return input_dict
def _get_item(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
self.last_range = (start, end)
input_dict = {}
for k,v in self.values_dict.items():
if k not in self.special_names and v is not None:
if type(v) is dict:
v_dict = { kd:vd[start:end] for kd, vd in v.items() }
input_dict[k] = v_dict
else:
input_dict[k] = v[start:end]
return input_dict
def __getitem__(self, idx):
if self.is_rnn:
sample = self._get_item_rnn(idx)
else:
sample = self._get_item(idx)
return sample
class DatasetList(Dataset):
def __init__(self):
self.dataset_list = []
def __len__(self):
return self.dataset_list[0].length * len(self.dataset_list)
def add_dataset(self, dataset):
self.dataset_list.append(copy.deepcopy(dataset))
def clear(self):
self.dataset_list = []
def __getitem__(self, idx):
ds_len = len(self.dataset_list)
ds_idx = idx % ds_len
in_idx = idx // ds_len
        return self.dataset_list[ds_idx].__getitem__(in_idx)
| 3,260 | Python | 33.326315 | 141 | 0.553067 |
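A small illustrative sketch (not part of the repository) of how a `PPODataset` like the one above is typically filled and iterated. The tensor names, shapes, and sizes below are assumptions chosen only for the example.

```python
import torch
from rl_games.common.datasets import PPODataset

# Hypothetical sizes: 8 minibatches of 256 samples each, no RNN.
batch_size, minibatch_size = 2048, 256
dataset = PPODataset(batch_size, minibatch_size, is_discrete=False,
                     is_rnn=False, device='cpu', seq_len=4)

# The key set below is an assumption for illustration; 'rnn_states' is skipped
# by the special_names handling shown above.
values = {
    'obs': torch.randn(batch_size, 17),
    'actions': torch.randn(batch_size, 6),
    'mu': torch.zeros(batch_size, 6),
    'sigma': torch.ones(batch_size, 6),
    'rnn_states': None,
}
dataset.update_values_dict(values)

for i in range(len(dataset)):          # len == batch_size // minibatch_size
    minibatch = dataset[i]             # dict of [minibatch_size, ...] slices
    assert minibatch['obs'].shape[0] == minibatch_size
```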
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/player.py |
import time
import gym
import numpy as np
import torch
from rl_games.common import env_configurations
class BasePlayer(object):
def __init__(self, config):
self.config = config
self.env_name = self.config['env_name']
self.env_config = self.config.get('env_config', {})
self.env_info = self.config.get('env_info')
if self.env_info is None:
self.env = self.create_env()
self.env_info = env_configurations.get_env_info(self.env)
self.value_size = self.env_info.get('value_size', 1)
self.action_space = self.env_info['action_space']
self.num_agents = self.env_info['agents']
self.observation_space = self.env_info['observation_space']
if isinstance(self.observation_space, gym.spaces.Dict):
self.obs_shape = {}
for k, v in self.observation_space.spaces.items():
self.obs_shape[k] = v.shape
else:
self.obs_shape = self.observation_space.shape
self.is_tensor_obses = False
self.states = None
self.player_config = self.config.get('player', {})
self.use_cuda = True
self.batch_size = 1
self.has_batch_dimension = False
self.has_central_value = self.config.get('central_value_config') is not None
self.device_name = self.player_config.get('device_name', 'cuda')
self.render_env = self.player_config.get('render', False)
self.games_num = self.player_config.get('games_num', 2000)
self.is_determenistic = self.player_config.get('determenistic', True)
self.n_game_life = self.player_config.get('n_game_life', 1)
self.print_stats = self.player_config.get('print_stats', True)
self.render_sleep = self.player_config.get('render_sleep', 0.002)
self.max_steps = 108000 // 4
self.device = torch.device(self.device_name)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k, v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def env_step(self, env, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
obs, rewards, dones, infos = env.step(actions)
if hasattr(obs, 'dtype') and obs.dtype == np.float64:
obs = np.float32(obs)
if self.value_size > 1:
rewards = rewards[0]
if self.is_tensor_obses:
return self.obs_to_torch(obs), rewards.cpu(), dones.cpu(), infos
else:
if np.isscalar(dones):
rewards = np.expand_dims(np.asarray(rewards), 0)
dones = np.expand_dims(np.asarray(dones), 0)
return self.obs_to_torch(obs), torch.from_numpy(rewards), torch.from_numpy(dones), infos
def obs_to_torch(self, obs):
if isinstance(obs, dict):
if 'obs' in obs:
obs = obs['obs']
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value, False)
else:
upd_obs = self.cast_obs(obs)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def _obs_to_tensors_internal(self, obs, cast_to_dict=True):
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value, False)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def cast_obs(self, obs):
if isinstance(obs, torch.Tensor):
self.is_tensor_obses = True
elif isinstance(obs, np.ndarray):
assert(self.observation_space.dtype != np.int8)
if self.observation_space.dtype == np.uint8:
obs = torch.ByteTensor(obs).to(self.device)
else:
obs = torch.FloatTensor(obs).to(self.device)
return obs
def preprocess_actions(self, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
return actions
def env_reset(self, env):
obs = env.reset()
return self.obs_to_torch(obs)
def restore(self, fn):
raise NotImplementedError('restore')
def get_weights(self):
weights = {}
weights['model'] = self.model.state_dict()
if self.normalize_input:
weights['running_mean_std'] = self.running_mean_std.state_dict()
return weights
def set_weights(self, weights):
self.model.load_state_dict(weights['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
def create_env(self):
return env_configurations.configurations[self.env_name]['env_creator'](**self.env_config)
def get_action(self, obs, is_determenistic=False):
raise NotImplementedError('step')
def get_masked_action(self, obs, mask, is_determenistic=False):
raise NotImplementedError('step')
def reset(self):
raise NotImplementedError('raise')
def init_rnn(self):
if self.is_rnn:
rnn_states = self.model.get_default_rnn_state()
self.states = [torch.zeros((s.size()[0], self.batch_size, s.size(
)[2]), dtype=torch.float32).to(self.device) for s in rnn_states]
def run(self):
n_games = self.games_num
render = self.render_env
n_game_life = self.n_game_life
is_determenistic = self.is_determenistic
sum_rewards = 0
sum_steps = 0
sum_game_res = 0
n_games = n_games * n_game_life
games_played = 0
has_masks = False
has_masks_func = getattr(self.env, "has_action_mask", None) is not None
op_agent = getattr(self.env, "create_agent", None)
if op_agent:
agent_inited = True
#print('setting agent weights for selfplay')
# self.env.create_agent(self.env.config)
# self.env.set_weights(range(8),self.get_weights())
if has_masks_func:
has_masks = self.env.has_action_mask()
need_init_rnn = self.is_rnn
for _ in range(n_games):
if games_played >= n_games:
break
obses = self.env_reset(self.env)
batch_size = 1
batch_size = self.get_batch_size(obses, batch_size)
if need_init_rnn:
self.init_rnn()
need_init_rnn = False
cr = torch.zeros(batch_size, dtype=torch.float32)
steps = torch.zeros(batch_size, dtype=torch.float32)
print_game_res = False
for n in range(self.max_steps):
if has_masks:
masks = self.env.get_action_mask()
action = self.get_masked_action(
obses, masks, is_determenistic)
else:
action = self.get_action(obses, is_determenistic)
obses, r, done, info = self.env_step(self.env, action)
cr += r
steps += 1
if render:
self.env.render(mode='human')
time.sleep(self.render_sleep)
all_done_indices = done.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
done_count = len(done_indices)
games_played += done_count
if done_count > 0:
if self.is_rnn:
for s in self.states:
s[:, all_done_indices, :] = s[:,
all_done_indices, :] * 0.0
cur_rewards = cr[done_indices].sum().item()
cur_steps = steps[done_indices].sum().item()
cr = cr * (1.0 - done.float())
steps = steps * (1.0 - done.float())
sum_rewards += cur_rewards
sum_steps += cur_steps
game_res = 0.0
if isinstance(info, dict):
if 'battle_won' in info:
print_game_res = True
game_res = info.get('battle_won', 0.5)
if 'scores' in info:
print_game_res = True
game_res = info.get('scores', 0.5)
if self.print_stats:
if print_game_res:
print('reward:', cur_rewards/done_count,
'steps:', cur_steps/done_count, 'w:', game_res)
else:
print('reward:', cur_rewards/done_count,
'steps:', cur_steps/done_count)
sum_game_res += game_res
if batch_size//self.num_agents == 1 or games_played >= n_games:
break
print(sum_rewards)
if print_game_res:
print('av reward:', sum_rewards / games_played * n_game_life, 'av steps:', sum_steps /
games_played * n_game_life, 'winrate:', sum_game_res / games_played * n_game_life)
else:
print('av reward:', sum_rewards / games_played * n_game_life,
'av steps:', sum_steps / games_played * n_game_life)
def get_batch_size(self, obses, batch_size):
obs_shape = self.obs_shape
if type(self.obs_shape) is dict:
if 'obs' in obses:
obses = obses['obs']
keys_view = self.obs_shape.keys()
keys_iterator = iter(keys_view)
first_key = next(keys_iterator)
obs_shape = self.obs_shape[first_key]
obses = obses[first_key]
if len(obses.size()) > len(obs_shape):
batch_size = obses.size()[0]
self.has_batch_dimension = True
self.batch_size = batch_size
return batch_size
| 10,359 | Python | 37.37037 | 100 | 0.529009 |
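`BasePlayer` above is abstract: `restore`, `get_action`, `get_masked_action` and `reset` raise `NotImplementedError`. A minimal sketch of a concrete player follows; the wrapped `model`, the checkpoint layout, and a `config` that already carries `env_info` (so no environment is created in the constructor) are assumptions for illustration, not the repository's actual agents.

```python
import torch
from rl_games.common.player import BasePlayer

class SimplePlayer(BasePlayer):
    """Illustrative only: wraps a plain torch.nn.Module mapping obs -> action."""

    def __init__(self, config, model):
        super().__init__(config)          # config is assumed to contain env_info
        self.model = model                # assumed to be an nn.Module
        self.normalize_input = False      # _preproc_obs expects this attribute
        self.is_rnn = False

    def restore(self, fn):
        # Assumed checkpoint layout: {'model': state_dict}
        checkpoint = torch.load(fn, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])

    def get_action(self, obs, is_determenistic=False):
        obs = self._preproc_obs(obs)
        with torch.no_grad():
            return self.model(obs)

    def reset(self):
        pass
```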
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/categorical.py |
import numpy as np
class CategoricalQ:
def __init__(self, n_atoms, v_min, v_max):
self.n_atoms = n_atoms
self.v_min = v_min
self.v_max = v_max
self.delta_z = (v_max - v_min) / (n_atoms - 1)
def distr_projection(self, next_distr, rewards, dones, gamma):
"""
        Perform distribution projection aka Categorical Algorithm from the
"A Distributional Perspective on RL" paper
"""
proj_distr = np.zeros_like(next_distr, dtype=np.float32)
n_atoms = self.n_atoms
v_min = self.v_min
v_max = self.v_max
delta_z = self.delta_z
for atom in range(n_atoms):
z = rewards + (v_min + atom * delta_z) * gamma
tz_j = np.clip(z, v_min, v_max)
b_j = (tz_j - v_min) / delta_z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
proj_distr[eq_mask, l[eq_mask]] += next_distr[eq_mask, atom]
ne_mask = u != l
proj_distr[ne_mask, l[ne_mask]] += next_distr[ne_mask, atom] * (u - b_j)[ne_mask]
proj_distr[ne_mask, u[ne_mask]] += next_distr[ne_mask, atom] * (b_j - l)[ne_mask]
if dones.any():
proj_distr[dones] = 0.0
tz_j = np.clip(rewards[dones], v_min, v_max)
b_j = (tz_j - v_min) / delta_z
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = u == l
eq_dones = dones.copy()
eq_dones[dones] = eq_mask
if eq_dones.any():
proj_distr[eq_dones, l[eq_mask]] = 1.0
ne_mask = u != l
ne_dones = dones.copy()
ne_dones[dones] = ne_mask
if ne_dones.any():
proj_distr[ne_dones, l[ne_mask]] = (u - b_j)[ne_mask]
proj_distr[ne_dones, u[ne_mask]] = (b_j - l)[ne_mask]
        return proj_distr
| 1,977 | Python | 37.784313 | 93 | 0.49216 |
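The projection above is the categorical (C51) distributional Bellman update. A small, self-contained usage sketch; the batch size, atom count, and transition values are arbitrary:

```python
import numpy as np
from rl_games.common.categorical import CategoricalQ

batch, n_atoms = 4, 51
cat = CategoricalQ(n_atoms=n_atoms, v_min=-10.0, v_max=10.0)

# A uniform predicted distribution for the next state and some dummy transitions.
next_distr = np.full((batch, n_atoms), 1.0 / n_atoms, dtype=np.float32)
rewards = np.array([1.0, -0.5, 0.0, 2.0], dtype=np.float32)
dones = np.array([False, False, True, False])

proj = cat.distr_projection(next_distr, rewards, dones, gamma=0.99)

# The projection preserves probability mass: each row still sums to one.
assert proj.shape == (batch, n_atoms)
assert np.allclose(proj.sum(axis=1), 1.0, atol=1e-5)
```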
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/divergence.py |
import torch
import torch.distributions as dist
def d_kl_discrete(p, q):
# p = target, q = online
# categorical distribution parametrized by logits
logits_diff = p - q
p_probs = torch.exp(p)
d_kl = (p_probs * logits_diff).sum(-1)
return d_kl
def d_kl_discrete_list(p, q):
d_kl = 0
for pi, qi in zip(p,q):
d_kl += d_kl_discrete(pi, qi)
return d_kl
def d_kl_normal(p, q):
# p = target, q = online
p_mean, p_sigma = p
q_mean, q_sigma = q
mean_diff = ((q_mean - p_mean) / q_sigma).pow(2)
var_ratio = (p_sigma / q_sigma).pow(2)
d_kl = 0.5 * (var_ratio + mean_diff - 1 - var_ratio.log())
    return d_kl.sum(-1)
| 680 | Python | 22.482758 | 62 | 0.570588 |
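`d_kl_normal` above computes KL(p ‖ q) for diagonal Gaussians given as (mean, sigma) tuples. A quick sanity-check sketch against `torch.distributions` (shapes are arbitrary):

```python
import torch
from torch.distributions import Normal, kl_divergence
from rl_games.common.divergence import d_kl_normal

p_mean, p_sigma = torch.randn(8, 3), torch.rand(8, 3) + 0.1
q_mean, q_sigma = torch.randn(8, 3), torch.rand(8, 3) + 0.1

manual = d_kl_normal((p_mean, p_sigma), (q_mean, q_sigma))
reference = kl_divergence(Normal(p_mean, p_sigma), Normal(q_mean, q_sigma)).sum(-1)
assert torch.allclose(manual, reference, atol=1e-5)

# Note: d_kl_discrete expects log-probabilities (it takes exp(p) internally),
# i.e. logits that have already been normalized with log_softmax.
```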
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/algo_observer.py |
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
class AlgoObserver:
def __init__(self):
pass
def before_init(self, base_name, config, experiment_name):
pass
def after_init(self, algo):
pass
def process_infos(self, infos, done_indices):
pass
def after_steps(self):
pass
def after_print_stats(self, frame, epoch_num, total_time):
pass
class DefaultAlgoObserver(AlgoObserver):
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.game_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
if not infos:
return
if not isinstance(infos, dict) and len(infos) > 0 and isinstance(infos[0], dict):
done_indices = done_indices.cpu()
for ind in done_indices:
ind = ind.item()
if len(infos) <= ind//self.algo.num_agents:
continue
info = infos[ind//self.algo.num_agents]
game_res = None
if 'battle_won' in info:
game_res = info['battle_won']
if 'scores' in info:
game_res = info['scores']
if game_res is not None:
self.game_scores.update(torch.from_numpy(np.asarray([game_res])).to(self.algo.ppo_device))
def after_clear_stats(self):
self.game_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.game_scores.current_size > 0 and self.writer is not None:
mean_scores = self.game_scores.get_mean()
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
            self.writer.add_scalar('scores/time', mean_scores, total_time)
| 2,001 | Python | 31.290322 | 110 | 0.578211 |
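`DefaultAlgoObserver` only tracks `battle_won`/`scores`. A minimal sketch of a custom observer that logs another scalar from env infos; the `episode_length` key is a made-up example, not something the environments above are guaranteed to provide:

```python
from rl_games.common.algo_observer import AlgoObserver

class EpisodeLengthObserver(AlgoObserver):
    """Illustrative only: averages a hypothetical 'episode_length' info field."""

    def after_init(self, algo):
        self.algo = algo
        self.writer = algo.writer
        self.lengths = []

    def process_infos(self, infos, done_indices):
        if isinstance(infos, dict) and 'episode_length' in infos:
            self.lengths.append(float(infos['episode_length']))

    def after_print_stats(self, frame, epoch_num, total_time):
        if self.lengths and self.writer is not None:
            mean_len = sum(self.lengths) / len(self.lengths)
            self.writer.add_scalar('episode_length/mean', mean_len, frame)
            self.lengths.clear()
```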
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/ivecenv.py |
class IVecEnv:
def step(self, actions):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def has_action_masks(self):
return False
def get_number_of_agents(self):
return 1
def get_env_info(self):
pass
def set_train_info(self, env_frames, *args, **kwargs):
"""
Send the information in the direction algo->environment.
Most common use case: tell the environment how far along we are in the training process. This is useful
for implementing curriculums and things such as that.
"""
pass
def get_env_state(self):
"""
Return serializable environment state to be saved to checkpoint.
Can be used for stateful training sessions, i.e. with adaptive curriculums.
"""
return None
def set_env_state(self, env_state):
pass
| 905 | Python | 25.647058 | 111 | 0.624309 |
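`IVecEnv` is the interface the trainers consume. A minimal sketch wrapping a single gym environment, assuming the classic 4-tuple `step` API used throughout this repository; the environment name is arbitrary and multi-actor batching is intentionally omitted:

```python
import gym
from rl_games.common.ivecenv import IVecEnv

class SingleGymVecEnv(IVecEnv):
    """Illustrative only: exposes one gym env through the IVecEnv interface."""

    def __init__(self, name='Pendulum-v1'):
        self.env = gym.make(name)

    def step(self, actions):
        obs, reward, done, info = self.env.step(actions)
        if done:
            obs = self.env.reset()
        return obs, reward, done, info

    def reset(self):
        return self.env.reset()

    def get_env_info(self):
        return {
            'action_space': self.env.action_space,
            'observation_space': self.env.observation_space,
            'agents': self.get_number_of_agents(),
        }
```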
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/vecenv.py |
import ray
from rl_games.common.ivecenv import IVecEnv
from rl_games.common.env_configurations import configurations
from rl_games.common.tr_helpers import dicts_to_dict_with_arrays
import numpy as np
import gym
from time import sleep
class RayWorker:
def __init__(self, config_name, config):
self.env = configurations[config_name]['env_creator'](**config)
#self.obs = self.env.reset()
def step(self, action):
next_state, reward, is_done, info = self.env.step(action)
if np.isscalar(is_done):
episode_done = is_done
else:
episode_done = is_done.all()
if episode_done:
next_state = self.reset()
if isinstance(next_state, dict):
for k,v in next_state.items():
if isinstance(v, dict):
for dk,dv in v.items():
if dv.dtype == np.float64:
v[dk] = dv.astype(np.float32)
else:
if v.dtype == np.float64:
next_state[k] = v.astype(np.float32)
else:
if next_state.dtype == np.float64:
next_state = next_state.astype(np.float32)
return next_state, reward, is_done, info
def render(self):
self.env.render()
def reset(self):
self.obs = self.env.reset()
return self.obs
def get_action_mask(self):
return self.env.get_action_mask()
def get_number_of_agents(self):
if hasattr(self.env, 'get_number_of_agents'):
return self.env.get_number_of_agents()
else:
return 1
def set_weights(self, weights):
self.env.update_weights(weights)
def can_concat_infos(self):
if hasattr(self.env, 'concat_infos'):
return self.env.concat_infos
else:
return False
def get_env_info(self):
info = {}
observation_space = self.env.observation_space
#if isinstance(observation_space, gym.spaces.dict.Dict):
# observation_space = observation_space['observations']
info['action_space'] = self.env.action_space
info['observation_space'] = observation_space
info['state_space'] = None
info['use_global_observations'] = False
info['agents'] = self.get_number_of_agents()
info['value_size'] = 1
if hasattr(self.env, 'use_central_value'):
info['use_global_observations'] = self.env.use_central_value
if hasattr(self.env, 'value_size'):
info['value_size'] = self.env.value_size
if hasattr(self.env, 'state_space'):
info['state_space'] = self.env.state_space
return info
class RayVecEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.config_name = config_name
self.num_actors = num_actors
self.use_torch = False
self.remote_worker = ray.remote(RayWorker)
self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]
res = self.workers[0].get_number_of_agents.remote()
self.num_agents = ray.get(res)
res = self.workers[0].get_env_info.remote()
env_info = ray.get(res)
res = self.workers[0].can_concat_infos.remote()
can_concat_infos = ray.get(res)
self.use_global_obs = env_info['use_global_observations']
self.concat_infos = can_concat_infos
self.obs_type_dict = type(env_info.get('observation_space')) is gym.spaces.Dict
self.state_type_dict = type(env_info.get('state_space')) is gym.spaces.Dict
if self.num_agents == 1:
self.concat_func = np.stack
else:
self.concat_func = np.concatenate
def step(self, actions):
newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
res_obs = []
if self.num_agents == 1:
for (action, worker) in zip(actions, self.workers):
res_obs.append(worker.step.remote(action))
else:
for num, worker in enumerate(self.workers):
res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))
all_res = ray.get(res_obs)
for res in all_res:
cobs, crewards, cdones, cinfos = res
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newrewards.append(crewards)
newdones.append(cdones)
newinfos.append(cinfos)
if self.obs_type_dict:
ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
else:
ret_obs = self.concat_func(newobs)
if self.use_global_obs:
newobsdict = {}
newobsdict["obs"] = ret_obs
if self.state_type_dict:
newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
else:
newobsdict["states"] = np.stack(newstates)
ret_obs = newobsdict
if self.concat_infos:
newinfos = dicts_to_dict_with_arrays(newinfos, False)
return ret_obs, self.concat_func(newrewards), self.concat_func(newdones), newinfos
def get_env_info(self):
res = self.workers[0].get_env_info.remote()
return ray.get(res)
def set_weights(self, indices, weights):
res = []
for ind in indices:
res.append(self.workers[ind].set_weights.remote(weights))
ray.get(res)
def has_action_masks(self):
return True
def get_action_masks(self):
mask = [worker.get_action_mask.remote() for worker in self.workers]
return np.asarray(ray.get(mask), dtype=np.int32)
def reset(self):
res_obs = [worker.reset.remote() for worker in self.workers]
newobs, newstates = [],[]
for res in res_obs:
cobs = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
if self.obs_type_dict:
ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
else:
ret_obs = self.concat_func(newobs)
if self.use_global_obs:
newobsdict = {}
newobsdict["obs"] = ret_obs
if self.state_type_dict:
newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
else:
newobsdict["states"] = np.stack(newstates)
ret_obs = newobsdict
return ret_obs
# todo rename multi-agent
class RayVecSMACEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.config_name = config_name
self.num_actors = num_actors
self.remote_worker = ray.remote(RayWorker)
self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]
res = self.workers[0].get_number_of_agents.remote()
self.num_agents = ray.get(res)
res = self.workers[0].get_env_info.remote()
env_info = ray.get(res)
self.use_global_obs = env_info['use_global_observations']
def get_env_info(self):
res = self.workers[0].get_env_info.remote()
return ray.get(res)
def get_number_of_agents(self):
return self.num_agents
def step(self, actions):
newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
newobsdict = {}
res_obs, res_state = [], []
for num, worker in enumerate(self.workers):
res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))
for res in res_obs:
cobs, crewards, cdones, cinfos = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newrewards.append(crewards)
newdones.append(cdones)
newinfos.append(cinfos)
if self.use_global_obs:
newobsdict["obs"] = np.concatenate(newobs, axis=0)
newobsdict["states"] = np.asarray(newstates)
ret_obs = newobsdict
else:
ret_obs = np.concatenate(newobs, axis=0)
return ret_obs, np.concatenate(newrewards, axis=0), np.concatenate(newdones, axis=0), newinfos
def has_action_masks(self):
return True
def get_action_masks(self):
mask = [worker.get_action_mask.remote() for worker in self.workers]
masks = ray.get(mask)
return np.concatenate(masks, axis=0)
def reset(self):
res_obs = [worker.reset.remote() for worker in self.workers]
if self.use_global_obs:
newobs, newstates = [],[]
for res in res_obs:
cobs = ray.get(res)
if self.use_global_obs:
newobs.append(cobs["obs"])
newstates.append(cobs["state"])
else:
newobs.append(cobs)
newobsdict = {}
newobsdict["obs"] = np.concatenate(newobs, axis=0)
newobsdict["states"] = np.asarray(newstates)
ret_obs = newobsdict
else:
ret_obs = ray.get(res_obs)
ret_obs = np.concatenate(ret_obs, axis=0)
return ret_obs
vecenv_config = {}
def register(config_name, func):
vecenv_config[config_name] = func
def create_vec_env(config_name, num_actors, **kwargs):
vec_env_name = configurations[config_name]['vecenv_type']
return vecenv_config[vec_env_name](config_name, num_actors, **kwargs)
register('RAY', lambda config_name, num_actors, **kwargs: RayVecEnv(config_name, num_actors, **kwargs))
register('RAY_SMAC', lambda config_name, num_actors, **kwargs: RayVecSMACEnv(config_name, num_actors, **kwargs))
from rl_games.envs.brax import BraxEnv
register('BRAX', lambda config_name, num_actors, **kwargs: BraxEnv(config_name, num_actors, **kwargs))
| 10,351 | Python | 34.696552 | 123 | 0.571539 |
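The module-level `register`/`create_vec_env` pair at the bottom of vecenv.py is how new vector-env backends are plugged in. A sketch of registering a custom backend; `MyVecEnv` is a placeholder, and the `'MY_ENV'` configuration with `vecenv_type: 'CUSTOM'` is assumed to exist in `env_configurations`:

```python
from rl_games.common import vecenv

class MyVecEnv(vecenv.IVecEnv):
    """Placeholder backend; see the IVecEnv sketch above for the interface."""
    def __init__(self, config_name, num_actors, **kwargs):
        self.num_actors = num_actors

# Make 'CUSTOM' available to create_vec_env; any env configuration whose
# 'vecenv_type' is 'CUSTOM' will now be instantiated through MyVecEnv.
vecenv.register('CUSTOM',
                lambda config_name, num_actors, **kwargs:
                    MyVecEnv(config_name, num_actors, **kwargs))

# env = vecenv.create_vec_env('MY_ENV', num_actors=8)  # assumes a registered config
```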
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/tr_helpers.py |
import numpy as np
from collections import defaultdict
class LinearValueProcessor:
def __init__(self, start_eps, end_eps, end_eps_frames):
self.start_eps = start_eps
self.end_eps = end_eps
self.end_eps_frames = end_eps_frames
def __call__(self, frame):
if frame >= self.end_eps_frames:
return self.end_eps
df = frame / self.end_eps_frames
return df * self.end_eps + (1.0 - df) * self.start_eps
class DefaultRewardsShaper:
def __init__(self, scale_value = 1, shift_value = 0, min_val=-np.inf, max_val=np.inf, is_torch=True):
self.scale_value = scale_value
self.shift_value = shift_value
self.min_val = min_val
self.max_val = max_val
self.is_torch = is_torch
def __call__(self, reward):
reward = reward + self.shift_value
reward = reward * self.scale_value
if self.is_torch:
import torch
reward = torch.clamp(reward, self.min_val, self.max_val)
else:
reward = np.clip(reward, self.min_val, self.max_val)
return reward
def dicts_to_dict_with_arrays(dicts, add_batch_dim = True):
def stack(v):
if len(np.shape(v)) == 1:
return np.array(v)
else:
return np.stack(v)
def concatenate(v):
if len(np.shape(v)) == 1:
return np.array(v)
else:
return np.concatenate(v)
dicts_len = len(dicts)
if(dicts_len <= 1):
return dicts
res = defaultdict(list)
{ res[key].append(sub[key]) for sub in dicts for key in sub }
if add_batch_dim:
concat_func = stack
else:
concat_func = concatenate
res = {k : concat_func(v) for k,v in res.items()}
return res
def unsqueeze_obs(obs):
if type(obs) is dict:
for k,v in obs.items():
obs[k] = unsqueeze_obs(v)
else:
obs = obs.unsqueeze(0)
return obs
def flatten_first_two_dims(arr):
if arr.ndim > 2:
return arr.reshape(-1, *arr.shape[-(arr.ndim-2):])
else:
return arr.reshape(-1)
def free_mem():
import ctypes
    ctypes.CDLL('libc.so.6').malloc_trim(0)
| 2,203 | Python | 26.898734 | 105 | 0.568316 |
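A short sketch of the two most commonly used helpers above, `DefaultRewardsShaper` and `dicts_to_dict_with_arrays`; the values are arbitrary:

```python
import torch
from rl_games.common.tr_helpers import DefaultRewardsShaper, dicts_to_dict_with_arrays

# shift first, then scale, then clamp: (r + 1) * 0.1 clipped to [-1, 1]
shaper = DefaultRewardsShaper(scale_value=0.1, shift_value=1.0,
                              min_val=-1.0, max_val=1.0, is_torch=True)
print(shaper(torch.tensor([5.0, -30.0])))   # tensor([ 0.6000, -1.0000])

# Merge per-worker info dicts into a single dict of stacked arrays.
infos = [{'score': 1.0, 'steps': 10}, {'score': 0.5, 'steps': 12}]
print(dicts_to_dict_with_arrays(infos, add_batch_dim=True))
# {'score': array([1. , 0.5]), 'steps': array([10, 12])}
```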
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/object_factory.py |
class ObjectFactory:
def __init__(self):
self._builders = {}
def register_builder(self, name, builder):
self._builders[name] = builder
def set_builders(self, builders):
self._builders = builders
def create(self, name, **kwargs):
builder = self._builders.get(name)
if not builder:
raise ValueError(name)
        return builder(**kwargs)
| 414 | Python | 26.666665 | 46 | 0.584541 |
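`ObjectFactory` is used as a simple name-to-builder registry elsewhere in rl_games. A minimal usage sketch with made-up builder names:

```python
from rl_games.common.object_factory import ObjectFactory

factory = ObjectFactory()
factory.register_builder('dict', lambda **kwargs: dict(**kwargs))
factory.register_builder('list', lambda **kwargs: list(kwargs.items()))

print(factory.create('dict', a=1, b=2))   # {'a': 1, 'b': 2}
print(factory.create('list', a=1))        # [('a', 1)]
# factory.create('missing') would raise ValueError('missing')
```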
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/transforms/soft_augmentation.py |
from rl_games.common.transforms import transforms
import torch
class SoftAugmentation():
def __init__(self, **kwargs):
self.transform_config = kwargs.pop('transform')
self.aug_coef = kwargs.pop('aug_coef', 0.001)
print('aug coef:', self.aug_coef)
self.name = self.transform_config['name']
#TODO: remove hardcode
self.transform = transforms.ImageDatasetTransform(**self.transform_config)
def get_coef(self):
return self.aug_coef
def get_loss(self, p_dict, model, input_dict, loss_type = 'both'):
'''
loss_type: 'critic', 'policy', 'both'
'''
if self.transform:
input_dict = self.transform(input_dict)
loss = 0
q_dict = model(input_dict)
if loss_type == 'policy' or loss_type == 'both':
p_dict['logits'] = p_dict['logits'].detach()
loss = model.kl(p_dict, q_dict)
if loss_type == 'critic' or loss_type == 'both':
p_value = p_dict['value'].detach()
q_value = q_dict['value']
loss = loss + (0.5 * (p_value - q_value)**2).sum(dim=-1)
        return loss
| 1,165 | Python | 34.333332 | 82 | 0.559657 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/transforms/transforms.py |
import torch
from torch import nn
class DatasetTransform(nn.Module):
def __init__(self):
super().__init__()
def forward(self, dataset):
return dataset
class ImageDatasetTransform(DatasetTransform):
def __init__(self, **kwargs):
super().__init__()
import kornia
self.transform = torch.nn.Sequential(
nn.ReplicationPad2d(4),
kornia.augmentation.RandomCrop((84,84))
#kornia.augmentation.RandomErasing(p=0.2),
#kornia.augmentation.RandomAffine(degrees=0, translate=(2.0/84,2.0/84), p=1),
#kornia.augmentation.RandomCrop((84,84))
)
def forward(self, dataset):
dataset['obs'] = self.transform(dataset['obs'])
        return dataset
| 746 | Python | 27.730768 | 85 | 0.619303 |
vstrozzi/FRL-SHAC-Extension/models/model_utils.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_activation_func(activation_name):
if activation_name.lower() == 'tanh':
return nn.Tanh()
elif activation_name.lower() == 'relu':
return nn.ReLU()
elif activation_name.lower() == 'elu':
return nn.ELU()
elif activation_name.lower() == 'identity':
return nn.Identity()
else:
        raise NotImplementedError('Activation func {} not defined'.format(activation_name))
| 1,018 | Python | 39.759998 | 91 | 0.720039 |
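A small sketch showing how the two helpers above are combined to build an initialized layer; the layer sizes and gain are arbitrary:

```python
import numpy as np
import torch.nn as nn
from models.model_utils import init, get_activation_func

# Orthogonal weight init with zero bias, as used by the actor/critic MLPs below.
layer = init(nn.Linear(64, 32),
             nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0),
             gain=np.sqrt(2))
block = nn.Sequential(layer, get_activation_func('elu'), nn.LayerNorm(32))
```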
vstrozzi/FRL-SHAC-Extension/models/actor.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
import numpy as np
from models import model_utils
class ActorDeterministicMLP(nn.Module):
def __init__(self, obs_dim, action_dim, cfg_network, device='cuda:0'):
super(ActorDeterministicMLP, self).__init__()
self.device = device
self.layer_dims = [obs_dim] + cfg_network['actor_mlp']['units'] + [action_dim]
init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
modules = []
for i in range(len(self.layer_dims) - 1):
modules.append(init_(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1])))
if i < len(self.layer_dims) - 2:
modules.append(model_utils.get_activation_func(cfg_network['actor_mlp']['activation']))
modules.append(torch.nn.LayerNorm(self.layer_dims[i+1]))
self.actor = nn.Sequential(*modules).to(device)
self.action_dim = action_dim
self.obs_dim = obs_dim
print(self.actor)
def get_logstd(self):
# return self.logstd
return None
def forward(self, observations, deterministic = False):
return self.actor(observations)
class ActorStochasticMLP(nn.Module):
def __init__(self, obs_dim, action_dim, cfg_network, device='cuda:0'):
super(ActorStochasticMLP, self).__init__()
self.device = device
self.layer_dims = [obs_dim] + cfg_network['actor_mlp']['units'] + [action_dim]
init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
modules = []
for i in range(len(self.layer_dims) - 1):
modules.append(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1]))
if i < len(self.layer_dims) - 2:
modules.append(model_utils.get_activation_func(cfg_network['actor_mlp']['activation']))
modules.append(torch.nn.LayerNorm(self.layer_dims[i+1]))
else:
modules.append(model_utils.get_activation_func('identity'))
self.mu_net = nn.Sequential(*modules).to(device)
logstd = cfg_network.get('actor_logstd_init', -1.0)
self.logstd = torch.nn.Parameter(torch.ones(action_dim, dtype=torch.float32, device=device) * logstd)
self.action_dim = action_dim
self.obs_dim = obs_dim
print(self.mu_net)
print(self.logstd)
def get_logstd(self):
return self.logstd
def forward(self, obs, deterministic = False):
mu = self.mu_net(obs)
if deterministic:
return mu
else:
std = self.logstd.exp() # (num_actions)
# eps = torch.randn((*obs.shape[:-1], std.shape[-1])).to(self.device)
# sample = mu + eps * std
dist = Normal(mu, std)
sample = dist.rsample()
return sample
def forward_with_dist(self, obs, deterministic = False):
mu = self.mu_net(obs)
std = self.logstd.exp() # (num_actions)
if deterministic:
return mu, mu, std
else:
dist = Normal(mu, std)
sample = dist.rsample()
return sample, mu, std
def evaluate_actions_log_probs(self, obs, actions):
mu = self.mu_net(obs)
std = self.logstd.exp()
dist = Normal(mu, std)
        return dist.log_prob(actions)
| 3,988 | Python | 33.991228 | 109 | 0.595286 |
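A minimal instantiation sketch for the stochastic actor above. The `cfg_network` dictionary mirrors the keys read by the constructor (`units`, `activation`, `actor_logstd_init`); CPU is used so the example runs without a GPU, and the observation/action sizes are arbitrary:

```python
import torch
from models.actor import ActorStochasticMLP

cfg_network = {
    'actor_mlp': {'units': [64, 64], 'activation': 'elu'},
    'actor_logstd_init': -1.0,
}
actor = ActorStochasticMLP(obs_dim=11, action_dim=3, cfg_network=cfg_network, device='cpu')

obs = torch.randn(5, 11)
actions = actor(obs)                         # reparametrized sample, differentiable
mean_actions = actor(obs, deterministic=True)
log_probs = actor.evaluate_actions_log_probs(obs, actions)
print(actions.shape, log_probs.shape)        # torch.Size([5, 3]) torch.Size([5, 3])
```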
vstrozzi/FRL-SHAC-Extension/models/critic.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import numpy as np
from models import model_utils
class CriticMLP(nn.Module):
def __init__(self, obs_dim, cfg_network, device='cuda:0'):
super(CriticMLP, self).__init__()
self.device = device
self.layer_dims = [obs_dim] + cfg_network['critic_mlp']['units'] + [1]
init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
modules = []
for i in range(len(self.layer_dims) - 1):
modules.append(init_(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1])))
if i < len(self.layer_dims) - 2:
modules.append(model_utils.get_activation_func(cfg_network['critic_mlp']['activation']))
modules.append(torch.nn.LayerNorm(self.layer_dims[i + 1]))
self.critic = nn.Sequential(*modules).to(device)
self.obs_dim = obs_dim
print(self.critic)
def forward(self, observations):
        return self.critic(observations)
| 1,505 | Python | 36.649999 | 104 | 0.646512 |
vstrozzi/FRL-SHAC-Extension/dflex/setup.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import setuptools
setuptools.setup(
name="dflex",
version="0.0.1",
author="NVIDIA",
author_email="[email protected]",
description="Differentiable Multiphysics for Python",
long_description="",
long_description_content_type="text/markdown",
# url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
package_data={"": ["*.h"]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
install_requires=["ninja", "torch"],
)
| 983 | Python | 35.444443 | 76 | 0.713123 |
vstrozzi/FRL-SHAC-Extension/dflex/README.md |
# A Differentiable Multiphysics Engine for PyTorch
dFlex is a physics engine for Python. It is written entirely in PyTorch and supports reverse-mode differentiation w.r.t. any simulation inputs.
It includes a USD-based visualization library (`dflex.render`), which can generate time-sampled USD files or update an existing stage on the fly.
## Prerequisites
* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)
Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once downloaded, follow the included instructions to add them to your PYTHONPATH environment variable.
## Using the built-in backend
By default dFlex uses the built-in PyTorch cpp-extensions mechanism to compile auto-generated simulation kernels.
- Windows users should ensure they have Visual Studio 2019 installed
## Setup and Running
To use the engine you can import first the simulation module:
```python
import dflex.sim
```
Physical models are built with the helper class `dflex.sim.ModelBuilder`, which can be used to create models programmatically from Python. For example, to create a chain of particles:
```python
builder = dflex.sim.ModelBuilder()
# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# build chain
for i in range(1,10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
# add ground plane
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)
```
Once you have built your model you must convert it to a finalized PyTorch simulation data structure using `finalize()`:
```python
model = builder.finalize('cpu')
```
The model object represents static (non-time varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.
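Because everything is stored in PyTorch tensors, gradients can be pushed through an entire rollout back into the model. The sketch below illustrates the idea; note that `model.state()`, `model.spring_stiffness`, and `state.particle_q` are assumed attribute names used here for illustration only and are not documented above.

```python
import torch
import dflex.sim

builder = dflex.sim.ModelBuilder()
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)   # fixed anchor
builder.add_particle((1.0, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(0, 1, 1.e+3, 0.0, 0)

model = builder.finalize('cpu')
model.spring_stiffness.requires_grad_(True)   # assumed attribute name

state = model.state()                         # assumed API for the initial state
integrator = dflex.sim.ExplicitIntegrator()

sim_dt = 1.0 / 60.0
for _ in range(60):
    state = integrator.forward(model, state, sim_dt)

# Any scalar of the final state can be backpropagated into model parameters.
loss = state.particle_q.sum()                 # assumed attribute name
loss.backward()
print(model.spring_stiffness.grad)
```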
## Time Stepping
To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers semi-implicit integration, with fully implicit integration planned, via the `dflex.sim.ExplicitIntegrator` and `dflex.sim.ImplicitIntegrator` classes, as follows:
```python
sim_dt = 1.0/60.0
sim_steps = 100
integrator = dflex.sim.ExplicitIntegrator()
for i in range(0, sim_steps):
state = integrator.forward(model, state, sim_dt)
```
## Rendering
To visualize the scene, dFlex supports USD-based rendering via the `dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage and the physical model.
```python
import dflex.render
stage = Usd.Stage.CreateNew("test.usda")
renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True
```
Each frame the renderer should be updated with the current model state and the current elapsed simulation time:
```python
renderer.update(state, sim_time)
```
## Contact
Miles Macklin ([email protected])
| 3,065 | Markdown | 32.326087 | 255 | 0.730832 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/mat33.h |
#pragma once
//----------------------------------------------------------
// mat33
struct mat33
{
inline CUDA_CALLABLE mat33(float3 c0, float3 c1, float3 c2)
{
data[0][0] = c0.x;
data[1][0] = c0.y;
data[2][0] = c0.z;
data[0][1] = c1.x;
data[1][1] = c1.y;
data[2][1] = c1.z;
data[0][2] = c2.x;
data[1][2] = c2.y;
data[2][2] = c2.z;
}
inline CUDA_CALLABLE mat33(float m00=0.0f, float m01=0.0f, float m02=0.0f,
float m10=0.0f, float m11=0.0f, float m12=0.0f,
float m20=0.0f, float m21=0.0f, float m22=0.0f)
{
data[0][0] = m00;
data[1][0] = m10;
data[2][0] = m20;
data[0][1] = m01;
data[1][1] = m11;
data[2][1] = m21;
data[0][2] = m02;
data[1][2] = m12;
data[2][2] = m22;
}
CUDA_CALLABLE float3 get_row(int index) const
{
return (float3&)data[index];
}
CUDA_CALLABLE void set_row(int index, const float3& v)
{
(float3&)data[index] = v;
}
CUDA_CALLABLE float3 get_col(int index) const
{
return float3(data[0][index], data[1][index], data[2][index]);
}
CUDA_CALLABLE void set_col(int index, const float3& v)
{
data[0][index] = v.x;
data[1][index] = v.y;
data[2][index] = v.z;
}
// row major storage assumed to be compatible with PyTorch
float data[3][3];
};
#ifdef CUDA
inline __device__ void atomic_add(mat33 * addr, mat33 value) {
atomicAdd(&((addr -> data)[0][0]), value.data[0][0]);
atomicAdd(&((addr -> data)[1][0]), value.data[1][0]);
atomicAdd(&((addr -> data)[2][0]), value.data[2][0]);
atomicAdd(&((addr -> data)[0][1]), value.data[0][1]);
atomicAdd(&((addr -> data)[1][1]), value.data[1][1]);
atomicAdd(&((addr -> data)[2][1]), value.data[2][1]);
atomicAdd(&((addr -> data)[0][2]), value.data[0][2]);
atomicAdd(&((addr -> data)[1][2]), value.data[1][2]);
atomicAdd(&((addr -> data)[2][2]), value.data[2][2]);
}
#endif
inline CUDA_CALLABLE void adj_mat33(float3 c0, float3 c1, float3 c2,
float3& a0, float3& a1, float3& a2,
const mat33& adj_ret)
{
// column constructor
a0 += adj_ret.get_col(0);
a1 += adj_ret.get_col(1);
a2 += adj_ret.get_col(2);
}
inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
float m10, float m11, float m12,
float m20, float m21, float m22,
float& a00, float& a01, float& a02,
float& a10, float& a11, float& a12,
float& a20, float& a21, float& a22,
const mat33& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE float index(const mat33& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE mat33 add(const mat33& a, const mat33& b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[i][j] + b.data[i][j];
}
}
return t;
}
inline CUDA_CALLABLE mat33 mul(const mat33& a, float b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[i][j]*b;
}
}
return t;
}
inline CUDA_CALLABLE float3 mul(const mat33& a, const float3& b)
{
float3 r = a.get_col(0)*b.x +
a.get_col(1)*b.y +
a.get_col(2)*b.z;
return r;
}
inline CUDA_CALLABLE mat33 mul(const mat33& a, const mat33& b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
for (int k=0; k < 3; ++k)
{
t.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return t;
}
inline CUDA_CALLABLE mat33 transpose(const mat33& a)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[j][i];
}
}
return t;
}
inline CUDA_CALLABLE float determinant(const mat33& m)
{
return dot(float3(m.data[0]), cross(float3(m.data[1]), float3(m.data[2])));
}
inline CUDA_CALLABLE mat33 outer(const float3& a, const float3& b)
{
return mat33(a*b.x, a*b.y, a*b.z);
}
inline CUDA_CALLABLE mat33 skew(const float3& a)
{
mat33 out(0.0f, -a.z, a.y,
a.z, 0.0f, -a.x,
-a.y, a.x, 0.0f);
return out;
}
inline void CUDA_CALLABLE adj_index(const mat33& m, int row, int col, mat33& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
inline CUDA_CALLABLE void adj_add(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_a.data[i][j] += adj_ret.data[i][j];
adj_b.data[i][j] += adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, float b, mat33& adj_a, float& adj_b, const mat33& adj_ret)
{
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_a.data[i][j] += b*adj_ret.data[i][j];
adj_b += a.data[i][j]*adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, const float3& b, mat33& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += outer(adj_ret, b);
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
adj_a += mul(adj_ret, transpose(b));
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_transpose(const mat33& a, mat33& adj_a, const mat33& adj_ret)
{
adj_a += transpose(adj_ret);
}
inline CUDA_CALLABLE void adj_determinant(const mat33& m, mat33& adj_m, float adj_ret)
{
(float3&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
(float3&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
(float3&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
}
inline CUDA_CALLABLE void adj_skew(const float3& a, float3& adj_a, const mat33& adj_ret)
{
mat33 out(0.0f, -a.z, a.y,
a.z, 0.0f, -a.x,
-a.y, a.x, 0.0f);
adj_a.x += adj_ret.data[2][1] - adj_ret.data[1][2];
adj_a.y += adj_ret.data[0][2] - adj_ret.data[2][0];
adj_a.z += adj_ret.data[1][0] - adj_ret.data[0][1];
}
| 6,549 | C | 24.192308 | 126 | 0.505726 |
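The `adj_*` functions above follow the reverse-mode convention used throughout dflex: each one accumulates input adjoints given the adjoint of the output. As a sanity check, the rule in `adj_mul(const mat33&, const float3&, ...)` — `adj_a += outer(adj_ret, b)` and `adj_b += mul(transpose(a), adj_ret)` — matches what PyTorch autograd produces for a 3x3 matrix-vector product:

```python
import torch

a = torch.randn(3, 3, requires_grad=True)
b = torch.randn(3, requires_grad=True)
adj_ret = torch.randn(3)                 # incoming adjoint of y = a @ b

y = a @ b
y.backward(adj_ret)

# Same accumulation rules as adj_mul(const mat33&, const float3&, ...) above.
assert torch.allclose(a.grad, torch.outer(adj_ret, b.detach()))
assert torch.allclose(b.grad, a.detach().t() @ adj_ret)
```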
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/spatial.h |
#pragma once
//---------------------------------------------------------------------------------
// Represents a twist in se(3)
struct spatial_vector
{
float3 w;
float3 v;
CUDA_CALLABLE inline spatial_vector(float a, float b, float c, float d, float e, float f) : w(a, b, c), v(d, e, f) {}
CUDA_CALLABLE inline spatial_vector(float3 w=float3(), float3 v=float3()) : w(w), v(v) {}
CUDA_CALLABLE inline spatial_vector(float a) : w(a, a, a), v(a, a, a) {}
CUDA_CALLABLE inline float operator[](int index) const
{
assert(index < 6);
return (&w.x)[index];
}
CUDA_CALLABLE inline float& operator[](int index)
{
assert(index < 6);
return (&w.x)[index];
}
};
CUDA_CALLABLE inline spatial_vector operator - (spatial_vector a)
{
return spatial_vector(-a.w, -a.v);
}
CUDA_CALLABLE inline spatial_vector add(const spatial_vector& a, const spatial_vector& b)
{
return { a.w + b.w, a.v + b.v };
}
CUDA_CALLABLE inline spatial_vector sub(const spatial_vector& a, const spatial_vector& b)
{
return { a.w - b.w, a.v - b.v };
}
CUDA_CALLABLE inline spatial_vector mul(const spatial_vector& a, float s)
{
return { a.w*s, a.v*s };
}
CUDA_CALLABLE inline float spatial_dot(const spatial_vector& a, const spatial_vector& b)
{
return dot(a.w, b.w) + dot(a.v, b.v);
}
CUDA_CALLABLE inline spatial_vector spatial_cross(const spatial_vector& a, const spatial_vector& b)
{
float3 w = cross(a.w, b.w);
float3 v = cross(a.v, b.w) + cross(a.w, b.v);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline spatial_vector spatial_cross_dual(const spatial_vector& a, const spatial_vector& b)
{
float3 w = cross(a.w, b.w) + cross(a.v, b.v);
float3 v = cross(a.w, b.v);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline float3 spatial_top(const spatial_vector& a)
{
return a.w;
}
CUDA_CALLABLE inline float3 spatial_bottom(const spatial_vector& a)
{
return a.v;
}
// adjoint methods
CUDA_CALLABLE inline void adj_spatial_vector(
float a, float b, float c,
float d, float e, float f,
float& adj_a, float& adj_b, float& adj_c,
float& adj_d, float& adj_e,float& adj_f,
const spatial_vector& adj_ret)
{
adj_a += adj_ret.w.x;
adj_b += adj_ret.w.y;
adj_c += adj_ret.w.z;
adj_d += adj_ret.v.x;
adj_e += adj_ret.v.y;
adj_f += adj_ret.v.z;
}
CUDA_CALLABLE inline void adj_spatial_vector(const float3& w, const float3& v, float3& adj_w, float3& adj_v, const spatial_vector& adj_ret)
{
adj_w += adj_ret.w;
adj_v += adj_ret.v;
}
CUDA_CALLABLE inline void adj_add(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_add(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_add(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_sub(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_sub(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_sub(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_mul(const spatial_vector& a, float s, spatial_vector& adj_a, float& adj_s, const spatial_vector& adj_ret)
{
adj_mul(a.w, s, adj_a.w, adj_s, adj_ret.w);
adj_mul(a.v, s, adj_a.v, adj_s, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_dot(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const float& adj_ret)
{
adj_dot(a.w, b.w, adj_a.w, adj_b.w, adj_ret);
adj_dot(a.v, b.v, adj_a.v, adj_b.v, adj_ret);
}
CUDA_CALLABLE inline void adj_spatial_cross(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_cross(a.v, b.w, adj_a.v, adj_b.w, adj_ret.v);
adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_cross_dual(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_cross(a.v, b.v, adj_a.v, adj_b.v, adj_ret.w);
adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_top(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
adj_a.w += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_bottom(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
adj_a.v += adj_ret;
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_vector* addr, const spatial_vector& value) {
atomic_add(&addr->w, value.w);
atomic_add(&addr->v, value.v);
}
#endif
//---------------------------------------------------------------------------------
// Represents a rigid body transformation
struct spatial_transform
{
float3 p;
quat q;
CUDA_CALLABLE inline spatial_transform(float3 p=float3(), quat q=quat()) : p(p), q(q) {}
CUDA_CALLABLE inline spatial_transform(float) {} // helps uniform initialization
};
CUDA_CALLABLE inline spatial_transform spatial_transform_identity()
{
return spatial_transform(float3(), quat_identity());
}
CUDA_CALLABLE inline float3 spatial_transform_get_translation(const spatial_transform& t)
{
return t.p;
}
CUDA_CALLABLE inline quat spatial_transform_get_rotation(const spatial_transform& t)
{
return t.q;
}
CUDA_CALLABLE inline spatial_transform spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b)
{
return { rotate(a.q, b.p) + a.p, mul(a.q, b.q) };
}
/*
CUDA_CALLABLE inline spatial_transform spatial_transform_inverse(const spatial_transform& t)
{
quat q_inv = inverse(t.q);
return spatial_transform(-rotate(q_inv, t.p), q_inv);
}
*/
CUDA_CALLABLE inline float3 spatial_transform_vector(const spatial_transform& t, const float3& x)
{
return rotate(t.q, x);
}
CUDA_CALLABLE inline float3 spatial_transform_point(const spatial_transform& t, const float3& x)
{
return t.p + rotate(t.q, x);
}
// Frank & Park definition 3.20, pg 100
CUDA_CALLABLE inline spatial_vector spatial_transform_twist(const spatial_transform& t, const spatial_vector& x)
{
float3 w = rotate(t.q, x.w);
float3 v = rotate(t.q, x.v) + cross(t.p, w);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline spatial_vector spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x)
{
float3 v = rotate(t.q, x.v);
float3 w = rotate(t.q, x.w) + cross(t.p, v);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline spatial_transform add(const spatial_transform& a, const spatial_transform& b)
{
return { a.p + b.p, a.q + b.q };
}
CUDA_CALLABLE inline spatial_transform sub(const spatial_transform& a, const spatial_transform& b)
{
return { a.p - b.p, a.q - b.q };
}
CUDA_CALLABLE inline spatial_transform mul(const spatial_transform& a, float s)
{
return { a.p*s, a.q*s };
}
// adjoint methods
CUDA_CALLABLE inline void adj_add(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
adj_add(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
adj_add(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_sub(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
adj_sub(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
adj_sub(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_mul(const spatial_transform& a, float s, spatial_transform& adj_a, float& adj_s, const spatial_transform& adj_ret)
{
adj_mul(a.p, s, adj_a.p, adj_s, adj_ret.p);
adj_mul(a.q, s, adj_a.q, adj_s, adj_ret.q);
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_transform* addr, const spatial_transform& value) {
atomic_add(&addr->p, value.p);
atomic_add(&addr->q, value.q);
}
#endif
CUDA_CALLABLE inline void adj_spatial_transform(const float3& p, const quat& q, float3& adj_p, quat& adj_q, const spatial_transform& adj_ret)
{
adj_p += adj_ret.p;
adj_q += adj_ret.q;
}
CUDA_CALLABLE inline void adj_spatial_transform_identity(const spatial_transform& adj_ret)
{
// nop
}
CUDA_CALLABLE inline void adj_spatial_transform_get_translation(const spatial_transform& t, spatial_transform& adj_t, const float3& adj_ret)
{
adj_t.p += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_transform_get_rotation(const spatial_transform& t, spatial_transform& adj_t, const quat& adj_ret)
{
adj_t.q += adj_ret;
}
/*
CUDA_CALLABLE inline void adj_spatial_transform_inverse(const spatial_transform& t, spatial_transform& adj_t, const spatial_transform& adj_ret)
{
//quat q_inv = inverse(t.q);
//return spatial_transform(-rotate(q_inv, t.p), q_inv);
quat q_inv = inverse(t.q);
float3 p = rotate(q_inv, t.p);
float3 np = -p;
quat adj_q_inv = 0.0f;
quat adj_q = 0.0f;
float3 adj_p = 0.0f;
float3 adj_np = 0.0f;
adj_spatial_transform(np, q_inv, adj_np, adj_q_inv, adj_ret);
adj_p = -adj_np;
adj_rotate(q_inv, t.p, adj_q_inv, adj_t.p, adj_p);
adj_inverse(t.q, adj_t.q, adj_q_inv);
}
*/
CUDA_CALLABLE inline void adj_spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
// translational part
adj_rotate(a.q, b.p, adj_a.q, adj_b.p, adj_ret.p);
adj_a.p += adj_ret.p;
// rotational part
adj_mul(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_spatial_transform_vector(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
}
CUDA_CALLABLE inline void adj_spatial_transform_point(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
adj_t.p += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_transform_twist(const spatial_transform& a, const spatial_vector& s, spatial_transform& adj_a, spatial_vector& adj_s, const spatial_vector& adj_ret)
{
printf("todo, %s, %d\n", __FILE__, __LINE__);
// float3 w = rotate(t.q, x.w);
// float3 v = rotate(t.q, x.v) + cross(t.p, w);
// return spatial_vector(w, v);
}
CUDA_CALLABLE inline void adj_spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x, spatial_transform& adj_t, spatial_vector& adj_x, const spatial_vector& adj_ret)
{
printf("todo, %s, %d\n", __FILE__, __LINE__);
// float3 v = rotate(t.q, x.v);
// float3 w = rotate(t.q, x.w) + cross(t.p, v);
// return spatial_vector(w, v);
}
/*
// should match model.py
#define JOINT_PRISMATIC 0
#define JOINT_REVOLUTE 1
#define JOINT_FIXED 2
#define JOINT_FREE 3
CUDA_CALLABLE inline spatial_transform spatial_jcalc(int type, float* joint_q, float3 axis, int start)
{
if (type == JOINT_REVOLUTE)
{
float q = joint_q[start];
spatial_transform X_jc = spatial_transform(float3(), quat_from_axis_angle(axis, q));
return X_jc;
}
else if (type == JOINT_PRISMATIC)
{
float q = joint_q[start];
spatial_transform X_jc = spatial_transform(axis*q, quat_identity());
return X_jc;
}
else if (type == JOINT_FREE)
{
float px = joint_q[start+0];
float py = joint_q[start+1];
float pz = joint_q[start+2];
float qx = joint_q[start+3];
float qy = joint_q[start+4];
float qz = joint_q[start+5];
float qw = joint_q[start+6];
spatial_transform X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw));
return X_jc;
}
// JOINT_FIXED
return spatial_transform(float3(), quat_identity());
}
CUDA_CALLABLE inline void adj_spatial_jcalc(int type, float* q, float3 axis, int start, int& adj_type, float* adj_q, float3& adj_axis, int& adj_start, const spatial_transform& adj_ret)
{
if (type == JOINT_REVOLUTE)
{
adj_quat_from_axis_angle(axis, q[start], adj_axis, adj_q[start], adj_ret.q);
}
else if (type == JOINT_PRISMATIC)
{
adj_mul(axis, q[start], adj_axis, adj_q[start], adj_ret.p);
}
else if (type == JOINT_FREE)
{
adj_q[start+0] += adj_ret.p.x;
adj_q[start+1] += adj_ret.p.y;
adj_q[start+2] += adj_ret.p.z;
adj_q[start+3] += adj_ret.q.x;
adj_q[start+4] += adj_ret.q.y;
adj_q[start+5] += adj_ret.q.z;
adj_q[start+6] += adj_ret.q.w;
}
}
*/
struct spatial_matrix
{
float data[6][6] = { { 0 } };
CUDA_CALLABLE inline spatial_matrix(float f=0.0f)
{
}
CUDA_CALLABLE inline spatial_matrix(
float a00, float a01, float a02, float a03, float a04, float a05,
float a10, float a11, float a12, float a13, float a14, float a15,
float a20, float a21, float a22, float a23, float a24, float a25,
float a30, float a31, float a32, float a33, float a34, float a35,
float a40, float a41, float a42, float a43, float a44, float a45,
float a50, float a51, float a52, float a53, float a54, float a55)
{
data[0][0] = a00;
data[0][1] = a01;
data[0][2] = a02;
data[0][3] = a03;
data[0][4] = a04;
data[0][5] = a05;
data[1][0] = a10;
data[1][1] = a11;
data[1][2] = a12;
data[1][3] = a13;
data[1][4] = a14;
data[1][5] = a15;
data[2][0] = a20;
data[2][1] = a21;
data[2][2] = a22;
data[2][3] = a23;
data[2][4] = a24;
data[2][5] = a25;
data[3][0] = a30;
data[3][1] = a31;
data[3][2] = a32;
data[3][3] = a33;
data[3][4] = a34;
data[3][5] = a35;
data[4][0] = a40;
data[4][1] = a41;
data[4][2] = a42;
data[4][3] = a43;
data[4][4] = a44;
data[4][5] = a45;
data[5][0] = a50;
data[5][1] = a51;
data[5][2] = a52;
data[5][3] = a53;
data[5][4] = a54;
data[5][5] = a55;
}
};
inline CUDA_CALLABLE float index(const spatial_matrix& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE spatial_matrix add(const spatial_matrix& a, const spatial_matrix& b)
{
spatial_matrix out;
for (int i=0; i < 6; ++i)
for (int j=0; j < 6; ++j)
out.data[i][j] = a.data[i][j] + b.data[i][j];
return out;
}
inline CUDA_CALLABLE spatial_vector mul(const spatial_matrix& a, const spatial_vector& b)
{
spatial_vector out;
for (int i=0; i < 6; ++i)
for (int j=0; j < 6; ++j)
out[i] += a.data[i][j]*b[j];
return out;
}
inline CUDA_CALLABLE spatial_matrix mul(const spatial_matrix& a, const spatial_matrix& b)
{
spatial_matrix out;
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
for (int k=0; k < 6; ++k)
{
out.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return out;
}
inline CUDA_CALLABLE spatial_matrix transpose(const spatial_matrix& a)
{
spatial_matrix out;
for (int i=0; i < 6; i++)
for (int j=0; j < 6; j++)
out.data[i][j] = a.data[j][i];
return out;
}
inline CUDA_CALLABLE spatial_matrix outer(const spatial_vector& a, const spatial_vector& b)
{
spatial_matrix out;
for (int i=0; i < 6; i++)
for (int j=0; j < 6; j++)
out.data[i][j] = a[i]*b[j];
return out;
}
CUDA_CALLABLE void print(spatial_transform t);
CUDA_CALLABLE void print(spatial_matrix m);
inline CUDA_CALLABLE spatial_matrix spatial_adjoint(const mat33& R, const mat33& S)
{
spatial_matrix adT;
// T = [R 0]
// [skew(p)*R R]
// diagonal blocks
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adT.data[i][j] = R.data[i][j];
adT.data[i+3][j+3] = R.data[i][j];
}
}
// lower off diagonal
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adT.data[i+3][j] = S.data[i][j];
}
}
return adT;
}
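// note: in the commented-out spatial_transform_inertia below, the caller builds S = skew(t_inv.p)*R,
// i.e. the lower off-diagonal block shown in the comment above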
inline CUDA_CALLABLE void adj_spatial_adjoint(const mat33& R, const mat33& S, mat33& adj_R, mat33& adj_S, const spatial_matrix& adj_ret)
{
// diagonal blocks
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_R.data[i][j] += adj_ret.data[i][j];
adj_R.data[i][j] += adj_ret.data[i+3][j+3];
}
}
// lower off diagonal
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_S.data[i][j] += adj_ret.data[i+3][j];
}
}
}
/*
// computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
inline CUDA_CALLABLE spatial_matrix spatial_transform_inertia(const spatial_transform& t, const spatial_matrix& I)
{
spatial_transform t_inv = spatial_transform_inverse(t);
float3 r1 = rotate(t_inv.q, float3(1.0, 0.0, 0.0));
float3 r2 = rotate(t_inv.q, float3(0.0, 1.0, 0.0));
float3 r3 = rotate(t_inv.q, float3(0.0, 0.0, 1.0));
mat33 R(r1, r2, r3);
mat33 S = mul(skew(t_inv.p), R);
spatial_matrix T = spatial_adjoint(R, S);
// first quadratic form, for derivation of the adjoint see https://people.maths.ox.ac.uk/gilesm/files/AD2008.pdf, section 2.3.2
return mul(mul(transpose(T), I), T);
}
*/
inline CUDA_CALLABLE void adj_add(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_vector& b, spatial_matrix& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_a += outer(adj_ret, b);
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
adj_a += mul(adj_ret, transpose(b));
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_transpose(const spatial_matrix& a, spatial_matrix& adj_a, const spatial_matrix& adj_ret)
{
adj_a += transpose(adj_ret);
}
inline CUDA_CALLABLE void adj_spatial_transform_inertia(
const spatial_transform& xform, const spatial_matrix& I,
const spatial_transform& adj_xform, const spatial_matrix& adj_I,
spatial_matrix& adj_ret)
{
//printf("todo, %s, %d\n", __FILE__, __LINE__);
}
inline void CUDA_CALLABLE adj_index(const spatial_matrix& m, int row, int col, spatial_matrix& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_matrix* addr, const spatial_matrix& value)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
atomicAdd(&addr->data[i][j], value.data[i][j]);
}
}
}
#endif
CUDA_CALLABLE inline int row_index(int stride, int i, int j)
{
return i*stride + j;
}
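// row-major flattening of a (row, col) entry, e.g. row_index(stride, 1, 2) == stride + 2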
// builds the spatial Jacobian J, a (joint_count*6) x (dof_count) matrix
CUDA_CALLABLE inline void spatial_jacobian(
const spatial_vector* S,
const int* joint_parents,
const int* joint_qd_start,
int joint_start, // offset of the first joint for the articulation
int joint_count,
int J_start,
float* J)
{
const int articulation_dof_start = joint_qd_start[joint_start];
const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
const int articulation_dof_count = articulation_dof_end-articulation_dof_start;
// shift output pointers
const int S_start = articulation_dof_start;
S += S_start;
J += J_start;
for (int i=0; i < joint_count; ++i)
{
const int row_start = i * 6;
int j = joint_start + i;
while (j != -1)
{
const int joint_dof_start = joint_qd_start[j];
const int joint_dof_end = joint_qd_start[j+1];
const int joint_dof_count = joint_dof_end-joint_dof_start;
// fill out each row of the Jacobian walking up the tree
//for (int col=dof_start; col < dof_end; ++col)
for (int dof=0; dof < joint_dof_count; ++dof)
{
const int col = (joint_dof_start-articulation_dof_start) + dof;
J[row_index(articulation_dof_count, row_start+0, col)] = S[col].w.x;
J[row_index(articulation_dof_count, row_start+1, col)] = S[col].w.y;
J[row_index(articulation_dof_count, row_start+2, col)] = S[col].w.z;
J[row_index(articulation_dof_count, row_start+3, col)] = S[col].v.x;
J[row_index(articulation_dof_count, row_start+4, col)] = S[col].v.y;
J[row_index(articulation_dof_count, row_start+5, col)] = S[col].v.z;
}
j = joint_parents[j];
}
}
}
CUDA_CALLABLE inline void adj_spatial_jacobian(
const spatial_vector* S,
const int* joint_parents,
const int* joint_qd_start,
const int joint_start,
const int joint_count,
const int J_start,
const float* J,
// adjs
spatial_vector* adj_S,
int* adj_joint_parents,
int* adj_joint_qd_start,
int& adj_joint_start,
int& adj_joint_count,
int& adj_J_start,
const float* adj_J)
{
const int articulation_dof_start = joint_qd_start[joint_start];
const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
const int articulation_dof_count = articulation_dof_end-articulation_dof_start;
// shift output pointers
const int S_start = articulation_dof_start;
S += S_start;
J += J_start;
adj_S += S_start;
adj_J += J_start;
for (int i=0; i < joint_count; ++i)
{
const int row_start = i * 6;
int j = joint_start + i;
while (j != -1)
{
const int joint_dof_start = joint_qd_start[j];
const int joint_dof_end = joint_qd_start[j+1];
const int joint_dof_count = joint_dof_end-joint_dof_start;
// fill out each row of the Jacobian walking up the tree
//for (int col=dof_start; col < dof_end; ++col)
for (int dof=0; dof < joint_dof_count; ++dof)
{
const int col = (joint_dof_start-articulation_dof_start) + dof;
adj_S[col].w.x += adj_J[row_index(articulation_dof_count, row_start+0, col)];
adj_S[col].w.y += adj_J[row_index(articulation_dof_count, row_start+1, col)];
adj_S[col].w.z += adj_J[row_index(articulation_dof_count, row_start+2, col)];
adj_S[col].v.x += adj_J[row_index(articulation_dof_count, row_start+3, col)];
adj_S[col].v.y += adj_J[row_index(articulation_dof_count, row_start+4, col)];
adj_S[col].v.z += adj_J[row_index(articulation_dof_count, row_start+5, col)];
}
j = joint_parents[j];
}
}
}
CUDA_CALLABLE inline void spatial_mass(const spatial_matrix* I_s, int joint_start, int joint_count, int M_start, float* M)
{
const int stride = joint_count*6;
for (int l=0; l < joint_count; ++l)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
M[M_start + row_index(stride, l*6 + i, l*6 + j)] = I_s[joint_start + l].data[i][j];
}
}
}
}
CUDA_CALLABLE inline void adj_spatial_mass(
const spatial_matrix* I_s,
const int joint_start,
const int joint_count,
const int M_start,
const float* M,
spatial_matrix* adj_I_s,
int& adj_joint_start,
int& adj_joint_count,
int& adj_M_start,
const float* adj_M)
{
const int stride = joint_count*6;
for (int l=0; l < joint_count; ++l)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
adj_I_s[joint_start + l].data[i][j] += adj_M[M_start + row_index(stride, l*6 + i, l*6 + j)];
}
}
}
}
| 24,501 | C | 28.099762 | 198 | 0.594057 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/mat22.h | #pragma once
//----------------------------------------------------------
// mat22
struct mat22
{
inline CUDA_CALLABLE mat22(float m00=0.0f, float m01=0.0f, float m10=0.0f, float m11=0.0f)
{
data[0][0] = m00;
data[1][0] = m10;
data[0][1] = m01;
data[1][1] = m11;
}
// row major storage assumed to be compatible with PyTorch
float data[2][2];
};
#ifdef CUDA
inline __device__ void atomic_add(mat22 * addr, mat22 value) {
// *addr += value;
atomicAdd(&((addr -> data)[0][0]), value.data[0][0]);
atomicAdd(&((addr -> data)[0][1]), value.data[0][1]);
atomicAdd(&((addr -> data)[1][0]), value.data[1][0]);
atomicAdd(&((addr -> data)[1][1]), value.data[1][1]);
}
#endif
inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE float index(const mat22& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE mat22 add(const mat22& a, const mat22& b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[i][j] + b.data[i][j];
}
}
return t;
}
inline CUDA_CALLABLE mat22 mul(const mat22& a, float b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[i][j]*b;
}
}
return t;
}
inline CUDA_CALLABLE mat22 mul(const mat22& a, const mat22& b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
for (int k=0; k < 2; ++k)
{
t.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return t;
}
inline CUDA_CALLABLE mat22 transpose(const mat22& a)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[j][i];
}
}
return t;
}
inline CUDA_CALLABLE float determinant(const mat22& m)
{
return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1];
}
inline void CUDA_CALLABLE adj_index(const mat22& m, int row, int col, mat22& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
inline CUDA_CALLABLE void adj_add(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret)
{
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
            adj_a.data[i][j] += adj_ret.data[i][j];
            adj_b.data[i][j] += adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE void adj_transpose(const mat22& a, mat22& adj_a, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE void adj_determinant(const mat22& m, mat22& adj_m, float adj_ret)
{
adj_m.data[0][0] += m.data[1][1]*adj_ret;
adj_m.data[1][1] += m.data[0][0]*adj_ret;
adj_m.data[0][1] -= m.data[1][0]*adj_ret;
adj_m.data[1][0] -= m.data[0][1]*adj_ret;
}
| 3,206 | C | 21.744681 | 165 | 0.515908 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/vec2.h | #pragma once
struct float2
{
float x;
float y;
}; | 58 | C | 7.42857 | 13 | 0.586207 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/util.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
print(s)
global log_output
log_output = log_output + s + "\n"
# short hands
def length(a):
return np.linalg.norm(a)
def length_sq(a):
return np.dot(a, a)
# NumPy has no normalize() method..
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0.0:
return v
return v / norm
def skew(v):
return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
# math utils
def quat(i, j, k, w):
return np.array([i, j, k, w])
def quat_identity():
return np.array((0.0, 0.0, 0.0, 1.0))
def quat_inverse(q):
return np.array((-q[0], -q[1], -q[2], q[3]))
def quat_from_axis_angle(axis, angle):
v = np.array(axis)
half = angle * 0.5
w = math.cos(half)
sin_theta_over_two = math.sin(half)
v *= sin_theta_over_two
return np.array((v[0], v[1], v[2], w))
# rotate a vector
def quat_rotate(q, x):
x = np.array(x)
axis = np.array((q[0], q[1], q[2]))
return x * (2.0 * q[3] * q[3] - 1.0) + np.cross(axis, x) * q[3] * 2.0 + axis * np.dot(axis, x) * 2.0
# multiply two quats
def quat_multiply(a, b):
return np.array((a[3] * b[0] + b[3] * a[0] + a[1] * b[2] - b[1] * a[2],
a[3] * b[1] + b[3] * a[1] + a[2] * b[0] - b[2] * a[0],
a[3] * b[2] + b[3] * a[2] + a[0] * b[1] - b[0] * a[1],
a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2]))
# convert to mat33
def quat_to_matrix(q):
c1 = quat_rotate(q, np.array((1.0, 0.0, 0.0)))
c2 = quat_rotate(q, np.array((0.0, 1.0, 0.0)))
c3 = quat_rotate(q, np.array((0.0, 0.0, 1.0)))
return np.array([c1, c2, c3]).T
def quat_rpy(roll, pitch, yaw):
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
w = (cy * cr * cp + sy * sr * sp)
x = (cy * sr * cp - sy * cr * sp)
y = (cy * cr * sp + sy * sr * cp)
z = (sy * cr * cp - cy * sr * sp)
return (x, y, z, w)
def quat_from_matrix(m):
tr = m[0, 0] + m[1, 1] + m[2, 2]
h = 0.0
if(tr >= 0.0):
h = math.sqrt(tr + 1.0)
w = 0.5 * h
h = 0.5 / h
x = (m[2, 1] - m[1, 2]) * h
y = (m[0, 2] - m[2, 0]) * h
z = (m[1, 0] - m[0, 1]) * h
else:
        i = 0
        if(m[1, 1] > m[0, 0]):
            i = 1
        if(m[2, 2] > m[i, i]):
            i = 2
if (i == 0):
h = math.sqrt((m[0, 0] - (m[1, 1] + m[2, 2])) + 1.0)
x = 0.5 * h
h = 0.5 / h
y = (m[0, 1] + m[1, 0]) * h
z = (m[2, 0] + m[0, 2]) * h
w = (m[2, 1] - m[1, 2]) * h
elif (i == 1):
            h = math.sqrt((m[1, 1] - (m[2, 2] + m[0, 0])) + 1.0)
y = 0.5 * h
h = 0.5 / h
z = (m[1, 2] + m[2, 1]) * h
x = (m[0, 1] + m[1, 0]) * h
w = (m[0, 2] - m[2, 0]) * h
elif (i == 2):
            h = math.sqrt((m[2, 2] - (m[0, 0] + m[1, 1])) + 1.0)
z = 0.5 * h
h = 0.5 / h
x = (m[2, 0] + m[0, 2]) * h
y = (m[1, 2] + m[2, 1]) * h
w = (m[1, 0] - m[0, 1]) * h
return normalize(quat(x, y, z, w))
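# illustrative sanity check (not part of the original source): a matrix round-trip should
# recover the quaternion up to sign, e.g.
#   q = quat_from_axis_angle((0.0, 1.0, 0.0), 0.3)
#   assert np.allclose(quat_from_matrix(quat_to_matrix(q)), q, atol=1.0e-6)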
# rigid body transform
def transform(x, r):
return (np.array(x), np.array(r))
def transform_identity():
return (np.array((0.0, 0.0, 0.0)), quat_identity())
# se(3) -> SE(3), Park & Lynch pg. 105, screw in [w, v] normalized form
def transform_exp(s, angle):
w = np.array(s[0:3])
v = np.array(s[3:6])
if (length(w) < 1.0):
r = quat_identity()
else:
r = quat_from_axis_angle(w, angle)
t = v * angle + (1.0 - math.cos(angle)) * np.cross(w, v) + (angle - math.sin(angle)) * np.cross(w, np.cross(w, v))
return (t, r)
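# illustrative example (not part of the original source): a unit screw about +z with zero pitch
# reduces to a pure rotation, e.g. transform_exp((0.0, 0.0, 1.0, 0.0, 0.0, 0.0), math.pi / 2)
# gives t == (0, 0, 0) and r == quat_from_axis_angle((0.0, 0.0, 1.0), math.pi / 2)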
def transform_inverse(t):
q_inv = quat_inverse(t[1])
return (-quat_rotate(q_inv, t[0]), q_inv)
def transform_vector(t, v):
return quat_rotate(t[1], v)
def transform_point(t, p):
return np.array(t[0]) + quat_rotate(t[1], p)
def transform_multiply(t, u):
return (quat_rotate(t[1], u[0]) + t[0], quat_multiply(t[1], u[1]))
# flatten an array of transforms (p,q) format to a 7-vector
def transform_flatten(t):
return np.array([*t[0], *t[1]])
# expand a 7-vec to a tuple of arrays
def transform_expand(t):
return (np.array(t[0:3]), np.array(t[3:7]))
# convert array of transforms to a array of 7-vecs
def transform_flatten_list(xforms):
exp = lambda t: transform_flatten(t)
return list(map(exp, xforms))
def transform_expand_list(xforms):
exp = lambda t: transform_expand(t)
return list(map(exp, xforms))
def transform_inertia(m, I, p, q):
R = quat_to_matrix(q)
# Steiner's theorem
    return np.dot(np.dot(R, I), R.T) + m * (np.dot(p, p) * np.eye(3) - np.outer(p, p))
# spatial operators
# AdT
def spatial_adjoint(t):
R = quat_to_matrix(t[1])
w = skew(t[0])
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[3:6, 0:3] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# (AdT)^-T
def spatial_adjoint_dual(t):
R = quat_to_matrix(t[1])
w = skew(t[0])
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[0:3, 3:6] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# AdT*s
def transform_twist(t_ab, s_b):
return np.dot(spatial_adjoint(t_ab), s_b)
# AdT^{-T}*s
def transform_wrench(t_ab, f_b):
return np.dot(spatial_adjoint_dual(t_ab), f_b)
# transform spatial inertia (6x6) in b frame to a frame
def transform_spatial_inertia(t_ab, I_b):
t_ba = transform_inverse(t_ab)
# todo: write specialized method
I_a = np.dot(np.dot(spatial_adjoint(t_ba).T, I_b), spatial_adjoint(t_ba))
return I_a
def translate_twist(p_ab, s_b):
w = s_b[0:3]
v = np.cross(p_ab, s_b[0:3]) + s_b[3:6]
return np.array((*w, *v))
def translate_wrench(p_ab, s_b):
w = s_b[0:3] + np.cross(p_ab, s_b[3:6])
v = s_b[3:6]
return np.array((*w, *v))
def spatial_vector(v=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
return np.array(v)
# ad_V pg. 289 L&P, pg. 25 Featherstone
def spatial_cross(a, b):
w = np.cross(a[0:3], b[0:3])
v = np.cross(a[3:6], b[0:3]) + np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
# ad_V^T pg. 290 L&P, pg. 25 Featherstone; note this does not include the sign flip in the definition
def spatial_cross_dual(a, b):
w = np.cross(a[0:3], b[0:3]) + np.cross(a[3:6], b[3:6])
v = np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
def spatial_dot(a, b):
return np.dot(a, b)
def spatial_outer(a, b):
return np.outer(a, b)
def spatial_matrix():
return np.zeros((6, 6))
def spatial_matrix_from_inertia(I, m):
G = spatial_matrix()
G[0:3, 0:3] = I
G[3, 3] = m
G[4, 4] = m
G[5, 5] = m
return G
# solves x = I^(-1)b
def spatial_solve(I, b):
return np.dot(np.linalg.inv(I), b)
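# note: rpy2quat below is identical to quat_rpy above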
def rpy2quat(roll, pitch, yaw):
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
w = cy * cr * cp + sy * sr * sp
x = cy * sr * cp - sy * cr * sp
y = cy * cr * sp + sy * sr * cp
z = sy * cr * cp - cy * sr * sp
return (x, y, z, w)
# helper to retrieve body angular velocity from a twist v_s in se(3)
def get_body_angular_velocity(v_s):
return v_s[0:3]
# helper to compute velocity of a point p on a body given its spatial twist v_s
def get_body_linear_velocity(v_s, p):
dpdt = v_s[3:6] + torch.cross(v_s[0:3], p)
return dpdt
# helper to build a body twist given the angular and linear velocity of
# the center of mass specified in the world frame, returns the body
# twist with respect to the origin (v_s)
def get_body_twist(w_m, v_m, p_m):
lin = v_m + torch.cross(p_m, w_m)
return (*w_m, *lin)
# timer utils
class ScopedTimer:
indent = -1
enabled = True
def __init__(self, name, active=True, detailed=False):
self.name = name
self.active = active and self.enabled
self.detailed = detailed
def __enter__(self):
if (self.active):
self.start = timeit.default_timer()
ScopedTimer.indent += 1
if (self.detailed):
self.cp = cProfile.Profile()
self.cp.clear()
self.cp.enable()
def __exit__(self, exc_type, exc_value, traceback):
if (self.detailed):
self.cp.disable()
self.cp.print_stats(sort='tottime')
if (self.active):
elapsed = (timeit.default_timer() - self.start) * 1000.0
indent = ""
for i in range(ScopedTimer.indent):
indent += "\t"
log("{}{} took {:.2f} ms".format(indent, self.name, elapsed))
ScopedTimer.indent -= 1
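# illustrative usage (not part of the original source; 'integrator', 'model', 'state' and 'dt' are placeholders):
#   with ScopedTimer("forward"):
#       state = integrator.forward(model, state, dt)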
# code snippet for invoking cProfile
# cp = cProfile.Profile()
# cp.enable()
# for i in range(1000):
# self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# cp.disable()
# cp.print_stats(sort='tottime')
# exit(0)
# represents an edge between v0 and v1 with connected faces f0, f1 and opposite vertices o0 and o1
# winding is such that the first tri can be reconstructed as {v0, v1, o0}, and the second tri as { v1, v0, o1 }
class MeshEdge:
def __init__(self, v0, v1, o0, o1, f0, f1):
self.v0 = v0 # vertex 0
self.v1 = v1 # vertex 1
        self.o0 = o0 # vertex opposite the edge in f0
        self.o1 = o1 # vertex opposite the edge in f1
self.f0 = f0 # index of tri1
self.f1 = f1 # index of tri2
class MeshAdjacency:
def __init__(self, indices, num_tris):
# map edges (v0, v1) to faces (f0, f1)
self.edges = {}
self.indices = indices
for index, tri in enumerate(indices):
self.add_edge(tri[0], tri[1], tri[2], index)
self.add_edge(tri[1], tri[2], tri[0], index)
self.add_edge(tri[2], tri[0], tri[1], index)
    def add_edge(self, i0, i1, o, f): # edge vertices i0 and i1, opposite vertex o, index f of the triangle
key = (min(i0, i1), max(i0, i1))
edge = None
if key in self.edges:
edge = self.edges[key]
if (edge.f1 != -1):
print("Detected non-manifold edge")
return
else:
# update other side of the edge
edge.o1 = o
edge.f1 = f
else:
# create new edge with opposite yet to be filled
edge = MeshEdge(i0, i1, o, -1, f, -1)
self.edges[key] = edge
def opposite_vertex(self, edge):
pass
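# illustrative usage (not part of the original source):
#   tris = [(0, 1, 2), (2, 1, 3)]
#   adj = MeshAdjacency(tris, len(tris))
#   the shared edge keyed (1, 2) then stores f0=0, f1=1 and opposite vertices o0=0, o1=3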
def mem_report():
    '''Report the memory usage of tensor storage in PyTorch.
    Tensors on both CPU and GPU are reported.'''
def _mem_report(tensors, mem_type):
        '''Print statistics for the tensors of the given storage type.
        There are two storage types of interest:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining in system memory (usually unimportant)
        Args:
            - tensors: the tensors of the specified type
            - mem_type: 'CPU' or 'GPU' in the current implementation '''
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
gc.collect()
LEN = 65
objects = gc.get_objects()
#print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN)
| 13,134 | Python | 23.056777 | 118 | 0.522994 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/adjoint.h | #pragma once
#include <cmath>
#include <stdio.h>
#ifdef CPU
#define CUDA_CALLABLE
#define __device__
#define __host__
#define __constant__
#elif defined(CUDA)
#define CUDA_CALLABLE __device__
#include <cuda.h>
#include <cuda_runtime_api.h>
#define check_cuda(code) { check_cuda_impl(code, __FILE__, __LINE__); }
void check_cuda_impl(cudaError_t code, const char* file, int line)
{
if (code != cudaSuccess)
{
printf("CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
}
}
void print_device()
{
int currentDevice;
    cudaError_t err = cudaGetDevice(&currentDevice);
cudaDeviceProp props;
err = cudaGetDeviceProperties(&props, currentDevice);
if (err != cudaSuccess)
printf("CUDA error: %d\n", err);
else
printf("%s\n", props.name);
}
#endif
#ifdef _WIN32
#define __restrict__ __restrict
#endif
#define FP_CHECK 0
namespace df
{
template <typename T>
CUDA_CALLABLE float cast_float(T x) { return (float)(x); }
template <typename T>
CUDA_CALLABLE int cast_int(T x) { return (int)(x); }
template <typename T>
CUDA_CALLABLE void adj_cast_float(T x, T& adj_x, float adj_ret) { adj_x += adj_ret; }
template <typename T>
CUDA_CALLABLE void adj_cast_int(T x, T& adj_x, int adj_ret) { adj_x += adj_ret; }
// avoid namespacing of float type for casting to float type, this is to avoid wp::float(x), which is not valid in C++
#define float(x) cast_float(x)
#define adj_float(x, adj_x, adj_ret) adj_cast_float(x, adj_x, adj_ret)
#define int(x) cast_int(x)
#define adj_int(x, adj_x, adj_ret) adj_cast_int(x, adj_x, adj_ret)
#define kEps 0.0f
// basic ops for integer types
inline CUDA_CALLABLE int mul(int a, int b) { return a*b; }
inline CUDA_CALLABLE int div(int a, int b) { return a/b; }
inline CUDA_CALLABLE int add(int a, int b) { return a+b; }
inline CUDA_CALLABLE int sub(int a, int b) { return a-b; }
inline CUDA_CALLABLE int mod(int a, int b) { return a % b; }
inline CUDA_CALLABLE void adj_mul(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_div(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_add(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_sub(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_mod(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
// basic ops for float types
inline CUDA_CALLABLE float mul(float a, float b) { return a*b; }
inline CUDA_CALLABLE float div(float a, float b) { return a/b; }
inline CUDA_CALLABLE float add(float a, float b) { return a+b; }
inline CUDA_CALLABLE float sub(float a, float b) { return a-b; }
inline CUDA_CALLABLE float min(float a, float b) { return a<b?a:b; }
inline CUDA_CALLABLE float max(float a, float b) { return a>b?a:b; }
inline CUDA_CALLABLE float leaky_min(float a, float b, float r) { return min(a, b); }
inline CUDA_CALLABLE float leaky_max(float a, float b, float r) { return max(a, b); }
inline CUDA_CALLABLE float clamp(float x, float a, float b) { return min(max(a, x), b); }
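// note: step(x) returns 1.0 for x < 0.0 and 0.0 otherwise; kernels use step(0.0 - inv_mass) as a mass gate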
inline CUDA_CALLABLE float step(float x) { return x < 0.0 ? 1.0 : 0.0; }
inline CUDA_CALLABLE float sign(float x) { return x < 0.0 ? -1.0 : 1.0; }
inline CUDA_CALLABLE float abs(float x) { return fabsf(x); }
inline CUDA_CALLABLE float nonzero(float x) { return x == 0.0 ? 0.0 : 1.0; }
inline CUDA_CALLABLE float acos(float x) { return std::acos(std::min(std::max(x, -1.0f), 1.0f)); }
inline CUDA_CALLABLE float sin(float x) { return std::sin(x); }
inline CUDA_CALLABLE float cos(float x) { return std::cos(x); }
inline CUDA_CALLABLE float sqrt(float x) { return std::sqrt(x); }
inline CUDA_CALLABLE void adj_mul(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; }
inline CUDA_CALLABLE void adj_div(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret/b; adj_b -= adj_ret*(a/b)/b; }
inline CUDA_CALLABLE void adj_add(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b += adj_ret; }
inline CUDA_CALLABLE void adj_sub(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; }
// inline CUDA_CALLABLE bool lt(float a, float b) { return a < b; }
// inline CUDA_CALLABLE bool gt(float a, float b) { return a > b; }
// inline CUDA_CALLABLE bool lte(float a, float b) { return a <= b; }
// inline CUDA_CALLABLE bool gte(float a, float b) { return a >= b; }
// inline CUDA_CALLABLE bool eq(float a, float b) { return a == b; }
// inline CUDA_CALLABLE bool neq(float a, float b) { return a != b; }
// inline CUDA_CALLABLE bool adj_lt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_gt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_lte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_gte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_eq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_neq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
inline CUDA_CALLABLE void adj_min(float a, float b, float& adj_a, float& adj_b, float adj_ret)
{
if (a < b)
adj_a += adj_ret;
else
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_max(float a, float b, float& adj_a, float& adj_b, float adj_ret)
{
if (a > b)
adj_a += adj_ret;
else
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_leaky_min(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret)
{
if (a < b)
adj_a += adj_ret;
else
{
adj_a += r*adj_ret;
adj_b += adj_ret;
}
}
inline CUDA_CALLABLE void adj_leaky_max(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret)
{
if (a > b)
adj_a += adj_ret;
else
{
adj_a += r*adj_ret;
adj_b += adj_ret;
}
}
inline CUDA_CALLABLE void adj_clamp(float x, float a, float b, float& adj_x, float& adj_a, float& adj_b, float adj_ret)
{
if (x < a)
adj_a += adj_ret;
else if (x > b)
adj_b += adj_ret;
else
adj_x += adj_ret;
}
inline CUDA_CALLABLE void adj_step(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_nonzero(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_sign(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_abs(float x, float& adj_x, float adj_ret)
{
if (x < 0.0)
adj_x -= adj_ret;
else
adj_x += adj_ret;
}
inline CUDA_CALLABLE void adj_acos(float x, float& adj_x, float adj_ret)
{
float d = sqrt(1.0-x*x);
if (d > 0.0)
adj_x -= (1.0/d)*adj_ret;
}
inline CUDA_CALLABLE void adj_sin(float x, float& adj_x, float adj_ret)
{
adj_x += std::cos(x)*adj_ret;
}
inline CUDA_CALLABLE void adj_cos(float x, float& adj_x, float adj_ret)
{
adj_x -= std::sin(x)*adj_ret;
}
inline CUDA_CALLABLE void adj_sqrt(float x, float& adj_x, float adj_ret)
{
adj_x += 0.5f*(1.0/std::sqrt(x))*adj_ret;
}
template <typename T>
CUDA_CALLABLE inline T select(bool cond, const T& a, const T& b) { return cond?b:a; }
template <typename T>
CUDA_CALLABLE inline void adj_select(bool cond, const T& a, const T& b, bool& adj_cond, T& adj_a, T& adj_b, const T& adj_ret)
{
if (cond)
adj_b += adj_ret;
else
adj_a += adj_ret;
}
// some helpful operator overloads (just for C++ use, these are not adjointed)
template <typename T>
CUDA_CALLABLE T& operator += (T& a, const T& b) { a = add(a, b); return a; }
template <typename T>
CUDA_CALLABLE T& operator -= (T& a, const T& b) { a = sub(a, b); return a; }
template <typename T>
CUDA_CALLABLE T operator*(const T& a, float s) { return mul(a, s); }
template <typename T>
CUDA_CALLABLE T operator/(const T& a, float s) { return div(a, s); }
template <typename T>
CUDA_CALLABLE T operator+(const T& a, const T& b) { return add(a, b); }
template <typename T>
CUDA_CALLABLE T operator-(const T& a, const T& b) { return sub(a, b); }
// for single thread CPU only
static int s_threadIdx;
inline CUDA_CALLABLE int tid()
{
#ifdef CPU
return s_threadIdx;
#elif defined(CUDA)
return blockDim.x * blockIdx.x + threadIdx.x;
#endif
}
#include "vec2.h"
#include "vec3.h"
#include "mat22.h"
#include "mat33.h"
#include "matnn.h"
#include "quat.h"
#include "spatial.h"
//--------------
template<typename T>
inline CUDA_CALLABLE T load(T* buf, int index)
{
assert(buf);
return buf[index];
}
template<typename T>
inline CUDA_CALLABLE void store(T* buf, int index, T value)
{
// allow NULL buffers for case where gradients are not required
if (buf)
{
buf[index] = value;
}
}
#ifdef CUDA
template<typename T>
inline __device__ void atomic_add(T* buf, T value)
{
atomicAdd(buf, value);
}
#endif
template<typename T>
inline __device__ void atomic_add(T* buf, int index, T value)
{
if (buf)
{
// CPU mode is sequential so just add
#ifdef CPU
buf[index] += value;
#elif defined(CUDA)
atomic_add(buf + index, value);
#endif
}
}
template<typename T>
inline __device__ void atomic_sub(T* buf, int index, T value)
{
if (buf)
{
// CPU mode is sequential so just add
#ifdef CPU
buf[index] -= value;
#elif defined(CUDA)
atomic_add(buf + index, -value);
#endif
}
}
template <typename T>
inline CUDA_CALLABLE void adj_load(T* buf, int index, T* adj_buf, int& adj_index, const T& adj_output)
{
// allow NULL buffers for case where gradients are not required
if (adj_buf) {
#ifdef CPU
adj_buf[index] += adj_output; // does not need to be atomic if single-threaded
#elif defined(CUDA)
atomic_add(adj_buf, index, adj_output);
#endif
}
}
template <typename T>
inline CUDA_CALLABLE void adj_store(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
adj_value += adj_buf[index]; // doesn't need to be atomic because it's used to load from a buffer onto the stack
}
template<typename T>
inline CUDA_CALLABLE void adj_atomic_add(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
if (adj_buf) { // cannot be atomic because used locally
adj_value += adj_buf[index];
}
}
template<typename T>
inline CUDA_CALLABLE void adj_atomic_sub(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
if (adj_buf) { // cannot be atomic because used locally
adj_value -= adj_buf[index];
}
}
//-------------------------
// Texture methods
inline CUDA_CALLABLE float sdf_sample(float3 x)
{
return 0.0;
}
inline CUDA_CALLABLE float3 sdf_grad(float3 x)
{
return float3();
}
inline CUDA_CALLABLE void adj_sdf_sample(float3 x, float3& adj_x, float adj_ret)
{
}
inline CUDA_CALLABLE void adj_sdf_grad(float3 x, float3& adj_x, float3& adj_ret)
{
}
inline CUDA_CALLABLE void print(int i)
{
printf("%d\n", i);
}
inline CUDA_CALLABLE void print(float i)
{
printf("%f\n", i);
}
inline CUDA_CALLABLE void print(float3 i)
{
printf("%f %f %f\n", i.x, i.y, i.z);
}
inline CUDA_CALLABLE void print(quat i)
{
printf("%f %f %f %f\n", i.x, i.y, i.z, i.w);
}
inline CUDA_CALLABLE void print(mat22 m)
{
printf("%f %f\n%f %f\n", m.data[0][0], m.data[0][1],
m.data[1][0], m.data[1][1]);
}
inline CUDA_CALLABLE void print(mat33 m)
{
printf("%f %f %f\n%f %f %f\n%f %f %f\n", m.data[0][0], m.data[0][1], m.data[0][2],
m.data[1][0], m.data[1][1], m.data[1][2],
m.data[2][0], m.data[2][1], m.data[2][2]);
}
inline CUDA_CALLABLE void print(spatial_transform t)
{
printf("(%f %f %f) (%f %f %f %f)\n", t.p.x, t.p.y, t.p.z, t.q.x, t.q.y, t.q.z, t.q.w);
}
inline CUDA_CALLABLE void print(spatial_vector v)
{
printf("(%f %f %f) (%f %f %f)\n", v.w.x, v.w.y, v.w.z, v.v.x, v.v.y, v.v.z);
}
inline CUDA_CALLABLE void print(spatial_matrix m)
{
printf("%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n",
m.data[0][0], m.data[0][1], m.data[0][2], m.data[0][3], m.data[0][4], m.data[0][5],
m.data[1][0], m.data[1][1], m.data[1][2], m.data[1][3], m.data[1][4], m.data[1][5],
m.data[2][0], m.data[2][1], m.data[2][2], m.data[2][3], m.data[2][4], m.data[2][5],
m.data[3][0], m.data[3][1], m.data[3][2], m.data[3][3], m.data[3][4], m.data[3][5],
m.data[4][0], m.data[4][1], m.data[4][2], m.data[4][3], m.data[4][4], m.data[4][5],
m.data[5][0], m.data[5][1], m.data[5][2], m.data[5][3], m.data[5][4], m.data[5][5]);
}
inline CUDA_CALLABLE void adj_print(int i, int& adj_i) { printf("%d adj: %d\n", i, adj_i); }
inline CUDA_CALLABLE void adj_print(float i, float& adj_i) { printf("%f adj: %f\n", i, adj_i); }
inline CUDA_CALLABLE void adj_print(float3 i, float3& adj_i) { printf("%f %f %f adj: %f %f %f \n", i.x, i.y, i.z, adj_i.x, adj_i.y, adj_i.z); }
inline CUDA_CALLABLE void adj_print(quat i, quat& adj_i) { }
inline CUDA_CALLABLE void adj_print(mat22 m, mat22& adj_m) { }
inline CUDA_CALLABLE void adj_print(mat33 m, mat33& adj_m) { }
inline CUDA_CALLABLE void adj_print(spatial_transform t, spatial_transform& adj_t) {}
inline CUDA_CALLABLE void adj_print(spatial_vector t, spatial_vector& adj_t) {}
inline CUDA_CALLABLE void adj_print(spatial_matrix t, spatial_matrix& adj_t) {}
} // namespace df | 13,946 | C | 29.05819 | 144 | 0.608992 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/config.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
no_grad = False # disable adjoint tracking
check_grad = False # will perform numeric gradient checking after each launch
verify_fp = False # verify inputs and outputs are finite after each launch
| 650 | Python | 49.076919 | 82 | 0.783077 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/quat.h | #pragma once
struct quat
{
// imaginary part
float x;
float y;
float z;
// real part
float w;
inline CUDA_CALLABLE quat(float x=0.0f, float y=0.0f, float z=0.0f, float w=0.0) : x(x), y(y), z(z), w(w) {}
explicit inline CUDA_CALLABLE quat(const float3& v, float w=0.0f) : x(v.x), y(v.y), z(v.z), w(w) {}
};
#ifdef CUDA
inline __device__ void atomic_add(quat * addr, quat value) {
atomicAdd(&(addr -> x), value.x);
atomicAdd(&(addr -> y), value.y);
atomicAdd(&(addr -> z), value.z);
atomicAdd(&(addr -> w), value.w);
}
#endif
inline CUDA_CALLABLE void adj_quat(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, quat adj_ret)
{
adj_x += adj_ret.x;
adj_y += adj_ret.y;
adj_z += adj_ret.z;
adj_w += adj_ret.w;
}
inline CUDA_CALLABLE void adj_quat(const float3& v, float w, float3& adj_v, float& adj_w, quat adj_ret)
{
adj_v.x += adj_ret.x;
adj_v.y += adj_ret.y;
adj_v.z += adj_ret.z;
adj_w += adj_ret.w;
}
// forward methods
inline CUDA_CALLABLE quat quat_from_axis_angle(const float3& axis, float angle)
{
float half = angle*0.5f;
float w = cosf(half);
float sin_theta_over_two = sinf(half);
float3 v = axis*sin_theta_over_two;
return quat(v.x, v.y, v.z, w);
}
inline CUDA_CALLABLE quat quat_identity()
{
return quat(0.0f, 0.0f, 0.0f, 1.0f);
}
inline CUDA_CALLABLE float dot(const quat& a, const quat& b)
{
return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
}
inline CUDA_CALLABLE float length(const quat& q)
{
return sqrtf(dot(q, q));
}
inline CUDA_CALLABLE quat normalize(const quat& q)
{
float l = length(q);
if (l > kEps)
{
float inv_l = 1.0f/l;
return quat(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l);
}
else
{
return quat(0.0f, 0.0f, 0.0f, 1.0f);
}
}
inline CUDA_CALLABLE quat inverse(const quat& q)
{
return quat(-q.x, -q.y, -q.z, q.w);
}
inline CUDA_CALLABLE quat add(const quat& a, const quat& b)
{
return quat(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
}
inline CUDA_CALLABLE quat sub(const quat& a, const quat& b)
{
    return quat(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
}
inline CUDA_CALLABLE quat mul(const quat& a, const quat& b)
{
return quat(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z,
a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x,
a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y,
a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z);
}
inline CUDA_CALLABLE quat mul(const quat& a, float s)
{
return quat(a.x*s, a.y*s, a.z*s, a.w*s);
}
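// rotate a vector by a unit quaternion: the expanded form of q*(x, 0)*q^-1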
inline CUDA_CALLABLE float3 rotate(const quat& q, const float3& x)
{
return x*(2.0f*q.w*q.w-1.0f) + cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f;
}
inline CUDA_CALLABLE float3 rotate_inv(const quat& q, const float3& x)
{
return x*(2.0f*q.w*q.w-1.0f) - cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f;
}
inline CUDA_CALLABLE float index(const quat& a, int idx)
{
#if FP_CHECK
if (idx < 0 || idx > 3)
{
printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
exit(1);
}
#endif
return (&a.x)[idx];
}
inline CUDA_CALLABLE void adj_index(const quat& a, int idx, quat& adj_a, int & adj_idx, float & adj_ret)
{
#if FP_CHECK
if (idx < 0 || idx > 3)
{
printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
exit(1);
}
#endif
(&adj_a.x)[idx] += adj_ret;
}
// backward methods
inline CUDA_CALLABLE void adj_quat_from_axis_angle(const float3& axis, float angle, float3& adj_axis, float& adj_angle, const quat& adj_ret)
{
float3 v = float3(adj_ret.x, adj_ret.y, adj_ret.z);
float s = sinf(angle*0.5f);
float c = cosf(angle*0.5f);
quat dqda = quat(axis.x*c, axis.y*c, axis.z*c, -s)*0.5f;
adj_axis += v*s;
adj_angle += dot(dqda, adj_ret);
}
inline CUDA_CALLABLE void adj_quat_identity(const quat& adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_dot(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const float adj_ret)
{
adj_a += b*adj_ret;
adj_b += a*adj_ret;
}
inline CUDA_CALLABLE void adj_length(const quat& a, quat& adj_a, const float adj_ret)
{
adj_a += normalize(a)*adj_ret;
}
inline CUDA_CALLABLE void adj_normalize(const quat& q, quat& adj_q, const quat& adj_ret)
{
float l = length(q);
if (l > kEps)
{
float l_inv = 1.0f/l;
adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret));
}
}
inline CUDA_CALLABLE void adj_inverse(const quat& q, quat& adj_q, const quat& adj_ret)
{
adj_q.x -= adj_ret.x;
adj_q.y -= adj_ret.y;
adj_q.z -= adj_ret.z;
adj_q.w += adj_ret.w;
}
// inline void adj_normalize(const quat& a, quat& adj_a, const quat& adj_ret)
// {
// float d = length(a);
// if (d > kEps)
// {
// float invd = 1.0f/d;
// quat ahat = normalize(a);
// adj_a += (adj_ret - ahat*(dot(ahat, adj_ret))*invd);
// //if (!isfinite(adj_a))
// // printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z);
// }
// }
inline CUDA_CALLABLE void adj_add(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_sub(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
adj_a += adj_ret;
adj_b -= adj_ret;
}
inline CUDA_CALLABLE void adj_mul(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
// shorthand
const quat& r = adj_ret;
adj_a += quat(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y,
b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x,
b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w,
b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z);
adj_b += quat(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y,
a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x,
a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w,
a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z);
}
inline CUDA_CALLABLE void adj_mul(const quat& a, float s, quat& adj_a, float& adj_s, const quat& adj_ret)
{
adj_a += adj_ret*s;
adj_s += dot(a, adj_ret);
}
inline CUDA_CALLABLE void adj_rotate(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret)
{
const float3& r = adj_ret;
{
float t2 = p.z*q.z*2.0f;
float t3 = p.y*q.w*2.0f;
float t4 = p.x*q.w*2.0f;
float t5 = p.x*q.x*2.0f;
float t6 = p.y*q.y*2.0f;
float t7 = p.z*q.y*2.0f;
float t8 = p.x*q.z*2.0f;
float t9 = p.x*q.y*2.0f;
float t10 = p.y*q.x*2.0f;
adj_q.x += r.z*(t3+t8)+r.x*(t2+t6+p.x*q.x*4.0f)+r.y*(t9-p.z*q.w*2.0f);
adj_q.y += r.y*(t2+t5+p.y*q.y*4.0f)+r.x*(t10+p.z*q.w*2.0f)-r.z*(t4-p.y*q.z*2.0f);
adj_q.z += r.y*(t4+t7)+r.z*(t5+t6+p.z*q.z*4.0f)-r.x*(t3-p.z*q.x*2.0f);
adj_q.w += r.x*(t7+p.x*q.w*4.0f-p.y*q.z*2.0f)+r.y*(t8+p.y*q.w*4.0f-p.z*q.x*2.0f)+r.z*(-t9+t10+p.z*q.w*4.0f);
}
{
float t2 = q.w*q.w;
float t3 = t2*2.0f;
float t4 = q.w*q.z*2.0f;
float t5 = q.x*q.y*2.0f;
float t6 = q.w*q.y*2.0f;
float t7 = q.w*q.x*2.0f;
float t8 = q.y*q.z*2.0f;
adj_p.x += r.y*(t4+t5)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.z*(t6-q.x*q.z*2.0f);
adj_p.y += r.z*(t7+t8)-r.x*(t4-t5)+r.y*(t3+(q.y*q.y)*2.0f-1.0f);
adj_p.z += -r.y*(t7-t8)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.x*(t6+q.x*q.z*2.0f);
}
}
inline CUDA_CALLABLE void adj_rotate_inv(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret)
{
const float3& r = adj_ret;
{
float t2 = p.z*q.w*2.0f;
float t3 = p.z*q.z*2.0f;
float t4 = p.y*q.w*2.0f;
float t5 = p.x*q.w*2.0f;
float t6 = p.x*q.x*2.0f;
float t7 = p.y*q.y*2.0f;
float t8 = p.y*q.z*2.0f;
float t9 = p.z*q.x*2.0f;
float t10 = p.x*q.y*2.0f;
adj_q.x += r.y*(t2+t10)+r.x*(t3+t7+p.x*q.x*4.0f)-r.z*(t4-p.x*q.z*2.0f);
adj_q.y += r.z*(t5+t8)+r.y*(t3+t6+p.y*q.y*4.0f)-r.x*(t2-p.y*q.x*2.0f);
adj_q.z += r.x*(t4+t9)+r.z*(t6+t7+p.z*q.z*4.0f)-r.y*(t5-p.z*q.y*2.0f);
adj_q.w += r.x*(t8+p.x*q.w*4.0f-p.z*q.y*2.0f)+r.y*(t9+p.y*q.w*4.0f-p.x*q.z*2.0f)+r.z*(t10-p.y*q.x*2.0f+p.z*q.w*4.0f);
}
{
float t2 = q.w*q.w;
float t3 = t2*2.0f;
float t4 = q.w*q.z*2.0f;
float t5 = q.w*q.y*2.0f;
float t6 = q.x*q.z*2.0f;
float t7 = q.w*q.x*2.0f;
adj_p.x += r.z*(t5+t6)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.y*(t4-q.x*q.y*2.0f);
adj_p.y += r.y*(t3+(q.y*q.y)*2.0f-1.0f)+r.x*(t4+q.x*q.y*2.0f)-r.z*(t7-q.y*q.z*2.0f);
adj_p.z += -r.x*(t5-t6)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.y*(t7+q.y*q.z*2.0f);
}
} | 8,985 | C | 26.993769 | 184 | 0.522315 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/vec3.h | #pragma once
struct float3
{
float x;
float y;
float z;
inline CUDA_CALLABLE float3(float x=0.0f, float y=0.0f, float z=0.0f) : x(x), y(y), z(z) {}
explicit inline CUDA_CALLABLE float3(const float* p) : x(p[0]), y(p[1]), z(p[2]) {}
};
//--------------
// float3 methods
inline CUDA_CALLABLE float3 operator - (float3 a)
{
return { -a.x, -a.y, -a.z };
}
inline CUDA_CALLABLE float3 mul(float3 a, float s)
{
return { a.x*s, a.y*s, a.z*s };
}
inline CUDA_CALLABLE float3 div(float3 a, float s)
{
return { a.x/s, a.y/s, a.z/s };
}
inline CUDA_CALLABLE float3 add(float3 a, float3 b)
{
return { a.x+b.x, a.y+b.y, a.z+b.z };
}
inline CUDA_CALLABLE float3 add(float3 a, float s)
{
return { a.x + s, a.y + s, a.z + s };
}
inline CUDA_CALLABLE float3 sub(float3 a, float3 b)
{
return { a.x-b.x, a.y-b.y, a.z-b.z };
}
inline CUDA_CALLABLE float dot(float3 a, float3 b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
inline CUDA_CALLABLE float3 cross(float3 a, float3 b)
{
float3 c;
c.x = a.y*b.z - a.z*b.y;
c.y = a.z*b.x - a.x*b.z;
c.z = a.x*b.y - a.y*b.x;
return c;
}
inline CUDA_CALLABLE float index(const float3 & a, int idx)
{
#if FP_CHECK
if (idx < 0 || idx > 2)
{
printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
exit(1);
}
#endif
return (&a.x)[idx];
}
inline CUDA_CALLABLE void adj_index(const float3 & a, int idx, float3 & adj_a, int & adj_idx, float & adj_ret)
{
#if FP_CHECK
if (idx < 0 || idx > 2)
{
printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
exit(1);
}
#endif
(&adj_a.x)[idx] += adj_ret;
}
inline CUDA_CALLABLE float length(float3 a)
{
return sqrtf(dot(a, a));
}
inline CUDA_CALLABLE float3 normalize(float3 a)
{
float l = length(a);
if (l > kEps)
return div(a,l);
else
return float3();
}
inline bool CUDA_CALLABLE isfinite(float3 x)
{
return std::isfinite(x.x) && std::isfinite(x.y) && std::isfinite(x.z);
}
// adjoint float3 constructor
inline CUDA_CALLABLE void adj_float3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const float3& adj_ret)
{
adj_x += adj_ret.x;
adj_y += adj_ret.y;
adj_z += adj_ret.z;
}
inline CUDA_CALLABLE void adj_mul(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_a.x += s*adj_ret.x;
adj_a.y += s*adj_ret.y;
adj_a.z += s*adj_ret.z;
adj_s += dot(a, adj_ret);
#if FP_CHECK
if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
printf("adj_mul((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
inline CUDA_CALLABLE void adj_div(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_s += dot(- a / (s * s), adj_ret); // - a / s^2
adj_a.x += adj_ret.x / s;
adj_a.y += adj_ret.y / s;
adj_a.z += adj_ret.z / s;
#if FP_CHECK
if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
printf("adj_div((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
inline CUDA_CALLABLE void adj_add(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_add(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_a += adj_ret;
adj_s += adj_ret.x + adj_ret.y + adj_ret.z;
}
inline CUDA_CALLABLE void adj_sub(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += adj_ret;
adj_b -= adj_ret;
}
inline CUDA_CALLABLE void adj_dot(float3 a, float3 b, float3& adj_a, float3& adj_b, const float adj_ret)
{
adj_a += b*adj_ret;
adj_b += a*adj_ret;
#if FP_CHECK
if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
printf("adj_dot((%f %f %f), (%f %f %f), (%f %f %f), (%f %f %f), %f)\n", a.x, a.y, a.z, b.x, b.y, b.z, adj_a.x, adj_a.y, adj_a.z, adj_b.x, adj_b.y, adj_b.z, adj_ret);
#endif
}
inline CUDA_CALLABLE void adj_cross(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
// todo: sign check
adj_a += cross(b, adj_ret);
adj_b -= cross(a, adj_ret);
}
#ifdef CUDA
inline __device__ void atomic_add(float3 * addr, float3 value) {
// *addr += value;
atomicAdd(&(addr -> x), value.x);
atomicAdd(&(addr -> y), value.y);
atomicAdd(&(addr -> z), value.z);
}
#endif
inline CUDA_CALLABLE void adj_length(float3 a, float3& adj_a, const float adj_ret)
{
adj_a += normalize(a)*adj_ret;
#if FP_CHECK
if (!isfinite(adj_a))
printf("%s:%d - adj_length((%f %f %f), (%f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret);
#endif
}
inline CUDA_CALLABLE void adj_normalize(float3 a, float3& adj_a, const float3& adj_ret)
{
float d = length(a);
if (d > kEps)
{
float invd = 1.0f/d;
float3 ahat = normalize(a);
adj_a += (adj_ret*invd - ahat*(dot(ahat, adj_ret))*invd);
#if FP_CHECK
if (!isfinite(adj_a))
printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
}
| 5,542 | C | 23.745536 | 179 | 0.560628 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/sim.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This module contains time-integration objects for simulating
models + state forward in time.
"""
import math
import torch
import numpy as np
import dflex.util
import dflex.adjoint as df
import dflex.config
from dflex.model import *
import time
# Todo
#-----
#
# [x] Spring model
# [x] 2D FEM model
# [x] 3D FEM model
# [x] Cloth
# [x] Wind/Drag model
# [x] Bending model
# [x] Triangle collision
# [x] Rigid body model
# [x] Rigid shape contact
# [x] Sphere
# [x] Capsule
# [x] Box
# [ ] Convex
# [ ] SDF
# [ ] Implicit solver
# [x] USD import
# [x] USD export
# -----
# externally compiled kernels module (C++/CUDA code with PyBind entry points)
kernels = None
@df.func
def test(c: float):
x = 1.0
y = float(2)
z = int(3.0)
print(y)
print(z)
if (c < 3.0):
x = 2.0
return x*6.0
def kernel_init():
global kernels
kernels = df.compile()
@df.kernel
def integrate_particles(x: df.tensor(df.float3),
v: df.tensor(df.float3),
f: df.tensor(df.float3),
w: df.tensor(float),
gravity: df.tensor(df.float3),
dt: float,
x_new: df.tensor(df.float3),
v_new: df.tensor(df.float3)):
tid = df.tid()
x0 = df.load(x, tid)
v0 = df.load(v, tid)
f0 = df.load(f, tid)
inv_mass = df.load(w, tid)
g = df.load(gravity, 0)
# simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt
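    # df.step(0.0 - inv_mass) is 1.0 only when inv_mass > 0, so infinite-mass (kinematic) particles skip gravity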
v1 = v0 + (f0 * inv_mass + g * df.step(0.0 - inv_mass)) * dt
x1 = x0 + v1 * dt
df.store(x_new, tid, x1)
df.store(v_new, tid, v1)
# semi-implicit Euler integration
@df.kernel
def integrate_rigids(rigid_x: df.tensor(df.float3),
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
rigid_f: df.tensor(df.float3),
rigid_t: df.tensor(df.float3),
inv_m: df.tensor(float),
inv_I: df.tensor(df.mat33),
gravity: df.tensor(df.float3),
dt: float,
rigid_x_new: df.tensor(df.float3),
rigid_r_new: df.tensor(df.quat),
rigid_v_new: df.tensor(df.float3),
rigid_w_new: df.tensor(df.float3)):
tid = df.tid()
# positions
x0 = df.load(rigid_x, tid)
r0 = df.load(rigid_r, tid)
# velocities
v0 = df.load(rigid_v, tid)
w0 = df.load(rigid_w, tid) # angular velocity
# forces
f0 = df.load(rigid_f, tid)
t0 = df.load(rigid_t, tid)
# masses
inv_mass = df.load(inv_m, tid) # 1 / mass
inv_inertia = df.load(inv_I, tid) # inverse of 3x3 inertia matrix
g = df.load(gravity, 0)
# linear part
v1 = v0 + (f0 * inv_mass + g * df.nonzero(inv_mass)) * dt # linear integral (linear position/velocity)
x1 = x0 + v1 * dt
# angular part
    # so reverse multiplication by r0 takes you from global coordinates into local coordinates,
    # because it is a covector and thus gets pulled back rather than pushed forward
wb = df.rotate_inv(r0, w0) # angular integral (angular velocity and rotation), rotate into object reference frame
tb = df.rotate_inv(r0, t0) # also rotate torques into local coordinates
# I^{-1} torque = angular acceleration and inv_inertia is always going to be in the object frame.
# So we need to rotate into that frame, and then back into global.
w1 = df.rotate(r0, wb + inv_inertia * tb * dt) # I^-1 * torque * dt., then go back into global coordinates
r1 = df.normalize(r0 + df.quat(w1, 0.0) * r0 * 0.5 * dt) # rotate around w1 by dt
df.store(rigid_x_new, tid, x1)
df.store(rigid_r_new, tid, r1)
df.store(rigid_v_new, tid, v1)
df.store(rigid_w_new, tid, w1)
@df.kernel
def eval_springs(x: df.tensor(df.float3),
v: df.tensor(df.float3),
spring_indices: df.tensor(int),
spring_rest_lengths: df.tensor(float),
spring_stiffness: df.tensor(float),
spring_damping: df.tensor(float),
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(spring_indices, tid * 2 + 0)
j = df.load(spring_indices, tid * 2 + 1)
ke = df.load(spring_stiffness, tid)
kd = df.load(spring_damping, tid)
rest = df.load(spring_rest_lengths, tid)
xi = df.load(x, i)
xj = df.load(x, j)
vi = df.load(v, i)
vj = df.load(v, j)
xij = xi - xj
vij = vi - vj
l = length(xij)
l_inv = 1.0 / l
# normalized spring direction
dir = xij * l_inv
c = l - rest
dcdt = dot(dir, vij)
# damping based on relative velocity.
fs = dir * (ke * c + kd * dcdt)
df.atomic_sub(f, i, fs)
df.atomic_add(f, j, fs)
@df.kernel
def eval_triangles(x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat22),
activation: df.tensor(float),
k_mu: float,
k_lambda: float,
k_damp: float,
k_drag: float,
k_lift: float,
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 3 + 0)
j = df.load(indices, tid * 3 + 1)
k = df.load(indices, tid * 3 + 2)
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
vp = df.load(v, i) # vel zero
vq = df.load(v, j) # vel one
vr = df.load(v, k) # vel two
qp = q - p # barycentric coordinates (centered at p)
rp = r - p
Dm = df.load(pose, tid)
inv_rest_area = df.determinant(Dm) * 2.0 # 1 / det(A) = det(A^-1)
rest_area = 1.0 / inv_rest_area
# scale stiffness coefficients to account for area
k_mu = k_mu * rest_area
k_lambda = k_lambda * rest_area
k_damp = k_damp * rest_area
# F = Xs*Xm^-1
f1 = qp * Dm[0, 0] + rp * Dm[1, 0]
f2 = qp * Dm[0, 1] + rp * Dm[1, 1]
#-----------------------------
    # St. Venant-Kirchhoff
# # Green strain, F'*F-I
# e00 = dot(f1, f1) - 1.0
# e10 = dot(f2, f1)
# e01 = dot(f1, f2)
# e11 = dot(f2, f2) - 1.0
# E = df.mat22(e00, e01,
# e10, e11)
# # local forces (deviatoric part)
# T = df.mul(E, df.transpose(Dm))
# # spatial forces, F*T
# fq = (f1*T[0,0] + f2*T[1,0])*k_mu*2.0
# fr = (f1*T[0,1] + f2*T[1,1])*k_mu*2.0
# alpha = 1.0
#-----------------------------
# Baraff & Witkin, note this model is not isotropic
# c1 = length(f1) - 1.0
# c2 = length(f2) - 1.0
# f1 = normalize(f1)*c1*k1
# f2 = normalize(f2)*c2*k1
# fq = f1*Dm[0,0] + f2*Dm[0,1]
# fr = f1*Dm[1,0] + f2*Dm[1,1]
#-----------------------------
# Neo-Hookean (with rest stability)
# force = mu*F*Dm'
fq = (f1 * Dm[0, 0] + f2 * Dm[0, 1]) * k_mu
fr = (f1 * Dm[1, 0] + f2 * Dm[1, 1]) * k_mu
alpha = 1.0 + k_mu / k_lambda
#-----------------------------
# Area Preservation
n = df.cross(qp, rp)
area = df.length(n) * 0.5
# actuation
act = df.load(activation, tid)
# J-alpha
c = area * inv_rest_area - alpha + act
# dJdx
n = df.normalize(n)
dcdq = df.cross(rp, n) * inv_rest_area * 0.5
dcdr = df.cross(n, qp) * inv_rest_area * 0.5
f_area = k_lambda * c
#-----------------------------
# Area Damping
dcdt = dot(dcdq, vq) + dot(dcdr, vr) - dot(dcdq + dcdr, vp)
f_damp = k_damp * dcdt
fq = fq + dcdq * (f_area + f_damp)
fr = fr + dcdr * (f_area + f_damp)
fp = fq + fr
#-----------------------------
# Lift + Drag
vmid = (vp + vr + vq) * 0.3333
vdir = df.normalize(vmid)
f_drag = vmid * (k_drag * area * df.abs(df.dot(n, vmid)))
f_lift = n * (k_lift * area * (1.57079 - df.acos(df.dot(n, vdir)))) * dot(vmid, vmid)
    # note reversed sign due to the atomic_add calls below; the unary negation op - still needs to be written
fp = fp - f_drag - f_lift
fq = fq + f_drag + f_lift
fr = fr + f_drag + f_lift
# apply forces
df.atomic_add(f, i, fp)
df.atomic_sub(f, j, fq)
df.atomic_sub(f, k, fr)
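# closest point on a triangle returned as barycentric coordinates; follows the standard
# region-based test (cf. Ericson, Real-Time Collision Detection)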
@df.func
def triangle_closest_point_barycentric(a: df.float3, b: df.float3, c: df.float3, p: df.float3):
ab = b - a
ac = c - a
ap = p - a
d1 = df.dot(ab, ap)
d2 = df.dot(ac, ap)
if (d1 <= 0.0 and d2 <= 0.0):
return float3(1.0, 0.0, 0.0)
bp = p - b
d3 = df.dot(ab, bp)
d4 = df.dot(ac, bp)
if (d3 >= 0.0 and d4 <= d3):
return float3(0.0, 1.0, 0.0)
vc = d1 * d4 - d3 * d2
v = d1 / (d1 - d3)
if (vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0):
return float3(1.0 - v, v, 0.0)
cp = p - c
d5 = dot(ab, cp)
d6 = dot(ac, cp)
if (d6 >= 0.0 and d5 <= d6):
return float3(0.0, 0.0, 1.0)
vb = d5 * d2 - d1 * d6
w = d2 / (d2 - d6)
if (vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0):
return float3(1.0 - w, 0.0, w)
va = d3 * d6 - d5 * d4
w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
if (va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0):
return float3(0.0, w, 1.0 - w)
denom = 1.0 / (va + vb + vc)
v = vb * denom
w = vc * denom
return float3(1.0 - v - w, v, w)
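# Usage sketch (mirrors the call sites in the contact kernels below): the returned
# float3 holds barycentric weights (u, v, w) with u + v + w = 1, so the closest
# point on the triangle is recovered as
#     closest = a*bary[0] + b*bary[1] + c*bary[2]
# which is exactly what eval_triangles_contact / eval_triangles_rigid_contacts do.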
@df.kernel
def eval_triangles_contact(
# idx : df.tensor(int), # list of indices for colliding particles
num_particles: int, # size of particles
x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat22),
activation: df.tensor(float),
k_mu: float,
k_lambda: float,
k_damp: float,
k_drag: float,
k_lift: float,
f: df.tensor(df.float3)):
tid = df.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# index = df.load(idx, tid)
pos = df.load(x, particle_no) # at the moment, just one particle
# vel0 = df.load(v, 0)
i = df.load(indices, face_no * 3 + 0)
j = df.load(indices, face_no * 3 + 1)
k = df.load(indices, face_no * 3 + 2)
if (i == particle_no or j == particle_no or k == particle_no):
return
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
# vp = df.load(v, i) # vel zero
# vq = df.load(v, j) # vel one
# vr = df.load(v, k) # vel two
# qp = q-p # barycentric coordinates (centered at p)
# rp = r-p
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest
dist = df.dot(diff, diff)
n = df.normalize(diff)
c = df.min(dist - 0.01, 0.0) # 0 unless within 0.01 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
fn = n * c * 1e5
df.atomic_sub(f, particle_no, fn)
# # apply forces (could do - f / 3 here)
df.atomic_add(f, i, fn * bary[0])
df.atomic_add(f, j, fn * bary[1])
df.atomic_add(f, k, fn * bary[2])
@df.kernel
def eval_triangles_rigid_contacts(
num_particles: int, # number of particles (size of contact_point)
x: df.tensor(df.float3), # position of particles
v: df.tensor(df.float3),
indices: df.tensor(int), # triangle indices
rigid_x: df.tensor(df.float3), # rigid body positions
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3), # position of contact points relative to body
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
# rigid_f : df.tensor(df.float3),
# rigid_t : df.tensor(df.float3),
tri_f: df.tensor(df.float3)):
tid = df.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# -----------------------
# load rigid body point
c_body = df.load(contact_body, particle_no)
c_point = df.load(contact_point, particle_no)
c_dist = df.load(contact_dist, particle_no)
c_mat = df.load(contact_mat, particle_no)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
x0 = df.load(rigid_x, c_body) # position of colliding body
r0 = df.load(rigid_r, c_body) # orientation of colliding body
v0 = df.load(rigid_v, c_body)
w0 = df.load(rigid_w, c_body)
# transform point to world space
pos = x0 + df.rotate(r0, c_point)
# use x0 as center, everything is offset from center of mass
# moment arm
r = pos - x0 # basically just c_point in the new coordinates
rhat = df.normalize(r)
pos = pos + rhat * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
# contact point velocity
dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point.
# -----------------------
# load triangle
i = df.load(indices, face_no * 3 + 0)
j = df.load(indices, face_no * 3 + 1)
k = df.load(indices, face_no * 3 + 2)
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
vp = df.load(v, i) # vel zero
vq = df.load(v, j) # vel one
vr = df.load(v, k) # vel two
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest # vector from tri to point
dist = df.dot(diff, diff) # squared distance
n = df.normalize(diff) # points into the object
c = df.min(dist - 0.05, 0.0) # 0 unless within 0.05 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
# fn = n * c * 1e6 # points towards cloth (both n and c are negative)
# df.atomic_sub(tri_f, particle_no, fn)
fn = c * ke # normal force (restitution coefficient * how far inside for ground) (negative)
vtri = vp * bary[0] + vq * bary[1] + vr * bary[2] # bad approximation for centroid velocity
vrel = vtri - dpdt
vn = dot(n, vrel) # velocity component of rigid in negative normal direction
vt = vrel - n * vn # velocity component not in normal direction
# contact damping
fd = 0.0 - df.max(vn, 0.0) * kd * df.step(c) # again, negative, into the ground
# # viscous friction
# ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd)
upper = 0.0 - lower # workaround because no unary ops yet
nx = cross(n, float3(0.0, 0.0, 1.0)) # basis vectors for tangent
nz = cross(n, float3(1.0, 0.0, 0.0))
vx = df.clamp(dot(nx * kf, vt), lower, upper)
vz = df.clamp(dot(nz * kf, vt), lower, upper)
ft = (nx * vx + nz * vz) * (0.0 - df.step(c)) # df.float3(vx, 0.0, vz)*df.step(c)
# # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
# #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = n * (fn + fd) + ft
df.atomic_add(tri_f, i, f_total * bary[0])
df.atomic_add(tri_f, j, f_total * bary[1])
df.atomic_add(tri_f, k, f_total * bary[2])
@df.kernel
def eval_bending(
x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), rest: df.tensor(float), ke: float, kd: float, f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
rest_angle = df.load(rest, tid)
x1 = df.load(x, i)
x2 = df.load(x, j)
x3 = df.load(x, k)
x4 = df.load(x, l)
v1 = df.load(v, i)
v2 = df.load(v, j)
v3 = df.load(v, k)
v4 = df.load(v, l)
n1 = df.cross(x3 - x1, x4 - x1) # normal to face 1
n2 = df.cross(x4 - x2, x3 - x2) # normal to face 2
n1_length = df.length(n1)
n2_length = df.length(n2)
rcp_n1 = 1.0 / n1_length
rcp_n2 = 1.0 / n2_length
cos_theta = df.dot(n1, n2) * rcp_n1 * rcp_n2
n1 = n1 * rcp_n1 * rcp_n1
n2 = n2 * rcp_n2 * rcp_n2
e = x4 - x3
e_hat = df.normalize(e)
e_length = df.length(e)
s = df.sign(df.dot(df.cross(n2, n1), e_hat))
angle = df.acos(cos_theta) * s
d1 = n1 * e_length
d2 = n2 * e_length
d3 = n1 * df.dot(x1 - x4, e_hat) + n2 * df.dot(x2 - x4, e_hat)
d4 = n1 * df.dot(x3 - x1, e_hat) + n2 * df.dot(x3 - x2, e_hat)
# elastic
f_elastic = ke * (angle - rest_angle)
# damping
f_damp = kd * (df.dot(d1, v1) + df.dot(d2, v2) + df.dot(d3, v3) + df.dot(d4, v4))
# total force, proportional to edge length
f_total = 0.0 - e_length * (f_elastic + f_damp)
df.atomic_add(f, i, d1 * f_total)
df.atomic_add(f, j, d2 * f_total)
df.atomic_add(f, k, d3 * f_total)
df.atomic_add(f, l, d4 * f_total)
@df.kernel
def eval_tetrahedra(x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat33),
activation: df.tensor(float),
materials: df.tensor(float),
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
act = df.load(activation, tid)
k_mu = df.load(materials, tid * 3 + 0)
k_lambda = df.load(materials, tid * 3 + 1)
k_damp = df.load(materials, tid * 3 + 2)
x0 = df.load(x, i)
x1 = df.load(x, j)
x2 = df.load(x, k)
x3 = df.load(x, l)
v0 = df.load(v, i)
v1 = df.load(v, j)
v2 = df.load(v, k)
v3 = df.load(v, l)
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v10 = v1 - v0
v20 = v2 - v0
v30 = v3 - v0
Ds = df.mat33(x10, x20, x30)
Dm = df.load(pose, tid)
inv_rest_volume = df.determinant(Dm) * 6.0
rest_volume = 1.0 / inv_rest_volume
alpha = 1.0 + k_mu / k_lambda - k_mu / (4.0 * k_lambda)
    # scale stiffness coefficients to account for rest volume
k_mu = k_mu * rest_volume
k_lambda = k_lambda * rest_volume
k_damp = k_damp * rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
dFdt = df.mat33(v10, v20, v30) * Dm
col1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
col2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
col3 = df.float3(F[0, 2], F[1, 2], F[2, 2])
#-----------------------------
# Neo-Hookean (with rest stability [Smith et al 2018])
Ic = dot(col1, col1) + dot(col2, col2) + dot(col3, col3)
# deviatoric part
P = F * k_mu * (1.0 - 1.0 / (Ic + 1.0)) + dFdt * k_damp
H = P * df.transpose(Dm)
f1 = df.float3(H[0, 0], H[1, 0], H[2, 0])
f2 = df.float3(H[0, 1], H[1, 1], H[2, 1])
f3 = df.float3(H[0, 2], H[1, 2], H[2, 2])
#-----------------------------
# C_spherical
# r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# r_s_inv = 1.0/r_s
# C = r_s - df.sqrt(3.0)
# dCdx = F*df.transpose(Dm)*r_s_inv
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
#----------------------------
# C_D
# r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# C = r_s*r_s - 3.0
# dCdx = F*df.transpose(Dm)*2.0
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
# hydrostatic part
J = df.determinant(F)
#print(J)
s = inv_rest_volume / 6.0
dJdx1 = df.cross(x20, x30) * s
dJdx2 = df.cross(x30, x10) * s
dJdx3 = df.cross(x10, x20) * s
f_volume = (J - alpha + act) * k_lambda
f_damp = (df.dot(dJdx1, v1) + df.dot(dJdx2, v2) + df.dot(dJdx3, v3)) * k_damp
f_total = f_volume + f_damp
f1 = f1 + dJdx1 * f_total
f2 = f2 + dJdx2 * f_total
f3 = f3 + dJdx3 * f_total
f0 = (f1 + f2 + f3) * (0.0 - 1.0)
# apply forces
df.atomic_sub(f, i, f0)
df.atomic_sub(f, j, f1)
df.atomic_sub(f, k, f2)
df.atomic_sub(f, l, f3)
@df.kernel
def eval_contacts(x: df.tensor(df.float3), v: df.tensor(df.float3), ke: float, kd: float, kf: float, mu: float, f: df.tensor(df.float3)):
tid = df.tid() # this just handles contact of particles with the ground plane, nothing else.
x0 = df.load(x, tid)
v0 = df.load(v, tid)
    n = float3(0.0, 1.0, 0.0) # ground plane normal (always +y)
c = df.min(dot(n, x0) - 0.01, 0.0) # 0 unless within 0.01 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
vn = dot(n, v0)
vt = v0 - n * vn
fn = n * c * ke
# contact damping
fd = n * df.min(vn, 0.0) * kd
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * c * ke
upper = 0.0 - lower
vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
ftotal = fn + (fd + ft) * df.step(c)
df.atomic_sub(f, tid, ftotal)
@df.func
def sphere_sdf(center: df.float3, radius: float, p: df.float3):
return df.length(p-center) - radius
@df.func
def sphere_sdf_grad(center: df.float3, radius: float, p: df.float3):
return df.normalize(p-center)
@df.func
def box_sdf(upper: df.float3, p: df.float3):
# adapted from https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
qx = abs(p[0])-upper[0]
qy = abs(p[1])-upper[1]
qz = abs(p[2])-upper[2]
e = df.float3(df.max(qx, 0.0), df.max(qy, 0.0), df.max(qz, 0.0))
return df.length(e) + df.min(df.max(qx, df.max(qy, qz)), 0.0)
@df.func
def box_sdf_grad(upper: df.float3, p: df.float3):
qx = abs(p[0])-upper[0]
qy = abs(p[1])-upper[1]
qz = abs(p[2])-upper[2]
# exterior case
if (qx > 0.0 or qy > 0.0 or qz > 0.0):
x = df.clamp(p[0], 0.0-upper[0], upper[0])
y = df.clamp(p[1], 0.0-upper[1], upper[1])
z = df.clamp(p[2], 0.0-upper[2], upper[2])
return df.normalize(p - df.float3(x, y, z))
sx = df.sign(p[0])
sy = df.sign(p[1])
sz = df.sign(p[2])
# x projection
if (qx > qy and qx > qz):
return df.float3(sx, 0.0, 0.0)
# y projection
if (qy > qx and qy > qz):
return df.float3(0.0, sy, 0.0)
# z projection
if (qz > qx and qz > qy):
return df.float3(0.0, 0.0, sz)
@df.func
def capsule_sdf(radius: float, half_width: float, p: df.float3):
if (p[0] > half_width):
return length(df.float3(p[0] - half_width, p[1], p[2])) - radius
if (p[0] < 0.0 - half_width):
return length(df.float3(p[0] + half_width, p[1], p[2])) - radius
return df.length(df.float3(0.0, p[1], p[2])) - radius
@df.func
def capsule_sdf_grad(radius: float, half_width: float, p: df.float3):
if (p[0] > half_width):
return normalize(df.float3(p[0] - half_width, p[1], p[2]))
if (p[0] < 0.0 - half_width):
return normalize(df.float3(p[0] + half_width, p[1], p[2]))
return normalize(df.float3(0.0, p[1], p[2]))
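# Illustrative reference (not used by the kernels): the box signed-distance function
# mirrored in plain NumPy for a box with half-extents 'upper' centered at the origin.
# Negative values are inside the box; this matches box_sdf above.
def _ref_box_sdf(upper, p):
    import numpy as np
    q = np.abs(np.asarray(p, dtype=float)) - np.asarray(upper, dtype=float)
    outside = np.linalg.norm(np.maximum(q, 0.0)) # distance when outside
    inside = min(max(q[0], max(q[1], q[2])), 0.0) # negative distance when inside
    return outside + inside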
@df.kernel
def eval_soft_contacts(
num_particles: int,
particle_x: df.tensor(df.float3),
particle_v: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_v_sc: df.tensor(df.spatial_vector),
shape_X_co: df.tensor(df.spatial_transform),
shape_body: df.tensor(int),
shape_geo_type: df.tensor(int),
shape_geo_src: df.tensor(int),
shape_geo_scale: df.tensor(df.float3),
shape_materials: df.tensor(float),
ke: float,
kd: float,
kf: float,
mu: float,
# outputs
particle_f: df.tensor(df.float3),
body_f: df.tensor(df.spatial_vector)):
tid = df.tid()
shape_index = tid // num_particles # which shape
particle_index = tid % num_particles # which particle
rigid_index = df.load(shape_body, shape_index)
px = df.load(particle_x, particle_index)
pv = df.load(particle_v, particle_index)
#center = float3(0.0, 0.5, 0.0)
#radius = 0.25
#margin = 0.01
# sphere collider
# c = df.min(sphere_sdf(center, radius, x0)-margin, 0.0)
# n = sphere_sdf_grad(center, radius, x0)
# box collider
#c = df.min(box_sdf(df.float3(radius, radius, radius), x0-center)-margin, 0.0)
#n = box_sdf_grad(df.float3(radius, radius, radius), x0-center)
X_sc = df.spatial_transform_identity()
if (rigid_index >= 0):
X_sc = df.load(body_X_sc, rigid_index)
X_co = df.load(shape_X_co, shape_index)
X_so = df.spatial_transform_multiply(X_sc, X_co)
X_os = df.spatial_transform_inverse(X_so)
# transform particle position to shape local space
x_local = df.spatial_transform_point(X_os, px)
# geo description
geo_type = df.load(shape_geo_type, shape_index)
geo_scale = df.load(shape_geo_scale, shape_index)
margin = 0.01
# evaluate shape sdf
c = 0.0
n = df.float3(0.0, 0.0, 0.0)
# GEO_SPHERE (0)
if (geo_type == 0):
c = df.min(sphere_sdf(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, sphere_sdf_grad(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local))
# GEO_BOX (1)
if (geo_type == 1):
c = df.min(box_sdf(geo_scale, x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, box_sdf_grad(geo_scale, x_local))
# GEO_CAPSULE (2)
if (geo_type == 2):
c = df.min(capsule_sdf(geo_scale[0], geo_scale[1], x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, capsule_sdf_grad(geo_scale[0], geo_scale[1], x_local))
# rigid velocity
rigid_v_s = df.spatial_vector()
if (rigid_index >= 0):
rigid_v_s = df.load(body_v_sc, rigid_index)
rigid_w = df.spatial_top(rigid_v_s)
rigid_v = df.spatial_bottom(rigid_v_s)
# compute the body velocity at the particle position
bv = rigid_v + df.cross(rigid_w, px)
# relative velocity
v = pv - bv
# decompose relative velocity
vn = dot(n, v)
vt = v - n * vn
# contact elastic
fn = n * c * ke
# contact damping
fd = n * df.min(vn, 0.0) * kd
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * c * ke
upper = 0.0 - lower
vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = fn + (fd + ft) * df.step(c)
t_total = df.cross(px, f_total)
df.atomic_sub(particle_f, particle_index, f_total)
if (rigid_index >= 0):
df.atomic_sub(body_f, rigid_index, df.spatial_vector(t_total, f_total))
@df.kernel
def eval_rigid_contacts(rigid_x: df.tensor(df.float3),
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3),
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
rigid_f: df.tensor(df.float3),
rigid_t: df.tensor(df.float3)):
tid = df.tid()
c_body = df.load(contact_body, tid)
c_point = df.load(contact_point, tid)
c_dist = df.load(contact_dist, tid)
c_mat = df.load(contact_mat, tid)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
x0 = df.load(rigid_x, c_body) # position of colliding body
r0 = df.load(rigid_r, c_body) # orientation of colliding body
v0 = df.load(rigid_v, c_body)
w0 = df.load(rigid_w, c_body)
n = float3(0.0, 1.0, 0.0)
# transform point to world space
p = x0 + df.rotate(r0, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
# use x0 as center, everything is offset from center of mass
# moment arm
r = p - x0 # basically just c_point in the new coordinates
# contact point velocity
dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point.
# check ground contact
c = df.min(dot(n, p), 0.0) # check if we're inside the ground
vn = dot(n, dpdt) # velocity component out of the ground
vt = dpdt - n * vn # velocity component not into the ground
fn = c * ke # normal force (restitution coefficient * how far inside for ground)
# contact damping
fd = df.min(vn, 0.0) * kd * df.step(c) # again, velocity into the ground, negative
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd) # negative
upper = 0.0 - lower # positive, workaround for no unary ops
vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz) * df.step(c)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = n * (fn + fd) + ft
t_total = df.cross(r, f_total)
df.atomic_sub(rigid_f, c_body, f_total)
df.atomic_sub(rigid_t, c_body, t_total)
# Frank & Park definition 3.20, pg 100
@df.func
def spatial_transform_twist(t: df.spatial_transform, x: df.spatial_vector):
q = spatial_transform_get_rotation(t)
p = spatial_transform_get_translation(t)
w = spatial_top(x)
v = spatial_bottom(x)
w = rotate(q, w)
v = rotate(q, v) + cross(p, w)
return spatial_vector(w, v)
@df.func
def spatial_transform_wrench(t: df.spatial_transform, x: df.spatial_vector):
q = spatial_transform_get_rotation(t)
p = spatial_transform_get_translation(t)
w = spatial_top(x)
v = spatial_bottom(x)
v = rotate(q, v)
w = rotate(q, w) + cross(p, v)
return spatial_vector(w, v)
@df.func
def spatial_transform_inverse(t: df.spatial_transform):
p = spatial_transform_get_translation(t)
q = spatial_transform_get_rotation(t)
q_inv = inverse(q)
    return spatial_transform(rotate(q_inv, p)*(0.0 - 1.0), q_inv)
# computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
@df.func
def spatial_transform_inertia(t: df.spatial_transform, I: df.spatial_matrix):
t_inv = spatial_transform_inverse(t)
q = spatial_transform_get_rotation(t_inv)
p = spatial_transform_get_translation(t_inv)
r1 = rotate(q, float3(1.0, 0.0, 0.0))
r2 = rotate(q, float3(0.0, 1.0, 0.0))
r3 = rotate(q, float3(0.0, 0.0, 1.0))
R = mat33(r1, r2, r3)
S = mul(skew(p), R)
T = spatial_adjoint(R, S)
return mul(mul(transpose(T), I), T)
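# Worked form of the transform above (a sketch, assuming spatial_adjoint(R, S) places
# R in both diagonal 3x3 blocks and S in the lower-left coupling block): with the
# inverse transform written as rotation R and translation p,
#     T = | R        0 |
#         | [p]x R   R |
# and the spatial inertia changes coordinates as I_s = T^T * I_m * T, which is the
# mul(mul(transpose(T), I), T) returned by spatial_transform_inertia.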
@df.kernel
def eval_rigid_contacts_art(
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3),
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
body_f_s: df.tensor(df.spatial_vector)):
tid = df.tid()
c_body = df.load(contact_body, tid)
c_point = df.load(contact_point, tid)
c_dist = df.load(contact_dist, tid)
c_mat = df.load(contact_mat, tid)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
    X_s = df.load(body_X_s, c_body) # spatial transform of colliding body
    v_s = df.load(body_v_s, c_body) # spatial velocity of colliding body
n = float3(0.0, 1.0, 0.0)
# transform point to world space
p = df.spatial_transform_point(X_s, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
w = df.spatial_top(v_s)
v = df.spatial_bottom(v_s)
# contact point velocity
dpdt = v + df.cross(w, p)
# check ground contact
c = df.dot(n, p) # check if we're inside the ground
if (c >= 0.0):
return
vn = dot(n, dpdt) # velocity component out of the ground
vt = dpdt - n * vn # velocity component not into the ground
fn = c * ke # normal force (restitution coefficient * how far inside for ground)
# contact damping
fd = df.min(vn, 0.0) * kd * df.step(c) * (0.0 - c)
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd) # negative
upper = 0.0 - lower # positive, workaround for no unary ops
vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) * df.step(c)
f_total = n * (fn + fd) + ft
t_total = df.cross(p, f_total)
df.atomic_add(body_f_s, c_body, df.spatial_vector(t_total, f_total))
@df.func
def compute_muscle_force(
i: int,
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
muscle_links: df.tensor(int),
muscle_points: df.tensor(df.float3),
muscle_activation: float,
body_f_s: df.tensor(df.spatial_vector)):
link_0 = df.load(muscle_links, i)
link_1 = df.load(muscle_links, i+1)
if (link_0 == link_1):
return 0
r_0 = df.load(muscle_points, i)
r_1 = df.load(muscle_points, i+1)
xform_0 = df.load(body_X_s, link_0)
xform_1 = df.load(body_X_s, link_1)
pos_0 = df.spatial_transform_point(xform_0, r_0)
pos_1 = df.spatial_transform_point(xform_1, r_1)
n = df.normalize(pos_1 - pos_0)
# todo: add passive elastic and viscosity terms
f = n * muscle_activation
df.atomic_sub(body_f_s, link_0, df.spatial_vector(df.cross(pos_0, f), f))
df.atomic_add(body_f_s, link_1, df.spatial_vector(df.cross(pos_1, f), f))
return 0
@df.kernel
def eval_muscles(
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
muscle_start: df.tensor(int),
muscle_params: df.tensor(float),
muscle_links: df.tensor(int),
muscle_points: df.tensor(df.float3),
muscle_activation: df.tensor(float),
# output
body_f_s: df.tensor(df.spatial_vector)):
tid = df.tid()
m_start = df.load(muscle_start, tid)
m_end = df.load(muscle_start, tid+1) - 1
activation = df.load(muscle_activation, tid)
for i in range(m_start, m_end):
compute_muscle_force(i, body_X_s, body_v_s, muscle_links, muscle_points, activation, body_f_s)
# compute transform across a joint
@df.func
def jcalc_transform(type: int, axis: df.float3, joint_q: df.tensor(float), start: int):
# prismatic
if (type == 0):
q = df.load(joint_q, start)
X_jc = spatial_transform(axis * q, quat_identity())
return X_jc
# revolute
if (type == 1):
q = df.load(joint_q, start)
X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat_from_axis_angle(axis, q))
return X_jc
# ball
if (type == 2):
qx = df.load(joint_q, start + 0)
qy = df.load(joint_q, start + 1)
qz = df.load(joint_q, start + 2)
qw = df.load(joint_q, start + 3)
X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat(qx, qy, qz, qw))
return X_jc
# fixed
if (type == 3):
X_jc = spatial_transform_identity()
return X_jc
# free
if (type == 4):
px = df.load(joint_q, start + 0)
py = df.load(joint_q, start + 1)
pz = df.load(joint_q, start + 2)
qx = df.load(joint_q, start + 3)
qy = df.load(joint_q, start + 4)
qz = df.load(joint_q, start + 5)
qw = df.load(joint_q, start + 6)
X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw))
return X_jc
# default case
return spatial_transform_identity()
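# Generalized coordinate layout implied by the branches in jcalc_transform (coords are
# entries of joint_q starting at joint_q_start; dof counts follow from jcalc_motion):
#   type 0 prismatic: 1 coord (displacement along axis), 1 dof
#   type 1 revolute: 1 coord (angle about axis), 1 dof
#   type 2 ball: 4 coords (quaternion x, y, z, w), 3 dofs
#   type 3 fixed: 0 coords, 0 dofs
#   type 4 free: 7 coords (px, py, pz, qx, qy, qz, qw), 6 dofs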
# compute motion subspace and velocity for a joint
@df.func
def jcalc_motion(type: int, axis: df.float3, X_sc: df.spatial_transform, joint_S_s: df.tensor(df.spatial_vector), joint_qd: df.tensor(float), joint_start: int):
# prismatic
if (type == 0):
S_s = df.spatial_transform_twist(X_sc, spatial_vector(float3(0.0, 0.0, 0.0), axis))
v_j_s = S_s * df.load(joint_qd, joint_start)
df.store(joint_S_s, joint_start, S_s)
return v_j_s
# revolute
if (type == 1):
S_s = df.spatial_transform_twist(X_sc, spatial_vector(axis, float3(0.0, 0.0, 0.0)))
v_j_s = S_s * df.load(joint_qd, joint_start)
df.store(joint_S_s, joint_start, S_s)
return v_j_s
# ball
if (type == 2):
w = float3(df.load(joint_qd, joint_start + 0),
df.load(joint_qd, joint_start + 1),
df.load(joint_qd, joint_start + 2))
S_0 = df.spatial_transform_twist(X_sc, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
S_1 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
S_2 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
# write motion subspace
df.store(joint_S_s, joint_start + 0, S_0)
df.store(joint_S_s, joint_start + 1, S_1)
df.store(joint_S_s, joint_start + 2, S_2)
return S_0*w[0] + S_1*w[1] + S_2*w[2]
# fixed
if (type == 3):
return spatial_vector()
# free
if (type == 4):
v_j_s = spatial_vector(df.load(joint_qd, joint_start + 0),
df.load(joint_qd, joint_start + 1),
df.load(joint_qd, joint_start + 2),
df.load(joint_qd, joint_start + 3),
df.load(joint_qd, joint_start + 4),
df.load(joint_qd, joint_start + 5))
# write motion subspace
df.store(joint_S_s, joint_start + 0, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 1, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 2, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 3, spatial_vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 4, spatial_vector(0.0, 0.0, 0.0, 0.0, 1.0, 0.0))
df.store(joint_S_s, joint_start + 5, spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
return v_j_s
# default case
return spatial_vector()
# # compute the velocity across a joint
# #@df.func
# def jcalc_velocity(self, type, S_s, joint_qd, start):
# # prismatic
# if (type == 0):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # revolute
# if (type == 1):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # fixed
# if (type == 2):
# v_j_s = spatial_vector()
# return v_j_s
# # free
# if (type == 3):
# v_j_s = S_s[start+0]*joint_qd[start+0]
# v_j_s += S_s[start+1]*joint_qd[start+1]
# v_j_s += S_s[start+2]*joint_qd[start+2]
# v_j_s += S_s[start+3]*joint_qd[start+3]
# v_j_s += S_s[start+4]*joint_qd[start+4]
# v_j_s += S_s[start+5]*joint_qd[start+5]
# return v_j_s
# computes joint space forces/torques in tau
@df.func
def jcalc_tau(
type: int,
target_k_e: float,
target_k_d: float,
limit_k_e: float,
limit_k_d: float,
joint_S_s: df.tensor(spatial_vector),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
coord_start: int,
dof_start: int,
body_f_s: spatial_vector,
tau: df.tensor(float)):
# prismatic / revolute
if (type == 0 or type == 1):
S_s = df.load(joint_S_s, dof_start)
q = df.load(joint_q, coord_start)
qd = df.load(joint_qd, dof_start)
act = df.load(joint_act, dof_start)
target = df.load(joint_target, coord_start)
lower = df.load(joint_limit_lower, coord_start)
upper = df.load(joint_limit_upper, coord_start)
limit_f = 0.0
# compute limit forces, damping only active when limit is violated
if (q < lower):
limit_f = limit_k_e*(lower-q)
if (q > upper):
limit_f = limit_k_e*(upper-q)
damping_f = (0.0 - limit_k_d) * qd
# total torque / force on the joint
t = 0.0 - spatial_dot(S_s, body_f_s) - target_k_e*(q - target) - target_k_d*qd + act + limit_f + damping_f
df.store(tau, dof_start, t)
# ball
if (type == 2):
# elastic term.. this is proportional to the
# imaginary part of the relative quaternion
r_j = float3(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2))
# angular velocity for damping
w_j = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
for i in range(0, 3):
S_s = df.load(joint_S_s, dof_start+i)
w = w_j[i]
r = r_j[i]
df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s) - w*target_k_d - r*target_k_e)
# fixed
# if (type == 3)
# pass
# free
if (type == 4):
for i in range(0, 6):
S_s = df.load(joint_S_s, dof_start+i)
df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s))
return 0
@df.func
def jcalc_integrate(
type: int,
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_qdd: df.tensor(float),
coord_start: int,
dof_start: int,
dt: float,
joint_q_new: df.tensor(float),
joint_qd_new: df.tensor(float)):
# prismatic / revolute
if (type == 0 or type == 1):
qdd = df.load(joint_qdd, dof_start)
qd = df.load(joint_qd, dof_start)
q = df.load(joint_q, coord_start)
qd_new = qd + qdd*dt
q_new = q + qd_new*dt
df.store(joint_qd_new, dof_start, qd_new)
df.store(joint_q_new, coord_start, q_new)
# ball
if (type == 2):
m_j = float3(df.load(joint_qdd, dof_start + 0),
df.load(joint_qdd, dof_start + 1),
df.load(joint_qdd, dof_start + 2))
w_j = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
r_j = quat(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2),
df.load(joint_q, coord_start + 3))
# symplectic Euler
w_j_new = w_j + m_j*dt
drdt_j = mul(quat(w_j_new, 0.0), r_j) * 0.5
# new orientation (normalized)
r_j_new = normalize(r_j + drdt_j * dt)
# update joint coords
df.store(joint_q_new, coord_start + 0, r_j_new[0])
df.store(joint_q_new, coord_start + 1, r_j_new[1])
df.store(joint_q_new, coord_start + 2, r_j_new[2])
df.store(joint_q_new, coord_start + 3, r_j_new[3])
# update joint vel
df.store(joint_qd_new, dof_start + 0, w_j_new[0])
df.store(joint_qd_new, dof_start + 1, w_j_new[1])
df.store(joint_qd_new, dof_start + 2, w_j_new[2])
# fixed joint
#if (type == 3)
# pass
# free joint
if (type == 4):
# dofs: qd = (omega_x, omega_y, omega_z, vel_x, vel_y, vel_z)
# coords: q = (trans_x, trans_y, trans_z, quat_x, quat_y, quat_z, quat_w)
# angular and linear acceleration
m_s = float3(df.load(joint_qdd, dof_start + 0),
df.load(joint_qdd, dof_start + 1),
df.load(joint_qdd, dof_start + 2))
a_s = float3(df.load(joint_qdd, dof_start + 3),
df.load(joint_qdd, dof_start + 4),
df.load(joint_qdd, dof_start + 5))
# angular and linear velocity
w_s = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
v_s = float3(df.load(joint_qd, dof_start + 3),
df.load(joint_qd, dof_start + 4),
df.load(joint_qd, dof_start + 5))
# symplectic Euler
w_s = w_s + m_s*dt
v_s = v_s + a_s*dt
# translation of origin
p_s = float3(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2))
# linear vel of origin (note q/qd switch order of linear angular elements)
        # note we are converting the body twist in the space frame (w_s, v_s) to compute the center of mass velocity
dpdt_s = v_s + cross(w_s, p_s)
# quat and quat derivative
r_s = quat(df.load(joint_q, coord_start + 3),
df.load(joint_q, coord_start + 4),
df.load(joint_q, coord_start + 5),
df.load(joint_q, coord_start + 6))
drdt_s = mul(quat(w_s, 0.0), r_s) * 0.5
# new orientation (normalized)
p_s_new = p_s + dpdt_s * dt
r_s_new = normalize(r_s + drdt_s * dt)
# update transform
df.store(joint_q_new, coord_start + 0, p_s_new[0])
df.store(joint_q_new, coord_start + 1, p_s_new[1])
df.store(joint_q_new, coord_start + 2, p_s_new[2])
df.store(joint_q_new, coord_start + 3, r_s_new[0])
df.store(joint_q_new, coord_start + 4, r_s_new[1])
df.store(joint_q_new, coord_start + 5, r_s_new[2])
df.store(joint_q_new, coord_start + 6, r_s_new[3])
# update joint_twist
df.store(joint_qd_new, dof_start + 0, w_s[0])
df.store(joint_qd_new, dof_start + 1, w_s[1])
df.store(joint_qd_new, dof_start + 2, w_s[2])
df.store(joint_qd_new, dof_start + 3, v_s[0])
df.store(joint_qd_new, dof_start + 4, v_s[1])
df.store(joint_qd_new, dof_start + 5, v_s[2])
return 0
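# Illustrative reference (not used by the kernels): the scalar semi-implicit
# (symplectic) Euler update applied per dof in jcalc_integrate above; the velocity
# is advanced first and the *new* velocity is used to advance the coordinate.
def _ref_symplectic_euler(q, qd, qdd, dt):
    qd_new = qd + qdd * dt
    q_new = q + qd_new * dt
    return q_new, qd_new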
@df.func
def compute_link_transform(i: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_X_pj: df.tensor(df.spatial_transform),
joint_X_cm: df.tensor(df.spatial_transform),
joint_axis: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform)):
# parent transform
parent = load(joint_parent, i)
# parent transform in spatial coordinates
X_sp = spatial_transform_identity()
if (parent >= 0):
X_sp = load(body_X_sc, parent)
type = load(joint_type, i)
axis = load(joint_axis, i)
coord_start = load(joint_q_start, i)
dof_start = load(joint_qd_start, i)
# compute transform across joint
X_jc = jcalc_transform(type, axis, joint_q, coord_start)
X_pj = load(joint_X_pj, i)
X_sc = spatial_transform_multiply(X_sp, spatial_transform_multiply(X_pj, X_jc))
# compute transform of center of mass
X_cm = load(joint_X_cm, i)
X_sm = spatial_transform_multiply(X_sc, X_cm)
# store geometry transforms
store(body_X_sc, i, X_sc)
store(body_X_sm, i, X_sm)
return 0
@df.kernel
def eval_rigid_fk(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_X_pj: df.tensor(df.spatial_transform),
joint_X_cm: df.tensor(df.spatial_transform),
joint_axis: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
for i in range(start, end):
compute_link_transform(i,
joint_type,
joint_parent,
joint_q_start,
joint_qd_start,
joint_q,
joint_X_pj,
joint_X_cm,
joint_axis,
body_X_sc,
body_X_sm)
@df.func
def compute_link_velocity(i: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_qd: df.tensor(float),
joint_axis: df.tensor(df.float3),
body_I_m: df.tensor(df.spatial_matrix),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform),
joint_X_pj: df.tensor(df.spatial_transform),
gravity: df.tensor(df.float3),
# outputs
joint_S_s: df.tensor(df.spatial_vector),
body_I_s: df.tensor(df.spatial_matrix),
body_v_s: df.tensor(df.spatial_vector),
body_f_s: df.tensor(df.spatial_vector),
body_a_s: df.tensor(df.spatial_vector)):
type = df.load(joint_type, i)
axis = df.load(joint_axis, i)
parent = df.load(joint_parent, i)
dof_start = df.load(joint_qd_start, i)
X_sc = df.load(body_X_sc, i)
# parent transform in spatial coordinates
X_sp = spatial_transform_identity()
if (parent >= 0):
X_sp = load(body_X_sc, parent)
X_pj = load(joint_X_pj, i)
X_sj = spatial_transform_multiply(X_sp, X_pj)
# compute motion subspace and velocity across the joint (also stores S_s to global memory)
v_j_s = jcalc_motion(type, axis, X_sj, joint_S_s, joint_qd, dof_start)
# parent velocity
v_parent_s = spatial_vector()
a_parent_s = spatial_vector()
if (parent >= 0):
v_parent_s = df.load(body_v_s, parent)
a_parent_s = df.load(body_a_s, parent)
# body velocity, acceleration
v_s = v_parent_s + v_j_s
a_s = a_parent_s + spatial_cross(v_s, v_j_s) # + self.joint_S_s[i]*self.joint_qdd[i]
# compute body forces
X_sm = df.load(body_X_sm, i)
I_m = df.load(body_I_m, i)
# gravity and external forces (expressed in frame aligned with s but centered at body mass)
g = df.load(gravity, 0)
m = I_m[3, 3]
f_g_m = spatial_vector(float3(), g) * m
f_g_s = spatial_transform_wrench(spatial_transform(spatial_transform_get_translation(X_sm), quat_identity()), f_g_m)
#f_ext_s = df.load(body_f_s, i) + f_g_s
# body forces
I_s = spatial_transform_inertia(X_sm, I_m)
f_b_s = df.mul(I_s, a_s) + spatial_cross_dual(v_s, df.mul(I_s, v_s))
df.store(body_v_s, i, v_s)
df.store(body_a_s, i, a_s)
df.store(body_f_s, i, f_b_s - f_g_s)
df.store(body_I_s, i, I_s)
return 0
@df.func
def compute_link_tau(offset: int,
joint_end: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
joint_limit_ke: df.tensor(float),
joint_limit_kd: df.tensor(float),
joint_S_s: df.tensor(df.spatial_vector),
body_fb_s: df.tensor(df.spatial_vector),
# outputs
body_ft_s: df.tensor(df.spatial_vector),
tau: df.tensor(float)):
# for backwards traversal
i = joint_end-offset-1
type = df.load(joint_type, i)
parent = df.load(joint_parent, i)
dof_start = df.load(joint_qd_start, i)
coord_start = df.load(joint_q_start, i)
target_k_e = df.load(joint_target_ke, i)
target_k_d = df.load(joint_target_kd, i)
limit_k_e = df.load(joint_limit_ke, i)
limit_k_d = df.load(joint_limit_kd, i)
# total forces on body
f_b_s = df.load(body_fb_s, i)
f_t_s = df.load(body_ft_s, i)
f_s = f_b_s + f_t_s
# compute joint-space forces, writes out tau
jcalc_tau(type, target_k_e, target_k_d, limit_k_e, limit_k_d, joint_S_s, joint_q, joint_qd, joint_act, joint_target, joint_limit_lower, joint_limit_upper, coord_start, dof_start, f_s, tau)
# update parent forces, todo: check that this is valid for the backwards pass
if (parent >= 0):
df.atomic_add(body_ft_s, parent, f_s)
return 0
@df.kernel
def eval_rigid_id(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_axis: df.tensor(df.float3),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
body_I_m: df.tensor(df.spatial_matrix),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform),
joint_X_pj: df.tensor(df.spatial_transform),
gravity: df.tensor(df.float3),
# outputs
joint_S_s: df.tensor(df.spatial_vector),
body_I_s: df.tensor(df.spatial_matrix),
body_v_s: df.tensor(df.spatial_vector),
body_f_s: df.tensor(df.spatial_vector),
body_a_s: df.tensor(df.spatial_vector)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
count = end-start
# compute link velocities and coriolis forces
for i in range(start, end):
compute_link_velocity(
i,
joint_type,
joint_parent,
joint_qd_start,
joint_qd,
joint_axis,
body_I_m,
body_X_sc,
body_X_sm,
joint_X_pj,
gravity,
joint_S_s,
body_I_s,
body_v_s,
body_f_s,
body_a_s)
@df.kernel
def eval_rigid_tau(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
joint_limit_ke: df.tensor(float),
joint_limit_kd: df.tensor(float),
joint_axis: df.tensor(df.float3),
joint_S_s: df.tensor(df.spatial_vector),
body_fb_s: df.tensor(df.spatial_vector),
# outputs
body_ft_s: df.tensor(df.spatial_vector),
tau: df.tensor(float)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
count = end-start
# compute joint forces
for i in range(0, count):
compute_link_tau(
i,
end,
joint_type,
joint_parent,
joint_q_start,
joint_qd_start,
joint_q,
joint_qd,
joint_act,
joint_target,
joint_target_ke,
joint_target_kd,
joint_limit_lower,
joint_limit_upper,
joint_limit_ke,
joint_limit_kd,
joint_S_s,
body_fb_s,
body_ft_s,
tau)
@df.kernel
def eval_rigid_jacobian(
articulation_start: df.tensor(int),
articulation_J_start: df.tensor(int),
joint_parent: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_S_s: df.tensor(spatial_vector),
# outputs
J: df.tensor(float)):
# one thread per-articulation
index = tid()
joint_start = df.load(articulation_start, index)
joint_end = df.load(articulation_start, index+1)
joint_count = joint_end-joint_start
J_offset = df.load(articulation_J_start, index)
# in spatial.h
spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_start, joint_count, J_offset, J)
# @df.kernel
# def eval_rigid_jacobian(
# articulation_start: df.tensor(int),
# articulation_J_start: df.tensor(int),
# joint_parent: df.tensor(int),
# joint_qd_start: df.tensor(int),
# joint_S_s: df.tensor(spatial_vector),
# # outputs
# J: df.tensor(float)):
# # one thread per-articulation
# index = tid()
# joint_start = df.load(articulation_start, index)
# joint_end = df.load(articulation_start, index+1)
# joint_count = joint_end-joint_start
# dof_start = df.load(joint_qd_start, joint_start)
# dof_end = df.load(joint_qd_start, joint_end)
# dof_count = dof_end-dof_start
# #(const spatial_vector* S, const int* joint_parents, const int* joint_qd_start, int num_links, int num_dofs, float* J)
# spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_count, dof_count, J)
@df.kernel
def eval_rigid_mass(
articulation_start: df.tensor(int),
articulation_M_start: df.tensor(int),
body_I_s: df.tensor(spatial_matrix),
# outputs
M: df.tensor(float)):
# one thread per-articulation
index = tid()
joint_start = df.load(articulation_start, index)
joint_end = df.load(articulation_start, index+1)
joint_count = joint_end-joint_start
M_offset = df.load(articulation_M_start, index)
# in spatial.h
spatial_mass(body_I_s, joint_start, joint_count, M_offset, M)
@df.kernel
def eval_dense_gemm(m: int, n: int, p: int, t1: int, t2: int, A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
dense_gemm(m, n, p, t1, t2, A, B, C)
@df.kernel
def eval_dense_gemm_batched(m: df.tensor(int), n: df.tensor(int), p: df.tensor(int), t1: int, t2: int, A_start: df.tensor(int), B_start: df.tensor(int), C_start: df.tensor(int), A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
dense_gemm_batched(m, n, p, t1, t2, A_start, B_start, C_start, A, B, C)
@df.kernel
def eval_dense_cholesky(n: int, A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
dense_chol(n, A, regularization, L)
@df.kernel
def eval_dense_cholesky_batched(A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
dense_chol_batched(A_start, A_dim, A, regularization, L)
@df.kernel
def eval_dense_subs(n: int, L: df.tensor(float), b: df.tensor(float), x: df.tensor(float)):
dense_subs(n, L, b, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve(n: int, A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
dense_solve(n, A, L, b, tmp, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve_batched(b_start: df.tensor(int), A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
dense_solve_batched(b_start, A_start, A_dim, A, L, b, tmp, x)
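# Illustrative reference (not part of the solver): the joint-space forward dynamics
# assembled from the kernels above, written with NumPy. J is the (6*links x dofs)
# articulation Jacobian, M the block-diagonal 6x6 link inertias, and 'armature' is
# assumed to play the role of the per-dof regularization passed to
# eval_dense_cholesky_batched.
def _ref_forward_dynamics(J, M, tau, armature):
    import numpy as np
    H = J.T @ M @ J + np.diag(armature) # joint-space inertia, H = J^T M J
    L = np.linalg.cholesky(H) # factor once, reused across substeps
    qdd = np.linalg.solve(L.T, np.linalg.solve(L, tau)) # solve H * qdd = tau
    return qdd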
@df.kernel
def eval_rigid_integrate(
joint_type: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_qdd: df.tensor(float),
dt: float,
# outputs
joint_q_new: df.tensor(float),
joint_qd_new: df.tensor(float)):
    # one thread per joint
index = tid()
type = df.load(joint_type, index)
coord_start = df.load(joint_q_start, index)
dof_start = df.load(joint_qd_start, index)
jcalc_integrate(
type,
joint_q,
joint_qd,
joint_qdd,
coord_start,
dof_start,
dt,
joint_q_new,
joint_qd_new)
g_state_out = None
# define PyTorch autograd op to wrap simulate func
class SimulateFunc(torch.autograd.Function):
"""PyTorch autograd function representing a simulation stpe
Note:
This node will be inserted into the computation graph whenever
`forward()` is called on an integrator object. It should not be called
directly by the user.
"""
@staticmethod
def forward(ctx, integrator, model, state_in, dt, substeps, mass_matrix_freq, *tensors):
# record launches
ctx.tape = df.Tape()
ctx.inputs = tensors
#ctx.outputs = df.to_weak_list(state_out.flatten())
actuation = state_in.joint_act
# simulate
for i in range(substeps):
# ensure actuation is set on all substeps
state_in.joint_act = actuation
state_out = model.state()
integrator._simulate(ctx.tape, model, state_in, state_out, dt/float(substeps), update_mass_matrix=((i%mass_matrix_freq)==0))
# swap states
state_in = state_out
# use global to pass state object back to caller
global g_state_out
g_state_out = state_out
ctx.outputs = df.to_weak_list(state_out.flatten())
return tuple(state_out.flatten())
@staticmethod
def backward(ctx, *grads):
# ensure grads are contiguous in memory
adj_outputs = df.make_contiguous(grads)
# register outputs with tape
outputs = df.to_strong_list(ctx.outputs)
for o in range(len(outputs)):
ctx.tape.adjoints[outputs[o]] = adj_outputs[o]
# replay launches backwards
ctx.tape.replay()
# find adjoint of inputs
adj_inputs = []
for i in ctx.inputs:
if i in ctx.tape.adjoints:
adj_inputs.append(ctx.tape.adjoints[i])
else:
adj_inputs.append(None)
# free the tape
ctx.tape.reset()
# filter grads to replace empty tensors / no grad / constant params with None
return (None, None, None, None, None, None, *df.filter_grads(adj_inputs))
class SemiImplicitIntegrator:
"""A semi-implicit integrator using symplectic Euler
After constructing `Model` and `State` objects this time-integrator
may be used to advance the simulation state forward in time.
    Semi-implicit time integration is a variational integrator that approximately
    conserves energy; however, it is not unconditionally stable and requires a
    time-step small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example:
>>> integrator = df.SemiImplicitIntegrator()
>>>
>>> # simulation loop
>>> for i in range(100):
>>> state = integrator.forward(model, state, dt)
"""
def __init__(self):
pass
def forward(self, model: Model, state_in: State, dt: float, substeps: int, mass_matrix_freq: int) -> State:
"""Performs a single integration step forward in time
This method inserts a node into the PyTorch computational graph with
references to all model and state tensors such that gradients
can be propagrated back through the simulation step.
Args:
model: Simulation model
state: Simulation state at the start the time-step
dt: The simulation time-step (usually in seconds)
Returns:
The state of the system at the end of the time-step
"""
if dflex.config.no_grad:
# if no gradient required then do inplace update
for i in range(substeps):
self._simulate(df.Tape(), model, state_in, state_in, dt/float(substeps), update_mass_matrix=(i%mass_matrix_freq)==0)
return state_in
else:
# get list of inputs and outputs for PyTorch tensor tracking
inputs = [*state_in.flatten(), *model.flatten()]
# run sim as a PyTorch op
tensors = SimulateFunc.apply(self, model, state_in, dt, substeps, mass_matrix_freq, *inputs)
global g_state_out
state_out = g_state_out
g_state_out = None # null reference
return state_out
def _simulate(self, tape, model, state_in, state_out, dt, update_mass_matrix=True):
with dflex.util.ScopedTimer("simulate", False):
# alloc particle force buffer
if (model.particle_count):
state_out.particle_f.zero_()
if (model.link_count):
state_out.body_ft_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
state_out.body_f_ext_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
# damped springs
if (model.spring_count):
tape.launch(func=eval_springs,
dim=model.spring_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle elastic and lift/drag forces
if (model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles,
dim=model.tri_count,
inputs=[
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle/triangle contacts
if (model.enable_tri_collisions and model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles_contact,
dim=model.tri_count * model.particle_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle bending
if (model.edge_count):
tape.launch(func=eval_bending,
dim=model.edge_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.edge_indices, model.edge_rest_angle, model.edge_ke, model.edge_kd],
outputs=[state_out.particle_f],
adapter=model.adapter)
# particle ground contact
if (model.ground and model.particle_count):
tape.launch(func=eval_contacts,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
outputs=[state_out.particle_f],
adapter=model.adapter)
# tetrahedral FEM
if (model.tet_count):
tape.launch(func=eval_tetrahedra,
dim=model.tet_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials],
outputs=[state_out.particle_f],
adapter=model.adapter)
#----------------------------
# articulations
if (model.link_count):
# evaluate body transforms
tape.launch(
func=eval_rigid_fk,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
model.joint_X_pj,
model.joint_X_cm,
model.joint_axis
],
outputs=[
state_out.body_X_sc,
state_out.body_X_sm
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint inertias, motion vectors, and forces
tape.launch(
func=eval_rigid_id,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
model.joint_axis,
model.joint_target_ke,
model.joint_target_kd,
model.body_I_m,
state_out.body_X_sc,
state_out.body_X_sm,
model.joint_X_pj,
model.gravity
],
outputs=[
state_out.joint_S_s,
state_out.body_I_s,
state_out.body_v_s,
state_out.body_f_s,
state_out.body_a_s,
],
adapter=model.adapter,
preserve_output=True)
if (model.ground and model.contact_count > 0):
# evaluate contact forces
tape.launch(
func=eval_rigid_contacts_art,
dim=model.contact_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.contact_body0,
model.contact_point0,
model.contact_dist,
model.contact_material,
model.shape_materials
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# particle shape contact
if (model.particle_count):
# tape.launch(func=eval_soft_contacts,
# dim=model.particle_count*model.shape_count,
# inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
# outputs=[state_out.particle_f],
# adapter=model.adapter)
tape.launch(func=eval_soft_contacts,
dim=model.particle_count*model.shape_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
state_in.body_X_sc,
state_in.body_v_s,
model.shape_transform,
model.shape_body,
model.shape_geo_type,
torch.Tensor(),
model.shape_geo_scale,
model.shape_materials,
model.contact_ke,
model.contact_kd,
model.contact_kf,
model.contact_mu],
# outputs
outputs=[
state_out.particle_f,
state_out.body_f_s],
adapter=model.adapter)
# evaluate muscle actuation
tape.launch(
func=eval_muscles,
dim=model.muscle_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.muscle_start,
model.muscle_params,
model.muscle_links,
model.muscle_points,
model.muscle_activation
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint torques
tape.launch(
func=eval_rigid_tau,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_in.joint_act,
model.joint_target,
model.joint_target_ke,
model.joint_target_kd,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_limit_ke,
model.joint_limit_kd,
model.joint_axis,
state_out.joint_S_s,
state_out.body_f_s
],
outputs=[
state_out.body_ft_s,
state_out.joint_tau
],
adapter=model.adapter,
preserve_output=True)
if (update_mass_matrix):
model.alloc_mass_matrix()
# build J
tape.launch(
func=eval_rigid_jacobian,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_J_start,
model.joint_parent,
model.joint_qd_start,
state_out.joint_S_s
],
outputs=[
model.J
],
adapter=model.adapter,
preserve_output=True)
# build M
tape.launch(
func=eval_rigid_mass,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_M_start,
state_out.body_I_s
],
outputs=[
model.M
],
adapter=model.adapter,
preserve_output=True)
# form P = M*J
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_M_rows,
model.articulation_J_cols,
model.articulation_J_rows,
0,
0,
model.articulation_M_start,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.M,
model.J,
model.P,
adapter=model.adapter)
# form H = J^T*P
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_J_cols,
model.articulation_J_cols,
model.articulation_J_rows, # P rows is the same as J rows
1,
0,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.articulation_H_start,
model.J,
model.P,
model.H,
adapter=model.adapter)
# compute decomposition
tape.launch(
func=eval_dense_cholesky_batched,
dim=model.articulation_count,
inputs=[
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.joint_armature
],
outputs=[
model.L
],
adapter=model.adapter,
skip_check_grad=True)
tmp = torch.zeros_like(state_out.joint_tau)
# solve for qdd
tape.launch(
func=eval_dense_solve_batched,
dim=model.articulation_count,
inputs=[
model.articulation_dof_start,
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.L,
state_out.joint_tau,
tmp
],
outputs=[
state_out.joint_qdd
],
adapter=model.adapter,
skip_check_grad=True)
# integrate joint dofs -> joint coords
tape.launch(
func=eval_rigid_integrate,
dim=model.link_count,
inputs=[
model.joint_type,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_out.joint_qdd,
dt
],
outputs=[
state_out.joint_q,
state_out.joint_qd
],
adapter=model.adapter)
#----------------------------
# integrate particles
if (model.particle_count):
tape.launch(func=integrate_particles,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
outputs=[state_out.particle_q, state_out.particle_qd],
adapter=model.adapter)
return state_out
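# Usage sketch for the integrator above (kept as a comment so it does not run on
# import; 'model' is assumed to come from dflex's ModelBuilder and 'dt', 'sim_steps'
# are user-chosen constants). Gradients flow through the returned state because
# forward() records every kernel launch on a Tape:
#
#   integrator = SemiImplicitIntegrator()
#   state = model.state()
#   for i in range(sim_steps):
#       state = integrator.forward(model, state, dt=1.0/60.0, substeps=16, mass_matrix_freq=16)
#   loss = state.joint_q.sum() # any differentiable function of the final state
#   loss.backward() # adjoints are replayed through SimulateFunc.backward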
@df.kernel
def solve_springs(x: df.tensor(df.float3),
v: df.tensor(df.float3),
invmass: df.tensor(float),
spring_indices: df.tensor(int),
spring_rest_lengths: df.tensor(float),
spring_stiffness: df.tensor(float),
spring_damping: df.tensor(float),
dt: float,
delta: df.tensor(df.float3)):
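# Positional (XPBD-style) distance constraint per spring: C = |xi - xj| - rest.
# The correction along the spring direction is split between the two particles
# according to their inverse masses; the compliance term (alpha) is currently
# disabled below, so the constraint is projected as rigid.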
tid = df.tid()
i = df.load(spring_indices, tid * 2 + 0)
j = df.load(spring_indices, tid * 2 + 1)
ke = df.load(spring_stiffness, tid)
kd = df.load(spring_damping, tid)
rest = df.load(spring_rest_lengths, tid)
xi = df.load(x, i)
xj = df.load(x, j)
vi = df.load(v, i)
vj = df.load(v, j)
xij = xi - xj
vij = vi - vj
l = length(xij)
l_inv = 1.0 / l
# normalized spring direction
dir = xij * l_inv
c = l - rest
dcdt = dot(dir, vij)
# damping based on relative velocity.
#fs = dir * (ke * c + kd * dcdt)
wi = df.load(invmass, i)
wj = df.load(invmass, j)
denom = wi + wj
alpha = 1.0/(ke*dt*dt)
multiplier = c / (denom)# + alpha)
xd = dir*multiplier
df.atomic_sub(delta, i, xd*wi)
df.atomic_add(delta, j, xd*wj)
@df.kernel
def solve_tetrahedra(x: df.tensor(df.float3),
v: df.tensor(df.float3),
inv_mass: df.tensor(float),
indices: df.tensor(int),
pose: df.tensor(df.mat33),
activation: df.tensor(float),
materials: df.tensor(float),
dt: float,
relaxation: float,
delta: df.tensor(df.float3)):
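# XPBD FEM element constraints. Two constraints are projected per tetrahedron:
# a deviatoric term C = sqrt(|tr(F^T*F) - 3|) (the C_sqrt variant below) and a
# volumetric term C_vol = det(F) - alpha, where F = Ds*Dm is the deformation
# gradient built from the rest-pose matrix loaded from `pose`. Corrections are
# weighted by inverse masses and scaled by the relaxation factor.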
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
act = df.load(activation, tid)
k_mu = df.load(materials, tid * 3 + 0)
k_lambda = df.load(materials, tid * 3 + 1)
k_damp = df.load(materials, tid * 3 + 2)
x0 = df.load(x, i)
x1 = df.load(x, j)
x2 = df.load(x, k)
x3 = df.load(x, l)
v0 = df.load(v, i)
v1 = df.load(v, j)
v2 = df.load(v, k)
v3 = df.load(v, l)
w0 = df.load(inv_mass, i)
w1 = df.load(inv_mass, j)
w2 = df.load(inv_mass, k)
w3 = df.load(inv_mass, l)
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v10 = v1 - v0
v20 = v2 - v0
v30 = v3 - v0
Ds = df.mat33(x10, x20, x30)
Dm = df.load(pose, tid)
inv_rest_volume = df.determinant(Dm) * 6.0
rest_volume = 1.0 / inv_rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
f1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
f2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
f3 = df.float3(F[0, 2], F[1, 2], F[2, 2])
# C_sqrt
tr = dot(f1, f1) + dot(f2, f2) + dot(f3, f3)
r_s = df.sqrt(abs(tr - 3.0))
C = r_s
if (r_s == 0.0):
return
if (tr < 3.0):
r_s = 0.0 - r_s
dCdx = F*df.transpose(Dm)*(1.0/r_s)
alpha = 1.0 + k_mu / k_lambda
# C_Neo
# r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s
# dCdx = F*df.transpose(Dm)*r_s_inv
# alpha = 1.0 + k_mu / k_lambda
# C_Spherical
# r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s - df.sqrt(3.0)
# dCdx = F*df.transpose(Dm)*r_s_inv
# alpha = 1.0
# C_D
#r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
#C = r_s*r_s - 3.0
#dCdx = F*df.transpose(Dm)*2.0
#alpha = 1.0
grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
denom = dot(grad0,grad0)*w0 + dot(grad1,grad1)*w1 + dot(grad2,grad2)*w2 + dot(grad3,grad3)*w3
multiplier = C/(denom + 1.0/(k_mu*dt*dt*rest_volume))
delta0 = grad0*multiplier
delta1 = grad1*multiplier
delta2 = grad2*multiplier
delta3 = grad3*multiplier
# hydrostatic part
J = df.determinant(F)
C_vol = J - alpha
# dCdx = df.mat33(cross(f2, f3), cross(f3, f1), cross(f1, f2))*df.transpose(Dm)
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
s = inv_rest_volume / 6.0
grad1 = df.cross(x20, x30) * s
grad2 = df.cross(x30, x10) * s
grad3 = df.cross(x10, x20) * s
grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
denom = dot(grad0, grad0)*w0 + dot(grad1, grad1)*w1 + dot(grad2, grad2)*w2 + dot(grad3, grad3)*w3
multiplier = C_vol/(denom + 1.0/(k_lambda*dt*dt*rest_volume))
delta0 = delta0 + grad0 * multiplier
delta1 = delta1 + grad1 * multiplier
delta2 = delta2 + grad2 * multiplier
delta3 = delta3 + grad3 * multiplier
# apply forces
df.atomic_sub(delta, i, delta0*w0*relaxation)
df.atomic_sub(delta, j, delta1*w1*relaxation)
df.atomic_sub(delta, k, delta2*w2*relaxation)
df.atomic_sub(delta, l, delta3*w3*relaxation)
@df.kernel
def solve_contacts(
x: df.tensor(df.float3),
v: df.tensor(df.float3),
inv_mass: df.tensor(float),
mu: float,
dt: float,
delta: df.tensor(df.float3)):
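# Ground-plane contact projection for particles. The plane normal is n = (0, 1, 0)
# with a contact offset of 0.01; c < 0 indicates penetration. The normal correction
# removes the penetration while the tangential correction approximates Coulomb
# friction by clamping against mu times the normal term.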
tid = df.tid()
x0 = df.load(x, tid)
v0 = df.load(v, tid)
w0 = df.load(inv_mass, tid)
n = df.float3(0.0, 1.0, 0.0)
c = df.dot(n, x0) - 0.01
if (c > 0.0):
return
# normal
lambda_n = c
delta_n = n*lambda_n
# friction
vn = df.dot(n, v0)
vt = v0 - n * vn
lambda_f = df.max(mu*lambda_n, 0.0 - df.length(vt)*dt)
delta_f = df.normalize(vt)*lambda_f
df.atomic_add(delta, tid, delta_f - delta_n)
@df.kernel
def apply_deltas(x_orig: df.tensor(df.float3),
v_orig: df.tensor(df.float3),
x_pred: df.tensor(df.float3),
delta: df.tensor(df.float3),
dt: float,
x_out: df.tensor(df.float3),
v_out: df.tensor(df.float3)):
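# Apply the accumulated constraint deltas to the predicted positions and recover
# velocities from the total position change over the step: v = (x_new - x_orig) / dt.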
tid = df.tid()
x0 = df.load(x_orig, tid)
xp = df.load(x_pred, tid)
# constraint deltas
d = df.load(delta, tid)
x_new = xp + d
v_new = (x_new - x0)/dt
df.store(x_out, tid, x_new)
df.store(v_out, tid, v_new)
class XPBDIntegrator:
"""A implicit integrator using XPBD
After constructing `Model` and `State` objects this time-integrator
may be used to advance the simulation state forward in time.
Semi-implicit time integration is a variational integrator that
preserves energy, however it not unconditionally stable, and requires a time-step
small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example:
>>> integrator = df.XPBDIntegrator()
>>>
>>> # simulation loop
>>> for i in range(100):
>>> state = integrator.forward(model, state, dt)
"""
def __init__(self):
pass
def forward(self, model: Model, state_in: State, dt: float) -> State:
"""Performs a single integration step forward in time
This method inserts a node into the PyTorch computational graph with
references to all model and state tensors such that gradients
can be propagated back through the simulation step.
Args:
model: Simulation model
state: Simulation state at the start of the time-step
dt: The simulation time-step (usually in seconds)
Returns:
The state of the system at the end of the time-step
"""
if dflex.config.no_grad:
# if no gradient required then do inplace update
self._simulate(df.Tape(), model, state_in, state_in, dt)
return state_in
else:
# get list of inputs and outputs for PyTorch tensor tracking
inputs = [*state_in.flatten(), *model.flatten()]
# allocate new output
state_out = model.state()
# run sim as a PyTorch op
tensors = SimulateFunc.apply(self, model, state_in, state_out, dt, *inputs)
return state_out
def _simulate(self, tape, model, state_in, state_out, dt):
with dflex.util.ScopedTimer("simulate", False):
# alloc particle force buffer
if (model.particle_count):
state_out.particle_f.zero_()
q_pred = torch.zeros_like(state_in.particle_q)
qd_pred = torch.zeros_like(state_in.particle_qd)
#----------------------------
# integrate particles
if (model.particle_count):
tape.launch(func=integrate_particles,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
outputs=[q_pred, qd_pred],
adapter=model.adapter)
# contacts
if (model.particle_count and model.ground):
tape.launch(func=solve_contacts,
dim=model.particle_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.contact_mu, dt],
outputs=[state_out.particle_f],
adapter=model.adapter)
# damped springs
if (model.spring_count):
tape.launch(func=solve_springs,
dim=model.spring_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping, dt],
outputs=[state_out.particle_f],
adapter=model.adapter)
# tetrahedral FEM
if (model.tet_count):
tape.launch(func=solve_tetrahedra,
dim=model.tet_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials, dt, model.relaxation],
outputs=[state_out.particle_f],
adapter=model.adapter)
# apply updates
tape.launch(func=apply_deltas,
dim=model.particle_count,
inputs=[state_in.particle_q,
state_in.particle_qd,
q_pred,
state_out.particle_f,
dt],
outputs=[state_out.particle_q,
state_out.particle_qd],
adapter=model.adapter)
return state_out
| 97,130 | Python | 31.333888 | 241 | 0.51253 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/matnn.h | #pragma once
CUDA_CALLABLE inline int dense_index(int stride, int i, int j)
{
return i*stride + j;
}
template <bool transpose>
CUDA_CALLABLE inline int dense_index(int rows, int cols, int i, int j)
{
if (transpose)
return j*rows + i;
else
return i*cols + j;
}
#ifdef CPU
const int kNumThreadsPerBlock = 1;
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
for (int i=0; i < m; i++)
{
for (int j=0; j < n; ++j)
{
float sum = 0.0f;
for (int k=0; k < p; ++k)
{
sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
}
if (add)
C[i*n + j] += sum;
else
C[i*n + j] = sum;
}
}
}
#else
const int kNumThreadsPerBlock = 256;
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
// each thread in the block calculates an output (or more if output dim > block dim)
for (int e=threadIdx.x; e < m*n; e += blockDim.x)
{
const int i=e/n;
const int j=e%n;
float sum = 0.0f;
for (int k=0; k < p; ++k)
{
sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
}
if (add)
C[i*n + j] += sum;
else
C[i*n + j] = sum;
}
}
#endif
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm(int m, int n, int p, int t1, int t2, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
if (t1 == 0 && t2 == 0)
dense_gemm_impl<false, false, add>(m, n, p, A, B, C);
else if (t1 == 1 && t2 == 0)
dense_gemm_impl<true, false, add>(m, n, p, A, B, C);
else if (t1 == 0 && t2 == 1)
dense_gemm_impl<false, true, add>(m, n, p, A, B, C);
else if (t1 == 1 && t2 == 1)
dense_gemm_impl<true, true, add>(m, n, p, A, B, C);
}
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm_batched(
const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
// on the CPU each thread computes the whole matrix multiply
// on the GPU each block computes the multiply with one output per-thread
const int batch = tid()/kNumThreadsPerBlock;
dense_gemm<add>(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch]);
}
// computes c = b^T*a*b, with a and b being stored in row-major layout
CUDA_CALLABLE inline void dense_quadratic()
{
}
// CUDA_CALLABLE inline void dense_chol(int n, const float* A, float* L)
// {
// // for each column
// for (int j=0; j < n; ++j)
// {
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] = A[dense_index(n, i, j)];
// }
// for (int k = 0; k < j; ++k)
// {
// const float p = L[dense_index(n, j, k)];
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] -= p*L[dense_index(n, i, k)];
// }
// }
// // scale
// const float d = L[dense_index(n, j, j)];
// const float s = 1.0f/sqrtf(d);
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] *=s;
// }
// }
// }
void CUDA_CALLABLE inline dense_chol(int n, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
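// In-place lower-triangular Cholesky factorization (Cholesky-Crout ordering).
// The per-row regularization term is added to the diagonal before factorization;
// the articulation solve passes the joint armature for this term.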
for (int j=0; j < n; ++j)
{
float s = A[dense_index(n, j, j)] + regularization[j];
for (int k=0; k < j; ++k)
{
float r = L[dense_index(n, j, k)];
s -= r*r;
}
s = sqrtf(s);
const float invS = 1.0f/s;
L[dense_index(n, j, j)] = s;
for (int i=j+1; i < n; ++i)
{
s = A[dense_index(n, i, j)];
for (int k=0; k < j; ++k)
{
s -= L[dense_index(n, i, k)]*L[dense_index(n, j, k)];
}
L[dense_index(n, i, j)] = s*invS;
}
}
}
void CUDA_CALLABLE inline dense_chol_batched(const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
const int batch = tid();
const int n = A_dim[batch];
const int offset = A_start[batch];
dense_chol(n, A + offset, regularization + n*batch, L + offset);
}
// Solves (L*L^T)x = b given the Cholesky factor L
CUDA_CALLABLE inline void dense_subs(int n, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ x)
{
// forward substitution
for (int i=0; i < n; ++i)
{
float s = b[i];
for (int j=0; j < i; ++j)
{
s -= L[dense_index(n, i, j)]*x[j];
}
x[i] = s/L[dense_index(n, i, i)];
}
// backward substitution
for (int i=n-1; i >= 0; --i)
{
float s = x[i];
for (int j=i+1; j < n; ++j)
{
s -= L[dense_index(n, j, i)]*x[j];
}
x[i] = s/L[dense_index(n, i, i)];
}
}
CUDA_CALLABLE inline void dense_solve(int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
dense_subs(n, L, b, x);
}
CUDA_CALLABLE inline void dense_solve_batched(
const int* __restrict__ b_start, const int* A_start, const int* A_dim,
const float* __restrict__ A, const float* __restrict__ L,
const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
const int batch = tid();
dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], NULL, x + b_start[batch]);
}
CUDA_CALLABLE inline void print_matrix(const char* name, int m, int n, const float* data)
{
printf("%s = [", name);
for (int i=0; i < m; ++i)
{
for (int j=0; j < n; ++j)
{
printf("%f ", data[dense_index(n, i, j)]);
}
printf(";\n");
}
printf("]\n");
}
// adjoint methods
CUDA_CALLABLE inline void adj_dense_gemm(
int m, int n, int p, int t1, int t2, const float* A, const float* B, float* C,
int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2, float* adj_A, float* adj_B, const float* adj_C)
{
// print_matrix("A", m, p, A);
// print_matrix("B", p, n, B);
// printf("t1: %d t2: %d\n", t1, t2);
if (t1)
{
dense_gemm<true>(p, m, n, 0, 1, B, adj_C, adj_A);
dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
}
else
{
dense_gemm<true>(m, p, n, 0, int(!t2), adj_C, B, adj_A);
dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
}
}
CUDA_CALLABLE inline void adj_dense_gemm_batched(
const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C,
// adj
int* __restrict__ adj_m, int* __restrict__ adj_n, int* __restrict__ adj_p, int adj_t1, int adj_t2,
int* __restrict__ adj_A_start, int* __restrict__ adj_B_start, int* __restrict__ adj_C_start,
float* __restrict__ adj_A, float* __restrict__ adj_B, const float* __restrict__ adj_C)
{
const int batch = tid()/kNumThreadsPerBlock;
adj_dense_gemm(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch],
0, 0, 0, 0, 0, adj_A+A_start[batch], adj_B+B_start[batch], adj_C+C_start[batch]);
}
CUDA_CALLABLE inline void adj_dense_chol(
int n, const float* A, const float* __restrict__ regularization, float* L,
int adj_n, const float* adj_A, const float* __restrict__ adj_regularization, float* adj_L)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_chol_batched(
const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L,
const int* __restrict__ adj_A_start, const int* __restrict__ adj_A_dim, const float* __restrict__ adj_A, const float* __restrict__ adj_regularization, float* __restrict__ adj_L)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_subs(
int n, const float* L, const float* b, float* x,
int adj_n, const float* adj_L, const float* adj_b, float* adj_x)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_solve(
int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, const float* __restrict__ x,
int adj_n, float* __restrict__ adj_A, float* __restrict__ adj_L, float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
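// Reverse-mode rule for x = A^{-1}*b: adj_b += A^{-T}*adj_x and
// adj_A -= (A^{-T}*adj_x)*x^T. Since A = L*L^T is symmetric, A^{-T} = A^{-1},
// so the existing factor L is reused to back-substitute adj_x into tmp below.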
for (int i=0; i < n; ++i)
{
tmp[i] = 0.0f;
}
dense_subs(n, L, adj_x, tmp);
for (int i=0; i < n; ++i)
{
adj_b[i] += tmp[i];
}
//dense_subs(n, L, adj_x, adj_b);
// A* = -adj_b*x^T
for (int i=0; i < n; ++i)
{
for (int j=0; j < n; ++j)
{
adj_A[dense_index(n, i, j)] += -tmp[i]*x[j];
}
}
}
CUDA_CALLABLE inline void adj_dense_solve_batched(
const int* __restrict__ b_start, const int* A_start, const int* A_dim,
const float* __restrict__ A, const float* __restrict__ L,
const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x,
// adj
int* __restrict__ adj_b_start, int* __restrict__ adj_A_start, int* __restrict__ adj_A_dim,
float* __restrict__ adj_A, float* __restrict__ adj_L,
float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
const int batch = tid();
adj_dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], tmp + b_start[batch], x + b_start[batch],
0, adj_A + A_start[batch], adj_L + A_start[batch], adj_b + b_start[batch], tmp + b_start[batch], adj_x + b_start[batch]);
}
| 10,723 | C | 29.379603 | 202 | 0.531847 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/__init__.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from dflex.sim import *
from dflex.render import *
from dflex.adjoint import compile
from dflex.util import *
# compiles kernels
kernel_init()
| 569 | Python | 34.624998 | 76 | 0.804921 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/render.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This optional module contains a built-in renderer for the USD data
format that can be used to visualize time-sampled simulation data.
Users should create a simulation model and integrator and periodically
call :func:`UsdRenderer.update()` to write time-sampled simulation data to the USD stage.
Example:
>>> # construct a new USD stage
>>> stage = Usd.Stage.CreateNew("my_stage.usda")
>>> renderer = df.render.UsdRenderer(model, stage)
>>>
>>> time = 0.0
>>>
>>> for i in range(100):
>>>
>>> # update simulation here
>>> # ....
>>>
>>> # update renderer
>>> renderer.update(state, time)
>>> time += dt
>>>
>>> # write stage to file
>>> stage.Save()
Note:
You must have the Pixar USD bindings installed to use this module
please see https://developer.nvidia.com/usd to obtain precompiled
USD binaries and installation instructions.
"""
try:
from pxr import Usd, UsdGeom, Gf, Sdf
except ModuleNotFoundError:
print("No pxr package")
import dflex.sim
import dflex.util
import math
def _usd_add_xform(prim):
prim.ClearXformOpOrder()
t = prim.AddTranslateOp()
r = prim.AddOrientOp()
s = prim.AddScaleOp()
def _usd_set_xform(xform, transform, scale, time):
xform_ops = xform.GetOrderedXformOps()
pos = tuple(transform[0])
rot = tuple(transform[1])
xform_ops[0].Set(Gf.Vec3d(pos), time)
xform_ops[1].Set(Gf.Quatf(rot[3], rot[0], rot[1], rot[2]), time)
xform_ops[2].Set(Gf.Vec3d(scale), time)
# transforms a cylinder such that it connects the two points pos0, pos1
def _compute_segment_xform(pos0, pos1):
mid = (pos0 + pos1) * 0.5
height = (pos1 - pos0).GetLength()
dir = (pos1 - pos0) / height
rot = Gf.Rotation()
rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir))
scale = Gf.Vec3f(1.0, 1.0, height)
return (mid, Gf.Quath(rot.GetQuat()), scale)
class UsdRenderer:
"""A USD renderer
"""
def __init__(self, model: dflex.model.Model, stage):
"""Construct a UsdRenderer object
Args:
model: A simulation model
stage (Usd.Stage): A USD stage (either in memory or on disk)
"""
self.stage = stage
self.model = model
self.draw_points = True
self.draw_springs = False
self.draw_triangles = False
if (stage.GetPrimAtPath("/root")):
stage.RemovePrim("/root")
self.root = UsdGeom.Xform.Define(stage, '/root')
# add sphere instancer for particles
self.particle_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("particle_instancer"))
self.particle_instancer_sphere = UsdGeom.Sphere.Define(stage, self.particle_instancer.GetPath().AppendChild("sphere"))
self.particle_instancer_sphere.GetRadiusAttr().Set(model.particle_radius)
self.particle_instancer.CreatePrototypesRel().SetTargets([self.particle_instancer_sphere.GetPath()])
self.particle_instancer.CreateProtoIndicesAttr().Set([0] * model.particle_count)
# add line instancer
if (self.model.spring_count > 0):
self.spring_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("spring_instancer"))
self.spring_instancer_cylinder = UsdGeom.Capsule.Define(stage, self.spring_instancer.GetPath().AppendChild("cylinder"))
self.spring_instancer_cylinder.GetRadiusAttr().Set(0.01)
self.spring_instancer.CreatePrototypesRel().SetTargets([self.spring_instancer_cylinder.GetPath()])
self.spring_instancer.CreateProtoIndicesAttr().Set([0] * model.spring_count)
self.stage.SetDefaultPrim(self.root.GetPrim())
# time codes
try:
self.stage.SetStartTimeCode(0.0)
self.stage.SetEndTimeCode(0.0)
self.stage.SetTimeCodesPerSecond(1.0)
except:
pass
# add dynamic cloth mesh
if (model.tri_count > 0):
self.cloth_mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("cloth"))
self.cloth_remap = {}
self.cloth_verts = []
self.cloth_indices = []
# USD needs a contiguous vertex buffer, use a dict to map from simulation indices->render indices
indices = self.model.tri_indices.flatten().tolist()
for i in indices:
if i not in self.cloth_remap:
# copy vertex
new_index = len(self.cloth_verts)
self.cloth_verts.append(self.model.particle_q[i].tolist())
self.cloth_indices.append(new_index)
self.cloth_remap[i] = new_index
else:
self.cloth_indices.append(self.cloth_remap[i])
self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts)
self.cloth_mesh.GetFaceVertexIndicesAttr().Set(self.cloth_indices)
self.cloth_mesh.GetFaceVertexCountsAttr().Set([3] * model.tri_count)
else:
self.cloth_mesh = None
# built-in ground plane
if (model.ground):
size = 10.0
mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("plane_0"))
mesh.CreateDoubleSidedAttr().Set(True)
points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
counts = (4, )
indices = [0, 1, 2, 3]
mesh.GetPointsAttr().Set(points)
mesh.GetNormalsAttr().Set(normals)
mesh.GetFaceVertexCountsAttr().Set(counts)
mesh.GetFaceVertexIndicesAttr().Set(indices)
# add rigid bodies xform root
for b in range(model.link_count):
xform = UsdGeom.Xform.Define(stage, self.root.GetPath().AppendChild("body_" + str(b)))
_usd_add_xform(xform)
# add rigid body shapes
for s in range(model.shape_count):
parent_path = self.root.GetPath()
if model.shape_body[s] >= 0:
parent_path = parent_path.AppendChild("body_" + str(model.shape_body[s].item()))
geo_type = model.shape_geo_type[s].item()
geo_scale = model.shape_geo_scale[s].tolist()
geo_src = model.shape_geo_src[s]
# shape transform in body frame
X_bs = dflex.util.transform_expand(model.shape_transform[s].tolist())
if (geo_type == dflex.sim.GEO_PLANE):
# plane mesh
size = 1000.0
mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("plane_" + str(s)))
mesh.CreateDoubleSidedAttr().Set(True)
points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
counts = (4, )
indices = [0, 1, 2, 3]
mesh.GetPointsAttr().Set(points)
mesh.GetNormalsAttr().Set(normals)
mesh.GetFaceVertexCountsAttr().Set(counts)
mesh.GetFaceVertexIndicesAttr().Set(indices)
elif (geo_type == dflex.sim.GEO_SPHERE):
mesh = UsdGeom.Sphere.Define(stage, parent_path.AppendChild("sphere_" + str(s)))
mesh.GetRadiusAttr().Set(geo_scale[0])
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (1.0, 1.0, 1.0), 0.0)
elif (geo_type == dflex.sim.GEO_CAPSULE):
mesh = UsdGeom.Capsule.Define(stage, parent_path.AppendChild("capsule_" + str(s)))
mesh.GetRadiusAttr().Set(geo_scale[0])
mesh.GetHeightAttr().Set(geo_scale[1] * 2.0)
# geometry transform w.r.t shape, convert USD geometry to physics engine convention
X_sg = dflex.util.transform((0.0, 0.0, 0.0), dflex.util.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
X_bg = dflex.util.transform_multiply(X_bs, X_sg)
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bg, (1.0, 1.0, 1.0), 0.0)
elif (geo_type == dflex.sim.GEO_BOX):
mesh = UsdGeom.Cube.Define(stage, parent_path.AppendChild("box_" + str(s)))
#mesh.GetSizeAttr().Set((geo_scale[0], geo_scale[1], geo_scale[2]))
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)
elif (geo_type == dflex.sim.GEO_MESH):
mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("mesh_" + str(s)))
mesh.GetPointsAttr().Set(geo_src.vertices)
mesh.GetFaceVertexIndicesAttr().Set(geo_src.indices)
mesh.GetFaceVertexCountsAttr().Set([3] * int(len(geo_src.indices) / 3))
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)
elif (geo_type == dflex.sim.GEO_SDF):
pass
def update(self, state: dflex.model.State, time: float):
"""Update the USD stage with latest simulation data
Args:
state: Current state of the simulation
time: The current time to update at in seconds
"""
try:
self.stage.SetEndTimeCode(time)
except:
pass
# convert to list
if self.model.particle_count:
particle_q = state.particle_q.tolist()
particle_orientations = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * self.model.particle_count
self.particle_instancer.GetPositionsAttr().Set(particle_q, time)
self.particle_instancer.GetOrientationsAttr().Set(particle_orientations, time)
# update cloth
if (self.cloth_mesh):
for k, v in self.cloth_remap.items():
self.cloth_verts[v] = particle_q[k]
self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts, time)
# update springs
if (self.model.spring_count > 0):
line_positions = []
line_rotations = []
line_scales = []
for i in range(self.model.spring_count):
index0 = self.model.spring_indices[i * 2 + 0]
index1 = self.model.spring_indices[i * 2 + 1]
pos0 = particle_q[index0]
pos1 = particle_q[index1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
self.spring_instancer.GetPositionsAttr().Set(line_positions, time)
self.spring_instancer.GetOrientationsAttr().Set(line_rotations, time)
self.spring_instancer.GetScalesAttr().Set(line_scales, time)
# rigids
for b in range(self.model.link_count):
#xform = UsdGeom.Xform.Define(self.stage, self.root.GetPath().AppendChild("body_" + str(b)))
node = UsdGeom.Xform(self.stage.GetPrimAtPath(self.root.GetPath().AppendChild("body_" + str(b))))
# unpack rigid spatial_transform
X_sb = dflex.util.transform_expand(state.body_X_sc[b].tolist())
_usd_set_xform(node, X_sb, (1.0, 1.0, 1.0), time)
def add_sphere(self, pos: tuple, radius: float, name: str, time: float=0.0):
"""Debug helper to add a sphere for visualization
Args:
pos: The position of the sphere
radius: The radius of the sphere
name: A name for the USD prim on the stage
"""
sphere_path = self.root.GetPath().AppendChild(name)
sphere = UsdGeom.Sphere.Get(self.stage, sphere_path)
if not sphere:
sphere = UsdGeom.Sphere.Define(self.stage, sphere_path)
sphere.GetRadiusAttr().Set(radius, time)
mat = Gf.Matrix4d()
mat.SetIdentity()
mat.SetTranslateOnly(Gf.Vec3d(pos))
op = sphere.MakeMatrixXform()
op.Set(mat, time)
def add_box(self, pos: tuple, extents: float, name: str, time: float=0.0):
"""Debug helper to add a box for visualization
Args:
pos: The position of the box
extents: The half-extents of the box along each axis
name: A name for the USD prim on the stage
"""
sphere_path = self.root.GetPath().AppendChild(name)
sphere = UsdGeom.Cube.Get(self.stage, sphere_path)
if not sphere:
sphere = UsdGeom.Cube.Define(self.stage, sphere_path)
#sphere.GetSizeAttr().Set((extents[0]*2.0, extents[1]*2.0, extents[2]*2.0), time)
mat = Gf.Matrix4d()
mat.SetIdentity()
mat.SetScale(extents)
mat.SetTranslateOnly(Gf.Vec3d(pos))
op = sphere.MakeMatrixXform()
op.Set(mat, time)
def add_mesh(self, name: str, path: str, transform, scale, time: float):
ref_path = "/root/" + name
ref = UsdGeom.Xform.Get(self.stage, ref_path)
if not ref:
ref = UsdGeom.Xform.Define(self.stage, ref_path)
ref.GetPrim().GetReferences().AddReference(path)
_usd_add_xform(ref)
# update transform
_usd_set_xform(ref, transform, scale, time)
def add_line_list(self, vertices, color, time, name, radius):
"""Debug helper to add a line list as a set of capsules
Args:
vertices: The vertices of the line list (two per line segment)
color: The color of the line
time: The time to update at
"""
num_lines = int(len(vertices)/2)
if (num_lines < 1):
return
# look up rope point instancer
instancer_path = self.root.GetPath().AppendChild(name)
instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)
if not instancer:
instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetRadiusAttr().Set(radius)
instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
instancer.CreatePrimvar("displayColor", Sdf.ValueTypeNames.Float3Array, "constant", 1)
line_positions = []
line_rotations = []
line_scales = []
# line_colors = []
for i in range(num_lines):
pos0 = vertices[i*2+0]
pos1 = vertices[i*2+1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
#line_colors.append(Gf.Vec3f((float(i)/num_lines, 0.5, 0.5)))
instancer.GetPositionsAttr().Set(line_positions, time)
instancer.GetOrientationsAttr().Set(line_rotations, time)
instancer.GetScalesAttr().Set(line_scales, time)
instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)
# instancer.GetPrimvar("displayColor").Set(line_colors, time)
def add_line_strip(self, vertices: dflex.sim.List[dflex.sim.Vec3], color: tuple, time: float, name: str, radius: float=0.01):
"""Debug helper to add a line strip as a connected list of capsules
Args:
vertices: The vertices of the line-strip
color: The color of the line
time: The time to update at
"""
num_lines = int(len(vertices)-1)
if (num_lines < 1):
return
# look up rope point instancer
instancer_path = self.root.GetPath().AppendChild(name)
instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)
if not instancer:
instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetRadiusAttr().Set(radius)
instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
line_positions = []
line_rotations = []
line_scales = []
for i in range(num_lines):
pos0 = vertices[i]
pos1 = vertices[i+1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
instancer.GetPositionsAttr().Set(line_positions, time)
instancer.GetOrientationsAttr().Set(line_rotations, time)
instancer.GetScalesAttr().Set(line_scales, time)
instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)
instancer_capsule = UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], time)
| 17,760 | Python | 34.808468 | 131 | 0.586768 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/model.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A module for building simulation models and state.
"""
import math
import torch
import numpy as np
from typing import Tuple
from typing import List
Vec3 = List[float]
Vec4 = List[float]
Quat = List[float]
Mat33 = List[float]
Transform = Tuple[Vec3, Quat]
from dflex.util import *
# shape geometry types
GEO_SPHERE = 0
GEO_BOX = 1
GEO_CAPSULE = 2
GEO_MESH = 3
GEO_SDF = 4
GEO_PLANE = 5
GEO_NONE = 6
# body joint types
JOINT_PRISMATIC = 0
JOINT_REVOLUTE = 1
JOINT_BALL = 2
JOINT_FIXED = 3
JOINT_FREE = 4
class Mesh:
"""Describes a triangle collision mesh for simulation
Attributes:
vertices (List[Vec3]): Mesh vertices
indices (List[int]): Mesh indices
I (Mat33): Inertia tensor of the mesh assuming density of 1.0 (around the center of mass)
mass (float): The total mass of the body assuming density of 1.0
com (Vec3): The center of mass of the body
"""
def __init__(self, vertices: List[Vec3], indices: List[int]):
"""Construct a Mesh object from a triangle mesh
The mesh center of mass and inertia tensor will automatically be
calculated using a density of 1.0. This computation is only valid
if the mesh is closed (two-manifold).
Args:
vertices: List of vertices in the mesh
indices: List of triangle indices, 3 per-element
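Example:
    >>> # a minimal sketch: the closed surface of a unit tetrahedron
    >>> # (the winding below is illustrative and assumed to face outward)
    >>> vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    >>> indices = [0, 2, 1,  0, 1, 3,  0, 3, 2,  1, 2, 3]
    >>> mesh = Mesh(vertices, indices)
    >>> m = mesh.mass  # total mass assuming density of 1.0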
"""
self.vertices = vertices
self.indices = indices
# compute com and inertia (using density=1.0)
com = np.mean(vertices, 0)
num_tris = int(len(indices) / 3)
# compute signed inertia for each tetrahedron
# formed with the interior point, using an order-2
# quadrature: https://www.sciencedirect.com/science/article/pii/S0377042712001604#br000040
weight = 0.25
alpha = math.sqrt(5.0) / 5.0
I = np.zeros((3, 3))
mass = 0.0
for i in range(num_tris):
p = np.array(vertices[indices[i * 3 + 0]])
q = np.array(vertices[indices[i * 3 + 1]])
r = np.array(vertices[indices[i * 3 + 2]])
mid = (com + p + q + r) / 4.0
pcom = p - com
qcom = q - com
rcom = r - com
Dm = np.matrix((pcom, qcom, rcom)).T
volume = np.linalg.det(Dm) / 6.0
# quadrature points lie on the line between the
# centroid and each vertex of the tetrahedron
quads = (mid + (p - mid) * alpha, mid + (q - mid) * alpha, mid + (r - mid) * alpha, mid + (com - mid) * alpha)
for j in range(4):
# displacement of quadrature point from COM
d = quads[j] - com
I += weight * volume * (length_sq(d) * np.eye(3, 3) - np.outer(d, d))
mass += weight * volume
self.I = I
self.mass = mass
self.com = com
class State:
"""The State object holds all *time-varying* data for a model.
Time-varying data includes particle positions, velocities, rigid body states, and
anything that is output from the integrator as derived data, e.g.: forces.
The exact attributes depend on the contents of the model. State objects should
generally be created using the :func:`Model.state()` function.
Attributes:
particle_q (torch.Tensor): Tensor of particle positions
particle_qd (torch.Tensor): Tensor of particle velocities
joint_q (torch.Tensor): Tensor of joint coordinates
joint_qd (torch.Tensor): Tensor of joint velocities
joint_act (torch.Tensor): Tensor of joint actuation values
"""
def __init__(self):
self.particle_count = 0
self.link_count = 0
# def flatten(self):
# """Returns a list of Tensors stored by the state
# This function is intended to be used internal-only but can be used to obtain
# a set of all tensors owned by the state.
# """
# tensors = []
# # particles
# if (self.particle_count):
# tensors.append(self.particle_q)
# tensors.append(self.particle_qd)
# # articulations
# if (self.link_count):
# tensors.append(self.joint_q)
# tensors.append(self.joint_qd)
# tensors.append(self.joint_act)
# return tensors
def flatten(self):
"""Returns a list of Tensors stored by the state
This function is intended to be used internal-only but can be used to obtain
a set of all tensors owned by the state.
"""
tensors = []
# build a list of all tensor attributes
for attr, value in self.__dict__.items():
if (torch.is_tensor(value)):
tensors.append(value)
return tensors
class Model:
"""Holds the definition of the simulation model
This class holds the non-time varying description of the system, i.e.:
all geometry, constraints, and parameters used to describe the simulation.
Attributes:
particle_q (torch.Tensor): Particle positions, shape [particle_count, 3], float
particle_qd (torch.Tensor): Particle velocities, shape [particle_count, 3], float
particle_mass (torch.Tensor): Particle mass, shape [particle_count], float
particle_inv_mass (torch.Tensor): Particle inverse mass, shape [particle_count], float
shape_transform (torch.Tensor): Rigid shape transforms, shape [shape_count, 7], float
shape_body (torch.Tensor): Rigid shape body index, shape [shape_count], int
shape_geo_type (torch.Tensor): Rigid shape geometry type, [shape_count], int
shape_geo_src (torch.Tensor): Rigid shape geometry source, shape [shape_count], int
shape_geo_scale (torch.Tensor): Rigid shape geometry scale, shape [shape_count, 3], float
shape_materials (torch.Tensor): Rigid shape contact materials, shape [shape_count, 4], float
spring_indices (torch.Tensor): Particle spring indices, shape [spring_count*2], int
spring_rest_length (torch.Tensor): Particle spring rest length, shape [spring_count], float
spring_stiffness (torch.Tensor): Particle spring stiffness, shape [spring_count], float
spring_damping (torch.Tensor): Particle spring damping, shape [spring_count], float
spring_control (torch.Tensor): Particle spring activation, shape [spring_count], float
tri_indices (torch.Tensor): Triangle element indices, shape [tri_count*3], int
tri_poses (torch.Tensor): Triangle element rest pose, shape [tri_count, 2, 2], float
tri_activations (torch.Tensor): Triangle element activations, shape [tri_count], float
edge_indices (torch.Tensor): Bending edge indices, shape [edge_count*2], int
edge_rest_angle (torch.Tensor): Bending edge rest angle, shape [edge_count], float
tet_indices (torch.Tensor): Tetrahedral element indices, shape [tet_count*4], int
tet_poses (torch.Tensor): Tetrahedral rest poses, shape [tet_count, 3, 3], float
tet_activations (torch.Tensor): Tetrahedral volumetric activations, shape [tet_count], float
tet_materials (torch.Tensor): Tetrahedral elastic parameters in form :math:`k_{mu}, k_{lambda}, k_{damp}`, shape [tet_count, 3]
body_X_cm (torch.Tensor): Rigid body center of mass (in local frame), shape [link_count, 7], float
body_I_m (torch.Tensor): Rigid body inertia tensor (relative to COM), shape [link_count, 3, 3], float
articulation_start (torch.Tensor): Articulation start offset, shape [num_articulations], int
joint_q (torch.Tensor): Joint coordinate, shape [joint_coord_count], float
joint_qd (torch.Tensor): Joint velocity, shape [joint_dof_count], float
joint_type (torch.Tensor): Joint type, shape [joint_count], int
joint_parent (torch.Tensor): Joint parent, shape [joint_count], int
joint_X_pj (torch.Tensor): Joint transform in parent frame, shape [joint_count, 7], float
joint_X_cm (torch.Tensor): Joint mass frame in child frame, shape [joint_count, 7], float
joint_axis (torch.Tensor): Joint axis in child frame, shape [joint_count, 3], float
joint_q_start (torch.Tensor): Joint coordinate offset, shape [joint_count], int
joint_qd_start (torch.Tensor): Joint velocity offset, shape [joint_count], int
joint_armature (torch.Tensor): Armature for each joint, shape [joint_count], float
joint_target_ke (torch.Tensor): Joint stiffness, shape [joint_count], float
joint_target_kd (torch.Tensor): Joint damping, shape [joint_count], float
joint_target (torch.Tensor): Joint target, shape [joint_count], float
particle_count (int): Total number of particles in the system
joint_coord_count (int): Total number of joint coordinates in the system
joint_dof_count (int): Total number of joint dofs in the system
link_count (int): Total number of links in the system
shape_count (int): Total number of shapes in the system
tri_count (int): Total number of triangles in the system
tet_count (int): Total number of tetrahedra in the system
edge_count (int): Total number of edges in the system
spring_count (int): Total number of springs in the system
contact_count (int): Total number of contacts in the system
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly; however, it is possible to do so if
desired.
"""
def __init__(self, adapter):
self.particle_q = None
self.particle_qd = None
self.particle_mass = None
self.particle_inv_mass = None
self.shape_transform = None
self.shape_body = None
self.shape_geo_type = None
self.shape_geo_src = None
self.shape_geo_scale = None
self.shape_materials = None
self.spring_indices = None
self.spring_rest_length = None
self.spring_stiffness = None
self.spring_damping = None
self.spring_control = None
self.tri_indices = None
self.tri_poses = None
self.tri_activations = None
self.edge_indices = None
self.edge_rest_angle = None
self.tet_indices = None
self.tet_poses = None
self.tet_activations = None
self.tet_materials = None
self.body_X_cm = None
self.body_I_m = None
self.articulation_start = None
self.joint_q = None
self.joint_qd = None
self.joint_type = None
self.joint_parent = None
self.joint_X_pj = None
self.joint_X_cm = None
self.joint_axis = None
self.joint_q_start = None
self.joint_qd_start = None
self.joint_armature = None
self.joint_target_ke = None
self.joint_target_kd = None
self.joint_target = None
self.particle_count = 0
self.joint_coord_count = 0
self.joint_dof_count = 0
self.link_count = 0
self.shape_count = 0
self.tri_count = 0
self.tet_count = 0
self.edge_count = 0
self.spring_count = 0
self.contact_count = 0
self.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)
self.contact_distance = 0.1
self.contact_ke = 1.e+3
self.contact_kd = 0.0
self.contact_kf = 1.e+3
self.contact_mu = 0.5
self.tri_ke = 100.0
self.tri_ka = 100.0
self.tri_kd = 10.0
self.tri_kb = 100.0
self.tri_drag = 0.0
self.tri_lift = 0.0
self.edge_ke = 100.0
self.edge_kd = 0.0
self.particle_radius = 0.1
self.adapter = adapter
def state(self) -> State:
"""Returns a state object for the model
The returned state will be initialized with the initial configuration given in
the model description.
"""
s = State()
s.particle_count = self.particle_count
s.link_count = self.link_count
#--------------------------------
# dynamic state (input, output)
# particles
if (self.particle_count):
s.particle_q = torch.clone(self.particle_q)
s.particle_qd = torch.clone(self.particle_qd)
# articulations
if (self.link_count):
s.joint_q = torch.clone(self.joint_q)
s.joint_qd = torch.clone(self.joint_qd)
s.joint_act = torch.zeros_like(self.joint_qd)
s.joint_q.requires_grad = True
s.joint_qd.requires_grad = True
#--------------------------------
# derived state (output only)
if (self.particle_count):
s.particle_f = torch.empty_like(self.particle_qd, requires_grad=True)
if (self.link_count):
# joints
s.joint_qdd = torch.empty_like(self.joint_qd, requires_grad=True)
s.joint_tau = torch.empty_like(self.joint_qd, requires_grad=True)
s.joint_S_s = torch.empty((self.joint_dof_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
# derived rigid body data (maximal coordinates)
s.body_X_sc = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_X_sm = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_I_s = torch.empty((self.link_count, 6, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_v_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_a_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_f_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
#s.body_ft_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
#s.body_f_ext_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
return s
def alloc_mass_matrix(self):
if (self.link_count):
# system matrices
self.M = torch.zeros(self.M_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.J = torch.zeros(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.P = torch.empty(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.H = torch.empty(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
# zero since only upper triangle is set which can trigger NaN detection
self.L = torch.zeros(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
def flatten(self):
"""Returns a list of Tensors stored by the model
This function is intended to be used internal-only but can be used to obtain
a set of all tensors owned by the model.
"""
tensors = []
# build a list of all tensor attributes
for attr, value in self.__dict__.items():
if (torch.is_tensor(value)):
tensors.append(value)
return tensors
# builds contacts
def collide(self, state: State):
"""Constructs a set of contacts between rigid bodies and ground
This method performs collision detection between rigid body vertices in the scene and updates
the model's set of contacts stored as the following attributes:
* **contact_body0**: Tensor of ints with first rigid body index
* **contact_body1**: Tensor of ints with second rigid body index (currently always -1 to indicate ground)
* **contact_point0**: Tensor of Vec3 representing contact point in local frame of body0
* **contact_dist**: Tensor of float values representing the distance to maintain
* **contact_material**: Tensor contact material indices
Args:
state: The state of the simulation at which to perform collision detection
Note:
Currently this method uses an 'all pairs' approach to contact generation that is
state independent. In the future this will change and will create a node in
the computational graph to propagate gradients as a function of state.
Todo:
Only ground-plane collision is currently implemented. Since the ground is static
it is acceptable to call this method once at initialization time.
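Example:
    >>> # a minimal sketch: generate ground contacts once, before stepping the simulation
    >>> state = model.state()
    >>> model.collide(state)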
"""
body0 = []
body1 = []
point = []
dist = []
mat = []
def add_contact(b0, b1, t, p0, d, m):
body0.append(b0)
body1.append(b1)
point.append(transform_point(t, np.array(p0)))
dist.append(d)
mat.append(m)
for i in range(self.shape_count):
# transform from shape to body
X_bs = transform_expand(self.shape_transform[i].tolist())
geo_type = self.shape_geo_type[i].item()
if (geo_type == GEO_SPHERE):
radius = self.shape_geo_scale[i][0].item()
add_contact(self.shape_body[i], -1, X_bs, (0.0, 0.0, 0.0), radius, i)
elif (geo_type == GEO_CAPSULE):
radius = self.shape_geo_scale[i][0].item()
half_width = self.shape_geo_scale[i][1].item()
add_contact(self.shape_body[i], -1, X_bs, (-half_width, 0.0, 0.0), radius, i)
add_contact(self.shape_body[i], -1, X_bs, (half_width, 0.0, 0.0), radius, i)
elif (geo_type == GEO_BOX):
edges = self.shape_geo_scale[i].tolist()
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, ( edges[0], -edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], -edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], edges[2]), 0.0, i)
elif (geo_type == GEO_MESH):
mesh = self.shape_geo_src[i]
scale = self.shape_geo_scale[i]
for v in mesh.vertices:
p = (v[0] * scale[0], v[1] * scale[1], v[2] * scale[2])
add_contact(self.shape_body[i], -1, X_bs, p, 0.0, i)
# send to torch
self.contact_body0 = torch.tensor(body0, dtype=torch.int32, device=self.adapter)
self.contact_body1 = torch.tensor(body1, dtype=torch.int32, device=self.adapter)
self.contact_point0 = torch.tensor(point, dtype=torch.float32, device=self.adapter)
self.contact_dist = torch.tensor(dist, dtype=torch.float32, device=self.adapter)
self.contact_material = torch.tensor(mat, dtype=torch.int32, device=self.adapter)
self.contact_count = len(body0)
class ModelBuilder:
"""A helper class for building simulation models at runtime.
Use the ModelBuilder to construct a simulation scene. The ModelBuilder
is independent of PyTorch and builds the scene representation using
standard Python data structures, which means it is not differentiable. Once :func:`finalize()`
has been called the ModelBuilder transfers all data to Torch tensors and returns
an object that may be used for simulation.
Example:
>>> import dflex as df
>>>
>>> builder = df.ModelBuilder()
>>>
>>> # anchor point (zero mass)
>>> builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
>>>
>>> # build chain
>>> for i in range(1,10):
>>> builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
>>> builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
>>>
>>> # create model
>>> model = builder.finalize()
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly; however, it is possible to do so if
desired.
"""
def __init__(self):
# particles
self.particle_q = []
self.particle_qd = []
self.particle_mass = []
# shapes
self.shape_transform = []
self.shape_body = []
self.shape_geo_type = []
self.shape_geo_scale = []
self.shape_geo_src = []
self.shape_materials = []
# geometry
self.geo_meshes = []
self.geo_sdfs = []
# springs
self.spring_indices = []
self.spring_rest_length = []
self.spring_stiffness = []
self.spring_damping = []
self.spring_control = []
# triangles
self.tri_indices = []
self.tri_poses = []
self.tri_activations = []
# edges (bending)
self.edge_indices = []
self.edge_rest_angle = []
# tetrahedra
self.tet_indices = []
self.tet_poses = []
self.tet_activations = []
self.tet_materials = []
# muscles
self.muscle_start = []
self.muscle_params = []
self.muscle_activation = []
self.muscle_links = []
self.muscle_points = []
# rigid bodies
self.joint_parent = [] # index of the parent body (constant)
self.joint_child = [] # index of the child body (constant)
self.joint_axis = [] # joint axis in child joint frame (constant)
self.joint_X_pj = [] # frame of joint in parent (constant)
self.joint_X_cm = [] # frame of child com (in child coordinates) (constant)
self.joint_q_start = [] # joint offset in the q array
self.joint_qd_start = [] # joint offset in the qd array
self.joint_type = []
self.joint_armature = []
self.joint_target_ke = []
self.joint_target_kd = []
self.joint_target = []
self.joint_limit_lower = []
self.joint_limit_upper = []
self.joint_limit_ke = []
self.joint_limit_kd = []
self.joint_q = [] # generalized coordinates (input)
self.joint_qd = [] # generalized velocities (input)
self.joint_qdd = [] # generalized accelerations (id,fd)
self.joint_tau = [] # generalized actuation (input)
self.joint_u = [] # generalized total torque (fd)
self.body_mass = []
self.body_inertia = []
self.body_com = []
self.articulation_start = []
def add_articulation(self) -> int:
"""Add an articulation object, all subsequently added links (see: :func:`add_link`) will belong to this articulation object.
Calling this method multiple times 'closes' any previous articulations and begins a new one.
Returns:
The index of the articulation
"""
self.articulation_start.append(len(self.joint_type))
return len(self.articulation_start)-1
# rigids, register a rigid body and return its index.
def add_link(
self,
parent : int,
X_pj : Transform,
axis : Vec3,
type : int,
armature: float=0.01,
stiffness: float=0.0,
damping: float=0.0,
limit_lower: float=-1.e+3,
limit_upper: float=1.e+3,
limit_ke: float=100.0,
limit_kd: float=10.0,
com: Vec3=np.zeros(3),
I_m: Mat33=np.zeros((3, 3)),
m: float=0.0) -> int:
"""Adds a rigid body to the model.
Args:
parent: The index of the parent body
X_pj: The location of the joint in the parent's local frame connecting this body
axis: The joint axis
type: The type of joint, should be one of: JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_BALL, JOINT_FIXED, or JOINT_FREE
armature: Additional inertia around the joint axis
stiffness: Spring stiffness that attempts to return joint to zero position
damping: Spring damping that attempts to remove joint velocity
com: The center of mass of the body w.r.t its origin
I_m: The 3x3 inertia tensor of the body (specified relative to the center of mass)
m: The mass of the body
Returns:
The index of the body in the model
Note:
If the mass (m) is zero then the body is treated as kinematic with no dynamics
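Example:
    >>> # a minimal sketch: one revolute link attached to the world (parent=-1),
    >>> # assuming df.transform() and the joint type constants are in scope
    >>> builder = df.ModelBuilder()
    >>> builder.add_articulation()
    >>> link = builder.add_link(
    >>>     parent=-1,
    >>>     X_pj=df.transform((0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)),
    >>>     axis=(0.0, 0.0, 1.0),
    >>>     type=df.JOINT_REVOLUTE)
    >>> builder.add_shape_sphere(link, radius=0.1)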
"""
# joint data
self.joint_type.append(type)
self.joint_axis.append(np.array(axis))
self.joint_parent.append(parent)
self.joint_X_pj.append(X_pj)
self.joint_target_ke.append(stiffness)
self.joint_target_kd.append(damping)
self.joint_limit_ke.append(limit_ke)
self.joint_limit_kd.append(limit_kd)
self.joint_q_start.append(len(self.joint_q))
self.joint_qd_start.append(len(self.joint_qd))
if (type == JOINT_PRISMATIC):
self.joint_q.append(0.0)
self.joint_qd.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_upper.append(limit_upper)
elif (type == JOINT_REVOLUTE):
self.joint_q.append(0.0)
self.joint_qd.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_upper.append(limit_upper)
elif (type == JOINT_BALL):
# quaternion
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(1.0)
# angular velocity
self.joint_qd.append(0.0)
self.joint_qd.append(0.0)
self.joint_qd.append(0.0)
# pd targets
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_armature.append(armature)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(0.0)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(0.0)
elif (type == JOINT_FIXED):
pass
elif (type == JOINT_FREE):
# translation
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
# quaternion
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(1.0)
# note armature for free joints should always be zero, better to modify the body inertia directly
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
# joint velocities
for i in range(6):
self.joint_qd.append(0.0)
self.body_inertia.append(np.zeros((3, 3)))
self.body_mass.append(0.0)
self.body_com.append(np.zeros(3))
# return index of body
return len(self.joint_type) - 1
# muscles
def add_muscle(self, links: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float) -> int:
"""Adds a muscle-tendon activation unit
Args:
links: A list of link indices for each waypoint
positions: A list of positions of each waypoint in the link's local frame
f0: Force scaling
lm: Muscle length
lt: Tendon length
lmax: Maximally efficient muscle length
pen: The muscle pennation angle
Returns:
The index of the muscle in the model
"""
n = len(links)
self.muscle_start.append(len(self.muscle_links))
self.muscle_params.append((f0, lm, lt, lmax, pen))
self.muscle_activation.append(0.0)
for i in range(n):
self.muscle_links.append(links[i])
self.muscle_points.append(positions[i])
# return the index of the muscle
return len(self.muscle_start)-1
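# Illustrative usage sketch (not part of the original source): assuming two
# rigid links with indices 0 and 1 already exist on this builder instance
# (here called `builder`), a single muscle-tendon unit spanning them could be
# added as:
#
#   builder.add_muscle(links=[0, 1],
#                      positions=[(0.0, 0.05, 0.0), (0.0, -0.05, 0.0)],
#                      f0=100.0, lm=0.1, lt=0.05, lmax=0.15, pen=0.0)
#
# The waypoint positions are expressed in each link's local frame.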
# shapes
def add_shape_plane(self, plane: Vec4=(0.0, 1.0, 0.0, 0.0), ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
"""Adds a plane collision shape
Args:
plane: The plane equation in form a*x + b*y + c*z + d = 0
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(-1, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), GEO_PLANE, plane, None, 0.0, ke, kd, kf, mu)
def add_shape_sphere(self, body, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
"""Adds a sphere collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the sphere
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_SPHERE, (radius, 0.0, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_box(self,
body : int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
hx: float=0.5,
hy: float=0.5,
hz: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a box collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
hx: The half-extents along the x-axis
hy: The half-extents along the y-axis
hz: The half-extents along the z-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_BOX, (hx, hy, hz, 0.0), None, density, ke, kd, kf, mu)
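# Illustrative usage sketch (assumes a rigid link with index 0 has already
# been added to this builder instance, here called `builder`): attach a box
# collider to that link with default contact parameters.
#
#   builder.add_shape_box(body=0, pos=(0.0, 0.5, 0.0),
#                         hx=0.25, hy=0.5, hz=0.25, density=500.0)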
def add_shape_capsule(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
radius: float=1.0,
half_width: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a capsule collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the capsule
half_width: The half length of the center cylinder along the x-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_CAPSULE, (radius, half_width, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_mesh(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
mesh: Mesh=None,
scale: Vec3=(1.0, 1.0, 1.0),
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a triangle mesh collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
mesh: The mesh object
scale: Scale to use for the collider
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_MESH, (scale[0], scale[1], scale[2], 0.0), mesh, density, ke, kd, kf, mu)
def _add_shape(self, body , pos, rot, type, scale, src, density, ke, kd, kf, mu):
self.shape_body.append(body)
self.shape_transform.append(transform(pos, rot))
self.shape_geo_type.append(type)
self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
self.shape_geo_src.append(src)
self.shape_materials.append((ke, kd, kf, mu))
(m, I) = self._compute_shape_mass(type, scale, src, density)
self._update_body_mass(body, m, I, np.array(pos), np.array(rot))
# particles
def add_particle(self, pos : Vec3, vel : Vec3, mass : float) -> int:
"""Adds a single particle to the model
Args:
pos: The initial position of the particle
vel: The initial velocity of the particle
mass: The mass of the particle
Note:
Set the mass equal to zero to create a 'kinematic' particle that is not subject to dynamics.
Returns:
The index of the particle in the system
"""
self.particle_q.append(pos)
self.particle_qd.append(vel)
self.particle_mass.append(mass)
return len(self.particle_q) - 1
def add_spring(self, i : int, j, ke : float, kd : float, control: float):
"""Adds a spring between two particles in the system
Args:
i: The index of the first particle
j: The index of the second particle
ke: The elastic stiffness of the spring
kd: The damping stiffness of the spring
control: The actuation level of the spring
Note:
The spring is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
self.spring_indices.append(i)
self.spring_indices.append(j)
self.spring_stiffness.append(ke)
self.spring_damping.append(kd)
self.spring_control.append(control)
# compute rest length
p = self.particle_q[i]
q = self.particle_q[j]
delta = np.subtract(p, q)
l = np.sqrt(np.dot(delta, delta))
self.spring_rest_length.append(l)
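# Illustrative usage sketch (assumes this builder instance is called
# `builder`): a single spring hanging from a kinematic anchor particle.
#
#   p0 = builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 0.0)   # mass 0 -> kinematic
#   p1 = builder.add_particle((0.0, -0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
#   builder.add_spring(p0, p1, ke=1.0e3, kd=10.0, control=0.0)
#
# The rest length is taken from the initial separation of the particles (0.5 here).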
def add_triangle(self, i : int, j : int, k : int) -> float:
"""Adds a trianglular FEM element between three particles in the system.
Triangles are modeled as viscoelastic elements with elastic stiffness and damping
Parameters specfied on the model. See model.tri_ke, model.tri_kd.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
Returns:
The area of the triangle
Note:
The triangle is created with a rest pose based on the particles'
initial configuration.
Todo:
* Expose elastic parameters on a per-element basis
"""
# compute basis for 2D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
qp = q - p
rp = r - p
# construct basis aligned with the triangle
n = normalize(np.cross(qp, rp))
e1 = normalize(qp)
e2 = normalize(np.cross(n, e1))
R = np.matrix((e1, e2))
M = np.matrix((qp, rp))
D = R * M.T
inv_D = np.linalg.inv(D)
area = np.linalg.det(D) / 2.0
if (area < 0.0):
print("inverted triangle element")
self.tri_indices.append((i, j, k))
self.tri_poses.append(inv_D.tolist())
self.tri_activations.append(0.0)
return area
def add_tetrahedron(self, i: int, j: int, k: int, l: int, k_mu: float=1.e+3, k_lambda: float=1.e+3, k_damp: float=0.0) -> float:
"""Adds a tetrahedral FEM element between four particles in the system.
Tetrahedra are modeled as viscoelastic elements with a Neo-Hookean energy
density based on [Smith et al. 2018].
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The element's damping stiffness
Returns:
The volume of the tetrahedron
Note:
The tetrahedron is created with a rest pose based on the particles' initial configuration
"""
# compute basis for 3D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
s = np.array(self.particle_q[l])
qp = q - p
rp = r - p
sp = s - p
Dm = np.matrix((qp, rp, sp)).T
volume = np.linalg.det(Dm) / 6.0
if (volume <= 0.0):
print("inverted tetrahedral element")
else:
inv_Dm = np.linalg.inv(Dm)
self.tet_indices.append((i, j, k, l))
self.tet_poses.append(inv_Dm.tolist())
self.tet_activations.append(0.0)
self.tet_materials.append((k_mu, k_lambda, k_damp))
return volume
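# Worked example of the rest-pose computation above (unit right tetrahedron):
# with p=(0,0,0), q=(1,0,0), r=(0,1,0), s=(0,0,1) the edge matrix Dm is the
# 3x3 identity, so det(Dm) = 1 and the returned volume is 1/6 ~= 0.1667,
# while tet_poses stores inv(Dm) = identity.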
def add_edge(self, i: int, j: int, k: int, l: int, rest: float=None):
"""Adds a bending edge element between four particles in the system.
Bending elements are designed to be between two connected triangles. The
bending energy is based on [Bridson et al. 2002]. Bending stiffness is controlled
by the `model.tri_kb` parameter.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
rest: The rest angle across the edge in radians, if not specified it will be computed
Note:
The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing
vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise
winding: (i, k, l), (j, l, k).
"""
# compute rest angle
if (rest == None):
x1 = np.array(self.particle_q[i])
x2 = np.array(self.particle_q[j])
x3 = np.array(self.particle_q[k])
x4 = np.array(self.particle_q[l])
n1 = normalize(np.cross(x3 - x1, x4 - x1))
n2 = normalize(np.cross(x4 - x2, x3 - x2))
e = normalize(x4 - x3)
d = np.clip(np.dot(n2, n1), -1.0, 1.0)
angle = math.acos(d)
sign = np.sign(np.dot(np.cross(n2, n1), e))
rest = angle * sign
self.edge_indices.append((i, j, k, l))
self.edge_rest_angle.append(rest)
def add_cloth_grid(self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
cell_x: float,
cell_y: float,
mass: float,
reverse_winding: bool=False,
fix_left: bool=False,
fix_right: bool=False,
fix_top: bool=False,
fix_bottom: bool=False):
"""Helper to create a regular planar cloth grid
Creates a rectangular grid of particles with FEM triangles and bending elements
automatically.
Args:
pos: The position of the cloth in world space
rot: The orientation of the cloth in world space
vel: The velocity of the cloth in world space
dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
mass: The mass of each particle
reverse_winding: Flip the winding of the mesh
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
def grid_index(x, y, dim_x):
return y * dim_x + x
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
for y in range(0, dim_y + 1):
for x in range(0, dim_x + 1):
g = np.array((x * cell_x, y * cell_y, 0.0))
p = quat_rotate(rot, g) + pos
m = mass
if (x == 0 and fix_left):
m = 0.0
elif (x == dim_x and fix_right):
m = 0.0
elif (y == 0 and fix_bottom):
m = 0.0
elif (y == dim_y and fix_top):
m = 0.0
self.add_particle(p, vel, m)
if (x > 0 and y > 0):
if (reverse_winding):
tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1))
tri2 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
self.add_triangle(*tri1)
self.add_triangle(*tri2)
else:
tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
tri2 = (start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
self.add_triangle(*tri1)
self.add_triangle(*tri2)
end_vertex = len(self.particle_q)
end_tri = len(self.tri_indices)
# bending constraints, could create these explicitly for a grid but this
# is a good test of the adjacency structure
adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
for k, e in adj.edges.items():
# skip open edges
if (e.f0 == -1 or e.f1 == -1):
continue
self.add_edge(e.o0, e.o1, e.v0, e.v1) # opposite 0, opposite 1, vertex 0, vertex 1
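# Illustrative usage sketch (assumes this builder instance is called
# `builder`): a 32x32-cell cloth pinned along its top edge, hanging under gravity.
#
#   builder.add_cloth_grid(pos=(0.0, 2.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
#                          vel=(0.0, 0.0, 0.0), dim_x=32, dim_y=32,
#                          cell_x=0.1, cell_y=0.1, mass=0.05, fix_top=True)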
def add_cloth_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, edge_callback=None, face_callback=None):
"""Helper to create a cloth model from a regular triangle mesh
Creates one FEM triangle element and one bending element for every face
and edge in the input triangle mesh
Args:
pos: The position of the cloth in world space
rot: The orientation of the cloth in world space
scale: The scaling factor applied to the vertex positions
vel: The velocity of the cloth in world space
vertices: A list of vertex positions
indices: A list of triangle indices, 3 entries per-face
density: The density per-area of the mesh
edge_callback: A user callback when an edge is created
face_callback: A user callback when a face is created
Note:
The mesh should be two-manifold.
"""
num_tris = int(len(indices) / 3)
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
# particles
for i, v in enumerate(vertices):
p = quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# triangles
for t in range(num_tris):
i = start_vertex + indices[t * 3 + 0]
j = start_vertex + indices[t * 3 + 1]
k = start_vertex + indices[t * 3 + 2]
if (face_callback):
face_callback(i, j, k)
area = self.add_triangle(i, j, k)
# add area fraction to particles
if (area > 0.0):
self.particle_mass[i] += density * area / 3.0
self.particle_mass[j] += density * area / 3.0
self.particle_mass[k] += density * area / 3.0
end_vertex = len(self.particle_q)
end_tri = len(self.tri_indices)
adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
# bend constraints
for k, e in adj.edges.items():
# skip open edges
if (e.f0 == -1 or e.f1 == -1):
continue
if (edge_callback):
edge_callback(e.f0, e.f1)
self.add_edge(e.o0, e.o1, e.v0, e.v1)
def add_soft_grid(self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
dim_z: int,
cell_x: float,
cell_y: float,
cell_z: float,
density: float,
k_mu: float,
k_lambda: float,
k_damp: float,
fix_left: bool=False,
fix_right: bool=False,
fix_top: bool=False,
fix_bottom: bool=False):
"""Helper to create a rectangular tetrahedral FEM grid
Creates a regular grid of FEM tetrahedra and surface triangles. Useful for example
to create beams and sheets. Each hexahedral cell is decomposed into 5
tetrahedral elements.
Args:
pos: The position of the solid in world space
rot: The orientation of the solid in world space
vel: The velocity of the solid in world space
dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
dim_z: The number of rectangular cells along the z-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
cell_z: The width of each cell in the z-direction
density: The density of the solid (particle masses are derived from the cell volume)
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
start_vertex = len(self.particle_q)
mass = cell_x * cell_y * cell_z * density
for z in range(dim_z + 1):
for y in range(dim_y + 1):
for x in range(dim_x + 1):
v = np.array((x * cell_x, y * cell_y, z * cell_z))
m = mass
if (fix_left and x == 0):
m = 0.0
if (fix_right and x == dim_x):
m = 0.0
if (fix_top and y == dim_y):
m = 0.0
if (fix_bottom and y == 0):
m = 0.0
p = quat_rotate(rot, v) + pos
self.add_particle(p, vel, m)
# dict of open faces
faces = {}
def add_face(i: int, j: int, k: int):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
def add_tet(i: int, j: int, k: int, l: int):
self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp)
add_face(i, k, j)
add_face(j, k, l)
add_face(i, j, l)
add_face(i, l, k)
def grid_index(x, y, z):
return (dim_x + 1) * (dim_y + 1) * z + (dim_x + 1) * y + x
for z in range(dim_z):
for y in range(dim_y):
for x in range(dim_x):
v0 = grid_index(x, y, z) + start_vertex
v1 = grid_index(x + 1, y, z) + start_vertex
v2 = grid_index(x + 1, y, z + 1) + start_vertex
v3 = grid_index(x, y, z + 1) + start_vertex
v4 = grid_index(x, y + 1, z) + start_vertex
v5 = grid_index(x + 1, y + 1, z) + start_vertex
v6 = grid_index(x + 1, y + 1, z + 1) + start_vertex
v7 = grid_index(x, y + 1, z + 1) + start_vertex
if (((x & 1) ^ (y & 1) ^ (z & 1))):
add_tet(v0, v1, v4, v3)
add_tet(v2, v3, v6, v1)
add_tet(v5, v4, v1, v6)
add_tet(v7, v6, v3, v4)
add_tet(v4, v1, v6, v3)
else:
add_tet(v1, v2, v5, v0)
add_tet(v3, v0, v7, v2)
add_tet(v4, v7, v0, v5)
add_tet(v6, v5, v2, v7)
add_tet(v5, v2, v7, v0)
# add triangles
for k, v in faces.items():
self.add_triangle(v[0], v[1], v[2])
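# Illustrative usage sketch (assumes this builder instance is called
# `builder`): a 10x2x2-cell soft beam cantilevered from its left face.
#
#   builder.add_soft_grid(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
#                         vel=(0.0, 0.0, 0.0), dim_x=10, dim_y=2, dim_z=2,
#                         cell_x=0.1, cell_y=0.1, cell_z=0.1, density=100.0,
#                         k_mu=1.0e3, k_lambda=1.0e3, k_damp=0.0, fix_left=True)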
def add_soft_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, k_mu: float, k_lambda: float, k_damp: float):
"""Helper to create a tetrahedral model from an input tetrahedral mesh
Args:
pos: The position of the solid in world space
rot: The orientation of the solid in world space
vel: The velocity of the solid in world space
vertices: A list of vertex positions
indices: A list of tetrahedron indices, 4 entries per-element
density: The density per-volume of the mesh
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
"""
num_tets = int(len(indices) / 4)
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
# dict of open faces
faces = {}
def add_face(i, j, k):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
# add particles
for v in vertices:
p = quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# add tetrahedra
for t in range(num_tets):
v0 = start_vertex + indices[t * 4 + 0]
v1 = start_vertex + indices[t * 4 + 1]
v2 = start_vertex + indices[t * 4 + 2]
v3 = start_vertex + indices[t * 4 + 3]
volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp)
# distribute volume fraction to particles
if (volume > 0.0):
self.particle_mass[v0] += density * volume / 4.0
self.particle_mass[v1] += density * volume / 4.0
self.particle_mass[v2] += density * volume / 4.0
self.particle_mass[v3] += density * volume / 4.0
# build open faces
add_face(v0, v2, v1)
add_face(v1, v2, v3)
add_face(v0, v1, v3)
add_face(v0, v3, v2)
# add triangles
for k, v in faces.items():
try:
self.add_triangle(v[0], v[1], v[2])
except np.linalg.LinAlgError:
continue
def compute_sphere_inertia(self, density: float, r: float) -> tuple:
"""Helper to compute mass and inertia of a sphere
Args:
density: The sphere density
r: The sphere radius
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = 4.0 / 3.0 * math.pi * r * r * r
m = density * v
Ia = 2.0 / 5.0 * m * r * r
I = np.array([[Ia, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
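# Worked example of the formula above: density=1000, r=0.1 gives
# v = 4/3*pi*0.1^3 ~= 4.19e-3, m ~= 4.19 and Ia = 2/5*m*r^2 ~= 1.68e-2,
# i.e. I = diag(1.68e-2, 1.68e-2, 1.68e-2).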
def compute_capsule_inertia(self, density: float, r: float, l: float) -> tuple:
"""Helper to compute mass and inertia of a capsule
Args:
density: The capsule density
r: The capsule radius
l: The capsule length (full width of the interior cylinder)
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
ms = density * (4.0 / 3.0) * math.pi * r * r * r
mc = density * math.pi * r * r * l
# total mass
m = ms + mc
# adapted from ODE
Ia = mc * (0.25 * r * r + (1.0 / 12.0) * l * l) + ms * (0.4 * r * r + 0.375 * r * l + 0.25 * l * l)
Ib = (mc * 0.5 + ms * 0.4) * r * r
I = np.array([[Ib, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
def compute_box_inertia(self, density: float, w: float, h: float, d: float) -> tuple:
"""Helper to compute mass and inertia of a box
Args:
density: The box density
w: The box width along the x-axis
h: The box height along the y-axis
d: The box depth along the z-axis
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = w * h * d
m = density * v
Ia = 1.0 / 12.0 * m * (h * h + d * d)
Ib = 1.0 / 12.0 * m * (w * w + d * d)
Ic = 1.0 / 12.0 * m * (w * w + h * h)
I = np.array([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ic]])
return (m, I)
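# Worked example of the formula above: a unit cube (w=h=d=1) with
# density=1000 has m=1000 and Ia=Ib=Ic = 1/12*1000*(1+1) ~= 166.7.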
def _compute_shape_mass(self, type, scale, src, density):
if density == 0: # zero density means fixed
return 0, np.zeros((3, 3))
if (type == GEO_SPHERE):
return self.compute_sphere_inertia(density, scale[0])
elif (type == GEO_BOX):
return self.compute_box_inertia(density, scale[0] * 2.0, scale[1] * 2.0, scale[2] * 2.0)
elif (type == GEO_CAPSULE):
return self.compute_capsule_inertia(density, scale[0], scale[1] * 2.0)
elif (type == GEO_MESH):
#todo: non-uniform scale of inertia tensor
s = scale[0] # eventually want to compute moment of inertia for mesh.
return (density * src.mass * s * s * s, density * src.I * s * s * s * s * s)
# incrementally updates rigid body mass with additional mass and inertia expressed at a point local to the body
def _update_body_mass(self, i, m, I, p, q):
if (i == -1):
return
# find new COM
new_mass = self.body_mass[i] + m
if new_mass == 0.0: # no mass
return
new_com = (self.body_com[i] * self.body_mass[i] + p * m) / new_mass
# shift inertia to new COM
com_offset = new_com - self.body_com[i]
shape_offset = new_com - p
new_inertia = transform_inertia(self.body_mass[i], self.body_inertia[i], com_offset, quat_identity()) + transform_inertia(
m, I, shape_offset, q)
self.body_mass[i] = new_mass
self.body_inertia[i] = new_inertia
self.body_com[i] = new_com
# returns a model given the description
def finalize(self, adapter: str) -> Model:
"""Convert this builder object to a concrete model for simulation.
After building simulation elements this method should be called to transfer
all data to PyTorch tensors ready for simulation.
Args:
adapter: The simulation adapter to use, e.g.: 'cpu', 'cuda'
Returns:
A model object.
"""
# construct particle inv masses
particle_inv_mass = []
for m in self.particle_mass:
if (m > 0.0):
particle_inv_mass.append(1.0 / m)
else:
particle_inv_mass.append(0.0)
#-------------------------------------
# construct Model (non-time varying) data
m = Model(adapter)
#---------------------
# particles
# state (initial)
m.particle_q = torch.tensor(self.particle_q, dtype=torch.float32, device=adapter)
m.particle_qd = torch.tensor(self.particle_qd, dtype=torch.float32, device=adapter)
# model
m.particle_mass = torch.tensor(self.particle_mass, dtype=torch.float32, device=adapter)
m.particle_inv_mass = torch.tensor(particle_inv_mass, dtype=torch.float32, device=adapter)
#---------------------
# collision geometry
m.shape_transform = torch.tensor(transform_flatten_list(self.shape_transform), dtype=torch.float32, device=adapter)
m.shape_body = torch.tensor(self.shape_body, dtype=torch.int32, device=adapter)
m.shape_geo_type = torch.tensor(self.shape_geo_type, dtype=torch.int32, device=adapter)
m.shape_geo_src = self.shape_geo_src
m.shape_geo_scale = torch.tensor(self.shape_geo_scale, dtype=torch.float32, device=adapter)
m.shape_materials = torch.tensor(self.shape_materials, dtype=torch.float32, device=adapter)
#---------------------
# springs
m.spring_indices = torch.tensor(self.spring_indices, dtype=torch.int32, device=adapter)
m.spring_rest_length = torch.tensor(self.spring_rest_length, dtype=torch.float32, device=adapter)
m.spring_stiffness = torch.tensor(self.spring_stiffness, dtype=torch.float32, device=adapter)
m.spring_damping = torch.tensor(self.spring_damping, dtype=torch.float32, device=adapter)
m.spring_control = torch.tensor(self.spring_control, dtype=torch.float32, device=adapter)
#---------------------
# triangles
m.tri_indices = torch.tensor(self.tri_indices, dtype=torch.int32, device=adapter)
m.tri_poses = torch.tensor(self.tri_poses, dtype=torch.float32, device=adapter)
m.tri_activations = torch.tensor(self.tri_activations, dtype=torch.float32, device=adapter)
#---------------------
# edges
m.edge_indices = torch.tensor(self.edge_indices, dtype=torch.int32, device=adapter)
m.edge_rest_angle = torch.tensor(self.edge_rest_angle, dtype=torch.float32, device=adapter)
#---------------------
# tetrahedra
m.tet_indices = torch.tensor(self.tet_indices, dtype=torch.int32, device=adapter)
m.tet_poses = torch.tensor(self.tet_poses, dtype=torch.float32, device=adapter)
m.tet_activations = torch.tensor(self.tet_activations, dtype=torch.float32, device=adapter)
m.tet_materials = torch.tensor(self.tet_materials, dtype=torch.float32, device=adapter)
#-----------------------
# muscles
muscle_count = len(self.muscle_start)
# close the muscle waypoint indices
self.muscle_start.append(len(self.muscle_links))
m.muscle_start = torch.tensor(self.muscle_start, dtype=torch.int32, device=adapter)
m.muscle_params = torch.tensor(self.muscle_params, dtype=torch.float32, device=adapter)
m.muscle_links = torch.tensor(self.muscle_links, dtype=torch.int32, device=adapter)
m.muscle_points = torch.tensor(self.muscle_points, dtype=torch.float32, device=adapter)
m.muscle_activation = torch.tensor(self.muscle_activation, dtype=torch.float32, device=adapter)
#--------------------------------------
# articulations
# build 6x6 spatial inertia and COM transform
body_X_cm = []
body_I_m = []
for i in range(len(self.body_inertia)):
body_I_m.append(spatial_matrix_from_inertia(self.body_inertia[i], self.body_mass[i]))
body_X_cm.append(transform(self.body_com[i], quat_identity()))
m.body_I_m = torch.tensor(body_I_m, dtype=torch.float32, device=adapter)
articulation_count = len(self.articulation_start)
joint_coord_count = len(self.joint_q)
joint_dof_count = len(self.joint_qd)
# 'close' the start index arrays with a sentinel value
self.joint_q_start.append(len(self.joint_q))
self.joint_qd_start.append(len(self.joint_qd))
self.articulation_start.append(len(self.joint_type))
# calculate total size and offsets of Jacobian and mass matrices for entire system
m.J_size = 0
m.M_size = 0
m.H_size = 0
articulation_J_start = []
articulation_M_start = []
articulation_H_start = []
articulation_M_rows = []
articulation_H_rows = []
articulation_J_rows = []
articulation_J_cols = []
articulation_dof_start = []
articulation_coord_start = []
for i in range(articulation_count):
first_joint = self.articulation_start[i]
last_joint = self.articulation_start[i+1]
first_coord = self.joint_q_start[first_joint]
last_coord = self.joint_q_start[last_joint]
first_dof = self.joint_qd_start[first_joint]
last_dof = self.joint_qd_start[last_joint]
joint_count = last_joint-first_joint
dof_count = last_dof-first_dof
coord_count = last_coord-first_coord
articulation_J_start.append(m.J_size)
articulation_M_start.append(m.M_size)
articulation_H_start.append(m.H_size)
articulation_dof_start.append(first_dof)
articulation_coord_start.append(first_coord)
# bit of data duplication here, but will leave it as such for clarity
articulation_M_rows.append(joint_count*6)
articulation_H_rows.append(dof_count)
articulation_J_rows.append(joint_count*6)
articulation_J_cols.append(dof_count)
m.J_size += 6*joint_count*dof_count
m.M_size += 6*joint_count*6*joint_count
m.H_size += dof_count*dof_count
m.articulation_joint_start = torch.tensor(self.articulation_start, dtype=torch.int32, device=adapter)
# matrix offsets for batched gemm
m.articulation_J_start = torch.tensor(articulation_J_start, dtype=torch.int32, device=adapter)
m.articulation_M_start = torch.tensor(articulation_M_start, dtype=torch.int32, device=adapter)
m.articulation_H_start = torch.tensor(articulation_H_start, dtype=torch.int32, device=adapter)
m.articulation_M_rows = torch.tensor(articulation_M_rows, dtype=torch.int32, device=adapter)
m.articulation_H_rows = torch.tensor(articulation_H_rows, dtype=torch.int32, device=adapter)
m.articulation_J_rows = torch.tensor(articulation_J_rows, dtype=torch.int32, device=adapter)
m.articulation_J_cols = torch.tensor(articulation_J_cols, dtype=torch.int32, device=adapter)
m.articulation_dof_start = torch.tensor(articulation_dof_start, dtype=torch.int32, device=adapter)
m.articulation_coord_start = torch.tensor(articulation_coord_start, dtype=torch.int32, device=adapter)
# state (initial)
m.joint_q = torch.tensor(self.joint_q, dtype=torch.float32, device=adapter)
m.joint_qd = torch.tensor(self.joint_qd, dtype=torch.float32, device=adapter)
# model
m.joint_type = torch.tensor(self.joint_type, dtype=torch.int32, device=adapter)
m.joint_parent = torch.tensor(self.joint_parent, dtype=torch.int32, device=adapter)
m.joint_X_pj = torch.tensor(transform_flatten_list(self.joint_X_pj), dtype=torch.float32, device=adapter)
m.joint_X_cm = torch.tensor(transform_flatten_list(body_X_cm), dtype=torch.float32, device=adapter)
m.joint_axis = torch.tensor(self.joint_axis, dtype=torch.float32, device=adapter)
m.joint_q_start = torch.tensor(self.joint_q_start, dtype=torch.int32, device=adapter)
m.joint_qd_start = torch.tensor(self.joint_qd_start, dtype=torch.int32, device=adapter)
# dynamics properties
m.joint_armature = torch.tensor(self.joint_armature, dtype=torch.float32, device=adapter)
m.joint_target = torch.tensor(self.joint_target, dtype=torch.float32, device=adapter)
m.joint_target_ke = torch.tensor(self.joint_target_ke, dtype=torch.float32, device=adapter)
m.joint_target_kd = torch.tensor(self.joint_target_kd, dtype=torch.float32, device=adapter)
m.joint_limit_lower = torch.tensor(self.joint_limit_lower, dtype=torch.float32, device=adapter)
m.joint_limit_upper = torch.tensor(self.joint_limit_upper, dtype=torch.float32, device=adapter)
m.joint_limit_ke = torch.tensor(self.joint_limit_ke, dtype=torch.float32, device=adapter)
m.joint_limit_kd = torch.tensor(self.joint_limit_kd, dtype=torch.float32, device=adapter)
# counts
m.particle_count = len(self.particle_q)
m.articulation_count = articulation_count
m.joint_coord_count = joint_coord_count
m.joint_dof_count = joint_dof_count
m.muscle_count = muscle_count
m.link_count = len(self.joint_type)
m.shape_count = len(self.shape_geo_type)
m.tri_count = len(self.tri_poses)
m.tet_count = len(self.tet_poses)
m.edge_count = len(self.edge_rest_angle)
m.spring_count = len(self.spring_rest_length)
m.contact_count = 0
# store refs to geometry
m.geo_meshes = self.geo_meshes
m.geo_sdfs = self.geo_sdfs
# enable ground plane
m.ground = True
m.enable_tri_collisions = False
m.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)
# allocate space for mass / jacobian matrices
m.alloc_mass_matrix()
return m
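# Minimal end-to-end sketch (illustrative only; assumes this builder class is
# instantiated directly, e.g. as dflex's ModelBuilder):
#
#   builder = ModelBuilder()
#   builder.add_cloth_grid(pos=(0.0, 2.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
#                          vel=(0.0, 0.0, 0.0), dim_x=16, dim_y=16,
#                          cell_x=0.1, cell_y=0.1, mass=0.05, fix_top=True)
#   model = builder.finalize(adapter="cpu")
#
# The returned Model holds all data as torch tensors on the chosen device,
# ready for simulation.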
| 71,060 | Python | 36.798404 | 206 | 0.562046 |
vstrozzi/FRL-SHAC-Extension/dflex/build/lib/dflex/adjoint.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import imp
import ast
import math
import inspect
import typing
import weakref
import numpy as np
import torch
import torch.utils.cpp_extension
import dflex.config
import copy
# Todo
#-----
#
# [ ] Unary ops (e.g.: -)
# [ ] Inplace ops (e.g.: +=, -=)
# [ ] Conditionals
# [ ] Loops (unrolled)
# [ ] Auto-gen PyTorch operator
# [ ] CUDA kernel code gen + dynamic compilation
# -----
operators = {}
functions = {}
cuda_functions = {}
kernels = {}
#----------------------
# built-in types
class float3:
def __init__(self):
x = 0.0
y = 0.0
z = 0.0
class float4:
def __init__(self):
x = 0.0
y = 0.0
z = 0.0
w = 0.0
class quat:
def __init__(self):
x = 0.0
y = 0.0
z = 0.0
w = 1.0
class mat22:
def __init__(self):
pass
class mat33:
def __init__(self):
pass
class spatial_vector:
def __init__(self):
pass
class spatial_matrix:
def __init__(self):
pass
class spatial_transform:
def __init__(self):
pass
class void:
def __init__(self):
pass
class tensor:
def __init__(self, type):
self.type = type
self.requires_grad = True
self.__name__ = "tensor<" + type.__name__ + ">"
#----------------------
# register built-in function
def builtin(key):
def insert(func):
func.key = key
func.prefix = "df::"
functions[key] = func
return func
return insert
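# Illustrative sketch of how a new builtin would be registered (a hypothetical
# "atan2" key is used purely as an example; it is not part of the set below and
# would also require matching df::atan2 / df::adj_atan2 implementations in
# adjoint.h):
#
# @builtin("atan2")
# class Atan2Func:
#     @staticmethod
#     def value_type(args):
#         return float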
#---------------------------------
# built-in operators +,-,*,/
@builtin("add")
class AddFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("sub")
class SubFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mod")
class ModFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mul")
class MulFunc:
@staticmethod
def value_type(args):
# todo: encode operator output type globally
if (args[0].type == mat33 and args[1].type == float3):
return float3
if (args[0].type == spatial_matrix and args[1].type == spatial_vector):
return spatial_vector
else:
return args[0].type
@builtin("div")
class DivFunc:
@staticmethod
def value_type(args):
return args[0].type
#----------------------
# map operator nodes to builtin
operators[ast.Add] = "add"
operators[ast.Sub] = "sub"
operators[ast.Mult] = "mul"
operators[ast.Div] = "div"
operators[ast.FloorDiv] = "div"
operators[ast.Mod] = "mod"
operators[ast.Gt] = ">"
operators[ast.Lt] = "<"
operators[ast.GtE] = ">="
operators[ast.LtE] = "<="
operators[ast.Eq] = "=="
operators[ast.NotEq] = "!="
#----------------------
# built-in functions
@builtin("min")
class MinFunc:
@staticmethod
def value_type(args):
return float
@builtin("max")
class MaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_max")
class LeakyMaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_min")
class LeakyMinFunc:
@staticmethod
def value_type(args):
return float
@builtin("clamp")
class ClampFunc:
@staticmethod
def value_type(args):
return float
@builtin("step")
class StepFunc:
@staticmethod
def value_type(args):
return float
@builtin("nonzero")
class NonZeroFunc:
@staticmethod
def value_type(args):
return float
@builtin("sign")
class SignFunc:
@staticmethod
def value_type(args):
return float
@builtin("abs")
class AbsFunc:
@staticmethod
def value_type(args):
return float
@builtin("sin")
class SinFunc:
@staticmethod
def value_type(args):
return float
@builtin("cos")
class CosFunc:
@staticmethod
def value_type(args):
return float
@builtin("acos")
class ACosFunc:
@staticmethod
def value_type(args):
return float
@builtin("sin")
class SinFunc:
@staticmethod
def value_type(args):
return float
@builtin("cos")
class CosFunc:
@staticmethod
def value_type(args):
return float
@builtin("sqrt")
class SqrtFunc:
@staticmethod
def value_type(args):
return float
@builtin("dot")
class DotFunc:
@staticmethod
def value_type(args):
return float
@builtin("cross")
class CrossFunc:
@staticmethod
def value_type(args):
return float3
@builtin("skew")
class SkewFunc:
@staticmethod
def value_type(args):
return mat33
@builtin("length")
class LengthFunc:
@staticmethod
def value_type(args):
return float
@builtin("normalize")
class NormalizeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("select")
class SelectFunc:
@staticmethod
def value_type(args):
return args[1].type
@builtin("rotate")
class RotateFunc:
@staticmethod
def value_type(args):
return float3
@builtin("rotate_inv")
class RotateInvFunc:
@staticmethod
def value_type(args):
return float3
@builtin("determinant")
class DeterminantFunc:
@staticmethod
def value_type(args):
return float
@builtin("transpose")
class TransposeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("load")
class LoadFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Load input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Load input 1 must be a int")
return args[0].type.type
@builtin("store")
class StoreFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Store input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Store input 1 must be a int")
if (args[2].type != args[0].type.type):
raise Exception("Store input 2 must be of the same type as the tensor")
return None
@builtin("atomic_add")
class AtomicAddFunc:
@staticmethod
def value_type(args):
return None
@builtin("atomic_sub")
class AtomicSubFunc:
@staticmethod
def value_type(args):
return None
@builtin("tid")
class ThreadIdFunc:
@staticmethod
def value_type(args):
return int
# type constructors
@builtin("float")
class floatFunc:
@staticmethod
def value_type(args):
return float
@builtin("int")
class IntFunc:
@staticmethod
def value_type(args):
return int
@builtin("float3")
class Float3Func:
@staticmethod
def value_type(args):
return float3
@builtin("quat")
class QuatFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_identity")
class QuatIdentityFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_from_axis_angle")
class QuatAxisAngleFunc:
@staticmethod
def value_type(args):
return quat
@builtin("mat22")
class Mat22Func:
@staticmethod
def value_type(args):
return mat22
@builtin("mat33")
class Mat33Func:
@staticmethod
def value_type(args):
return mat33
@builtin("spatial_vector")
class SpatialVectorFunc:
@staticmethod
def value_type(args):
return spatial_vector
# built-in spatial operators
@builtin("spatial_transform")
class TransformFunc:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("spatial_transform_identity")
class TransformIdentity:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("inverse")
class Inverse:
@staticmethod
def value_type(args):
return quat
# @builtin("spatial_transform_inverse")
# class TransformInverse:
# @staticmethod
# def value_type(args):
# return spatial_transform
@builtin("spatial_transform_get_translation")
class TransformGetTranslation:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_get_rotation")
class TransformGetRotation:
@staticmethod
def value_type(args):
return quat
@builtin("spatial_transform_multiply")
class TransformMulFunc:
@staticmethod
def value_type(args):
return spatial_transform
# @builtin("spatial_transform_inertia")
# class TransformInertiaFunc:
# @staticmethod
# def value_type(args):
# return spatial_matrix
@builtin("spatial_adjoint")
class SpatialAdjoint:
@staticmethod
def value_type(args):
return spatial_matrix
@builtin("spatial_dot")
class SpatialDotFunc:
@staticmethod
def value_type(args):
return float
@builtin("spatial_cross")
class SpatialCrossFunc:
@staticmethod
def value_type(args):
return spatial_vector
@builtin("spatial_cross_dual")
class SpatialCrossDualFunc:
@staticmethod
def value_type(args):
return spatial_vector
@builtin("spatial_transform_point")
class SpatialTransformPointFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_vector")
class SpatialTransformVectorFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_top")
class SpatialTopFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_bottom")
class SpatialBottomFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_jacobian")
class SpatialJacobian:
@staticmethod
def value_type(args):
return None
@builtin("spatial_mass")
class SpatialMass:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm")
class DenseGemm:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm_batched")
class DenseGemmBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol")
class DenseChol:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol_batched")
class DenseCholBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_subs")
class DenseSubs:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve")
class DenseSolve:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve_batched")
class DenseSolveBatched:
@staticmethod
def value_type(args):
return None
# helpers
@builtin("index")
class IndexFunc:
@staticmethod
def value_type(args):
return float
@builtin("print")
class PrintFunc:
@staticmethod
def value_type(args):
return None
class Var:
def __init__(adj, label, type, requires_grad=False, constant=None):
adj.label = label
adj.type = type
adj.requires_grad = requires_grad
adj.constant = constant
def __str__(adj):
return adj.label
def ctype(self):
if (isinstance(self.type, tensor)):
if self.type.type == float3:
return str("df::" + self.type.type.__name__) + "*"
return str(self.type.type.__name__) + "*"
elif self.type == float3:
return "df::" + str(self.type.__name__)
else:
return str(self.type.__name__)
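# Illustrative mapping produced by ctype() for the branches above:
#   Var("a", tensor(float3)).ctype() -> "df::float3*"
#   Var("b", tensor(float)).ctype()  -> "float*"
#   Var("c", float3).ctype()         -> "df::float3"
#   Var("d", float).ctype()          -> "float"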
#--------------------
# Storage class for partial AST up to a return statement.
class Stmt:
def __init__(self, cond, forward, forward_replay, reverse, ret_forward, ret_line):
self.cond = cond # condition, can be None
self.forward = forward # all forward code outside of conditional branch *since last return*
self.forward_replay = forward_replay
self.reverse = reverse # all reverse code including the reverse of any code in ret_forward
self.ret_forward = ret_forward # all forward commands in the return statement except the actual return statement
self.ret_line = ret_line # actual return statement
#------------------------------------------------------------------------
# Source code transformer, this class takes a Python function and
# computes its adjoint using single-pass translation of the function's AST
class Adjoint:
def __init__(adj, func, device='cpu'):
adj.func = func
adj.device = device
adj.symbols = {} # map from symbols to adjoint variables
adj.variables = [] # list of local variables (in order)
adj.args = [] # list of function arguments (in order)
adj.cond = None # condition variable if in branch
adj.return_var = None # return type for function or kernel
# build AST from function object
adj.source = inspect.getsource(func)
adj.tree = ast.parse(adj.source)
# parse argument types
arg_types = typing.get_type_hints(func)
# add variables and symbol map for each argument
for name, t in arg_types.items():
adj.symbols[name] = Var(name, t, False)
# build ordered list of args
for a in adj.tree.body[0].args.args:
adj.args.append(adj.symbols[a.arg])
# primal statements (allows different statements in replay)
adj.body_forward = []
adj.body_forward_replay = []
adj.body_reverse = []
adj.output = []
adj.indent_count = 0
adj.label_count = 0
# recursively evaluate function body
adj.eval(adj.tree.body[0])
# code generation methods
def format_template(adj, template, input_vars, output_var):
# output var is always the 0th index
args = [output_var] + input_vars
s = template.format(*args)
return s
# generates a comma separated list of args
def format_args(adj, prefix, indices):
args = ""
sep = ""
for i in indices:
args += sep + prefix + str(i)
sep = ", "
return args
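# e.g. given three variables whose labels are "0", "1" and "2",
# format_args("var_", vars) returns "var_0, var_1, var_2", while
# format_args("adj_", []) returns "".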
def add_var(adj, type=None, constant=None):
index = len(adj.variables)
v = Var(str(index), type=type, constant=constant)
adj.variables.append(v)
return v
def add_constant(adj, n):
output = adj.add_var(type=type(n), constant=n)
#adj.add_forward("var_{} = {};".format(output, n))
return output
def add_load(adj, input):
output = adj.add_var(input.type)
adj.add_forward("var_{} = {};".format(output, input))
adj.add_reverse("adj_{} += adj_{};".format(input, output))
return output
def add_operator(adj, op, inputs):
# todo: just using first input as the output type, would need some
# type inference here to support things like float3 = float*float3
output = adj.add_var(inputs[0].type)
transformer = operators[op.__class__]
for t in transformer.forward():
adj.add_forward(adj.format_template(t, inputs, output))
for t in transformer.reverse():
adj.add_reverse(adj.format_template(t, inputs, output))
return output
def add_comp(adj, op_strings, left, comps):
output = adj.add_var(bool)
s = "var_" + str(output) + " = " + ("(" * len(comps)) + "var_" + str(left) + " "
for op, comp in zip(op_strings, comps):
s += op + " var_" + str(comp) + ") "
s = s.rstrip() + ";"
adj.add_forward(s)
return output
def add_bool_op(adj, op_string, exprs):
output = adj.add_var(bool)
command = "var_" + str(output) + " = " + (" " + op_string + " ").join(["var_" + str(expr) for expr in exprs]) + ";"
adj.add_forward(command)
return output
def add_call(adj, func, inputs, prefix='df::'):
# expression (zero output), e.g.: tid()
if (func.value_type(inputs) == None):
forward_call = prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {});".format("adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs))
adj.add_reverse(reverse_call)
return None
# function (one output)
else:
output = adj.add_var(func.value_type(inputs))
forward_call = "var_{} = ".format(output) + prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {}, {});".format(
"adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs), adj.format_args("adj_", [output]))
adj.add_reverse(reverse_call)
return output
def add_return(adj, var):
if (var == None):
adj.add_forward("return;".format(var), "goto label{};".format(adj.label_count))
else:
adj.add_forward("return var_{};".format(var), "goto label{};".format(adj.label_count))
adj.add_reverse("adj_" + str(var) + " += adj_ret;")
adj.add_reverse("label{}:;".format(adj.label_count))
adj.label_count += 1
# define an if statement
def begin_if(adj, cond):
adj.add_forward("if (var_{}) {{".format(cond))
adj.add_reverse("}")
adj.indent_count += 1
def end_if(adj, cond):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("if (var_{}) {{".format(cond))
# define a for-loop
def begin_for(adj, iter, start, end):
# note that dynamic for-loops must not mutate any previous state, so we don't need to re-run them in the reverse pass
adj.add_forward("for (var_{0}=var_{1}; var_{0} < var_{2}; ++var_{0}) {{".format(iter, start, end), "if (false) {")
adj.add_reverse("}")
adj.indent_count += 1
def end_for(adj, iter, start, end):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("for (var_{0}=var_{2}-1; var_{0} >= var_{1}; --var_{0}) {{".format(iter, start, end))
# append a statement to the forward pass
def add_forward(adj, statement, statement_replay=None):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_forward.append(prefix + statement)
# allow for different statement in reverse kernel replay
if (statement_replay):
adj.body_forward_replay.append(prefix + statement_replay)
else:
adj.body_forward_replay.append(prefix + statement)
# append a statement to the reverse pass
def add_reverse(adj, statement):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_reverse.append(prefix + statement)
def eval(adj, node):
try:
if (isinstance(node, ast.FunctionDef)):
out = None
for f in node.body:
out = adj.eval(f)
if 'return' in adj.symbols and adj.symbols['return'] is not None:
out = adj.symbols['return']
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
else:
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
return out
elif (isinstance(node, ast.If)): # if statement
if len(node.orelse) != 0:
raise SyntaxError("Else statements not currently supported")
if len(node.body) == 0:
return None
# save symbol map
symbols_prev = adj.symbols.copy()
# eval condition
cond = adj.eval(node.test)
# eval body
adj.begin_if(cond)
for stmt in node.body:
adj.eval(stmt)
adj.end_if(cond)
# detect symbols with conflicting definitions (assigned inside the branch)
for items in symbols_prev.items():
sym = items[0]
var1 = items[1]
var2 = adj.symbols[sym]
if var1 != var2:
# insert a phi function that
# selects var1, var2 based on cond
out = adj.add_call(functions["select"], [cond, var1, var2])
adj.symbols[sym] = out
return None
elif (isinstance(node, ast.Compare)):
# node.left, node.ops (list of ops), node.comparators (things to compare to)
# e.g. (left ops[0] node.comparators[0]) ops[1] node.comparators[1]
left = adj.eval(node.left)
comps = [adj.eval(comp) for comp in node.comparators]
op_strings = [operators[type(op)] for op in node.ops]
out = adj.add_comp(op_strings, left, comps)
return out
elif (isinstance(node, ast.BoolOp)):
# op, expr list values (e.g. an `and` op and a list of the expressions being combined)
op = node.op
if isinstance(op, ast.And):
func = "&&"
elif isinstance(op, ast.Or):
func = "||"
else:
raise KeyError("Op {} is not supported".format(op))
out = adj.add_bool_op(func, [adj.eval(expr) for expr in node.values])
return out
elif (isinstance(node, ast.Name)):
# lookup symbol, if it has already been assigned to a variable then return the existing mapping
if (node.id in adj.symbols):
return adj.symbols[node.id]
else:
raise KeyError("Referencing undefined symbol: " + str(node.id))
elif (isinstance(node, ast.Num)):
# lookup constant, if it has already been assigned then return existing var
# currently disabled, since assigning a constant in a branch means it may not be defined along all code paths
key = (node.n, type(node.n))
if (key in adj.symbols):
return adj.symbols[key]
else:
out = adj.add_constant(node.n)
adj.symbols[key] = out
return out
#out = adj.add_constant(node.n)
#return out
elif (isinstance(node, ast.BinOp)):
# evaluate binary operator arguments
left = adj.eval(node.left)
right = adj.eval(node.right)
name = operators[type(node.op)]
func = functions[name]
out = adj.add_call(func, [left, right])
return out
elif (isinstance(node, ast.UnaryOp)):
# evaluate unary op arguments
arg = adj.eval(node.operand)
out = adj.add_operator(node.op, [arg])
return out
elif (isinstance(node, ast.For)):
if (len(node.iter.args) != 2):
raise Exception("For loop ranges must be of form range(start, end) with both start and end specified and no skip specifier.")
# check if loop range is compile time constant
unroll = True
for a in node.iter.args:
if (isinstance(a, ast.Num) == False):
unroll = False
break
if (unroll):
# constant loop, unroll
start = node.iter.args[0].n
end = node.iter.args[1].n
for i in range(start, end):
var_iter = adj.add_constant(i)
adj.symbols[node.target.id] = var_iter
# eval body
for s in node.body:
adj.eval(s)
else:
# dynamic loop, body must be side-effect free, i.e.: not
# overwrite memory locations used by previous operations
start = adj.eval(node.iter.args[0])
end = adj.eval(node.iter.args[1])
# add iterator variable
iter = adj.add_var(int)
adj.symbols[node.target.id] = iter
adj.begin_for(iter, start, end)
# eval body
for s in node.body:
adj.eval(s)
adj.end_for(iter, start, end)
elif (isinstance(node, ast.Expr)):
return adj.eval(node.value)
elif (isinstance(node, ast.Call)):
name = None
# determine if call is to a builtin (attribute), or to a user-func (name)
if (isinstance(node.func, ast.Attribute)):
name = node.func.attr
elif (isinstance(node.func, ast.Name)):
name = node.func.id
# check it exists
if name not in functions:
raise KeyError("Could not find function {}".format(name))
if adj.device == 'cuda' and name in cuda_functions:
func = cuda_functions[name]
else:
func = functions[name]
args = []
# eval all arguments
for arg in node.args:
var = adj.eval(arg)
args.append(var)
# add var with value type from the function
out = adj.add_call(func, args, prefix=func.prefix)
return out
elif (isinstance(node, ast.Subscript)):
target = adj.eval(node.value)
indices = []
if isinstance(node.slice, ast.Tuple):
# handles the M[i, j] case
for arg in node.slice.elts:
var = adj.eval(arg)
indices.append(var)
else:
# simple expression
var = adj.eval(node.slice)
indices.append(var)
out = adj.add_call(functions["index"], [target, *indices])
return out
elif (isinstance(node, ast.Assign)):
# if adj.cond is not None:
# raise SyntaxError("error, cannot assign variables in a conditional branch")
# evaluate rhs
out = adj.eval(node.value)
# update symbol map (assumes lhs is a Name node)
adj.symbols[node.targets[0].id] = out
return out
elif (isinstance(node, ast.Return)):
cond = adj.cond # None if not in branch, else branch boolean
out = adj.eval(node.value)
adj.symbols['return'] = out
if out is not None: # set return type of function
return_var = out
if adj.return_var is not None and adj.return_var.ctype() != return_var.ctype():
raise TypeError("error, function returned different types")
adj.return_var = return_var
adj.add_return(out)
return out
elif node is None:
return None
else:
print("[WARNING] ast node of type {} not supported".format(type(node)))
except Exception as e:
# print error / line number
lines = adj.source.splitlines()
print("Error: {} while transforming node {} in func: {} at line: {} col: {}: \n {}".format(e, type(node), adj.func.__name__, node.lineno, node.col_offset, lines[max(node.lineno-1, 0)]))
raise
#----------------
# code generation
cpu_module_header = '''
#define CPU
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cuda_module_header = '''
#define CUDA
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cpu_function_template = '''
{return_type} {name}_cpu_func({forward_args})
{{
{forward_body}
}}
void adj_{name}_cpu_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_function_template = '''
CUDA_CALLABLE {return_type} {name}_cuda_func({forward_args})
{{
{forward_body}
}}
CUDA_CALLABLE void adj_{name}_cuda_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_kernel_template = '''
__global__ void {name}_cuda_kernel_forward(int dim, {forward_args})
{{
{forward_body}
}}
__global__ void {name}_cuda_kernel_backward(int dim, {forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cpu_kernel_template = '''
void {name}_cpu_kernel_forward({forward_args})
{{
{forward_body}
}}
void {name}_cpu_kernel_backward({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_module_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args})
{{
{name}_cuda_kernel_forward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args})
{{
{name}_cuda_kernel_backward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params}, {reverse_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
'''
cpu_module_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_forward({forward_params});
}}
}}
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_backward({forward_params}, {reverse_params});
}}
}}
'''
cuda_module_header_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args});
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args});
'''
cpu_module_header_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args});
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args});
'''
def indent(args, stops=1):
sep = "\n"
for i in range(stops):
sep += "\t"
return sep + args.replace(", ", "," + sep)
def codegen_func_forward_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
for f in stmt.forward:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
body += [indent_block + stmt.ret_line + "\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
body += [stmt.ret_line + "\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_forward(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant == None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# forward pass
s += " //---------\n"
s += " // forward\n"
if device == 'cpu':
s += codegen_func_forward_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_forward_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_forward_body(adj, device=device, indent=4)
return s
def codegen_func_reverse_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
# forward pass
body += ["//---------\n"]
body += ["// forward\n"]
for f in stmt.forward_replay:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
# reverse pass
body += [indent_block + "//---------\n"]
body += [indent_block + "// reverse\n"]
for l in stmt.reverse:
body += [indent_block + l + "\n"]
body += [indent_block + "return;\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
# reverse pass
body += ["//---------\n"]
body += ["// reverse\n"]
for l in stmt.reverse:
body += [l + "\n"]
body += ["return;\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_reverse(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
        if var.constant is None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# dual vars
s += " //---------\n"
s += " // dual vars\n"
for var in adj.variables:
s += " " + var.ctype() + " adj_" + str(var.label) + " = 0;\n"
if device == 'cpu':
s += codegen_func_reverse_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_reverse_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_reverse_body(adj, device=device, indent=4)
else:
raise ValueError("Device {} not supported for codegen".format(device))
return s
def codegen_func(adj, device='cpu'):
# forward header
# return_type = "void"
return_type = 'void' if adj.return_var is None else adj.return_var.ctype()
# s = "{} {}_forward(".format(return_type, adj.func.__name__)
# sep = ""
# for arg in adj.args:
# if (arg.label != 'return'):
# s += sep + str(arg.type.__name__) + " var_" + arg.label
# sep = ", "
# reverse header
# s = "void {}_reverse(".format(adj.func.__name__)
# return s
forward_args = ""
reverse_args = ""
# s = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
if "*" in arg.ctype():
reverse_args += sep + arg.ctype() + " adj_" + arg.label
else:
reverse_args += sep + arg.ctype() + " & adj_" + arg.label
sep = ", "
reverse_args += sep + return_type + " & adj_ret"
# reverse args
# add primal version of parameters
# sep = ""
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + " var_" + var.label
# sep = ", "
# # add adjoint version of parameters
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + "& adj_" + var.label
# sep = ", "
# # add adjoint of output
# if ('return' in adj.symbols and adj.symbols['return'] != None):
# s += sep + str(adj.symbols['return'].type.__name__) + " adj_" + str(adj.symbols['return'])
# codegen body
forward_body = codegen_func_forward(adj, func_type='function', device=device)
reverse_body = codegen_func_reverse(adj, func_type='function', device=device)
if device == 'cpu':
template = cpu_function_template
elif device == 'cuda':
template = cuda_function_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
return_type=return_type,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
def codegen_kernel(adj, device='cpu'):
forward_args = ""
reverse_args = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
sep = ", "
# codegen body
forward_body = codegen_func_forward(adj, func_type='kernel', device=device)
reverse_body = codegen_func_reverse(adj, func_type='kernel', device=device)
# import pdb
# pdb.set_trace()
if device == 'cpu':
template = cpu_kernel_template
elif device == 'cuda':
template = cuda_kernel_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
def codegen_module(adj, device='cpu'):
forward_args = ""
reverse_args = ""
forward_params = ""
reverse_params = ""
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
forward_args += sep + "torch::Tensor var_" + arg.label
forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")"
else:
forward_args += sep + arg.ctype() + " var_" + arg.label
forward_params += sep + "var_" + arg.label
sep = ", "
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
reverse_args += sep + "torch::Tensor adj_" + arg.label
reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")"
else:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
reverse_params += sep + "adj_" + arg.label
sep = ", "
if device == 'cpu':
template = cpu_module_template
elif device == 'cuda':
template = cuda_module_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_params=indent(forward_params, 3),
reverse_params=indent(reverse_params, 3))
return s
def codegen_module_decl(adj, device='cpu'):
forward_args = ""
reverse_args = ""
forward_params = ""
reverse_params = ""
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
forward_args += sep + "torch::Tensor var_" + arg.label
forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")"
else:
forward_args += sep + arg.ctype() + " var_" + arg.label
forward_params += sep + "var_" + arg.label
sep = ", "
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
reverse_args += sep + "torch::Tensor adj_" + arg.label
reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")"
else:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
reverse_params += sep + "adj_" + arg.label
sep = ", "
if device == 'cpu':
template = cpu_module_header_template
elif device == 'cuda':
template = cuda_module_header_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args))
return s
# runs vcvars and copies back the build environment, PyTorch should really be doing this
def set_build_env():
if os.name == 'nt':
# VS2019 (required for PyTorch headers)
        vcvars_path = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvars64.bat"
s = '"{}" && set'.format(vcvars_path)
output = os.popen(s).read()
for line in output.splitlines():
pair = line.split("=", 1)
if (len(pair) >= 2):
os.environ[pair[0]] = pair[1]
else: # nothing needed for Linux or Mac
pass
def import_module(module_name, path):
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
file, path, description = imp.find_module(module_name, [path])
# Close the .so file after load.
with file:
return imp.load_module(module_name, file, path, description)
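# Note: the imp module used above is deprecated in modern Python; if this ever
# needs porting, importlib.util.spec_from_file_location/module_from_spec is the
# equivalent mechanism for loading the compiled extension from an explicit path.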
def rename(name, return_type):
def func(cls):
cls.__name__ = name
cls.key = name
cls.prefix = ""
cls.return_type = return_type
return cls
return func
user_funcs = {}
user_kernels = {}
def func(f):
user_funcs[f.__name__] = f
# adj = Adjoint(f)
# print(adj.codegen_forward())
# print(adj.codegen_reverse())
# set_build_env()
# include_path = os.path.dirname(os.path.realpath(__file__))
# # requires PyTorch hotfix https://github.com/pytorch/pytorch/pull/33002
# test_cuda = torch.utils.cpp_extension.load_inline('test_cuda', [cpp_template], None, ["test_forward_1", "test_backward_1"], extra_include_paths=include_path, verbose=True)
# help(test_cuda)
def kernel(f):
# stores source and compiled entry points for a kernel (will be populated after module loads)
class Kernel:
def __init__(self, f):
self.func = f
def register(self, module):
# lookup entry points based on name
self.forward_cpu = eval("module." + self.func.__name__ + "_cpu_forward")
self.backward_cpu = eval("module." + self.func.__name__ + "_cpu_backward")
if (torch.cuda.is_available()):
self.forward_cuda = eval("module." + self.func.__name__ + "_cuda_forward")
self.backward_cuda = eval("module." + self.func.__name__ + "_cuda_backward")
k = Kernel(f)
# register globally
user_kernels[f.__name__] = k
return k
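# Illustrative sketch only (the kernel-language builtins such as tid()/load()/store()
# and the tensor(...) annotation are assumptions about the DSL defined elsewhere in
# this module): user code registers helpers and kernels via the decorators above and
# then calls compile() to build and load them, e.g.
#
#   @func
#   def sqr(x: float):
#       return x * x
#
#   @kernel
#   def scale(src: tensor(float), dst: tensor(float)):
#       i = tid()
#       store(dst, i, sqr(load(src, i)))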
def compile():
use_cuda = torch.cuda.is_available()
if not use_cuda:
print("[INFO] CUDA support not found. Disabling CUDA kernel compilation.")
cpp_source = ""
cuda_source = ""
cpp_source += cpu_module_header
cuda_source += cuda_module_header
# kernels
entry_points = []
# functions
for name, func in user_funcs.items():
adj = Adjoint(func, device='cpu')
cpp_source += codegen_func(adj, device='cpu')
adj = Adjoint(func, device='cuda')
cuda_source += codegen_func(adj, device='cuda')
# import pdb
# pdb.set_trace()
import copy
@rename(func.__name__ + "_cpu_func", adj.return_var.type)
class Func:
@classmethod
def value_type(cls, *args):
return cls.return_type
functions[func.__name__] = Func
@rename(func.__name__ + "_cuda_func", adj.return_var.type)
class CUDAFunc:
@classmethod
def value_type(cls, *args):
return cls.return_type
cuda_functions[func.__name__] = CUDAFunc
for name, kernel in user_kernels.items():
if use_cuda:
# each kernel gets an entry point in the module
entry_points.append(name + "_cuda_forward")
entry_points.append(name + "_cuda_backward")
# each kernel gets an entry point in the module
entry_points.append(name + "_cpu_forward")
entry_points.append(name + "_cpu_backward")
if use_cuda:
adj = Adjoint(kernel.func, device='cuda')
cuda_source += codegen_kernel(adj, device='cuda')
cuda_source += codegen_module(adj, device='cuda')
cpp_source += codegen_module_decl(adj, device='cuda')
adj = Adjoint(kernel.func, device='cpu')
cpp_source += codegen_kernel(adj, device='cpu')
cpp_source += codegen_module(adj, device='cpu')
cpp_source += codegen_module_decl(adj, device='cpu')
include_path = os.path.dirname(os.path.realpath(__file__))
build_path = os.path.dirname(os.path.realpath(__file__)) + "/kernels"
cache_file = build_path + "/adjoint.gen"
if (os.path.exists(build_path) == False):
os.mkdir(build_path)
# test cache
if (os.path.exists(cache_file)):
f = open(cache_file, 'r')
cache_string = f.read()
f.close()
if (cache_string == cpp_source):
print("Using cached kernels")
module = import_module("kernels", build_path)
# register kernel methods
for k in user_kernels.values():
k.register(module)
return module
# print("ignoring rebuild, using stale kernels")
# module = import_module("kernels", build_path)
# return module
# cache stale, rebuild
print("Rebuilding kernels")
set_build_env()
# debug config
#module = torch.utils.cpp_extension.load_inline('kernels', [cpp_source], None, entry_points, extra_cflags=["/Zi", "/Od"], extra_ldflags=["/DEBUG"], build_directory=build_path, extra_include_paths=[include_path], verbose=True)
if os.name == 'nt':
cpp_flags = ["/Ox", "-DNDEBUG", "/fp:fast"]
ld_flags = ["-DNDEBUG"]
# cpp_flags = ["/Zi", "/Od", "/DEBUG"]
# ld_flags = ["/DEBUG"]
else:
cpp_flags = ["-Z", "-O2", "-DNDEBUG"]
ld_flags = ["-DNDEBUG"]
    # just use minimum to ensure compatibility
cuda_flags = ['-gencode=arch=compute_35,code=compute_35']
# release config
if use_cuda:
module = torch.utils.cpp_extension.load_inline('kernels',
cpp_sources=[cpp_source],
cuda_sources=[cuda_source],
functions=entry_points,
extra_cflags=cpp_flags,
extra_ldflags=ld_flags,
extra_cuda_cflags=cuda_flags,
build_directory=build_path,
extra_include_paths=[include_path],
verbose=True,
with_pytorch_error_handling=False)
else:
module = torch.utils.cpp_extension.load_inline('kernels',
cpp_sources=[cpp_source],
cuda_sources=[],
functions=entry_points,
extra_cflags=cpp_flags,
extra_ldflags=ld_flags,
extra_cuda_cflags=cuda_flags,
build_directory=build_path,
extra_include_paths=[include_path],
verbose=True,
with_pytorch_error_handling=False)
# update cache
f = open(cache_file, 'w')
f.write(cpp_source)
f.close()
# register kernel methods
for k in user_kernels.values():
k.register(module)
return module
#---------------------------------------------
# Helper functions for launching kernels as Torch ops
def check_adapter(l, a):
for t in l:
if torch.is_tensor(t):
assert(t.device.type == a)
def check_finite(l):
for t in l:
if torch.is_tensor(t):
assert(t.is_contiguous())
if (torch.isnan(t).any() == True):
print(t)
assert(torch.isnan(t).any() == False)
else:
assert(math.isnan(t) == False)
def filter_grads(grads):
"""helper that takes a list of gradient tensors and makes non-outputs None
as required by PyTorch when returning from a custom op
"""
outputs = []
for g in grads:
if torch.is_tensor(g) and len(g) > 0:
outputs.append(g)
else:
outputs.append(None)
return tuple(outputs)
def make_empty(outputs, device):
empty = []
for o in outputs:
empty.append(torch.FloatTensor().to(device))
return empty
def make_contiguous(grads):
ret = []
for g in grads:
ret.append(g.contiguous())
return ret
def copy_params(params):
out = []
for p in params:
if torch.is_tensor(p):
c = p.clone()
if c.dtype == torch.float32:
c.requires_grad_()
out.append(c)
else:
out.append(p)
return out
def assert_device(device, inputs):
"""helper that asserts that all Tensors in inputs reside on the specified
device (device should be cpu or cuda). Also checks that dtypes are correct.
"""
for arg in inputs:
if isinstance(arg, torch.Tensor):
if (arg.dtype == torch.float64) or (arg.dtype == torch.float16):
raise TypeError("Tensor {arg} has invalid dtype {dtype}".format(arg=arg, dtype=arg.dtype))
if device == 'cpu':
if arg.is_cuda: # make sure all tensors are on the right device. Can fail silently in the CUDA kernel.
raise TypeError("Tensor {arg} is using CUDA but was expected to be on the CPU.".format(arg=arg))
elif torch.device(device).type == 'cuda': #elif device.startswith('cuda'):
if not arg.is_cuda:
raise TypeError("Tensor {arg} is not on a CUDA device but was expected to be using CUDA.".format(arg=arg))
else:
raise ValueError("Device {} is not supported".format(device))
def to_weak_list(s):
w = []
for o in s:
w.append(weakref.ref(o))
return w
def to_strong_list(w):
s = []
for o in w:
s.append(o())
return s
# standalone method to launch a kernel using PyTorch graph (skip custom tape)
def launch_torch(func, dim, inputs, outputs, adapter, preserve_output=False, check_grad=False, no_grad=False):
num_inputs = len(inputs)
num_outputs = len(outputs)
# define autograd type
class TorchFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
#local_inputs = args[0:num_inputs]
#local_outputs = args[num_inputs:len(args)]
# save for backward
#ctx.inputs = list(local_inputs)
ctx.inputs = args
local_outputs = []
for o in outputs:
local_outputs.append(torch.zeros_like(o, requires_grad=True))
ctx.outputs = local_outputs
# ensure inputs match adapter
assert_device(adapter, args)
# launch
if adapter == 'cpu':
func.forward_cpu(*[dim, *args, *ctx.outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.forward_cuda(*[dim, *args, *ctx.outputs])
ret = tuple(ctx.outputs)
return ret
@staticmethod
def backward(ctx, *grads):
# ensure grads are contiguous in memory
adj_outputs = make_contiguous(grads)
# alloc grads
adj_inputs = alloc_grads(ctx.inputs, adapter)
# if we don't need outputs then make empty tensors to skip the write
local_outputs = ctx.outputs
# if preserve_output == True:
# local_outputs = ctx.outputs
# else:
# local_outputs = []
# for o in range(num_outputs):
# local_outputs.append(torch.FloatTensor().to(adapter))
# print("backward")
# print("--------")
# print (" inputs")
# for i in ctx.inputs:
# print(i)
# print (" outputs")
# for o in ctx.outputs:
# print(o)
# print (" adj_inputs")
# for adj_i in adj_inputs:
# print(adj_i)
# print (" adj_outputs")
# for adj_o in adj_outputs:
# print(adj_o)
# launch
if adapter == 'cpu':
func.backward_cpu(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.backward_cuda(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])
# filter grads replaces empty tensors / constant params with None
ret = list(filter_grads(adj_inputs))
for i in range(num_outputs):
ret.append(None)
return tuple(ret)
# run
params = [*inputs]
torch.set_printoptions(edgeitems=3)
if (check_grad == True and no_grad == False):
try:
torch.autograd.gradcheck(TorchFunc.apply, params, eps=1e-2, atol=1e-3, rtol=1.e-3, raise_exception=True)
except Exception as e:
print(str(func.func.__name__) + " failed: " + str(e))
output = TorchFunc.apply(*params)
return output
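# Note: launch_torch is used by Tape.launch below when dflex.config.check_grad
# is enabled; it re-runs the kernel through a plain torch.autograd.Function so
# torch.autograd.gradcheck can compare the hand-written adjoints against
# finite differences.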
class Tape:
def __init__(self):
self.launches = []
# dictionary mapping Tensor inputs to their adjoint
self.adjoints = {}
def launch(self, func, dim, inputs, outputs, adapter, preserve_output=False, skip_check_grad=False):
if (dim > 0):
# run kernel
if adapter == 'cpu':
func.forward_cpu(*[dim, *inputs, *outputs])
elif torch.device(adapter).type == 'cuda': #adapter.startswith('cuda'):
func.forward_cuda(*[dim, *inputs, *outputs])
if dflex.config.verify_fp:
check_adapter(inputs, adapter)
check_adapter(outputs, adapter)
check_finite(inputs)
check_finite(outputs)
# record launch
if dflex.config.no_grad == False:
self.launches.append([func, dim, inputs, outputs, adapter, preserve_output])
# optionally run grad check
if dflex.config.check_grad == True and skip_check_grad == False:
# copy inputs and outputs to avoid disturbing the computational graph
inputs_copy = copy_params(inputs)
outputs_copy = copy_params(outputs)
launch_torch(func, dim, inputs_copy, outputs_copy, adapter, preserve_output, check_grad=True)
def replay(self):
for kernel in reversed(self.launches):
func = kernel[0]
dim = kernel[1]
inputs = kernel[2]
#outputs = to_strong_list(kernel[3])
outputs = kernel[3]
adapter = kernel[4]
# lookup adj_inputs
adj_inputs = []
adj_outputs = []
# build input adjoints
for i in inputs:
if i in self.adjoints:
adj_inputs.append(self.adjoints[i])
else:
if torch.is_tensor(i):
adj_inputs.append(self.alloc_grad(i))
else:
adj_inputs.append(type(i)())
# build output adjoints
for o in outputs:
if o in self.adjoints:
adj_outputs.append(self.adjoints[o])
else:
# no output adjoint means the output wasn't used in the loss function so
# allocate a zero tensor (they will still be read by the kernels)
adj_outputs.append(self.alloc_grad(o))
# launch reverse
if adapter == 'cpu':
func.backward_cpu(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.backward_cuda(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])
if dflex.config.verify_fp:
check_finite(inputs)
check_finite(outputs)
check_finite(adj_inputs)
check_finite(adj_outputs)
def reset(self):
self.adjoints = {}
self.launches = []
def alloc_grad(self, t):
if t.dtype == torch.float32 and t.requires_grad:
# zero tensor
self.adjoints[t] = torch.zeros_like(t)
return self.adjoints[t]
else:
# null tensor
return torch.FloatTensor().to(t.device)
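# Minimal hand-driven usage sketch for Tape (the simulation code normally wraps
# this in a torch.autograd.Function; `my_kernel` and the tensors are placeholders):
#
#   tape = Tape()
#   tape.launch(func=my_kernel, dim=n, inputs=[x], outputs=[y], adapter='cpu')
#   tape.adjoints[y] = torch.ones_like(y)   # seed dL/dy before replay
#   tape.replay()                           # runs adjoint kernels in reverse order
#   dx = tape.adjoints[x]                   # accumulated for float32 tensors with requires_grad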
# helper that given a set of inputs, will generate a set of output grad buffers
def alloc_grads(inputs, adapter):
"""helper that generates output grad buffers for a set of inputs
on the specified device.
Args:
inputs (iterable of Tensors, other literals): list of Tensors
to generate gradient buffers for. Non-tensors are ignored.
        adapter (str): name of the torch device used as the storage location
            of the allocated gradient buffers (e.g. 'cpu' or 'cuda').
"""
grads = []
for arg in inputs:
if (torch.is_tensor(arg)):
if (arg.requires_grad and arg.dtype == torch.float):
grads.append(torch.zeros_like(arg, device=adapter))
#grads.append(lookup_grad(arg))
else:
grads.append(torch.FloatTensor().to(adapter))
else:
grads.append(type(arg)())
return grads
def matmul(tape, m, n, k, t1, t2, A, B, C, adapter):
if (adapter == 'cpu'):
threads = 1
else:
threads = 256 # should match the threadblock size
tape.launch(
func=dflex.eval_dense_gemm,
dim=threads,
inputs=[
m,
n,
k,
t1,
t2,
A,
B,
],
outputs=[
C
],
adapter=adapter,
preserve_output=False)
def matmul_batched(tape, batch_count, m, n, k, t1, t2, A_start, B_start, C_start, A, B, C, adapter):
if (adapter == 'cpu'):
threads = batch_count
else:
threads = 256*batch_count # must match the threadblock size used in adjoint.py
tape.launch(
func=dflex.eval_dense_gemm_batched,
dim=threads,
inputs=[
m,
n,
k,
t1,
t2,
A_start,
B_start,
C_start,
A,
B,
],
outputs=[
C
],
adapter=adapter,
preserve_output=False) | 61,318 | Python | 25.683638 | 229 | 0.535161 |
vstrozzi/FRL-SHAC-Extension/dflex/extension/dflex.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""dFlex Kit extension
Allows setting up, training, and running inference on dFlex optimization environments.
"""
import os
import subprocess
import carb
import carb.input
import math
import numpy as np
import omni.kit.ui
import omni.appwindow
import omni.kit.editor
import omni.timeline
import omni.usd
import omni.ui as ui
from pathlib import Path
ICON_PATH = Path(__file__).parent.parent.joinpath("icons")
from pxr import Usd, UsdGeom, Sdf, Gf
import torch
from omni.kit.settings import create_setting_widget, create_setting_widget_combo, SettingType, get_settings_interface
KIT_GREEN = 0xFF8A8777
LABEL_PADDING = 120
DARK_WINDOW_STYLE = {
"Button": {"background_color": 0xFF292929, "margin": 3, "padding": 3, "border_radius": 2},
"Button.Label": {"color": 0xFFCCCCCC},
"Button:hovered": {"background_color": 0xFF9E9E9E},
"Button:pressed": {"background_color": 0xC22A8778},
"VStack::main_v_stack": {"secondary_color": 0x0, "margin_width": 10, "margin_height": 0},
"VStack::frame_v_stack": {"margin_width": 15, "margin_height": 10},
"Rectangle::frame_background": {"background_color": 0xFF343432, "border_radius": 5},
"Field::models": {"background_color": 0xFF23211F, "font_size": 14, "color": 0xFFAAAAAA, "border_radius": 4.0},
"Frame": {"background_color": 0xFFAAAAAA},
"Label": {"font_size": 14, "color": 0xFF8A8777},
"Label::status": {"font_size": 14, "color": 0xFF8AFF77}
}
CollapsableFrame_style = {
"CollapsableFrame": {
"background_color": 0xFF343432,
"secondary_color": 0xFF343432,
"color": 0xFFAAAAAA,
"border_radius": 4.0,
"border_color": 0x0,
"border_width": 0,
"font_size": 14,
"padding": 0,
},
"HStack::header": {"margin": 5},
"CollapsableFrame:hovered": {"secondary_color": 0xFF3A3A3A},
"CollapsableFrame:pressed": {"secondary_color": 0xFF343432},
}
experiment = None
class Extension:
def __init__(self):
self.MENU_SHOW_WINDOW = "Window/dFlex"
self.MENU_INSERT_REFERENCE = "Utilities/Insert Reference"
self._editor_window = None
self._window_Frame = None
self.time = 0.0
self.plot = None
self.log = None
self.status = None
self.mode = 'stopped'
self.properties = {}
# add some helper menus
self.menus = []
def on_shutdown(self):
self._editor_window = None
self.menus = []
#self.input.unsubscribe_to_keyboard_events(self.appwindow.get_keyboard(), self.key_sub)
def on_startup(self):
self.appwindow = omni.appwindow.get_default_app_window()
self.editor = omni.kit.editor.get_editor_interface()
self.input = carb.input.acquire_input_interface()
self.timeline = omni.timeline.get_timeline_interface()
self.usd_context = omni.usd.get_context()
# event subscriptions
self.stage_sub = self.usd_context.get_stage_event_stream().create_subscription_to_pop(self.on_stage, name="dFlex")
self.update_sub = self.editor.subscribe_to_update_events(self.on_update)
#self.key_sub = self.input.subscribe_to_keyboard_events(self.appwindow.get_keyboard(), self.on_key)
self.menus.append(omni.kit.ui.get_editor_menu().add_item(self.MENU_SHOW_WINDOW, self.ui_on_menu, True, 11))
self.menus.append(omni.kit.ui.get_editor_menu().add_item(self.MENU_INSERT_REFERENCE, self.ui_on_menu))
self.reload()
self.build_ui()
def format(self, s):
return s.replace("_", " ").title()
def add_float_field(self, label, x, low=0.0, high=1.0):
with ui.HStack():
ui.Label(self.format(label), width=120)
self.add_property(label, ui.FloatSlider(name="value", width=150, min=low, max=high), x)
def add_int_field(self, label, x, low=0, high=100):
with ui.HStack():
ui.Label(self.format(label), width=120)
self.add_property(label, ui.IntSlider(name="value", width=150, min=low, max=high), x)
def add_combo_field(self, label, i, options):
with ui.HStack():
ui.Label(self.format(label), width=120)
ui.ComboBox(i, *options, width=150) # todo: how does the model work for combo boxes in omni.ui
def add_bool_field(self, label, b):
with ui.HStack():
ui.Label(self.format(label), width=120)
self.add_property(label, ui.CheckBox(width=10), b)
def add_property(self, label, widget, value):
self.properties[label] = widget
widget.model.set_value(value)
def ui_on_menu(self, menu, value):
if menu == self.MENU_SHOW_WINDOW:
            if self._editor_window:
                if value:
                    self._editor_window.show()
                else:
                    self._editor_window.hide()
            omni.kit.ui.get_editor_menu().set_value(self.MENU_SHOW_WINDOW, value)
if menu == self.MENU_INSERT_REFERENCE:
self.file_pick = omni.kit.ui.FilePicker("Select USD File", file_type=omni.kit.ui.FileDialogSelectType.FILE)
self.file_pick.set_file_selected_fn(self.ui_on_select_ref_fn)
self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)
def ui_on_select_ref_fn(self, real_path):
file = os.path.normpath(real_path)
name = os.path.basename(file)
stem = os.path.splitext(name)[0]
stage = self.usd_context.get_stage()
stage_path = stage.GetRootLayer().realPath
base = os.path.commonpath([real_path, stage_path])
rel_path = os.path.relpath(real_path, base)
over = stage.OverridePrim('/' + stem)
over.GetReferences().AddReference(rel_path)
def ui_on_select_script_fn(self):
# file picker
self.file_pick = omni.kit.ui.FilePicker("Select Python Script", file_type=omni.kit.ui.FileDialogSelectType.FILE)
self.file_pick.set_file_selected_fn(self.set_stage_script)
self.file_pick.add_filter("Python Files (*.py)", ".*.py")
self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)
def ui_on_clear_script_fn(self, widget):
self.clear_stage_script()
def ui_on_select_network_fn(self):
# file picker
self.file_pick = omni.kit.ui.FilePicker("Select Model", file_type=omni.kit.ui.FileDialogSelectType.FILE)
self.file_pick.set_file_selected_fn(self.set_network)
self.file_pick.add_filter("PyTorch Files (*.pt)", ".*.pt")
self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)
# build panel
def build_ui(self):
stage = self.usd_context.get_stage()
self._editor_window = ui.Window("dFlex", width=450, height=800)
self._editor_window.frame.set_style(DARK_WINDOW_STYLE)
with self._editor_window.frame:
with ui.VStack():
self._window_Frame = ui.ScrollingFrame(
name="canvas",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
)
with self._window_Frame:
with ui.VStack(spacing=6, name="main_v_stack"):
ui.Spacer(height=5)
with ui.CollapsableFrame(title="Experiment", height=60, style=CollapsableFrame_style):
with ui.VStack(spacing=4, name="frame_v_stack"):
with ui.HStack():
ui.Label("Script", name="label", width=120)
s = ""
if (self.get_stage_script() != None):
s = self.get_stage_script()
                                    ui.StringField(name="models", tooltip="Training Python script").model.set_value(s)
ui.Button("", image_url="resources/icons/folder.png", width=15, image_width=15, clicked_fn=self.ui_on_select_script_fn)
ui.Button("Clear", width=15, clicked_fn=self.clear_stage_script)
ui.Button("Reload", width=15, clicked_fn=self.reload)
with ui.HStack():
ui.Label("Hot Reload", width=100)
ui.CheckBox(width=10).model.set_value(False)
if (experiment):
with ui.CollapsableFrame(height=60, title="Simulation Settings", style=CollapsableFrame_style):
with ui.VStack(spacing=4, name="frame_v_stack"):
self.add_int_field("sim_substeps", 4, 1, 100)
self.add_float_field("sim_duration", 5.0, 0.0, 30.0)
with ui.CollapsableFrame(title="Training Settings", height=60, style=CollapsableFrame_style):
with ui.VStack(spacing=4, name="frame_v_stack"):
self.add_int_field("train_iters", 64, 1, 100)
self.add_float_field("train_rate", 0.1, 0.0, 10.0)
self.add_combo_field("train_optimizer", 0, ["GD", "SGD", "L-BFGS"])
with ui.CollapsableFrame(title="Actions", height=10, style=CollapsableFrame_style):
with ui.VStack(spacing=4, name="frame_v_stack"):
with ui.HStack():
ui.Label("Network", name="label", width=120)
s = ""
if (self.get_network() != None):
s = self.get_network()
ui.StringField(name="models", tooltip="Pretrained PyTorch network").model.set_value(s)
ui.Button("", image_url="resources/icons/folder.png", width=15, image_width=15, clicked_fn=self.ui_on_select_network_fn)
ui.Button("Clear", width=15, clicked_fn=self.clear_network)
with ui.HStack():
p = (1.0/6.0)*100.0
ui.Button("Run", width=ui.Percent(p), clicked_fn=self.run)
ui.Button("Train", width=ui.Percent(p), clicked_fn=self.train)
ui.Button("Stop", width=ui.Percent(p), clicked_fn=self.stop)
ui.Button("Reset", width=ui.Percent(p), clicked_fn=self.reset)
self.add_bool_field("record", True)
with ui.HStack():
ui.Label("Status: ", width=120)
self.status = ui.Label("", name="status", width=200)
with ui.CollapsableFrame(title="Loss", style=CollapsableFrame_style):
data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]
self.plot = ui.Plot(ui.Type.LINE, -1.0, 1.0, *data, height=200, style={"color": 0xff00ffFF})
# with ui.ScrollingFrame(
# name="log",
# horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
# height=200,
# width=ui.Percent(95)
# ):
with ui.CollapsableFrame(title="Log", style=CollapsableFrame_style):
with ui.VStack(spacing=4, name="frame_v_stack"):
self.log = ui.Label("", height=200)
def reload(self):
path = self.get_stage_script()
if (path):
# read code to string
file = open(path)
code = file.read()
file.close()
# run it in the local environment
exec(code, globals(), globals())
self.build_ui()
# methods for storing script in stage metadata
def get_stage_script(self):
stage = self.usd_context.get_stage()
custom_data = stage.GetEditTarget().GetLayer().customLayerData
print(custom_data)
if "script" in custom_data:
return custom_data["script"]
else:
return None
def set_stage_script(self, real_path):
path = os.path.normpath(real_path)
print("Setting stage script to: " + str(path))
stage = self.usd_context.get_stage()
with Sdf.ChangeBlock():
custom_data = stage.GetEditTarget().GetLayer().customLayerData
custom_data["script"] = path
stage.GetEditTarget().GetLayer().customLayerData = custom_data
# rebuild ui
self.build_ui()
def clear_stage_script(self):
stage = self.usd_context.get_stage()
with Sdf.ChangeBlock():
custom_data = stage.GetEditTarget().GetLayer().customLayerData
if "script" in custom_data:
del custom_data["script"]
stage.GetEditTarget().GetLayer().customLayerData = custom_data
self.build_ui()
def set_network(self, real_path):
path = os.path.normpath(real_path)
experiment.network_file = path
self.build_ui()
def get_network(self):
return experiment.network_file
def clear_network(self):
experiment.network_file = None
self.build_ui()
def on_key(self, event, *args, **kwargs):
# if event.keyboard == self.appwindow.get_keyboard():
# if event.type == carb.input.KeyboardEventType.KEY_PRESS:
# if event.input == carb.input.KeyboardInput.ESCAPE:
# self.stop()
# return True
pass
def on_stage(self, stage_event):
if stage_event.type == int(omni.usd.StageEventType.OPENED):
self.build_ui()
self.reload()
def on_update(self, dt):
if (experiment):
stage = self.usd_context.get_stage()
stage.SetStartTimeCode(0.0)
stage.SetEndTimeCode(experiment.render_time*60.0)
stage.SetTimeCodesPerSecond(60.0)
# pass parameters to the experiment
if ('record' in self.properties):
experiment.record = self.properties['record'].model.get_value_as_bool()
# experiment.train_rate = self.get_property('train_rate')
# experiment.train_iters = self.get_property('train_iters')
# experiment.sim_duration = self.get_property('sim_duration')
# experiment.sim_substeps = self.get_property('sim_substeps')
if (self.mode == 'training'):
experiment.train()
# update error plot
if (self.plot):
self.plot.scale_min = np.min(experiment.train_loss)
self.plot.scale_max = np.max(experiment.train_loss)
self.plot.set_data(*experiment.train_loss)
elif (self.mode == 'inference'):
experiment.run()
# update stage time (allow scrubbing while stopped)
if (self.mode != 'stopped'):
self.timeline.set_current_time(experiment.render_time*60.0)
# update log
if (self.log):
self.log.text = df.util.log_output
def set_status(self, str):
self.status.text = str
def train(self):
experiment.reset()
self.mode = 'training'
# update status
self.set_status('Training in progress, press [ESC] to cancel')
def run(self):
experiment.reset()
self.mode = 'inference'
# update status
self.set_status('Inference in progress, press [ESC] to cancel')
def stop(self):
self.mode = 'stopped'
# update status
self.set_status('Stopped')
def reset(self):
experiment.reset()
self.stop()
def get_extension():
return Extension()
| 16,913 | Python | 35.689805 | 160 | 0.549814 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/mat33.h | #pragma once
//----------------------------------------------------------
// mat33
struct mat33
{
inline CUDA_CALLABLE mat33(float3 c0, float3 c1, float3 c2)
{
data[0][0] = c0.x;
data[1][0] = c0.y;
data[2][0] = c0.z;
data[0][1] = c1.x;
data[1][1] = c1.y;
data[2][1] = c1.z;
data[0][2] = c2.x;
data[1][2] = c2.y;
data[2][2] = c2.z;
}
inline CUDA_CALLABLE mat33(float m00=0.0f, float m01=0.0f, float m02=0.0f,
float m10=0.0f, float m11=0.0f, float m12=0.0f,
float m20=0.0f, float m21=0.0f, float m22=0.0f)
{
data[0][0] = m00;
data[1][0] = m10;
data[2][0] = m20;
data[0][1] = m01;
data[1][1] = m11;
data[2][1] = m21;
data[0][2] = m02;
data[1][2] = m12;
data[2][2] = m22;
}
CUDA_CALLABLE float3 get_row(int index) const
{
return (float3&)data[index];
}
CUDA_CALLABLE void set_row(int index, const float3& v)
{
(float3&)data[index] = v;
}
CUDA_CALLABLE float3 get_col(int index) const
{
return float3(data[0][index], data[1][index], data[2][index]);
}
CUDA_CALLABLE void set_col(int index, const float3& v)
{
data[0][index] = v.x;
data[1][index] = v.y;
data[2][index] = v.z;
}
// row major storage assumed to be compatible with PyTorch
float data[3][3];
};
#ifdef CUDA
inline __device__ void atomic_add(mat33 * addr, mat33 value) {
atomicAdd(&((addr -> data)[0][0]), value.data[0][0]);
atomicAdd(&((addr -> data)[1][0]), value.data[1][0]);
atomicAdd(&((addr -> data)[2][0]), value.data[2][0]);
atomicAdd(&((addr -> data)[0][1]), value.data[0][1]);
atomicAdd(&((addr -> data)[1][1]), value.data[1][1]);
atomicAdd(&((addr -> data)[2][1]), value.data[2][1]);
atomicAdd(&((addr -> data)[0][2]), value.data[0][2]);
atomicAdd(&((addr -> data)[1][2]), value.data[1][2]);
atomicAdd(&((addr -> data)[2][2]), value.data[2][2]);
}
#endif
inline CUDA_CALLABLE void adj_mat33(float3 c0, float3 c1, float3 c2,
float3& a0, float3& a1, float3& a2,
const mat33& adj_ret)
{
// column constructor
a0 += adj_ret.get_col(0);
a1 += adj_ret.get_col(1);
a2 += adj_ret.get_col(2);
}
inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
float m10, float m11, float m12,
float m20, float m21, float m22,
float& a00, float& a01, float& a02,
float& a10, float& a11, float& a12,
float& a20, float& a21, float& a22,
const mat33& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE float index(const mat33& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE mat33 add(const mat33& a, const mat33& b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[i][j] + b.data[i][j];
}
}
return t;
}
inline CUDA_CALLABLE mat33 mul(const mat33& a, float b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[i][j]*b;
}
}
return t;
}
inline CUDA_CALLABLE float3 mul(const mat33& a, const float3& b)
{
float3 r = a.get_col(0)*b.x +
a.get_col(1)*b.y +
a.get_col(2)*b.z;
return r;
}
inline CUDA_CALLABLE mat33 mul(const mat33& a, const mat33& b)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
for (int k=0; k < 3; ++k)
{
t.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return t;
}
inline CUDA_CALLABLE mat33 transpose(const mat33& a)
{
mat33 t;
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
t.data[i][j] = a.data[j][i];
}
}
return t;
}
inline CUDA_CALLABLE float determinant(const mat33& m)
{
return dot(float3(m.data[0]), cross(float3(m.data[1]), float3(m.data[2])));
}
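// Note: determinant(m) is the scalar triple product of the rows,
// dot(row0, cross(row1, row2)). Its gradient with respect to each row is the
// cross product of the other two rows taken in cyclic order, which is exactly
// what adj_determinant() below accumulates.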
inline CUDA_CALLABLE mat33 outer(const float3& a, const float3& b)
{
return mat33(a*b.x, a*b.y, a*b.z);
}
inline CUDA_CALLABLE mat33 skew(const float3& a)
{
mat33 out(0.0f, -a.z, a.y,
a.z, 0.0f, -a.x,
-a.y, a.x, 0.0f);
return out;
}
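// skew(a) is the cross-product matrix of a, i.e. mul(skew(a), b) == cross(a, b)
// for any float3 b; for example skew(float3(1,0,0)) maps (0,1,0) to (0,0,1).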
inline void CUDA_CALLABLE adj_index(const mat33& m, int row, int col, mat33& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
inline CUDA_CALLABLE void adj_add(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_a.data[i][j] += adj_ret.data[i][j];
adj_b.data[i][j] += adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, float b, mat33& adj_a, float& adj_b, const mat33& adj_ret)
{
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_a.data[i][j] += b*adj_ret.data[i][j];
adj_b += a.data[i][j]*adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, const float3& b, mat33& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += outer(adj_ret, b);
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_mul(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
adj_a += mul(adj_ret, transpose(b));
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_transpose(const mat33& a, mat33& adj_a, const mat33& adj_ret)
{
adj_a += transpose(adj_ret);
}
inline CUDA_CALLABLE void adj_determinant(const mat33& m, mat33& adj_m, float adj_ret)
{
(float3&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
(float3&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
(float3&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
}
inline CUDA_CALLABLE void adj_skew(const float3& a, float3& adj_a, const mat33& adj_ret)
{
mat33 out(0.0f, -a.z, a.y,
a.z, 0.0f, -a.x,
-a.y, a.x, 0.0f);
adj_a.x += adj_ret.data[2][1] - adj_ret.data[1][2];
adj_a.y += adj_ret.data[0][2] - adj_ret.data[2][0];
adj_a.z += adj_ret.data[1][0] - adj_ret.data[0][1];
} | 6,549 | C | 24.192308 | 126 | 0.505726 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/spatial.h | #pragma once
//---------------------------------------------------------------------------------
// Represents a twist in se(3)
struct spatial_vector
{
float3 w;
float3 v;
CUDA_CALLABLE inline spatial_vector(float a, float b, float c, float d, float e, float f) : w(a, b, c), v(d, e, f) {}
CUDA_CALLABLE inline spatial_vector(float3 w=float3(), float3 v=float3()) : w(w), v(v) {}
CUDA_CALLABLE inline spatial_vector(float a) : w(a, a, a), v(a, a, a) {}
CUDA_CALLABLE inline float operator[](int index) const
{
assert(index < 6);
return (&w.x)[index];
}
CUDA_CALLABLE inline float& operator[](int index)
{
assert(index < 6);
return (&w.x)[index];
}
};
CUDA_CALLABLE inline spatial_vector operator - (spatial_vector a)
{
return spatial_vector(-a.w, -a.v);
}
CUDA_CALLABLE inline spatial_vector add(const spatial_vector& a, const spatial_vector& b)
{
return { a.w + b.w, a.v + b.v };
}
CUDA_CALLABLE inline spatial_vector sub(const spatial_vector& a, const spatial_vector& b)
{
return { a.w - b.w, a.v - b.v };
}
CUDA_CALLABLE inline spatial_vector mul(const spatial_vector& a, float s)
{
return { a.w*s, a.v*s };
}
CUDA_CALLABLE inline float spatial_dot(const spatial_vector& a, const spatial_vector& b)
{
return dot(a.w, b.w) + dot(a.v, b.v);
}
CUDA_CALLABLE inline spatial_vector spatial_cross(const spatial_vector& a, const spatial_vector& b)
{
float3 w = cross(a.w, b.w);
float3 v = cross(a.v, b.w) + cross(a.w, b.v);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline spatial_vector spatial_cross_dual(const spatial_vector& a, const spatial_vector& b)
{
float3 w = cross(a.w, b.w) + cross(a.v, b.v);
float3 v = cross(a.w, b.v);
return spatial_vector(w, v);
}
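// spatial_cross is the motion-space cross product (the se(3) Lie bracket), and
// spatial_cross_dual is its dual acting on force-space vectors; they satisfy
// spatial_dot(spatial_cross(a, b), c) == -spatial_dot(b, spatial_cross_dual(a, c)).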
CUDA_CALLABLE inline float3 spatial_top(const spatial_vector& a)
{
return a.w;
}
CUDA_CALLABLE inline float3 spatial_bottom(const spatial_vector& a)
{
return a.v;
}
// adjoint methods
CUDA_CALLABLE inline void adj_spatial_vector(
float a, float b, float c,
float d, float e, float f,
float& adj_a, float& adj_b, float& adj_c,
float& adj_d, float& adj_e,float& adj_f,
const spatial_vector& adj_ret)
{
adj_a += adj_ret.w.x;
adj_b += adj_ret.w.y;
adj_c += adj_ret.w.z;
adj_d += adj_ret.v.x;
adj_e += adj_ret.v.y;
adj_f += adj_ret.v.z;
}
CUDA_CALLABLE inline void adj_spatial_vector(const float3& w, const float3& v, float3& adj_w, float3& adj_v, const spatial_vector& adj_ret)
{
adj_w += adj_ret.w;
adj_v += adj_ret.v;
}
CUDA_CALLABLE inline void adj_add(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_add(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_add(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_sub(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_sub(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_sub(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_mul(const spatial_vector& a, float s, spatial_vector& adj_a, float& adj_s, const spatial_vector& adj_ret)
{
adj_mul(a.w, s, adj_a.w, adj_s, adj_ret.w);
adj_mul(a.v, s, adj_a.v, adj_s, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_dot(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const float& adj_ret)
{
adj_dot(a.w, b.w, adj_a.w, adj_b.w, adj_ret);
adj_dot(a.v, b.v, adj_a.v, adj_b.v, adj_ret);
}
CUDA_CALLABLE inline void adj_spatial_cross(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_cross(a.v, b.w, adj_a.v, adj_b.w, adj_ret.v);
adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_cross_dual(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
adj_cross(a.v, b.v, adj_a.v, adj_b.v, adj_ret.w);
adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}
CUDA_CALLABLE inline void adj_spatial_top(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
adj_a.w += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_bottom(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
adj_a.v += adj_ret;
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_vector* addr, const spatial_vector& value) {
atomic_add(&addr->w, value.w);
atomic_add(&addr->v, value.v);
}
#endif
//---------------------------------------------------------------------------------
// Represents a rigid body transformation
struct spatial_transform
{
float3 p;
quat q;
CUDA_CALLABLE inline spatial_transform(float3 p=float3(), quat q=quat()) : p(p), q(q) {}
CUDA_CALLABLE inline spatial_transform(float) {} // helps uniform initialization
};
CUDA_CALLABLE inline spatial_transform spatial_transform_identity()
{
return spatial_transform(float3(), quat_identity());
}
CUDA_CALLABLE inline float3 spatial_transform_get_translation(const spatial_transform& t)
{
return t.p;
}
CUDA_CALLABLE inline quat spatial_transform_get_rotation(const spatial_transform& t)
{
return t.q;
}
CUDA_CALLABLE inline spatial_transform spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b)
{
return { rotate(a.q, b.p) + a.p, mul(a.q, b.q) };
}
/*
CUDA_CALLABLE inline spatial_transform spatial_transform_inverse(const spatial_transform& t)
{
quat q_inv = inverse(t.q);
return spatial_transform(-rotate(q_inv, t.p), q_inv);
}
*/
CUDA_CALLABLE inline float3 spatial_transform_vector(const spatial_transform& t, const float3& x)
{
return rotate(t.q, x);
}
CUDA_CALLABLE inline float3 spatial_transform_point(const spatial_transform& t, const float3& x)
{
return t.p + rotate(t.q, x);
}
// Frank & Park definition 3.20, pg 100
CUDA_CALLABLE inline spatial_vector spatial_transform_twist(const spatial_transform& t, const spatial_vector& x)
{
float3 w = rotate(t.q, x.w);
float3 v = rotate(t.q, x.v) + cross(t.p, w);
return spatial_vector(w, v);
}
CUDA_CALLABLE inline spatial_vector spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x)
{
float3 v = rotate(t.q, x.v);
float3 w = rotate(t.q, x.w) + cross(t.p, v);
return spatial_vector(w, v);
}
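// In block-matrix form, both maps above are the 6x6 adjoint of the transform t
// (cf. the Frank & Park reference above): a twist transforms by
// [R 0; skew(p)*R R], a wrench by the corresponding dual map.
// spatial_adjoint(R, S) further below assembles exactly this block layout
// from R and S = skew(p)*R.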
CUDA_CALLABLE inline spatial_transform add(const spatial_transform& a, const spatial_transform& b)
{
return { a.p + b.p, a.q + b.q };
}
CUDA_CALLABLE inline spatial_transform sub(const spatial_transform& a, const spatial_transform& b)
{
return { a.p - b.p, a.q - b.q };
}
CUDA_CALLABLE inline spatial_transform mul(const spatial_transform& a, float s)
{
return { a.p*s, a.q*s };
}
// adjoint methods
CUDA_CALLABLE inline void adj_add(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
adj_add(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
adj_add(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_sub(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
adj_sub(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
adj_sub(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_mul(const spatial_transform& a, float s, spatial_transform& adj_a, float& adj_s, const spatial_transform& adj_ret)
{
adj_mul(a.p, s, adj_a.p, adj_s, adj_ret.p);
adj_mul(a.q, s, adj_a.q, adj_s, adj_ret.q);
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_transform* addr, const spatial_transform& value) {
atomic_add(&addr->p, value.p);
atomic_add(&addr->q, value.q);
}
#endif
CUDA_CALLABLE inline void adj_spatial_transform(const float3& p, const quat& q, float3& adj_p, quat& adj_q, const spatial_transform& adj_ret)
{
adj_p += adj_ret.p;
adj_q += adj_ret.q;
}
CUDA_CALLABLE inline void adj_spatial_transform_identity(const spatial_transform& adj_ret)
{
// nop
}
CUDA_CALLABLE inline void adj_spatial_transform_get_translation(const spatial_transform& t, spatial_transform& adj_t, const float3& adj_ret)
{
adj_t.p += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_transform_get_rotation(const spatial_transform& t, spatial_transform& adj_t, const quat& adj_ret)
{
adj_t.q += adj_ret;
}
/*
CUDA_CALLABLE inline void adj_spatial_transform_inverse(const spatial_transform& t, spatial_transform& adj_t, const spatial_transform& adj_ret)
{
//quat q_inv = inverse(t.q);
//return spatial_transform(-rotate(q_inv, t.p), q_inv);
quat q_inv = inverse(t.q);
float3 p = rotate(q_inv, t.p);
float3 np = -p;
quat adj_q_inv = 0.0f;
quat adj_q = 0.0f;
float3 adj_p = 0.0f;
float3 adj_np = 0.0f;
adj_spatial_transform(np, q_inv, adj_np, adj_q_inv, adj_ret);
adj_p = -adj_np;
adj_rotate(q_inv, t.p, adj_q_inv, adj_t.p, adj_p);
adj_inverse(t.q, adj_t.q, adj_q_inv);
}
*/
CUDA_CALLABLE inline void adj_spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
// translational part
adj_rotate(a.q, b.p, adj_a.q, adj_b.p, adj_ret.p);
adj_a.p += adj_ret.p;
// rotational part
adj_mul(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}
CUDA_CALLABLE inline void adj_spatial_transform_vector(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
}
CUDA_CALLABLE inline void adj_spatial_transform_point(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
adj_t.p += adj_ret;
}
CUDA_CALLABLE inline void adj_spatial_transform_twist(const spatial_transform& a, const spatial_vector& s, spatial_transform& adj_a, spatial_vector& adj_s, const spatial_vector& adj_ret)
{
printf("todo, %s, %d\n", __FILE__, __LINE__);
// float3 w = rotate(t.q, x.w);
// float3 v = rotate(t.q, x.v) + cross(t.p, w);
// return spatial_vector(w, v);
}
CUDA_CALLABLE inline void adj_spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x, spatial_transform& adj_t, spatial_vector& adj_x, const spatial_vector& adj_ret)
{
printf("todo, %s, %d\n", __FILE__, __LINE__);
// float3 v = rotate(t.q, x.v);
// float3 w = rotate(t.q, x.w) + cross(t.p, v);
// return spatial_vector(w, v);
}
/*
// should match model.py
#define JOINT_PRISMATIC 0
#define JOINT_REVOLUTE 1
#define JOINT_FIXED 2
#define JOINT_FREE 3
CUDA_CALLABLE inline spatial_transform spatial_jcalc(int type, float* joint_q, float3 axis, int start)
{
if (type == JOINT_REVOLUTE)
{
float q = joint_q[start];
spatial_transform X_jc = spatial_transform(float3(), quat_from_axis_angle(axis, q));
return X_jc;
}
else if (type == JOINT_PRISMATIC)
{
float q = joint_q[start];
spatial_transform X_jc = spatial_transform(axis*q, quat_identity());
return X_jc;
}
else if (type == JOINT_FREE)
{
float px = joint_q[start+0];
float py = joint_q[start+1];
float pz = joint_q[start+2];
float qx = joint_q[start+3];
float qy = joint_q[start+4];
float qz = joint_q[start+5];
float qw = joint_q[start+6];
spatial_transform X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw));
return X_jc;
}
// JOINT_FIXED
return spatial_transform(float3(), quat_identity());
}
CUDA_CALLABLE inline void adj_spatial_jcalc(int type, float* q, float3 axis, int start, int& adj_type, float* adj_q, float3& adj_axis, int& adj_start, const spatial_transform& adj_ret)
{
if (type == JOINT_REVOLUTE)
{
adj_quat_from_axis_angle(axis, q[start], adj_axis, adj_q[start], adj_ret.q);
}
else if (type == JOINT_PRISMATIC)
{
adj_mul(axis, q[start], adj_axis, adj_q[start], adj_ret.p);
}
else if (type == JOINT_FREE)
{
adj_q[start+0] += adj_ret.p.x;
adj_q[start+1] += adj_ret.p.y;
adj_q[start+2] += adj_ret.p.z;
adj_q[start+3] += adj_ret.q.x;
adj_q[start+4] += adj_ret.q.y;
adj_q[start+5] += adj_ret.q.z;
adj_q[start+6] += adj_ret.q.w;
}
}
*/
struct spatial_matrix
{
float data[6][6] = { { 0 } };
CUDA_CALLABLE inline spatial_matrix(float f=0.0f)
{
}
CUDA_CALLABLE inline spatial_matrix(
float a00, float a01, float a02, float a03, float a04, float a05,
float a10, float a11, float a12, float a13, float a14, float a15,
float a20, float a21, float a22, float a23, float a24, float a25,
float a30, float a31, float a32, float a33, float a34, float a35,
float a40, float a41, float a42, float a43, float a44, float a45,
float a50, float a51, float a52, float a53, float a54, float a55)
{
data[0][0] = a00;
data[0][1] = a01;
data[0][2] = a02;
data[0][3] = a03;
data[0][4] = a04;
data[0][5] = a05;
data[1][0] = a10;
data[1][1] = a11;
data[1][2] = a12;
data[1][3] = a13;
data[1][4] = a14;
data[1][5] = a15;
data[2][0] = a20;
data[2][1] = a21;
data[2][2] = a22;
data[2][3] = a23;
data[2][4] = a24;
data[2][5] = a25;
data[3][0] = a30;
data[3][1] = a31;
data[3][2] = a32;
data[3][3] = a33;
data[3][4] = a34;
data[3][5] = a35;
data[4][0] = a40;
data[4][1] = a41;
data[4][2] = a42;
data[4][3] = a43;
data[4][4] = a44;
data[4][5] = a45;
data[5][0] = a50;
data[5][1] = a51;
data[5][2] = a52;
data[5][3] = a53;
data[5][4] = a54;
data[5][5] = a55;
}
};
inline CUDA_CALLABLE float index(const spatial_matrix& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE spatial_matrix add(const spatial_matrix& a, const spatial_matrix& b)
{
spatial_matrix out;
for (int i=0; i < 6; ++i)
for (int j=0; j < 6; ++j)
out.data[i][j] = a.data[i][j] + b.data[i][j];
return out;
}
inline CUDA_CALLABLE spatial_vector mul(const spatial_matrix& a, const spatial_vector& b)
{
spatial_vector out;
for (int i=0; i < 6; ++i)
for (int j=0; j < 6; ++j)
out[i] += a.data[i][j]*b[j];
return out;
}
inline CUDA_CALLABLE spatial_matrix mul(const spatial_matrix& a, const spatial_matrix& b)
{
spatial_matrix out;
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
for (int k=0; k < 6; ++k)
{
out.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return out;
}
inline CUDA_CALLABLE spatial_matrix transpose(const spatial_matrix& a)
{
spatial_matrix out;
for (int i=0; i < 6; i++)
for (int j=0; j < 6; j++)
out.data[i][j] = a.data[j][i];
return out;
}
inline CUDA_CALLABLE spatial_matrix outer(const spatial_vector& a, const spatial_vector& b)
{
spatial_matrix out;
for (int i=0; i < 6; i++)
for (int j=0; j < 6; j++)
out.data[i][j] = a[i]*b[j];
return out;
}
CUDA_CALLABLE void print(spatial_transform t);
CUDA_CALLABLE void print(spatial_matrix m);
inline CUDA_CALLABLE spatial_matrix spatial_adjoint(const mat33& R, const mat33& S)
{
spatial_matrix adT;
// T = [R 0]
// [skew(p)*R R]
// diagonal blocks
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adT.data[i][j] = R.data[i][j];
adT.data[i+3][j+3] = R.data[i][j];
}
}
// lower off diagonal
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adT.data[i+3][j] = S.data[i][j];
}
}
return adT;
}
inline CUDA_CALLABLE void adj_spatial_adjoint(const mat33& R, const mat33& S, mat33& adj_R, mat33& adj_S, const spatial_matrix& adj_ret)
{
// diagonal blocks
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_R.data[i][j] += adj_ret.data[i][j];
adj_R.data[i][j] += adj_ret.data[i+3][j+3];
}
}
// lower off diagonal
for (int i=0; i < 3; ++i)
{
for (int j=0; j < 3; ++j)
{
adj_S.data[i][j] += adj_ret.data[i+3][j];
}
}
}
/*
// computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
inline CUDA_CALLABLE spatial_matrix spatial_transform_inertia(const spatial_transform& t, const spatial_matrix& I)
{
spatial_transform t_inv = spatial_transform_inverse(t);
float3 r1 = rotate(t_inv.q, float3(1.0, 0.0, 0.0));
float3 r2 = rotate(t_inv.q, float3(0.0, 1.0, 0.0));
float3 r3 = rotate(t_inv.q, float3(0.0, 0.0, 1.0));
mat33 R(r1, r2, r3);
mat33 S = mul(skew(t_inv.p), R);
spatial_matrix T = spatial_adjoint(R, S);
// first quadratic form, for derivation of the adjoint see https://people.maths.ox.ac.uk/gilesm/files/AD2008.pdf, section 2.3.2
return mul(mul(transpose(T), I), T);
}
*/
inline CUDA_CALLABLE void adj_add(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_vector& b, spatial_matrix& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
adj_a += outer(adj_ret, b);
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
adj_a += mul(adj_ret, transpose(b));
adj_b += mul(transpose(a), adj_ret);
}
inline CUDA_CALLABLE void adj_transpose(const spatial_matrix& a, spatial_matrix& adj_a, const spatial_matrix& adj_ret)
{
adj_a += transpose(adj_ret);
}
inline CUDA_CALLABLE void adj_spatial_transform_inertia(
const spatial_transform& xform, const spatial_matrix& I,
const spatial_transform& adj_xform, const spatial_matrix& adj_I,
spatial_matrix& adj_ret)
{
//printf("todo, %s, %d\n", __FILE__, __LINE__);
}
inline void CUDA_CALLABLE adj_index(const spatial_matrix& m, int row, int col, spatial_matrix& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
#ifdef CUDA
inline __device__ void atomic_add(spatial_matrix* addr, const spatial_matrix& value)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
atomicAdd(&addr->data[i][j], value.data[i][j]);
}
}
}
#endif
CUDA_CALLABLE inline int row_index(int stride, int i, int j)
{
return i*stride + j;
}
// builds the spatial Jacobian J, a (joint_count*6) x (dof_count) matrix
CUDA_CALLABLE inline void spatial_jacobian(
const spatial_vector* S,
const int* joint_parents,
const int* joint_qd_start,
int joint_start, // offset of the first joint for the articulation
int joint_count,
int J_start,
float* J)
{
const int articulation_dof_start = joint_qd_start[joint_start];
const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
const int articulation_dof_count = articulation_dof_end-articulation_dof_start;
// shift output pointers
const int S_start = articulation_dof_start;
S += S_start;
J += J_start;
for (int i=0; i < joint_count; ++i)
{
const int row_start = i * 6;
int j = joint_start + i;
while (j != -1)
{
const int joint_dof_start = joint_qd_start[j];
const int joint_dof_end = joint_qd_start[j+1];
const int joint_dof_count = joint_dof_end-joint_dof_start;
// fill out each row of the Jacobian walking up the tree
//for (int col=dof_start; col < dof_end; ++col)
for (int dof=0; dof < joint_dof_count; ++dof)
{
const int col = (joint_dof_start-articulation_dof_start) + dof;
J[row_index(articulation_dof_count, row_start+0, col)] = S[col].w.x;
J[row_index(articulation_dof_count, row_start+1, col)] = S[col].w.y;
J[row_index(articulation_dof_count, row_start+2, col)] = S[col].w.z;
J[row_index(articulation_dof_count, row_start+3, col)] = S[col].v.x;
J[row_index(articulation_dof_count, row_start+4, col)] = S[col].v.y;
J[row_index(articulation_dof_count, row_start+5, col)] = S[col].v.z;
}
j = joint_parents[j];
}
}
}
CUDA_CALLABLE inline void adj_spatial_jacobian(
const spatial_vector* S,
const int* joint_parents,
const int* joint_qd_start,
const int joint_start,
const int joint_count,
const int J_start,
const float* J,
// adjs
spatial_vector* adj_S,
int* adj_joint_parents,
int* adj_joint_qd_start,
int& adj_joint_start,
int& adj_joint_count,
int& adj_J_start,
const float* adj_J)
{
const int articulation_dof_start = joint_qd_start[joint_start];
const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
const int articulation_dof_count = articulation_dof_end-articulation_dof_start;
// shift output pointers
const int S_start = articulation_dof_start;
S += S_start;
J += J_start;
adj_S += S_start;
adj_J += J_start;
for (int i=0; i < joint_count; ++i)
{
const int row_start = i * 6;
int j = joint_start + i;
while (j != -1)
{
const int joint_dof_start = joint_qd_start[j];
const int joint_dof_end = joint_qd_start[j+1];
const int joint_dof_count = joint_dof_end-joint_dof_start;
// fill out each row of the Jacobian walking up the tree
//for (int col=dof_start; col < dof_end; ++col)
for (int dof=0; dof < joint_dof_count; ++dof)
{
const int col = (joint_dof_start-articulation_dof_start) + dof;
adj_S[col].w.x += adj_J[row_index(articulation_dof_count, row_start+0, col)];
adj_S[col].w.y += adj_J[row_index(articulation_dof_count, row_start+1, col)];
adj_S[col].w.z += adj_J[row_index(articulation_dof_count, row_start+2, col)];
adj_S[col].v.x += adj_J[row_index(articulation_dof_count, row_start+3, col)];
adj_S[col].v.y += adj_J[row_index(articulation_dof_count, row_start+4, col)];
adj_S[col].v.z += adj_J[row_index(articulation_dof_count, row_start+5, col)];
}
j = joint_parents[j];
}
}
}
CUDA_CALLABLE inline void spatial_mass(const spatial_matrix* I_s, int joint_start, int joint_count, int M_start, float* M)
{
const int stride = joint_count*6;
for (int l=0; l < joint_count; ++l)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
M[M_start + row_index(stride, l*6 + i, l*6 + j)] = I_s[joint_start + l].data[i][j];
}
}
}
}
CUDA_CALLABLE inline void adj_spatial_mass(
const spatial_matrix* I_s,
const int joint_start,
const int joint_count,
const int M_start,
const float* M,
spatial_matrix* adj_I_s,
int& adj_joint_start,
int& adj_joint_count,
int& adj_M_start,
const float* adj_M)
{
const int stride = joint_count*6;
for (int l=0; l < joint_count; ++l)
{
for (int i=0; i < 6; ++i)
{
for (int j=0; j < 6; ++j)
{
adj_I_s[joint_start + l].data[i][j] += adj_M[M_start + row_index(stride, l*6 + i, l*6 + j)];
}
}
}
}
| 24,501 | C | 28.099762 | 198 | 0.594057 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/mat22.h | #pragma once
//----------------------------------------------------------
// mat22
struct mat22
{
inline CUDA_CALLABLE mat22(float m00=0.0f, float m01=0.0f, float m10=0.0f, float m11=0.0f)
{
data[0][0] = m00;
data[1][0] = m10;
data[0][1] = m01;
data[1][1] = m11;
}
// row major storage assumed to be compatible with PyTorch
float data[2][2];
};
#ifdef CUDA
inline __device__ void atomic_add(mat22 * addr, mat22 value) {
// *addr += value;
atomicAdd(&((addr -> data)[0][0]), value.data[0][0]);
atomicAdd(&((addr -> data)[0][1]), value.data[0][1]);
atomicAdd(&((addr -> data)[1][0]), value.data[1][0]);
atomicAdd(&((addr -> data)[1][1]), value.data[1][1]);
}
#endif
inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE float index(const mat22& m, int row, int col)
{
return m.data[row][col];
}
inline CUDA_CALLABLE mat22 add(const mat22& a, const mat22& b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[i][j] + b.data[i][j];
}
}
return t;
}
inline CUDA_CALLABLE mat22 mul(const mat22& a, float b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[i][j]*b;
}
}
return t;
}
inline CUDA_CALLABLE mat22 mul(const mat22& a, const mat22& b)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
for (int k=0; k < 2; ++k)
{
t.data[i][j] += a.data[i][k]*b.data[k][j];
}
}
}
return t;
}
inline CUDA_CALLABLE mat22 transpose(const mat22& a)
{
mat22 t;
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
t.data[i][j] = a.data[j][i];
}
}
return t;
}
inline CUDA_CALLABLE float determinant(const mat22& m)
{
return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1];
}
inline void CUDA_CALLABLE adj_index(const mat22& m, int row, int col, mat22& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
adj_m.data[row][col] += adj_ret;
}
inline CUDA_CALLABLE void adj_add(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret)
{
for (int i=0; i < 2; ++i)
{
for (int j=0; j < 2; ++j)
{
            adj_a.data[i][j] += adj_ret.data[i][j];
            adj_b.data[i][j] += adj_ret.data[i][j];
}
}
}
inline CUDA_CALLABLE void adj_mul(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE void adj_transpose(const mat22& a, mat22& adj_a, const mat22& adj_ret)
{
printf("todo\n");
}
inline CUDA_CALLABLE void adj_determinant(const mat22& m, mat22& adj_m, float adj_ret)
{
adj_m.data[0][0] += m.data[1][1]*adj_ret;
adj_m.data[1][1] += m.data[0][0]*adj_ret;
adj_m.data[0][1] -= m.data[1][0]*adj_ret;
adj_m.data[1][0] -= m.data[0][1]*adj_ret;
}
| 3,206 | C | 21.744681 | 165 | 0.515908 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/vec2.h | #pragma once
struct float2
{
float x;
float y;
}; | 58 | C | 7.42857 | 13 | 0.586207 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/util.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
print(s)
global log_output
log_output = log_output + s + "\n"
# short hands
def length(a):
return np.linalg.norm(a)
def length_sq(a):
return np.dot(a, a)
# NumPy has no normalize() method..
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0.0:
return v
return v / norm
def skew(v):
return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
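# Illustrative sanity check (not part of the original API, name is a placeholder): the
# skew-symmetric matrix skew(v) reproduces the cross product, i.e. skew(v) @ x == cross(v, x).
# Wrapped in a helper so importing this module stays side-effect free.
def _example_skew_matches_cross():
    v = np.array((1.0, 2.0, 3.0))
    x = np.array((-0.5, 0.25, 4.0))
    assert np.allclose(np.dot(skew(v), x), np.cross(v, x))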
# math utils
def quat(i, j, k, w):
return np.array([i, j, k, w])
def quat_identity():
return np.array((0.0, 0.0, 0.0, 1.0))
def quat_inverse(q):
return np.array((-q[0], -q[1], -q[2], q[3]))
def quat_from_axis_angle(axis, angle):
v = np.array(axis)
half = angle * 0.5
w = math.cos(half)
sin_theta_over_two = math.sin(half)
v *= sin_theta_over_two
return np.array((v[0], v[1], v[2], w))
# rotate a vector
def quat_rotate(q, x):
x = np.array(x)
axis = np.array((q[0], q[1], q[2]))
return x * (2.0 * q[3] * q[3] - 1.0) + np.cross(axis, x) * q[3] * 2.0 + axis * np.dot(axis, x) * 2.0
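# Illustrative example (not part of the original API, name is a placeholder): a 90 degree
# rotation about z maps the x-axis onto the y-axis.
def _example_quat_rotate():
    q = quat_from_axis_angle(np.array((0.0, 0.0, 1.0)), math.pi * 0.5)
    assert np.allclose(quat_rotate(q, np.array((1.0, 0.0, 0.0))), (0.0, 1.0, 0.0), atol=1.0e-6)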
# multiply two quats
def quat_multiply(a, b):
return np.array((a[3] * b[0] + b[3] * a[0] + a[1] * b[2] - b[1] * a[2],
a[3] * b[1] + b[3] * a[1] + a[2] * b[0] - b[2] * a[0],
a[3] * b[2] + b[3] * a[2] + a[0] * b[1] - b[0] * a[1],
a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2]))
# convert to mat33
def quat_to_matrix(q):
c1 = quat_rotate(q, np.array((1.0, 0.0, 0.0)))
c2 = quat_rotate(q, np.array((0.0, 1.0, 0.0)))
c3 = quat_rotate(q, np.array((0.0, 0.0, 1.0)))
return np.array([c1, c2, c3]).T
def quat_rpy(roll, pitch, yaw):
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
w = (cy * cr * cp + sy * sr * sp)
x = (cy * sr * cp - sy * cr * sp)
y = (cy * cr * sp + sy * sr * cp)
z = (sy * cr * cp - cy * sr * sp)
return (x, y, z, w)
def quat_from_matrix(m):
tr = m[0, 0] + m[1, 1] + m[2, 2]
h = 0.0
if(tr >= 0.0):
h = math.sqrt(tr + 1.0)
w = 0.5 * h
h = 0.5 / h
x = (m[2, 1] - m[1, 2]) * h
y = (m[0, 2] - m[2, 0]) * h
z = (m[1, 0] - m[0, 1]) * h
else:
        i = 0
        if(m[1, 1] > m[0, 0]):
            i = 1
        if(m[2, 2] > m[i, i]):
            i = 2
if (i == 0):
h = math.sqrt((m[0, 0] - (m[1, 1] + m[2, 2])) + 1.0)
x = 0.5 * h
h = 0.5 / h
y = (m[0, 1] + m[1, 0]) * h
z = (m[2, 0] + m[0, 2]) * h
w = (m[2, 1] - m[1, 2]) * h
elif (i == 1):
            h = math.sqrt((m[1, 1] - (m[2, 2] + m[0, 0])) + 1.0)
y = 0.5 * h
h = 0.5 / h
z = (m[1, 2] + m[2, 1]) * h
x = (m[0, 1] + m[1, 0]) * h
w = (m[0, 2] - m[2, 0]) * h
elif (i == 2):
            h = math.sqrt((m[2, 2] - (m[0, 0] + m[1, 1])) + 1.0)
z = 0.5 * h
h = 0.5 / h
x = (m[2, 0] + m[0, 2]) * h
y = (m[1, 2] + m[2, 1]) * h
w = (m[1, 0] - m[0, 1]) * h
return normalize(quat(x, y, z, w))
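# Illustrative round-trip check (not part of the original API, name is a placeholder):
# converting a quaternion to a rotation matrix and back recovers the same rotation
# (unit quaternions are unique only up to sign).
def _example_quat_matrix_roundtrip():
    q = quat_from_axis_angle(normalize(np.array((1.0, 2.0, 3.0))), 0.9)
    q2 = quat_from_matrix(quat_to_matrix(q))
    assert np.allclose(q2, q) or np.allclose(q2, -q)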
# rigid body transform
def transform(x, r):
return (np.array(x), np.array(r))
def transform_identity():
return (np.array((0.0, 0.0, 0.0)), quat_identity())
# se(3) -> SE(3), Park & Lynch pg. 105, screw in [w, v] normalized form
def transform_exp(s, angle):
w = np.array(s[0:3])
v = np.array(s[3:6])
if (length(w) < 1.0):
r = quat_identity()
else:
r = quat_from_axis_angle(w, angle)
t = v * angle + (1.0 - math.cos(angle)) * np.cross(w, v) + (angle - math.sin(angle)) * np.cross(w, np.cross(w, v))
return (t, r)
def transform_inverse(t):
q_inv = quat_inverse(t[1])
return (-quat_rotate(q_inv, t[0]), q_inv)
def transform_vector(t, v):
return quat_rotate(t[1], v)
def transform_point(t, p):
return np.array(t[0]) + quat_rotate(t[1], p)
def transform_multiply(t, u):
return (quat_rotate(t[1], u[0]) + t[0], quat_multiply(t[1], u[1]))
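# Illustrative check (not part of the original API, name is a placeholder): composing a
# transform with its inverse yields the identity transform.
def _example_transform_inverse_roundtrip():
    t = transform((1.0, 2.0, 3.0), quat_from_axis_angle(np.array((0.0, 1.0, 0.0)), 0.7))
    p, q = transform_multiply(t, transform_inverse(t))
    assert np.allclose(p, 0.0) and np.allclose(q, quat_identity())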
# flatten an array of transforms (p,q) format to a 7-vector
def transform_flatten(t):
return np.array([*t[0], *t[1]])
# expand a 7-vec to a tuple of arrays
def transform_expand(t):
return (np.array(t[0:3]), np.array(t[3:7]))
# convert array of transforms to a array of 7-vecs
def transform_flatten_list(xforms):
exp = lambda t: transform_flatten(t)
return list(map(exp, xforms))
def transform_expand_list(xforms):
exp = lambda t: transform_expand(t)
return list(map(exp, xforms))
def transform_inertia(m, I, p, q):
R = quat_to_matrix(q)
# Steiner's theorem
return R * I * R.T + m * (np.dot(p, p) * np.eye(3) - np.outer(p, p))
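# Illustrative example (not part of the original API, name is a placeholder): for a point
# mass (zero inertia about its own COM) offset by p = (0, 1, 0), Steiner's theorem adds
# m*d^2 to the moments about the x and z axes only.
def _example_point_mass_parallel_axis():
    m = 3.0
    p = np.array((0.0, 1.0, 0.0))
    I_p = transform_inertia(m, np.zeros((3, 3)), p, quat_identity())
    assert np.allclose(I_p, np.diag((m, 0.0, m)))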
# spatial operators
# AdT
def spatial_adjoint(t):
R = quat_to_matrix(t[1])
w = skew(t[0])
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[3:6, 0:3] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# (AdT)^-T
def spatial_adjoint_dual(t):
R = quat_to_matrix(t[1])
w = skew(t[0])
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[0:3, 3:6] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# AdT*s
def transform_twist(t_ab, s_b):
return np.dot(spatial_adjoint(t_ab), s_b)
# AdT^{-T}*s
def transform_wrench(t_ab, f_b):
return np.dot(spatial_adjoint_dual(t_ab), f_b)
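# Illustrative invariance check (not part of the original API, name is a placeholder):
# power is frame independent, so the twist/wrench pairing is preserved by the adjoint maps
# above, i.e. dot(transform_twist(t, s), transform_wrench(t, f)) == dot(s, f).
def _example_power_invariance():
    t_ab = transform((0.5, -1.0, 2.0), quat_from_axis_angle(np.array((1.0, 0.0, 0.0)), 0.3))
    s_b = np.array((0.1, 0.2, 0.3, 1.0, -2.0, 0.5))
    f_b = np.array((0.4, -0.1, 0.7, 0.2, 0.9, -0.3))
    assert np.isclose(np.dot(transform_twist(t_ab, s_b), transform_wrench(t_ab, f_b)), np.dot(s_b, f_b))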
# transform spatial inertia (6x6) in b frame to a frame
def transform_spatial_inertia(t_ab, I_b):
t_ba = transform_inverse(t_ab)
# todo: write specialized method
I_a = np.dot(np.dot(spatial_adjoint(t_ba).T, I_b), spatial_adjoint(t_ba))
return I_a
def translate_twist(p_ab, s_b):
w = s_b[0:3]
v = np.cross(p_ab, s_b[0:3]) + s_b[3:6]
return np.array((*w, *v))
def translate_wrench(p_ab, s_b):
w = s_b[0:3] + np.cross(p_ab, s_b[3:6])
v = s_b[3:6]
return np.array((*w, *v))
def spatial_vector(v=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
return np.array(v)
# ad_V pg. 289 L&P, pg. 25 Featherstone
def spatial_cross(a, b):
w = np.cross(a[0:3], b[0:3])
v = np.cross(a[3:6], b[0:3]) + np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
# ad_V^T pg. 290 L&P, pg. 25 Featherstone, note this does not include the sign flip in the definition
def spatial_cross_dual(a, b):
w = np.cross(a[0:3], b[0:3]) + np.cross(a[3:6], b[3:6])
v = np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
def spatial_dot(a, b):
return np.dot(a, b)
def spatial_outer(a, b):
return np.outer(a, b)
def spatial_matrix():
return np.zeros((6, 6))
def spatial_matrix_from_inertia(I, m):
G = spatial_matrix()
G[0:3, 0:3] = I
G[3, 3] = m
G[4, 4] = m
G[5, 5] = m
return G
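# Illustrative example (not part of the original API, name is a placeholder): the spatial
# inertia of a solid sphere (I = 2/5 m r^2 about its COM) packed into the 6x6 layout above,
# with the rotational block in the top-left and the mass on the lower diagonal.
def _example_sphere_spatial_inertia():
    m = 2.0
    r = 0.5
    I = 2.0 / 5.0 * m * r * r * np.eye(3)
    G = spatial_matrix_from_inertia(I, m)
    assert np.allclose(np.diag(G), (0.2, 0.2, 0.2, m, m, m))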
# solves x = I^(-1)b
def spatial_solve(I, b):
return np.dot(np.linalg.inv(I), b)
def rpy2quat(roll, pitch, yaw):
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
w = cy * cr * cp + sy * sr * sp
x = cy * sr * cp - sy * cr * sp
y = cy * cr * sp + sy * sr * cp
z = sy * cr * cp - cy * sr * sp
return (x, y, z, w)
# helper to retrieve body angular velocity from a twist v_s in se(3)
def get_body_angular_velocity(v_s):
return v_s[0:3]
# helper to compute velocity of a point p on a body given its spatial twist v_s
def get_body_linear_velocity(v_s, p):
dpdt = v_s[3:6] + torch.cross(v_s[0:3], p)
return dpdt
# helper to build a body twist given the angular and linear velocity of
# the center of mass specified in the world frame, returns the body
# twist with respect to the origin (v_s)
def get_body_twist(w_m, v_m, p_m):
lin = v_m + torch.cross(p_m, w_m)
return (*w_m, *lin)
# timer utils
class ScopedTimer:
indent = -1
enabled = True
def __init__(self, name, active=True, detailed=False):
self.name = name
self.active = active and self.enabled
self.detailed = detailed
def __enter__(self):
if (self.active):
self.start = timeit.default_timer()
ScopedTimer.indent += 1
if (self.detailed):
self.cp = cProfile.Profile()
self.cp.clear()
self.cp.enable()
def __exit__(self, exc_type, exc_value, traceback):
if (self.detailed):
self.cp.disable()
self.cp.print_stats(sort='tottime')
if (self.active):
elapsed = (timeit.default_timer() - self.start) * 1000.0
indent = ""
for i in range(ScopedTimer.indent):
indent += "\t"
log("{}{} took {:.2f} ms".format(indent, self.name, elapsed))
ScopedTimer.indent -= 1
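# Illustrative usage of ScopedTimer (the integrator/model/state names below are placeholders,
# not library API):
#
# with ScopedTimer("forward", active=True):
#     state = integrator.forward(model, state, sim_dt)
#
# Nested timers indent their output; pass detailed=True to also dump a cProfile report on exit.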
# code snippet for invoking cProfile
# cp = cProfile.Profile()
# cp.enable()
# for i in range(1000):
# self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# cp.disable()
# cp.print_stats(sort='tottime')
# exit(0)
# represent an edge between v0, v1 with connected faces f0, f1, and opposite vertices o0 and o1
# winding is such that first tri can be reconstructed as {v0, v1, o0}, and second tri as { v1, v0, o1 }
class MeshEdge:
def __init__(self, v0, v1, o0, o1, f0, f1):
self.v0 = v0 # vertex 0
self.v1 = v1 # vertex 1
        self.o0 = o0         # vertex opposite the edge in f0
        self.o1 = o1         # vertex opposite the edge in f1
self.f0 = f0 # index of tri1
self.f1 = f1 # index of tri2
class MeshAdjacency:
def __init__(self, indices, num_tris):
# map edges (v0, v1) to faces (f0, f1)
self.edges = {}
self.indices = indices
for index, tri in enumerate(indices):
self.add_edge(tri[0], tri[1], tri[2], index)
self.add_edge(tri[1], tri[2], tri[0], index)
self.add_edge(tri[2], tri[0], tri[1], index)
    def add_edge(self, i0, i1, o, f): # edge vertex 0, edge vertex 1, opposite vertex, index of triangle
key = (min(i0, i1), max(i0, i1))
edge = None
if key in self.edges:
edge = self.edges[key]
if (edge.f1 != -1):
print("Detected non-manifold edge")
return
else:
# update other side of the edge
edge.o1 = o
edge.f1 = f
else:
# create new edge with opposite yet to be filled
edge = MeshEdge(i0, i1, o, -1, f, -1)
self.edges[key] = edge
def opposite_vertex(self, edge):
pass
def mem_report():
    '''Report the memory usage of tensor storage in PyTorch.
    Tensors on both CPU and GPU are reported.'''
def _mem_report(tensors, mem_type):
        '''Print a summary of the selected tensors of the given storage type.
        There are two storage types of major concern:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining in system memory (usually unimportant)
        Args:
            - tensors: the tensors of the specified type
            - mem_type: 'CPU' or 'GPU' in the current implementation '''
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
gc.collect()
LEN = 65
objects = gc.get_objects()
#print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN)
| 13,134 | Python | 23.056777 | 118 | 0.522994 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/adjoint.h | #pragma once
#include <cmath>
#include <stdio.h>
#ifdef CPU
#define CUDA_CALLABLE
#define __device__
#define __host__
#define __constant__
#elif defined(CUDA)
#define CUDA_CALLABLE __device__
#include <cuda.h>
#include <cuda_runtime_api.h>
#define check_cuda(code) { check_cuda_impl(code, __FILE__, __LINE__); }
void check_cuda_impl(cudaError_t code, const char* file, int line)
{
if (code != cudaSuccess)
{
printf("CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
}
}
void print_device()
{
int currentDevice;
cudaError_t err = cudaGetDevice(¤tDevice);
cudaDeviceProp props;
err = cudaGetDeviceProperties(&props, currentDevice);
if (err != cudaSuccess)
printf("CUDA error: %d\n", err);
else
printf("%s\n", props.name);
}
#endif
#ifdef _WIN32
#define __restrict__ __restrict
#endif
#define FP_CHECK 0
namespace df
{
template <typename T>
CUDA_CALLABLE float cast_float(T x) { return (float)(x); }
template <typename T>
CUDA_CALLABLE int cast_int(T x) { return (int)(x); }
template <typename T>
CUDA_CALLABLE void adj_cast_float(T x, T& adj_x, float adj_ret) { adj_x += adj_ret; }
template <typename T>
CUDA_CALLABLE void adj_cast_int(T x, T& adj_x, int adj_ret) { adj_x += adj_ret; }
// use macros rather than namespaced functions for the float/int casts; this avoids generating a namespace-qualified float(x), which is not valid C++
#define float(x) cast_float(x)
#define adj_float(x, adj_x, adj_ret) adj_cast_float(x, adj_x, adj_ret)
#define int(x) cast_int(x)
#define adj_int(x, adj_x, adj_ret) adj_cast_int(x, adj_x, adj_ret)
#define kEps 0.0f
// basic ops for integer types
inline CUDA_CALLABLE int mul(int a, int b) { return a*b; }
inline CUDA_CALLABLE int div(int a, int b) { return a/b; }
inline CUDA_CALLABLE int add(int a, int b) { return a+b; }
inline CUDA_CALLABLE int sub(int a, int b) { return a-b; }
inline CUDA_CALLABLE int mod(int a, int b) { return a % b; }
inline CUDA_CALLABLE void adj_mul(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_div(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_add(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_sub(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
inline CUDA_CALLABLE void adj_mod(int a, int b, int& adj_a, int& adj_b, int adj_ret) { }
// basic ops for float types
inline CUDA_CALLABLE float mul(float a, float b) { return a*b; }
inline CUDA_CALLABLE float div(float a, float b) { return a/b; }
inline CUDA_CALLABLE float add(float a, float b) { return a+b; }
inline CUDA_CALLABLE float sub(float a, float b) { return a-b; }
inline CUDA_CALLABLE float min(float a, float b) { return a<b?a:b; }
inline CUDA_CALLABLE float max(float a, float b) { return a>b?a:b; }
inline CUDA_CALLABLE float leaky_min(float a, float b, float r) { return min(a, b); }
inline CUDA_CALLABLE float leaky_max(float a, float b, float r) { return max(a, b); }
inline CUDA_CALLABLE float clamp(float x, float a, float b) { return min(max(a, x), b); }
inline CUDA_CALLABLE float step(float x) { return x < 0.0 ? 1.0 : 0.0; }
inline CUDA_CALLABLE float sign(float x) { return x < 0.0 ? -1.0 : 1.0; }
inline CUDA_CALLABLE float abs(float x) { return fabsf(x); }
inline CUDA_CALLABLE float nonzero(float x) { return x == 0.0 ? 0.0 : 1.0; }
inline CUDA_CALLABLE float acos(float x) { return std::acos(std::min(std::max(x, -1.0f), 1.0f)); }
inline CUDA_CALLABLE float sin(float x) { return std::sin(x); }
inline CUDA_CALLABLE float cos(float x) { return std::cos(x); }
inline CUDA_CALLABLE float sqrt(float x) { return std::sqrt(x); }
inline CUDA_CALLABLE void adj_mul(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; }
inline CUDA_CALLABLE void adj_div(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret/b; adj_b -= adj_ret*(a/b)/b; }
inline CUDA_CALLABLE void adj_add(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b += adj_ret; }
inline CUDA_CALLABLE void adj_sub(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; }
// inline CUDA_CALLABLE bool lt(float a, float b) { return a < b; }
// inline CUDA_CALLABLE bool gt(float a, float b) { return a > b; }
// inline CUDA_CALLABLE bool lte(float a, float b) { return a <= b; }
// inline CUDA_CALLABLE bool gte(float a, float b) { return a >= b; }
// inline CUDA_CALLABLE bool eq(float a, float b) { return a == b; }
// inline CUDA_CALLABLE bool neq(float a, float b) { return a != b; }
// inline CUDA_CALLABLE bool adj_lt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_gt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_lte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_gte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_eq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
// inline CUDA_CALLABLE bool adj_neq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { }
inline CUDA_CALLABLE void adj_min(float a, float b, float& adj_a, float& adj_b, float adj_ret)
{
if (a < b)
adj_a += adj_ret;
else
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_max(float a, float b, float& adj_a, float& adj_b, float adj_ret)
{
if (a > b)
adj_a += adj_ret;
else
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_leaky_min(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret)
{
if (a < b)
adj_a += adj_ret;
else
{
adj_a += r*adj_ret;
adj_b += adj_ret;
}
}
inline CUDA_CALLABLE void adj_leaky_max(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret)
{
if (a > b)
adj_a += adj_ret;
else
{
adj_a += r*adj_ret;
adj_b += adj_ret;
}
}
inline CUDA_CALLABLE void adj_clamp(float x, float a, float b, float& adj_x, float& adj_a, float& adj_b, float adj_ret)
{
if (x < a)
adj_a += adj_ret;
else if (x > b)
adj_b += adj_ret;
else
adj_x += adj_ret;
}
inline CUDA_CALLABLE void adj_step(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_nonzero(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_sign(float x, float& adj_x, float adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_abs(float x, float& adj_x, float adj_ret)
{
if (x < 0.0)
adj_x -= adj_ret;
else
adj_x += adj_ret;
}
inline CUDA_CALLABLE void adj_acos(float x, float& adj_x, float adj_ret)
{
float d = sqrt(1.0-x*x);
if (d > 0.0)
adj_x -= (1.0/d)*adj_ret;
}
inline CUDA_CALLABLE void adj_sin(float x, float& adj_x, float adj_ret)
{
adj_x += std::cos(x)*adj_ret;
}
inline CUDA_CALLABLE void adj_cos(float x, float& adj_x, float adj_ret)
{
adj_x -= std::sin(x)*adj_ret;
}
inline CUDA_CALLABLE void adj_sqrt(float x, float& adj_x, float adj_ret)
{
adj_x += 0.5f*(1.0/std::sqrt(x))*adj_ret;
}
template <typename T>
CUDA_CALLABLE inline T select(bool cond, const T& a, const T& b) { return cond?b:a; }
template <typename T>
CUDA_CALLABLE inline void adj_select(bool cond, const T& a, const T& b, bool& adj_cond, T& adj_a, T& adj_b, const T& adj_ret)
{
if (cond)
adj_b += adj_ret;
else
adj_a += adj_ret;
}
// some helpful operator overloads (just for C++ use, these are not adjointed)
template <typename T>
CUDA_CALLABLE T& operator += (T& a, const T& b) { a = add(a, b); return a; }
template <typename T>
CUDA_CALLABLE T& operator -= (T& a, const T& b) { a = sub(a, b); return a; }
template <typename T>
CUDA_CALLABLE T operator*(const T& a, float s) { return mul(a, s); }
template <typename T>
CUDA_CALLABLE T operator/(const T& a, float s) { return div(a, s); }
template <typename T>
CUDA_CALLABLE T operator+(const T& a, const T& b) { return add(a, b); }
template <typename T>
CUDA_CALLABLE T operator-(const T& a, const T& b) { return sub(a, b); }
// for single thread CPU only
static int s_threadIdx;
inline CUDA_CALLABLE int tid()
{
#ifdef CPU
return s_threadIdx;
#elif defined(CUDA)
return blockDim.x * blockIdx.x + threadIdx.x;
#endif
}
#include "vec2.h"
#include "vec3.h"
#include "mat22.h"
#include "mat33.h"
#include "matnn.h"
#include "quat.h"
#include "spatial.h"
//--------------
template<typename T>
inline CUDA_CALLABLE T load(T* buf, int index)
{
assert(buf);
return buf[index];
}
template<typename T>
inline CUDA_CALLABLE void store(T* buf, int index, T value)
{
// allow NULL buffers for case where gradients are not required
if (buf)
{
buf[index] = value;
}
}
#ifdef CUDA
template<typename T>
inline __device__ void atomic_add(T* buf, T value)
{
atomicAdd(buf, value);
}
#endif
template<typename T>
inline __device__ void atomic_add(T* buf, int index, T value)
{
if (buf)
{
// CPU mode is sequential so just add
#ifdef CPU
buf[index] += value;
#elif defined(CUDA)
atomic_add(buf + index, value);
#endif
}
}
template<typename T>
inline __device__ void atomic_sub(T* buf, int index, T value)
{
if (buf)
{
// CPU mode is sequential so just add
#ifdef CPU
buf[index] -= value;
#elif defined(CUDA)
atomic_add(buf + index, -value);
#endif
}
}
template <typename T>
inline CUDA_CALLABLE void adj_load(T* buf, int index, T* adj_buf, int& adj_index, const T& adj_output)
{
// allow NULL buffers for case where gradients are not required
if (adj_buf) {
#ifdef CPU
adj_buf[index] += adj_output; // does not need to be atomic if single-threaded
#elif defined(CUDA)
atomic_add(adj_buf, index, adj_output);
#endif
}
}
template <typename T>
inline CUDA_CALLABLE void adj_store(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
adj_value += adj_buf[index]; // doesn't need to be atomic because it's used to load from a buffer onto the stack
}
template<typename T>
inline CUDA_CALLABLE void adj_atomic_add(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
if (adj_buf) { // cannot be atomic because used locally
adj_value += adj_buf[index];
}
}
template<typename T>
inline CUDA_CALLABLE void adj_atomic_sub(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value)
{
if (adj_buf) { // cannot be atomic because used locally
adj_value -= adj_buf[index];
}
}
//-------------------------
// Texture methods
inline CUDA_CALLABLE float sdf_sample(float3 x)
{
return 0.0;
}
inline CUDA_CALLABLE float3 sdf_grad(float3 x)
{
return float3();
}
inline CUDA_CALLABLE void adj_sdf_sample(float3 x, float3& adj_x, float adj_ret)
{
}
inline CUDA_CALLABLE void adj_sdf_grad(float3 x, float3& adj_x, float3& adj_ret)
{
}
inline CUDA_CALLABLE void print(int i)
{
printf("%d\n", i);
}
inline CUDA_CALLABLE void print(float i)
{
printf("%f\n", i);
}
inline CUDA_CALLABLE void print(float3 i)
{
printf("%f %f %f\n", i.x, i.y, i.z);
}
inline CUDA_CALLABLE void print(quat i)
{
printf("%f %f %f %f\n", i.x, i.y, i.z, i.w);
}
inline CUDA_CALLABLE void print(mat22 m)
{
printf("%f %f\n%f %f\n", m.data[0][0], m.data[0][1],
m.data[1][0], m.data[1][1]);
}
inline CUDA_CALLABLE void print(mat33 m)
{
printf("%f %f %f\n%f %f %f\n%f %f %f\n", m.data[0][0], m.data[0][1], m.data[0][2],
m.data[1][0], m.data[1][1], m.data[1][2],
m.data[2][0], m.data[2][1], m.data[2][2]);
}
inline CUDA_CALLABLE void print(spatial_transform t)
{
printf("(%f %f %f) (%f %f %f %f)\n", t.p.x, t.p.y, t.p.z, t.q.x, t.q.y, t.q.z, t.q.w);
}
inline CUDA_CALLABLE void print(spatial_vector v)
{
printf("(%f %f %f) (%f %f %f)\n", v.w.x, v.w.y, v.w.z, v.v.x, v.v.y, v.v.z);
}
inline CUDA_CALLABLE void print(spatial_matrix m)
{
printf("%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n"
"%f %f %f %f %f %f\n",
m.data[0][0], m.data[0][1], m.data[0][2], m.data[0][3], m.data[0][4], m.data[0][5],
m.data[1][0], m.data[1][1], m.data[1][2], m.data[1][3], m.data[1][4], m.data[1][5],
m.data[2][0], m.data[2][1], m.data[2][2], m.data[2][3], m.data[2][4], m.data[2][5],
m.data[3][0], m.data[3][1], m.data[3][2], m.data[3][3], m.data[3][4], m.data[3][5],
m.data[4][0], m.data[4][1], m.data[4][2], m.data[4][3], m.data[4][4], m.data[4][5],
m.data[5][0], m.data[5][1], m.data[5][2], m.data[5][3], m.data[5][4], m.data[5][5]);
}
inline CUDA_CALLABLE void adj_print(int i, int& adj_i) { printf("%d adj: %d\n", i, adj_i); }
inline CUDA_CALLABLE void adj_print(float i, float& adj_i) { printf("%f adj: %f\n", i, adj_i); }
inline CUDA_CALLABLE void adj_print(float3 i, float3& adj_i) { printf("%f %f %f adj: %f %f %f \n", i.x, i.y, i.z, adj_i.x, adj_i.y, adj_i.z); }
inline CUDA_CALLABLE void adj_print(quat i, quat& adj_i) { }
inline CUDA_CALLABLE void adj_print(mat22 m, mat22& adj_m) { }
inline CUDA_CALLABLE void adj_print(mat33 m, mat33& adj_m) { }
inline CUDA_CALLABLE void adj_print(spatial_transform t, spatial_transform& adj_t) {}
inline CUDA_CALLABLE void adj_print(spatial_vector t, spatial_vector& adj_t) {}
inline CUDA_CALLABLE void adj_print(spatial_matrix t, spatial_matrix& adj_t) {}
} // namespace df | 13,946 | C | 29.05819 | 144 | 0.608992 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/config.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
no_grad = False # disable adjoint tracking
check_grad = False # will perform numeric gradient checking after each launch
verify_fp = False # verify inputs and outputs are finite after each launch
| 650 | Python | 49.076919 | 82 | 0.783077 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/quat.h | #pragma once
struct quat
{
// imaginary part
float x;
float y;
float z;
// real part
float w;
    inline CUDA_CALLABLE quat(float x=0.0f, float y=0.0f, float z=0.0f, float w=0.0f) : x(x), y(y), z(z), w(w) {}
explicit inline CUDA_CALLABLE quat(const float3& v, float w=0.0f) : x(v.x), y(v.y), z(v.z), w(w) {}
};
#ifdef CUDA
inline __device__ void atomic_add(quat * addr, quat value) {
atomicAdd(&(addr -> x), value.x);
atomicAdd(&(addr -> y), value.y);
atomicAdd(&(addr -> z), value.z);
atomicAdd(&(addr -> w), value.w);
}
#endif
inline CUDA_CALLABLE void adj_quat(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, quat adj_ret)
{
adj_x += adj_ret.x;
adj_y += adj_ret.y;
adj_z += adj_ret.z;
adj_w += adj_ret.w;
}
inline CUDA_CALLABLE void adj_quat(const float3& v, float w, float3& adj_v, float& adj_w, quat adj_ret)
{
adj_v.x += adj_ret.x;
adj_v.y += adj_ret.y;
adj_v.z += adj_ret.z;
adj_w += adj_ret.w;
}
// forward methods
inline CUDA_CALLABLE quat quat_from_axis_angle(const float3& axis, float angle)
{
float half = angle*0.5f;
float w = cosf(half);
float sin_theta_over_two = sinf(half);
float3 v = axis*sin_theta_over_two;
return quat(v.x, v.y, v.z, w);
}
inline CUDA_CALLABLE quat quat_identity()
{
return quat(0.0f, 0.0f, 0.0f, 1.0f);
}
inline CUDA_CALLABLE float dot(const quat& a, const quat& b)
{
return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
}
inline CUDA_CALLABLE float length(const quat& q)
{
return sqrtf(dot(q, q));
}
inline CUDA_CALLABLE quat normalize(const quat& q)
{
float l = length(q);
if (l > kEps)
{
float inv_l = 1.0f/l;
return quat(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l);
}
else
{
return quat(0.0f, 0.0f, 0.0f, 1.0f);
}
}
inline CUDA_CALLABLE quat inverse(const quat& q)
{
return quat(-q.x, -q.y, -q.z, q.w);
}
inline CUDA_CALLABLE quat add(const quat& a, const quat& b)
{
return quat(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
}
inline CUDA_CALLABLE quat sub(const quat& a, const quat& b)
{
    return quat(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
}
inline CUDA_CALLABLE quat mul(const quat& a, const quat& b)
{
return quat(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z,
a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x,
a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y,
a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z);
}
inline CUDA_CALLABLE quat mul(const quat& a, float s)
{
return quat(a.x*s, a.y*s, a.z*s, a.w*s);
}
inline CUDA_CALLABLE float3 rotate(const quat& q, const float3& x)
{
return x*(2.0f*q.w*q.w-1.0f) + cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f;
}
inline CUDA_CALLABLE float3 rotate_inv(const quat& q, const float3& x)
{
return x*(2.0f*q.w*q.w-1.0f) - cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f;
}
inline CUDA_CALLABLE float index(const quat& a, int idx)
{
#if FP_CHECK
if (idx < 0 || idx > 3)
{
printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
exit(1);
}
#endif
return (&a.x)[idx];
}
inline CUDA_CALLABLE void adj_index(const quat& a, int idx, quat& adj_a, int & adj_idx, float & adj_ret)
{
#if FP_CHECK
if (idx < 0 || idx > 3)
{
printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
exit(1);
}
#endif
(&adj_a.x)[idx] += adj_ret;
}
// backward methods
inline CUDA_CALLABLE void adj_quat_from_axis_angle(const float3& axis, float angle, float3& adj_axis, float& adj_angle, const quat& adj_ret)
{
float3 v = float3(adj_ret.x, adj_ret.y, adj_ret.z);
float s = sinf(angle*0.5f);
float c = cosf(angle*0.5f);
quat dqda = quat(axis.x*c, axis.y*c, axis.z*c, -s)*0.5f;
adj_axis += v*s;
adj_angle += dot(dqda, adj_ret);
}
inline CUDA_CALLABLE void adj_quat_identity(const quat& adj_ret)
{
// nop
}
inline CUDA_CALLABLE void adj_dot(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const float adj_ret)
{
adj_a += b*adj_ret;
adj_b += a*adj_ret;
}
inline CUDA_CALLABLE void adj_length(const quat& a, quat& adj_a, const float adj_ret)
{
adj_a += normalize(a)*adj_ret;
}
inline CUDA_CALLABLE void adj_normalize(const quat& q, quat& adj_q, const quat& adj_ret)
{
float l = length(q);
if (l > kEps)
{
float l_inv = 1.0f/l;
adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret));
}
}
inline CUDA_CALLABLE void adj_inverse(const quat& q, quat& adj_q, const quat& adj_ret)
{
adj_q.x -= adj_ret.x;
adj_q.y -= adj_ret.y;
adj_q.z -= adj_ret.z;
adj_q.w += adj_ret.w;
}
// inline void adj_normalize(const quat& a, quat& adj_a, const quat& adj_ret)
// {
// float d = length(a);
// if (d > kEps)
// {
// float invd = 1.0f/d;
// quat ahat = normalize(a);
// adj_a += (adj_ret - ahat*(dot(ahat, adj_ret))*invd);
// //if (!isfinite(adj_a))
// // printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z);
// }
// }
inline CUDA_CALLABLE void adj_add(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_sub(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
adj_a += adj_ret;
adj_b -= adj_ret;
}
inline CUDA_CALLABLE void adj_mul(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret)
{
// shorthand
const quat& r = adj_ret;
adj_a += quat(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y,
b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x,
b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w,
b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z);
adj_b += quat(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y,
a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x,
a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w,
a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z);
}
inline CUDA_CALLABLE void adj_mul(const quat& a, float s, quat& adj_a, float& adj_s, const quat& adj_ret)
{
adj_a += adj_ret*s;
adj_s += dot(a, adj_ret);
}
inline CUDA_CALLABLE void adj_rotate(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret)
{
const float3& r = adj_ret;
{
float t2 = p.z*q.z*2.0f;
float t3 = p.y*q.w*2.0f;
float t4 = p.x*q.w*2.0f;
float t5 = p.x*q.x*2.0f;
float t6 = p.y*q.y*2.0f;
float t7 = p.z*q.y*2.0f;
float t8 = p.x*q.z*2.0f;
float t9 = p.x*q.y*2.0f;
float t10 = p.y*q.x*2.0f;
adj_q.x += r.z*(t3+t8)+r.x*(t2+t6+p.x*q.x*4.0f)+r.y*(t9-p.z*q.w*2.0f);
adj_q.y += r.y*(t2+t5+p.y*q.y*4.0f)+r.x*(t10+p.z*q.w*2.0f)-r.z*(t4-p.y*q.z*2.0f);
adj_q.z += r.y*(t4+t7)+r.z*(t5+t6+p.z*q.z*4.0f)-r.x*(t3-p.z*q.x*2.0f);
adj_q.w += r.x*(t7+p.x*q.w*4.0f-p.y*q.z*2.0f)+r.y*(t8+p.y*q.w*4.0f-p.z*q.x*2.0f)+r.z*(-t9+t10+p.z*q.w*4.0f);
}
{
float t2 = q.w*q.w;
float t3 = t2*2.0f;
float t4 = q.w*q.z*2.0f;
float t5 = q.x*q.y*2.0f;
float t6 = q.w*q.y*2.0f;
float t7 = q.w*q.x*2.0f;
float t8 = q.y*q.z*2.0f;
adj_p.x += r.y*(t4+t5)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.z*(t6-q.x*q.z*2.0f);
adj_p.y += r.z*(t7+t8)-r.x*(t4-t5)+r.y*(t3+(q.y*q.y)*2.0f-1.0f);
adj_p.z += -r.y*(t7-t8)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.x*(t6+q.x*q.z*2.0f);
}
}
inline CUDA_CALLABLE void adj_rotate_inv(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret)
{
const float3& r = adj_ret;
{
float t2 = p.z*q.w*2.0f;
float t3 = p.z*q.z*2.0f;
float t4 = p.y*q.w*2.0f;
float t5 = p.x*q.w*2.0f;
float t6 = p.x*q.x*2.0f;
float t7 = p.y*q.y*2.0f;
float t8 = p.y*q.z*2.0f;
float t9 = p.z*q.x*2.0f;
float t10 = p.x*q.y*2.0f;
adj_q.x += r.y*(t2+t10)+r.x*(t3+t7+p.x*q.x*4.0f)-r.z*(t4-p.x*q.z*2.0f);
adj_q.y += r.z*(t5+t8)+r.y*(t3+t6+p.y*q.y*4.0f)-r.x*(t2-p.y*q.x*2.0f);
adj_q.z += r.x*(t4+t9)+r.z*(t6+t7+p.z*q.z*4.0f)-r.y*(t5-p.z*q.y*2.0f);
adj_q.w += r.x*(t8+p.x*q.w*4.0f-p.z*q.y*2.0f)+r.y*(t9+p.y*q.w*4.0f-p.x*q.z*2.0f)+r.z*(t10-p.y*q.x*2.0f+p.z*q.w*4.0f);
}
{
float t2 = q.w*q.w;
float t3 = t2*2.0f;
float t4 = q.w*q.z*2.0f;
float t5 = q.w*q.y*2.0f;
float t6 = q.x*q.z*2.0f;
float t7 = q.w*q.x*2.0f;
adj_p.x += r.z*(t5+t6)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.y*(t4-q.x*q.y*2.0f);
adj_p.y += r.y*(t3+(q.y*q.y)*2.0f-1.0f)+r.x*(t4+q.x*q.y*2.0f)-r.z*(t7-q.y*q.z*2.0f);
adj_p.z += -r.x*(t5-t6)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.y*(t7+q.y*q.z*2.0f);
}
} | 8,985 | C | 26.993769 | 184 | 0.522315 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/vec3.h | #pragma once
struct float3
{
float x;
float y;
float z;
inline CUDA_CALLABLE float3(float x=0.0f, float y=0.0f, float z=0.0f) : x(x), y(y), z(z) {}
explicit inline CUDA_CALLABLE float3(const float* p) : x(p[0]), y(p[1]), z(p[2]) {}
};
//--------------
// float3 methods
inline CUDA_CALLABLE float3 operator - (float3 a)
{
return { -a.x, -a.y, -a.z };
}
inline CUDA_CALLABLE float3 mul(float3 a, float s)
{
return { a.x*s, a.y*s, a.z*s };
}
inline CUDA_CALLABLE float3 div(float3 a, float s)
{
return { a.x/s, a.y/s, a.z/s };
}
inline CUDA_CALLABLE float3 add(float3 a, float3 b)
{
return { a.x+b.x, a.y+b.y, a.z+b.z };
}
inline CUDA_CALLABLE float3 add(float3 a, float s)
{
return { a.x + s, a.y + s, a.z + s };
}
inline CUDA_CALLABLE float3 sub(float3 a, float3 b)
{
return { a.x-b.x, a.y-b.y, a.z-b.z };
}
inline CUDA_CALLABLE float dot(float3 a, float3 b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
inline CUDA_CALLABLE float3 cross(float3 a, float3 b)
{
float3 c;
c.x = a.y*b.z - a.z*b.y;
c.y = a.z*b.x - a.x*b.z;
c.z = a.x*b.y - a.y*b.x;
return c;
}
inline CUDA_CALLABLE float index(const float3 & a, int idx)
{
#if FP_CHECK
if (idx < 0 || idx > 2)
{
printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
exit(1);
}
#endif
return (&a.x)[idx];
}
inline CUDA_CALLABLE void adj_index(const float3 & a, int idx, float3 & adj_a, int & adj_idx, float & adj_ret)
{
#if FP_CHECK
if (idx < 0 || idx > 2)
{
printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
exit(1);
}
#endif
(&adj_a.x)[idx] += adj_ret;
}
inline CUDA_CALLABLE float length(float3 a)
{
return sqrtf(dot(a, a));
}
inline CUDA_CALLABLE float3 normalize(float3 a)
{
float l = length(a);
if (l > kEps)
return div(a,l);
else
return float3();
}
inline bool CUDA_CALLABLE isfinite(float3 x)
{
return std::isfinite(x.x) && std::isfinite(x.y) && std::isfinite(x.z);
}
// adjoint float3 constructor
inline CUDA_CALLABLE void adj_float3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const float3& adj_ret)
{
adj_x += adj_ret.x;
adj_y += adj_ret.y;
adj_z += adj_ret.z;
}
inline CUDA_CALLABLE void adj_mul(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_a.x += s*adj_ret.x;
adj_a.y += s*adj_ret.y;
adj_a.z += s*adj_ret.z;
adj_s += dot(a, adj_ret);
#if FP_CHECK
if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
printf("adj_mul((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
inline CUDA_CALLABLE void adj_div(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_s += dot(- a / (s * s), adj_ret); // - a / s^2
adj_a.x += adj_ret.x / s;
adj_a.y += adj_ret.y / s;
adj_a.z += adj_ret.z / s;
#if FP_CHECK
if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
printf("adj_div((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
inline CUDA_CALLABLE void adj_add(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += adj_ret;
adj_b += adj_ret;
}
inline CUDA_CALLABLE void adj_add(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
adj_a += adj_ret;
adj_s += adj_ret.x + adj_ret.y + adj_ret.z;
}
inline CUDA_CALLABLE void adj_sub(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
adj_a += adj_ret;
adj_b -= adj_ret;
}
inline CUDA_CALLABLE void adj_dot(float3 a, float3 b, float3& adj_a, float3& adj_b, const float adj_ret)
{
adj_a += b*adj_ret;
adj_b += a*adj_ret;
#if FP_CHECK
if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
printf("adj_dot((%f %f %f), (%f %f %f), (%f %f %f), (%f %f %f), %f)\n", a.x, a.y, a.z, b.x, b.y, b.z, adj_a.x, adj_a.y, adj_a.z, adj_b.x, adj_b.y, adj_b.z, adj_ret);
#endif
}
inline CUDA_CALLABLE void adj_cross(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
// todo: sign check
adj_a += cross(b, adj_ret);
adj_b -= cross(a, adj_ret);
}
#ifdef CUDA
inline __device__ void atomic_add(float3 * addr, float3 value) {
// *addr += value;
atomicAdd(&(addr -> x), value.x);
atomicAdd(&(addr -> y), value.y);
atomicAdd(&(addr -> z), value.z);
}
#endif
inline CUDA_CALLABLE void adj_length(float3 a, float3& adj_a, const float adj_ret)
{
adj_a += normalize(a)*adj_ret;
#if FP_CHECK
if (!isfinite(adj_a))
printf("%s:%d - adj_length((%f %f %f), (%f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret);
#endif
}
inline CUDA_CALLABLE void adj_normalize(float3 a, float3& adj_a, const float3& adj_ret)
{
float d = length(a);
if (d > kEps)
{
float invd = 1.0f/d;
float3 ahat = normalize(a);
adj_a += (adj_ret*invd - ahat*(dot(ahat, adj_ret))*invd);
#if FP_CHECK
if (!isfinite(adj_a))
printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
}
| 5,542 | C | 23.745536 | 179 | 0.560628 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/sim.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This module contains time-integration objects for simulating
models + state forward in time.
"""
import math
import torch
import numpy as np
import dflex.util
import dflex.adjoint as df
import dflex.config
from dflex.model import *
import time
# Todo
#-----
#
# [x] Spring model
# [x] 2D FEM model
# [x] 3D FEM model
# [x] Cloth
# [x] Wind/Drag model
# [x] Bending model
# [x] Triangle collision
# [x] Rigid body model
# [x] Rigid shape contact
# [x] Sphere
# [x] Capsule
# [x] Box
# [ ] Convex
# [ ] SDF
# [ ] Implicit solver
# [x] USD import
# [x] USD export
# -----
# externally compiled kernels module (C++/CUDA code with PyBind entry points)
kernels = None
@df.func
def test(c: float):
x = 1.0
y = float(2)
z = int(3.0)
print(y)
print(z)
if (c < 3.0):
x = 2.0
return x*6.0
def kernel_init():
global kernels
kernels = df.compile()
@df.kernel
def integrate_particles(x: df.tensor(df.float3),
v: df.tensor(df.float3),
f: df.tensor(df.float3),
w: df.tensor(float),
gravity: df.tensor(df.float3),
dt: float,
x_new: df.tensor(df.float3),
v_new: df.tensor(df.float3)):
tid = df.tid()
x0 = df.load(x, tid)
v0 = df.load(v, tid)
f0 = df.load(f, tid)
inv_mass = df.load(w, tid)
g = df.load(gravity, 0)
# simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt
v1 = v0 + (f0 * inv_mass + g * df.step(0.0 - inv_mass)) * dt
x1 = x0 + v1 * dt
df.store(x_new, tid, x1)
df.store(v_new, tid, v1)
# semi-implicit Euler integration
@df.kernel
def integrate_rigids(rigid_x: df.tensor(df.float3),
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
rigid_f: df.tensor(df.float3),
rigid_t: df.tensor(df.float3),
inv_m: df.tensor(float),
inv_I: df.tensor(df.mat33),
gravity: df.tensor(df.float3),
dt: float,
rigid_x_new: df.tensor(df.float3),
rigid_r_new: df.tensor(df.quat),
rigid_v_new: df.tensor(df.float3),
rigid_w_new: df.tensor(df.float3)):
tid = df.tid()
# positions
x0 = df.load(rigid_x, tid)
r0 = df.load(rigid_r, tid)
# velocities
v0 = df.load(rigid_v, tid)
w0 = df.load(rigid_w, tid) # angular velocity
# forces
f0 = df.load(rigid_f, tid)
t0 = df.load(rigid_t, tid)
# masses
inv_mass = df.load(inv_m, tid) # 1 / mass
inv_inertia = df.load(inv_I, tid) # inverse of 3x3 inertia matrix
g = df.load(gravity, 0)
# linear part
v1 = v0 + (f0 * inv_mass + g * df.nonzero(inv_mass)) * dt # linear integral (linear position/velocity)
x1 = x0 + v1 * dt
# angular part
# so reverse multiplication by r0 takes you from global coordinates into local coordinates
    # because it's a covector and thus gets pulled back rather than pushed forward
wb = df.rotate_inv(r0, w0) # angular integral (angular velocity and rotation), rotate into object reference frame
tb = df.rotate_inv(r0, t0) # also rotate torques into local coordinates
# I^{-1} torque = angular acceleration and inv_inertia is always going to be in the object frame.
# So we need to rotate into that frame, and then back into global.
w1 = df.rotate(r0, wb + inv_inertia * tb * dt) # I^-1 * torque * dt., then go back into global coordinates
r1 = df.normalize(r0 + df.quat(w1, 0.0) * r0 * 0.5 * dt) # rotate around w1 by dt
df.store(rigid_x_new, tid, x1)
df.store(rigid_r_new, tid, r1)
df.store(rigid_v_new, tid, v1)
df.store(rigid_w_new, tid, w1)
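# Note on the quaternion update in the kernel above (illustrative restatement, not new code):
# for a unit quaternion r with world-frame angular velocity w, the kinematic identity
# dr/dt = 0.5 * quat(w, 0) * r gives the explicit Euler step used there,
# r1 = normalize(r0 + quat(w1, 0.0) * r0 * 0.5 * dt).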
@df.kernel
def eval_springs(x: df.tensor(df.float3),
v: df.tensor(df.float3),
spring_indices: df.tensor(int),
spring_rest_lengths: df.tensor(float),
spring_stiffness: df.tensor(float),
spring_damping: df.tensor(float),
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(spring_indices, tid * 2 + 0)
j = df.load(spring_indices, tid * 2 + 1)
ke = df.load(spring_stiffness, tid)
kd = df.load(spring_damping, tid)
rest = df.load(spring_rest_lengths, tid)
xi = df.load(x, i)
xj = df.load(x, j)
vi = df.load(v, i)
vj = df.load(v, j)
xij = xi - xj
vij = vi - vj
l = length(xij)
l_inv = 1.0 / l
# normalized spring direction
dir = xij * l_inv
c = l - rest
dcdt = dot(dir, vij)
# damping based on relative velocity.
fs = dir * (ke * c + kd * dcdt)
df.atomic_sub(f, i, fs)
df.atomic_add(f, j, fs)
@df.kernel
def eval_triangles(x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat22),
activation: df.tensor(float),
k_mu: float,
k_lambda: float,
k_damp: float,
k_drag: float,
k_lift: float,
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 3 + 0)
j = df.load(indices, tid * 3 + 1)
k = df.load(indices, tid * 3 + 2)
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
vp = df.load(v, i) # vel zero
vq = df.load(v, j) # vel one
vr = df.load(v, k) # vel two
qp = q - p # barycentric coordinates (centered at p)
rp = r - p
Dm = df.load(pose, tid)
inv_rest_area = df.determinant(Dm) * 2.0 # 1 / det(A) = det(A^-1)
rest_area = 1.0 / inv_rest_area
# scale stiffness coefficients to account for area
k_mu = k_mu * rest_area
k_lambda = k_lambda * rest_area
k_damp = k_damp * rest_area
# F = Xs*Xm^-1
f1 = qp * Dm[0, 0] + rp * Dm[1, 0]
f2 = qp * Dm[0, 1] + rp * Dm[1, 1]
#-----------------------------
    # St. Venant-Kirchhoff
# # Green strain, F'*F-I
# e00 = dot(f1, f1) - 1.0
# e10 = dot(f2, f1)
# e01 = dot(f1, f2)
# e11 = dot(f2, f2) - 1.0
# E = df.mat22(e00, e01,
# e10, e11)
# # local forces (deviatoric part)
# T = df.mul(E, df.transpose(Dm))
# # spatial forces, F*T
# fq = (f1*T[0,0] + f2*T[1,0])*k_mu*2.0
# fr = (f1*T[0,1] + f2*T[1,1])*k_mu*2.0
# alpha = 1.0
#-----------------------------
# Baraff & Witkin, note this model is not isotropic
# c1 = length(f1) - 1.0
# c2 = length(f2) - 1.0
# f1 = normalize(f1)*c1*k1
# f2 = normalize(f2)*c2*k1
# fq = f1*Dm[0,0] + f2*Dm[0,1]
# fr = f1*Dm[1,0] + f2*Dm[1,1]
#-----------------------------
# Neo-Hookean (with rest stability)
# force = mu*F*Dm'
fq = (f1 * Dm[0, 0] + f2 * Dm[0, 1]) * k_mu
fr = (f1 * Dm[1, 0] + f2 * Dm[1, 1]) * k_mu
alpha = 1.0 + k_mu / k_lambda
#-----------------------------
# Area Preservation
n = df.cross(qp, rp)
area = df.length(n) * 0.5
# actuation
act = df.load(activation, tid)
# J-alpha
c = area * inv_rest_area - alpha + act
# dJdx
n = df.normalize(n)
dcdq = df.cross(rp, n) * inv_rest_area * 0.5
dcdr = df.cross(n, qp) * inv_rest_area * 0.5
f_area = k_lambda * c
#-----------------------------
# Area Damping
dcdt = dot(dcdq, vq) + dot(dcdr, vr) - dot(dcdq + dcdr, vp)
f_damp = k_damp * dcdt
fq = fq + dcdq * (f_area + f_damp)
fr = fr + dcdr * (f_area + f_damp)
fp = fq + fr
#-----------------------------
# Lift + Drag
vmid = (vp + vr + vq) * 0.3333
vdir = df.normalize(vmid)
f_drag = vmid * (k_drag * area * df.abs(df.dot(n, vmid)))
f_lift = n * (k_lift * area * (1.57079 - df.acos(df.dot(n, vdir)))) * dot(vmid, vmid)
# note reversed sign due to atomic_add below.. need to write the unary op -
fp = fp - f_drag - f_lift
fq = fq + f_drag + f_lift
fr = fr + f_drag + f_lift
# apply forces
df.atomic_add(f, i, fp)
df.atomic_sub(f, j, fq)
df.atomic_sub(f, k, fr)
@df.func
def triangle_closest_point_barycentric(a: df.float3, b: df.float3, c: df.float3, p: df.float3):
ab = b - a
ac = c - a
ap = p - a
d1 = df.dot(ab, ap)
d2 = df.dot(ac, ap)
if (d1 <= 0.0 and d2 <= 0.0):
return float3(1.0, 0.0, 0.0)
bp = p - b
d3 = df.dot(ab, bp)
d4 = df.dot(ac, bp)
if (d3 >= 0.0 and d4 <= d3):
return float3(0.0, 1.0, 0.0)
vc = d1 * d4 - d3 * d2
v = d1 / (d1 - d3)
if (vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0):
return float3(1.0 - v, v, 0.0)
cp = p - c
d5 = dot(ab, cp)
d6 = dot(ac, cp)
if (d6 >= 0.0 and d5 <= d6):
return float3(0.0, 0.0, 1.0)
vb = d5 * d2 - d1 * d6
w = d2 / (d2 - d6)
if (vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0):
return float3(1.0 - w, 0.0, w)
va = d3 * d6 - d5 * d4
w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
if (va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0):
return float3(0.0, w, 1.0 - w)
denom = 1.0 / (va + vb + vc)
v = vb * denom
w = vc * denom
return float3(1.0 - v - w, v, w)
@df.kernel
def eval_triangles_contact(
# idx : df.tensor(int), # list of indices for colliding particles
num_particles: int, # size of particles
x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat22),
activation: df.tensor(float),
k_mu: float,
k_lambda: float,
k_damp: float,
k_drag: float,
k_lift: float,
f: df.tensor(df.float3)):
tid = df.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# index = df.load(idx, tid)
pos = df.load(x, particle_no) # at the moment, just one particle
# vel0 = df.load(v, 0)
i = df.load(indices, face_no * 3 + 0)
j = df.load(indices, face_no * 3 + 1)
k = df.load(indices, face_no * 3 + 2)
if (i == particle_no or j == particle_no or k == particle_no):
return
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
# vp = df.load(v, i) # vel zero
# vq = df.load(v, j) # vel one
# vr = df.load(v, k) # vel two
# qp = q-p # barycentric coordinates (centered at p)
# rp = r-p
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest
dist = df.dot(diff, diff)
n = df.normalize(diff)
c = df.min(dist - 0.01, 0.0) # 0 unless within 0.01 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
fn = n * c * 1e5
df.atomic_sub(f, particle_no, fn)
# # apply forces (could do - f / 3 here)
df.atomic_add(f, i, fn * bary[0])
df.atomic_add(f, j, fn * bary[1])
df.atomic_add(f, k, fn * bary[2])
@df.kernel
def eval_triangles_rigid_contacts(
num_particles: int, # number of particles (size of contact_point)
x: df.tensor(df.float3), # position of particles
v: df.tensor(df.float3),
indices: df.tensor(int), # triangle indices
rigid_x: df.tensor(df.float3), # rigid body positions
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3), # position of contact points relative to body
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
# rigid_f : df.tensor(df.float3),
# rigid_t : df.tensor(df.float3),
tri_f: df.tensor(df.float3)):
tid = df.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# -----------------------
# load rigid body point
c_body = df.load(contact_body, particle_no)
c_point = df.load(contact_point, particle_no)
c_dist = df.load(contact_dist, particle_no)
c_mat = df.load(contact_mat, particle_no)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
x0 = df.load(rigid_x, c_body) # position of colliding body
r0 = df.load(rigid_r, c_body) # orientation of colliding body
v0 = df.load(rigid_v, c_body)
w0 = df.load(rigid_w, c_body)
# transform point to world space
pos = x0 + df.rotate(r0, c_point)
# use x0 as center, everything is offset from center of mass
# moment arm
r = pos - x0 # basically just c_point in the new coordinates
rhat = df.normalize(r)
pos = pos + rhat * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
# contact point velocity
dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point.
# -----------------------
# load triangle
i = df.load(indices, face_no * 3 + 0)
j = df.load(indices, face_no * 3 + 1)
k = df.load(indices, face_no * 3 + 2)
p = df.load(x, i) # point zero
q = df.load(x, j) # point one
r = df.load(x, k) # point two
vp = df.load(v, i) # vel zero
vq = df.load(v, j) # vel one
vr = df.load(v, k) # vel two
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest # vector from tri to point
dist = df.dot(diff, diff) # squared distance
n = df.normalize(diff) # points into the object
c = df.min(dist - 0.05, 0.0) # 0 unless within 0.05 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
# fn = n * c * 1e6 # points towards cloth (both n and c are negative)
# df.atomic_sub(tri_f, particle_no, fn)
fn = c * ke # normal force (restitution coefficient * how far inside for ground) (negative)
vtri = vp * bary[0] + vq * bary[1] + vr * bary[2] # bad approximation for centroid velocity
vrel = vtri - dpdt
vn = dot(n, vrel) # velocity component of rigid in negative normal direction
vt = vrel - n * vn # velocity component not in normal direction
# contact damping
fd = 0.0 - df.max(vn, 0.0) * kd * df.step(c) # again, negative, into the ground
# # viscous friction
# ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd)
upper = 0.0 - lower # workaround because no unary ops yet
nx = cross(n, float3(0.0, 0.0, 1.0)) # basis vectors for tangent
nz = cross(n, float3(1.0, 0.0, 0.0))
vx = df.clamp(dot(nx * kf, vt), lower, upper)
vz = df.clamp(dot(nz * kf, vt), lower, upper)
ft = (nx * vx + nz * vz) * (0.0 - df.step(c)) # df.float3(vx, 0.0, vz)*df.step(c)
# # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
# #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = n * (fn + fd) + ft
df.atomic_add(tri_f, i, f_total * bary[0])
df.atomic_add(tri_f, j, f_total * bary[1])
df.atomic_add(tri_f, k, f_total * bary[2])
@df.kernel
def eval_bending(
x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), rest: df.tensor(float), ke: float, kd: float, f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
rest_angle = df.load(rest, tid)
x1 = df.load(x, i)
x2 = df.load(x, j)
x3 = df.load(x, k)
x4 = df.load(x, l)
v1 = df.load(v, i)
v2 = df.load(v, j)
v3 = df.load(v, k)
v4 = df.load(v, l)
n1 = df.cross(x3 - x1, x4 - x1) # normal to face 1
n2 = df.cross(x4 - x2, x3 - x2) # normal to face 2
n1_length = df.length(n1)
n2_length = df.length(n2)
rcp_n1 = 1.0 / n1_length
rcp_n2 = 1.0 / n2_length
cos_theta = df.dot(n1, n2) * rcp_n1 * rcp_n2
n1 = n1 * rcp_n1 * rcp_n1
n2 = n2 * rcp_n2 * rcp_n2
e = x4 - x3
e_hat = df.normalize(e)
e_length = df.length(e)
s = df.sign(df.dot(df.cross(n2, n1), e_hat))
angle = df.acos(cos_theta) * s
d1 = n1 * e_length
d2 = n2 * e_length
d3 = n1 * df.dot(x1 - x4, e_hat) + n2 * df.dot(x2 - x4, e_hat)
d4 = n1 * df.dot(x3 - x1, e_hat) + n2 * df.dot(x3 - x2, e_hat)
# elastic
f_elastic = ke * (angle - rest_angle)
# damping
f_damp = kd * (df.dot(d1, v1) + df.dot(d2, v2) + df.dot(d3, v3) + df.dot(d4, v4))
# total force, proportional to edge length
f_total = 0.0 - e_length * (f_elastic + f_damp)
df.atomic_add(f, i, d1 * f_total)
df.atomic_add(f, j, d2 * f_total)
df.atomic_add(f, k, d3 * f_total)
df.atomic_add(f, l, d4 * f_total)
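# FEM forces for tetrahedral elements. The deformation gradient F = Ds * Dm is built from the
# current edge matrix Ds and the pre-computed inverse rest pose Dm, then a stable Neo-Hookean
# model (deviatoric term driven by Ic = tr(F^T F), hydrostatic term driven by det(F), see
# [Smith et al 2018]) with per-element (k_mu, k_lambda, k_damp) materials accumulates forces
# on the four vertices.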
@df.kernel
def eval_tetrahedra(x: df.tensor(df.float3),
v: df.tensor(df.float3),
indices: df.tensor(int),
pose: df.tensor(df.mat33),
activation: df.tensor(float),
materials: df.tensor(float),
f: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
act = df.load(activation, tid)
k_mu = df.load(materials, tid * 3 + 0)
k_lambda = df.load(materials, tid * 3 + 1)
k_damp = df.load(materials, tid * 3 + 2)
x0 = df.load(x, i)
x1 = df.load(x, j)
x2 = df.load(x, k)
x3 = df.load(x, l)
v0 = df.load(v, i)
v1 = df.load(v, j)
v2 = df.load(v, k)
v3 = df.load(v, l)
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v10 = v1 - v0
v20 = v2 - v0
v30 = v3 - v0
Ds = df.mat33(x10, x20, x30)
Dm = df.load(pose, tid)
inv_rest_volume = df.determinant(Dm) * 6.0
rest_volume = 1.0 / inv_rest_volume
alpha = 1.0 + k_mu / k_lambda - k_mu / (4.0 * k_lambda)
# scale stiffness coefficients to account for area
k_mu = k_mu * rest_volume
k_lambda = k_lambda * rest_volume
k_damp = k_damp * rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
dFdt = df.mat33(v10, v20, v30) * Dm
col1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
col2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
col3 = df.float3(F[0, 2], F[1, 2], F[2, 2])
#-----------------------------
# Neo-Hookean (with rest stability [Smith et al 2018])
Ic = dot(col1, col1) + dot(col2, col2) + dot(col3, col3)
# deviatoric part
P = F * k_mu * (1.0 - 1.0 / (Ic + 1.0)) + dFdt * k_damp
H = P * df.transpose(Dm)
f1 = df.float3(H[0, 0], H[1, 0], H[2, 0])
f2 = df.float3(H[0, 1], H[1, 1], H[2, 1])
f3 = df.float3(H[0, 2], H[1, 2], H[2, 2])
#-----------------------------
# C_spherical
# r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# r_s_inv = 1.0/r_s
# C = r_s - df.sqrt(3.0)
# dCdx = F*df.transpose(Dm)*r_s_inv
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
#----------------------------
# C_D
# r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# C = r_s*r_s - 3.0
# dCdx = F*df.transpose(Dm)*2.0
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
# hydrostatic part
J = df.determinant(F)
#print(J)
s = inv_rest_volume / 6.0
dJdx1 = df.cross(x20, x30) * s
dJdx2 = df.cross(x30, x10) * s
dJdx3 = df.cross(x10, x20) * s
f_volume = (J - alpha + act) * k_lambda
f_damp = (df.dot(dJdx1, v1) + df.dot(dJdx2, v2) + df.dot(dJdx3, v3)) * k_damp
f_total = f_volume + f_damp
f1 = f1 + dJdx1 * f_total
f2 = f2 + dJdx2 * f_total
f3 = f3 + dJdx3 * f_total
f0 = (f1 + f2 + f3) * (0.0 - 1.0)
# apply forces
df.atomic_sub(f, i, f0)
df.atomic_sub(f, j, f1)
df.atomic_sub(f, k, f2)
df.atomic_sub(f, l, f3)
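# Penalty-based contact between free particles and the ground plane (y = 0, with a 0.01
# thickness). Normal force fn = ke * penetration, damping kd on the inward normal velocity,
# and a "box" approximation of Coulomb friction clamped by mu * penetration * ke.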
@df.kernel
def eval_contacts(x: df.tensor(df.float3), v: df.tensor(df.float3), ke: float, kd: float, kf: float, mu: float, f: df.tensor(df.float3)):
tid = df.tid() # this just handles contact of particles with the ground plane, nothing else.
x0 = df.load(x, tid)
v0 = df.load(v, tid)
n = float3(0.0, 1.0, 0.0)   # ground plane normal is always +Y (the ground is the y = 0 plane)
c = df.min(dot(n, x0) - 0.01, 0.0) # 0 unless within 0.01 of surface
#c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
vn = dot(n, v0)
vt = v0 - n * vn
fn = n * c * ke
# contact damping
fd = n * df.min(vn, 0.0) * kd
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * c * ke
upper = 0.0 - lower
vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
ftotal = fn + (fd + ft) * df.step(c)
df.atomic_sub(f, tid, ftotal)
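# Signed distance functions (and their gradients) for the primitive collision shapes.
# These are evaluated in the shape's local frame by eval_soft_contacts below.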
@df.func
def sphere_sdf(center: df.float3, radius: float, p: df.float3):
return df.length(p-center) - radius
@df.func
def sphere_sdf_grad(center: df.float3, radius: float, p: df.float3):
return df.normalize(p-center)
@df.func
def box_sdf(upper: df.float3, p: df.float3):
# adapted from https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
qx = abs(p[0])-upper[0]
qy = abs(p[1])-upper[1]
qz = abs(p[2])-upper[2]
e = df.float3(df.max(qx, 0.0), df.max(qy, 0.0), df.max(qz, 0.0))
return df.length(e) + df.min(df.max(qx, df.max(qy, qz)), 0.0)
@df.func
def box_sdf_grad(upper: df.float3, p: df.float3):
qx = abs(p[0])-upper[0]
qy = abs(p[1])-upper[1]
qz = abs(p[2])-upper[2]
# exterior case
if (qx > 0.0 or qy > 0.0 or qz > 0.0):
x = df.clamp(p[0], 0.0-upper[0], upper[0])
y = df.clamp(p[1], 0.0-upper[1], upper[1])
z = df.clamp(p[2], 0.0-upper[2], upper[2])
return df.normalize(p - df.float3(x, y, z))
sx = df.sign(p[0])
sy = df.sign(p[1])
sz = df.sign(p[2])
# x projection
if (qx > qy and qx > qz):
return df.float3(sx, 0.0, 0.0)
# y projection
if (qy > qx and qy > qz):
return df.float3(0.0, sy, 0.0)
# z projection
if (qz > qx and qz > qy):
return df.float3(0.0, 0.0, sz)
# fallback for exact ties between axes, so every interior code path returns a gradient
return df.float3(sx, 0.0, 0.0)
@df.func
def capsule_sdf(radius: float, half_width: float, p: df.float3):
if (p[0] > half_width):
return length(df.float3(p[0] - half_width, p[1], p[2])) - radius
if (p[0] < 0.0 - half_width):
return length(df.float3(p[0] + half_width, p[1], p[2])) - radius
return df.length(df.float3(0.0, p[1], p[2])) - radius
@df.func
def capsule_sdf_grad(radius: float, half_width: float, p: df.float3):
if (p[0] > half_width):
return normalize(df.float3(p[0] - half_width, p[1], p[2]))
if (p[0] < 0.0 - half_width):
return normalize(df.float3(p[0] + half_width, p[1], p[2]))
return normalize(df.float3(0.0, p[1], p[2]))
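# Contact between particles and rigid-body collision shapes. One thread per (shape, particle)
# pair: the particle is transformed into the shape's local frame, the SDF matching the shape's
# geo_type is evaluated (0 = sphere, 1 = box, 2 = capsule), and penalty + friction forces are
# applied to the particle and, if the shape is attached to a body (rigid_index >= 0), the
# equal-and-opposite wrench is applied to that body.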
@df.kernel
def eval_soft_contacts(
num_particles: int,
particle_x: df.tensor(df.float3),
particle_v: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_v_sc: df.tensor(df.spatial_vector),
shape_X_co: df.tensor(df.spatial_transform),
shape_body: df.tensor(int),
shape_geo_type: df.tensor(int),
shape_geo_src: df.tensor(int),
shape_geo_scale: df.tensor(df.float3),
shape_materials: df.tensor(float),
ke: float,
kd: float,
kf: float,
mu: float,
# outputs
particle_f: df.tensor(df.float3),
body_f: df.tensor(df.spatial_vector)):
tid = df.tid()
shape_index = tid // num_particles # which shape
particle_index = tid % num_particles # which particle
rigid_index = df.load(shape_body, shape_index)
px = df.load(particle_x, particle_index)
pv = df.load(particle_v, particle_index)
#center = float3(0.0, 0.5, 0.0)
#radius = 0.25
#margin = 0.01
# sphere collider
# c = df.min(sphere_sdf(center, radius, x0)-margin, 0.0)
# n = sphere_sdf_grad(center, radius, x0)
# box collider
#c = df.min(box_sdf(df.float3(radius, radius, radius), x0-center)-margin, 0.0)
#n = box_sdf_grad(df.float3(radius, radius, radius), x0-center)
X_sc = df.spatial_transform_identity()
if (rigid_index >= 0):
X_sc = df.load(body_X_sc, rigid_index)
X_co = df.load(shape_X_co, shape_index)
X_so = df.spatial_transform_multiply(X_sc, X_co)
X_os = df.spatial_transform_inverse(X_so)
# transform particle position to shape local space
x_local = df.spatial_transform_point(X_os, px)
# geo description
geo_type = df.load(shape_geo_type, shape_index)
geo_scale = df.load(shape_geo_scale, shape_index)
margin = 0.01
# evaluate shape sdf
c = 0.0
n = df.float3(0.0, 0.0, 0.0)
# GEO_SPHERE (0)
if (geo_type == 0):
c = df.min(sphere_sdf(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, sphere_sdf_grad(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local))
# GEO_BOX (1)
if (geo_type == 1):
c = df.min(box_sdf(geo_scale, x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, box_sdf_grad(geo_scale, x_local))
# GEO_CAPSULE (2)
if (geo_type == 2):
c = df.min(capsule_sdf(geo_scale[0], geo_scale[1], x_local)-margin, 0.0)
n = df.spatial_transform_vector(X_so, capsule_sdf_grad(geo_scale[0], geo_scale[1], x_local))
# rigid velocity
rigid_v_s = df.spatial_vector()
if (rigid_index >= 0):
rigid_v_s = df.load(body_v_sc, rigid_index)
rigid_w = df.spatial_top(rigid_v_s)
rigid_v = df.spatial_bottom(rigid_v_s)
# compute the body velocity at the particle position
bv = rigid_v + df.cross(rigid_w, px)
# relative velocity
v = pv - bv
# decompose relative velocity
vn = dot(n, v)
vt = v - n * vn
# contact elastic
fn = n * c * ke
# contact damping
fd = n * df.min(vn, 0.0) * kd
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * c * ke
upper = 0.0 - lower
vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = fn + (fd + ft) * df.step(c)
t_total = df.cross(px, f_total)
df.atomic_sub(particle_f, particle_index, f_total)
if (rigid_index >= 0):
df.atomic_sub(body_f, rigid_index, df.spatial_vector(t_total, f_total))
@df.kernel
def eval_rigid_contacts(rigid_x: df.tensor(df.float3),
rigid_r: df.tensor(df.quat),
rigid_v: df.tensor(df.float3),
rigid_w: df.tensor(df.float3),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3),
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
rigid_f: df.tensor(df.float3),
rigid_t: df.tensor(df.float3)):
tid = df.tid()
c_body = df.load(contact_body, tid)
c_point = df.load(contact_point, tid)
c_dist = df.load(contact_dist, tid)
c_mat = df.load(contact_mat, tid)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
x0 = df.load(rigid_x, c_body) # position of colliding body
r0 = df.load(rigid_r, c_body) # orientation of colliding body
v0 = df.load(rigid_v, c_body)
w0 = df.load(rigid_w, c_body)
n = float3(0.0, 1.0, 0.0)
# transform point to world space
p = x0 + df.rotate(r0, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
# use x0 as center, everything is offset from center of mass
# moment arm
r = p - x0 # basically just c_point in the new coordinates
# contact point velocity
dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point.
# check ground contact
c = df.min(dot(n, p), 0.0) # check if we're inside the ground
vn = dot(n, dpdt) # velocity component out of the ground
vt = dpdt - n * vn # velocity component not into the ground
fn = c * ke # normal force (restitution coefficient * how far inside for ground)
# contact damping
fd = df.min(vn, 0.0) * kd * df.step(c) # again, velocity into the ground, negative
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd) # negative
upper = 0.0 - lower # positive, workaround for no unary ops
vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
ft = df.float3(vx, 0.0, vz) * df.step(c)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
#ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)
f_total = n * (fn + fd) + ft
t_total = df.cross(r, f_total)
df.atomic_sub(rigid_f, c_body, f_total)
df.atomic_sub(rigid_t, c_body, t_total)
# # Frank & Park definition 3.20, pg 100
@df.func
def spatial_transform_twist(t: df.spatial_transform, x: df.spatial_vector):
q = spatial_transform_get_rotation(t)
p = spatial_transform_get_translation(t)
w = spatial_top(x)
v = spatial_bottom(x)
w = rotate(q, w)
v = rotate(q, v) + cross(p, w)
return spatial_vector(w, v)
@df.func
def spatial_transform_wrench(t: df.spatial_transform, x: df.spatial_vector):
q = spatial_transform_get_rotation(t)
p = spatial_transform_get_translation(t)
w = spatial_top(x)
v = spatial_bottom(x)
v = rotate(q, v)
w = rotate(q, w) + cross(p, v)
return spatial_vector(w, v)
@df.func
def spatial_transform_inverse(t: df.spatial_transform):
p = spatial_transform_get_translation(t)
q = spatial_transform_get_rotation(t)
q_inv = inverse(q)
return spatial_transform(rotate(q_inv, p)*(0.0 - 1.0), q_inv)
# computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
@df.func
def spatial_transform_inertia(t: df.spatial_transform, I: df.spatial_matrix):
t_inv = spatial_transform_inverse(t)
q = spatial_transform_get_rotation(t_inv)
p = spatial_transform_get_translation(t_inv)
r1 = rotate(q, float3(1.0, 0.0, 0.0))
r2 = rotate(q, float3(0.0, 1.0, 0.0))
r3 = rotate(q, float3(0.0, 0.0, 1.0))
R = mat33(r1, r2, r3)
S = mul(skew(p), R)
T = spatial_adjoint(R, S)
return mul(mul(transpose(T), I), T)
@df.kernel
def eval_rigid_contacts_art(
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
contact_body: df.tensor(int),
contact_point: df.tensor(df.float3),
contact_dist: df.tensor(float),
contact_mat: df.tensor(int),
materials: df.tensor(float),
body_f_s: df.tensor(df.spatial_vector)):
tid = df.tid()
c_body = df.load(contact_body, tid)
c_point = df.load(contact_point, tid)
c_dist = df.load(contact_dist, tid)
c_mat = df.load(contact_mat, tid)
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient
kd = df.load(materials, c_mat * 4 + 1) # damping coefficient
kf = df.load(materials, c_mat * 4 + 2) # friction coefficient
mu = df.load(materials, c_mat * 4 + 3) # coulomb friction
X_s = df.load(body_X_s, c_body) # position of colliding body
v_s = df.load(body_v_s, c_body) # orientation of colliding body
n = float3(0.0, 1.0, 0.0)
# transform point to world space
p = df.spatial_transform_point(X_s, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
w = df.spatial_top(v_s)
v = df.spatial_bottom(v_s)
# contact point velocity
dpdt = v + df.cross(w, p)
# check ground contact
c = df.dot(n, p) # check if we're inside the ground
if (c >= 0.0):
return
vn = dot(n, dpdt) # velocity component out of the ground
vt = dpdt - n * vn # velocity component not into the ground
fn = c * ke # normal force (restitution coefficient * how far inside for ground)
# contact damping
fd = df.min(vn, 0.0) * kd * df.step(c) * (0.0 - c)
# viscous friction
#ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd) # negative
upper = 0.0 - lower # positive, workaround for no unary ops
vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) * df.step(c)
f_total = n * (fn + fd) + ft
t_total = df.cross(p, f_total)
df.atomic_add(body_f_s, c_body, df.spatial_vector(t_total, f_total))
@df.func
def compute_muscle_force(
i: int,
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
muscle_links: df.tensor(int),
muscle_points: df.tensor(df.float3),
muscle_activation: float,
body_f_s: df.tensor(df.spatial_vector)):
link_0 = df.load(muscle_links, i)
link_1 = df.load(muscle_links, i+1)
if (link_0 == link_1):
return 0
r_0 = df.load(muscle_points, i)
r_1 = df.load(muscle_points, i+1)
xform_0 = df.load(body_X_s, link_0)
xform_1 = df.load(body_X_s, link_1)
pos_0 = df.spatial_transform_point(xform_0, r_0)
pos_1 = df.spatial_transform_point(xform_1, r_1)
n = df.normalize(pos_1 - pos_0)
# todo: add passive elastic and viscosity terms
f = n * muscle_activation
df.atomic_sub(body_f_s, link_0, df.spatial_vector(df.cross(pos_0, f), f))
df.atomic_add(body_f_s, link_1, df.spatial_vector(df.cross(pos_1, f), f))
return 0
@df.kernel
def eval_muscles(
body_X_s: df.tensor(df.spatial_transform),
body_v_s: df.tensor(df.spatial_vector),
muscle_start: df.tensor(int),
muscle_params: df.tensor(float),
muscle_links: df.tensor(int),
muscle_points: df.tensor(df.float3),
muscle_activation: df.tensor(float),
# output
body_f_s: df.tensor(df.spatial_vector)):
tid = df.tid()
m_start = df.load(muscle_start, tid)
m_end = df.load(muscle_start, tid+1) - 1
activation = df.load(muscle_activation, tid)
for i in range(m_start, m_end):
compute_muscle_force(i, body_X_s, body_v_s, muscle_links, muscle_points, activation, body_f_s)
# compute transform across a joint
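# Joint type codes used throughout the jcalc_* helpers:
#   0 = prismatic, 1 = revolute, 2 = ball, 3 = fixed, 4 = free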
@df.func
def jcalc_transform(type: int, axis: df.float3, joint_q: df.tensor(float), start: int):
# prismatic
if (type == 0):
q = df.load(joint_q, start)
X_jc = spatial_transform(axis * q, quat_identity())
return X_jc
# revolute
if (type == 1):
q = df.load(joint_q, start)
X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat_from_axis_angle(axis, q))
return X_jc
# ball
if (type == 2):
qx = df.load(joint_q, start + 0)
qy = df.load(joint_q, start + 1)
qz = df.load(joint_q, start + 2)
qw = df.load(joint_q, start + 3)
X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat(qx, qy, qz, qw))
return X_jc
# fixed
if (type == 3):
X_jc = spatial_transform_identity()
return X_jc
# free
if (type == 4):
px = df.load(joint_q, start + 0)
py = df.load(joint_q, start + 1)
pz = df.load(joint_q, start + 2)
qx = df.load(joint_q, start + 3)
qy = df.load(joint_q, start + 4)
qz = df.load(joint_q, start + 5)
qw = df.load(joint_q, start + 6)
X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw))
return X_jc
# default case
return spatial_transform_identity()
# compute motion subspace and velocity for a joint
@df.func
def jcalc_motion(type: int, axis: df.float3, X_sc: df.spatial_transform, joint_S_s: df.tensor(df.spatial_vector), joint_qd: df.tensor(float), joint_start: int):
# prismatic
if (type == 0):
S_s = df.spatial_transform_twist(X_sc, spatial_vector(float3(0.0, 0.0, 0.0), axis))
v_j_s = S_s * df.load(joint_qd, joint_start)
df.store(joint_S_s, joint_start, S_s)
return v_j_s
# revolute
if (type == 1):
S_s = df.spatial_transform_twist(X_sc, spatial_vector(axis, float3(0.0, 0.0, 0.0)))
v_j_s = S_s * df.load(joint_qd, joint_start)
df.store(joint_S_s, joint_start, S_s)
return v_j_s
# ball
if (type == 2):
w = float3(df.load(joint_qd, joint_start + 0),
df.load(joint_qd, joint_start + 1),
df.load(joint_qd, joint_start + 2))
S_0 = df.spatial_transform_twist(X_sc, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
S_1 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
S_2 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
# write motion subspace
df.store(joint_S_s, joint_start + 0, S_0)
df.store(joint_S_s, joint_start + 1, S_1)
df.store(joint_S_s, joint_start + 2, S_2)
return S_0*w[0] + S_1*w[1] + S_2*w[2]
# fixed
if (type == 3):
return spatial_vector()
# free
if (type == 4):
v_j_s = spatial_vector(df.load(joint_qd, joint_start + 0),
df.load(joint_qd, joint_start + 1),
df.load(joint_qd, joint_start + 2),
df.load(joint_qd, joint_start + 3),
df.load(joint_qd, joint_start + 4),
df.load(joint_qd, joint_start + 5))
# write motion subspace
df.store(joint_S_s, joint_start + 0, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 1, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 2, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 3, spatial_vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
df.store(joint_S_s, joint_start + 4, spatial_vector(0.0, 0.0, 0.0, 0.0, 1.0, 0.0))
df.store(joint_S_s, joint_start + 5, spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
return v_j_s
# default case
return spatial_vector()
# # compute the velocity across a joint
# #@df.func
# def jcalc_velocity(self, type, S_s, joint_qd, start):
# # prismatic
# if (type == 0):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # revolute
# if (type == 1):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # fixed
# if (type == 2):
# v_j_s = spatial_vector()
# return v_j_s
# # free
# if (type == 3):
# v_j_s = S_s[start+0]*joint_qd[start+0]
# v_j_s += S_s[start+1]*joint_qd[start+1]
# v_j_s += S_s[start+2]*joint_qd[start+2]
# v_j_s += S_s[start+3]*joint_qd[start+3]
# v_j_s += S_s[start+4]*joint_qd[start+4]
# v_j_s += S_s[start+5]*joint_qd[start+5]
# return v_j_s
# computes joint space forces/torques in tau
@df.func
def jcalc_tau(
type: int,
target_k_e: float,
target_k_d: float,
limit_k_e: float,
limit_k_d: float,
joint_S_s: df.tensor(spatial_vector),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
coord_start: int,
dof_start: int,
body_f_s: spatial_vector,
tau: df.tensor(float)):
# prismatic / revolute
if (type == 0 or type == 1):
S_s = df.load(joint_S_s, dof_start)
q = df.load(joint_q, coord_start)
qd = df.load(joint_qd, dof_start)
act = df.load(joint_act, dof_start)
target = df.load(joint_target, coord_start)
lower = df.load(joint_limit_lower, coord_start)
upper = df.load(joint_limit_upper, coord_start)
limit_f = 0.0
# compute limit forces, damping only active when limit is violated
if (q < lower):
limit_f = limit_k_e*(lower-q)
if (q > upper):
limit_f = limit_k_e*(upper-q)
damping_f = (0.0 - limit_k_d) * qd
# total torque / force on the joint
t = 0.0 - spatial_dot(S_s, body_f_s) - target_k_e*(q - target) - target_k_d*qd + act + limit_f + damping_f
df.store(tau, dof_start, t)
# ball
if (type == 2):
# elastic term.. this is proportional to the
# imaginary part of the relative quaternion
r_j = float3(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2))
# angular velocity for damping
w_j = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
for i in range(0, 3):
S_s = df.load(joint_S_s, dof_start+i)
w = w_j[i]
r = r_j[i]
df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s) - w*target_k_d - r*target_k_e)
# fixed
# if (type == 3)
# pass
# free
if (type == 4):
for i in range(0, 6):
S_s = df.load(joint_S_s, dof_start+i)
df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s))
return 0
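# Per-joint symplectic Euler update: velocities are advanced first using the joint-space
# accelerations, then coordinates are advanced with the new velocities. Ball and free joints
# integrate their orientation quaternion via dq/dt = 0.5 * quat(w, 0) * q and renormalize.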
@df.func
def jcalc_integrate(
type: int,
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_qdd: df.tensor(float),
coord_start: int,
dof_start: int,
dt: float,
joint_q_new: df.tensor(float),
joint_qd_new: df.tensor(float)):
# prismatic / revolute
if (type == 0 or type == 1):
qdd = df.load(joint_qdd, dof_start)
qd = df.load(joint_qd, dof_start)
q = df.load(joint_q, coord_start)
qd_new = qd + qdd*dt
q_new = q + qd_new*dt
df.store(joint_qd_new, dof_start, qd_new)
df.store(joint_q_new, coord_start, q_new)
# ball
if (type == 2):
m_j = float3(df.load(joint_qdd, dof_start + 0),
df.load(joint_qdd, dof_start + 1),
df.load(joint_qdd, dof_start + 2))
w_j = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
r_j = quat(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2),
df.load(joint_q, coord_start + 3))
# symplectic Euler
w_j_new = w_j + m_j*dt
drdt_j = mul(quat(w_j_new, 0.0), r_j) * 0.5
# new orientation (normalized)
r_j_new = normalize(r_j + drdt_j * dt)
# update joint coords
df.store(joint_q_new, coord_start + 0, r_j_new[0])
df.store(joint_q_new, coord_start + 1, r_j_new[1])
df.store(joint_q_new, coord_start + 2, r_j_new[2])
df.store(joint_q_new, coord_start + 3, r_j_new[3])
# update joint vel
df.store(joint_qd_new, dof_start + 0, w_j_new[0])
df.store(joint_qd_new, dof_start + 1, w_j_new[1])
df.store(joint_qd_new, dof_start + 2, w_j_new[2])
# fixed joint
#if (type == 3)
# pass
# free joint
if (type == 4):
# dofs: qd = (omega_x, omega_y, omega_z, vel_x, vel_y, vel_z)
# coords: q = (trans_x, trans_y, trans_z, quat_x, quat_y, quat_z, quat_w)
# angular and linear acceleration
m_s = float3(df.load(joint_qdd, dof_start + 0),
df.load(joint_qdd, dof_start + 1),
df.load(joint_qdd, dof_start + 2))
a_s = float3(df.load(joint_qdd, dof_start + 3),
df.load(joint_qdd, dof_start + 4),
df.load(joint_qdd, dof_start + 5))
# angular and linear velocity
w_s = float3(df.load(joint_qd, dof_start + 0),
df.load(joint_qd, dof_start + 1),
df.load(joint_qd, dof_start + 2))
v_s = float3(df.load(joint_qd, dof_start + 3),
df.load(joint_qd, dof_start + 4),
df.load(joint_qd, dof_start + 5))
# symplectic Euler
w_s = w_s + m_s*dt
v_s = v_s + a_s*dt
# translation of origin
p_s = float3(df.load(joint_q, coord_start + 0),
df.load(joint_q, coord_start + 1),
df.load(joint_q, coord_start + 2))
# linear vel of origin (note q/qd switch order of linear angular elements)
# note we are converting the body twist in the space frame (w_s, v_s) to compute center of mass velocity
dpdt_s = v_s + cross(w_s, p_s)
# quat and quat derivative
r_s = quat(df.load(joint_q, coord_start + 3),
df.load(joint_q, coord_start + 4),
df.load(joint_q, coord_start + 5),
df.load(joint_q, coord_start + 6))
drdt_s = mul(quat(w_s, 0.0), r_s) * 0.5
# new orientation (normalized)
p_s_new = p_s + dpdt_s * dt
r_s_new = normalize(r_s + drdt_s * dt)
# update transform
df.store(joint_q_new, coord_start + 0, p_s_new[0])
df.store(joint_q_new, coord_start + 1, p_s_new[1])
df.store(joint_q_new, coord_start + 2, p_s_new[2])
df.store(joint_q_new, coord_start + 3, r_s_new[0])
df.store(joint_q_new, coord_start + 4, r_s_new[1])
df.store(joint_q_new, coord_start + 5, r_s_new[2])
df.store(joint_q_new, coord_start + 6, r_s_new[3])
# update joint_twist
df.store(joint_qd_new, dof_start + 0, w_s[0])
df.store(joint_qd_new, dof_start + 1, w_s[1])
df.store(joint_qd_new, dof_start + 2, w_s[2])
df.store(joint_qd_new, dof_start + 3, v_s[0])
df.store(joint_qd_new, dof_start + 4, v_s[1])
df.store(joint_qd_new, dof_start + 5, v_s[2])
return 0
@df.func
def compute_link_transform(i: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_X_pj: df.tensor(df.spatial_transform),
joint_X_cm: df.tensor(df.spatial_transform),
joint_axis: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform)):
# parent transform
parent = load(joint_parent, i)
# parent transform in spatial coordinates
X_sp = spatial_transform_identity()
if (parent >= 0):
X_sp = load(body_X_sc, parent)
type = load(joint_type, i)
axis = load(joint_axis, i)
coord_start = load(joint_q_start, i)
dof_start = load(joint_qd_start, i)
# compute transform across joint
X_jc = jcalc_transform(type, axis, joint_q, coord_start)
X_pj = load(joint_X_pj, i)
X_sc = spatial_transform_multiply(X_sp, spatial_transform_multiply(X_pj, X_jc))
# compute transform of center of mass
X_cm = load(joint_X_cm, i)
X_sm = spatial_transform_multiply(X_sc, X_cm)
# store geometry transforms
store(body_X_sc, i, X_sc)
store(body_X_sm, i, X_sm)
return 0
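# Forward kinematics: one thread per articulation walks its links in index order and composes
# X_sc = X_sp * X_pj * X_jc for each joint. Links are assumed to be stored so that a parent
# always precedes its children, which lets a single forward pass resolve the whole chain.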
@df.kernel
def eval_rigid_fk(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_X_pj: df.tensor(df.spatial_transform),
joint_X_cm: df.tensor(df.spatial_transform),
joint_axis: df.tensor(df.float3),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
for i in range(start, end):
compute_link_transform(i,
joint_type,
joint_parent,
joint_q_start,
joint_qd_start,
joint_q,
joint_X_pj,
joint_X_cm,
joint_axis,
body_X_sc,
body_X_sm)
@df.func
def compute_link_velocity(i: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_qd: df.tensor(float),
joint_axis: df.tensor(df.float3),
body_I_m: df.tensor(df.spatial_matrix),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform),
joint_X_pj: df.tensor(df.spatial_transform),
gravity: df.tensor(df.float3),
# outputs
joint_S_s: df.tensor(df.spatial_vector),
body_I_s: df.tensor(df.spatial_matrix),
body_v_s: df.tensor(df.spatial_vector),
body_f_s: df.tensor(df.spatial_vector),
body_a_s: df.tensor(df.spatial_vector)):
type = df.load(joint_type, i)
axis = df.load(joint_axis, i)
parent = df.load(joint_parent, i)
dof_start = df.load(joint_qd_start, i)
X_sc = df.load(body_X_sc, i)
# parent transform in spatial coordinates
X_sp = spatial_transform_identity()
if (parent >= 0):
X_sp = load(body_X_sc, parent)
X_pj = load(joint_X_pj, i)
X_sj = spatial_transform_multiply(X_sp, X_pj)
# compute motion subspace and velocity across the joint (also stores S_s to global memory)
v_j_s = jcalc_motion(type, axis, X_sj, joint_S_s, joint_qd, dof_start)
# parent velocity
v_parent_s = spatial_vector()
a_parent_s = spatial_vector()
if (parent >= 0):
v_parent_s = df.load(body_v_s, parent)
a_parent_s = df.load(body_a_s, parent)
# body velocity, acceleration
v_s = v_parent_s + v_j_s
a_s = a_parent_s + spatial_cross(v_s, v_j_s) # + self.joint_S_s[i]*self.joint_qdd[i]
# compute body forces
X_sm = df.load(body_X_sm, i)
I_m = df.load(body_I_m, i)
# gravity and external forces (expressed in frame aligned with s but centered at body mass)
g = df.load(gravity, 0)
m = I_m[3, 3]
f_g_m = spatial_vector(float3(), g) * m
f_g_s = spatial_transform_wrench(spatial_transform(spatial_transform_get_translation(X_sm), quat_identity()), f_g_m)
#f_ext_s = df.load(body_f_s, i) + f_g_s
# body forces
I_s = spatial_transform_inertia(X_sm, I_m)
f_b_s = df.mul(I_s, a_s) + spatial_cross_dual(v_s, df.mul(I_s, v_s))
df.store(body_v_s, i, v_s)
df.store(body_a_s, i, a_s)
df.store(body_f_s, i, f_b_s - f_g_s)
df.store(body_I_s, i, I_s)
return 0
@df.func
def compute_link_tau(offset: int,
joint_end: int,
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
joint_limit_ke: df.tensor(float),
joint_limit_kd: df.tensor(float),
joint_S_s: df.tensor(df.spatial_vector),
body_fb_s: df.tensor(df.spatial_vector),
# outputs
body_ft_s: df.tensor(df.spatial_vector),
tau: df.tensor(float)):
# for backwards traversal
i = joint_end-offset-1
type = df.load(joint_type, i)
parent = df.load(joint_parent, i)
dof_start = df.load(joint_qd_start, i)
coord_start = df.load(joint_q_start, i)
target_k_e = df.load(joint_target_ke, i)
target_k_d = df.load(joint_target_kd, i)
limit_k_e = df.load(joint_limit_ke, i)
limit_k_d = df.load(joint_limit_kd, i)
# total forces on body
f_b_s = df.load(body_fb_s, i)
f_t_s = df.load(body_ft_s, i)
f_s = f_b_s + f_t_s
# compute joint-space forces, writes out tau
jcalc_tau(type, target_k_e, target_k_d, limit_k_e, limit_k_d, joint_S_s, joint_q, joint_qd, joint_act, joint_target, joint_limit_lower, joint_limit_upper, coord_start, dof_start, f_s, tau)
# update parent forces, todo: check that this is valid for the backwards pass
if (parent >= 0):
df.atomic_add(body_ft_s, parent, f_s)
return 0
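# Inverse-dynamics style pass: for every link this computes the motion subspace S_s, the
# spatial inertia in world coordinates, the link velocity/acceleration, and the bias forces
# (gravity + Coriolis terms). A separate backward sweep (eval_rigid_tau / compute_link_tau)
# then accumulates these forces up the tree to produce joint-space torques.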
@df.kernel
def eval_rigid_id(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_axis: df.tensor(df.float3),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
body_I_m: df.tensor(df.spatial_matrix),
body_X_sc: df.tensor(df.spatial_transform),
body_X_sm: df.tensor(df.spatial_transform),
joint_X_pj: df.tensor(df.spatial_transform),
gravity: df.tensor(df.float3),
# outputs
joint_S_s: df.tensor(df.spatial_vector),
body_I_s: df.tensor(df.spatial_matrix),
body_v_s: df.tensor(df.spatial_vector),
body_f_s: df.tensor(df.spatial_vector),
body_a_s: df.tensor(df.spatial_vector)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
count = end-start
# compute link velocities and coriolis forces
for i in range(start, end):
compute_link_velocity(
i,
joint_type,
joint_parent,
joint_qd_start,
joint_qd,
joint_axis,
body_I_m,
body_X_sc,
body_X_sm,
joint_X_pj,
gravity,
joint_S_s,
body_I_s,
body_v_s,
body_f_s,
body_a_s)
@df.kernel
def eval_rigid_tau(articulation_start: df.tensor(int),
joint_type: df.tensor(int),
joint_parent: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_act: df.tensor(float),
joint_target: df.tensor(float),
joint_target_ke: df.tensor(float),
joint_target_kd: df.tensor(float),
joint_limit_lower: df.tensor(float),
joint_limit_upper: df.tensor(float),
joint_limit_ke: df.tensor(float),
joint_limit_kd: df.tensor(float),
joint_axis: df.tensor(df.float3),
joint_S_s: df.tensor(df.spatial_vector),
body_fb_s: df.tensor(df.spatial_vector),
# outputs
body_ft_s: df.tensor(df.spatial_vector),
tau: df.tensor(float)):
# one thread per-articulation
index = tid()
start = df.load(articulation_start, index)
end = df.load(articulation_start, index+1)
count = end-start
# compute joint forces
for i in range(0, count):
compute_link_tau(
i,
end,
joint_type,
joint_parent,
joint_q_start,
joint_qd_start,
joint_q,
joint_qd,
joint_act,
joint_target,
joint_target_ke,
joint_target_kd,
joint_limit_lower,
joint_limit_upper,
joint_limit_ke,
joint_limit_kd,
joint_S_s,
body_fb_s,
body_ft_s,
tau)
@df.kernel
def eval_rigid_jacobian(
articulation_start: df.tensor(int),
articulation_J_start: df.tensor(int),
joint_parent: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_S_s: df.tensor(spatial_vector),
# outputs
J: df.tensor(float)):
# one thread per-articulation
index = tid()
joint_start = df.load(articulation_start, index)
joint_end = df.load(articulation_start, index+1)
joint_count = joint_end-joint_start
J_offset = df.load(articulation_J_start, index)
# in spatial.h
spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_start, joint_count, J_offset, J)
# @df.kernel
# def eval_rigid_jacobian(
# articulation_start: df.tensor(int),
# articulation_J_start: df.tensor(int),
# joint_parent: df.tensor(int),
# joint_qd_start: df.tensor(int),
# joint_S_s: df.tensor(spatial_vector),
# # outputs
# J: df.tensor(float)):
# # one thread per-articulation
# index = tid()
# joint_start = df.load(articulation_start, index)
# joint_end = df.load(articulation_start, index+1)
# joint_count = joint_end-joint_start
# dof_start = df.load(joint_qd_start, joint_start)
# dof_end = df.load(joint_qd_start, joint_end)
# dof_count = dof_end-dof_start
# #(const spatial_vector* S, const int* joint_parents, const int* joint_qd_start, int num_links, int num_dofs, float* J)
# spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_count, dof_count, J)
@df.kernel
def eval_rigid_mass(
articulation_start: df.tensor(int),
articulation_M_start: df.tensor(int),
body_I_s: df.tensor(spatial_matrix),
# outputs
M: df.tensor(float)):
# one thread per-articulation
index = tid()
joint_start = df.load(articulation_start, index)
joint_end = df.load(articulation_start, index+1)
joint_count = joint_end-joint_start
M_offset = df.load(articulation_M_start, index)
# in spatial.h
spatial_mass(body_I_s, joint_start, joint_count, M_offset, M)
@df.kernel
def eval_dense_gemm(m: int, n: int, p: int, t1: int, t2: int, A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
dense_gemm(m, n, p, t1, t2, A, B, C)
@df.kernel
def eval_dense_gemm_batched(m: df.tensor(int), n: df.tensor(int), p: df.tensor(int), t1: int, t2: int, A_start: df.tensor(int), B_start: df.tensor(int), C_start: df.tensor(int), A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
dense_gemm_batched(m, n, p, t1, t2, A_start, B_start, C_start, A, B, C)
@df.kernel
def eval_dense_cholesky(n: int, A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
dense_chol(n, A, regularization, L)
@df.kernel
def eval_dense_cholesky_batched(A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
dense_chol_batched(A_start, A_dim, A, regularization, L)
@df.kernel
def eval_dense_subs(n: int, L: df.tensor(float), b: df.tensor(float), x: df.tensor(float)):
dense_subs(n, L, b, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve(n: int, A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
dense_solve(n, A, L, b, tmp, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve_batched(b_start: df.tensor(int), A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
dense_solve_batched(b_start, A_start, A_dim, A, L, b, tmp, x)
@df.kernel
def eval_rigid_integrate(
joint_type: df.tensor(int),
joint_q_start: df.tensor(int),
joint_qd_start: df.tensor(int),
joint_q: df.tensor(float),
joint_qd: df.tensor(float),
joint_qdd: df.tensor(float),
dt: float,
# outputs
joint_q_new: df.tensor(float),
joint_qd_new: df.tensor(float)):
# one thread per-articulation
index = tid()
type = df.load(joint_type, index)
coord_start = df.load(joint_q_start, index)
dof_start = df.load(joint_qd_start, index)
jcalc_integrate(
type,
joint_q,
joint_qd,
joint_qdd,
coord_start,
dof_start,
dt,
joint_q_new,
joint_qd_new)
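# The simulation step is exposed to autograd as a tuple of flattened tensors; the full State
# object produced by the substep loop is passed back to the caller through this module-level
# global (see SimulateFunc.forward and SemiImplicitIntegrator.forward below).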
g_state_out = None
# define PyTorch autograd op to wrap simulate func
class SimulateFunc(torch.autograd.Function):
"""PyTorch autograd function representing a simulation stpe
Note:
This node will be inserted into the computation graph whenever
`forward()` is called on an integrator object. It should not be called
directly by the user.
"""
@staticmethod
def forward(ctx, integrator, model, state_in, dt, substeps, mass_matrix_freq, *tensors):
# record launches
ctx.tape = df.Tape()
ctx.inputs = tensors
#ctx.outputs = df.to_weak_list(state_out.flatten())
actuation = state_in.joint_act
# simulate
for i in range(substeps):
# ensure actuation is set on all substeps
state_in.joint_act = actuation
state_out = model.state()
integrator._simulate(ctx.tape, model, state_in, state_out, dt/float(substeps), update_mass_matrix=((i%mass_matrix_freq)==0))
# swap states
state_in = state_out
# use global to pass state object back to caller
global g_state_out
g_state_out = state_out
ctx.outputs = df.to_weak_list(state_out.flatten())
return tuple(state_out.flatten())
@staticmethod
def backward(ctx, *grads):
# ensure grads are contiguous in memory
adj_outputs = df.make_contiguous(grads)
# register outputs with tape
outputs = df.to_strong_list(ctx.outputs)
for o in range(len(outputs)):
ctx.tape.adjoints[outputs[o]] = adj_outputs[o]
# replay launches backwards
ctx.tape.replay()
# find adjoint of inputs
adj_inputs = []
for i in ctx.inputs:
if i in ctx.tape.adjoints:
adj_inputs.append(ctx.tape.adjoints[i])
else:
adj_inputs.append(None)
# free the tape
ctx.tape.reset()
# filter grads to replace empty tensors / no grad / constant params with None
return (None, None, None, None, None, None, *df.filter_grads(adj_inputs))
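# Note on substeps / mass_matrix_freq: forward() advances the state by `substeps` internal
# steps of size dt / substeps, and the joint-space mass matrix H = J^T M J is only rebuilt and
# re-factorized on substeps where (i % mass_matrix_freq) == 0; intermediate substeps reuse the
# previous Cholesky factor L.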
class SemiImplicitIntegrator:
"""A semi-implicit integrator using symplectic Euler
After constructing `Model` and `State` objects this time-integrator
may be used to advance the simulation state forward in time.
Semi-implicit time integration is a variational integrator that
preserves energy, however it is not unconditionally stable and requires a time-step
small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example:
>>> integrator = df.SemiImplicitIntegrator()
>>>
>>> # simulation loop
>>> for i in range(100):
>>> state = integrator.forward(model, state, dt, substeps, mass_matrix_freq)
"""
def __init__(self):
pass
def forward(self, model: Model, state_in: State, dt: float, substeps: int, mass_matrix_freq: int) -> State:
"""Performs a single integration step forward in time
This method inserts a node into the PyTorch computational graph with
references to all model and state tensors such that gradients
can be propagated back through the simulation step.
Args:
model: Simulation model
state: Simulation state at the start of the time-step
dt: The simulation time-step (usually in seconds)
substeps: Number of sub-steps to take within this time-step
mass_matrix_freq: Rebuild and refactorize the joint-space mass matrix every `mass_matrix_freq` sub-steps
Returns:
The state of the system at the end of the time-step
"""
if dflex.config.no_grad:
# if no gradient required then do inplace update
for i in range(substeps):
self._simulate(df.Tape(), model, state_in, state_in, dt/float(substeps), update_mass_matrix=(i%mass_matrix_freq)==0)
return state_in
else:
# get list of inputs and outputs for PyTorch tensor tracking
inputs = [*state_in.flatten(), *model.flatten()]
# run sim as a PyTorch op
tensors = SimulateFunc.apply(self, model, state_in, dt, substeps, mass_matrix_freq, *inputs)
global g_state_out
state_out = g_state_out
g_state_out = None # null reference
return state_out
def _simulate(self, tape, model, state_in, state_out, dt, update_mass_matrix=True):
with dflex.util.ScopedTimer("simulate", False):
# alloc particle force buffer
if (model.particle_count):
state_out.particle_f.zero_()
if (model.link_count):
state_out.body_ft_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
state_out.body_f_ext_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
# damped springs
if (model.spring_count):
tape.launch(func=eval_springs,
dim=model.spring_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle elastic and lift/drag forces
if (model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles,
dim=model.tri_count,
inputs=[
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle/triangle contacts
if (model.enable_tri_collisions and model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles_contact,
dim=model.tri_count * model.particle_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle bending
if (model.edge_count):
tape.launch(func=eval_bending,
dim=model.edge_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.edge_indices, model.edge_rest_angle, model.edge_ke, model.edge_kd],
outputs=[state_out.particle_f],
adapter=model.adapter)
# particle ground contact
if (model.ground and model.particle_count):
tape.launch(func=eval_contacts,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
outputs=[state_out.particle_f],
adapter=model.adapter)
# tetrahedral FEM
if (model.tet_count):
tape.launch(func=eval_tetrahedra,
dim=model.tet_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials],
outputs=[state_out.particle_f],
adapter=model.adapter)
#----------------------------
# articulations
if (model.link_count):
# evaluate body transforms
tape.launch(
func=eval_rigid_fk,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
model.joint_X_pj,
model.joint_X_cm,
model.joint_axis
],
outputs=[
state_out.body_X_sc,
state_out.body_X_sm
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint inertias, motion vectors, and forces
tape.launch(
func=eval_rigid_id,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
model.joint_axis,
model.joint_target_ke,
model.joint_target_kd,
model.body_I_m,
state_out.body_X_sc,
state_out.body_X_sm,
model.joint_X_pj,
model.gravity
],
outputs=[
state_out.joint_S_s,
state_out.body_I_s,
state_out.body_v_s,
state_out.body_f_s,
state_out.body_a_s,
],
adapter=model.adapter,
preserve_output=True)
if (model.ground and model.contact_count > 0):
# evaluate contact forces
tape.launch(
func=eval_rigid_contacts_art,
dim=model.contact_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.contact_body0,
model.contact_point0,
model.contact_dist,
model.contact_material,
model.shape_materials
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# particle shape contact
if (model.particle_count):
# tape.launch(func=eval_soft_contacts,
# dim=model.particle_count*model.shape_count,
# inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
# outputs=[state_out.particle_f],
# adapter=model.adapter)
tape.launch(func=eval_soft_contacts,
dim=model.particle_count*model.shape_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
state_in.body_X_sc,
state_in.body_v_s,
model.shape_transform,
model.shape_body,
model.shape_geo_type,
torch.Tensor(),
model.shape_geo_scale,
model.shape_materials,
model.contact_ke,
model.contact_kd,
model.contact_kf,
model.contact_mu],
# outputs
outputs=[
state_out.particle_f,
state_out.body_f_s],
adapter=model.adapter)
# evaluate muscle actuation
tape.launch(
func=eval_muscles,
dim=model.muscle_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.muscle_start,
model.muscle_params,
model.muscle_links,
model.muscle_points,
model.muscle_activation
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint torques
tape.launch(
func=eval_rigid_tau,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_in.joint_act,
model.joint_target,
model.joint_target_ke,
model.joint_target_kd,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_limit_ke,
model.joint_limit_kd,
model.joint_axis,
state_out.joint_S_s,
state_out.body_f_s
],
outputs=[
state_out.body_ft_s,
state_out.joint_tau
],
adapter=model.adapter,
preserve_output=True)
if (update_mass_matrix):
model.alloc_mass_matrix()
# build J
tape.launch(
func=eval_rigid_jacobian,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_J_start,
model.joint_parent,
model.joint_qd_start,
state_out.joint_S_s
],
outputs=[
model.J
],
adapter=model.adapter,
preserve_output=True)
# build M
tape.launch(
func=eval_rigid_mass,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_M_start,
state_out.body_I_s
],
outputs=[
model.M
],
adapter=model.adapter,
preserve_output=True)
# form P = M*J
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_M_rows,
model.articulation_J_cols,
model.articulation_J_rows,
0,
0,
model.articulation_M_start,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.M,
model.J,
model.P,
adapter=model.adapter)
# form H = J^T*P
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_J_cols,
model.articulation_J_cols,
model.articulation_J_rows, # P rows is the same as J rows
1,
0,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.articulation_H_start,
model.J,
model.P,
model.H,
adapter=model.adapter)
# compute decomposition
tape.launch(
func=eval_dense_cholesky_batched,
dim=model.articulation_count,
inputs=[
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.joint_armature
],
outputs=[
model.L
],
adapter=model.adapter,
skip_check_grad=True)
tmp = torch.zeros_like(state_out.joint_tau)
# solve for qdd
tape.launch(
func=eval_dense_solve_batched,
dim=model.articulation_count,
inputs=[
model.articulation_dof_start,
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.L,
state_out.joint_tau,
tmp
],
outputs=[
state_out.joint_qdd
],
adapter=model.adapter,
skip_check_grad=True)
# integrate joint dofs -> joint coords
tape.launch(
func=eval_rigid_integrate,
dim=model.link_count,
inputs=[
model.joint_type,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_out.joint_qdd,
dt
],
outputs=[
state_out.joint_q,
state_out.joint_qd
],
adapter=model.adapter)
#----------------------------
# integrate particles
if (model.particle_count):
tape.launch(func=integrate_particles,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
outputs=[state_out.particle_q, state_out.particle_qd],
adapter=model.adapter)
return state_out
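# Position-based (XPBD) constraint kernels used by XPBDIntegrator below. Instead of forces,
# each kernel writes position corrections into a `delta` buffer which apply_deltas later adds
# to the predicted positions and converts back into velocities.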
@df.kernel
def solve_springs(x: df.tensor(df.float3),
v: df.tensor(df.float3),
invmass: df.tensor(float),
spring_indices: df.tensor(int),
spring_rest_lengths: df.tensor(float),
spring_stiffness: df.tensor(float),
spring_damping: df.tensor(float),
dt: float,
delta: df.tensor(df.float3)):
tid = df.tid()
i = df.load(spring_indices, tid * 2 + 0)
j = df.load(spring_indices, tid * 2 + 1)
ke = df.load(spring_stiffness, tid)
kd = df.load(spring_damping, tid)
rest = df.load(spring_rest_lengths, tid)
xi = df.load(x, i)
xj = df.load(x, j)
vi = df.load(v, i)
vj = df.load(v, j)
xij = xi - xj
vij = vi - vj
l = length(xij)
l_inv = 1.0 / l
# normalized spring direction
dir = xij * l_inv
c = l - rest
dcdt = dot(dir, vij)
# damping based on relative velocity.
#fs = dir * (ke * c + kd * dcdt)
wi = df.load(invmass, i)
wj = df.load(invmass, j)
denom = wi + wj
alpha = 1.0/(ke*dt*dt)
multiplier = c / (denom)# + alpha)
xd = dir*multiplier
df.atomic_sub(delta, i, xd*wi)
df.atomic_add(delta, j, xd*wj)
@df.kernel
def solve_tetrahedra(x: df.tensor(df.float3),
v: df.tensor(df.float3),
inv_mass: df.tensor(float),
indices: df.tensor(int),
pose: df.tensor(df.mat33),
activation: df.tensor(float),
materials: df.tensor(float),
dt: float,
relaxation: float,
delta: df.tensor(df.float3)):
tid = df.tid()
i = df.load(indices, tid * 4 + 0)
j = df.load(indices, tid * 4 + 1)
k = df.load(indices, tid * 4 + 2)
l = df.load(indices, tid * 4 + 3)
act = df.load(activation, tid)
k_mu = df.load(materials, tid * 3 + 0)
k_lambda = df.load(materials, tid * 3 + 1)
k_damp = df.load(materials, tid * 3 + 2)
x0 = df.load(x, i)
x1 = df.load(x, j)
x2 = df.load(x, k)
x3 = df.load(x, l)
v0 = df.load(v, i)
v1 = df.load(v, j)
v2 = df.load(v, k)
v3 = df.load(v, l)
w0 = df.load(inv_mass, i)
w1 = df.load(inv_mass, j)
w2 = df.load(inv_mass, k)
w3 = df.load(inv_mass, l)
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v10 = v1 - v0
v20 = v2 - v0
v30 = v3 - v0
Ds = df.mat33(x10, x20, x30)
Dm = df.load(pose, tid)
inv_rest_volume = df.determinant(Dm) * 6.0
rest_volume = 1.0 / inv_rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
f1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
f2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
f3 = df.float3(F[0, 2], F[1, 2], F[2, 2])
# C_sqrt
tr = dot(f1, f1) + dot(f2, f2) + dot(f3, f3)
r_s = df.sqrt(abs(tr - 3.0))
C = r_s
if (r_s == 0.0):
return
if (tr < 3.0):
r_s = 0.0 - r_s
dCdx = F*df.transpose(Dm)*(1.0/r_s)
alpha = 1.0 + k_mu / k_lambda
# C_Neo
# r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s
# dCdx = F*df.transpose(Dm)*r_s_inv
# alpha = 1.0 + k_mu / k_lambda
# C_Spherical
# r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s - df.sqrt(3.0)
# dCdx = F*df.transpose(Dm)*r_s_inv
# alpha = 1.0
# C_D
#r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
#C = r_s*r_s - 3.0
#dCdx = F*df.transpose(Dm)*2.0
#alpha = 1.0
grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
denom = dot(grad0,grad0)*w0 + dot(grad1,grad1)*w1 + dot(grad2,grad2)*w2 + dot(grad3,grad3)*w3
multiplier = C/(denom + 1.0/(k_mu*dt*dt*rest_volume))
delta0 = grad0*multiplier
delta1 = grad1*multiplier
delta2 = grad2*multiplier
delta3 = grad3*multiplier
# hydrostatic part
J = df.determinant(F)
C_vol = J - alpha
# dCdx = df.mat33(cross(f2, f3), cross(f3, f1), cross(f1, f2))*df.transpose(Dm)
# grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
s = inv_rest_volume / 6.0
grad1 = df.cross(x20, x30) * s
grad2 = df.cross(x30, x10) * s
grad3 = df.cross(x10, x20) * s
grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
denom = dot(grad0, grad0)*w0 + dot(grad1, grad1)*w1 + dot(grad2, grad2)*w2 + dot(grad3, grad3)*w3
multiplier = C_vol/(denom + 1.0/(k_lambda*dt*dt*rest_volume))
delta0 = delta0 + grad0 * multiplier
delta1 = delta1 + grad1 * multiplier
delta2 = delta2 + grad2 * multiplier
delta3 = delta3 + grad3 * multiplier
# apply forces
df.atomic_sub(delta, i, delta0*w0*relaxation)
df.atomic_sub(delta, j, delta1*w1*relaxation)
df.atomic_sub(delta, k, delta2*w2*relaxation)
df.atomic_sub(delta, l, delta3*w3*relaxation)
@df.kernel
def solve_contacts(
x: df.tensor(df.float3),
v: df.tensor(df.float3),
inv_mass: df.tensor(float),
mu: float,
dt: float,
delta: df.tensor(df.float3)):
tid = df.tid()
x0 = df.load(x, tid)
v0 = df.load(v, tid)
w0 = df.load(inv_mass, tid)
n = df.float3(0.0, 1.0, 0.0)
c = df.dot(n, x0) - 0.01
if (c > 0.0):
return
# normal
lambda_n = c
delta_n = n*lambda_n
# friction
vn = df.dot(n, v0)
vt = v0 - n * vn
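    # Coulomb limit: the tangential (friction) correction is capped in magnitude
    # at mu times the normal correction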
lambda_f = df.max(mu*lambda_n, 0.0 - df.length(vt)*dt)
delta_f = df.normalize(vt)*lambda_f
df.atomic_add(delta, tid, delta_f - delta_n)
@df.kernel
def apply_deltas(x_orig: df.tensor(df.float3),
v_orig: df.tensor(df.float3),
x_pred: df.tensor(df.float3),
delta: df.tensor(df.float3),
dt: float,
x_out: df.tensor(df.float3),
v_out: df.tensor(df.float3)):
tid = df.tid()
x0 = df.load(x_orig, tid)
xp = df.load(x_pred, tid)
# constraint deltas
d = df.load(delta, tid)
x_new = xp + d
v_new = (x_new - x0)/dt
df.store(x_out, tid, x_new)
df.store(v_out, tid, v_new)
class XPBDIntegrator:
"""A implicit integrator using XPBD
After constructing `Model` and `State` objects this time-integrator
may be used to advance the simulation state forward in time.
Semi-implicit time integration is a variational integrator that
preserves energy, however it not unconditionally stable, and requires a time-step
small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example:
>>> integrator = df.SemiImplicitIntegrator()
>>>
>>> # simulation loop
>>> for i in range(100):
>>> state = integrator.forward(model, state, dt)
"""
def __init__(self):
pass
def forward(self, model: Model, state_in: State, dt: float) -> State:
"""Performs a single integration step forward in time
This method inserts a node into the PyTorch computational graph with
references to all model and state tensors such that gradients
        can be propagated back through the simulation step.
Args:
model: Simulation model
            state: Simulation state at the start of the time-step
dt: The simulation time-step (usually in seconds)
Returns:
The state of the system at the end of the time-step
"""
if dflex.config.no_grad:
# if no gradient required then do inplace update
self._simulate(df.Tape(), model, state_in, state_in, dt)
return state_in
else:
# get list of inputs and outputs for PyTorch tensor tracking
inputs = [*state_in.flatten(), *model.flatten()]
# allocate new output
state_out = model.state()
# run sim as a PyTorch op
tensors = SimulateFunc.apply(self, model, state_in, state_out, dt, *inputs)
return state_out
def _simulate(self, tape, model, state_in, state_out, dt):
with dflex.util.ScopedTimer("simulate", False):
# alloc particle force buffer
if (model.particle_count):
state_out.particle_f.zero_()
q_pred = torch.zeros_like(state_in.particle_q)
qd_pred = torch.zeros_like(state_in.particle_qd)
#----------------------------
# integrate particles
if (model.particle_count):
tape.launch(func=integrate_particles,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
outputs=[q_pred, qd_pred],
adapter=model.adapter)
# contacts
if (model.particle_count and model.ground):
tape.launch(func=solve_contacts,
dim=model.particle_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.contact_mu, dt],
outputs=[state_out.particle_f],
adapter=model.adapter)
# damped springs
if (model.spring_count):
tape.launch(func=solve_springs,
dim=model.spring_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping, dt],
outputs=[state_out.particle_f],
adapter=model.adapter)
# tetrahedral FEM
if (model.tet_count):
tape.launch(func=solve_tetrahedra,
dim=model.tet_count,
inputs=[q_pred, qd_pred, model.particle_inv_mass, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials, dt, model.relaxation],
outputs=[state_out.particle_f],
adapter=model.adapter)
# apply updates
tape.launch(func=apply_deltas,
dim=model.particle_count,
inputs=[state_in.particle_q,
state_in.particle_qd,
q_pred,
state_out.particle_f,
dt],
outputs=[state_out.particle_q,
state_out.particle_qd],
adapter=model.adapter)
return state_out
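# ----------------------------------------------------------------------------
# Editorial usage sketch (not part of the library API): back-propagating a
# scalar loss through XPBD simulation steps. The `model` argument is assumed to
# be a finalized df.Model containing particles; the helper name is illustrative.
def _xpbd_gradient_example(model: Model, dt: float = 1.0 / 60.0):
    integrator = XPBDIntegrator()
    state = model.state()
    # advance a few steps; in grad mode each call adds a node to the autograd graph
    for _ in range(10):
        state = integrator.forward(model, state, dt)
    # any scalar function of the output state can serve as a loss; gradients flow
    # back to model/state tensors that have requires_grad set
    loss = state.particle_q.sum()
    if loss.requires_grad:
        loss.backward()
    return loss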
| 97,130 | Python | 31.333888 | 241 | 0.51253 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/matnn.h | #pragma once
CUDA_CALLABLE inline int dense_index(int stride, int i, int j)
{
return i*stride + j;
}
template <bool transpose>
CUDA_CALLABLE inline int dense_index(int rows, int cols, int i, int j)
{
if (transpose)
return j*rows + i;
else
return i*cols + j;
}
#ifdef CPU
const int kNumThreadsPerBlock = 1;
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
for (int i=0; i < m; i++)
{
for (int j=0; j < n; ++j)
{
float sum = 0.0f;
for (int k=0; k < p; ++k)
{
sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
}
if (add)
C[i*n + j] += sum;
else
C[i*n + j] = sum;
}
}
}
#else
const int kNumThreadsPerBlock = 256;
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
// each thread in the block calculates an output (or more if output dim > block dim)
for (int e=threadIdx.x; e < m*n; e += blockDim.x)
{
const int i=e/n;
const int j=e%n;
float sum = 0.0f;
for (int k=0; k < p; ++k)
{
sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
}
if (add)
C[i*n + j] += sum;
else
C[i*n + j] = sum;
}
}
#endif
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm(int m, int n, int p, int t1, int t2, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
if (t1 == 0 && t2 == 0)
dense_gemm_impl<false, false, add>(m, n, p, A, B, C);
else if (t1 == 1 && t2 == 0)
dense_gemm_impl<true, false, add>(m, n, p, A, B, C);
else if (t1 == 0 && t2 == 1)
dense_gemm_impl<false, true, add>(m, n, p, A, B, C);
else if (t1 == 1 && t2 == 1)
dense_gemm_impl<true, true, add>(m, n, p, A, B, C);
}
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm_batched(
const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
// on the CPU each thread computes the whole matrix multiply
// on the GPU each block computes the multiply with one output per-thread
const int batch = tid()/kNumThreadsPerBlock;
dense_gemm<add>(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch]);
}
// computes c = b^T*a*b, with a and b being stored in row-major layout
CUDA_CALLABLE inline void dense_quadratic()
{
}
// CUDA_CALLABLE inline void dense_chol(int n, const float* A, float* L)
// {
// // for each column
// for (int j=0; j < n; ++j)
// {
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] = A[dense_index(n, i, j)];
// }
// for (int k = 0; k < j; ++k)
// {
// const float p = L[dense_index(n, j, k)];
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] -= p*L[dense_index(n, i, k)];
// }
// }
// // scale
// const float d = L[dense_index(n, j, j)];
// const float s = 1.0f/sqrtf(d);
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] *=s;
// }
// }
// }
void CUDA_CALLABLE inline dense_chol(int n, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
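    // Cholesky factorization A = L*L^T (L lower triangular); a per-row
    // regularization term is added to the diagonal before factoring.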
for (int j=0; j < n; ++j)
{
float s = A[dense_index(n, j, j)] + regularization[j];
for (int k=0; k < j; ++k)
{
float r = L[dense_index(n, j, k)];
s -= r*r;
}
s = sqrtf(s);
const float invS = 1.0f/s;
L[dense_index(n, j, j)] = s;
for (int i=j+1; i < n; ++i)
{
s = A[dense_index(n, i, j)];
for (int k=0; k < j; ++k)
{
s -= L[dense_index(n, i, k)]*L[dense_index(n, j, k)];
}
L[dense_index(n, i, j)] = s*invS;
}
}
}
void CUDA_CALLABLE inline dense_chol_batched(const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
const int batch = tid();
const int n = A_dim[batch];
const int offset = A_start[batch];
dense_chol(n, A + offset, regularization + n*batch, L + offset);
}
// Solves (L*L^T)x = b given the Cholesky factor L
CUDA_CALLABLE inline void dense_subs(int n, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ x)
{
// forward substitution
for (int i=0; i < n; ++i)
{
float s = b[i];
for (int j=0; j < i; ++j)
{
s -= L[dense_index(n, i, j)]*x[j];
}
x[i] = s/L[dense_index(n, i, i)];
}
// backward substitution
for (int i=n-1; i >= 0; --i)
{
float s = x[i];
for (int j=i+1; j < n; ++j)
{
s -= L[dense_index(n, j, i)]*x[j];
}
x[i] = s/L[dense_index(n, i, i)];
}
}
CUDA_CALLABLE inline void dense_solve(int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
dense_subs(n, L, b, x);
}
CUDA_CALLABLE inline void dense_solve_batched(
const int* __restrict__ b_start, const int* A_start, const int* A_dim,
const float* __restrict__ A, const float* __restrict__ L,
const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
const int batch = tid();
dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], NULL, x + b_start[batch]);
}
CUDA_CALLABLE inline void print_matrix(const char* name, int m, int n, const float* data)
{
printf("%s = [", name);
for (int i=0; i < m; ++i)
{
for (int j=0; j < n; ++j)
{
printf("%f ", data[dense_index(n, i, j)]);
}
printf(";\n");
}
printf("]\n");
}
// adjoint methods
CUDA_CALLABLE inline void adj_dense_gemm(
int m, int n, int p, int t1, int t2, const float* A, const float* B, float* C,
int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2, float* adj_A, float* adj_B, const float* adj_C)
{
// print_matrix("A", m, p, A);
// print_matrix("B", p, n, B);
// printf("t1: %d t2: %d\n", t1, t2);
if (t1)
{
dense_gemm<true>(p, m, n, 0, 1, B, adj_C, adj_A);
dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
}
else
{
dense_gemm<true>(m, p, n, 0, int(!t2), adj_C, B, adj_A);
dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
}
}
CUDA_CALLABLE inline void adj_dense_gemm_batched(
const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C,
// adj
int* __restrict__ adj_m, int* __restrict__ adj_n, int* __restrict__ adj_p, int adj_t1, int adj_t2,
int* __restrict__ adj_A_start, int* __restrict__ adj_B_start, int* __restrict__ adj_C_start,
float* __restrict__ adj_A, float* __restrict__ adj_B, const float* __restrict__ adj_C)
{
const int batch = tid()/kNumThreadsPerBlock;
adj_dense_gemm(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch],
0, 0, 0, 0, 0, adj_A+A_start[batch], adj_B+B_start[batch], adj_C+C_start[batch]);
}
CUDA_CALLABLE inline void adj_dense_chol(
int n, const float* A, const float* __restrict__ regularization, float* L,
int adj_n, const float* adj_A, const float* __restrict__ adj_regularization, float* adj_L)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_chol_batched(
const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L,
const int* __restrict__ adj_A_start, const int* __restrict__ adj_A_dim, const float* __restrict__ adj_A, const float* __restrict__ adj_regularization, float* __restrict__ adj_L)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_subs(
int n, const float* L, const float* b, float* x,
int adj_n, const float* adj_L, const float* adj_b, float* adj_x)
{
// nop, use dense_solve to differentiate through (A^-1)b = x
}
CUDA_CALLABLE inline void adj_dense_solve(
int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, const float* __restrict__ x,
int adj_n, float* __restrict__ adj_A, float* __restrict__ adj_L, float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
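    // Adjoint of x = A^-1*b with A symmetric positive definite (A = L*L^T):
    //   adj_b += A^-1*adj_x
    //   adj_A += -(A^-1*adj_x)*x^T
    // tmp holds A^-1*adj_x, obtained below by an extra forward/back substitution.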
for (int i=0; i < n; ++i)
{
tmp[i] = 0.0f;
}
dense_subs(n, L, adj_x, tmp);
for (int i=0; i < n; ++i)
{
adj_b[i] += tmp[i];
}
//dense_subs(n, L, adj_x, adj_b);
// A* = -adj_b*x^T
for (int i=0; i < n; ++i)
{
for (int j=0; j < n; ++j)
{
adj_A[dense_index(n, i, j)] += -tmp[i]*x[j];
}
}
}
CUDA_CALLABLE inline void adj_dense_solve_batched(
const int* __restrict__ b_start, const int* A_start, const int* A_dim,
const float* __restrict__ A, const float* __restrict__ L,
const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x,
// adj
int* __restrict__ adj_b_start, int* __restrict__ adj_A_start, int* __restrict__ adj_A_dim,
float* __restrict__ adj_A, float* __restrict__ adj_L,
float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
const int batch = tid();
adj_dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], tmp + b_start[batch], x + b_start[batch],
0, adj_A + A_start[batch], adj_L + A_start[batch], adj_b + b_start[batch], tmp + b_start[batch], adj_x + b_start[batch]);
}
| 10,723 | C | 29.379603 | 202 | 0.531847 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/__init__.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from dflex.sim import *
from dflex.render import *
from dflex.adjoint import compile
from dflex.util import *
# compiles kernels
kernel_init()
| 569 | Python | 34.624998 | 76 | 0.804921 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/render.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This optional module contains a built-in renderer for the USD data
format that can be used to visualize time-sampled simulation data.
Users should create a simulation model and integrator and periodically
call :func:`UsdRenderer.update()` to write time-sampled simulation data to the USD stage.
Example:
>>> # construct a new USD stage
>>> stage = Usd.Stage.CreateNew("my_stage.usda")
>>> renderer = df.render.UsdRenderer(model, stage)
>>>
>>> time = 0.0
>>>
>>> for i in range(100):
>>>
>>> # update simulation here
>>> # ....
>>>
>>> # update renderer
        >>> renderer.update(state, time)
>>> time += dt
>>>
>>> # write stage to file
>>> stage.Save()
Note:
You must have the Pixar USD bindings installed to use this module
please see https://developer.nvidia.com/usd to obtain precompiled
USD binaries and installation instructions.
"""
try:
from pxr import Usd, UsdGeom, Gf, Sdf
except ModuleNotFoundError:
print("No pxr package")
import dflex.sim
import dflex.util
import math
def _usd_add_xform(prim):
prim.ClearXformOpOrder()
t = prim.AddTranslateOp()
r = prim.AddOrientOp()
s = prim.AddScaleOp()
def _usd_set_xform(xform, transform, scale, time):
xform_ops = xform.GetOrderedXformOps()
pos = tuple(transform[0])
rot = tuple(transform[1])
xform_ops[0].Set(Gf.Vec3d(pos), time)
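    # dflex quaternions are stored as (x, y, z, w); Gf.Quatf expects (w, x, y, z)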
xform_ops[1].Set(Gf.Quatf(rot[3], rot[0], rot[1], rot[2]), time)
xform_ops[2].Set(Gf.Vec3d(scale), time)
# transforms a cylinder such that it connects the two points pos0, pos1
def _compute_segment_xform(pos0, pos1):
mid = (pos0 + pos1) * 0.5
height = (pos1 - pos0).GetLength()
dir = (pos1 - pos0) / height
rot = Gf.Rotation()
rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir))
scale = Gf.Vec3f(1.0, 1.0, height)
return (mid, Gf.Quath(rot.GetQuat()), scale)
class UsdRenderer:
"""A USD renderer
"""
def __init__(self, model: dflex.model.Model, stage):
"""Construct a UsdRenderer object
Args:
model: A simulation model
stage (Usd.Stage): A USD stage (either in memory or on disk)
"""
self.stage = stage
self.model = model
self.draw_points = True
self.draw_springs = False
self.draw_triangles = False
if (stage.GetPrimAtPath("/root")):
stage.RemovePrim("/root")
self.root = UsdGeom.Xform.Define(stage, '/root')
# add sphere instancer for particles
self.particle_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("particle_instancer"))
self.particle_instancer_sphere = UsdGeom.Sphere.Define(stage, self.particle_instancer.GetPath().AppendChild("sphere"))
self.particle_instancer_sphere.GetRadiusAttr().Set(model.particle_radius)
self.particle_instancer.CreatePrototypesRel().SetTargets([self.particle_instancer_sphere.GetPath()])
self.particle_instancer.CreateProtoIndicesAttr().Set([0] * model.particle_count)
# add line instancer
if (self.model.spring_count > 0):
self.spring_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("spring_instancer"))
self.spring_instancer_cylinder = UsdGeom.Capsule.Define(stage, self.spring_instancer.GetPath().AppendChild("cylinder"))
self.spring_instancer_cylinder.GetRadiusAttr().Set(0.01)
self.spring_instancer.CreatePrototypesRel().SetTargets([self.spring_instancer_cylinder.GetPath()])
self.spring_instancer.CreateProtoIndicesAttr().Set([0] * model.spring_count)
self.stage.SetDefaultPrim(self.root.GetPrim())
# time codes
try:
self.stage.SetStartTimeCode(0.0)
self.stage.SetEndTimeCode(0.0)
self.stage.SetTimeCodesPerSecond(1.0)
except:
pass
# add dynamic cloth mesh
if (model.tri_count > 0):
self.cloth_mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("cloth"))
self.cloth_remap = {}
self.cloth_verts = []
self.cloth_indices = []
# USD needs a contiguous vertex buffer, use a dict to map from simulation indices->render indices
indices = self.model.tri_indices.flatten().tolist()
for i in indices:
if i not in self.cloth_remap:
# copy vertex
new_index = len(self.cloth_verts)
self.cloth_verts.append(self.model.particle_q[i].tolist())
self.cloth_indices.append(new_index)
self.cloth_remap[i] = new_index
else:
self.cloth_indices.append(self.cloth_remap[i])
self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts)
self.cloth_mesh.GetFaceVertexIndicesAttr().Set(self.cloth_indices)
self.cloth_mesh.GetFaceVertexCountsAttr().Set([3] * model.tri_count)
else:
self.cloth_mesh = None
# built-in ground plane
if (model.ground):
size = 10.0
mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("plane_0"))
mesh.CreateDoubleSidedAttr().Set(True)
points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
counts = (4, )
indices = [0, 1, 2, 3]
mesh.GetPointsAttr().Set(points)
mesh.GetNormalsAttr().Set(normals)
mesh.GetFaceVertexCountsAttr().Set(counts)
mesh.GetFaceVertexIndicesAttr().Set(indices)
# add rigid bodies xform root
for b in range(model.link_count):
xform = UsdGeom.Xform.Define(stage, self.root.GetPath().AppendChild("body_" + str(b)))
_usd_add_xform(xform)
# add rigid body shapes
for s in range(model.shape_count):
parent_path = self.root.GetPath()
if model.shape_body[s] >= 0:
parent_path = parent_path.AppendChild("body_" + str(model.shape_body[s].item()))
geo_type = model.shape_geo_type[s].item()
geo_scale = model.shape_geo_scale[s].tolist()
geo_src = model.shape_geo_src[s]
# shape transform in body frame
X_bs = dflex.util.transform_expand(model.shape_transform[s].tolist())
if (geo_type == dflex.sim.GEO_PLANE):
# plane mesh
size = 1000.0
mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("plane_" + str(s)))
mesh.CreateDoubleSidedAttr().Set(True)
points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
counts = (4, )
indices = [0, 1, 2, 3]
mesh.GetPointsAttr().Set(points)
mesh.GetNormalsAttr().Set(normals)
mesh.GetFaceVertexCountsAttr().Set(counts)
mesh.GetFaceVertexIndicesAttr().Set(indices)
elif (geo_type == dflex.sim.GEO_SPHERE):
mesh = UsdGeom.Sphere.Define(stage, parent_path.AppendChild("sphere_" + str(s)))
mesh.GetRadiusAttr().Set(geo_scale[0])
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (1.0, 1.0, 1.0), 0.0)
elif (geo_type == dflex.sim.GEO_CAPSULE):
mesh = UsdGeom.Capsule.Define(stage, parent_path.AppendChild("capsule_" + str(s)))
mesh.GetRadiusAttr().Set(geo_scale[0])
mesh.GetHeightAttr().Set(geo_scale[1] * 2.0)
# geometry transform w.r.t shape, convert USD geometry to physics engine convention
X_sg = dflex.util.transform((0.0, 0.0, 0.0), dflex.util.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
X_bg = dflex.util.transform_multiply(X_bs, X_sg)
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bg, (1.0, 1.0, 1.0), 0.0)
elif (geo_type == dflex.sim.GEO_BOX):
mesh = UsdGeom.Cube.Define(stage, parent_path.AppendChild("box_" + str(s)))
#mesh.GetSizeAttr().Set((geo_scale[0], geo_scale[1], geo_scale[2]))
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)
elif (geo_type == dflex.sim.GEO_MESH):
mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("mesh_" + str(s)))
mesh.GetPointsAttr().Set(geo_src.vertices)
mesh.GetFaceVertexIndicesAttr().Set(geo_src.indices)
mesh.GetFaceVertexCountsAttr().Set([3] * int(len(geo_src.indices) / 3))
_usd_add_xform(mesh)
_usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)
elif (geo_type == dflex.sim.GEO_SDF):
pass
def update(self, state: dflex.model.State, time: float):
"""Update the USD stage with latest simulation data
Args:
state: Current state of the simulation
time: The current time to update at in seconds
"""
try:
self.stage.SetEndTimeCode(time)
except:
pass
# convert to list
if self.model.particle_count:
particle_q = state.particle_q.tolist()
particle_orientations = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * self.model.particle_count
self.particle_instancer.GetPositionsAttr().Set(particle_q, time)
self.particle_instancer.GetOrientationsAttr().Set(particle_orientations, time)
# update cloth
if (self.cloth_mesh):
for k, v in self.cloth_remap.items():
self.cloth_verts[v] = particle_q[k]
self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts, time)
# update springs
if (self.model.spring_count > 0):
line_positions = []
line_rotations = []
line_scales = []
for i in range(self.model.spring_count):
index0 = self.model.spring_indices[i * 2 + 0]
index1 = self.model.spring_indices[i * 2 + 1]
pos0 = particle_q[index0]
pos1 = particle_q[index1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
self.spring_instancer.GetPositionsAttr().Set(line_positions, time)
self.spring_instancer.GetOrientationsAttr().Set(line_rotations, time)
self.spring_instancer.GetScalesAttr().Set(line_scales, time)
# rigids
for b in range(self.model.link_count):
#xform = UsdGeom.Xform.Define(self.stage, self.root.GetPath().AppendChild("body_" + str(b)))
node = UsdGeom.Xform(self.stage.GetPrimAtPath(self.root.GetPath().AppendChild("body_" + str(b))))
# unpack rigid spatial_transform
X_sb = dflex.util.transform_expand(state.body_X_sc[b].tolist())
_usd_set_xform(node, X_sb, (1.0, 1.0, 1.0), time)
def add_sphere(self, pos: tuple, radius: float, name: str, time: float=0.0):
"""Debug helper to add a sphere for visualization
Args:
pos: The position of the sphere
radius: The radius of the sphere
name: A name for the USD prim on the stage
"""
sphere_path = self.root.GetPath().AppendChild(name)
sphere = UsdGeom.Sphere.Get(self.stage, sphere_path)
if not sphere:
sphere = UsdGeom.Sphere.Define(self.stage, sphere_path)
sphere.GetRadiusAttr().Set(radius, time)
mat = Gf.Matrix4d()
mat.SetIdentity()
mat.SetTranslateOnly(Gf.Vec3d(pos))
op = sphere.MakeMatrixXform()
op.Set(mat, time)
def add_box(self, pos: tuple, extents: float, name: str, time: float=0.0):
"""Debug helper to add a box for visualization
Args:
            pos: The position of the box
            extents: The half-extents of the box along each axis
name: A name for the USD prim on the stage
"""
sphere_path = self.root.GetPath().AppendChild(name)
sphere = UsdGeom.Cube.Get(self.stage, sphere_path)
if not sphere:
sphere = UsdGeom.Cube.Define(self.stage, sphere_path)
#sphere.GetSizeAttr().Set((extents[0]*2.0, extents[1]*2.0, extents[2]*2.0), time)
mat = Gf.Matrix4d()
mat.SetIdentity()
mat.SetScale(extents)
mat.SetTranslateOnly(Gf.Vec3d(pos))
op = sphere.MakeMatrixXform()
op.Set(mat, time)
def add_mesh(self, name: str, path: str, transform, scale, time: float):
ref_path = "/root/" + name
ref = UsdGeom.Xform.Get(self.stage, ref_path)
if not ref:
ref = UsdGeom.Xform.Define(self.stage, ref_path)
ref.GetPrim().GetReferences().AddReference(path)
_usd_add_xform(ref)
# update transform
_usd_set_xform(ref, transform, scale, time)
def add_line_list(self, vertices, color, time, name, radius):
"""Debug helper to add a line list as a set of capsules
Args:
            vertices: The line end points, two per line segment
            color: The color of the lines
            time: The time to update at
            name: A name for the USD prim on the stage
            radius: The radius of the capsules used to draw the lines
"""
num_lines = int(len(vertices)/2)
if (num_lines < 1):
return
# look up rope point instancer
instancer_path = self.root.GetPath().AppendChild(name)
instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)
if not instancer:
instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetRadiusAttr().Set(radius)
instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
instancer.CreatePrimvar("displayColor", Sdf.ValueTypeNames.Float3Array, "constant", 1)
line_positions = []
line_rotations = []
line_scales = []
# line_colors = []
for i in range(num_lines):
pos0 = vertices[i*2+0]
pos1 = vertices[i*2+1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
#line_colors.append(Gf.Vec3f((float(i)/num_lines, 0.5, 0.5)))
instancer.GetPositionsAttr().Set(line_positions, time)
instancer.GetOrientationsAttr().Set(line_rotations, time)
instancer.GetScalesAttr().Set(line_scales, time)
instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)
# instancer.GetPrimvar("displayColor").Set(line_colors, time)
def add_line_strip(self, vertices: dflex.sim.List[dflex.sim.Vec3], color: tuple, time: float, name: str, radius: float=0.01):
"""Debug helper to add a line strip as a connected list of capsules
Args:
vertices: The vertices of the line-strip
color: The color of the line
time: The time to update at
"""
num_lines = int(len(vertices)-1)
if (num_lines < 1):
return
# look up rope point instancer
instancer_path = self.root.GetPath().AppendChild(name)
instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)
if not instancer:
instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetRadiusAttr().Set(radius)
instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
line_positions = []
line_rotations = []
line_scales = []
for i in range(num_lines):
pos0 = vertices[i]
pos1 = vertices[i+1]
(pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))
line_positions.append(pos)
line_rotations.append(rot)
line_scales.append(scale)
instancer.GetPositionsAttr().Set(line_positions, time)
instancer.GetOrientationsAttr().Set(line_rotations, time)
instancer.GetScalesAttr().Set(line_scales, time)
instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)
instancer_capsule = UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule"))
instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], time)
| 17,760 | Python | 34.808468 | 131 | 0.586768 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/model.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A module for building simulation models and state.
"""
import math
import torch
import numpy as np
from typing import Tuple
from typing import List
Vec3 = List[float]
Vec4 = List[float]
Quat = List[float]
Mat33 = List[float]
Transform = Tuple[Vec3, Quat]
from dflex.util import *
# shape geometry types
GEO_SPHERE = 0
GEO_BOX = 1
GEO_CAPSULE = 2
GEO_MESH = 3
GEO_SDF = 4
GEO_PLANE = 5
GEO_NONE = 6
# body joint types
JOINT_PRISMATIC = 0
JOINT_REVOLUTE = 1
JOINT_BALL = 2
JOINT_FIXED = 3
JOINT_FREE = 4
class Mesh:
"""Describes a triangle collision mesh for simulation
Attributes:
vertices (List[Vec3]): Mesh vertices
indices (List[int]): Mesh indices
I (Mat33): Inertia tensor of the mesh assuming density of 1.0 (around the center of mass)
mass (float): The total mass of the body assuming density of 1.0
com (Vec3): The center of mass of the body
"""
def __init__(self, vertices: List[Vec3], indices: List[int]):
"""Construct a Mesh object from a triangle mesh
The mesh center of mass and inertia tensor will automatically be
calculated using a density of 1.0. This computation is only valid
if the mesh is closed (two-manifold).
Args:
vertices: List of vertices in the mesh
indices: List of triangle indices, 3 per-element
"""
self.vertices = vertices
self.indices = indices
# compute com and inertia (using density=1.0)
com = np.mean(vertices, 0)
num_tris = int(len(indices) / 3)
# compute signed inertia for each tetrahedron
# formed with the interior point, using an order-2
# quadrature: https://www.sciencedirect.com/science/article/pii/S0377042712001604#br000040
weight = 0.25
alpha = math.sqrt(5.0) / 5.0
I = np.zeros((3, 3))
mass = 0.0
for i in range(num_tris):
p = np.array(vertices[indices[i * 3 + 0]])
q = np.array(vertices[indices[i * 3 + 1]])
r = np.array(vertices[indices[i * 3 + 2]])
mid = (com + p + q + r) / 4.0
pcom = p - com
qcom = q - com
rcom = r - com
Dm = np.matrix((pcom, qcom, rcom)).T
volume = np.linalg.det(Dm) / 6.0
# quadrature points lie on the line between the
# centroid and each vertex of the tetrahedron
quads = (mid + (p - mid) * alpha, mid + (q - mid) * alpha, mid + (r - mid) * alpha, mid + (com - mid) * alpha)
for j in range(4):
# displacement of quadrature point from COM
d = quads[j] - com
I += weight * volume * (length_sq(d) * np.eye(3, 3) - np.outer(d, d))
mass += weight * volume
self.I = I
self.mass = mass
self.com = com
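# Editorial usage sketch (not part of the library API): building a Mesh from a
# closed unit tetrahedron. With the assumed density of 1.0 the computed mass
# equals the enclosed volume, 1/6 for this shape.
def _mesh_inertia_example() -> Mesh:
    verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    # outward-facing (counter-clockwise seen from outside) triangle winding
    tris = [0, 2, 1, 0, 1, 3, 0, 3, 2, 1, 2, 3]
    mesh = Mesh(verts, tris)    # mesh.mass ~= 1.0 / 6.0
    return mesh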
class State:
"""The State object holds all *time-varying* data for a model.
Time-varying data includes particle positions, velocities, rigid body states, and
anything that is output from the integrator as derived data, e.g.: forces.
The exact attributes depend on the contents of the model. State objects should
generally be created using the :func:`Model.state()` function.
Attributes:
particle_q (torch.Tensor): Tensor of particle positions
particle_qd (torch.Tensor): Tensor of particle velocities
joint_q (torch.Tensor): Tensor of joint coordinates
joint_qd (torch.Tensor): Tensor of joint velocities
joint_act (torch.Tensor): Tensor of joint actuation values
"""
def __init__(self):
self.particle_count = 0
self.link_count = 0
# def flatten(self):
# """Returns a list of Tensors stored by the state
# This function is intended to be used internal-only but can be used to obtain
# a set of all tensors owned by the state.
# """
# tensors = []
# # particles
# if (self.particle_count):
# tensors.append(self.particle_q)
# tensors.append(self.particle_qd)
# # articulations
# if (self.link_count):
# tensors.append(self.joint_q)
# tensors.append(self.joint_qd)
# tensors.append(self.joint_act)
# return tensors
def flatten(self):
"""Returns a list of Tensors stored by the state
This function is intended to be used internal-only but can be used to obtain
a set of all tensors owned by the state.
"""
tensors = []
# build a list of all tensor attributes
for attr, value in self.__dict__.items():
if (torch.is_tensor(value)):
tensors.append(value)
return tensors
class Model:
"""Holds the definition of the simulation model
This class holds the non-time varying description of the system, i.e.:
all geometry, constraints, and parameters used to describe the simulation.
Attributes:
particle_q (torch.Tensor): Particle positions, shape [particle_count, 3], float
particle_qd (torch.Tensor): Particle velocities, shape [particle_count, 3], float
particle_mass (torch.Tensor): Particle mass, shape [particle_count], float
particle_inv_mass (torch.Tensor): Particle inverse mass, shape [particle_count], float
shape_transform (torch.Tensor): Rigid shape transforms, shape [shape_count, 7], float
shape_body (torch.Tensor): Rigid shape body index, shape [shape_count], int
shape_geo_type (torch.Tensor): Rigid shape geometry type, [shape_count], int
shape_geo_src (torch.Tensor): Rigid shape geometry source, shape [shape_count], int
shape_geo_scale (torch.Tensor): Rigid shape geometry scale, shape [shape_count, 3], float
shape_materials (torch.Tensor): Rigid shape contact materials, shape [shape_count, 4], float
spring_indices (torch.Tensor): Particle spring indices, shape [spring_count*2], int
spring_rest_length (torch.Tensor): Particle spring rest length, shape [spring_count], float
spring_stiffness (torch.Tensor): Particle spring stiffness, shape [spring_count], float
spring_damping (torch.Tensor): Particle spring damping, shape [spring_count], float
spring_control (torch.Tensor): Particle spring activation, shape [spring_count], float
tri_indices (torch.Tensor): Triangle element indices, shape [tri_count*3], int
tri_poses (torch.Tensor): Triangle element rest pose, shape [tri_count, 2, 2], float
tri_activations (torch.Tensor): Triangle element activations, shape [tri_count], float
edge_indices (torch.Tensor): Bending edge indices, shape [edge_count*2], int
edge_rest_angle (torch.Tensor): Bending edge rest angle, shape [edge_count], float
tet_indices (torch.Tensor): Tetrahedral element indices, shape [tet_count*4], int
tet_poses (torch.Tensor): Tetrahedral rest poses, shape [tet_count, 3, 3], float
tet_activations (torch.Tensor): Tetrahedral volumetric activations, shape [tet_count], float
tet_materials (torch.Tensor): Tetrahedral elastic parameters in form :math:`k_{mu}, k_{lambda}, k_{damp}`, shape [tet_count, 3]
body_X_cm (torch.Tensor): Rigid body center of mass (in local frame), shape [link_count, 7], float
body_I_m (torch.Tensor): Rigid body inertia tensor (relative to COM), shape [link_count, 3, 3], float
articulation_start (torch.Tensor): Articulation start offset, shape [num_articulations], int
joint_q (torch.Tensor): Joint coordinate, shape [joint_coord_count], float
joint_qd (torch.Tensor): Joint velocity, shape [joint_dof_count], float
joint_type (torch.Tensor): Joint type, shape [joint_count], int
joint_parent (torch.Tensor): Joint parent, shape [joint_count], int
joint_X_pj (torch.Tensor): Joint transform in parent frame, shape [joint_count, 7], float
joint_X_cm (torch.Tensor): Joint mass frame in child frame, shape [joint_count, 7], float
joint_axis (torch.Tensor): Joint axis in child frame, shape [joint_count, 3], float
joint_q_start (torch.Tensor): Joint coordinate offset, shape [joint_count], int
joint_qd_start (torch.Tensor): Joint velocity offset, shape [joint_count], int
joint_armature (torch.Tensor): Armature for each joint, shape [joint_count], float
joint_target_ke (torch.Tensor): Joint stiffness, shape [joint_count], float
joint_target_kd (torch.Tensor): Joint damping, shape [joint_count], float
joint_target (torch.Tensor): Joint target, shape [joint_count], float
particle_count (int): Total number of particles in the system
joint_coord_count (int): Total number of joint coordinates in the system
joint_dof_count (int): Total number of joint dofs in the system
link_count (int): Total number of links in the system
shape_count (int): Total number of shapes in the system
tri_count (int): Total number of triangles in the system
tet_count (int): Total number of tetrahedra in the system
edge_count (int): Total number of edges in the system
spring_count (int): Total number of springs in the system
contact_count (int): Total number of contacts in the system
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly, however it is possible to do so if
desired.
"""
def __init__(self, adapter):
self.particle_q = None
self.particle_qd = None
self.particle_mass = None
self.particle_inv_mass = None
self.shape_transform = None
self.shape_body = None
self.shape_geo_type = None
self.shape_geo_src = None
self.shape_geo_scale = None
self.shape_materials = None
self.spring_indices = None
self.spring_rest_length = None
self.spring_stiffness = None
self.spring_damping = None
self.spring_control = None
self.tri_indices = None
self.tri_poses = None
self.tri_activations = None
self.edge_indices = None
self.edge_rest_angle = None
self.tet_indices = None
self.tet_poses = None
self.tet_activations = None
self.tet_materials = None
self.body_X_cm = None
self.body_I_m = None
self.articulation_start = None
self.joint_q = None
self.joint_qd = None
self.joint_type = None
self.joint_parent = None
self.joint_X_pj = None
self.joint_X_cm = None
self.joint_axis = None
self.joint_q_start = None
self.joint_qd_start = None
self.joint_armature = None
self.joint_target_ke = None
self.joint_target_kd = None
self.joint_target = None
self.particle_count = 0
self.joint_coord_count = 0
self.joint_dof_count = 0
self.link_count = 0
self.shape_count = 0
self.tri_count = 0
self.tet_count = 0
self.edge_count = 0
self.spring_count = 0
self.contact_count = 0
self.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)
self.contact_distance = 0.1
self.contact_ke = 1.e+3
self.contact_kd = 0.0
self.contact_kf = 1.e+3
self.contact_mu = 0.5
self.tri_ke = 100.0
self.tri_ka = 100.0
self.tri_kd = 10.0
self.tri_kb = 100.0
self.tri_drag = 0.0
self.tri_lift = 0.0
self.edge_ke = 100.0
self.edge_kd = 0.0
self.particle_radius = 0.1
self.adapter = adapter
def state(self) -> State:
"""Returns a state object for the model
The returned state will be initialized with the initial configuration given in
the model description.
"""
s = State()
s.particle_count = self.particle_count
s.link_count = self.link_count
#--------------------------------
# dynamic state (input, output)
# particles
if (self.particle_count):
s.particle_q = torch.clone(self.particle_q)
s.particle_qd = torch.clone(self.particle_qd)
# articulations
if (self.link_count):
s.joint_q = torch.clone(self.joint_q)
s.joint_qd = torch.clone(self.joint_qd)
s.joint_act = torch.zeros_like(self.joint_qd)
s.joint_q.requires_grad = True
s.joint_qd.requires_grad = True
#--------------------------------
# derived state (output only)
if (self.particle_count):
s.particle_f = torch.empty_like(self.particle_qd, requires_grad=True)
if (self.link_count):
# joints
s.joint_qdd = torch.empty_like(self.joint_qd, requires_grad=True)
s.joint_tau = torch.empty_like(self.joint_qd, requires_grad=True)
s.joint_S_s = torch.empty((self.joint_dof_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
# derived rigid body data (maximal coordinates)
s.body_X_sc = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_X_sm = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_I_s = torch.empty((self.link_count, 6, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_v_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_a_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
s.body_f_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
#s.body_ft_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
#s.body_f_ext_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
return s
def alloc_mass_matrix(self):
if (self.link_count):
# system matrices
self.M = torch.zeros(self.M_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.J = torch.zeros(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.P = torch.empty(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
self.H = torch.empty(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
# zero since only upper triangle is set which can trigger NaN detection
self.L = torch.zeros(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
def flatten(self):
"""Returns a list of Tensors stored by the model
This function is intended to be used internal-only but can be used to obtain
a set of all tensors owned by the model.
"""
tensors = []
# build a list of all tensor attributes
for attr, value in self.__dict__.items():
if (torch.is_tensor(value)):
tensors.append(value)
return tensors
# builds contacts
def collide(self, state: State):
"""Constructs a set of contacts between rigid bodies and ground
This method performs collision detection between rigid body vertices in the scene and updates
the model's set of contacts stored as the following attributes:
* **contact_body0**: Tensor of ints with first rigid body index
* **contact_body1**: Tensor of ints with second rigid body index (currently always -1 to indicate ground)
* **contact_point0**: Tensor of Vec3 representing contact point in local frame of body0
* **contact_dist**: Tensor of float values representing the distance to maintain
* **contact_material**: Tensor contact material indices
Args:
state: The state of the simulation at which to perform collision detection
Note:
Currently this method uses an 'all pairs' approach to contact generation that is
            state independent. In the future this will change and will create a node in
the computational graph to propagate gradients as a function of state.
Todo:
Only ground-plane collision is currently implemented. Since the ground is static
it is acceptable to call this method once at initialization time.
"""
body0 = []
body1 = []
point = []
dist = []
mat = []
def add_contact(b0, b1, t, p0, d, m):
body0.append(b0)
body1.append(b1)
point.append(transform_point(t, np.array(p0)))
dist.append(d)
mat.append(m)
for i in range(self.shape_count):
# transform from shape to body
X_bs = transform_expand(self.shape_transform[i].tolist())
geo_type = self.shape_geo_type[i].item()
if (geo_type == GEO_SPHERE):
radius = self.shape_geo_scale[i][0].item()
add_contact(self.shape_body[i], -1, X_bs, (0.0, 0.0, 0.0), radius, i)
elif (geo_type == GEO_CAPSULE):
radius = self.shape_geo_scale[i][0].item()
half_width = self.shape_geo_scale[i][1].item()
add_contact(self.shape_body[i], -1, X_bs, (-half_width, 0.0, 0.0), radius, i)
add_contact(self.shape_body[i], -1, X_bs, (half_width, 0.0, 0.0), radius, i)
elif (geo_type == GEO_BOX):
edges = self.shape_geo_scale[i].tolist()
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, ( edges[0], -edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], -edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], -edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], edges[2]), 0.0, i)
add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], edges[2]), 0.0, i)
elif (geo_type == GEO_MESH):
mesh = self.shape_geo_src[i]
scale = self.shape_geo_scale[i]
for v in mesh.vertices:
p = (v[0] * scale[0], v[1] * scale[1], v[2] * scale[2])
add_contact(self.shape_body[i], -1, X_bs, p, 0.0, i)
# send to torch
self.contact_body0 = torch.tensor(body0, dtype=torch.int32, device=self.adapter)
self.contact_body1 = torch.tensor(body1, dtype=torch.int32, device=self.adapter)
self.contact_point0 = torch.tensor(point, dtype=torch.float32, device=self.adapter)
self.contact_dist = torch.tensor(dist, dtype=torch.float32, device=self.adapter)
self.contact_material = torch.tensor(mat, dtype=torch.int32, device=self.adapter)
self.contact_count = len(body0)
class ModelBuilder:
"""A helper class for building simulation models at runtime.
Use the ModelBuilder to construct a simulation scene. The ModelBuilder
is independent of PyTorch and builds the scene representation using
standard Python data structures, this means it is not differentiable. Once :func:`finalize()`
has been called the ModelBuilder transfers all data to Torch tensors and returns
an object that may be used for simulation.
Example:
>>> import dflex as df
>>>
>>> builder = df.ModelBuilder()
>>>
>>> # anchor point (zero mass)
>>> builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
>>>
>>> # build chain
>>> for i in range(1,10):
>>> builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
>>> builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
>>>
>>> # create model
>>> model = builder.finalize()
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly, however it is possible to do so if
desired.
"""
def __init__(self):
# particles
self.particle_q = []
self.particle_qd = []
self.particle_mass = []
# shapes
self.shape_transform = []
self.shape_body = []
self.shape_geo_type = []
self.shape_geo_scale = []
self.shape_geo_src = []
self.shape_materials = []
# geometry
self.geo_meshes = []
self.geo_sdfs = []
# springs
self.spring_indices = []
self.spring_rest_length = []
self.spring_stiffness = []
self.spring_damping = []
self.spring_control = []
# triangles
self.tri_indices = []
self.tri_poses = []
self.tri_activations = []
# edges (bending)
self.edge_indices = []
self.edge_rest_angle = []
# tetrahedra
self.tet_indices = []
self.tet_poses = []
self.tet_activations = []
self.tet_materials = []
# muscles
self.muscle_start = []
self.muscle_params = []
self.muscle_activation = []
self.muscle_links = []
self.muscle_points = []
# rigid bodies
self.joint_parent = [] # index of the parent body (constant)
self.joint_child = [] # index of the child body (constant)
self.joint_axis = [] # joint axis in child joint frame (constant)
self.joint_X_pj = [] # frame of joint in parent (constant)
self.joint_X_cm = [] # frame of child com (in child coordinates) (constant)
self.joint_q_start = [] # joint offset in the q array
self.joint_qd_start = [] # joint offset in the qd array
self.joint_type = []
self.joint_armature = []
self.joint_target_ke = []
self.joint_target_kd = []
self.joint_target = []
self.joint_limit_lower = []
self.joint_limit_upper = []
self.joint_limit_ke = []
self.joint_limit_kd = []
self.joint_q = [] # generalized coordinates (input)
self.joint_qd = [] # generalized velocities (input)
self.joint_qdd = [] # generalized accelerations (id,fd)
self.joint_tau = [] # generalized actuation (input)
self.joint_u = [] # generalized total torque (fd)
self.body_mass = []
self.body_inertia = []
self.body_com = []
self.articulation_start = []
def add_articulation(self) -> int:
"""Add an articulation object, all subsequently added links (see: :func:`add_link`) will belong to this articulation object.
Calling this method multiple times 'closes' any previous articulations and begins a new one.
Returns:
The index of the articulation
"""
self.articulation_start.append(len(self.joint_type))
return len(self.articulation_start)-1
# rigids, register a rigid body and return its index.
def add_link(
self,
parent : int,
X_pj : Transform,
axis : Vec3,
type : int,
armature: float=0.01,
stiffness: float=0.0,
damping: float=0.0,
limit_lower: float=-1.e+3,
limit_upper: float=1.e+3,
limit_ke: float=100.0,
limit_kd: float=10.0,
com: Vec3=np.zeros(3),
I_m: Mat33=np.zeros((3, 3)),
m: float=0.0) -> int:
"""Adds a rigid body to the model.
Args:
parent: The index of the parent body
X_pj: The location of the joint in the parent's local frame connecting this body
axis: The joint axis
type: The type of joint, should be one of: JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_BALL, JOINT_FIXED, or JOINT_FREE
armature: Additional inertia around the joint axis
stiffness: Spring stiffness that attempts to return joint to zero position
damping: Spring damping that attempts to remove joint velocity
com: The center of mass of the body w.r.t its origin
I_m: The 3x3 inertia tensor of the body (specified relative to the center of mass)
m: The mass of the body
Returns:
The index of the body in the model
Note:
If the mass (m) is zero then the body is treated as kinematic with no dynamics
"""
# joint data
self.joint_type.append(type)
self.joint_axis.append(np.array(axis))
self.joint_parent.append(parent)
self.joint_X_pj.append(X_pj)
self.joint_target_ke.append(stiffness)
self.joint_target_kd.append(damping)
self.joint_limit_ke.append(limit_ke)
self.joint_limit_kd.append(limit_kd)
self.joint_q_start.append(len(self.joint_q))
self.joint_qd_start.append(len(self.joint_qd))
if (type == JOINT_PRISMATIC):
self.joint_q.append(0.0)
self.joint_qd.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_upper.append(limit_upper)
elif (type == JOINT_REVOLUTE):
self.joint_q.append(0.0)
self.joint_qd.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_upper.append(limit_upper)
elif (type == JOINT_BALL):
# quaternion
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(1.0)
# angular velocity
self.joint_qd.append(0.0)
self.joint_qd.append(0.0)
self.joint_qd.append(0.0)
# pd targets
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_armature.append(armature)
self.joint_armature.append(armature)
self.joint_armature.append(armature)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(limit_lower)
self.joint_limit_lower.append(0.0)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(limit_upper)
self.joint_limit_upper.append(0.0)
elif (type == JOINT_FIXED):
pass
elif (type == JOINT_FREE):
# translation
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
# quaternion
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(0.0)
self.joint_q.append(1.0)
# note armature for free joints should always be zero, better to modify the body inertia directly
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_armature.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_target.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_lower.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
self.joint_limit_upper.append(0.0)
# joint velocities
for i in range(6):
self.joint_qd.append(0.0)
self.body_inertia.append(np.zeros((3, 3)))
self.body_mass.append(0.0)
self.body_com.append(np.zeros(3))
# return index of body
return len(self.joint_type) - 1
# muscles
    def add_muscle(self, links: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float) -> int:
"""Adds a muscle-tendon activation unit
Args:
links: A list of link indices for each waypoint
positions: A list of positions of each waypoint in the link's local frame
f0: Force scaling
lm: Muscle length
lt: Tendon length
lmax: Maximally efficient muscle length
Returns:
The index of the muscle in the model
"""
n = len(links)
self.muscle_start.append(len(self.muscle_links))
self.muscle_params.append((f0, lm, lt, lmax, pen))
self.muscle_activation.append(0.0)
for i in range(n):
self.muscle_links.append(links[i])
self.muscle_points.append(positions[i])
# return the index of the muscle
return len(self.muscle_start)-1
# shapes
def add_shape_plane(self, plane: Vec4=(0.0, 1.0, 0.0, 0.0), ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
"""Adds a plane collision shape
Args:
plane: The plane equation in form a*x + b*y + c*z + d = 0
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(-1, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), GEO_PLANE, plane, None, 0.0, ke, kd, kf, mu)
def add_shape_sphere(self, body, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
"""Adds a sphere collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the sphere
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_SPHERE, (radius, 0.0, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_box(self,
body : int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
hx: float=0.5,
hy: float=0.5,
hz: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a box collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
hx: The half-extents along the x-axis
hy: The half-extents along the y-axis
hz: The half-extents along the z-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_BOX, (hx, hy, hz, 0.0), None, density, ke, kd, kf, mu)
def add_shape_capsule(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
radius: float=1.0,
half_width: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a capsule collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the capsule
half_width: The half length of the center cylinder along the x-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_CAPSULE, (radius, half_width, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_mesh(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
mesh: Mesh=None,
scale: Vec3=(1.0, 1.0, 1.0),
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a triangle mesh collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
mesh: The mesh object
scale: Scale to use for the collider
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_MESH, (scale[0], scale[1], scale[2], 0.0), mesh, density, ke, kd, kf, mu)
    def _add_shape(self, body, pos, rot, type, scale, src, density, ke, kd, kf, mu):
self.shape_body.append(body)
self.shape_transform.append(transform(pos, rot))
self.shape_geo_type.append(type)
self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
self.shape_geo_src.append(src)
self.shape_materials.append((ke, kd, kf, mu))
(m, I) = self._compute_shape_mass(type, scale, src, density)
self._update_body_mass(body, m, I, np.array(pos), np.array(rot))
# particles
def add_particle(self, pos : Vec3, vel : Vec3, mass : float) -> int:
"""Adds a single particle to the model
Args:
pos: The initial position of the particle
vel: The initial velocity of the particle
mass: The mass of the particle
Note:
            Set the mass equal to zero to create a 'kinematic' particle that is not subject to dynamics.
Returns:
The index of the particle in the system
"""
self.particle_q.append(pos)
self.particle_qd.append(vel)
self.particle_mass.append(mass)
return len(self.particle_q) - 1
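    # Illustrative usage sketch (assumes `builder`): a kinematic anchor (zero mass)
    # and a dynamic particle below it
    #   anchor = builder.add_particle(pos=(0.0, 1.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
    #   bob = builder.add_particle(pos=(0.0, 0.5, 0.0), vel=(0.0, 0.0, 0.0), mass=1.0)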
def add_spring(self, i : int, j, ke : float, kd : float, control: float):
"""Adds a spring between two particles in the system
Args:
i: The index of the first particle
j: The index of the second particle
ke: The elastic stiffness of the spring
kd: The damping stiffness of the spring
control: The actuation level of the spring
Note:
The spring is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
self.spring_indices.append(i)
self.spring_indices.append(j)
self.spring_stiffness.append(ke)
self.spring_damping.append(kd)
self.spring_control.append(control)
# compute rest length
p = self.particle_q[i]
q = self.particle_q[j]
delta = np.subtract(p, q)
l = np.sqrt(np.dot(delta, delta))
self.spring_rest_length.append(l)
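    # Illustrative usage sketch (continuing the particle sketch above): connect the two
    # particles with a passive spring; the rest length is measured from the initial state
    #   builder.add_spring(anchor, bob, ke=1.e+3, kd=10.0, control=0.0)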
def add_triangle(self, i : int, j : int, k : int) -> float:
"""Adds a trianglular FEM element between three particles in the system.
Triangles are modeled as viscoelastic elements with elastic stiffness and damping
Parameters specfied on the model. See model.tri_ke, model.tri_kd.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
        Returns:
            The area of the triangle
        Note:
            The triangle is created with a rest pose based on the relative positions
            of the particles in their initial configuration.
        Todo:
            * Expose elastic parameters on a per-element basis
"""
# compute basis for 2D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
qp = q - p
rp = r - p
# construct basis aligned with the triangle
n = normalize(np.cross(qp, rp))
e1 = normalize(qp)
e2 = normalize(np.cross(n, e1))
R = np.matrix((e1, e2))
M = np.matrix((qp, rp))
D = R * M.T
inv_D = np.linalg.inv(D)
area = np.linalg.det(D) / 2.0
if (area < 0.0):
print("inverted triangle element")
self.tri_indices.append((i, j, k))
self.tri_poses.append(inv_D.tolist())
self.tri_activations.append(0.0)
return area
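    # Illustrative usage sketch (assumes `builder` and three previously added,
    # non-collinear particles i, j, k; the returned area is often used for mass lumping):
    #   area = builder.add_triangle(i, j, k)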
def add_tetrahedron(self, i: int, j: int, k: int, l: int, k_mu: float=1.e+3, k_lambda: float=1.e+3, k_damp: float=0.0) -> float:
"""Adds a tetrahedral FEM element between four particles in the system.
        Tetrahedra are modeled as viscoelastic elements with a NeoHookean energy
density based on [Smith et al. 2018].
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The element's damping stiffness
        Returns:
            The volume of the tetrahedron
        Note:
            The tetrahedron is created with a rest pose based on the particles' initial configuration
"""
        # compute basis for 3D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
s = np.array(self.particle_q[l])
qp = q - p
rp = r - p
sp = s - p
Dm = np.matrix((qp, rp, sp)).T
volume = np.linalg.det(Dm) / 6.0
if (volume <= 0.0):
print("inverted tetrahedral element")
else:
inv_Dm = np.linalg.inv(Dm)
self.tet_indices.append((i, j, k, l))
self.tet_poses.append(inv_Dm.tolist())
self.tet_activations.append(0.0)
self.tet_materials.append((k_mu, k_lambda, k_damp))
return volume
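    # Illustrative usage sketch (assumes `builder` and four previously added particles
    # i, j, k, l forming a positively oriented tetrahedron):
    #   vol = builder.add_tetrahedron(i, j, k, l, k_mu=1.e+4, k_lambda=1.e+4, k_damp=1.0)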
def add_edge(self, i: int, j: int, k: int, l: int, rest: float=None):
"""Adds a bending edge element between four particles in the system.
        Bending elements are designed to be between two connected triangles. The
        bending energy is based on [Bridson et al. 2002]. Bending stiffness is controlled
by the `model.tri_kb` parameter.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
rest: The rest angle across the edge in radians, if not specified it will be computed
Note:
The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing
vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise
winding: (i, k, l), (j, l, k).
"""
# compute rest angle
if (rest == None):
x1 = np.array(self.particle_q[i])
x2 = np.array(self.particle_q[j])
x3 = np.array(self.particle_q[k])
x4 = np.array(self.particle_q[l])
n1 = normalize(np.cross(x3 - x1, x4 - x1))
n2 = normalize(np.cross(x4 - x2, x3 - x2))
e = normalize(x4 - x3)
d = np.clip(np.dot(n2, n1), -1.0, 1.0)
angle = math.acos(d)
sign = np.sign(np.dot(np.cross(n2, n1), e))
rest = angle * sign
self.edge_indices.append((i, j, k, l))
self.edge_rest_angle.append(rest)
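    # Illustrative usage sketch: add a bending element across the shared edge (k, l) of
    # the triangles (i, k, l) and (j, l, k); the rest angle is computed from the initial pose
    #   builder.add_edge(i, j, k, l)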
def add_cloth_grid(self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
cell_x: float,
cell_y: float,
mass: float,
reverse_winding: bool=False,
fix_left: bool=False,
fix_right: bool=False,
fix_top: bool=False,
fix_bottom: bool=False):
"""Helper to create a regular planar cloth grid
Creates a rectangular grid of particles with FEM triangles and bending elements
automatically.
Args:
pos: The position of the cloth in world space
rot: The orientation of the cloth in world space
vel: The velocity of the cloth in world space
            dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
mass: The mass of each particle
reverse_winding: Flip the winding of the mesh
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
def grid_index(x, y, dim_x):
return y * dim_x + x
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
for y in range(0, dim_y + 1):
for x in range(0, dim_x + 1):
g = np.array((x * cell_x, y * cell_y, 0.0))
p = quat_rotate(rot, g) + pos
m = mass
if (x == 0 and fix_left):
m = 0.0
elif (x == dim_x and fix_right):
m = 0.0
elif (y == 0 and fix_bottom):
m = 0.0
elif (y == dim_y and fix_top):
m = 0.0
self.add_particle(p, vel, m)
if (x > 0 and y > 0):
if (reverse_winding):
tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1))
tri2 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
self.add_triangle(*tri1)
self.add_triangle(*tri2)
else:
tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
tri2 = (start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1))
self.add_triangle(*tri1)
self.add_triangle(*tri2)
end_vertex = len(self.particle_q)
end_tri = len(self.tri_indices)
# bending constraints, could create these explicitly for a grid but this
# is a good test of the adjacency structure
adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
for k, e in adj.edges.items():
# skip open edges
if (e.f0 == -1 or e.f1 == -1):
continue
self.add_edge(e.o0, e.o1, e.v0, e.v1) # opposite 0, opposite 1, vertex 0, vertex 1
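    # Illustrative usage sketch (assumes `builder`): a 32x32 cloth sheet pinned along
    # its top row of particles
    #   builder.add_cloth_grid(pos=(0.0, 2.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
    #                          vel=(0.0, 0.0, 0.0), dim_x=32, dim_y=32,
    #                          cell_x=0.05, cell_y=0.05, mass=0.1, fix_top=True)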
def add_cloth_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, edge_callback=None, face_callback=None):
"""Helper to create a cloth model from a regular triangle mesh
Creates one FEM triangle element and one bending element for every face
and edge in the input triangle mesh
Args:
pos: The position of the cloth in world space
            rot: The orientation of the cloth in world space
            scale: The scaling factor applied to the mesh vertices
            vel: The velocity of the cloth in world space
vertices: A list of vertex positions
indices: A list of triangle indices, 3 entries per-face
density: The density per-area of the mesh
edge_callback: A user callback when an edge is created
face_callback: A user callback when a face is created
Note:
            The mesh should be two-manifold.
"""
num_tris = int(len(indices) / 3)
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
# particles
for i, v in enumerate(vertices):
p = quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# triangles
for t in range(num_tris):
i = start_vertex + indices[t * 3 + 0]
j = start_vertex + indices[t * 3 + 1]
k = start_vertex + indices[t * 3 + 2]
if (face_callback):
face_callback(i, j, k)
area = self.add_triangle(i, j, k)
# add area fraction to particles
if (area > 0.0):
self.particle_mass[i] += density * area / 3.0
self.particle_mass[j] += density * area / 3.0
self.particle_mass[k] += density * area / 3.0
end_vertex = len(self.particle_q)
end_tri = len(self.tri_indices)
adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
# bend constraints
for k, e in adj.edges.items():
# skip open edges
if (e.f0 == -1 or e.f1 == -1):
continue
if (edge_callback):
edge_callback(e.f0, e.f1)
self.add_edge(e.o0, e.o1, e.v0, e.v1)
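    # Illustrative usage sketch (assumes `verts` / `tris` describe a two-manifold
    # triangle mesh, with `tris` a flat list of 3 indices per face):
    #   builder.add_cloth_mesh(pos=(0.0, 1.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), scale=1.0,
    #                          vel=(0.0, 0.0, 0.0), vertices=verts, indices=tris,
    #                          density=100.0)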
def add_soft_grid(self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
dim_z: int,
cell_x: float,
cell_y: float,
cell_z: float,
density: float,
k_mu: float,
k_lambda: float,
k_damp: float,
fix_left: bool=False,
fix_right: bool=False,
fix_top: bool=False,
fix_bottom: bool=False):
"""Helper to create a rectangular tetrahedral FEM grid
        Creates a regular grid of FEM tetrahedra and surface triangles. Useful for example
to create beams and sheets. Each hexahedral cell is decomposed into 5
tetrahedral elements.
Args:
pos: The position of the solid in world space
rot: The orientation of the solid in world space
vel: The velocity of the solid in world space
            dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
dim_z: The number of rectangular cells along the z-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
cell_z: The width of each cell in the z-direction
            density: The density of the solid; per-particle masses are derived from the cell volume
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
start_vertex = len(self.particle_q)
mass = cell_x * cell_y * cell_z * density
for z in range(dim_z + 1):
for y in range(dim_y + 1):
for x in range(dim_x + 1):
v = np.array((x * cell_x, y * cell_y, z * cell_z))
m = mass
if (fix_left and x == 0):
m = 0.0
if (fix_right and x == dim_x):
m = 0.0
if (fix_top and y == dim_y):
m = 0.0
if (fix_bottom and y == 0):
m = 0.0
p = quat_rotate(rot, v) + pos
self.add_particle(p, vel, m)
# dict of open faces
faces = {}
def add_face(i: int, j: int, k: int):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
def add_tet(i: int, j: int, k: int, l: int):
self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp)
add_face(i, k, j)
add_face(j, k, l)
add_face(i, j, l)
add_face(i, l, k)
def grid_index(x, y, z):
return (dim_x + 1) * (dim_y + 1) * z + (dim_x + 1) * y + x
for z in range(dim_z):
for y in range(dim_y):
for x in range(dim_x):
v0 = grid_index(x, y, z) + start_vertex
v1 = grid_index(x + 1, y, z) + start_vertex
v2 = grid_index(x + 1, y, z + 1) + start_vertex
v3 = grid_index(x, y, z + 1) + start_vertex
v4 = grid_index(x, y + 1, z) + start_vertex
v5 = grid_index(x + 1, y + 1, z) + start_vertex
v6 = grid_index(x + 1, y + 1, z + 1) + start_vertex
v7 = grid_index(x, y + 1, z + 1) + start_vertex
if (((x & 1) ^ (y & 1) ^ (z & 1))):
add_tet(v0, v1, v4, v3)
add_tet(v2, v3, v6, v1)
add_tet(v5, v4, v1, v6)
add_tet(v7, v6, v3, v4)
add_tet(v4, v1, v6, v3)
else:
add_tet(v1, v2, v5, v0)
add_tet(v3, v0, v7, v2)
add_tet(v4, v7, v0, v5)
add_tet(v6, v5, v2, v7)
add_tet(v5, v2, v7, v0)
# add triangles
for k, v in faces.items():
self.add_triangle(v[0], v[1], v[2])
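    # Illustrative usage sketch (assumes `builder`): a soft beam clamped on its left face
    #   builder.add_soft_grid(pos=(0.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
    #                         vel=(0.0, 0.0, 0.0), dim_x=20, dim_y=4, dim_z=4,
    #                         cell_x=0.05, cell_y=0.05, cell_z=0.05, density=1000.0,
    #                         k_mu=1.e+4, k_lambda=1.e+4, k_damp=1.0, fix_left=True)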
def add_soft_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, k_mu: float, k_lambda: float, k_damp: float):
"""Helper to create a tetrahedral model from an input tetrahedral mesh
Args:
pos: The position of the solid in world space
            rot: The orientation of the solid in world space
            scale: The scaling factor applied to the mesh vertices
            vel: The velocity of the solid in world space
vertices: A list of vertex positions
indices: A list of tetrahedron indices, 4 entries per-element
            density: The density per-volume of the mesh
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
"""
num_tets = int(len(indices) / 4)
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
# dict of open faces
faces = {}
def add_face(i, j, k):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
# add particles
for v in vertices:
p = quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# add tetrahedra
for t in range(num_tets):
v0 = start_vertex + indices[t * 4 + 0]
v1 = start_vertex + indices[t * 4 + 1]
v2 = start_vertex + indices[t * 4 + 2]
v3 = start_vertex + indices[t * 4 + 3]
volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp)
# distribute volume fraction to particles
if (volume > 0.0):
self.particle_mass[v0] += density * volume / 4.0
self.particle_mass[v1] += density * volume / 4.0
self.particle_mass[v2] += density * volume / 4.0
self.particle_mass[v3] += density * volume / 4.0
# build open faces
add_face(v0, v2, v1)
add_face(v1, v2, v3)
add_face(v0, v1, v3)
add_face(v0, v3, v2)
# add triangles
for k, v in faces.items():
try:
self.add_triangle(v[0], v[1], v[2])
except np.linalg.LinAlgError:
continue
def compute_sphere_inertia(self, density: float, r: float) -> tuple:
"""Helper to compute mass and inertia of a sphere
Args:
density: The sphere density
r: The sphere radius
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = 4.0 / 3.0 * math.pi * r * r * r
m = density * v
Ia = 2.0 / 5.0 * m * r * r
I = np.array([[Ia, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
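    # Quick numeric check of the formula above (illustrative): density=1000.0, r=0.1
    # gives m = 1000 * 4/3 * pi * 0.001 ~= 4.19 and Ia = 2/5 * m * r^2 ~= 1.68e-2.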
def compute_capsule_inertia(self, density: float, r: float, l: float) -> tuple:
"""Helper to compute mass and inertia of a capsule
Args:
density: The capsule density
r: The capsule radius
l: The capsule length (full width of the interior cylinder)
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
ms = density * (4.0 / 3.0) * math.pi * r * r * r
mc = density * math.pi * r * r * l
# total mass
m = ms + mc
# adapted from ODE
Ia = mc * (0.25 * r * r + (1.0 / 12.0) * l * l) + ms * (0.4 * r * r + 0.375 * r * l + 0.25 * l * l)
Ib = (mc * 0.5 + ms * 0.4) * r * r
I = np.array([[Ib, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
def compute_box_inertia(self, density: float, w: float, h: float, d: float) -> tuple:
"""Helper to compute mass and inertia of a box
Args:
density: The box density
w: The box width along the x-axis
h: The box height along the y-axis
d: The box depth along the z-axis
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = w * h * d
m = density * v
Ia = 1.0 / 12.0 * m * (h * h + d * d)
Ib = 1.0 / 12.0 * m * (w * w + d * d)
Ic = 1.0 / 12.0 * m * (w * w + h * h)
I = np.array([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ic]])
return (m, I)
def _compute_shape_mass(self, type, scale, src, density):
if density == 0: # zero density means fixed
return 0, np.zeros((3, 3))
if (type == GEO_SPHERE):
return self.compute_sphere_inertia(density, scale[0])
elif (type == GEO_BOX):
return self.compute_box_inertia(density, scale[0] * 2.0, scale[1] * 2.0, scale[2] * 2.0)
elif (type == GEO_CAPSULE):
return self.compute_capsule_inertia(density, scale[0], scale[1] * 2.0)
elif (type == GEO_MESH):
#todo: non-uniform scale of inertia tensor
s = scale[0] # eventually want to compute moment of inertia for mesh.
return (density * src.mass * s * s * s, density * src.I * s * s * s * s * s)
    # incrementally updates rigid body mass with additional mass and inertia expressed at a transform local to the body
def _update_body_mass(self, i, m, I, p, q):
if (i == -1):
return
# find new COM
new_mass = self.body_mass[i] + m
if new_mass == 0.0: # no mass
return
new_com = (self.body_com[i] * self.body_mass[i] + p * m) / new_mass
# shift inertia to new COM
com_offset = new_com - self.body_com[i]
shape_offset = new_com - p
new_inertia = transform_inertia(self.body_mass[i], self.body_inertia[i], com_offset, quat_identity()) + transform_inertia(
m, I, shape_offset, q)
self.body_mass[i] = new_mass
self.body_inertia[i] = new_inertia
self.body_com[i] = new_com
    # returns a model object given the description
def finalize(self, adapter: str) -> Model:
"""Convert this builder object to a concrete model for simulation.
After building simulation elements this method should be called to transfer
all data to PyTorch tensors ready for simulation.
Args:
adapter: The simulation adapter to use, e.g.: 'cpu', 'cuda'
Returns:
A model object.
"""
# construct particle inv masses
particle_inv_mass = []
for m in self.particle_mass:
if (m > 0.0):
particle_inv_mass.append(1.0 / m)
else:
particle_inv_mass.append(0.0)
#-------------------------------------
# construct Model (non-time varying) data
m = Model(adapter)
#---------------------
# particles
# state (initial)
m.particle_q = torch.tensor(self.particle_q, dtype=torch.float32, device=adapter)
m.particle_qd = torch.tensor(self.particle_qd, dtype=torch.float32, device=adapter)
# model
m.particle_mass = torch.tensor(self.particle_mass, dtype=torch.float32, device=adapter)
m.particle_inv_mass = torch.tensor(particle_inv_mass, dtype=torch.float32, device=adapter)
#---------------------
# collision geometry
m.shape_transform = torch.tensor(transform_flatten_list(self.shape_transform), dtype=torch.float32, device=adapter)
m.shape_body = torch.tensor(self.shape_body, dtype=torch.int32, device=adapter)
m.shape_geo_type = torch.tensor(self.shape_geo_type, dtype=torch.int32, device=adapter)
m.shape_geo_src = self.shape_geo_src
m.shape_geo_scale = torch.tensor(self.shape_geo_scale, dtype=torch.float32, device=adapter)
m.shape_materials = torch.tensor(self.shape_materials, dtype=torch.float32, device=adapter)
#---------------------
# springs
m.spring_indices = torch.tensor(self.spring_indices, dtype=torch.int32, device=adapter)
m.spring_rest_length = torch.tensor(self.spring_rest_length, dtype=torch.float32, device=adapter)
m.spring_stiffness = torch.tensor(self.spring_stiffness, dtype=torch.float32, device=adapter)
m.spring_damping = torch.tensor(self.spring_damping, dtype=torch.float32, device=adapter)
m.spring_control = torch.tensor(self.spring_control, dtype=torch.float32, device=adapter)
#---------------------
# triangles
m.tri_indices = torch.tensor(self.tri_indices, dtype=torch.int32, device=adapter)
m.tri_poses = torch.tensor(self.tri_poses, dtype=torch.float32, device=adapter)
m.tri_activations = torch.tensor(self.tri_activations, dtype=torch.float32, device=adapter)
#---------------------
# edges
m.edge_indices = torch.tensor(self.edge_indices, dtype=torch.int32, device=adapter)
m.edge_rest_angle = torch.tensor(self.edge_rest_angle, dtype=torch.float32, device=adapter)
#---------------------
# tetrahedra
m.tet_indices = torch.tensor(self.tet_indices, dtype=torch.int32, device=adapter)
m.tet_poses = torch.tensor(self.tet_poses, dtype=torch.float32, device=adapter)
m.tet_activations = torch.tensor(self.tet_activations, dtype=torch.float32, device=adapter)
m.tet_materials = torch.tensor(self.tet_materials, dtype=torch.float32, device=adapter)
#-----------------------
# muscles
muscle_count = len(self.muscle_start)
# close the muscle waypoint indices
self.muscle_start.append(len(self.muscle_links))
m.muscle_start = torch.tensor(self.muscle_start, dtype=torch.int32, device=adapter)
m.muscle_params = torch.tensor(self.muscle_params, dtype=torch.float32, device=adapter)
m.muscle_links = torch.tensor(self.muscle_links, dtype=torch.int32, device=adapter)
m.muscle_points = torch.tensor(self.muscle_points, dtype=torch.float32, device=adapter)
m.muscle_activation = torch.tensor(self.muscle_activation, dtype=torch.float32, device=adapter)
#--------------------------------------
# articulations
# build 6x6 spatial inertia and COM transform
body_X_cm = []
body_I_m = []
for i in range(len(self.body_inertia)):
body_I_m.append(spatial_matrix_from_inertia(self.body_inertia[i], self.body_mass[i]))
body_X_cm.append(transform(self.body_com[i], quat_identity()))
m.body_I_m = torch.tensor(body_I_m, dtype=torch.float32, device=adapter)
articulation_count = len(self.articulation_start)
joint_coord_count = len(self.joint_q)
joint_dof_count = len(self.joint_qd)
# 'close' the start index arrays with a sentinel value
self.joint_q_start.append(len(self.joint_q))
self.joint_qd_start.append(len(self.joint_qd))
self.articulation_start.append(len(self.joint_type))
# calculate total size and offsets of Jacobian and mass matrices for entire system
m.J_size = 0
m.M_size = 0
m.H_size = 0
articulation_J_start = []
articulation_M_start = []
articulation_H_start = []
articulation_M_rows = []
articulation_H_rows = []
articulation_J_rows = []
articulation_J_cols = []
articulation_dof_start = []
articulation_coord_start = []
for i in range(articulation_count):
first_joint = self.articulation_start[i]
last_joint = self.articulation_start[i+1]
first_coord = self.joint_q_start[first_joint]
last_coord = self.joint_q_start[last_joint]
first_dof = self.joint_qd_start[first_joint]
last_dof = self.joint_qd_start[last_joint]
joint_count = last_joint-first_joint
dof_count = last_dof-first_dof
coord_count = last_coord-first_coord
articulation_J_start.append(m.J_size)
articulation_M_start.append(m.M_size)
articulation_H_start.append(m.H_size)
articulation_dof_start.append(first_dof)
articulation_coord_start.append(first_coord)
# bit of data duplication here, but will leave it as such for clarity
articulation_M_rows.append(joint_count*6)
articulation_H_rows.append(dof_count)
articulation_J_rows.append(joint_count*6)
articulation_J_cols.append(dof_count)
m.J_size += 6*joint_count*dof_count
m.M_size += 6*joint_count*6*joint_count
m.H_size += dof_count*dof_count
m.articulation_joint_start = torch.tensor(self.articulation_start, dtype=torch.int32, device=adapter)
# matrix offsets for batched gemm
m.articulation_J_start = torch.tensor(articulation_J_start, dtype=torch.int32, device=adapter)
m.articulation_M_start = torch.tensor(articulation_M_start, dtype=torch.int32, device=adapter)
m.articulation_H_start = torch.tensor(articulation_H_start, dtype=torch.int32, device=adapter)
m.articulation_M_rows = torch.tensor(articulation_M_rows, dtype=torch.int32, device=adapter)
m.articulation_H_rows = torch.tensor(articulation_H_rows, dtype=torch.int32, device=adapter)
m.articulation_J_rows = torch.tensor(articulation_J_rows, dtype=torch.int32, device=adapter)
m.articulation_J_cols = torch.tensor(articulation_J_cols, dtype=torch.int32, device=adapter)
m.articulation_dof_start = torch.tensor(articulation_dof_start, dtype=torch.int32, device=adapter)
m.articulation_coord_start = torch.tensor(articulation_coord_start, dtype=torch.int32, device=adapter)
# state (initial)
m.joint_q = torch.tensor(self.joint_q, dtype=torch.float32, device=adapter)
m.joint_qd = torch.tensor(self.joint_qd, dtype=torch.float32, device=adapter)
# model
m.joint_type = torch.tensor(self.joint_type, dtype=torch.int32, device=adapter)
m.joint_parent = torch.tensor(self.joint_parent, dtype=torch.int32, device=adapter)
m.joint_X_pj = torch.tensor(transform_flatten_list(self.joint_X_pj), dtype=torch.float32, device=adapter)
m.joint_X_cm = torch.tensor(transform_flatten_list(body_X_cm), dtype=torch.float32, device=adapter)
m.joint_axis = torch.tensor(self.joint_axis, dtype=torch.float32, device=adapter)
m.joint_q_start = torch.tensor(self.joint_q_start, dtype=torch.int32, device=adapter)
m.joint_qd_start = torch.tensor(self.joint_qd_start, dtype=torch.int32, device=adapter)
# dynamics properties
m.joint_armature = torch.tensor(self.joint_armature, dtype=torch.float32, device=adapter)
m.joint_target = torch.tensor(self.joint_target, dtype=torch.float32, device=adapter)
m.joint_target_ke = torch.tensor(self.joint_target_ke, dtype=torch.float32, device=adapter)
m.joint_target_kd = torch.tensor(self.joint_target_kd, dtype=torch.float32, device=adapter)
m.joint_limit_lower = torch.tensor(self.joint_limit_lower, dtype=torch.float32, device=adapter)
m.joint_limit_upper = torch.tensor(self.joint_limit_upper, dtype=torch.float32, device=adapter)
m.joint_limit_ke = torch.tensor(self.joint_limit_ke, dtype=torch.float32, device=adapter)
m.joint_limit_kd = torch.tensor(self.joint_limit_kd, dtype=torch.float32, device=adapter)
# counts
m.particle_count = len(self.particle_q)
m.articulation_count = articulation_count
m.joint_coord_count = joint_coord_count
m.joint_dof_count = joint_dof_count
m.muscle_count = muscle_count
m.link_count = len(self.joint_type)
m.shape_count = len(self.shape_geo_type)
m.tri_count = len(self.tri_poses)
m.tet_count = len(self.tet_poses)
m.edge_count = len(self.edge_rest_angle)
m.spring_count = len(self.spring_rest_length)
m.contact_count = 0
# store refs to geometry
m.geo_meshes = self.geo_meshes
m.geo_sdfs = self.geo_sdfs
# enable ground plane
m.ground = True
m.enable_tri_collisions = False
m.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)
# allocate space for mass / jacobian matrices
m.alloc_mass_matrix()
return m
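    # Illustrative end-to-end sketch (assumes the enclosing builder class is
    # instantiated as `ModelBuilder()`, as elsewhere in dflex):
    #   builder = ModelBuilder()
    #   builder.add_cloth_grid(pos=(0.0, 2.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0),
    #                          vel=(0.0, 0.0, 0.0), dim_x=16, dim_y=16,
    #                          cell_x=0.1, cell_y=0.1, mass=0.1, fix_top=True)
    #   model = builder.finalize(adapter='cpu')   # or 'cuda'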
| 71,060 | Python | 36.798404 | 206 | 0.562046 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/adjoint.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import imp
import ast
import math
import inspect
import typing
import weakref
import numpy as np
import torch
import torch.utils.cpp_extension
import dflex.config
import copy
# Todo
#-----
#
# [ ] Unary ops (e.g.: -)
# [ ] Inplace ops (e.g.: +=, -=)
# [ ] Conditionals
# [ ] Loops (unrolled)
# [ ] Auto-gen PyTorch operator
# [ ] CUDA kernel code gen + dynamic compilation
# -----
operators = {}
functions = {}
cuda_functions = {}
kernels = {}
#----------------------
# built-in types
class float3:
    def __init__(self):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
class float4:
    def __init__(self):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.w = 0.0
class quat:
    def __init__(self):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.w = 1.0
class mat22:
def __init__(self):
pass
class mat33:
def __init__(self):
pass
class spatial_vector:
def __init__(self):
pass
class spatial_matrix:
def __init__(self):
pass
class spatial_transform:
def __init__(self):
pass
class void:
def __init__(self):
pass
class tensor:
def __init__(self, type):
self.type = type
self.requires_grad = True
self.__name__ = "tensor<" + type.__name__ + ">"
#----------------------
# register built-in function
def builtin(key):
def insert(func):
func.key = key
func.prefix = "df::"
functions[key] = func
return func
return insert
#---------------------------------
# built-in operators +,-,*,/
@builtin("add")
class AddFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("sub")
class SubFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mod")
class ModFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mul")
class MulFunc:
@staticmethod
def value_type(args):
        # todo: encode operator result types globally
if (args[0].type == mat33 and args[1].type == float3):
return float3
if (args[0].type == spatial_matrix and args[1].type == spatial_vector):
return spatial_vector
else:
return args[0].type
@builtin("div")
class DivFunc:
@staticmethod
def value_type(args):
return args[0].type
#----------------------
# map operator nodes to builtin
operators[ast.Add] = "add"
operators[ast.Sub] = "sub"
operators[ast.Mult] = "mul"
operators[ast.Div] = "div"
operators[ast.FloorDiv] = "div"
operators[ast.Mod] = "mod"
operators[ast.Gt] = ">"
operators[ast.Lt] = "<"
operators[ast.GtE] = ">="
operators[ast.LtE] = "<="
operators[ast.Eq] = "=="
operators[ast.NotEq] = "!="
#----------------------
# built-in functions
@builtin("min")
class MinFunc:
@staticmethod
def value_type(args):
return float
@builtin("max")
class MaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_max")
class LeakyMaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_min")
class LeakyMinFunc:
@staticmethod
def value_type(args):
return float
@builtin("clamp")
class ClampFunc:
@staticmethod
def value_type(args):
return float
@builtin("step")
class StepFunc:
@staticmethod
def value_type(args):
return float
@builtin("nonzero")
class NonZeroFunc:
@staticmethod
def value_type(args):
return float
@builtin("sign")
class SignFunc:
@staticmethod
def value_type(args):
return float
@builtin("abs")
class AbsFunc:
@staticmethod
def value_type(args):
return float
@builtin("sin")
class SinFunc:
@staticmethod
def value_type(args):
return float
@builtin("cos")
class CosFunc:
@staticmethod
def value_type(args):
return float
@builtin("acos")
class ACosFunc:
@staticmethod
def value_type(args):
return float
@builtin("sin")
class SinFunc:
@staticmethod
def value_type(args):
return float
@builtin("cos")
class CosFunc:
@staticmethod
def value_type(args):
return float
@builtin("sqrt")
class SqrtFunc:
@staticmethod
def value_type(args):
return float
@builtin("dot")
class DotFunc:
@staticmethod
def value_type(args):
return float
@builtin("cross")
class CrossFunc:
@staticmethod
def value_type(args):
return float3
@builtin("skew")
class SkewFunc:
@staticmethod
def value_type(args):
return mat33
@builtin("length")
class LengthFunc:
@staticmethod
def value_type(args):
return float
@builtin("normalize")
class NormalizeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("select")
class SelectFunc:
@staticmethod
def value_type(args):
return args[1].type
@builtin("rotate")
class RotateFunc:
@staticmethod
def value_type(args):
return float3
@builtin("rotate_inv")
class RotateInvFunc:
@staticmethod
def value_type(args):
return float3
@builtin("determinant")
class DeterminantFunc:
@staticmethod
def value_type(args):
return float
@builtin("transpose")
class TransposeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("load")
class LoadFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Load input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Load input 1 must be a int")
return args[0].type.type
@builtin("store")
class StoreFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Store input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Store input 1 must be a int")
if (args[2].type != args[0].type.type):
raise Exception("Store input 2 must be of the same type as the tensor")
return None
@builtin("atomic_add")
class AtomicAddFunc:
@staticmethod
def value_type(args):
return None
@builtin("atomic_sub")
class AtomicSubFunc:
@staticmethod
def value_type(args):
return None
@builtin("tid")
class ThreadIdFunc:
@staticmethod
def value_type(args):
return int
# type constructors
@builtin("float")
class floatFunc:
@staticmethod
def value_type(args):
return float
@builtin("int")
class IntFunc:
@staticmethod
def value_type(args):
return int
@builtin("float3")
class Float3Func:
@staticmethod
def value_type(args):
return float3
@builtin("quat")
class QuatFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_identity")
class QuatIdentityFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_from_axis_angle")
class QuatAxisAngleFunc:
@staticmethod
def value_type(args):
return quat
@builtin("mat22")
class Mat22Func:
@staticmethod
def value_type(args):
return mat22
@builtin("mat33")
class Mat33Func:
@staticmethod
def value_type(args):
return mat33
@builtin("spatial_vector")
class SpatialVectorFunc:
@staticmethod
def value_type(args):
return spatial_vector
# built-in spatial operators
@builtin("spatial_transform")
class TransformFunc:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("spatial_transform_identity")
class TransformIdentity:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("inverse")
class Inverse:
@staticmethod
def value_type(args):
return quat
# @builtin("spatial_transform_inverse")
# class TransformInverse:
# @staticmethod
# def value_type(args):
# return spatial_transform
@builtin("spatial_transform_get_translation")
class TransformGetTranslation:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_get_rotation")
class TransformGetRotation:
@staticmethod
def value_type(args):
return quat
@builtin("spatial_transform_multiply")
class TransformMulFunc:
@staticmethod
def value_type(args):
return spatial_transform
# @builtin("spatial_transform_inertia")
# class TransformInertiaFunc:
# @staticmethod
# def value_type(args):
# return spatial_matrix
@builtin("spatial_adjoint")
class SpatialAdjoint:
@staticmethod
def value_type(args):
return spatial_matrix
@builtin("spatial_dot")
class SpatialDotFunc:
@staticmethod
def value_type(args):
return float
@builtin("spatial_cross")
class SpatialCrossFunc:
@staticmethod
def value_type(args):
return spatial_vector
@builtin("spatial_cross_dual")
class SpatialCrossDualFunc:
@staticmethod
def value_type(args):
return spatial_vector
@builtin("spatial_transform_point")
class SpatialTransformPointFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_vector")
class SpatialTransformVectorFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_top")
class SpatialTopFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_bottom")
class SpatialBottomFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_jacobian")
class SpatialJacobian:
@staticmethod
def value_type(args):
return None
@builtin("spatial_mass")
class SpatialMass:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm")
class DenseGemm:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm_batched")
class DenseGemmBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol")
class DenseChol:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol_batched")
class DenseCholBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_subs")
class DenseSubs:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve")
class DenseSolve:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve_batched")
class DenseSolveBatched:
@staticmethod
def value_type(args):
return None
# helpers
@builtin("index")
class IndexFunc:
@staticmethod
def value_type(args):
return float
@builtin("print")
class PrintFunc:
@staticmethod
def value_type(args):
return None
class Var:
def __init__(adj, label, type, requires_grad=False, constant=None):
adj.label = label
adj.type = type
adj.requires_grad = requires_grad
adj.constant = constant
def __str__(adj):
return adj.label
def ctype(self):
if (isinstance(self.type, tensor)):
if self.type.type == float3:
return str("df::" + self.type.type.__name__) + "*"
return str(self.type.type.__name__) + "*"
elif self.type == float3:
return "df::" + str(self.type.__name__)
else:
return str(self.type.__name__)
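    # For illustration, the mapping implemented above:
    #   Var("v", float3).ctype()          -> "df::float3"
    #   Var("t", tensor(float3)).ctype()  -> "df::float3*"
    #   Var("s", tensor(float)).ctype()   -> "float*"
    #   Var("x", float).ctype()           -> "float"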
#--------------------
# Storage class for partial AST up to a return statement.
class Stmt:
def __init__(self, cond, forward, forward_replay, reverse, ret_forward, ret_line):
self.cond = cond # condition, can be None
self.forward = forward # all forward code outside of conditional branch *since last return*
self.forward_replay = forward_replay
self.reverse = reverse # all reverse code including the reverse of any code in ret_forward
self.ret_forward = ret_forward # all forward commands in the return statement except the actual return statement
self.ret_line = ret_line # actual return statement
#------------------------------------------------------------------------
# Source code transformer, this class takes a Python function and
# computes its adjoint using single-pass translation of the function's AST
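# For illustration, a minimal sketch of how this transformer is typically driven
# (the kernel-style function and the df module accessors are assumptions based on
# the rest of dflex, not part of this class):
#
#   def scale(x: df.tensor(float), y: df.tensor(float)):
#       tid = df.tid()
#       df.store(y, tid, df.load(x, tid) * 2.0)
#
#   adj = Adjoint(scale, device='cpu')
#   source = codegen_kernel(adj, device='cpu')  # forward + adjoint C++ kernel bodies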
class Adjoint:
def __init__(adj, func, device='cpu'):
adj.func = func
adj.device = device
adj.symbols = {} # map from symbols to adjoint variables
adj.variables = [] # list of local variables (in order)
adj.args = [] # list of function arguments (in order)
adj.cond = None # condition variable if in branch
adj.return_var = None # return type for function or kernel
# build AST from function object
adj.source = inspect.getsource(func)
adj.tree = ast.parse(adj.source)
# parse argument types
arg_types = typing.get_type_hints(func)
# add variables and symbol map for each argument
for name, t in arg_types.items():
adj.symbols[name] = Var(name, t, False)
# build ordered list of args
for a in adj.tree.body[0].args.args:
adj.args.append(adj.symbols[a.arg])
# primal statements (allows different statements in replay)
adj.body_forward = []
adj.body_forward_replay = []
adj.body_reverse = []
adj.output = []
adj.indent_count = 0
adj.label_count = 0
# recursively evaluate function body
adj.eval(adj.tree.body[0])
# code generation methods
def format_template(adj, template, input_vars, output_var):
# output var is always the 0th index
args = [output_var] + input_vars
s = template.format(*args)
return s
# generates a comma separated list of args
def format_args(adj, prefix, indices):
args = ""
sep = ""
for i in indices:
args += sep + prefix + str(i)
sep = ", "
return args
def add_var(adj, type=None, constant=None):
index = len(adj.variables)
v = Var(str(index), type=type, constant=constant)
adj.variables.append(v)
return v
def add_constant(adj, n):
output = adj.add_var(type=type(n), constant=n)
#adj.add_forward("var_{} = {};".format(output, n))
return output
def add_load(adj, input):
output = adj.add_var(input.type)
adj.add_forward("var_{} = {};".format(output, input))
adj.add_reverse("adj_{} += adj_{};".format(input, output))
return output
def add_operator(adj, op, inputs):
# todo: just using first input as the output type, would need some
# type inference here to support things like float3 = float*float3
output = adj.add_var(inputs[0].type)
transformer = operators[op.__class__]
for t in transformer.forward():
adj.add_forward(adj.format_template(t, inputs, output))
for t in transformer.reverse():
adj.add_reverse(adj.format_template(t, inputs, output))
return output
def add_comp(adj, op_strings, left, comps):
output = adj.add_var(bool)
s = "var_" + str(output) + " = " + ("(" * len(comps)) + "var_" + str(left) + " "
for op, comp in zip(op_strings, comps):
s += op + " var_" + str(comp) + ") "
s = s.rstrip() + ";"
adj.add_forward(s)
return output
def add_bool_op(adj, op_string, exprs):
output = adj.add_var(bool)
command = "var_" + str(output) + " = " + (" " + op_string + " ").join(["var_" + str(expr) for expr in exprs]) + ";"
adj.add_forward(command)
return output
def add_call(adj, func, inputs, prefix='df::'):
# expression (zero output), e.g.: tid()
if (func.value_type(inputs) == None):
forward_call = prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {});".format("adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs))
adj.add_reverse(reverse_call)
return None
# function (one output)
else:
output = adj.add_var(func.value_type(inputs))
forward_call = "var_{} = ".format(output) + prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {}, {});".format(
"adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs), adj.format_args("adj_", [output]))
adj.add_reverse(reverse_call)
return output
def add_return(adj, var):
if (var == None):
adj.add_forward("return;".format(var), "goto label{};".format(adj.label_count))
else:
adj.add_forward("return var_{};".format(var), "goto label{};".format(adj.label_count))
adj.add_reverse("adj_" + str(var) + " += adj_ret;")
adj.add_reverse("label{}:;".format(adj.label_count))
adj.label_count += 1
# define an if statement
def begin_if(adj, cond):
adj.add_forward("if (var_{}) {{".format(cond))
adj.add_reverse("}")
adj.indent_count += 1
def end_if(adj, cond):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("if (var_{}) {{".format(cond))
# define a for-loop
def begin_for(adj, iter, start, end):
# note that dynamic for-loops must not mutate any previous state, so we don't need to re-run them in the reverse pass
adj.add_forward("for (var_{0}=var_{1}; var_{0} < var_{2}; ++var_{0}) {{".format(iter, start, end), "if (false) {")
adj.add_reverse("}")
adj.indent_count += 1
def end_for(adj, iter, start, end):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("for (var_{0}=var_{2}-1; var_{0} >= var_{1}; --var_{0}) {{".format(iter, start, end))
# append a statement to the forward pass
def add_forward(adj, statement, statement_replay=None):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_forward.append(prefix + statement)
# allow for different statement in reverse kernel replay
if (statement_replay):
adj.body_forward_replay.append(prefix + statement_replay)
else:
adj.body_forward_replay.append(prefix + statement)
# append a statement to the reverse pass
def add_reverse(adj, statement):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_reverse.append(prefix + statement)
def eval(adj, node):
try:
if (isinstance(node, ast.FunctionDef)):
out = None
for f in node.body:
out = adj.eval(f)
if 'return' in adj.symbols and adj.symbols['return'] is not None:
out = adj.symbols['return']
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
else:
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
return out
elif (isinstance(node, ast.If)): # if statement
if len(node.orelse) != 0:
raise SyntaxError("Else statements not currently supported")
if len(node.body) == 0:
return None
# save symbol map
symbols_prev = adj.symbols.copy()
# eval condition
cond = adj.eval(node.test)
# eval body
adj.begin_if(cond)
for stmt in node.body:
adj.eval(stmt)
adj.end_if(cond)
# detect symbols with conflicting definitions (assigned inside the branch)
for items in symbols_prev.items():
sym = items[0]
var1 = items[1]
var2 = adj.symbols[sym]
if var1 != var2:
# insert a phi function that
# selects var1, var2 based on cond
out = adj.add_call(functions["select"], [cond, var1, var2])
adj.symbols[sym] = out
return None
elif (isinstance(node, ast.Compare)):
# node.left, node.ops (list of ops), node.comparators (things to compare to)
# e.g. (left ops[0] node.comparators[0]) ops[1] node.comparators[1]
left = adj.eval(node.left)
comps = [adj.eval(comp) for comp in node.comparators]
op_strings = [operators[type(op)] for op in node.ops]
out = adj.add_comp(op_strings, left, comps)
return out
elif (isinstance(node, ast.BoolOp)):
# op, expr list values (e.g. and and a list of things anded together)
op = node.op
if isinstance(op, ast.And):
func = "&&"
elif isinstance(op, ast.Or):
func = "||"
else:
raise KeyError("Op {} is not supported".format(op))
out = adj.add_bool_op(func, [adj.eval(expr) for expr in node.values])
# import pdb
# pdb.set_trace()
return out
elif (isinstance(node, ast.Name)):
# lookup symbol, if it has already been assigned to a variable then return the existing mapping
if (node.id in adj.symbols):
return adj.symbols[node.id]
else:
raise KeyError("Referencing undefined symbol: " + str(node.id))
elif (isinstance(node, ast.Num)):
# lookup constant, if it has already been assigned then return existing var
# currently disabled, since assigning constant in a branch means it
key = (node.n, type(node.n))
if (key in adj.symbols):
return adj.symbols[key]
else:
out = adj.add_constant(node.n)
adj.symbols[key] = out
return out
#out = adj.add_constant(node.n)
#return out
elif (isinstance(node, ast.BinOp)):
# evaluate binary operator arguments
left = adj.eval(node.left)
right = adj.eval(node.right)
name = operators[type(node.op)]
func = functions[name]
out = adj.add_call(func, [left, right])
return out
elif (isinstance(node, ast.UnaryOp)):
# evaluate unary op arguments
arg = adj.eval(node.operand)
out = adj.add_operator(node.op, [arg])
return out
elif (isinstance(node, ast.For)):
if (len(node.iter.args) != 2):
raise Exception("For loop ranges must be of form range(start, end) with both start and end specified and no skip specifier.")
# check if loop range is compile time constant
unroll = True
for a in node.iter.args:
if (isinstance(a, ast.Num) == False):
unroll = False
break
if (unroll):
# constant loop, unroll
start = node.iter.args[0].n
end = node.iter.args[1].n
for i in range(start, end):
var_iter = adj.add_constant(i)
adj.symbols[node.target.id] = var_iter
# eval body
for s in node.body:
adj.eval(s)
else:
# dynamic loop, body must be side-effect free, i.e.: not
# overwrite memory locations used by previous operations
start = adj.eval(node.iter.args[0])
end = adj.eval(node.iter.args[1])
# add iterator variable
iter = adj.add_var(int)
adj.symbols[node.target.id] = iter
adj.begin_for(iter, start, end)
# eval body
for s in node.body:
adj.eval(s)
adj.end_for(iter, start, end)
elif (isinstance(node, ast.Expr)):
return adj.eval(node.value)
elif (isinstance(node, ast.Call)):
name = None
# determine if call is to a builtin (attribute), or to a user-func (name)
if (isinstance(node.func, ast.Attribute)):
name = node.func.attr
elif (isinstance(node.func, ast.Name)):
name = node.func.id
# check it exists
if name not in functions:
raise KeyError("Could not find function {}".format(name))
if adj.device == 'cuda' and name in cuda_functions:
func = cuda_functions[name]
else:
func = functions[name]
args = []
# eval all arguments
for arg in node.args:
var = adj.eval(arg)
args.append(var)
# add var with value type from the function
out = adj.add_call(func, args, prefix=func.prefix)
return out
elif (isinstance(node, ast.Subscript)):
target = adj.eval(node.value)
indices = []
if isinstance(node.slice, ast.Tuple):
# handles the M[i, j] case
for arg in node.slice.elts:
var = adj.eval(arg)
indices.append(var)
else:
# simple expression
var = adj.eval(node.slice)
indices.append(var)
out = adj.add_call(functions["index"], [target, *indices])
return out
elif (isinstance(node, ast.Assign)):
# if adj.cond is not None:
# raise SyntaxError("error, cannot assign variables in a conditional branch")
# evaluate rhs
out = adj.eval(node.value)
# update symbol map (assumes lhs is a Name node)
adj.symbols[node.targets[0].id] = out
return out
elif (isinstance(node, ast.Return)):
cond = adj.cond # None if not in branch, else branch boolean
out = adj.eval(node.value)
adj.symbols['return'] = out
if out is not None: # set return type of function
return_var = out
if adj.return_var is not None and adj.return_var.ctype() != return_var.ctype():
raise TypeError("error, function returned different types")
adj.return_var = return_var
adj.add_return(out)
return out
elif node is None:
return None
else:
print("[WARNING] ast node of type {} not supported".format(type(node)))
except Exception as e:
# print error / line number
lines = adj.source.splitlines()
print("Error: {} while transforming node {} in func: {} at line: {} col: {}: \n {}".format(e, type(node), adj.func.__name__, node.lineno, node.col_offset, lines[max(node.lineno-1, 0)]))
raise
#----------------
# code generation
cpu_module_header = '''
#define CPU
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cuda_module_header = '''
#define CUDA
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cpu_function_template = '''
{return_type} {name}_cpu_func({forward_args})
{{
{forward_body}
}}
void adj_{name}_cpu_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_function_template = '''
CUDA_CALLABLE {return_type} {name}_cuda_func({forward_args})
{{
{forward_body}
}}
CUDA_CALLABLE void adj_{name}_cuda_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_kernel_template = '''
__global__ void {name}_cuda_kernel_forward(int dim, {forward_args})
{{
{forward_body}
}}
__global__ void {name}_cuda_kernel_backward(int dim, {forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cpu_kernel_template = '''
void {name}_cpu_kernel_forward({forward_args})
{{
{forward_body}
}}
void {name}_cpu_kernel_backward({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_module_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args})
{{
{name}_cuda_kernel_forward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args})
{{
{name}_cuda_kernel_backward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params}, {reverse_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
'''
cpu_module_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_forward({forward_params});
}}
}}
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_backward({forward_params}, {reverse_params});
}}
}}
'''
cuda_module_header_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args});
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args});
'''
cpu_module_header_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args});
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args});
'''
def indent(args, stops=1):
sep = "\n"
for i in range(stops):
sep += "\t"
return sep + args.replace(", ", "," + sep)
def codegen_func_forward_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
for f in stmt.forward:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
body += [indent_block + stmt.ret_line + "\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
body += [stmt.ret_line + "\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_forward(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant == None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# forward pass
s += " //---------\n"
s += " // forward\n"
if device == 'cpu':
s += codegen_func_forward_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_forward_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_forward_body(adj, device=device, indent=4)
return s
def codegen_func_reverse_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
# forward pass
body += ["//---------\n"]
body += ["// forward\n"]
for f in stmt.forward_replay:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
# reverse pass
body += [indent_block + "//---------\n"]
body += [indent_block + "// reverse\n"]
for l in stmt.reverse:
body += [indent_block + l + "\n"]
body += [indent_block + "return;\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
# reverse pass
body += ["//---------\n"]
body += ["// reverse\n"]
for l in stmt.reverse:
body += [l + "\n"]
body += ["return;\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_reverse(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant == None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# dual vars
s += " //---------\n"
s += " // dual vars\n"
for var in adj.variables:
s += " " + var.ctype() + " adj_" + str(var.label) + " = 0;\n"
if device == 'cpu':
s += codegen_func_reverse_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_reverse_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_reverse_body(adj, device=device, indent=4)
else:
raise ValueError("Device {} not supported for codegen".format(device))
return s
def codegen_func(adj, device='cpu'):
# forward header
# return_type = "void"
return_type = 'void' if adj.return_var is None else adj.return_var.ctype()
# s = "{} {}_forward(".format(return_type, adj.func.__name__)
# sep = ""
# for arg in adj.args:
# if (arg.label != 'return'):
# s += sep + str(arg.type.__name__) + " var_" + arg.label
# sep = ", "
# reverse header
# s = "void {}_reverse(".format(adj.func.__name__)
# return s
forward_args = ""
reverse_args = ""
# s = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
if "*" in arg.ctype():
reverse_args += sep + arg.ctype() + " adj_" + arg.label
else:
reverse_args += sep + arg.ctype() + " & adj_" + arg.label
sep = ", "
reverse_args += sep + return_type + " & adj_ret"
# reverse args
# add primal version of parameters
# sep = ""
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + " var_" + var.label
# sep = ", "
# # add adjoint version of parameters
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + "& adj_" + var.label
# sep = ", "
# # add adjoint of output
# if ('return' in adj.symbols and adj.symbols['return'] != None):
# s += sep + str(adj.symbols['return'].type.__name__) + " adj_" + str(adj.symbols['return'])
# codegen body
forward_body = codegen_func_forward(adj, func_type='function', device=device)
reverse_body = codegen_func_reverse(adj, func_type='function', device=device)
if device == 'cpu':
template = cpu_function_template
elif device == 'cuda':
template = cuda_function_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
return_type=return_type,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
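# Same idea as codegen_func, but for @kernel entry points: the generated
# bodies are spliced into the cpu/cuda kernel templates, with the CUDA variant
# guarded per thread by the var_idx < dim check emitted above.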
def codegen_kernel(adj, device='cpu'):
forward_args = ""
reverse_args = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
sep = ", "
# codegen body
forward_body = codegen_func_forward(adj, func_type='kernel', device=device)
reverse_body = codegen_func_reverse(adj, func_type='kernel', device=device)
# import pdb
# pdb.set_trace()
if device == 'cpu':
template = cpu_kernel_template
elif device == 'cuda':
template = cuda_kernel_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
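# codegen_module / codegen_module_decl emit the PyTorch-facing wrappers (and
# their declarations) for each kernel: torch::Tensor arguments are converted
# to raw device pointers via cast<>(), scalar arguments are forwarded
# unchanged.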
def codegen_module(adj, device='cpu'):
forward_args = ""
reverse_args = ""
forward_params = ""
reverse_params = ""
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
forward_args += sep + "torch::Tensor var_" + arg.label
forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")"
else:
forward_args += sep + arg.ctype() + " var_" + arg.label
forward_params += sep + "var_" + arg.label
sep = ", "
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
reverse_args += sep + "torch::Tensor adj_" + arg.label
reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")"
else:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
reverse_params += sep + "adj_" + arg.label
sep = ", "
if device == 'cpu':
template = cpu_module_template
elif device == 'cuda':
template = cuda_module_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_params=indent(forward_params, 3),
reverse_params=indent(reverse_params, 3))
return s
def codegen_module_decl(adj, device='cpu'):
forward_args = ""
reverse_args = ""
forward_params = ""
reverse_params = ""
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
forward_args += sep + "torch::Tensor var_" + arg.label
forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")"
else:
forward_args += sep + arg.ctype() + " var_" + arg.label
forward_params += sep + "var_" + arg.label
sep = ", "
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
reverse_args += sep + "torch::Tensor adj_" + arg.label
reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")"
else:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
reverse_params += sep + "adj_" + arg.label
sep = ", "
if device == 'cpu':
template = cpu_module_header_template
elif device == 'cuda':
template = cuda_module_header_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args))
return s
# runs vcvars and copies back the build environment; PyTorch should really be doing this
def set_build_env():
if os.name == 'nt':
# VS2019 (required for PyTorch headers)
        vcvars_path = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvars64.bat"
s = '"{}" && set'.format(vcvars_path)
output = os.popen(s).read()
for line in output.splitlines():
pair = line.split("=", 1)
if (len(pair) >= 2):
os.environ[pair[0]] = pair[1]
else: # nothing needed for Linux or Mac
pass
def import_module(module_name, path):
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
file, path, description = imp.find_module(module_name, [path])
# Close the .so file after load.
with file:
return imp.load_module(module_name, file, path, description)
def rename(name, return_type):
def func(cls):
cls.__name__ = name
cls.key = name
cls.prefix = ""
cls.return_type = return_type
return cls
return func
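# Registries of user-defined functions and kernels, populated by the @func and
# @kernel decorators below; compile() walks these registries to generate and
# build the C++/CUDA extension.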
user_funcs = {}
user_kernels = {}
def func(f):
user_funcs[f.__name__] = f
# adj = Adjoint(f)
# print(adj.codegen_forward())
# print(adj.codegen_reverse())
# set_build_env()
# include_path = os.path.dirname(os.path.realpath(__file__))
# # requires PyTorch hotfix https://github.com/pytorch/pytorch/pull/33002
# test_cuda = torch.utils.cpp_extension.load_inline('test_cuda', [cpp_template], None, ["test_forward_1", "test_backward_1"], extra_include_paths=include_path, verbose=True)
# help(test_cuda)
def kernel(f):
# stores source and compiled entry points for a kernel (will be populated after module loads)
class Kernel:
def __init__(self, f):
self.func = f
def register(self, module):
# lookup entry points based on name
self.forward_cpu = eval("module." + self.func.__name__ + "_cpu_forward")
self.backward_cpu = eval("module." + self.func.__name__ + "_cpu_backward")
if (torch.cuda.is_available()):
self.forward_cuda = eval("module." + self.func.__name__ + "_cuda_forward")
self.backward_cuda = eval("module." + self.func.__name__ + "_cuda_backward")
k = Kernel(f)
# register globally
user_kernels[f.__name__] = k
return k
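# compile() generates C++ source (and CUDA source when a GPU is available) for
# every registered function and kernel, caches the combined source in
# kernels/adjoint.gen, builds it via torch.utils.cpp_extension.load_inline and
# registers the resulting entry points on each kernel.
#
# Rough usage sketch (the decorated names are illustrative only):
#
#   @func
#   def my_helper(...): ...
#
#   @kernel
#   def my_kernel(...): ...
#
#   module = compile()   # build, or reuse the cached build of, the extension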
def compile():
use_cuda = torch.cuda.is_available()
if not use_cuda:
print("[INFO] CUDA support not found. Disabling CUDA kernel compilation.")
cpp_source = ""
cuda_source = ""
cpp_source += cpu_module_header
cuda_source += cuda_module_header
# kernels
entry_points = []
# functions
for name, func in user_funcs.items():
adj = Adjoint(func, device='cpu')
cpp_source += codegen_func(adj, device='cpu')
adj = Adjoint(func, device='cuda')
cuda_source += codegen_func(adj, device='cuda')
# import pdb
# pdb.set_trace()
import copy
@rename(func.__name__ + "_cpu_func", adj.return_var.type)
class Func:
@classmethod
def value_type(cls, *args):
return cls.return_type
functions[func.__name__] = Func
@rename(func.__name__ + "_cuda_func", adj.return_var.type)
class CUDAFunc:
@classmethod
def value_type(cls, *args):
return cls.return_type
cuda_functions[func.__name__] = CUDAFunc
for name, kernel in user_kernels.items():
if use_cuda:
# each kernel gets an entry point in the module
entry_points.append(name + "_cuda_forward")
entry_points.append(name + "_cuda_backward")
# each kernel gets an entry point in the module
entry_points.append(name + "_cpu_forward")
entry_points.append(name + "_cpu_backward")
if use_cuda:
adj = Adjoint(kernel.func, device='cuda')
cuda_source += codegen_kernel(adj, device='cuda')
cuda_source += codegen_module(adj, device='cuda')
cpp_source += codegen_module_decl(adj, device='cuda')
adj = Adjoint(kernel.func, device='cpu')
cpp_source += codegen_kernel(adj, device='cpu')
cpp_source += codegen_module(adj, device='cpu')
cpp_source += codegen_module_decl(adj, device='cpu')
include_path = os.path.dirname(os.path.realpath(__file__))
build_path = os.path.dirname(os.path.realpath(__file__)) + "/kernels"
cache_file = build_path + "/adjoint.gen"
if (os.path.exists(build_path) == False):
os.mkdir(build_path)
# test cache
if (os.path.exists(cache_file)):
f = open(cache_file, 'r')
cache_string = f.read()
f.close()
if (cache_string == cpp_source):
print("Using cached kernels")
module = import_module("kernels", build_path)
# register kernel methods
for k in user_kernels.values():
k.register(module)
return module
# print("ignoring rebuild, using stale kernels")
# module = import_module("kernels", build_path)
# return module
# cache stale, rebuild
print("Rebuilding kernels")
set_build_env()
# debug config
#module = torch.utils.cpp_extension.load_inline('kernels', [cpp_source], None, entry_points, extra_cflags=["/Zi", "/Od"], extra_ldflags=["/DEBUG"], build_directory=build_path, extra_include_paths=[include_path], verbose=True)
if os.name == 'nt':
cpp_flags = ["/Ox", "-DNDEBUG", "/fp:fast"]
ld_flags = ["-DNDEBUG"]
# cpp_flags = ["/Zi", "/Od", "/DEBUG"]
# ld_flags = ["/DEBUG"]
else:
cpp_flags = ["-Z", "-O2", "-DNDEBUG"]
ld_flags = ["-DNDEBUG"]
    # just use the minimum architecture to ensure compatibility
cuda_flags = ['-gencode=arch=compute_61,code=compute_61']
# release config
if use_cuda:
module = torch.utils.cpp_extension.load_inline('kernels',
cpp_sources=[cpp_source],
cuda_sources=[cuda_source],
functions=entry_points,
extra_cflags=cpp_flags,
extra_ldflags=ld_flags,
extra_cuda_cflags=cuda_flags,
build_directory=build_path,
extra_include_paths=[include_path],
verbose=True,
with_pytorch_error_handling=False)
else:
module = torch.utils.cpp_extension.load_inline('kernels',
cpp_sources=[cpp_source],
cuda_sources=[],
functions=entry_points,
extra_cflags=cpp_flags,
extra_ldflags=ld_flags,
extra_cuda_cflags=cuda_flags,
build_directory=build_path,
extra_include_paths=[include_path],
verbose=True,
with_pytorch_error_handling=False)
# update cache
f = open(cache_file, 'w')
f.write(cpp_source)
f.close()
# register kernel methods
for k in user_kernels.values():
k.register(module)
return module
#---------------------------------------------
# Helper functions for launching kernels as Torch ops
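# check_adapter / check_finite are debug validators (enabled via
# dflex.config.verify_fp) that verify device placement, contiguity and the
# absence of NaNs; the remaining helpers allocate, copy and filter the
# gradient buffers passed to and returned from the custom autograd ops.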
def check_adapter(l, a):
for t in l:
if torch.is_tensor(t):
assert(t.device.type == a)
def check_finite(l):
for t in l:
if torch.is_tensor(t):
assert(t.is_contiguous())
if (torch.isnan(t).any() == True):
print(t)
assert(torch.isnan(t).any() == False)
else:
assert(math.isnan(t) == False)
def filter_grads(grads):
"""helper that takes a list of gradient tensors and makes non-outputs None
as required by PyTorch when returning from a custom op
"""
outputs = []
for g in grads:
if torch.is_tensor(g) and len(g) > 0:
outputs.append(g)
else:
outputs.append(None)
return tuple(outputs)
def make_empty(outputs, device):
empty = []
for o in outputs:
empty.append(torch.FloatTensor().to(device))
return empty
def make_contiguous(grads):
ret = []
for g in grads:
ret.append(g.contiguous())
return ret
def copy_params(params):
out = []
for p in params:
if torch.is_tensor(p):
c = p.clone()
if c.dtype == torch.float32:
c.requires_grad_()
out.append(c)
else:
out.append(p)
return out
def assert_device(device, inputs):
"""helper that asserts that all Tensors in inputs reside on the specified
device (device should be cpu or cuda). Also checks that dtypes are correct.
"""
for arg in inputs:
if isinstance(arg, torch.Tensor):
if (arg.dtype == torch.float64) or (arg.dtype == torch.float16):
raise TypeError("Tensor {arg} has invalid dtype {dtype}".format(arg=arg, dtype=arg.dtype))
if device == 'cpu':
if arg.is_cuda: # make sure all tensors are on the right device. Can fail silently in the CUDA kernel.
raise TypeError("Tensor {arg} is using CUDA but was expected to be on the CPU.".format(arg=arg))
elif torch.device(device).type == 'cuda': #elif device.startswith('cuda'):
if not arg.is_cuda:
raise TypeError("Tensor {arg} is not on a CUDA device but was expected to be using CUDA.".format(arg=arg))
else:
raise ValueError("Device {} is not supported".format(device))
def to_weak_list(s):
w = []
for o in s:
w.append(weakref.ref(o))
return w
def to_strong_list(w):
s = []
for o in w:
s.append(o())
return s
# standalone method to launch a kernel using PyTorch graph (skip custom tape)
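# launch_torch wraps a single kernel launch in a torch.autograd.Function so
# that PyTorch's autograd drives the generated forward/backward entry points
# directly; it is used for torch.autograd.gradcheck when
# dflex.config.check_grad is enabled, bypassing the Tape below.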
def launch_torch(func, dim, inputs, outputs, adapter, preserve_output=False, check_grad=False, no_grad=False):
num_inputs = len(inputs)
num_outputs = len(outputs)
# define autograd type
class TorchFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
#local_inputs = args[0:num_inputs]
#local_outputs = args[num_inputs:len(args)]
# save for backward
#ctx.inputs = list(local_inputs)
ctx.inputs = args
local_outputs = []
for o in outputs:
local_outputs.append(torch.zeros_like(o, requires_grad=True))
ctx.outputs = local_outputs
# ensure inputs match adapter
assert_device(adapter, args)
# launch
if adapter == 'cpu':
func.forward_cpu(*[dim, *args, *ctx.outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.forward_cuda(*[dim, *args, *ctx.outputs])
ret = tuple(ctx.outputs)
return ret
@staticmethod
def backward(ctx, *grads):
# ensure grads are contiguous in memory
adj_outputs = make_contiguous(grads)
# alloc grads
adj_inputs = alloc_grads(ctx.inputs, adapter)
# if we don't need outputs then make empty tensors to skip the write
local_outputs = ctx.outputs
# if preserve_output == True:
# local_outputs = ctx.outputs
# else:
# local_outputs = []
# for o in range(num_outputs):
# local_outputs.append(torch.FloatTensor().to(adapter))
# print("backward")
# print("--------")
# print (" inputs")
# for i in ctx.inputs:
# print(i)
# print (" outputs")
# for o in ctx.outputs:
# print(o)
# print (" adj_inputs")
# for adj_i in adj_inputs:
# print(adj_i)
# print (" adj_outputs")
# for adj_o in adj_outputs:
# print(adj_o)
# launch
if adapter == 'cpu':
func.backward_cpu(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.backward_cuda(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])
# filter grads replaces empty tensors / constant params with None
ret = list(filter_grads(adj_inputs))
for i in range(num_outputs):
ret.append(None)
return tuple(ret)
# run
params = [*inputs]
torch.set_printoptions(edgeitems=3)
if (check_grad == True and no_grad == False):
try:
torch.autograd.gradcheck(TorchFunc.apply, params, eps=1e-2, atol=1e-3, rtol=1.e-3, raise_exception=True)
except Exception as e:
print(str(func.func.__name__) + " failed: " + str(e))
output = TorchFunc.apply(*params)
return output
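# Tape records every kernel launch made during the forward pass and implements
# the reverse pass by hand: replay() walks the recorded launches in reverse,
# allocating zero-initialised adjoint buffers on demand and calling each
# kernel's backward entry point to accumulate the input adjoints.
#
# Rough usage sketch (hypothetical kernel, for illustration only):
#
#   tape = Tape()
#   tape.launch(func=some_kernel, dim=n, inputs=[x], outputs=[y], adapter='cpu')
#   tape.adjoints[y] = grad_y     # seed the adjoint of the output
#   tape.replay()                 # accumulate adjoints of the inputs
#   grad_x = tape.adjoints[x]     # available if x is float32 and requires grad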
class Tape:
def __init__(self):
self.launches = []
# dictionary mapping Tensor inputs to their adjoint
self.adjoints = {}
def launch(self, func, dim, inputs, outputs, adapter, preserve_output=False, skip_check_grad=False):
if (dim > 0):
# run kernel
if adapter == 'cpu':
func.forward_cpu(*[dim, *inputs, *outputs])
elif torch.device(adapter).type == 'cuda': #adapter.startswith('cuda'):
func.forward_cuda(*[dim, *inputs, *outputs])
if dflex.config.verify_fp:
check_adapter(inputs, adapter)
check_adapter(outputs, adapter)
check_finite(inputs)
check_finite(outputs)
# record launch
if dflex.config.no_grad == False:
self.launches.append([func, dim, inputs, outputs, adapter, preserve_output])
# optionally run grad check
if dflex.config.check_grad == True and skip_check_grad == False:
# copy inputs and outputs to avoid disturbing the computational graph
inputs_copy = copy_params(inputs)
outputs_copy = copy_params(outputs)
launch_torch(func, dim, inputs_copy, outputs_copy, adapter, preserve_output, check_grad=True)
def replay(self):
for kernel in reversed(self.launches):
func = kernel[0]
dim = kernel[1]
inputs = kernel[2]
#outputs = to_strong_list(kernel[3])
outputs = kernel[3]
adapter = kernel[4]
# lookup adj_inputs
adj_inputs = []
adj_outputs = []
# build input adjoints
for i in inputs:
if i in self.adjoints:
adj_inputs.append(self.adjoints[i])
else:
if torch.is_tensor(i):
adj_inputs.append(self.alloc_grad(i))
else:
adj_inputs.append(type(i)())
# build output adjoints
for o in outputs:
if o in self.adjoints:
adj_outputs.append(self.adjoints[o])
else:
# no output adjoint means the output wasn't used in the loss function so
# allocate a zero tensor (they will still be read by the kernels)
adj_outputs.append(self.alloc_grad(o))
# launch reverse
if adapter == 'cpu':
func.backward_cpu(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])
elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'):
func.backward_cuda(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])
if dflex.config.verify_fp:
check_finite(inputs)
check_finite(outputs)
check_finite(adj_inputs)
check_finite(adj_outputs)
def reset(self):
self.adjoints = {}
self.launches = []
def alloc_grad(self, t):
if t.dtype == torch.float32 and t.requires_grad:
# zero tensor
self.adjoints[t] = torch.zeros_like(t)
return self.adjoints[t]
else:
# null tensor
return torch.FloatTensor().to(t.device)
# helper that, given a set of inputs, generates a matching set of gradient buffers
def alloc_grads(inputs, adapter):
"""helper that generates output grad buffers for a set of inputs
on the specified device.
Args:
inputs (iterable of Tensors, other literals): list of Tensors
to generate gradient buffers for. Non-tensors are ignored.
        adapter (str): name of the torch device on which the allocated
            gradient buffers are stored (e.g. 'cpu' or 'cuda').
"""
grads = []
for arg in inputs:
if (torch.is_tensor(arg)):
if (arg.requires_grad and arg.dtype == torch.float):
grads.append(torch.zeros_like(arg, device=adapter))
#grads.append(lookup_grad(arg))
else:
grads.append(torch.FloatTensor().to(adapter))
else:
grads.append(type(arg)())
return grads
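# matmul / matmul_batched are thin wrappers that launch dflex's dense GEMM
# kernels through the tape; the CUDA thread counts (256, and 256 per batch
# entry for the batched variant) must match the threadblock size used by the
# kernels, as the existing comments note.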
def matmul(tape, m, n, k, t1, t2, A, B, C, adapter):
if (adapter == 'cpu'):
threads = 1
else:
threads = 256 # should match the threadblock size
tape.launch(
func=dflex.eval_dense_gemm,
dim=threads,
inputs=[
m,
n,
k,
t1,
t2,
A,
B,
],
outputs=[
C
],
adapter=adapter,
preserve_output=False)
def matmul_batched(tape, batch_count, m, n, k, t1, t2, A_start, B_start, C_start, A, B, C, adapter):
if (adapter == 'cpu'):
threads = batch_count
else:
threads = 256*batch_count # must match the threadblock size used in adjoint.py
tape.launch(
func=dflex.eval_dense_gemm_batched,
dim=threads,
inputs=[
m,
n,
k,
t1,
t2,
A_start,
B_start,
C_start,
A,
B,
],
outputs=[
C
],
adapter=adapter,
preserve_output=False) | 61,318 | Python | 25.683638 | 229 | 0.535161 |
vstrozzi/FRL-SHAC-Extension/dflex/dflex/kernels/main.cpp | #include <torch/extension.h>
#define CPU
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
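// This file is the CPU half of the extension generated by dflex's adjoint.py:
// each function registered with @func appears as a <name>_cpu_func forward
// pass plus a matching adj_<name>_cpu_func reverse pass that re-runs the
// forward statements and then walks the adjoint statements backwards,
// re-entering conditional branches through the goto labels.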
float test_cpu_func(
float var_c)
{
//---------
// primal vars
const float var_0 = 1.0;
const int var_1 = 2;
float var_2;
const float var_3 = 3.0;
int var_4;
bool var_5;
const float var_6 = 2.0;
float var_7;
const float var_8 = 6.0;
float var_9;
//---------
// forward
var_2 = df::float(var_1);
var_4 = df::int(var_3);
df::print(var_2);
df::print(var_4);
var_5 = (var_c < var_3);
if (var_5) {
}
var_7 = df::select(var_5, var_0, var_6);
var_9 = df::mul(var_7, var_8);
return var_9;
}
void adj_test_cpu_func(
float var_c,
float & adj_c,
float & adj_ret)
{
//---------
// primal vars
const float var_0 = 1.0;
const int var_1 = 2;
float var_2;
const float var_3 = 3.0;
int var_4;
bool var_5;
const float var_6 = 2.0;
float var_7;
const float var_8 = 6.0;
float var_9;
//---------
// dual vars
float adj_0 = 0;
int adj_1 = 0;
float adj_2 = 0;
float adj_3 = 0;
int adj_4 = 0;
bool adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
//---------
// forward
var_2 = df::float(var_1);
var_4 = df::int(var_3);
df::print(var_2);
df::print(var_4);
var_5 = (var_c < var_3);
if (var_5) {
}
var_7 = df::select(var_5, var_0, var_6);
var_9 = df::mul(var_7, var_8);
goto label0;
//---------
// reverse
label0:;
adj_9 += adj_ret;
df::adj_mul(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_select(var_5, var_0, var_6, adj_5, adj_0, adj_6, adj_7);
if (var_5) {
}
df::adj_print(var_4, adj_4);
df::adj_print(var_2, adj_2);
df::adj_int(var_3, adj_3, adj_4);
df::adj_float(var_1, adj_1, adj_2);
return;
}
df::float3 triangle_closest_point_barycentric_cpu_func(
df::float3 var_a,
df::float3 var_b,
df::float3 var_c,
df::float3 var_p)
{
//---------
// primal vars
df::float3 var_0;
df::float3 var_1;
df::float3 var_2;
float var_3;
float var_4;
const float var_5 = 0.0;
bool var_6;
bool var_7;
bool var_8;
const float var_9 = 1.0;
df::float3 var_10;
df::float3 var_11;
float var_12;
float var_13;
bool var_14;
bool var_15;
bool var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
bool var_24;
bool var_25;
bool var_26;
bool var_27;
float var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
float var_32;
float var_33;
bool var_34;
bool var_35;
bool var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
bool var_44;
bool var_45;
bool var_46;
bool var_47;
float var_48;
df::float3 var_49;
df::float3 var_50;
float var_51;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
float var_58;
bool var_59;
float var_60;
bool var_61;
float var_62;
bool var_63;
bool var_64;
float var_65;
df::float3 var_66;
df::float3 var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
df::float3 var_75;
//---------
// forward
var_0 = df::sub(var_b, var_a);
var_1 = df::sub(var_c, var_a);
var_2 = df::sub(var_p, var_a);
var_3 = df::dot(var_0, var_2);
var_4 = df::dot(var_1, var_2);
var_6 = (var_3 <= var_5);
var_7 = (var_4 <= var_5);
var_8 = var_6 && var_7;
if (var_8) {
var_10 = df::float3(var_9, var_5, var_5);
return var_10;
}
var_11 = df::sub(var_p, var_b);
var_12 = df::dot(var_0, var_11);
var_13 = df::dot(var_1, var_11);
var_14 = (var_12 >= var_5);
var_15 = (var_13 <= var_12);
var_16 = var_14 && var_15;
if (var_16) {
var_17 = df::float3(var_5, var_9, var_5);
return var_17;
}
var_18 = df::select(var_16, var_10, var_17);
var_19 = df::mul(var_3, var_13);
var_20 = df::mul(var_12, var_4);
var_21 = df::sub(var_19, var_20);
var_22 = df::sub(var_3, var_12);
var_23 = df::div(var_3, var_22);
var_24 = (var_21 <= var_5);
var_25 = (var_3 >= var_5);
var_26 = (var_12 <= var_5);
var_27 = var_24 && var_25 && var_26;
if (var_27) {
var_28 = df::sub(var_9, var_23);
var_29 = df::float3(var_28, var_23, var_5);
return var_29;
}
var_30 = df::select(var_27, var_18, var_29);
var_31 = df::sub(var_p, var_c);
var_32 = df::dot(var_0, var_31);
var_33 = df::dot(var_1, var_31);
var_34 = (var_33 >= var_5);
var_35 = (var_32 <= var_33);
var_36 = var_34 && var_35;
if (var_36) {
var_37 = df::float3(var_5, var_5, var_9);
return var_37;
}
var_38 = df::select(var_36, var_30, var_37);
var_39 = df::mul(var_32, var_4);
var_40 = df::mul(var_3, var_33);
var_41 = df::sub(var_39, var_40);
var_42 = df::sub(var_4, var_33);
var_43 = df::div(var_4, var_42);
var_44 = (var_41 <= var_5);
var_45 = (var_4 >= var_5);
var_46 = (var_33 <= var_5);
var_47 = var_44 && var_45 && var_46;
if (var_47) {
var_48 = df::sub(var_9, var_43);
var_49 = df::float3(var_48, var_5, var_43);
return var_49;
}
var_50 = df::select(var_47, var_38, var_49);
var_51 = df::mul(var_12, var_33);
var_52 = df::mul(var_32, var_13);
var_53 = df::sub(var_51, var_52);
var_54 = df::sub(var_13, var_12);
var_55 = df::sub(var_13, var_12);
var_56 = df::sub(var_32, var_33);
var_57 = df::add(var_55, var_56);
var_58 = df::div(var_54, var_57);
var_59 = (var_53 <= var_5);
var_60 = df::sub(var_13, var_12);
var_61 = (var_60 >= var_5);
var_62 = df::sub(var_32, var_33);
var_63 = (var_62 >= var_5);
var_64 = var_59 && var_61 && var_63;
if (var_64) {
var_65 = df::sub(var_9, var_58);
var_66 = df::float3(var_5, var_58, var_65);
return var_66;
}
var_67 = df::select(var_64, var_50, var_66);
var_68 = df::add(var_53, var_41);
var_69 = df::add(var_68, var_21);
var_70 = df::div(var_9, var_69);
var_71 = df::mul(var_41, var_70);
var_72 = df::mul(var_21, var_70);
var_73 = df::sub(var_9, var_71);
var_74 = df::sub(var_73, var_72);
var_75 = df::float3(var_74, var_71, var_72);
return var_75;
}
void adj_triangle_closest_point_barycentric_cpu_func(
df::float3 var_a,
df::float3 var_b,
df::float3 var_c,
df::float3 var_p,
df::float3 & adj_a,
df::float3 & adj_b,
df::float3 & adj_c,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
df::float3 var_0;
df::float3 var_1;
df::float3 var_2;
float var_3;
float var_4;
const float var_5 = 0.0;
bool var_6;
bool var_7;
bool var_8;
const float var_9 = 1.0;
df::float3 var_10;
df::float3 var_11;
float var_12;
float var_13;
bool var_14;
bool var_15;
bool var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
bool var_24;
bool var_25;
bool var_26;
bool var_27;
float var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
float var_32;
float var_33;
bool var_34;
bool var_35;
bool var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
bool var_44;
bool var_45;
bool var_46;
bool var_47;
float var_48;
df::float3 var_49;
df::float3 var_50;
float var_51;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
float var_58;
bool var_59;
float var_60;
bool var_61;
float var_62;
bool var_63;
bool var_64;
float var_65;
df::float3 var_66;
df::float3 var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
df::float3 var_75;
//---------
// dual vars
df::float3 adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
float adj_5 = 0;
bool adj_6 = 0;
bool adj_7 = 0;
bool adj_8 = 0;
float adj_9 = 0;
df::float3 adj_10 = 0;
df::float3 adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
bool adj_14 = 0;
bool adj_15 = 0;
bool adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
bool adj_24 = 0;
bool adj_25 = 0;
bool adj_26 = 0;
bool adj_27 = 0;
float adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
float adj_32 = 0;
float adj_33 = 0;
bool adj_34 = 0;
bool adj_35 = 0;
bool adj_36 = 0;
df::float3 adj_37 = 0;
df::float3 adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
float adj_41 = 0;
float adj_42 = 0;
float adj_43 = 0;
bool adj_44 = 0;
bool adj_45 = 0;
bool adj_46 = 0;
bool adj_47 = 0;
float adj_48 = 0;
df::float3 adj_49 = 0;
df::float3 adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
bool adj_59 = 0;
float adj_60 = 0;
bool adj_61 = 0;
float adj_62 = 0;
bool adj_63 = 0;
bool adj_64 = 0;
float adj_65 = 0;
df::float3 adj_66 = 0;
df::float3 adj_67 = 0;
float adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
df::float3 adj_75 = 0;
//---------
// forward
var_0 = df::sub(var_b, var_a);
var_1 = df::sub(var_c, var_a);
var_2 = df::sub(var_p, var_a);
var_3 = df::dot(var_0, var_2);
var_4 = df::dot(var_1, var_2);
var_6 = (var_3 <= var_5);
var_7 = (var_4 <= var_5);
var_8 = var_6 && var_7;
if (var_8) {
var_10 = df::float3(var_9, var_5, var_5);
goto label0;
}
var_11 = df::sub(var_p, var_b);
var_12 = df::dot(var_0, var_11);
var_13 = df::dot(var_1, var_11);
var_14 = (var_12 >= var_5);
var_15 = (var_13 <= var_12);
var_16 = var_14 && var_15;
if (var_16) {
var_17 = df::float3(var_5, var_9, var_5);
goto label1;
}
var_18 = df::select(var_16, var_10, var_17);
var_19 = df::mul(var_3, var_13);
var_20 = df::mul(var_12, var_4);
var_21 = df::sub(var_19, var_20);
var_22 = df::sub(var_3, var_12);
var_23 = df::div(var_3, var_22);
var_24 = (var_21 <= var_5);
var_25 = (var_3 >= var_5);
var_26 = (var_12 <= var_5);
var_27 = var_24 && var_25 && var_26;
if (var_27) {
var_28 = df::sub(var_9, var_23);
var_29 = df::float3(var_28, var_23, var_5);
goto label2;
}
var_30 = df::select(var_27, var_18, var_29);
var_31 = df::sub(var_p, var_c);
var_32 = df::dot(var_0, var_31);
var_33 = df::dot(var_1, var_31);
var_34 = (var_33 >= var_5);
var_35 = (var_32 <= var_33);
var_36 = var_34 && var_35;
if (var_36) {
var_37 = df::float3(var_5, var_5, var_9);
goto label3;
}
var_38 = df::select(var_36, var_30, var_37);
var_39 = df::mul(var_32, var_4);
var_40 = df::mul(var_3, var_33);
var_41 = df::sub(var_39, var_40);
var_42 = df::sub(var_4, var_33);
var_43 = df::div(var_4, var_42);
var_44 = (var_41 <= var_5);
var_45 = (var_4 >= var_5);
var_46 = (var_33 <= var_5);
var_47 = var_44 && var_45 && var_46;
if (var_47) {
var_48 = df::sub(var_9, var_43);
var_49 = df::float3(var_48, var_5, var_43);
goto label4;
}
var_50 = df::select(var_47, var_38, var_49);
var_51 = df::mul(var_12, var_33);
var_52 = df::mul(var_32, var_13);
var_53 = df::sub(var_51, var_52);
var_54 = df::sub(var_13, var_12);
var_55 = df::sub(var_13, var_12);
var_56 = df::sub(var_32, var_33);
var_57 = df::add(var_55, var_56);
var_58 = df::div(var_54, var_57);
var_59 = (var_53 <= var_5);
var_60 = df::sub(var_13, var_12);
var_61 = (var_60 >= var_5);
var_62 = df::sub(var_32, var_33);
var_63 = (var_62 >= var_5);
var_64 = var_59 && var_61 && var_63;
if (var_64) {
var_65 = df::sub(var_9, var_58);
var_66 = df::float3(var_5, var_58, var_65);
goto label5;
}
var_67 = df::select(var_64, var_50, var_66);
var_68 = df::add(var_53, var_41);
var_69 = df::add(var_68, var_21);
var_70 = df::div(var_9, var_69);
var_71 = df::mul(var_41, var_70);
var_72 = df::mul(var_21, var_70);
var_73 = df::sub(var_9, var_71);
var_74 = df::sub(var_73, var_72);
var_75 = df::float3(var_74, var_71, var_72);
goto label6;
//---------
// reverse
label6:;
adj_75 += adj_ret;
df::adj_float3(var_74, var_71, var_72, adj_74, adj_71, adj_72, adj_75);
df::adj_sub(var_73, var_72, adj_73, adj_72, adj_74);
df::adj_sub(var_9, var_71, adj_9, adj_71, adj_73);
df::adj_mul(var_21, var_70, adj_21, adj_70, adj_72);
df::adj_mul(var_41, var_70, adj_41, adj_70, adj_71);
df::adj_div(var_9, var_69, adj_9, adj_69, adj_70);
df::adj_add(var_68, var_21, adj_68, adj_21, adj_69);
df::adj_add(var_53, var_41, adj_53, adj_41, adj_68);
df::adj_select(var_64, var_50, var_66, adj_64, adj_50, adj_66, adj_67);
if (var_64) {
label5:;
adj_66 += adj_ret;
df::adj_float3(var_5, var_58, var_65, adj_5, adj_58, adj_65, adj_66);
df::adj_sub(var_9, var_58, adj_9, adj_58, adj_65);
}
df::adj_sub(var_32, var_33, adj_32, adj_33, adj_62);
df::adj_sub(var_13, var_12, adj_13, adj_12, adj_60);
df::adj_div(var_54, var_57, adj_54, adj_57, adj_58);
df::adj_add(var_55, var_56, adj_55, adj_56, adj_57);
df::adj_sub(var_32, var_33, adj_32, adj_33, adj_56);
df::adj_sub(var_13, var_12, adj_13, adj_12, adj_55);
df::adj_sub(var_13, var_12, adj_13, adj_12, adj_54);
df::adj_sub(var_51, var_52, adj_51, adj_52, adj_53);
df::adj_mul(var_32, var_13, adj_32, adj_13, adj_52);
df::adj_mul(var_12, var_33, adj_12, adj_33, adj_51);
df::adj_select(var_47, var_38, var_49, adj_47, adj_38, adj_49, adj_50);
if (var_47) {
label4:;
adj_49 += adj_ret;
df::adj_float3(var_48, var_5, var_43, adj_48, adj_5, adj_43, adj_49);
df::adj_sub(var_9, var_43, adj_9, adj_43, adj_48);
}
df::adj_div(var_4, var_42, adj_4, adj_42, adj_43);
df::adj_sub(var_4, var_33, adj_4, adj_33, adj_42);
df::adj_sub(var_39, var_40, adj_39, adj_40, adj_41);
df::adj_mul(var_3, var_33, adj_3, adj_33, adj_40);
df::adj_mul(var_32, var_4, adj_32, adj_4, adj_39);
df::adj_select(var_36, var_30, var_37, adj_36, adj_30, adj_37, adj_38);
if (var_36) {
label3:;
adj_37 += adj_ret;
df::adj_float3(var_5, var_5, var_9, adj_5, adj_5, adj_9, adj_37);
}
df::adj_dot(var_1, var_31, adj_1, adj_31, adj_33);
df::adj_dot(var_0, var_31, adj_0, adj_31, adj_32);
df::adj_sub(var_p, var_c, adj_p, adj_c, adj_31);
df::adj_select(var_27, var_18, var_29, adj_27, adj_18, adj_29, adj_30);
if (var_27) {
label2:;
adj_29 += adj_ret;
df::adj_float3(var_28, var_23, var_5, adj_28, adj_23, adj_5, adj_29);
df::adj_sub(var_9, var_23, adj_9, adj_23, adj_28);
}
df::adj_div(var_3, var_22, adj_3, adj_22, adj_23);
df::adj_sub(var_3, var_12, adj_3, adj_12, adj_22);
df::adj_sub(var_19, var_20, adj_19, adj_20, adj_21);
df::adj_mul(var_12, var_4, adj_12, adj_4, adj_20);
df::adj_mul(var_3, var_13, adj_3, adj_13, adj_19);
df::adj_select(var_16, var_10, var_17, adj_16, adj_10, adj_17, adj_18);
if (var_16) {
label1:;
adj_17 += adj_ret;
df::adj_float3(var_5, var_9, var_5, adj_5, adj_9, adj_5, adj_17);
}
df::adj_dot(var_1, var_11, adj_1, adj_11, adj_13);
df::adj_dot(var_0, var_11, adj_0, adj_11, adj_12);
df::adj_sub(var_p, var_b, adj_p, adj_b, adj_11);
if (var_8) {
label0:;
adj_10 += adj_ret;
df::adj_float3(var_9, var_5, var_5, adj_9, adj_5, adj_5, adj_10);
}
df::adj_dot(var_1, var_2, adj_1, adj_2, adj_4);
df::adj_dot(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_sub(var_p, var_a, adj_p, adj_a, adj_2);
df::adj_sub(var_c, var_a, adj_c, adj_a, adj_1);
df::adj_sub(var_b, var_a, adj_b, adj_a, adj_0);
return;
}
float sphere_sdf_cpu_func(
df::float3 var_center,
float var_radius,
df::float3 var_p)
{
//---------
// primal vars
df::float3 var_0;
float var_1;
float var_2;
//---------
// forward
var_0 = df::sub(var_p, var_center);
var_1 = df::length(var_0);
var_2 = df::sub(var_1, var_radius);
return var_2;
}
void adj_sphere_sdf_cpu_func(
df::float3 var_center,
float var_radius,
df::float3 var_p,
df::float3 & adj_center,
float & adj_radius,
df::float3 & adj_p,
float & adj_ret)
{
//---------
// primal vars
df::float3 var_0;
float var_1;
float var_2;
//---------
// dual vars
df::float3 adj_0 = 0;
float adj_1 = 0;
float adj_2 = 0;
//---------
// forward
var_0 = df::sub(var_p, var_center);
var_1 = df::length(var_0);
var_2 = df::sub(var_1, var_radius);
goto label0;
//---------
// reverse
label0:;
adj_2 += adj_ret;
df::adj_sub(var_1, var_radius, adj_1, adj_radius, adj_2);
df::adj_length(var_0, adj_0, adj_1);
df::adj_sub(var_p, var_center, adj_p, adj_center, adj_0);
return;
}
df::float3 sphere_sdf_grad_cpu_func(
df::float3 var_center,
float var_radius,
df::float3 var_p)
{
//---------
// primal vars
df::float3 var_0;
df::float3 var_1;
//---------
// forward
var_0 = df::sub(var_p, var_center);
var_1 = df::normalize(var_0);
return var_1;
}
void adj_sphere_sdf_grad_cpu_func(
df::float3 var_center,
float var_radius,
df::float3 var_p,
df::float3 & adj_center,
float & adj_radius,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
df::float3 var_0;
df::float3 var_1;
//---------
// dual vars
df::float3 adj_0 = 0;
df::float3 adj_1 = 0;
//---------
// forward
var_0 = df::sub(var_p, var_center);
var_1 = df::normalize(var_0);
goto label0;
//---------
// reverse
label0:;
adj_1 += adj_ret;
df::adj_normalize(var_0, adj_0, adj_1);
df::adj_sub(var_p, var_center, adj_p, adj_center, adj_0);
return;
}
float box_sdf_cpu_func(
df::float3 var_upper,
df::float3 var_p)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
float var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
float var_7;
float var_8;
float var_9;
const int var_10 = 2;
float var_11;
float var_12;
float var_13;
float var_14;
const float var_15 = 0.0;
float var_16;
float var_17;
float var_18;
df::float3 var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = df::abs(var_1);
var_3 = df::index(var_upper, var_0);
var_4 = df::sub(var_2, var_3);
var_6 = df::index(var_p, var_5);
var_7 = df::abs(var_6);
var_8 = df::index(var_upper, var_5);
var_9 = df::sub(var_7, var_8);
var_11 = df::index(var_p, var_10);
var_12 = df::abs(var_11);
var_13 = df::index(var_upper, var_10);
var_14 = df::sub(var_12, var_13);
var_16 = df::max(var_4, var_15);
var_17 = df::max(var_9, var_15);
var_18 = df::max(var_14, var_15);
var_19 = df::float3(var_16, var_17, var_18);
var_20 = df::length(var_19);
var_21 = df::max(var_9, var_14);
var_22 = df::max(var_4, var_21);
var_23 = df::min(var_22, var_15);
var_24 = df::add(var_20, var_23);
return var_24;
}
void adj_box_sdf_cpu_func(
df::float3 var_upper,
df::float3 var_p,
df::float3 & adj_upper,
df::float3 & adj_p,
float & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
float var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
float var_7;
float var_8;
float var_9;
const int var_10 = 2;
float var_11;
float var_12;
float var_13;
float var_14;
const float var_15 = 0.0;
float var_16;
float var_17;
float var_18;
df::float3 var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
//---------
// dual vars
int adj_0 = 0;
float adj_1 = 0;
float adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
int adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
float adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = df::abs(var_1);
var_3 = df::index(var_upper, var_0);
var_4 = df::sub(var_2, var_3);
var_6 = df::index(var_p, var_5);
var_7 = df::abs(var_6);
var_8 = df::index(var_upper, var_5);
var_9 = df::sub(var_7, var_8);
var_11 = df::index(var_p, var_10);
var_12 = df::abs(var_11);
var_13 = df::index(var_upper, var_10);
var_14 = df::sub(var_12, var_13);
var_16 = df::max(var_4, var_15);
var_17 = df::max(var_9, var_15);
var_18 = df::max(var_14, var_15);
var_19 = df::float3(var_16, var_17, var_18);
var_20 = df::length(var_19);
var_21 = df::max(var_9, var_14);
var_22 = df::max(var_4, var_21);
var_23 = df::min(var_22, var_15);
var_24 = df::add(var_20, var_23);
goto label0;
//---------
// reverse
label0:;
adj_24 += adj_ret;
df::adj_add(var_20, var_23, adj_20, adj_23, adj_24);
df::adj_min(var_22, var_15, adj_22, adj_15, adj_23);
df::adj_max(var_4, var_21, adj_4, adj_21, adj_22);
df::adj_max(var_9, var_14, adj_9, adj_14, adj_21);
df::adj_length(var_19, adj_19, adj_20);
df::adj_float3(var_16, var_17, var_18, adj_16, adj_17, adj_18, adj_19);
df::adj_max(var_14, var_15, adj_14, adj_15, adj_18);
df::adj_max(var_9, var_15, adj_9, adj_15, adj_17);
df::adj_max(var_4, var_15, adj_4, adj_15, adj_16);
df::adj_sub(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_13);
df::adj_abs(var_11, adj_11, adj_12);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_11);
df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_8);
df::adj_abs(var_6, adj_6, adj_7);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_3);
df::adj_abs(var_1, adj_1, adj_2);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
df::float3 box_sdf_grad_cpu_func(
df::float3 var_upper,
df::float3 var_p)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
float var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
float var_7;
float var_8;
float var_9;
const int var_10 = 2;
float var_11;
float var_12;
float var_13;
float var_14;
const float var_15 = 0.0;
bool var_16;
bool var_17;
bool var_18;
bool var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
float var_33;
float var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
float var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
bool var_44;
bool var_45;
bool var_46;
df::float3 var_47;
df::float3 var_48;
bool var_49;
bool var_50;
bool var_51;
df::float3 var_52;
df::float3 var_53;
bool var_54;
bool var_55;
bool var_56;
df::float3 var_57;
df::float3 var_58;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = df::abs(var_1);
var_3 = df::index(var_upper, var_0);
var_4 = df::sub(var_2, var_3);
var_6 = df::index(var_p, var_5);
var_7 = df::abs(var_6);
var_8 = df::index(var_upper, var_5);
var_9 = df::sub(var_7, var_8);
var_11 = df::index(var_p, var_10);
var_12 = df::abs(var_11);
var_13 = df::index(var_upper, var_10);
var_14 = df::sub(var_12, var_13);
var_16 = (var_4 > var_15);
var_17 = (var_9 > var_15);
var_18 = (var_14 > var_15);
var_19 = var_16 || var_17 || var_18;
if (var_19) {
var_20 = df::index(var_p, var_0);
var_21 = df::index(var_upper, var_0);
var_22 = df::sub(var_15, var_21);
var_23 = df::index(var_upper, var_0);
var_24 = df::clamp(var_20, var_22, var_23);
var_25 = df::index(var_p, var_5);
var_26 = df::index(var_upper, var_5);
var_27 = df::sub(var_15, var_26);
var_28 = df::index(var_upper, var_5);
var_29 = df::clamp(var_25, var_27, var_28);
var_30 = df::index(var_p, var_10);
var_31 = df::index(var_upper, var_10);
var_32 = df::sub(var_15, var_31);
var_33 = df::index(var_upper, var_10);
var_34 = df::clamp(var_30, var_32, var_33);
var_35 = df::float3(var_24, var_29, var_34);
var_36 = df::sub(var_p, var_35);
var_37 = df::normalize(var_36);
return var_37;
}
var_38 = df::index(var_p, var_0);
var_39 = df::sign(var_38);
var_40 = df::index(var_p, var_5);
var_41 = df::sign(var_40);
var_42 = df::index(var_p, var_10);
var_43 = df::sign(var_42);
var_44 = (var_4 > var_9);
var_45 = (var_4 > var_14);
var_46 = var_44 && var_45;
if (var_46) {
var_47 = df::float3(var_39, var_15, var_15);
return var_47;
}
var_48 = df::select(var_46, var_37, var_47);
var_49 = (var_9 > var_4);
var_50 = (var_9 > var_14);
var_51 = var_49 && var_50;
if (var_51) {
var_52 = df::float3(var_15, var_41, var_15);
return var_52;
}
var_53 = df::select(var_51, var_48, var_52);
var_54 = (var_14 > var_4);
var_55 = (var_14 > var_9);
var_56 = var_54 && var_55;
if (var_56) {
var_57 = df::float3(var_15, var_15, var_43);
return var_57;
}
var_58 = df::select(var_56, var_53, var_57);
}
void adj_box_sdf_grad_cpu_func(
df::float3 var_upper,
df::float3 var_p,
df::float3 & adj_upper,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
float var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
float var_7;
float var_8;
float var_9;
const int var_10 = 2;
float var_11;
float var_12;
float var_13;
float var_14;
const float var_15 = 0.0;
bool var_16;
bool var_17;
bool var_18;
bool var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
float var_33;
float var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
float var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
bool var_44;
bool var_45;
bool var_46;
df::float3 var_47;
df::float3 var_48;
bool var_49;
bool var_50;
bool var_51;
df::float3 var_52;
df::float3 var_53;
bool var_54;
bool var_55;
bool var_56;
df::float3 var_57;
df::float3 var_58;
//---------
// dual vars
int adj_0 = 0;
float adj_1 = 0;
float adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
int adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
float adj_14 = 0;
float adj_15 = 0;
bool adj_16 = 0;
bool adj_17 = 0;
bool adj_18 = 0;
bool adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
float adj_32 = 0;
float adj_33 = 0;
float adj_34 = 0;
df::float3 adj_35 = 0;
df::float3 adj_36 = 0;
df::float3 adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
float adj_41 = 0;
float adj_42 = 0;
float adj_43 = 0;
bool adj_44 = 0;
bool adj_45 = 0;
bool adj_46 = 0;
df::float3 adj_47 = 0;
df::float3 adj_48 = 0;
bool adj_49 = 0;
bool adj_50 = 0;
bool adj_51 = 0;
df::float3 adj_52 = 0;
df::float3 adj_53 = 0;
bool adj_54 = 0;
bool adj_55 = 0;
bool adj_56 = 0;
df::float3 adj_57 = 0;
df::float3 adj_58 = 0;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = df::abs(var_1);
var_3 = df::index(var_upper, var_0);
var_4 = df::sub(var_2, var_3);
var_6 = df::index(var_p, var_5);
var_7 = df::abs(var_6);
var_8 = df::index(var_upper, var_5);
var_9 = df::sub(var_7, var_8);
var_11 = df::index(var_p, var_10);
var_12 = df::abs(var_11);
var_13 = df::index(var_upper, var_10);
var_14 = df::sub(var_12, var_13);
var_16 = (var_4 > var_15);
var_17 = (var_9 > var_15);
var_18 = (var_14 > var_15);
var_19 = var_16 || var_17 || var_18;
if (var_19) {
var_20 = df::index(var_p, var_0);
var_21 = df::index(var_upper, var_0);
var_22 = df::sub(var_15, var_21);
var_23 = df::index(var_upper, var_0);
var_24 = df::clamp(var_20, var_22, var_23);
var_25 = df::index(var_p, var_5);
var_26 = df::index(var_upper, var_5);
var_27 = df::sub(var_15, var_26);
var_28 = df::index(var_upper, var_5);
var_29 = df::clamp(var_25, var_27, var_28);
var_30 = df::index(var_p, var_10);
var_31 = df::index(var_upper, var_10);
var_32 = df::sub(var_15, var_31);
var_33 = df::index(var_upper, var_10);
var_34 = df::clamp(var_30, var_32, var_33);
var_35 = df::float3(var_24, var_29, var_34);
var_36 = df::sub(var_p, var_35);
var_37 = df::normalize(var_36);
goto label0;
}
var_38 = df::index(var_p, var_0);
var_39 = df::sign(var_38);
var_40 = df::index(var_p, var_5);
var_41 = df::sign(var_40);
var_42 = df::index(var_p, var_10);
var_43 = df::sign(var_42);
var_44 = (var_4 > var_9);
var_45 = (var_4 > var_14);
var_46 = var_44 && var_45;
if (var_46) {
var_47 = df::float3(var_39, var_15, var_15);
goto label1;
}
var_48 = df::select(var_46, var_37, var_47);
var_49 = (var_9 > var_4);
var_50 = (var_9 > var_14);
var_51 = var_49 && var_50;
if (var_51) {
var_52 = df::float3(var_15, var_41, var_15);
goto label2;
}
var_53 = df::select(var_51, var_48, var_52);
var_54 = (var_14 > var_4);
var_55 = (var_14 > var_9);
var_56 = var_54 && var_55;
if (var_56) {
var_57 = df::float3(var_15, var_15, var_43);
goto label3;
}
var_58 = df::select(var_56, var_53, var_57);
//---------
// reverse
df::adj_select(var_56, var_53, var_57, adj_56, adj_53, adj_57, adj_58);
if (var_56) {
label3:;
adj_57 += adj_ret;
df::adj_float3(var_15, var_15, var_43, adj_15, adj_15, adj_43, adj_57);
}
df::adj_select(var_51, var_48, var_52, adj_51, adj_48, adj_52, adj_53);
if (var_51) {
label2:;
adj_52 += adj_ret;
df::adj_float3(var_15, var_41, var_15, adj_15, adj_41, adj_15, adj_52);
}
df::adj_select(var_46, var_37, var_47, adj_46, adj_37, adj_47, adj_48);
if (var_46) {
label1:;
adj_47 += adj_ret;
df::adj_float3(var_39, var_15, var_15, adj_39, adj_15, adj_15, adj_47);
}
df::adj_sign(var_42, adj_42, adj_43);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_42);
df::adj_sign(var_40, adj_40, adj_41);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_40);
df::adj_sign(var_38, adj_38, adj_39);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_38);
if (var_19) {
label0:;
adj_37 += adj_ret;
df::adj_normalize(var_36, adj_36, adj_37);
df::adj_sub(var_p, var_35, adj_p, adj_35, adj_36);
df::adj_float3(var_24, var_29, var_34, adj_24, adj_29, adj_34, adj_35);
df::adj_clamp(var_30, var_32, var_33, adj_30, adj_32, adj_33, adj_34);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_33);
df::adj_sub(var_15, var_31, adj_15, adj_31, adj_32);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_31);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_30);
df::adj_clamp(var_25, var_27, var_28, adj_25, adj_27, adj_28, adj_29);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_28);
df::adj_sub(var_15, var_26, adj_15, adj_26, adj_27);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_26);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_25);
df::adj_clamp(var_20, var_22, var_23, adj_20, adj_22, adj_23, adj_24);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_23);
df::adj_sub(var_15, var_21, adj_15, adj_21, adj_22);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_21);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_20);
}
df::adj_sub(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_13);
df::adj_abs(var_11, adj_11, adj_12);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_11);
df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_8);
df::adj_abs(var_6, adj_6, adj_7);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_3);
df::adj_abs(var_1, adj_1, adj_2);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
float capsule_sdf_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
float var_10;
float var_11;
float var_12;
const float var_13 = 0.0;
float var_14;
bool var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
float var_28;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::length(var_9);
var_11 = df::sub(var_10, var_radius);
return var_11;
}
var_12 = df::index(var_p, var_0);
var_14 = df::sub(var_13, var_half_width);
var_15 = (var_12 < var_14);
if (var_15) {
var_16 = df::index(var_p, var_0);
var_17 = df::add(var_16, var_half_width);
var_18 = df::index(var_p, var_5);
var_19 = df::index(var_p, var_7);
var_20 = df::float3(var_17, var_18, var_19);
var_21 = df::length(var_20);
var_22 = df::sub(var_21, var_radius);
return var_22;
}
var_23 = df::select(var_15, var_11, var_22);
var_24 = df::index(var_p, var_5);
var_25 = df::index(var_p, var_7);
var_26 = df::float3(var_13, var_24, var_25);
var_27 = df::length(var_26);
var_28 = df::sub(var_27, var_radius);
return var_28;
}
void adj_capsule_sdf_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p,
float & adj_radius,
float & adj_half_width,
df::float3 & adj_p,
float & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
float var_10;
float var_11;
float var_12;
const float var_13 = 0.0;
float var_14;
bool var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
float var_28;
//---------
// dual vars
int adj_0 = 0;
float adj_1 = 0;
bool adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
int adj_7 = 0;
float adj_8 = 0;
df::float3 adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
float adj_14 = 0;
bool adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
df::float3 adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::length(var_9);
var_11 = df::sub(var_10, var_radius);
goto label0;
}
var_12 = df::index(var_p, var_0);
var_14 = df::sub(var_13, var_half_width);
var_15 = (var_12 < var_14);
if (var_15) {
var_16 = df::index(var_p, var_0);
var_17 = df::add(var_16, var_half_width);
var_18 = df::index(var_p, var_5);
var_19 = df::index(var_p, var_7);
var_20 = df::float3(var_17, var_18, var_19);
var_21 = df::length(var_20);
var_22 = df::sub(var_21, var_radius);
goto label1;
}
var_23 = df::select(var_15, var_11, var_22);
var_24 = df::index(var_p, var_5);
var_25 = df::index(var_p, var_7);
var_26 = df::float3(var_13, var_24, var_25);
var_27 = df::length(var_26);
var_28 = df::sub(var_27, var_radius);
goto label2;
//---------
// reverse
label2:;
adj_28 += adj_ret;
df::adj_sub(var_27, var_radius, adj_27, adj_radius, adj_28);
df::adj_length(var_26, adj_26, adj_27);
df::adj_float3(var_13, var_24, var_25, adj_13, adj_24, adj_25, adj_26);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_25);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_24);
df::adj_select(var_15, var_11, var_22, adj_15, adj_11, adj_22, adj_23);
if (var_15) {
label1:;
adj_22 += adj_ret;
df::adj_sub(var_21, var_radius, adj_21, adj_radius, adj_22);
df::adj_length(var_20, adj_20, adj_21);
df::adj_float3(var_17, var_18, var_19, adj_17, adj_18, adj_19, adj_20);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_19);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_18);
df::adj_add(var_16, var_half_width, adj_16, adj_half_width, adj_17);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_16);
}
df::adj_sub(var_13, var_half_width, adj_13, adj_half_width, adj_14);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_12);
if (var_2) {
label0:;
adj_11 += adj_ret;
df::adj_sub(var_10, var_radius, adj_10, adj_radius, adj_11);
df::adj_length(var_9, adj_9, adj_10);
df::adj_float3(var_4, var_6, var_8, adj_4, adj_6, adj_8, adj_9);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_8);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_3, var_half_width, adj_3, adj_half_width, adj_4);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_3);
}
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
df::float3 capsule_sdf_grad_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
df::float3 var_10;
float var_11;
const float var_12 = 0.0;
float var_13;
bool var_14;
float var_15;
float var_16;
float var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
df::float3 var_25;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::normalize(var_9);
return var_10;
}
var_11 = df::index(var_p, var_0);
var_13 = df::sub(var_12, var_half_width);
var_14 = (var_11 < var_13);
if (var_14) {
var_15 = df::index(var_p, var_0);
var_16 = df::add(var_15, var_half_width);
var_17 = df::index(var_p, var_5);
var_18 = df::index(var_p, var_7);
var_19 = df::float3(var_16, var_17, var_18);
var_20 = df::normalize(var_19);
return var_20;
}
var_21 = df::select(var_14, var_10, var_20);
var_22 = df::index(var_p, var_5);
var_23 = df::index(var_p, var_7);
var_24 = df::float3(var_12, var_22, var_23);
var_25 = df::normalize(var_24);
return var_25;
}
void adj_capsule_sdf_grad_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p,
float & adj_radius,
float & adj_half_width,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
df::float3 var_10;
float var_11;
const float var_12 = 0.0;
float var_13;
bool var_14;
float var_15;
float var_16;
float var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
df::float3 var_25;
//---------
// dual vars
int adj_0 = 0;
float adj_1 = 0;
bool adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
int adj_7 = 0;
float adj_8 = 0;
df::float3 adj_9 = 0;
df::float3 adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
bool adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
df::float3 adj_25 = 0;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::normalize(var_9);
goto label0;
}
var_11 = df::index(var_p, var_0);
var_13 = df::sub(var_12, var_half_width);
var_14 = (var_11 < var_13);
if (var_14) {
var_15 = df::index(var_p, var_0);
var_16 = df::add(var_15, var_half_width);
var_17 = df::index(var_p, var_5);
var_18 = df::index(var_p, var_7);
var_19 = df::float3(var_16, var_17, var_18);
var_20 = df::normalize(var_19);
goto label1;
}
var_21 = df::select(var_14, var_10, var_20);
var_22 = df::index(var_p, var_5);
var_23 = df::index(var_p, var_7);
var_24 = df::float3(var_12, var_22, var_23);
var_25 = df::normalize(var_24);
goto label2;
//---------
// reverse
label2:;
adj_25 += adj_ret;
df::adj_normalize(var_24, adj_24, adj_25);
df::adj_float3(var_12, var_22, var_23, adj_12, adj_22, adj_23, adj_24);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_23);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_22);
df::adj_select(var_14, var_10, var_20, adj_14, adj_10, adj_20, adj_21);
if (var_14) {
label1:;
adj_20 += adj_ret;
df::adj_normalize(var_19, adj_19, adj_20);
df::adj_float3(var_16, var_17, var_18, adj_16, adj_17, adj_18, adj_19);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_18);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_17);
df::adj_add(var_15, var_half_width, adj_15, adj_half_width, adj_16);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_15);
}
df::adj_sub(var_12, var_half_width, adj_12, adj_half_width, adj_13);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_11);
if (var_2) {
label0:;
adj_10 += adj_ret;
df::adj_normalize(var_9, adj_9, adj_10);
df::adj_float3(var_4, var_6, var_8, adj_4, adj_6, adj_8, adj_9);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_8);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_3, var_half_width, adj_3, adj_half_width, adj_4);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_3);
}
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
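// Transforms a twist (angular velocity in the top slot, linear velocity in the
// bottom slot) by the spatial transform t: both parts are rotated by t's
// rotation, and the linear part picks up an additional p x w term from t's
// translation.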
spatial_vector spatial_transform_twist_cpu_func(
spatial_transform var_t,
spatial_vector var_x)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_2);
var_5 = df::rotate(var_0, var_3);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_4, var_7);
return var_8;
}
void adj_spatial_transform_twist_cpu_func(
spatial_transform var_t,
spatial_vector var_x,
spatial_transform & adj_t,
spatial_vector & adj_x,
spatial_vector & adj_ret)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// dual vars
quat adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
spatial_vector adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_2);
var_5 = df::rotate(var_0, var_3);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_4, var_7);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_vector(var_4, var_7, adj_4, adj_7, adj_8);
df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_cross(var_1, var_4, adj_1, adj_4, adj_6);
df::adj_rotate(var_0, var_3, adj_0, adj_3, adj_5);
df::adj_rotate(var_0, var_2, adj_0, adj_2, adj_4);
df::adj_spatial_bottom(var_x, adj_x, adj_3);
df::adj_spatial_top(var_x, adj_x, adj_2);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_0);
return;
}
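// Transforms a wrench by the spatial transform t. The bottom (force) part is
// rotated, and the top (torque) part becomes the rotated torque plus
// p x (rotated force), i.e. the twist transform with the roles of the two slots
// exchanged.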
spatial_vector spatial_transform_wrench_cpu_func(
spatial_transform var_t,
spatial_vector var_x)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_3);
var_5 = df::rotate(var_0, var_2);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_7, var_4);
return var_8;
}
void adj_spatial_transform_wrench_cpu_func(
spatial_transform var_t,
spatial_vector var_x,
spatial_transform & adj_t,
spatial_vector & adj_x,
spatial_vector & adj_ret)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// dual vars
quat adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
spatial_vector adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_3);
var_5 = df::rotate(var_0, var_2);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_7, var_4);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_vector(var_7, var_4, adj_7, adj_4, adj_8);
df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_cross(var_1, var_4, adj_1, adj_4, adj_6);
df::adj_rotate(var_0, var_2, adj_0, adj_2, adj_5);
df::adj_rotate(var_0, var_3, adj_0, adj_3, adj_4);
df::adj_spatial_bottom(var_x, adj_x, adj_3);
df::adj_spatial_top(var_x, adj_x, adj_2);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_0);
return;
}
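// Inverts a spatial transform: the rotation is inverted and the translation
// becomes the original translation rotated into the new frame and negated
// (computed below as the rotated vector multiplied by (0.0 - 1.0)).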
spatial_transform spatial_transform_inverse_cpu_func(
spatial_transform var_t)
{
//---------
// primal vars
df::float3 var_0;
quat var_1;
quat var_2;
df::float3 var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
float var_6;
df::float3 var_7;
spatial_transform var_8;
//---------
// forward
var_0 = df::spatial_transform_get_translation(var_t);
var_1 = df::spatial_transform_get_rotation(var_t);
var_2 = df::inverse(var_1);
var_3 = df::rotate(var_2, var_0);
var_6 = df::sub(var_4, var_5);
var_7 = df::mul(var_3, var_6);
var_8 = df::spatial_transform(var_7, var_2);
return var_8;
}
void adj_spatial_transform_inverse_cpu_func(
spatial_transform var_t,
spatial_transform & adj_t,
spatial_transform & adj_ret)
{
//---------
// primal vars
df::float3 var_0;
quat var_1;
quat var_2;
df::float3 var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
float var_6;
df::float3 var_7;
spatial_transform var_8;
//---------
// dual vars
df::float3 adj_0 = 0;
quat adj_1 = 0;
quat adj_2 = 0;
df::float3 adj_3 = 0;
float adj_4 = 0;
float adj_5 = 0;
float adj_6 = 0;
df::float3 adj_7 = 0;
spatial_transform adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_translation(var_t);
var_1 = df::spatial_transform_get_rotation(var_t);
var_2 = df::inverse(var_1);
var_3 = df::rotate(var_2, var_0);
var_6 = df::sub(var_4, var_5);
var_7 = df::mul(var_3, var_6);
var_8 = df::spatial_transform(var_7, var_2);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_transform(var_7, var_2, adj_7, adj_2, adj_8);
df::adj_mul(var_3, var_6, adj_3, adj_6, adj_7);
df::adj_sub(var_4, var_5, adj_4, adj_5, adj_6);
df::adj_rotate(var_2, var_0, adj_2, adj_0, adj_3);
df::adj_inverse(var_1, adj_1, adj_2);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_0);
return;
}
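// Transforms a spatial inertia matrix into a new frame. It builds the 6x6
// spatial adjoint X of the inverse transform from the rotated basis vectors and
// the skew-symmetric matrix of the translation, then returns X^T * I * X.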
spatial_matrix spatial_transform_inertia_cpu_func(
spatial_transform var_t,
spatial_matrix var_I)
{
//---------
// primal vars
spatial_transform var_0;
quat var_1;
df::float3 var_2;
const float var_3 = 1.0;
const float var_4 = 0.0;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
df::float3 var_8;
df::float3 var_9;
df::float3 var_10;
mat33 var_11;
mat33 var_12;
mat33 var_13;
spatial_matrix var_14;
spatial_matrix var_15;
spatial_matrix var_16;
spatial_matrix var_17;
//---------
// forward
var_0 = spatial_transform_inverse_cpu_func(var_t);
var_1 = df::spatial_transform_get_rotation(var_0);
var_2 = df::spatial_transform_get_translation(var_0);
var_5 = df::float3(var_3, var_4, var_4);
var_6 = df::rotate(var_1, var_5);
var_7 = df::float3(var_4, var_3, var_4);
var_8 = df::rotate(var_1, var_7);
var_9 = df::float3(var_4, var_4, var_3);
var_10 = df::rotate(var_1, var_9);
var_11 = df::mat33(var_6, var_8, var_10);
var_12 = df::skew(var_2);
var_13 = df::mul(var_12, var_11);
var_14 = df::spatial_adjoint(var_11, var_13);
var_15 = df::transpose(var_14);
var_16 = df::mul(var_15, var_I);
var_17 = df::mul(var_16, var_14);
return var_17;
}
void adj_spatial_transform_inertia_cpu_func(
spatial_transform var_t,
spatial_matrix var_I,
spatial_transform & adj_t,
spatial_matrix & adj_I,
spatial_matrix & adj_ret)
{
//---------
// primal vars
spatial_transform var_0;
quat var_1;
df::float3 var_2;
const float var_3 = 1.0;
const float var_4 = 0.0;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
df::float3 var_8;
df::float3 var_9;
df::float3 var_10;
mat33 var_11;
mat33 var_12;
mat33 var_13;
spatial_matrix var_14;
spatial_matrix var_15;
spatial_matrix var_16;
spatial_matrix var_17;
//---------
// dual vars
spatial_transform adj_0 = 0;
quat adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
df::float3 adj_8 = 0;
df::float3 adj_9 = 0;
df::float3 adj_10 = 0;
mat33 adj_11 = 0;
mat33 adj_12 = 0;
mat33 adj_13 = 0;
spatial_matrix adj_14 = 0;
spatial_matrix adj_15 = 0;
spatial_matrix adj_16 = 0;
spatial_matrix adj_17 = 0;
//---------
// forward
var_0 = spatial_transform_inverse_cpu_func(var_t);
var_1 = df::spatial_transform_get_rotation(var_0);
var_2 = df::spatial_transform_get_translation(var_0);
var_5 = df::float3(var_3, var_4, var_4);
var_6 = df::rotate(var_1, var_5);
var_7 = df::float3(var_4, var_3, var_4);
var_8 = df::rotate(var_1, var_7);
var_9 = df::float3(var_4, var_4, var_3);
var_10 = df::rotate(var_1, var_9);
var_11 = df::mat33(var_6, var_8, var_10);
var_12 = df::skew(var_2);
var_13 = df::mul(var_12, var_11);
var_14 = df::spatial_adjoint(var_11, var_13);
var_15 = df::transpose(var_14);
var_16 = df::mul(var_15, var_I);
var_17 = df::mul(var_16, var_14);
goto label0;
//---------
// reverse
label0:;
adj_17 += adj_ret;
df::adj_mul(var_16, var_14, adj_16, adj_14, adj_17);
df::adj_mul(var_15, var_I, adj_15, adj_I, adj_16);
df::adj_transpose(var_14, adj_14, adj_15);
df::adj_spatial_adjoint(var_11, var_13, adj_11, adj_13, adj_14);
df::adj_mul(var_12, var_11, adj_12, adj_11, adj_13);
df::adj_skew(var_2, adj_2, adj_12);
df::adj_mat33(var_6, var_8, var_10, adj_6, adj_8, adj_10, adj_11);
df::adj_rotate(var_1, var_9, adj_1, adj_9, adj_10);
df::adj_float3(var_4, var_4, var_3, adj_4, adj_4, adj_3, adj_9);
df::adj_rotate(var_1, var_7, adj_1, adj_7, adj_8);
df::adj_float3(var_4, var_3, var_4, adj_4, adj_3, adj_4, adj_7);
df::adj_rotate(var_1, var_5, adj_1, adj_5, adj_6);
df::adj_float3(var_3, var_4, var_4, adj_3, adj_4, adj_4, adj_5);
df::adj_spatial_transform_get_translation(var_0, adj_0, adj_2);
df::adj_spatial_transform_get_rotation(var_0, adj_0, adj_1);
adj_spatial_transform_inverse_cpu_func(var_t, adj_t, adj_0);
return;
}
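// Applies a muscle force between two attachment points. The points are
// transformed into world space, the unit direction between them is scaled by the
// activation, and equal and opposite wrenches (moment = point x force) are
// accumulated into body_f_s for the two links. Returns early if both attachment
// points belong to the same link.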
int compute_muscle_force_cpu_func(
int var_i,
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_links,
df::float3* var_muscle_points,
float var_muscle_activation,
spatial_vector* var_body_f_s)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
bool var_4;
const int var_5 = 0;
df::float3 var_6;
int var_7;
df::float3 var_8;
spatial_transform var_9;
spatial_transform var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
spatial_vector var_17;
df::float3 var_18;
spatial_vector var_19;
//---------
// forward
var_0 = df::load(var_muscle_links, var_i);
var_2 = df::add(var_i, var_1);
var_3 = df::load(var_muscle_links, var_2);
var_4 = (var_0 == var_3);
if (var_4) {
return var_5;
}
var_6 = df::load(var_muscle_points, var_i);
var_7 = df::add(var_i, var_1);
var_8 = df::load(var_muscle_points, var_7);
var_9 = df::load(var_body_X_s, var_0);
var_10 = df::load(var_body_X_s, var_3);
var_11 = df::spatial_transform_point(var_9, var_6);
var_12 = df::spatial_transform_point(var_10, var_8);
var_13 = df::sub(var_12, var_11);
var_14 = df::normalize(var_13);
var_15 = df::mul(var_14, var_muscle_activation);
var_16 = df::cross(var_11, var_15);
var_17 = df::spatial_vector(var_16, var_15);
df::atomic_sub(var_body_f_s, var_0, var_17);
var_18 = df::cross(var_12, var_15);
var_19 = df::spatial_vector(var_18, var_15);
df::atomic_add(var_body_f_s, var_3, var_19);
return var_5;
}
void adj_compute_muscle_force_cpu_func(
int var_i,
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_links,
df::float3* var_muscle_points,
float var_muscle_activation,
spatial_vector* var_body_f_s,
int & adj_i,
spatial_transform* adj_body_X_s,
spatial_vector* adj_body_v_s,
int* adj_muscle_links,
df::float3* adj_muscle_points,
float & adj_muscle_activation,
spatial_vector* adj_body_f_s,
int & adj_ret)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
bool var_4;
const int var_5 = 0;
df::float3 var_6;
int var_7;
df::float3 var_8;
spatial_transform var_9;
spatial_transform var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
spatial_vector var_17;
df::float3 var_18;
spatial_vector var_19;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
bool adj_4 = 0;
int adj_5 = 0;
df::float3 adj_6 = 0;
int adj_7 = 0;
df::float3 adj_8 = 0;
spatial_transform adj_9 = 0;
spatial_transform adj_10 = 0;
df::float3 adj_11 = 0;
df::float3 adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
spatial_vector adj_17 = 0;
df::float3 adj_18 = 0;
spatial_vector adj_19 = 0;
//---------
// forward
var_0 = df::load(var_muscle_links, var_i);
var_2 = df::add(var_i, var_1);
var_3 = df::load(var_muscle_links, var_2);
var_4 = (var_0 == var_3);
if (var_4) {
goto label0;
}
var_6 = df::load(var_muscle_points, var_i);
var_7 = df::add(var_i, var_1);
var_8 = df::load(var_muscle_points, var_7);
var_9 = df::load(var_body_X_s, var_0);
var_10 = df::load(var_body_X_s, var_3);
var_11 = df::spatial_transform_point(var_9, var_6);
var_12 = df::spatial_transform_point(var_10, var_8);
var_13 = df::sub(var_12, var_11);
var_14 = df::normalize(var_13);
var_15 = df::mul(var_14, var_muscle_activation);
var_16 = df::cross(var_11, var_15);
var_17 = df::spatial_vector(var_16, var_15);
df::atomic_sub(var_body_f_s, var_0, var_17);
var_18 = df::cross(var_12, var_15);
var_19 = df::spatial_vector(var_18, var_15);
df::atomic_add(var_body_f_s, var_3, var_19);
goto label1;
//---------
// reverse
label1:;
adj_5 += adj_ret;
df::adj_atomic_add(var_body_f_s, var_3, var_19, adj_body_f_s, adj_3, adj_19);
df::adj_spatial_vector(var_18, var_15, adj_18, adj_15, adj_19);
df::adj_cross(var_12, var_15, adj_12, adj_15, adj_18);
df::adj_atomic_sub(var_body_f_s, var_0, var_17, adj_body_f_s, adj_0, adj_17);
df::adj_spatial_vector(var_16, var_15, adj_16, adj_15, adj_17);
df::adj_cross(var_11, var_15, adj_11, adj_15, adj_16);
df::adj_mul(var_14, var_muscle_activation, adj_14, adj_muscle_activation, adj_15);
df::adj_normalize(var_13, adj_13, adj_14);
df::adj_sub(var_12, var_11, adj_12, adj_11, adj_13);
df::adj_spatial_transform_point(var_10, var_8, adj_10, adj_8, adj_12);
df::adj_spatial_transform_point(var_9, var_6, adj_9, adj_6, adj_11);
df::adj_load(var_body_X_s, var_3, adj_body_X_s, adj_3, adj_10);
df::adj_load(var_body_X_s, var_0, adj_body_X_s, adj_0, adj_9);
df::adj_load(var_muscle_points, var_7, adj_muscle_points, adj_7, adj_8);
df::adj_add(var_i, var_1, adj_i, adj_1, adj_7);
df::adj_load(var_muscle_points, var_i, adj_muscle_points, adj_i, adj_6);
if (var_4) {
label0:;
adj_5 += adj_ret;
}
df::adj_load(var_muscle_links, var_2, adj_muscle_links, adj_2, adj_3);
df::adj_add(var_i, var_1, adj_i, adj_1, adj_2);
df::adj_load(var_muscle_links, var_i, adj_muscle_links, adj_i, adj_0);
return;
}
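// Computes the joint transform from the joint coordinates. The integer joint
// types appear to follow the dflex convention: 0 = prismatic (translation along
// axis), 1 = revolute (rotation about axis), 2 = ball (quaternion stored in 4
// coordinates), 3 = fixed (identity), 4 = free (3 translation + 4 quaternion
// coordinates); any other type falls through to the identity transform.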
spatial_transform jcalc_transform_cpu_func(
int var_type,
df::float3 var_axis,
float* var_joint_q,
int var_start)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
float var_2;
df::float3 var_3;
quat var_4;
spatial_transform var_5;
const int var_6 = 1;
bool var_7;
float var_8;
const float var_9 = 0.0;
df::float3 var_10;
quat var_11;
spatial_transform var_12;
float var_13;
spatial_transform var_14;
spatial_transform var_15;
const int var_16 = 2;
bool var_17;
int var_18;
float var_19;
int var_20;
float var_21;
int var_22;
float var_23;
const int var_24 = 3;
int var_25;
float var_26;
df::float3 var_27;
quat var_28;
spatial_transform var_29;
spatial_transform var_30;
spatial_transform var_31;
bool var_32;
spatial_transform var_33;
spatial_transform var_34;
spatial_transform var_35;
const int var_36 = 4;
bool var_37;
int var_38;
float var_39;
int var_40;
float var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
const int var_48 = 5;
int var_49;
float var_50;
const int var_51 = 6;
int var_52;
float var_53;
df::float3 var_54;
quat var_55;
spatial_transform var_56;
spatial_transform var_57;
spatial_transform var_58;
float var_59;
float var_60;
float var_61;
float var_62;
spatial_transform var_63;
//---------
// forward
var_1 = (var_type == var_0);
if (var_1) {
var_2 = df::load(var_joint_q, var_start);
var_3 = df::mul(var_axis, var_2);
var_4 = df::quat_identity();
var_5 = df::spatial_transform(var_3, var_4);
return var_5;
}
var_7 = (var_type == var_6);
if (var_7) {
var_8 = df::load(var_joint_q, var_start);
var_10 = df::float3(var_9, var_9, var_9);
var_11 = df::quat_from_axis_angle(var_axis, var_8);
var_12 = df::spatial_transform(var_10, var_11);
return var_12;
}
var_13 = df::select(var_7, var_2, var_8);
var_14 = df::select(var_7, var_5, var_12);
var_15 = df::select(var_7, var_5, var_12);
var_17 = (var_type == var_16);
if (var_17) {
var_18 = df::add(var_start, var_0);
var_19 = df::load(var_joint_q, var_18);
var_20 = df::add(var_start, var_6);
var_21 = df::load(var_joint_q, var_20);
var_22 = df::add(var_start, var_16);
var_23 = df::load(var_joint_q, var_22);
var_25 = df::add(var_start, var_24);
var_26 = df::load(var_joint_q, var_25);
var_27 = df::float3(var_9, var_9, var_9);
var_28 = df::quat(var_19, var_21, var_23, var_26);
var_29 = df::spatial_transform(var_27, var_28);
return var_29;
}
var_30 = df::select(var_17, var_14, var_29);
var_31 = df::select(var_17, var_15, var_29);
var_32 = (var_type == var_24);
if (var_32) {
var_33 = df::spatial_transform_identity();
return var_33;
}
var_34 = df::select(var_32, var_30, var_33);
var_35 = df::select(var_32, var_31, var_33);
var_37 = (var_type == var_36);
if (var_37) {
var_38 = df::add(var_start, var_0);
var_39 = df::load(var_joint_q, var_38);
var_40 = df::add(var_start, var_6);
var_41 = df::load(var_joint_q, var_40);
var_42 = df::add(var_start, var_16);
var_43 = df::load(var_joint_q, var_42);
var_44 = df::add(var_start, var_24);
var_45 = df::load(var_joint_q, var_44);
var_46 = df::add(var_start, var_36);
var_47 = df::load(var_joint_q, var_46);
var_49 = df::add(var_start, var_48);
var_50 = df::load(var_joint_q, var_49);
var_52 = df::add(var_start, var_51);
var_53 = df::load(var_joint_q, var_52);
var_54 = df::float3(var_39, var_41, var_43);
var_55 = df::quat(var_45, var_47, var_50, var_53);
var_56 = df::spatial_transform(var_54, var_55);
return var_56;
}
var_57 = df::select(var_37, var_34, var_56);
var_58 = df::select(var_37, var_35, var_56);
var_59 = df::select(var_37, var_19, var_45);
var_60 = df::select(var_37, var_21, var_47);
var_61 = df::select(var_37, var_23, var_50);
var_62 = df::select(var_37, var_26, var_53);
var_63 = df::spatial_transform_identity();
return var_63;
}
void adj_jcalc_transform_cpu_func(
int var_type,
df::float3 var_axis,
float* var_joint_q,
int var_start,
int & adj_type,
df::float3 & adj_axis,
float* adj_joint_q,
int & adj_start,
spatial_transform & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
float var_2;
df::float3 var_3;
quat var_4;
spatial_transform var_5;
const int var_6 = 1;
bool var_7;
float var_8;
const float var_9 = 0.0;
df::float3 var_10;
quat var_11;
spatial_transform var_12;
float var_13;
spatial_transform var_14;
spatial_transform var_15;
const int var_16 = 2;
bool var_17;
int var_18;
float var_19;
int var_20;
float var_21;
int var_22;
float var_23;
const int var_24 = 3;
int var_25;
float var_26;
df::float3 var_27;
quat var_28;
spatial_transform var_29;
spatial_transform var_30;
spatial_transform var_31;
bool var_32;
spatial_transform var_33;
spatial_transform var_34;
spatial_transform var_35;
const int var_36 = 4;
bool var_37;
int var_38;
float var_39;
int var_40;
float var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
const int var_48 = 5;
int var_49;
float var_50;
const int var_51 = 6;
int var_52;
float var_53;
df::float3 var_54;
quat var_55;
spatial_transform var_56;
spatial_transform var_57;
spatial_transform var_58;
float var_59;
float var_60;
float var_61;
float var_62;
spatial_transform var_63;
//---------
// dual vars
int adj_0 = 0;
bool adj_1 = 0;
float adj_2 = 0;
df::float3 adj_3 = 0;
quat adj_4 = 0;
spatial_transform adj_5 = 0;
int adj_6 = 0;
bool adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
df::float3 adj_10 = 0;
quat adj_11 = 0;
spatial_transform adj_12 = 0;
float adj_13 = 0;
spatial_transform adj_14 = 0;
spatial_transform adj_15 = 0;
int adj_16 = 0;
bool adj_17 = 0;
int adj_18 = 0;
float adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
int adj_24 = 0;
int adj_25 = 0;
float adj_26 = 0;
df::float3 adj_27 = 0;
quat adj_28 = 0;
spatial_transform adj_29 = 0;
spatial_transform adj_30 = 0;
spatial_transform adj_31 = 0;
bool adj_32 = 0;
spatial_transform adj_33 = 0;
spatial_transform adj_34 = 0;
spatial_transform adj_35 = 0;
int adj_36 = 0;
bool adj_37 = 0;
int adj_38 = 0;
float adj_39 = 0;
int adj_40 = 0;
float adj_41 = 0;
int adj_42 = 0;
float adj_43 = 0;
int adj_44 = 0;
float adj_45 = 0;
int adj_46 = 0;
float adj_47 = 0;
int adj_48 = 0;
int adj_49 = 0;
float adj_50 = 0;
int adj_51 = 0;
int adj_52 = 0;
float adj_53 = 0;
df::float3 adj_54 = 0;
quat adj_55 = 0;
spatial_transform adj_56 = 0;
spatial_transform adj_57 = 0;
spatial_transform adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
spatial_transform adj_63 = 0;
//---------
// forward
var_1 = (var_type == var_0);
if (var_1) {
var_2 = df::load(var_joint_q, var_start);
var_3 = df::mul(var_axis, var_2);
var_4 = df::quat_identity();
var_5 = df::spatial_transform(var_3, var_4);
goto label0;
}
var_7 = (var_type == var_6);
if (var_7) {
var_8 = df::load(var_joint_q, var_start);
var_10 = df::float3(var_9, var_9, var_9);
var_11 = df::quat_from_axis_angle(var_axis, var_8);
var_12 = df::spatial_transform(var_10, var_11);
goto label1;
}
var_13 = df::select(var_7, var_2, var_8);
var_14 = df::select(var_7, var_5, var_12);
var_15 = df::select(var_7, var_5, var_12);
var_17 = (var_type == var_16);
if (var_17) {
var_18 = df::add(var_start, var_0);
var_19 = df::load(var_joint_q, var_18);
var_20 = df::add(var_start, var_6);
var_21 = df::load(var_joint_q, var_20);
var_22 = df::add(var_start, var_16);
var_23 = df::load(var_joint_q, var_22);
var_25 = df::add(var_start, var_24);
var_26 = df::load(var_joint_q, var_25);
var_27 = df::float3(var_9, var_9, var_9);
var_28 = df::quat(var_19, var_21, var_23, var_26);
var_29 = df::spatial_transform(var_27, var_28);
goto label2;
}
var_30 = df::select(var_17, var_14, var_29);
var_31 = df::select(var_17, var_15, var_29);
var_32 = (var_type == var_24);
if (var_32) {
var_33 = df::spatial_transform_identity();
goto label3;
}
var_34 = df::select(var_32, var_30, var_33);
var_35 = df::select(var_32, var_31, var_33);
var_37 = (var_type == var_36);
if (var_37) {
var_38 = df::add(var_start, var_0);
var_39 = df::load(var_joint_q, var_38);
var_40 = df::add(var_start, var_6);
var_41 = df::load(var_joint_q, var_40);
var_42 = df::add(var_start, var_16);
var_43 = df::load(var_joint_q, var_42);
var_44 = df::add(var_start, var_24);
var_45 = df::load(var_joint_q, var_44);
var_46 = df::add(var_start, var_36);
var_47 = df::load(var_joint_q, var_46);
var_49 = df::add(var_start, var_48);
var_50 = df::load(var_joint_q, var_49);
var_52 = df::add(var_start, var_51);
var_53 = df::load(var_joint_q, var_52);
var_54 = df::float3(var_39, var_41, var_43);
var_55 = df::quat(var_45, var_47, var_50, var_53);
var_56 = df::spatial_transform(var_54, var_55);
goto label4;
}
var_57 = df::select(var_37, var_34, var_56);
var_58 = df::select(var_37, var_35, var_56);
var_59 = df::select(var_37, var_19, var_45);
var_60 = df::select(var_37, var_21, var_47);
var_61 = df::select(var_37, var_23, var_50);
var_62 = df::select(var_37, var_26, var_53);
var_63 = df::spatial_transform_identity();
goto label5;
//---------
// reverse
label5:;
adj_63 += adj_ret;
df::adj_select(var_37, var_26, var_53, adj_37, adj_26, adj_53, adj_62);
df::adj_select(var_37, var_23, var_50, adj_37, adj_23, adj_50, adj_61);
df::adj_select(var_37, var_21, var_47, adj_37, adj_21, adj_47, adj_60);
df::adj_select(var_37, var_19, var_45, adj_37, adj_19, adj_45, adj_59);
df::adj_select(var_37, var_35, var_56, adj_37, adj_35, adj_56, adj_58);
df::adj_select(var_37, var_34, var_56, adj_37, adj_34, adj_56, adj_57);
if (var_37) {
label4:;
adj_56 += adj_ret;
df::adj_spatial_transform(var_54, var_55, adj_54, adj_55, adj_56);
df::adj_quat(var_45, var_47, var_50, var_53, adj_45, adj_47, adj_50, adj_53, adj_55);
df::adj_float3(var_39, var_41, var_43, adj_39, adj_41, adj_43, adj_54);
df::adj_load(var_joint_q, var_52, adj_joint_q, adj_52, adj_53);
df::adj_add(var_start, var_51, adj_start, adj_51, adj_52);
df::adj_load(var_joint_q, var_49, adj_joint_q, adj_49, adj_50);
df::adj_add(var_start, var_48, adj_start, adj_48, adj_49);
df::adj_load(var_joint_q, var_46, adj_joint_q, adj_46, adj_47);
df::adj_add(var_start, var_36, adj_start, adj_36, adj_46);
df::adj_load(var_joint_q, var_44, adj_joint_q, adj_44, adj_45);
df::adj_add(var_start, var_24, adj_start, adj_24, adj_44);
df::adj_load(var_joint_q, var_42, adj_joint_q, adj_42, adj_43);
df::adj_add(var_start, var_16, adj_start, adj_16, adj_42);
df::adj_load(var_joint_q, var_40, adj_joint_q, adj_40, adj_41);
df::adj_add(var_start, var_6, adj_start, adj_6, adj_40);
df::adj_load(var_joint_q, var_38, adj_joint_q, adj_38, adj_39);
df::adj_add(var_start, var_0, adj_start, adj_0, adj_38);
}
df::adj_select(var_32, var_31, var_33, adj_32, adj_31, adj_33, adj_35);
df::adj_select(var_32, var_30, var_33, adj_32, adj_30, adj_33, adj_34);
if (var_32) {
label3:;
adj_33 += adj_ret;
}
df::adj_select(var_17, var_15, var_29, adj_17, adj_15, adj_29, adj_31);
df::adj_select(var_17, var_14, var_29, adj_17, adj_14, adj_29, adj_30);
if (var_17) {
label2:;
adj_29 += adj_ret;
df::adj_spatial_transform(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_quat(var_19, var_21, var_23, var_26, adj_19, adj_21, adj_23, adj_26, adj_28);
df::adj_float3(var_9, var_9, var_9, adj_9, adj_9, adj_9, adj_27);
df::adj_load(var_joint_q, var_25, adj_joint_q, adj_25, adj_26);
df::adj_add(var_start, var_24, adj_start, adj_24, adj_25);
df::adj_load(var_joint_q, var_22, adj_joint_q, adj_22, adj_23);
df::adj_add(var_start, var_16, adj_start, adj_16, adj_22);
df::adj_load(var_joint_q, var_20, adj_joint_q, adj_20, adj_21);
df::adj_add(var_start, var_6, adj_start, adj_6, adj_20);
df::adj_load(var_joint_q, var_18, adj_joint_q, adj_18, adj_19);
df::adj_add(var_start, var_0, adj_start, adj_0, adj_18);
}
df::adj_select(var_7, var_5, var_12, adj_7, adj_5, adj_12, adj_15);
df::adj_select(var_7, var_5, var_12, adj_7, adj_5, adj_12, adj_14);
df::adj_select(var_7, var_2, var_8, adj_7, adj_2, adj_8, adj_13);
if (var_7) {
label1:;
adj_12 += adj_ret;
df::adj_spatial_transform(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_quat_from_axis_angle(var_axis, var_8, adj_axis, adj_8, adj_11);
df::adj_float3(var_9, var_9, var_9, adj_9, adj_9, adj_9, adj_10);
df::adj_load(var_joint_q, var_start, adj_joint_q, adj_start, adj_8);
}
if (var_1) {
label0:;
adj_5 += adj_ret;
df::adj_spatial_transform(var_3, var_4, adj_3, adj_4, adj_5);
df::adj_mul(var_axis, var_2, adj_axis, adj_2, adj_3);
df::adj_load(var_joint_q, var_start, adj_joint_q, adj_start, adj_2);
}
return;
}
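// Computes the spatial joint velocity and writes the joint's motion-subspace
// basis vectors into joint_S_s. For single-DOF joints the axis is transformed
// into spatial coordinates and scaled by qd; a ball joint stores three rotational
// basis twists and sums their contributions; a free joint stores the full
// 6-dimensional identity basis and returns qd interpreted directly as a spatial
// vector; a fixed joint contributes nothing.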
spatial_vector jcalc_motion_cpu_func(
int var_type,
df::float3 var_axis,
spatial_transform var_X_sc,
spatial_vector* var_joint_S_s,
float* var_joint_qd,
int var_joint_start)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const float var_2 = 0.0;
df::float3 var_3;
spatial_vector var_4;
spatial_vector var_5;
float var_6;
spatial_vector var_7;
const int var_8 = 1;
bool var_9;
df::float3 var_10;
spatial_vector var_11;
spatial_vector var_12;
float var_13;
spatial_vector var_14;
spatial_vector var_15;
spatial_vector var_16;
spatial_vector var_17;
const int var_18 = 2;
bool var_19;
int var_20;
float var_21;
int var_22;
float var_23;
int var_24;
float var_25;
df::float3 var_26;
const float var_27 = 1.0;
spatial_vector var_28;
spatial_vector var_29;
spatial_vector var_30;
spatial_vector var_31;
spatial_vector var_32;
spatial_vector var_33;
int var_34;
int var_35;
int var_36;
float var_37;
spatial_vector var_38;
float var_39;
spatial_vector var_40;
spatial_vector var_41;
float var_42;
spatial_vector var_43;
spatial_vector var_44;
spatial_vector var_45;
const int var_46 = 3;
bool var_47;
spatial_vector var_48;
spatial_vector var_49;
const int var_50 = 4;
bool var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 5;
int var_63;
float var_64;
spatial_vector var_65;
int var_66;
spatial_vector var_67;
int var_68;
spatial_vector var_69;
int var_70;
spatial_vector var_71;
int var_72;
spatial_vector var_73;
int var_74;
spatial_vector var_75;
int var_76;
spatial_vector var_77;
spatial_vector var_78;
spatial_vector var_79;
spatial_vector var_80;
//---------
// forward
var_1 = (var_type == var_0);
if (var_1) {
var_3 = df::float3(var_2, var_2, var_2);
var_4 = df::spatial_vector(var_3, var_axis);
var_5 = spatial_transform_twist_cpu_func(var_X_sc, var_4);
var_6 = df::load(var_joint_qd, var_joint_start);
var_7 = df::mul(var_5, var_6);
df::store(var_joint_S_s, var_joint_start, var_5);
return var_7;
}
var_9 = (var_type == var_8);
if (var_9) {
var_10 = df::float3(var_2, var_2, var_2);
var_11 = df::spatial_vector(var_axis, var_10);
var_12 = spatial_transform_twist_cpu_func(var_X_sc, var_11);
var_13 = df::load(var_joint_qd, var_joint_start);
var_14 = df::mul(var_12, var_13);
df::store(var_joint_S_s, var_joint_start, var_12);
return var_14;
}
var_15 = df::select(var_9, var_5, var_12);
var_16 = df::select(var_9, var_7, var_14);
var_17 = df::select(var_9, var_7, var_14);
var_19 = (var_type == var_18);
if (var_19) {
var_20 = df::add(var_joint_start, var_0);
var_21 = df::load(var_joint_qd, var_20);
var_22 = df::add(var_joint_start, var_8);
var_23 = df::load(var_joint_qd, var_22);
var_24 = df::add(var_joint_start, var_18);
var_25 = df::load(var_joint_qd, var_24);
var_26 = df::float3(var_21, var_23, var_25);
var_28 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
var_29 = spatial_transform_twist_cpu_func(var_X_sc, var_28);
var_30 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
var_31 = spatial_transform_twist_cpu_func(var_X_sc, var_30);
var_32 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
var_33 = spatial_transform_twist_cpu_func(var_X_sc, var_32);
var_34 = df::add(var_joint_start, var_0);
df::store(var_joint_S_s, var_34, var_29);
var_35 = df::add(var_joint_start, var_8);
df::store(var_joint_S_s, var_35, var_31);
var_36 = df::add(var_joint_start, var_18);
df::store(var_joint_S_s, var_36, var_33);
var_37 = df::index(var_26, var_0);
var_38 = df::mul(var_29, var_37);
var_39 = df::index(var_26, var_8);
var_40 = df::mul(var_31, var_39);
var_41 = df::add(var_38, var_40);
var_42 = df::index(var_26, var_18);
var_43 = df::mul(var_33, var_42);
var_44 = df::add(var_41, var_43);
return var_44;
}
var_45 = df::select(var_19, var_17, var_44);
var_47 = (var_type == var_46);
if (var_47) {
var_48 = df::spatial_vector();
return var_48;
}
var_49 = df::select(var_47, var_45, var_48);
var_51 = (var_type == var_50);
if (var_51) {
var_52 = df::add(var_joint_start, var_0);
var_53 = df::load(var_joint_qd, var_52);
var_54 = df::add(var_joint_start, var_8);
var_55 = df::load(var_joint_qd, var_54);
var_56 = df::add(var_joint_start, var_18);
var_57 = df::load(var_joint_qd, var_56);
var_58 = df::add(var_joint_start, var_46);
var_59 = df::load(var_joint_qd, var_58);
var_60 = df::add(var_joint_start, var_50);
var_61 = df::load(var_joint_qd, var_60);
var_63 = df::add(var_joint_start, var_62);
var_64 = df::load(var_joint_qd, var_63);
var_65 = df::spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64);
var_66 = df::add(var_joint_start, var_0);
var_67 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_66, var_67);
var_68 = df::add(var_joint_start, var_8);
var_69 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_68, var_69);
var_70 = df::add(var_joint_start, var_18);
var_71 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
df::store(var_joint_S_s, var_70, var_71);
var_72 = df::add(var_joint_start, var_46);
var_73 = df::spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2);
df::store(var_joint_S_s, var_72, var_73);
var_74 = df::add(var_joint_start, var_50);
var_75 = df::spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2);
df::store(var_joint_S_s, var_74, var_75);
var_76 = df::add(var_joint_start, var_62);
var_77 = df::spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27);
df::store(var_joint_S_s, var_76, var_77);
return var_65;
}
var_78 = df::select(var_51, var_16, var_65);
var_79 = df::select(var_51, var_49, var_65);
var_80 = df::spatial_vector();
return var_80;
}
void adj_jcalc_motion_cpu_func(
int var_type,
df::float3 var_axis,
spatial_transform var_X_sc,
spatial_vector* var_joint_S_s,
float* var_joint_qd,
int var_joint_start,
int & adj_type,
df::float3 & adj_axis,
spatial_transform & adj_X_sc,
spatial_vector* adj_joint_S_s,
float* adj_joint_qd,
int & adj_joint_start,
spatial_vector & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const float var_2 = 0.0;
df::float3 var_3;
spatial_vector var_4;
spatial_vector var_5;
float var_6;
spatial_vector var_7;
const int var_8 = 1;
bool var_9;
df::float3 var_10;
spatial_vector var_11;
spatial_vector var_12;
float var_13;
spatial_vector var_14;
spatial_vector var_15;
spatial_vector var_16;
spatial_vector var_17;
const int var_18 = 2;
bool var_19;
int var_20;
float var_21;
int var_22;
float var_23;
int var_24;
float var_25;
df::float3 var_26;
const float var_27 = 1.0;
spatial_vector var_28;
spatial_vector var_29;
spatial_vector var_30;
spatial_vector var_31;
spatial_vector var_32;
spatial_vector var_33;
int var_34;
int var_35;
int var_36;
float var_37;
spatial_vector var_38;
float var_39;
spatial_vector var_40;
spatial_vector var_41;
float var_42;
spatial_vector var_43;
spatial_vector var_44;
spatial_vector var_45;
const int var_46 = 3;
bool var_47;
spatial_vector var_48;
spatial_vector var_49;
const int var_50 = 4;
bool var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 5;
int var_63;
float var_64;
spatial_vector var_65;
int var_66;
spatial_vector var_67;
int var_68;
spatial_vector var_69;
int var_70;
spatial_vector var_71;
int var_72;
spatial_vector var_73;
int var_74;
spatial_vector var_75;
int var_76;
spatial_vector var_77;
spatial_vector var_78;
spatial_vector var_79;
spatial_vector var_80;
//---------
// dual vars
int adj_0 = 0;
bool adj_1 = 0;
float adj_2 = 0;
df::float3 adj_3 = 0;
spatial_vector adj_4 = 0;
spatial_vector adj_5 = 0;
float adj_6 = 0;
spatial_vector adj_7 = 0;
int adj_8 = 0;
bool adj_9 = 0;
df::float3 adj_10 = 0;
spatial_vector adj_11 = 0;
spatial_vector adj_12 = 0;
float adj_13 = 0;
spatial_vector adj_14 = 0;
spatial_vector adj_15 = 0;
spatial_vector adj_16 = 0;
spatial_vector adj_17 = 0;
int adj_18 = 0;
bool adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
int adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
spatial_vector adj_28 = 0;
spatial_vector adj_29 = 0;
spatial_vector adj_30 = 0;
spatial_vector adj_31 = 0;
spatial_vector adj_32 = 0;
spatial_vector adj_33 = 0;
int adj_34 = 0;
int adj_35 = 0;
int adj_36 = 0;
float adj_37 = 0;
spatial_vector adj_38 = 0;
float adj_39 = 0;
spatial_vector adj_40 = 0;
spatial_vector adj_41 = 0;
float adj_42 = 0;
spatial_vector adj_43 = 0;
spatial_vector adj_44 = 0;
spatial_vector adj_45 = 0;
int adj_46 = 0;
bool adj_47 = 0;
spatial_vector adj_48 = 0;
spatial_vector adj_49 = 0;
int adj_50 = 0;
bool adj_51 = 0;
int adj_52 = 0;
float adj_53 = 0;
int adj_54 = 0;
float adj_55 = 0;
int adj_56 = 0;
float adj_57 = 0;
int adj_58 = 0;
float adj_59 = 0;
int adj_60 = 0;
float adj_61 = 0;
int adj_62 = 0;
int adj_63 = 0;
float adj_64 = 0;
spatial_vector adj_65 = 0;
int adj_66 = 0;
spatial_vector adj_67 = 0;
int adj_68 = 0;
spatial_vector adj_69 = 0;
int adj_70 = 0;
spatial_vector adj_71 = 0;
int adj_72 = 0;
spatial_vector adj_73 = 0;
int adj_74 = 0;
spatial_vector adj_75 = 0;
int adj_76 = 0;
spatial_vector adj_77 = 0;
spatial_vector adj_78 = 0;
spatial_vector adj_79 = 0;
spatial_vector adj_80 = 0;
//---------
// forward
var_1 = (var_type == var_0);
if (var_1) {
var_3 = df::float3(var_2, var_2, var_2);
var_4 = df::spatial_vector(var_3, var_axis);
var_5 = spatial_transform_twist_cpu_func(var_X_sc, var_4);
var_6 = df::load(var_joint_qd, var_joint_start);
var_7 = df::mul(var_5, var_6);
df::store(var_joint_S_s, var_joint_start, var_5);
goto label0;
}
var_9 = (var_type == var_8);
if (var_9) {
var_10 = df::float3(var_2, var_2, var_2);
var_11 = df::spatial_vector(var_axis, var_10);
var_12 = spatial_transform_twist_cpu_func(var_X_sc, var_11);
var_13 = df::load(var_joint_qd, var_joint_start);
var_14 = df::mul(var_12, var_13);
df::store(var_joint_S_s, var_joint_start, var_12);
goto label1;
}
var_15 = df::select(var_9, var_5, var_12);
var_16 = df::select(var_9, var_7, var_14);
var_17 = df::select(var_9, var_7, var_14);
var_19 = (var_type == var_18);
if (var_19) {
var_20 = df::add(var_joint_start, var_0);
var_21 = df::load(var_joint_qd, var_20);
var_22 = df::add(var_joint_start, var_8);
var_23 = df::load(var_joint_qd, var_22);
var_24 = df::add(var_joint_start, var_18);
var_25 = df::load(var_joint_qd, var_24);
var_26 = df::float3(var_21, var_23, var_25);
var_28 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
var_29 = spatial_transform_twist_cpu_func(var_X_sc, var_28);
var_30 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
var_31 = spatial_transform_twist_cpu_func(var_X_sc, var_30);
var_32 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
var_33 = spatial_transform_twist_cpu_func(var_X_sc, var_32);
var_34 = df::add(var_joint_start, var_0);
df::store(var_joint_S_s, var_34, var_29);
var_35 = df::add(var_joint_start, var_8);
df::store(var_joint_S_s, var_35, var_31);
var_36 = df::add(var_joint_start, var_18);
df::store(var_joint_S_s, var_36, var_33);
var_37 = df::index(var_26, var_0);
var_38 = df::mul(var_29, var_37);
var_39 = df::index(var_26, var_8);
var_40 = df::mul(var_31, var_39);
var_41 = df::add(var_38, var_40);
var_42 = df::index(var_26, var_18);
var_43 = df::mul(var_33, var_42);
var_44 = df::add(var_41, var_43);
goto label2;
}
var_45 = df::select(var_19, var_17, var_44);
var_47 = (var_type == var_46);
if (var_47) {
var_48 = df::spatial_vector();
goto label3;
}
var_49 = df::select(var_47, var_45, var_48);
var_51 = (var_type == var_50);
if (var_51) {
var_52 = df::add(var_joint_start, var_0);
var_53 = df::load(var_joint_qd, var_52);
var_54 = df::add(var_joint_start, var_8);
var_55 = df::load(var_joint_qd, var_54);
var_56 = df::add(var_joint_start, var_18);
var_57 = df::load(var_joint_qd, var_56);
var_58 = df::add(var_joint_start, var_46);
var_59 = df::load(var_joint_qd, var_58);
var_60 = df::add(var_joint_start, var_50);
var_61 = df::load(var_joint_qd, var_60);
var_63 = df::add(var_joint_start, var_62);
var_64 = df::load(var_joint_qd, var_63);
var_65 = df::spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64);
var_66 = df::add(var_joint_start, var_0);
var_67 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_66, var_67);
var_68 = df::add(var_joint_start, var_8);
var_69 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_68, var_69);
var_70 = df::add(var_joint_start, var_18);
var_71 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
df::store(var_joint_S_s, var_70, var_71);
var_72 = df::add(var_joint_start, var_46);
var_73 = df::spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2);
df::store(var_joint_S_s, var_72, var_73);
var_74 = df::add(var_joint_start, var_50);
var_75 = df::spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2);
df::store(var_joint_S_s, var_74, var_75);
var_76 = df::add(var_joint_start, var_62);
var_77 = df::spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27);
df::store(var_joint_S_s, var_76, var_77);
goto label4;
}
var_78 = df::select(var_51, var_16, var_65);
var_79 = df::select(var_51, var_49, var_65);
var_80 = df::spatial_vector();
goto label5;
//---------
// reverse
label5:;
adj_80 += adj_ret;
df::adj_select(var_51, var_49, var_65, adj_51, adj_49, adj_65, adj_79);
df::adj_select(var_51, var_16, var_65, adj_51, adj_16, adj_65, adj_78);
if (var_51) {
label4:;
adj_65 += adj_ret;
df::adj_store(var_joint_S_s, var_76, var_77, adj_joint_S_s, adj_76, adj_77);
df::adj_spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_27, adj_77);
df::adj_add(var_joint_start, var_62, adj_joint_start, adj_62, adj_76);
df::adj_store(var_joint_S_s, var_74, var_75, adj_joint_S_s, adj_74, adj_75);
df::adj_spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2, adj_2, adj_2, adj_2, adj_2, adj_27, adj_2, adj_75);
df::adj_add(var_joint_start, var_50, adj_joint_start, adj_50, adj_74);
df::adj_store(var_joint_S_s, var_72, var_73, adj_joint_S_s, adj_72, adj_73);
df::adj_spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2, adj_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_73);
df::adj_add(var_joint_start, var_46, adj_joint_start, adj_46, adj_72);
df::adj_store(var_joint_S_s, var_70, var_71, adj_joint_S_s, adj_70, adj_71);
df::adj_spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_71);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_70);
df::adj_store(var_joint_S_s, var_68, var_69, adj_joint_S_s, adj_68, adj_69);
df::adj_spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_69);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_68);
df::adj_store(var_joint_S_s, var_66, var_67, adj_joint_S_s, adj_66, adj_67);
df::adj_spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_67);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_66);
df::adj_spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64, adj_53, adj_55, adj_57, adj_59, adj_61, adj_64, adj_65);
df::adj_load(var_joint_qd, var_63, adj_joint_qd, adj_63, adj_64);
df::adj_add(var_joint_start, var_62, adj_joint_start, adj_62, adj_63);
df::adj_load(var_joint_qd, var_60, adj_joint_qd, adj_60, adj_61);
df::adj_add(var_joint_start, var_50, adj_joint_start, adj_50, adj_60);
df::adj_load(var_joint_qd, var_58, adj_joint_qd, adj_58, adj_59);
df::adj_add(var_joint_start, var_46, adj_joint_start, adj_46, adj_58);
df::adj_load(var_joint_qd, var_56, adj_joint_qd, adj_56, adj_57);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_56);
df::adj_load(var_joint_qd, var_54, adj_joint_qd, adj_54, adj_55);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_54);
df::adj_load(var_joint_qd, var_52, adj_joint_qd, adj_52, adj_53);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_52);
}
df::adj_select(var_47, var_45, var_48, adj_47, adj_45, adj_48, adj_49);
if (var_47) {
label3:;
adj_48 += adj_ret;
}
df::adj_select(var_19, var_17, var_44, adj_19, adj_17, adj_44, adj_45);
if (var_19) {
label2:;
adj_44 += adj_ret;
df::adj_add(var_41, var_43, adj_41, adj_43, adj_44);
df::adj_mul(var_33, var_42, adj_33, adj_42, adj_43);
df::adj_index(var_26, var_18, adj_26, adj_18, adj_42);
df::adj_add(var_38, var_40, adj_38, adj_40, adj_41);
df::adj_mul(var_31, var_39, adj_31, adj_39, adj_40);
df::adj_index(var_26, var_8, adj_26, adj_8, adj_39);
df::adj_mul(var_29, var_37, adj_29, adj_37, adj_38);
df::adj_index(var_26, var_0, adj_26, adj_0, adj_37);
df::adj_store(var_joint_S_s, var_36, var_33, adj_joint_S_s, adj_36, adj_33);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_36);
df::adj_store(var_joint_S_s, var_35, var_31, adj_joint_S_s, adj_35, adj_31);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_35);
df::adj_store(var_joint_S_s, var_34, var_29, adj_joint_S_s, adj_34, adj_29);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_34);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_32, adj_X_sc, adj_32, adj_33);
df::adj_spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_32);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_30, adj_X_sc, adj_30, adj_31);
df::adj_spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_30);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_28, adj_X_sc, adj_28, adj_29);
df::adj_spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_28);
df::adj_float3(var_21, var_23, var_25, adj_21, adj_23, adj_25, adj_26);
df::adj_load(var_joint_qd, var_24, adj_joint_qd, adj_24, adj_25);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_24);
df::adj_load(var_joint_qd, var_22, adj_joint_qd, adj_22, adj_23);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_22);
df::adj_load(var_joint_qd, var_20, adj_joint_qd, adj_20, adj_21);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_20);
}
df::adj_select(var_9, var_7, var_14, adj_9, adj_7, adj_14, adj_17);
df::adj_select(var_9, var_7, var_14, adj_9, adj_7, adj_14, adj_16);
df::adj_select(var_9, var_5, var_12, adj_9, adj_5, adj_12, adj_15);
if (var_9) {
label1:;
adj_14 += adj_ret;
df::adj_store(var_joint_S_s, var_joint_start, var_12, adj_joint_S_s, adj_joint_start, adj_12);
df::adj_mul(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_load(var_joint_qd, var_joint_start, adj_joint_qd, adj_joint_start, adj_13);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_11, adj_X_sc, adj_11, adj_12);
df::adj_spatial_vector(var_axis, var_10, adj_axis, adj_10, adj_11);
df::adj_float3(var_2, var_2, var_2, adj_2, adj_2, adj_2, adj_10);
}
if (var_1) {
label0:;
adj_7 += adj_ret;
df::adj_store(var_joint_S_s, var_joint_start, var_5, adj_joint_S_s, adj_joint_start, adj_5);
df::adj_mul(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_load(var_joint_qd, var_joint_start, adj_joint_qd, adj_joint_start, adj_6);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_4, adj_X_sc, adj_4, adj_5);
df::adj_spatial_vector(var_3, var_axis, adj_3, adj_axis, adj_4);
df::adj_float3(var_2, var_2, var_2, adj_2, adj_2, adj_2, adj_3);
}
return;
}
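// Computes the generalized joint forces tau. Each joint's motion subspace is
// dotted with the accumulated body force (negated); for prismatic/revolute
// joints a PD term toward joint_target, joint-limit spring and damping forces,
// and the explicit actuation joint_act are added. Ball joints apply per-axis
// stiffness/damping; free joints only project the body force onto each of the
// six basis vectors.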
int jcalc_tau_cpu_func(
int var_type,
float var_target_k_e,
float var_target_k_d,
float var_limit_k_e,
float var_limit_k_d,
spatial_vector* var_joint_S_s,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
int var_coord_start,
int var_dof_start,
spatial_vector var_body_f_s,
float* var_tau)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
spatial_vector var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const float var_12 = 0.0;
bool var_13;
float var_14;
float var_15;
float var_16;
bool var_17;
float var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
const int var_33 = 2;
bool var_34;
int var_35;
float var_36;
int var_37;
float var_38;
int var_39;
float var_40;
df::float3 var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
df::float3 var_48;
const int var_49 = 0;
int var_50;
spatial_vector var_51;
float var_52;
float var_53;
int var_54;
float var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
const int var_61 = 1;
int var_62;
spatial_vector var_63;
float var_64;
float var_65;
int var_66;
float var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
const int var_73 = 2;
int var_74;
spatial_vector var_75;
float var_76;
float var_77;
int var_78;
float var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
spatial_vector var_85;
const int var_86 = 4;
bool var_87;
const int var_88 = 0;
int var_89;
spatial_vector var_90;
int var_91;
float var_92;
float var_93;
const int var_94 = 1;
int var_95;
spatial_vector var_96;
int var_97;
float var_98;
float var_99;
const int var_100 = 2;
int var_101;
spatial_vector var_102;
int var_103;
float var_104;
float var_105;
const int var_106 = 3;
int var_107;
spatial_vector var_108;
int var_109;
float var_110;
float var_111;
const int var_112 = 4;
int var_113;
spatial_vector var_114;
int var_115;
float var_116;
float var_117;
const int var_118 = 5;
int var_119;
spatial_vector var_120;
int var_121;
float var_122;
float var_123;
spatial_vector var_124;
int var_125;
//---------
// forward
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
var_5 = df::load(var_joint_S_s, var_dof_start);
var_6 = df::load(var_joint_q, var_coord_start);
var_7 = df::load(var_joint_qd, var_dof_start);
var_8 = df::load(var_joint_act, var_dof_start);
var_9 = df::load(var_joint_target, var_coord_start);
var_10 = df::load(var_joint_limit_lower, var_coord_start);
var_11 = df::load(var_joint_limit_upper, var_coord_start);
var_13 = (var_6 < var_10);
if (var_13) {
var_14 = df::sub(var_10, var_6);
var_15 = df::mul(var_limit_k_e, var_14);
}
var_16 = df::select(var_13, var_12, var_15);
var_17 = (var_6 > var_11);
if (var_17) {
var_18 = df::sub(var_11, var_6);
var_19 = df::mul(var_limit_k_e, var_18);
}
var_20 = df::select(var_17, var_16, var_19);
var_21 = df::sub(var_12, var_limit_k_d);
var_22 = df::mul(var_21, var_7);
var_23 = df::spatial_dot(var_5, var_body_f_s);
var_24 = df::sub(var_12, var_23);
var_25 = df::sub(var_6, var_9);
var_26 = df::mul(var_target_k_e, var_25);
var_27 = df::sub(var_24, var_26);
var_28 = df::mul(var_target_k_d, var_7);
var_29 = df::sub(var_27, var_28);
var_30 = df::add(var_29, var_8);
var_31 = df::add(var_30, var_20);
var_32 = df::add(var_31, var_22);
df::store(var_tau, var_dof_start, var_32);
}
var_34 = (var_type == var_33);
if (var_34) {
var_35 = df::add(var_coord_start, var_0);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::add(var_coord_start, var_2);
var_38 = df::load(var_joint_q, var_37);
var_39 = df::add(var_coord_start, var_33);
var_40 = df::load(var_joint_q, var_39);
var_41 = df::float3(var_36, var_38, var_40);
var_42 = df::add(var_dof_start, var_0);
var_43 = df::load(var_joint_qd, var_42);
var_44 = df::add(var_dof_start, var_2);
var_45 = df::load(var_joint_qd, var_44);
var_46 = df::add(var_dof_start, var_33);
var_47 = df::load(var_joint_qd, var_46);
var_48 = df::float3(var_43, var_45, var_47);
var_50 = df::add(var_dof_start, var_49);
var_51 = df::load(var_joint_S_s, var_50);
var_52 = df::index(var_48, var_49);
var_53 = df::index(var_41, var_49);
var_54 = df::add(var_dof_start, var_49);
var_55 = df::spatial_dot(var_51, var_body_f_s);
var_56 = df::sub(var_12, var_55);
var_57 = df::mul(var_52, var_target_k_d);
var_58 = df::sub(var_56, var_57);
var_59 = df::mul(var_53, var_target_k_e);
var_60 = df::sub(var_58, var_59);
df::store(var_tau, var_54, var_60);
var_62 = df::add(var_dof_start, var_61);
var_63 = df::load(var_joint_S_s, var_62);
var_64 = df::index(var_48, var_61);
var_65 = df::index(var_41, var_61);
var_66 = df::add(var_dof_start, var_61);
var_67 = df::spatial_dot(var_63, var_body_f_s);
var_68 = df::sub(var_12, var_67);
var_69 = df::mul(var_64, var_target_k_d);
var_70 = df::sub(var_68, var_69);
var_71 = df::mul(var_65, var_target_k_e);
var_72 = df::sub(var_70, var_71);
df::store(var_tau, var_66, var_72);
var_74 = df::add(var_dof_start, var_73);
var_75 = df::load(var_joint_S_s, var_74);
var_76 = df::index(var_48, var_73);
var_77 = df::index(var_41, var_73);
var_78 = df::add(var_dof_start, var_73);
var_79 = df::spatial_dot(var_75, var_body_f_s);
var_80 = df::sub(var_12, var_79);
var_81 = df::mul(var_76, var_target_k_d);
var_82 = df::sub(var_80, var_81);
var_83 = df::mul(var_77, var_target_k_e);
var_84 = df::sub(var_82, var_83);
df::store(var_tau, var_78, var_84);
}
var_85 = df::select(var_34, var_5, var_75);
var_87 = (var_type == var_86);
if (var_87) {
var_89 = df::add(var_dof_start, var_88);
var_90 = df::load(var_joint_S_s, var_89);
var_91 = df::add(var_dof_start, var_88);
var_92 = df::spatial_dot(var_90, var_body_f_s);
var_93 = df::sub(var_12, var_92);
df::store(var_tau, var_91, var_93);
var_95 = df::add(var_dof_start, var_94);
var_96 = df::load(var_joint_S_s, var_95);
var_97 = df::add(var_dof_start, var_94);
var_98 = df::spatial_dot(var_96, var_body_f_s);
var_99 = df::sub(var_12, var_98);
df::store(var_tau, var_97, var_99);
var_101 = df::add(var_dof_start, var_100);
var_102 = df::load(var_joint_S_s, var_101);
var_103 = df::add(var_dof_start, var_100);
var_104 = df::spatial_dot(var_102, var_body_f_s);
var_105 = df::sub(var_12, var_104);
df::store(var_tau, var_103, var_105);
var_107 = df::add(var_dof_start, var_106);
var_108 = df::load(var_joint_S_s, var_107);
var_109 = df::add(var_dof_start, var_106);
var_110 = df::spatial_dot(var_108, var_body_f_s);
var_111 = df::sub(var_12, var_110);
df::store(var_tau, var_109, var_111);
var_113 = df::add(var_dof_start, var_112);
var_114 = df::load(var_joint_S_s, var_113);
var_115 = df::add(var_dof_start, var_112);
var_116 = df::spatial_dot(var_114, var_body_f_s);
var_117 = df::sub(var_12, var_116);
df::store(var_tau, var_115, var_117);
var_119 = df::add(var_dof_start, var_118);
var_120 = df::load(var_joint_S_s, var_119);
var_121 = df::add(var_dof_start, var_118);
var_122 = df::spatial_dot(var_120, var_body_f_s);
var_123 = df::sub(var_12, var_122);
df::store(var_tau, var_121, var_123);
}
var_124 = df::select(var_87, var_85, var_120);
var_125 = df::select(var_87, var_73, var_118);
return var_0;
}
void adj_jcalc_tau_cpu_func(
int var_type,
float var_target_k_e,
float var_target_k_d,
float var_limit_k_e,
float var_limit_k_d,
spatial_vector* var_joint_S_s,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
int var_coord_start,
int var_dof_start,
spatial_vector var_body_f_s,
float* var_tau,
int & adj_type,
float & adj_target_k_e,
float & adj_target_k_d,
float & adj_limit_k_e,
float & adj_limit_k_d,
spatial_vector* adj_joint_S_s,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_act,
float* adj_joint_target,
float* adj_joint_limit_lower,
float* adj_joint_limit_upper,
int & adj_coord_start,
int & adj_dof_start,
spatial_vector & adj_body_f_s,
float* adj_tau,
int & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
spatial_vector var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const float var_12 = 0.0;
bool var_13;
float var_14;
float var_15;
float var_16;
bool var_17;
float var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
const int var_33 = 2;
bool var_34;
int var_35;
float var_36;
int var_37;
float var_38;
int var_39;
float var_40;
df::float3 var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
df::float3 var_48;
const int var_49 = 0;
int var_50;
spatial_vector var_51;
float var_52;
float var_53;
int var_54;
float var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
const int var_61 = 1;
int var_62;
spatial_vector var_63;
float var_64;
float var_65;
int var_66;
float var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
const int var_73 = 2;
int var_74;
spatial_vector var_75;
float var_76;
float var_77;
int var_78;
float var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
spatial_vector var_85;
const int var_86 = 4;
bool var_87;
const int var_88 = 0;
int var_89;
spatial_vector var_90;
int var_91;
float var_92;
float var_93;
const int var_94 = 1;
int var_95;
spatial_vector var_96;
int var_97;
float var_98;
float var_99;
const int var_100 = 2;
int var_101;
spatial_vector var_102;
int var_103;
float var_104;
float var_105;
const int var_106 = 3;
int var_107;
spatial_vector var_108;
int var_109;
float var_110;
float var_111;
const int var_112 = 4;
int var_113;
spatial_vector var_114;
int var_115;
float var_116;
float var_117;
const int var_118 = 5;
int var_119;
spatial_vector var_120;
int var_121;
float var_122;
float var_123;
spatial_vector var_124;
int var_125;
//---------
// dual vars
int adj_0 = 0;
bool adj_1 = 0;
int adj_2 = 0;
bool adj_3 = 0;
bool adj_4 = 0;
spatial_vector adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
bool adj_13 = 0;
float adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
bool adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
float adj_32 = 0;
int adj_33 = 0;
bool adj_34 = 0;
int adj_35 = 0;
float adj_36 = 0;
int adj_37 = 0;
float adj_38 = 0;
int adj_39 = 0;
float adj_40 = 0;
df::float3 adj_41 = 0;
int adj_42 = 0;
float adj_43 = 0;
int adj_44 = 0;
float adj_45 = 0;
int adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
int adj_49 = 0;
int adj_50 = 0;
spatial_vector adj_51 = 0;
float adj_52 = 0;
float adj_53 = 0;
int adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
int adj_61 = 0;
int adj_62 = 0;
spatial_vector adj_63 = 0;
float adj_64 = 0;
float adj_65 = 0;
int adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
int adj_73 = 0;
int adj_74 = 0;
spatial_vector adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
int adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
float adj_83 = 0;
float adj_84 = 0;
spatial_vector adj_85 = 0;
int adj_86 = 0;
bool adj_87 = 0;
int adj_88 = 0;
int adj_89 = 0;
spatial_vector adj_90 = 0;
int adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
int adj_94 = 0;
int adj_95 = 0;
spatial_vector adj_96 = 0;
int adj_97 = 0;
float adj_98 = 0;
float adj_99 = 0;
int adj_100 = 0;
int adj_101 = 0;
spatial_vector adj_102 = 0;
int adj_103 = 0;
float adj_104 = 0;
float adj_105 = 0;
int adj_106 = 0;
int adj_107 = 0;
spatial_vector adj_108 = 0;
int adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
int adj_112 = 0;
int adj_113 = 0;
spatial_vector adj_114 = 0;
int adj_115 = 0;
float adj_116 = 0;
float adj_117 = 0;
int adj_118 = 0;
int adj_119 = 0;
spatial_vector adj_120 = 0;
int adj_121 = 0;
float adj_122 = 0;
float adj_123 = 0;
spatial_vector adj_124 = 0;
int adj_125 = 0;
//---------
// forward
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
var_5 = df::load(var_joint_S_s, var_dof_start);
var_6 = df::load(var_joint_q, var_coord_start);
var_7 = df::load(var_joint_qd, var_dof_start);
var_8 = df::load(var_joint_act, var_dof_start);
var_9 = df::load(var_joint_target, var_coord_start);
var_10 = df::load(var_joint_limit_lower, var_coord_start);
var_11 = df::load(var_joint_limit_upper, var_coord_start);
var_13 = (var_6 < var_10);
if (var_13) {
var_14 = df::sub(var_10, var_6);
var_15 = df::mul(var_limit_k_e, var_14);
}
var_16 = df::select(var_13, var_12, var_15);
var_17 = (var_6 > var_11);
if (var_17) {
var_18 = df::sub(var_11, var_6);
var_19 = df::mul(var_limit_k_e, var_18);
}
var_20 = df::select(var_17, var_16, var_19);
var_21 = df::sub(var_12, var_limit_k_d);
var_22 = df::mul(var_21, var_7);
var_23 = df::spatial_dot(var_5, var_body_f_s);
var_24 = df::sub(var_12, var_23);
var_25 = df::sub(var_6, var_9);
var_26 = df::mul(var_target_k_e, var_25);
var_27 = df::sub(var_24, var_26);
var_28 = df::mul(var_target_k_d, var_7);
var_29 = df::sub(var_27, var_28);
var_30 = df::add(var_29, var_8);
var_31 = df::add(var_30, var_20);
var_32 = df::add(var_31, var_22);
df::store(var_tau, var_dof_start, var_32);
}
var_34 = (var_type == var_33);
if (var_34) {
var_35 = df::add(var_coord_start, var_0);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::add(var_coord_start, var_2);
var_38 = df::load(var_joint_q, var_37);
var_39 = df::add(var_coord_start, var_33);
var_40 = df::load(var_joint_q, var_39);
var_41 = df::float3(var_36, var_38, var_40);
var_42 = df::add(var_dof_start, var_0);
var_43 = df::load(var_joint_qd, var_42);
var_44 = df::add(var_dof_start, var_2);
var_45 = df::load(var_joint_qd, var_44);
var_46 = df::add(var_dof_start, var_33);
var_47 = df::load(var_joint_qd, var_46);
var_48 = df::float3(var_43, var_45, var_47);
var_50 = df::add(var_dof_start, var_49);
var_51 = df::load(var_joint_S_s, var_50);
var_52 = df::index(var_48, var_49);
var_53 = df::index(var_41, var_49);
var_54 = df::add(var_dof_start, var_49);
var_55 = df::spatial_dot(var_51, var_body_f_s);
var_56 = df::sub(var_12, var_55);
var_57 = df::mul(var_52, var_target_k_d);
var_58 = df::sub(var_56, var_57);
var_59 = df::mul(var_53, var_target_k_e);
var_60 = df::sub(var_58, var_59);
df::store(var_tau, var_54, var_60);
var_62 = df::add(var_dof_start, var_61);
var_63 = df::load(var_joint_S_s, var_62);
var_64 = df::index(var_48, var_61);
var_65 = df::index(var_41, var_61);
var_66 = df::add(var_dof_start, var_61);
var_67 = df::spatial_dot(var_63, var_body_f_s);
var_68 = df::sub(var_12, var_67);
var_69 = df::mul(var_64, var_target_k_d);
var_70 = df::sub(var_68, var_69);
var_71 = df::mul(var_65, var_target_k_e);
var_72 = df::sub(var_70, var_71);
df::store(var_tau, var_66, var_72);
var_74 = df::add(var_dof_start, var_73);
var_75 = df::load(var_joint_S_s, var_74);
var_76 = df::index(var_48, var_73);
var_77 = df::index(var_41, var_73);
var_78 = df::add(var_dof_start, var_73);
var_79 = df::spatial_dot(var_75, var_body_f_s);
var_80 = df::sub(var_12, var_79);
var_81 = df::mul(var_76, var_target_k_d);
var_82 = df::sub(var_80, var_81);
var_83 = df::mul(var_77, var_target_k_e);
var_84 = df::sub(var_82, var_83);
df::store(var_tau, var_78, var_84);
}
var_85 = df::select(var_34, var_5, var_75);
var_87 = (var_type == var_86);
if (var_87) {
var_89 = df::add(var_dof_start, var_88);
var_90 = df::load(var_joint_S_s, var_89);
var_91 = df::add(var_dof_start, var_88);
var_92 = df::spatial_dot(var_90, var_body_f_s);
var_93 = df::sub(var_12, var_92);
df::store(var_tau, var_91, var_93);
var_95 = df::add(var_dof_start, var_94);
var_96 = df::load(var_joint_S_s, var_95);
var_97 = df::add(var_dof_start, var_94);
var_98 = df::spatial_dot(var_96, var_body_f_s);
var_99 = df::sub(var_12, var_98);
df::store(var_tau, var_97, var_99);
var_101 = df::add(var_dof_start, var_100);
var_102 = df::load(var_joint_S_s, var_101);
var_103 = df::add(var_dof_start, var_100);
var_104 = df::spatial_dot(var_102, var_body_f_s);
var_105 = df::sub(var_12, var_104);
df::store(var_tau, var_103, var_105);
var_107 = df::add(var_dof_start, var_106);
var_108 = df::load(var_joint_S_s, var_107);
var_109 = df::add(var_dof_start, var_106);
var_110 = df::spatial_dot(var_108, var_body_f_s);
var_111 = df::sub(var_12, var_110);
df::store(var_tau, var_109, var_111);
var_113 = df::add(var_dof_start, var_112);
var_114 = df::load(var_joint_S_s, var_113);
var_115 = df::add(var_dof_start, var_112);
var_116 = df::spatial_dot(var_114, var_body_f_s);
var_117 = df::sub(var_12, var_116);
df::store(var_tau, var_115, var_117);
var_119 = df::add(var_dof_start, var_118);
var_120 = df::load(var_joint_S_s, var_119);
var_121 = df::add(var_dof_start, var_118);
var_122 = df::spatial_dot(var_120, var_body_f_s);
var_123 = df::sub(var_12, var_122);
df::store(var_tau, var_121, var_123);
}
var_124 = df::select(var_87, var_85, var_120);
var_125 = df::select(var_87, var_73, var_118);
goto label0;
//---------
// reverse
label0:;
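    // back-propagate: each adj_N accumulates the gradient of the outputs with
    // respect to var_N, visiting the forward statements in reverse order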
adj_0 += adj_ret;
df::adj_select(var_87, var_73, var_118, adj_87, adj_73, adj_118, adj_125);
df::adj_select(var_87, var_85, var_120, adj_87, adj_85, adj_120, adj_124);
if (var_87) {
df::adj_store(var_tau, var_121, var_123, adj_tau, adj_121, adj_123);
df::adj_sub(var_12, var_122, adj_12, adj_122, adj_123);
df::adj_spatial_dot(var_120, var_body_f_s, adj_120, adj_body_f_s, adj_122);
df::adj_add(var_dof_start, var_118, adj_dof_start, adj_118, adj_121);
df::adj_load(var_joint_S_s, var_119, adj_joint_S_s, adj_119, adj_120);
df::adj_add(var_dof_start, var_118, adj_dof_start, adj_118, adj_119);
df::adj_store(var_tau, var_115, var_117, adj_tau, adj_115, adj_117);
df::adj_sub(var_12, var_116, adj_12, adj_116, adj_117);
df::adj_spatial_dot(var_114, var_body_f_s, adj_114, adj_body_f_s, adj_116);
df::adj_add(var_dof_start, var_112, adj_dof_start, adj_112, adj_115);
df::adj_load(var_joint_S_s, var_113, adj_joint_S_s, adj_113, adj_114);
df::adj_add(var_dof_start, var_112, adj_dof_start, adj_112, adj_113);
df::adj_store(var_tau, var_109, var_111, adj_tau, adj_109, adj_111);
df::adj_sub(var_12, var_110, adj_12, adj_110, adj_111);
df::adj_spatial_dot(var_108, var_body_f_s, adj_108, adj_body_f_s, adj_110);
df::adj_add(var_dof_start, var_106, adj_dof_start, adj_106, adj_109);
df::adj_load(var_joint_S_s, var_107, adj_joint_S_s, adj_107, adj_108);
df::adj_add(var_dof_start, var_106, adj_dof_start, adj_106, adj_107);
df::adj_store(var_tau, var_103, var_105, adj_tau, adj_103, adj_105);
df::adj_sub(var_12, var_104, adj_12, adj_104, adj_105);
df::adj_spatial_dot(var_102, var_body_f_s, adj_102, adj_body_f_s, adj_104);
df::adj_add(var_dof_start, var_100, adj_dof_start, adj_100, adj_103);
df::adj_load(var_joint_S_s, var_101, adj_joint_S_s, adj_101, adj_102);
df::adj_add(var_dof_start, var_100, adj_dof_start, adj_100, adj_101);
df::adj_store(var_tau, var_97, var_99, adj_tau, adj_97, adj_99);
df::adj_sub(var_12, var_98, adj_12, adj_98, adj_99);
df::adj_spatial_dot(var_96, var_body_f_s, adj_96, adj_body_f_s, adj_98);
df::adj_add(var_dof_start, var_94, adj_dof_start, adj_94, adj_97);
df::adj_load(var_joint_S_s, var_95, adj_joint_S_s, adj_95, adj_96);
df::adj_add(var_dof_start, var_94, adj_dof_start, adj_94, adj_95);
df::adj_store(var_tau, var_91, var_93, adj_tau, adj_91, adj_93);
df::adj_sub(var_12, var_92, adj_12, adj_92, adj_93);
df::adj_spatial_dot(var_90, var_body_f_s, adj_90, adj_body_f_s, adj_92);
df::adj_add(var_dof_start, var_88, adj_dof_start, adj_88, adj_91);
df::adj_load(var_joint_S_s, var_89, adj_joint_S_s, adj_89, adj_90);
df::adj_add(var_dof_start, var_88, adj_dof_start, adj_88, adj_89);
}
df::adj_select(var_34, var_5, var_75, adj_34, adj_5, adj_75, adj_85);
if (var_34) {
df::adj_store(var_tau, var_78, var_84, adj_tau, adj_78, adj_84);
df::adj_sub(var_82, var_83, adj_82, adj_83, adj_84);
df::adj_mul(var_77, var_target_k_e, adj_77, adj_target_k_e, adj_83);
df::adj_sub(var_80, var_81, adj_80, adj_81, adj_82);
df::adj_mul(var_76, var_target_k_d, adj_76, adj_target_k_d, adj_81);
df::adj_sub(var_12, var_79, adj_12, adj_79, adj_80);
df::adj_spatial_dot(var_75, var_body_f_s, adj_75, adj_body_f_s, adj_79);
df::adj_add(var_dof_start, var_73, adj_dof_start, adj_73, adj_78);
df::adj_index(var_41, var_73, adj_41, adj_73, adj_77);
df::adj_index(var_48, var_73, adj_48, adj_73, adj_76);
df::adj_load(var_joint_S_s, var_74, adj_joint_S_s, adj_74, adj_75);
df::adj_add(var_dof_start, var_73, adj_dof_start, adj_73, adj_74);
df::adj_store(var_tau, var_66, var_72, adj_tau, adj_66, adj_72);
df::adj_sub(var_70, var_71, adj_70, adj_71, adj_72);
df::adj_mul(var_65, var_target_k_e, adj_65, adj_target_k_e, adj_71);
df::adj_sub(var_68, var_69, adj_68, adj_69, adj_70);
df::adj_mul(var_64, var_target_k_d, adj_64, adj_target_k_d, adj_69);
df::adj_sub(var_12, var_67, adj_12, adj_67, adj_68);
df::adj_spatial_dot(var_63, var_body_f_s, adj_63, adj_body_f_s, adj_67);
df::adj_add(var_dof_start, var_61, adj_dof_start, adj_61, adj_66);
df::adj_index(var_41, var_61, adj_41, adj_61, adj_65);
df::adj_index(var_48, var_61, adj_48, adj_61, adj_64);
df::adj_load(var_joint_S_s, var_62, adj_joint_S_s, adj_62, adj_63);
df::adj_add(var_dof_start, var_61, adj_dof_start, adj_61, adj_62);
df::adj_store(var_tau, var_54, var_60, adj_tau, adj_54, adj_60);
df::adj_sub(var_58, var_59, adj_58, adj_59, adj_60);
df::adj_mul(var_53, var_target_k_e, adj_53, adj_target_k_e, adj_59);
df::adj_sub(var_56, var_57, adj_56, adj_57, adj_58);
df::adj_mul(var_52, var_target_k_d, adj_52, adj_target_k_d, adj_57);
df::adj_sub(var_12, var_55, adj_12, adj_55, adj_56);
df::adj_spatial_dot(var_51, var_body_f_s, adj_51, adj_body_f_s, adj_55);
df::adj_add(var_dof_start, var_49, adj_dof_start, adj_49, adj_54);
df::adj_index(var_41, var_49, adj_41, adj_49, adj_53);
df::adj_index(var_48, var_49, adj_48, adj_49, adj_52);
df::adj_load(var_joint_S_s, var_50, adj_joint_S_s, adj_50, adj_51);
df::adj_add(var_dof_start, var_49, adj_dof_start, adj_49, adj_50);
df::adj_float3(var_43, var_45, var_47, adj_43, adj_45, adj_47, adj_48);
df::adj_load(var_joint_qd, var_46, adj_joint_qd, adj_46, adj_47);
df::adj_add(var_dof_start, var_33, adj_dof_start, adj_33, adj_46);
df::adj_load(var_joint_qd, var_44, adj_joint_qd, adj_44, adj_45);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_44);
df::adj_load(var_joint_qd, var_42, adj_joint_qd, adj_42, adj_43);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_42);
df::adj_float3(var_36, var_38, var_40, adj_36, adj_38, adj_40, adj_41);
df::adj_load(var_joint_q, var_39, adj_joint_q, adj_39, adj_40);
df::adj_add(var_coord_start, var_33, adj_coord_start, adj_33, adj_39);
df::adj_load(var_joint_q, var_37, adj_joint_q, adj_37, adj_38);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_37);
df::adj_load(var_joint_q, var_35, adj_joint_q, adj_35, adj_36);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_35);
}
if (var_4) {
df::adj_store(var_tau, var_dof_start, var_32, adj_tau, adj_dof_start, adj_32);
df::adj_add(var_31, var_22, adj_31, adj_22, adj_32);
df::adj_add(var_30, var_20, adj_30, adj_20, adj_31);
df::adj_add(var_29, var_8, adj_29, adj_8, adj_30);
df::adj_sub(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_mul(var_target_k_d, var_7, adj_target_k_d, adj_7, adj_28);
df::adj_sub(var_24, var_26, adj_24, adj_26, adj_27);
df::adj_mul(var_target_k_e, var_25, adj_target_k_e, adj_25, adj_26);
df::adj_sub(var_6, var_9, adj_6, adj_9, adj_25);
df::adj_sub(var_12, var_23, adj_12, adj_23, adj_24);
df::adj_spatial_dot(var_5, var_body_f_s, adj_5, adj_body_f_s, adj_23);
df::adj_mul(var_21, var_7, adj_21, adj_7, adj_22);
df::adj_sub(var_12, var_limit_k_d, adj_12, adj_limit_k_d, adj_21);
df::adj_select(var_17, var_16, var_19, adj_17, adj_16, adj_19, adj_20);
if (var_17) {
df::adj_mul(var_limit_k_e, var_18, adj_limit_k_e, adj_18, adj_19);
df::adj_sub(var_11, var_6, adj_11, adj_6, adj_18);
}
df::adj_select(var_13, var_12, var_15, adj_13, adj_12, adj_15, adj_16);
if (var_13) {
df::adj_mul(var_limit_k_e, var_14, adj_limit_k_e, adj_14, adj_15);
df::adj_sub(var_10, var_6, adj_10, adj_6, adj_14);
}
df::adj_load(var_joint_limit_upper, var_coord_start, adj_joint_limit_upper, adj_coord_start, adj_11);
df::adj_load(var_joint_limit_lower, var_coord_start, adj_joint_limit_lower, adj_coord_start, adj_10);
df::adj_load(var_joint_target, var_coord_start, adj_joint_target, adj_coord_start, adj_9);
df::adj_load(var_joint_act, var_dof_start, adj_joint_act, adj_dof_start, adj_8);
df::adj_load(var_joint_qd, var_dof_start, adj_joint_qd, adj_dof_start, adj_7);
df::adj_load(var_joint_q, var_coord_start, adj_joint_q, adj_coord_start, adj_6);
df::adj_load(var_joint_S_s, var_dof_start, adj_joint_S_s, adj_dof_start, adj_5);
}
return;
}
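// Semi-implicit Euler integration of a single joint's state:
//   type 0/1: scalar dof, qd_new = qd + qdd*dt, q_new = q + qd_new*dt
//   type 2:   3-dof ball joint, integrates the angular velocity and the
//             orientation quaternion (re-normalized)
//   type 4:   6-dof free joint, integrates angular and linear velocity and
//             updates both the position and the orientation quaternion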
int jcalc_integrate_cpu_func(
int var_type,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_qdd,
int var_coord_start,
int var_dof_start,
float var_dt,
float* var_joint_q_new,
float* var_joint_qd_new)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
float var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const int var_12 = 2;
bool var_13;
int var_14;
float var_15;
int var_16;
float var_17;
int var_18;
float var_19;
df::float3 var_20;
int var_21;
float var_22;
int var_23;
float var_24;
int var_25;
float var_26;
df::float3 var_27;
int var_28;
float var_29;
int var_30;
float var_31;
int var_32;
float var_33;
const int var_34 = 3;
int var_35;
float var_36;
quat var_37;
df::float3 var_38;
df::float3 var_39;
const float var_40 = 0.0;
quat var_41;
quat var_42;
const float var_43 = 0.5;
quat var_44;
quat var_45;
quat var_46;
quat var_47;
int var_48;
float var_49;
int var_50;
float var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 4;
bool var_63;
int var_64;
float var_65;
int var_66;
float var_67;
int var_68;
float var_69;
df::float3 var_70;
int var_71;
float var_72;
int var_73;
float var_74;
const int var_75 = 5;
int var_76;
float var_77;
df::float3 var_78;
int var_79;
float var_80;
int var_81;
float var_82;
int var_83;
float var_84;
df::float3 var_85;
int var_86;
float var_87;
int var_88;
float var_89;
int var_90;
float var_91;
df::float3 var_92;
df::float3 var_93;
df::float3 var_94;
df::float3 var_95;
df::float3 var_96;
int var_97;
float var_98;
int var_99;
float var_100;
int var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
int var_106;
float var_107;
int var_108;
float var_109;
int var_110;
float var_111;
const int var_112 = 6;
int var_113;
float var_114;
quat var_115;
quat var_116;
quat var_117;
quat var_118;
df::float3 var_119;
df::float3 var_120;
quat var_121;
quat var_122;
quat var_123;
int var_124;
float var_125;
int var_126;
float var_127;
int var_128;
float var_129;
int var_130;
float var_131;
int var_132;
float var_133;
int var_134;
float var_135;
int var_136;
float var_137;
int var_138;
float var_139;
int var_140;
float var_141;
int var_142;
float var_143;
int var_144;
float var_145;
int var_146;
float var_147;
int var_148;
float var_149;
//---------
// forward
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
var_5 = df::load(var_joint_qdd, var_dof_start);
var_6 = df::load(var_joint_qd, var_dof_start);
var_7 = df::load(var_joint_q, var_coord_start);
var_8 = df::mul(var_5, var_dt);
var_9 = df::add(var_6, var_8);
var_10 = df::mul(var_9, var_dt);
var_11 = df::add(var_7, var_10);
df::store(var_joint_qd_new, var_dof_start, var_9);
df::store(var_joint_q_new, var_coord_start, var_11);
}
var_13 = (var_type == var_12);
if (var_13) {
var_14 = df::add(var_dof_start, var_0);
var_15 = df::load(var_joint_qdd, var_14);
var_16 = df::add(var_dof_start, var_2);
var_17 = df::load(var_joint_qdd, var_16);
var_18 = df::add(var_dof_start, var_12);
var_19 = df::load(var_joint_qdd, var_18);
var_20 = df::float3(var_15, var_17, var_19);
var_21 = df::add(var_dof_start, var_0);
var_22 = df::load(var_joint_qd, var_21);
var_23 = df::add(var_dof_start, var_2);
var_24 = df::load(var_joint_qd, var_23);
var_25 = df::add(var_dof_start, var_12);
var_26 = df::load(var_joint_qd, var_25);
var_27 = df::float3(var_22, var_24, var_26);
var_28 = df::add(var_coord_start, var_0);
var_29 = df::load(var_joint_q, var_28);
var_30 = df::add(var_coord_start, var_2);
var_31 = df::load(var_joint_q, var_30);
var_32 = df::add(var_coord_start, var_12);
var_33 = df::load(var_joint_q, var_32);
var_35 = df::add(var_coord_start, var_34);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::quat(var_29, var_31, var_33, var_36);
var_38 = df::mul(var_20, var_dt);
var_39 = df::add(var_27, var_38);
var_41 = df::quat(var_39, var_40);
var_42 = df::mul(var_41, var_37);
var_44 = df::mul(var_42, var_43);
var_45 = df::mul(var_44, var_dt);
var_46 = df::add(var_37, var_45);
var_47 = df::normalize(var_46);
var_48 = df::add(var_coord_start, var_0);
var_49 = df::index(var_47, var_0);
df::store(var_joint_q_new, var_48, var_49);
var_50 = df::add(var_coord_start, var_2);
var_51 = df::index(var_47, var_2);
df::store(var_joint_q_new, var_50, var_51);
var_52 = df::add(var_coord_start, var_12);
var_53 = df::index(var_47, var_12);
df::store(var_joint_q_new, var_52, var_53);
var_54 = df::add(var_coord_start, var_34);
var_55 = df::index(var_47, var_34);
df::store(var_joint_q_new, var_54, var_55);
var_56 = df::add(var_dof_start, var_0);
var_57 = df::index(var_39, var_0);
df::store(var_joint_qd_new, var_56, var_57);
var_58 = df::add(var_dof_start, var_2);
var_59 = df::index(var_39, var_2);
df::store(var_joint_qd_new, var_58, var_59);
var_60 = df::add(var_dof_start, var_12);
var_61 = df::index(var_39, var_12);
df::store(var_joint_qd_new, var_60, var_61);
}
var_63 = (var_type == var_62);
if (var_63) {
var_64 = df::add(var_dof_start, var_0);
var_65 = df::load(var_joint_qdd, var_64);
var_66 = df::add(var_dof_start, var_2);
var_67 = df::load(var_joint_qdd, var_66);
var_68 = df::add(var_dof_start, var_12);
var_69 = df::load(var_joint_qdd, var_68);
var_70 = df::float3(var_65, var_67, var_69);
var_71 = df::add(var_dof_start, var_34);
var_72 = df::load(var_joint_qdd, var_71);
var_73 = df::add(var_dof_start, var_62);
var_74 = df::load(var_joint_qdd, var_73);
var_76 = df::add(var_dof_start, var_75);
var_77 = df::load(var_joint_qdd, var_76);
var_78 = df::float3(var_72, var_74, var_77);
var_79 = df::add(var_dof_start, var_0);
var_80 = df::load(var_joint_qd, var_79);
var_81 = df::add(var_dof_start, var_2);
var_82 = df::load(var_joint_qd, var_81);
var_83 = df::add(var_dof_start, var_12);
var_84 = df::load(var_joint_qd, var_83);
var_85 = df::float3(var_80, var_82, var_84);
var_86 = df::add(var_dof_start, var_34);
var_87 = df::load(var_joint_qd, var_86);
var_88 = df::add(var_dof_start, var_62);
var_89 = df::load(var_joint_qd, var_88);
var_90 = df::add(var_dof_start, var_75);
var_91 = df::load(var_joint_qd, var_90);
var_92 = df::float3(var_87, var_89, var_91);
var_93 = df::mul(var_70, var_dt);
var_94 = df::add(var_85, var_93);
var_95 = df::mul(var_78, var_dt);
var_96 = df::add(var_92, var_95);
var_97 = df::add(var_coord_start, var_0);
var_98 = df::load(var_joint_q, var_97);
var_99 = df::add(var_coord_start, var_2);
var_100 = df::load(var_joint_q, var_99);
var_101 = df::add(var_coord_start, var_12);
var_102 = df::load(var_joint_q, var_101);
var_103 = df::float3(var_98, var_100, var_102);
var_104 = df::cross(var_94, var_103);
var_105 = df::add(var_96, var_104);
var_106 = df::add(var_coord_start, var_34);
var_107 = df::load(var_joint_q, var_106);
var_108 = df::add(var_coord_start, var_62);
var_109 = df::load(var_joint_q, var_108);
var_110 = df::add(var_coord_start, var_75);
var_111 = df::load(var_joint_q, var_110);
var_113 = df::add(var_coord_start, var_112);
var_114 = df::load(var_joint_q, var_113);
var_115 = df::quat(var_107, var_109, var_111, var_114);
var_116 = df::quat(var_94, var_40);
var_117 = df::mul(var_116, var_115);
var_118 = df::mul(var_117, var_43);
var_119 = df::mul(var_105, var_dt);
var_120 = df::add(var_103, var_119);
var_121 = df::mul(var_118, var_dt);
var_122 = df::add(var_115, var_121);
var_123 = df::normalize(var_122);
var_124 = df::add(var_coord_start, var_0);
var_125 = df::index(var_120, var_0);
df::store(var_joint_q_new, var_124, var_125);
var_126 = df::add(var_coord_start, var_2);
var_127 = df::index(var_120, var_2);
df::store(var_joint_q_new, var_126, var_127);
var_128 = df::add(var_coord_start, var_12);
var_129 = df::index(var_120, var_12);
df::store(var_joint_q_new, var_128, var_129);
var_130 = df::add(var_coord_start, var_34);
var_131 = df::index(var_123, var_0);
df::store(var_joint_q_new, var_130, var_131);
var_132 = df::add(var_coord_start, var_62);
var_133 = df::index(var_123, var_2);
df::store(var_joint_q_new, var_132, var_133);
var_134 = df::add(var_coord_start, var_75);
var_135 = df::index(var_123, var_12);
df::store(var_joint_q_new, var_134, var_135);
var_136 = df::add(var_coord_start, var_112);
var_137 = df::index(var_123, var_34);
df::store(var_joint_q_new, var_136, var_137);
var_138 = df::add(var_dof_start, var_0);
var_139 = df::index(var_94, var_0);
df::store(var_joint_qd_new, var_138, var_139);
var_140 = df::add(var_dof_start, var_2);
var_141 = df::index(var_94, var_2);
df::store(var_joint_qd_new, var_140, var_141);
var_142 = df::add(var_dof_start, var_12);
var_143 = df::index(var_94, var_12);
df::store(var_joint_qd_new, var_142, var_143);
var_144 = df::add(var_dof_start, var_34);
var_145 = df::index(var_96, var_0);
df::store(var_joint_qd_new, var_144, var_145);
var_146 = df::add(var_dof_start, var_62);
var_147 = df::index(var_96, var_2);
df::store(var_joint_qd_new, var_146, var_147);
var_148 = df::add(var_dof_start, var_75);
var_149 = df::index(var_96, var_12);
df::store(var_joint_qd_new, var_148, var_149);
}
return var_0;
}
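// Reverse-mode adjoint of jcalc_integrate_cpu_func: replays the forward
// integration for the active joint type, then back-propagates through the
// quaternion normalization, the Euler updates and the loads/stores.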
void adj_jcalc_integrate_cpu_func(
int var_type,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_qdd,
int var_coord_start,
int var_dof_start,
float var_dt,
float* var_joint_q_new,
float* var_joint_qd_new,
int & adj_type,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_qdd,
int & adj_coord_start,
int & adj_dof_start,
float & adj_dt,
float* adj_joint_q_new,
float* adj_joint_qd_new,
int & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
float var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const int var_12 = 2;
bool var_13;
int var_14;
float var_15;
int var_16;
float var_17;
int var_18;
float var_19;
df::float3 var_20;
int var_21;
float var_22;
int var_23;
float var_24;
int var_25;
float var_26;
df::float3 var_27;
int var_28;
float var_29;
int var_30;
float var_31;
int var_32;
float var_33;
const int var_34 = 3;
int var_35;
float var_36;
quat var_37;
df::float3 var_38;
df::float3 var_39;
const float var_40 = 0.0;
quat var_41;
quat var_42;
const float var_43 = 0.5;
quat var_44;
quat var_45;
quat var_46;
quat var_47;
int var_48;
float var_49;
int var_50;
float var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 4;
bool var_63;
int var_64;
float var_65;
int var_66;
float var_67;
int var_68;
float var_69;
df::float3 var_70;
int var_71;
float var_72;
int var_73;
float var_74;
const int var_75 = 5;
int var_76;
float var_77;
df::float3 var_78;
int var_79;
float var_80;
int var_81;
float var_82;
int var_83;
float var_84;
df::float3 var_85;
int var_86;
float var_87;
int var_88;
float var_89;
int var_90;
float var_91;
df::float3 var_92;
df::float3 var_93;
df::float3 var_94;
df::float3 var_95;
df::float3 var_96;
int var_97;
float var_98;
int var_99;
float var_100;
int var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
int var_106;
float var_107;
int var_108;
float var_109;
int var_110;
float var_111;
const int var_112 = 6;
int var_113;
float var_114;
quat var_115;
quat var_116;
quat var_117;
quat var_118;
df::float3 var_119;
df::float3 var_120;
quat var_121;
quat var_122;
quat var_123;
int var_124;
float var_125;
int var_126;
float var_127;
int var_128;
float var_129;
int var_130;
float var_131;
int var_132;
float var_133;
int var_134;
float var_135;
int var_136;
float var_137;
int var_138;
float var_139;
int var_140;
float var_141;
int var_142;
float var_143;
int var_144;
float var_145;
int var_146;
float var_147;
int var_148;
float var_149;
//---------
// dual vars
int adj_0 = 0;
bool adj_1 = 0;
int adj_2 = 0;
bool adj_3 = 0;
bool adj_4 = 0;
float adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
int adj_12 = 0;
bool adj_13 = 0;
int adj_14 = 0;
float adj_15 = 0;
int adj_16 = 0;
float adj_17 = 0;
int adj_18 = 0;
float adj_19 = 0;
df::float3 adj_20 = 0;
int adj_21 = 0;
float adj_22 = 0;
int adj_23 = 0;
float adj_24 = 0;
int adj_25 = 0;
float adj_26 = 0;
df::float3 adj_27 = 0;
int adj_28 = 0;
float adj_29 = 0;
int adj_30 = 0;
float adj_31 = 0;
int adj_32 = 0;
float adj_33 = 0;
int adj_34 = 0;
int adj_35 = 0;
float adj_36 = 0;
quat adj_37 = 0;
df::float3 adj_38 = 0;
df::float3 adj_39 = 0;
float adj_40 = 0;
quat adj_41 = 0;
quat adj_42 = 0;
float adj_43 = 0;
quat adj_44 = 0;
quat adj_45 = 0;
quat adj_46 = 0;
quat adj_47 = 0;
int adj_48 = 0;
float adj_49 = 0;
int adj_50 = 0;
float adj_51 = 0;
int adj_52 = 0;
float adj_53 = 0;
int adj_54 = 0;
float adj_55 = 0;
int adj_56 = 0;
float adj_57 = 0;
int adj_58 = 0;
float adj_59 = 0;
int adj_60 = 0;
float adj_61 = 0;
int adj_62 = 0;
bool adj_63 = 0;
int adj_64 = 0;
float adj_65 = 0;
int adj_66 = 0;
float adj_67 = 0;
int adj_68 = 0;
float adj_69 = 0;
df::float3 adj_70 = 0;
int adj_71 = 0;
float adj_72 = 0;
int adj_73 = 0;
float adj_74 = 0;
int adj_75 = 0;
int adj_76 = 0;
float adj_77 = 0;
df::float3 adj_78 = 0;
int adj_79 = 0;
float adj_80 = 0;
int adj_81 = 0;
float adj_82 = 0;
int adj_83 = 0;
float adj_84 = 0;
df::float3 adj_85 = 0;
int adj_86 = 0;
float adj_87 = 0;
int adj_88 = 0;
float adj_89 = 0;
int adj_90 = 0;
float adj_91 = 0;
df::float3 adj_92 = 0;
df::float3 adj_93 = 0;
df::float3 adj_94 = 0;
df::float3 adj_95 = 0;
df::float3 adj_96 = 0;
int adj_97 = 0;
float adj_98 = 0;
int adj_99 = 0;
float adj_100 = 0;
int adj_101 = 0;
float adj_102 = 0;
df::float3 adj_103 = 0;
df::float3 adj_104 = 0;
df::float3 adj_105 = 0;
int adj_106 = 0;
float adj_107 = 0;
int adj_108 = 0;
float adj_109 = 0;
int adj_110 = 0;
float adj_111 = 0;
int adj_112 = 0;
int adj_113 = 0;
float adj_114 = 0;
quat adj_115 = 0;
quat adj_116 = 0;
quat adj_117 = 0;
quat adj_118 = 0;
df::float3 adj_119 = 0;
df::float3 adj_120 = 0;
quat adj_121 = 0;
quat adj_122 = 0;
quat adj_123 = 0;
int adj_124 = 0;
float adj_125 = 0;
int adj_126 = 0;
float adj_127 = 0;
int adj_128 = 0;
float adj_129 = 0;
int adj_130 = 0;
float adj_131 = 0;
int adj_132 = 0;
float adj_133 = 0;
int adj_134 = 0;
float adj_135 = 0;
int adj_136 = 0;
float adj_137 = 0;
int adj_138 = 0;
float adj_139 = 0;
int adj_140 = 0;
float adj_141 = 0;
int adj_142 = 0;
float adj_143 = 0;
int adj_144 = 0;
float adj_145 = 0;
int adj_146 = 0;
float adj_147 = 0;
int adj_148 = 0;
float adj_149 = 0;
//---------
// forward
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
var_5 = df::load(var_joint_qdd, var_dof_start);
var_6 = df::load(var_joint_qd, var_dof_start);
var_7 = df::load(var_joint_q, var_coord_start);
var_8 = df::mul(var_5, var_dt);
var_9 = df::add(var_6, var_8);
var_10 = df::mul(var_9, var_dt);
var_11 = df::add(var_7, var_10);
df::store(var_joint_qd_new, var_dof_start, var_9);
df::store(var_joint_q_new, var_coord_start, var_11);
}
var_13 = (var_type == var_12);
if (var_13) {
var_14 = df::add(var_dof_start, var_0);
var_15 = df::load(var_joint_qdd, var_14);
var_16 = df::add(var_dof_start, var_2);
var_17 = df::load(var_joint_qdd, var_16);
var_18 = df::add(var_dof_start, var_12);
var_19 = df::load(var_joint_qdd, var_18);
var_20 = df::float3(var_15, var_17, var_19);
var_21 = df::add(var_dof_start, var_0);
var_22 = df::load(var_joint_qd, var_21);
var_23 = df::add(var_dof_start, var_2);
var_24 = df::load(var_joint_qd, var_23);
var_25 = df::add(var_dof_start, var_12);
var_26 = df::load(var_joint_qd, var_25);
var_27 = df::float3(var_22, var_24, var_26);
var_28 = df::add(var_coord_start, var_0);
var_29 = df::load(var_joint_q, var_28);
var_30 = df::add(var_coord_start, var_2);
var_31 = df::load(var_joint_q, var_30);
var_32 = df::add(var_coord_start, var_12);
var_33 = df::load(var_joint_q, var_32);
var_35 = df::add(var_coord_start, var_34);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::quat(var_29, var_31, var_33, var_36);
var_38 = df::mul(var_20, var_dt);
var_39 = df::add(var_27, var_38);
var_41 = df::quat(var_39, var_40);
var_42 = df::mul(var_41, var_37);
var_44 = df::mul(var_42, var_43);
var_45 = df::mul(var_44, var_dt);
var_46 = df::add(var_37, var_45);
var_47 = df::normalize(var_46);
var_48 = df::add(var_coord_start, var_0);
var_49 = df::index(var_47, var_0);
df::store(var_joint_q_new, var_48, var_49);
var_50 = df::add(var_coord_start, var_2);
var_51 = df::index(var_47, var_2);
df::store(var_joint_q_new, var_50, var_51);
var_52 = df::add(var_coord_start, var_12);
var_53 = df::index(var_47, var_12);
df::store(var_joint_q_new, var_52, var_53);
var_54 = df::add(var_coord_start, var_34);
var_55 = df::index(var_47, var_34);
df::store(var_joint_q_new, var_54, var_55);
var_56 = df::add(var_dof_start, var_0);
var_57 = df::index(var_39, var_0);
df::store(var_joint_qd_new, var_56, var_57);
var_58 = df::add(var_dof_start, var_2);
var_59 = df::index(var_39, var_2);
df::store(var_joint_qd_new, var_58, var_59);
var_60 = df::add(var_dof_start, var_12);
var_61 = df::index(var_39, var_12);
df::store(var_joint_qd_new, var_60, var_61);
}
var_63 = (var_type == var_62);
if (var_63) {
var_64 = df::add(var_dof_start, var_0);
var_65 = df::load(var_joint_qdd, var_64);
var_66 = df::add(var_dof_start, var_2);
var_67 = df::load(var_joint_qdd, var_66);
var_68 = df::add(var_dof_start, var_12);
var_69 = df::load(var_joint_qdd, var_68);
var_70 = df::float3(var_65, var_67, var_69);
var_71 = df::add(var_dof_start, var_34);
var_72 = df::load(var_joint_qdd, var_71);
var_73 = df::add(var_dof_start, var_62);
var_74 = df::load(var_joint_qdd, var_73);
var_76 = df::add(var_dof_start, var_75);
var_77 = df::load(var_joint_qdd, var_76);
var_78 = df::float3(var_72, var_74, var_77);
var_79 = df::add(var_dof_start, var_0);
var_80 = df::load(var_joint_qd, var_79);
var_81 = df::add(var_dof_start, var_2);
var_82 = df::load(var_joint_qd, var_81);
var_83 = df::add(var_dof_start, var_12);
var_84 = df::load(var_joint_qd, var_83);
var_85 = df::float3(var_80, var_82, var_84);
var_86 = df::add(var_dof_start, var_34);
var_87 = df::load(var_joint_qd, var_86);
var_88 = df::add(var_dof_start, var_62);
var_89 = df::load(var_joint_qd, var_88);
var_90 = df::add(var_dof_start, var_75);
var_91 = df::load(var_joint_qd, var_90);
var_92 = df::float3(var_87, var_89, var_91);
var_93 = df::mul(var_70, var_dt);
var_94 = df::add(var_85, var_93);
var_95 = df::mul(var_78, var_dt);
var_96 = df::add(var_92, var_95);
var_97 = df::add(var_coord_start, var_0);
var_98 = df::load(var_joint_q, var_97);
var_99 = df::add(var_coord_start, var_2);
var_100 = df::load(var_joint_q, var_99);
var_101 = df::add(var_coord_start, var_12);
var_102 = df::load(var_joint_q, var_101);
var_103 = df::float3(var_98, var_100, var_102);
var_104 = df::cross(var_94, var_103);
var_105 = df::add(var_96, var_104);
var_106 = df::add(var_coord_start, var_34);
var_107 = df::load(var_joint_q, var_106);
var_108 = df::add(var_coord_start, var_62);
var_109 = df::load(var_joint_q, var_108);
var_110 = df::add(var_coord_start, var_75);
var_111 = df::load(var_joint_q, var_110);
var_113 = df::add(var_coord_start, var_112);
var_114 = df::load(var_joint_q, var_113);
var_115 = df::quat(var_107, var_109, var_111, var_114);
var_116 = df::quat(var_94, var_40);
var_117 = df::mul(var_116, var_115);
var_118 = df::mul(var_117, var_43);
var_119 = df::mul(var_105, var_dt);
var_120 = df::add(var_103, var_119);
var_121 = df::mul(var_118, var_dt);
var_122 = df::add(var_115, var_121);
var_123 = df::normalize(var_122);
var_124 = df::add(var_coord_start, var_0);
var_125 = df::index(var_120, var_0);
df::store(var_joint_q_new, var_124, var_125);
var_126 = df::add(var_coord_start, var_2);
var_127 = df::index(var_120, var_2);
df::store(var_joint_q_new, var_126, var_127);
var_128 = df::add(var_coord_start, var_12);
var_129 = df::index(var_120, var_12);
df::store(var_joint_q_new, var_128, var_129);
var_130 = df::add(var_coord_start, var_34);
var_131 = df::index(var_123, var_0);
df::store(var_joint_q_new, var_130, var_131);
var_132 = df::add(var_coord_start, var_62);
var_133 = df::index(var_123, var_2);
df::store(var_joint_q_new, var_132, var_133);
var_134 = df::add(var_coord_start, var_75);
var_135 = df::index(var_123, var_12);
df::store(var_joint_q_new, var_134, var_135);
var_136 = df::add(var_coord_start, var_112);
var_137 = df::index(var_123, var_34);
df::store(var_joint_q_new, var_136, var_137);
var_138 = df::add(var_dof_start, var_0);
var_139 = df::index(var_94, var_0);
df::store(var_joint_qd_new, var_138, var_139);
var_140 = df::add(var_dof_start, var_2);
var_141 = df::index(var_94, var_2);
df::store(var_joint_qd_new, var_140, var_141);
var_142 = df::add(var_dof_start, var_12);
var_143 = df::index(var_94, var_12);
df::store(var_joint_qd_new, var_142, var_143);
var_144 = df::add(var_dof_start, var_34);
var_145 = df::index(var_96, var_0);
df::store(var_joint_qd_new, var_144, var_145);
var_146 = df::add(var_dof_start, var_62);
var_147 = df::index(var_96, var_2);
df::store(var_joint_qd_new, var_146, var_147);
var_148 = df::add(var_dof_start, var_75);
var_149 = df::index(var_96, var_12);
df::store(var_joint_qd_new, var_148, var_149);
}
goto label0;
//---------
// reverse
label0:;
adj_0 += adj_ret;
if (var_63) {
df::adj_store(var_joint_qd_new, var_148, var_149, adj_joint_qd_new, adj_148, adj_149);
df::adj_index(var_96, var_12, adj_96, adj_12, adj_149);
df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_148);
df::adj_store(var_joint_qd_new, var_146, var_147, adj_joint_qd_new, adj_146, adj_147);
df::adj_index(var_96, var_2, adj_96, adj_2, adj_147);
df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_146);
df::adj_store(var_joint_qd_new, var_144, var_145, adj_joint_qd_new, adj_144, adj_145);
df::adj_index(var_96, var_0, adj_96, adj_0, adj_145);
df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_144);
df::adj_store(var_joint_qd_new, var_142, var_143, adj_joint_qd_new, adj_142, adj_143);
df::adj_index(var_94, var_12, adj_94, adj_12, adj_143);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_142);
df::adj_store(var_joint_qd_new, var_140, var_141, adj_joint_qd_new, adj_140, adj_141);
df::adj_index(var_94, var_2, adj_94, adj_2, adj_141);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_140);
df::adj_store(var_joint_qd_new, var_138, var_139, adj_joint_qd_new, adj_138, adj_139);
df::adj_index(var_94, var_0, adj_94, adj_0, adj_139);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_138);
df::adj_store(var_joint_q_new, var_136, var_137, adj_joint_q_new, adj_136, adj_137);
df::adj_index(var_123, var_34, adj_123, adj_34, adj_137);
df::adj_add(var_coord_start, var_112, adj_coord_start, adj_112, adj_136);
df::adj_store(var_joint_q_new, var_134, var_135, adj_joint_q_new, adj_134, adj_135);
df::adj_index(var_123, var_12, adj_123, adj_12, adj_135);
df::adj_add(var_coord_start, var_75, adj_coord_start, adj_75, adj_134);
df::adj_store(var_joint_q_new, var_132, var_133, adj_joint_q_new, adj_132, adj_133);
df::adj_index(var_123, var_2, adj_123, adj_2, adj_133);
df::adj_add(var_coord_start, var_62, adj_coord_start, adj_62, adj_132);
df::adj_store(var_joint_q_new, var_130, var_131, adj_joint_q_new, adj_130, adj_131);
df::adj_index(var_123, var_0, adj_123, adj_0, adj_131);
df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_130);
df::adj_store(var_joint_q_new, var_128, var_129, adj_joint_q_new, adj_128, adj_129);
df::adj_index(var_120, var_12, adj_120, adj_12, adj_129);
df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_128);
df::adj_store(var_joint_q_new, var_126, var_127, adj_joint_q_new, adj_126, adj_127);
df::adj_index(var_120, var_2, adj_120, adj_2, adj_127);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_126);
df::adj_store(var_joint_q_new, var_124, var_125, adj_joint_q_new, adj_124, adj_125);
df::adj_index(var_120, var_0, adj_120, adj_0, adj_125);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_124);
df::adj_normalize(var_122, adj_122, adj_123);
df::adj_add(var_115, var_121, adj_115, adj_121, adj_122);
df::adj_mul(var_118, var_dt, adj_118, adj_dt, adj_121);
df::adj_add(var_103, var_119, adj_103, adj_119, adj_120);
df::adj_mul(var_105, var_dt, adj_105, adj_dt, adj_119);
df::adj_mul(var_117, var_43, adj_117, adj_43, adj_118);
df::adj_mul(var_116, var_115, adj_116, adj_115, adj_117);
df::adj_quat(var_94, var_40, adj_94, adj_40, adj_116);
df::adj_quat(var_107, var_109, var_111, var_114, adj_107, adj_109, adj_111, adj_114, adj_115);
df::adj_load(var_joint_q, var_113, adj_joint_q, adj_113, adj_114);
df::adj_add(var_coord_start, var_112, adj_coord_start, adj_112, adj_113);
df::adj_load(var_joint_q, var_110, adj_joint_q, adj_110, adj_111);
df::adj_add(var_coord_start, var_75, adj_coord_start, adj_75, adj_110);
df::adj_load(var_joint_q, var_108, adj_joint_q, adj_108, adj_109);
df::adj_add(var_coord_start, var_62, adj_coord_start, adj_62, adj_108);
df::adj_load(var_joint_q, var_106, adj_joint_q, adj_106, adj_107);
df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_106);
df::adj_add(var_96, var_104, adj_96, adj_104, adj_105);
df::adj_cross(var_94, var_103, adj_94, adj_103, adj_104);
df::adj_float3(var_98, var_100, var_102, adj_98, adj_100, adj_102, adj_103);
df::adj_load(var_joint_q, var_101, adj_joint_q, adj_101, adj_102);
df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_101);
df::adj_load(var_joint_q, var_99, adj_joint_q, adj_99, adj_100);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_99);
df::adj_load(var_joint_q, var_97, adj_joint_q, adj_97, adj_98);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_97);
df::adj_add(var_92, var_95, adj_92, adj_95, adj_96);
df::adj_mul(var_78, var_dt, adj_78, adj_dt, adj_95);
df::adj_add(var_85, var_93, adj_85, adj_93, adj_94);
df::adj_mul(var_70, var_dt, adj_70, adj_dt, adj_93);
df::adj_float3(var_87, var_89, var_91, adj_87, adj_89, adj_91, adj_92);
df::adj_load(var_joint_qd, var_90, adj_joint_qd, adj_90, adj_91);
df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_90);
df::adj_load(var_joint_qd, var_88, adj_joint_qd, adj_88, adj_89);
df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_88);
df::adj_load(var_joint_qd, var_86, adj_joint_qd, adj_86, adj_87);
df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_86);
df::adj_float3(var_80, var_82, var_84, adj_80, adj_82, adj_84, adj_85);
df::adj_load(var_joint_qd, var_83, adj_joint_qd, adj_83, adj_84);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_83);
df::adj_load(var_joint_qd, var_81, adj_joint_qd, adj_81, adj_82);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_81);
df::adj_load(var_joint_qd, var_79, adj_joint_qd, adj_79, adj_80);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_79);
df::adj_float3(var_72, var_74, var_77, adj_72, adj_74, adj_77, adj_78);
df::adj_load(var_joint_qdd, var_76, adj_joint_qdd, adj_76, adj_77);
df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_76);
df::adj_load(var_joint_qdd, var_73, adj_joint_qdd, adj_73, adj_74);
df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_73);
df::adj_load(var_joint_qdd, var_71, adj_joint_qdd, adj_71, adj_72);
df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_71);
df::adj_float3(var_65, var_67, var_69, adj_65, adj_67, adj_69, adj_70);
df::adj_load(var_joint_qdd, var_68, adj_joint_qdd, adj_68, adj_69);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_68);
df::adj_load(var_joint_qdd, var_66, adj_joint_qdd, adj_66, adj_67);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_66);
df::adj_load(var_joint_qdd, var_64, adj_joint_qdd, adj_64, adj_65);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_64);
}
if (var_13) {
df::adj_store(var_joint_qd_new, var_60, var_61, adj_joint_qd_new, adj_60, adj_61);
df::adj_index(var_39, var_12, adj_39, adj_12, adj_61);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_60);
df::adj_store(var_joint_qd_new, var_58, var_59, adj_joint_qd_new, adj_58, adj_59);
df::adj_index(var_39, var_2, adj_39, adj_2, adj_59);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_58);
df::adj_store(var_joint_qd_new, var_56, var_57, adj_joint_qd_new, adj_56, adj_57);
df::adj_index(var_39, var_0, adj_39, adj_0, adj_57);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_56);
df::adj_store(var_joint_q_new, var_54, var_55, adj_joint_q_new, adj_54, adj_55);
df::adj_index(var_47, var_34, adj_47, adj_34, adj_55);
df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_54);
df::adj_store(var_joint_q_new, var_52, var_53, adj_joint_q_new, adj_52, adj_53);
df::adj_index(var_47, var_12, adj_47, adj_12, adj_53);
df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_52);
df::adj_store(var_joint_q_new, var_50, var_51, adj_joint_q_new, adj_50, adj_51);
df::adj_index(var_47, var_2, adj_47, adj_2, adj_51);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_50);
df::adj_store(var_joint_q_new, var_48, var_49, adj_joint_q_new, adj_48, adj_49);
df::adj_index(var_47, var_0, adj_47, adj_0, adj_49);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_48);
df::adj_normalize(var_46, adj_46, adj_47);
df::adj_add(var_37, var_45, adj_37, adj_45, adj_46);
df::adj_mul(var_44, var_dt, adj_44, adj_dt, adj_45);
df::adj_mul(var_42, var_43, adj_42, adj_43, adj_44);
df::adj_mul(var_41, var_37, adj_41, adj_37, adj_42);
df::adj_quat(var_39, var_40, adj_39, adj_40, adj_41);
df::adj_add(var_27, var_38, adj_27, adj_38, adj_39);
df::adj_mul(var_20, var_dt, adj_20, adj_dt, adj_38);
df::adj_quat(var_29, var_31, var_33, var_36, adj_29, adj_31, adj_33, adj_36, adj_37);
df::adj_load(var_joint_q, var_35, adj_joint_q, adj_35, adj_36);
df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_35);
df::adj_load(var_joint_q, var_32, adj_joint_q, adj_32, adj_33);
df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_32);
df::adj_load(var_joint_q, var_30, adj_joint_q, adj_30, adj_31);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_30);
df::adj_load(var_joint_q, var_28, adj_joint_q, adj_28, adj_29);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_28);
df::adj_float3(var_22, var_24, var_26, adj_22, adj_24, adj_26, adj_27);
df::adj_load(var_joint_qd, var_25, adj_joint_qd, adj_25, adj_26);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_25);
df::adj_load(var_joint_qd, var_23, adj_joint_qd, adj_23, adj_24);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_23);
df::adj_load(var_joint_qd, var_21, adj_joint_qd, adj_21, adj_22);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_21);
df::adj_float3(var_15, var_17, var_19, adj_15, adj_17, adj_19, adj_20);
df::adj_load(var_joint_qdd, var_18, adj_joint_qdd, adj_18, adj_19);
df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_18);
df::adj_load(var_joint_qdd, var_16, adj_joint_qdd, adj_16, adj_17);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_16);
df::adj_load(var_joint_qdd, var_14, adj_joint_qdd, adj_14, adj_15);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_14);
}
if (var_4) {
df::adj_store(var_joint_q_new, var_coord_start, var_11, adj_joint_q_new, adj_coord_start, adj_11);
df::adj_store(var_joint_qd_new, var_dof_start, var_9, adj_joint_qd_new, adj_dof_start, adj_9);
df::adj_add(var_7, var_10, adj_7, adj_10, adj_11);
df::adj_mul(var_9, var_dt, adj_9, adj_dt, adj_10);
df::adj_add(var_6, var_8, adj_6, adj_8, adj_9);
df::adj_mul(var_5, var_dt, adj_5, adj_dt, adj_8);
df::adj_load(var_joint_q, var_coord_start, adj_joint_q, adj_coord_start, adj_7);
df::adj_load(var_joint_qd, var_dof_start, adj_joint_qd, adj_dof_start, adj_6);
df::adj_load(var_joint_qdd, var_dof_start, adj_joint_qdd, adj_dof_start, adj_5);
}
return;
}
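// Forward kinematics for one link: composes the parent body transform (or the
// identity for a root link) with the fixed parent-to-joint transform X_pj and
// the joint's own transform, then stores the resulting link frame body_X_sc[i]
// and mass frame body_X_sm[i] (via X_cm).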
int compute_link_transform_cpu_func(
int var_i,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
spatial_transform* var_joint_X_pj,
spatial_transform* var_joint_X_cm,
df::float3* var_joint_axis,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm)
{
//---------
// primal vars
int var_0;
spatial_transform var_1;
const int var_2 = 0;
bool var_3;
spatial_transform var_4;
spatial_transform var_5;
int var_6;
df::float3 var_7;
int var_8;
int var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_transform var_12;
spatial_transform var_13;
spatial_transform var_14;
spatial_transform var_15;
//---------
// forward
var_0 = df::load(var_joint_parent, var_i);
var_1 = df::spatial_transform_identity();
var_3 = (var_0 >= var_2);
if (var_3) {
var_4 = df::load(var_body_X_sc, var_0);
}
var_5 = df::select(var_3, var_1, var_4);
var_6 = df::load(var_joint_type, var_i);
var_7 = df::load(var_joint_axis, var_i);
var_8 = df::load(var_joint_q_start, var_i);
var_9 = df::load(var_joint_qd_start, var_i);
var_10 = jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8);
var_11 = df::load(var_joint_X_pj, var_i);
var_12 = df::spatial_transform_multiply(var_11, var_10);
var_13 = df::spatial_transform_multiply(var_5, var_12);
var_14 = df::load(var_joint_X_cm, var_i);
var_15 = df::spatial_transform_multiply(var_13, var_14);
df::store(var_body_X_sc, var_i, var_13);
df::store(var_body_X_sm, var_i, var_15);
return var_2;
}
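// Reverse-mode adjoint of compute_link_transform_cpu_func: back-propagates
// through the transform products and the joint transform (via
// adj_jcalc_transform_cpu_func) into the joint and body transform gradients.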
void adj_compute_link_transform_cpu_func(
int var_i,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
spatial_transform* var_joint_X_pj,
spatial_transform* var_joint_X_cm,
df::float3* var_joint_axis,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
int & adj_i,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
spatial_transform* adj_joint_X_pj,
spatial_transform* adj_joint_X_cm,
df::float3* adj_joint_axis,
spatial_transform* adj_body_X_sc,
spatial_transform* adj_body_X_sm,
int & adj_ret)
{
//---------
// primal vars
int var_0;
spatial_transform var_1;
const int var_2 = 0;
bool var_3;
spatial_transform var_4;
spatial_transform var_5;
int var_6;
df::float3 var_7;
int var_8;
int var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_transform var_12;
spatial_transform var_13;
spatial_transform var_14;
spatial_transform var_15;
//---------
// dual vars
int adj_0 = 0;
spatial_transform adj_1 = 0;
int adj_2 = 0;
bool adj_3 = 0;
spatial_transform adj_4 = 0;
spatial_transform adj_5 = 0;
int adj_6 = 0;
df::float3 adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
spatial_transform adj_10 = 0;
spatial_transform adj_11 = 0;
spatial_transform adj_12 = 0;
spatial_transform adj_13 = 0;
spatial_transform adj_14 = 0;
spatial_transform adj_15 = 0;
//---------
// forward
var_0 = df::load(var_joint_parent, var_i);
var_1 = df::spatial_transform_identity();
var_3 = (var_0 >= var_2);
if (var_3) {
var_4 = df::load(var_body_X_sc, var_0);
}
var_5 = df::select(var_3, var_1, var_4);
var_6 = df::load(var_joint_type, var_i);
var_7 = df::load(var_joint_axis, var_i);
var_8 = df::load(var_joint_q_start, var_i);
var_9 = df::load(var_joint_qd_start, var_i);
var_10 = jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8);
var_11 = df::load(var_joint_X_pj, var_i);
var_12 = df::spatial_transform_multiply(var_11, var_10);
var_13 = df::spatial_transform_multiply(var_5, var_12);
var_14 = df::load(var_joint_X_cm, var_i);
var_15 = df::spatial_transform_multiply(var_13, var_14);
df::store(var_body_X_sc, var_i, var_13);
df::store(var_body_X_sm, var_i, var_15);
goto label0;
//---------
// reverse
label0:;
adj_2 += adj_ret;
df::adj_store(var_body_X_sm, var_i, var_15, adj_body_X_sm, adj_i, adj_15);
df::adj_store(var_body_X_sc, var_i, var_13, adj_body_X_sc, adj_i, adj_13);
df::adj_spatial_transform_multiply(var_13, var_14, adj_13, adj_14, adj_15);
df::adj_load(var_joint_X_cm, var_i, adj_joint_X_cm, adj_i, adj_14);
df::adj_spatial_transform_multiply(var_5, var_12, adj_5, adj_12, adj_13);
df::adj_spatial_transform_multiply(var_11, var_10, adj_11, adj_10, adj_12);
df::adj_load(var_joint_X_pj, var_i, adj_joint_X_pj, adj_i, adj_11);
adj_jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8, adj_6, adj_7, adj_joint_q, adj_8, adj_10);
df::adj_load(var_joint_qd_start, var_i, adj_joint_qd_start, adj_i, adj_9);
df::adj_load(var_joint_q_start, var_i, adj_joint_q_start, adj_i, adj_8);
df::adj_load(var_joint_axis, var_i, adj_joint_axis, adj_i, adj_7);
df::adj_load(var_joint_type, var_i, adj_joint_type, adj_i, adj_6);
df::adj_select(var_3, var_1, var_4, adj_3, adj_1, adj_4, adj_5);
if (var_3) {
df::adj_load(var_body_X_sc, var_0, adj_body_X_sc, adj_0, adj_4);
}
df::adj_load(var_joint_parent, var_i, adj_joint_parent, adj_i, adj_0);
return;
}
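// Computes per-link spatial quantities: the joint motion subspace and joint
// velocity (jcalc_motion), the link spatial velocity v_s and velocity-product
// acceleration a_s propagated from the parent, the world-frame spatial
// inertia I_s, and the bias force f_s = I*a + v x* (I*v) minus the gravity
// wrench applied at the link's mass frame.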
int compute_link_velocity_cpu_func(
int var_i,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_qd_start,
float* var_joint_qd,
df::float3* var_joint_axis,
spatial_matrix* var_body_I_m,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
spatial_transform* var_joint_X_pj,
df::float3* var_gravity,
spatial_vector* var_joint_S_s,
spatial_matrix* var_body_I_s,
spatial_vector* var_body_v_s,
spatial_vector* var_body_f_s,
spatial_vector* var_body_a_s)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
int var_2;
int var_3;
spatial_transform var_4;
spatial_transform var_5;
const int var_6 = 0;
bool var_7;
spatial_transform var_8;
spatial_transform var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_vector var_12;
spatial_vector var_13;
spatial_vector var_14;
bool var_15;
spatial_vector var_16;
spatial_vector var_17;
spatial_vector var_18;
spatial_vector var_19;
spatial_vector var_20;
spatial_vector var_21;
spatial_vector var_22;
spatial_transform var_23;
spatial_matrix var_24;
df::float3 var_25;
const int var_26 = 3;
float var_27;
df::float3 var_28;
spatial_vector var_29;
spatial_vector var_30;
df::float3 var_31;
quat var_32;
spatial_transform var_33;
spatial_vector var_34;
spatial_matrix var_35;
spatial_vector var_36;
spatial_vector var_37;
spatial_vector var_38;
spatial_vector var_39;
spatial_vector var_40;
//---------
// forward
var_0 = df::load(var_joint_type, var_i);
var_1 = df::load(var_joint_axis, var_i);
var_2 = df::load(var_joint_parent, var_i);
var_3 = df::load(var_joint_qd_start, var_i);
var_4 = df::load(var_body_X_sc, var_i);
var_5 = df::spatial_transform_identity();
var_7 = (var_2 >= var_6);
if (var_7) {
var_8 = df::load(var_body_X_sc, var_2);
}
var_9 = df::select(var_7, var_5, var_8);
var_10 = df::load(var_joint_X_pj, var_i);
var_11 = df::spatial_transform_multiply(var_9, var_10);
var_12 = jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3);
var_13 = df::spatial_vector();
var_14 = df::spatial_vector();
var_15 = (var_2 >= var_6);
if (var_15) {
var_16 = df::load(var_body_v_s, var_2);
var_17 = df::load(var_body_a_s, var_2);
}
var_18 = df::select(var_15, var_13, var_16);
var_19 = df::select(var_15, var_14, var_17);
var_20 = df::add(var_18, var_12);
var_21 = df::spatial_cross(var_20, var_12);
var_22 = df::add(var_19, var_21);
var_23 = df::load(var_body_X_sm, var_i);
var_24 = df::load(var_body_I_m, var_i);
var_25 = df::load(var_gravity, var_6);
var_27 = df::index(var_24, var_26, var_26);
var_28 = df::float3();
var_29 = df::spatial_vector(var_28, var_25);
var_30 = df::mul(var_29, var_27);
var_31 = df::spatial_transform_get_translation(var_23);
var_32 = df::quat_identity();
var_33 = df::spatial_transform(var_31, var_32);
var_34 = spatial_transform_wrench_cpu_func(var_33, var_30);
var_35 = spatial_transform_inertia_cpu_func(var_23, var_24);
var_36 = df::mul(var_35, var_22);
var_37 = df::mul(var_35, var_20);
var_38 = df::spatial_cross_dual(var_20, var_37);
var_39 = df::add(var_36, var_38);
df::store(var_body_v_s, var_i, var_20);
df::store(var_body_a_s, var_i, var_22);
var_40 = df::sub(var_39, var_34);
df::store(var_body_f_s, var_i, var_40);
df::store(var_body_I_s, var_i, var_35);
return var_6;
}
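// Reverse-mode adjoint of compute_link_velocity_cpu_func: replays the forward
// velocity/inertia computation, then accumulates gradients through the stores,
// the inertia transform and the joint motion calculation.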
void adj_compute_link_velocity_cpu_func(
int var_i,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_qd_start,
float* var_joint_qd,
df::float3* var_joint_axis,
spatial_matrix* var_body_I_m,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
spatial_transform* var_joint_X_pj,
df::float3* var_gravity,
spatial_vector* var_joint_S_s,
spatial_matrix* var_body_I_s,
spatial_vector* var_body_v_s,
spatial_vector* var_body_f_s,
spatial_vector* var_body_a_s,
int & adj_i,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_qd_start,
float* adj_joint_qd,
df::float3* adj_joint_axis,
spatial_matrix* adj_body_I_m,
spatial_transform* adj_body_X_sc,
spatial_transform* adj_body_X_sm,
spatial_transform* adj_joint_X_pj,
df::float3* adj_gravity,
spatial_vector* adj_joint_S_s,
spatial_matrix* adj_body_I_s,
spatial_vector* adj_body_v_s,
spatial_vector* adj_body_f_s,
spatial_vector* adj_body_a_s,
int & adj_ret)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
int var_2;
int var_3;
spatial_transform var_4;
spatial_transform var_5;
const int var_6 = 0;
bool var_7;
spatial_transform var_8;
spatial_transform var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_vector var_12;
spatial_vector var_13;
spatial_vector var_14;
bool var_15;
spatial_vector var_16;
spatial_vector var_17;
spatial_vector var_18;
spatial_vector var_19;
spatial_vector var_20;
spatial_vector var_21;
spatial_vector var_22;
spatial_transform var_23;
spatial_matrix var_24;
df::float3 var_25;
const int var_26 = 3;
float var_27;
df::float3 var_28;
spatial_vector var_29;
spatial_vector var_30;
df::float3 var_31;
quat var_32;
spatial_transform var_33;
spatial_vector var_34;
spatial_matrix var_35;
spatial_vector var_36;
spatial_vector var_37;
spatial_vector var_38;
spatial_vector var_39;
spatial_vector var_40;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
spatial_transform adj_4 = 0;
spatial_transform adj_5 = 0;
int adj_6 = 0;
bool adj_7 = 0;
spatial_transform adj_8 = 0;
spatial_transform adj_9 = 0;
spatial_transform adj_10 = 0;
spatial_transform adj_11 = 0;
spatial_vector adj_12 = 0;
spatial_vector adj_13 = 0;
spatial_vector adj_14 = 0;
bool adj_15 = 0;
spatial_vector adj_16 = 0;
spatial_vector adj_17 = 0;
spatial_vector adj_18 = 0;
spatial_vector adj_19 = 0;
spatial_vector adj_20 = 0;
spatial_vector adj_21 = 0;
spatial_vector adj_22 = 0;
spatial_transform adj_23 = 0;
spatial_matrix adj_24 = 0;
df::float3 adj_25 = 0;
int adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
spatial_vector adj_29 = 0;
spatial_vector adj_30 = 0;
df::float3 adj_31 = 0;
quat adj_32 = 0;
spatial_transform adj_33 = 0;
spatial_vector adj_34 = 0;
spatial_matrix adj_35 = 0;
spatial_vector adj_36 = 0;
spatial_vector adj_37 = 0;
spatial_vector adj_38 = 0;
spatial_vector adj_39 = 0;
spatial_vector adj_40 = 0;
//---------
// forward
var_0 = df::load(var_joint_type, var_i);
var_1 = df::load(var_joint_axis, var_i);
var_2 = df::load(var_joint_parent, var_i);
var_3 = df::load(var_joint_qd_start, var_i);
var_4 = df::load(var_body_X_sc, var_i);
var_5 = df::spatial_transform_identity();
var_7 = (var_2 >= var_6);
if (var_7) {
var_8 = df::load(var_body_X_sc, var_2);
}
var_9 = df::select(var_7, var_5, var_8);
var_10 = df::load(var_joint_X_pj, var_i);
var_11 = df::spatial_transform_multiply(var_9, var_10);
var_12 = jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3);
var_13 = df::spatial_vector();
var_14 = df::spatial_vector();
var_15 = (var_2 >= var_6);
if (var_15) {
var_16 = df::load(var_body_v_s, var_2);
var_17 = df::load(var_body_a_s, var_2);
}
var_18 = df::select(var_15, var_13, var_16);
var_19 = df::select(var_15, var_14, var_17);
var_20 = df::add(var_18, var_12);
var_21 = df::spatial_cross(var_20, var_12);
var_22 = df::add(var_19, var_21);
var_23 = df::load(var_body_X_sm, var_i);
var_24 = df::load(var_body_I_m, var_i);
var_25 = df::load(var_gravity, var_6);
var_27 = df::index(var_24, var_26, var_26);
var_28 = df::float3();
var_29 = df::spatial_vector(var_28, var_25);
var_30 = df::mul(var_29, var_27);
var_31 = df::spatial_transform_get_translation(var_23);
var_32 = df::quat_identity();
var_33 = df::spatial_transform(var_31, var_32);
var_34 = spatial_transform_wrench_cpu_func(var_33, var_30);
var_35 = spatial_transform_inertia_cpu_func(var_23, var_24);
var_36 = df::mul(var_35, var_22);
var_37 = df::mul(var_35, var_20);
var_38 = df::spatial_cross_dual(var_20, var_37);
var_39 = df::add(var_36, var_38);
df::store(var_body_v_s, var_i, var_20);
df::store(var_body_a_s, var_i, var_22);
var_40 = df::sub(var_39, var_34);
df::store(var_body_f_s, var_i, var_40);
df::store(var_body_I_s, var_i, var_35);
goto label0;
//---------
// reverse
label0:;
adj_6 += adj_ret;
df::adj_store(var_body_I_s, var_i, var_35, adj_body_I_s, adj_i, adj_35);
df::adj_store(var_body_f_s, var_i, var_40, adj_body_f_s, adj_i, adj_40);
df::adj_sub(var_39, var_34, adj_39, adj_34, adj_40);
df::adj_store(var_body_a_s, var_i, var_22, adj_body_a_s, adj_i, adj_22);
df::adj_store(var_body_v_s, var_i, var_20, adj_body_v_s, adj_i, adj_20);
df::adj_add(var_36, var_38, adj_36, adj_38, adj_39);
df::adj_spatial_cross_dual(var_20, var_37, adj_20, adj_37, adj_38);
df::adj_mul(var_35, var_20, adj_35, adj_20, adj_37);
df::adj_mul(var_35, var_22, adj_35, adj_22, adj_36);
adj_spatial_transform_inertia_cpu_func(var_23, var_24, adj_23, adj_24, adj_35);
adj_spatial_transform_wrench_cpu_func(var_33, var_30, adj_33, adj_30, adj_34);
df::adj_spatial_transform(var_31, var_32, adj_31, adj_32, adj_33);
df::adj_spatial_transform_get_translation(var_23, adj_23, adj_31);
df::adj_mul(var_29, var_27, adj_29, adj_27, adj_30);
df::adj_spatial_vector(var_28, var_25, adj_28, adj_25, adj_29);
df::adj_index(var_24, var_26, var_26, adj_24, adj_26, adj_26, adj_27);
df::adj_load(var_gravity, var_6, adj_gravity, adj_6, adj_25);
df::adj_load(var_body_I_m, var_i, adj_body_I_m, adj_i, adj_24);
df::adj_load(var_body_X_sm, var_i, adj_body_X_sm, adj_i, adj_23);
df::adj_add(var_19, var_21, adj_19, adj_21, adj_22);
df::adj_spatial_cross(var_20, var_12, adj_20, adj_12, adj_21);
df::adj_add(var_18, var_12, adj_18, adj_12, adj_20);
df::adj_select(var_15, var_14, var_17, adj_15, adj_14, adj_17, adj_19);
df::adj_select(var_15, var_13, var_16, adj_15, adj_13, adj_16, adj_18);
if (var_15) {
df::adj_load(var_body_a_s, var_2, adj_body_a_s, adj_2, adj_17);
df::adj_load(var_body_v_s, var_2, adj_body_v_s, adj_2, adj_16);
}
adj_jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3, adj_0, adj_1, adj_11, adj_joint_S_s, adj_joint_qd, adj_3, adj_12);
df::adj_spatial_transform_multiply(var_9, var_10, adj_9, adj_10, adj_11);
df::adj_load(var_joint_X_pj, var_i, adj_joint_X_pj, adj_i, adj_10);
df::adj_select(var_7, var_5, var_8, adj_7, adj_5, adj_8, adj_9);
if (var_7) {
df::adj_load(var_body_X_sc, var_2, adj_body_X_sc, adj_2, adj_8);
}
df::adj_load(var_body_X_sc, var_i, adj_body_X_sc, adj_i, adj_4);
df::adj_load(var_joint_qd_start, var_i, adj_joint_qd_start, adj_i, adj_3);
df::adj_load(var_joint_parent, var_i, adj_joint_parent, adj_i, adj_2);
df::adj_load(var_joint_axis, var_i, adj_joint_axis, adj_i, adj_1);
df::adj_load(var_joint_type, var_i, adj_joint_type, adj_i, adj_0);
return;
}
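// Computes joint-space torques for link index (joint_end - offset - 1): the body bias
// force and forces propagated from children are combined, projected onto the joint motion
// subspace via jcalc_tau_cpu_func, and the resulting spatial force is accumulated onto the
// parent body with an atomic add (the caller presumably varies `offset` to walk tip-to-root).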
int compute_link_tau_cpu_func(
int var_offset,
int var_joint_end,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_target_ke,
float* var_joint_target_kd,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
float* var_joint_limit_ke,
float* var_joint_limit_kd,
spatial_vector* var_joint_S_s,
spatial_vector* var_body_fb_s,
spatial_vector* var_body_ft_s,
float* var_tau)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
int var_4;
int var_5;
int var_6;
float var_7;
float var_8;
float var_9;
float var_10;
spatial_vector var_11;
spatial_vector var_12;
spatial_vector var_13;
int var_14;
const int var_15 = 0;
bool var_16;
//---------
// forward
var_0 = df::sub(var_joint_end, var_offset);
var_2 = df::sub(var_0, var_1);
var_3 = df::load(var_joint_type, var_2);
var_4 = df::load(var_joint_parent, var_2);
var_5 = df::load(var_joint_qd_start, var_2);
var_6 = df::load(var_joint_q_start, var_2);
var_7 = df::load(var_joint_target_ke, var_2);
var_8 = df::load(var_joint_target_kd, var_2);
var_9 = df::load(var_joint_limit_ke, var_2);
var_10 = df::load(var_joint_limit_kd, var_2);
var_11 = df::load(var_body_fb_s, var_2);
var_12 = df::load(var_body_ft_s, var_2);
var_13 = df::add(var_11, var_12);
var_14 = jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau);
var_16 = (var_4 >= var_15);
if (var_16) {
df::atomic_add(var_body_ft_s, var_4, var_13);
}
return var_15;
}
void adj_compute_link_tau_cpu_func(
int var_offset,
int var_joint_end,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_target_ke,
float* var_joint_target_kd,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
float* var_joint_limit_ke,
float* var_joint_limit_kd,
spatial_vector* var_joint_S_s,
spatial_vector* var_body_fb_s,
spatial_vector* var_body_ft_s,
float* var_tau,
int & adj_offset,
int & adj_joint_end,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_act,
float* adj_joint_target,
float* adj_joint_target_ke,
float* adj_joint_target_kd,
float* adj_joint_limit_lower,
float* adj_joint_limit_upper,
float* adj_joint_limit_ke,
float* adj_joint_limit_kd,
spatial_vector* adj_joint_S_s,
spatial_vector* adj_body_fb_s,
spatial_vector* adj_body_ft_s,
float* adj_tau,
int & adj_ret)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
int var_4;
int var_5;
int var_6;
float var_7;
float var_8;
float var_9;
float var_10;
spatial_vector var_11;
spatial_vector var_12;
spatial_vector var_13;
int var_14;
const int var_15 = 0;
bool var_16;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
spatial_vector adj_11 = 0;
spatial_vector adj_12 = 0;
spatial_vector adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
bool adj_16 = 0;
//---------
// forward
var_0 = df::sub(var_joint_end, var_offset);
var_2 = df::sub(var_0, var_1);
var_3 = df::load(var_joint_type, var_2);
var_4 = df::load(var_joint_parent, var_2);
var_5 = df::load(var_joint_qd_start, var_2);
var_6 = df::load(var_joint_q_start, var_2);
var_7 = df::load(var_joint_target_ke, var_2);
var_8 = df::load(var_joint_target_kd, var_2);
var_9 = df::load(var_joint_limit_ke, var_2);
var_10 = df::load(var_joint_limit_kd, var_2);
var_11 = df::load(var_body_fb_s, var_2);
var_12 = df::load(var_body_ft_s, var_2);
var_13 = df::add(var_11, var_12);
var_14 = jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau);
var_16 = (var_4 >= var_15);
if (var_16) {
df::atomic_add(var_body_ft_s, var_4, var_13);
}
goto label0;
//---------
// reverse
label0:;
adj_15 += adj_ret;
if (var_16) {
df::adj_atomic_add(var_body_ft_s, var_4, var_13, adj_body_ft_s, adj_4, adj_13);
}
adj_jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau, adj_3, adj_7, adj_8, adj_9, adj_10, adj_joint_S_s, adj_joint_q, adj_joint_qd, adj_joint_act, adj_joint_target, adj_joint_limit_lower, adj_joint_limit_upper, adj_6, adj_5, adj_13, adj_tau, adj_14);
df::adj_add(var_11, var_12, adj_11, adj_12, adj_13);
df::adj_load(var_body_ft_s, var_2, adj_body_ft_s, adj_2, adj_12);
df::adj_load(var_body_fb_s, var_2, adj_body_fb_s, adj_2, adj_11);
df::adj_load(var_joint_limit_kd, var_2, adj_joint_limit_kd, adj_2, adj_10);
df::adj_load(var_joint_limit_ke, var_2, adj_joint_limit_ke, adj_2, adj_9);
df::adj_load(var_joint_target_kd, var_2, adj_joint_target_kd, adj_2, adj_8);
df::adj_load(var_joint_target_ke, var_2, adj_joint_target_ke, adj_2, adj_7);
df::adj_load(var_joint_q_start, var_2, adj_joint_q_start, adj_2, adj_6);
df::adj_load(var_joint_qd_start, var_2, adj_joint_qd_start, adj_2, adj_5);
df::adj_load(var_joint_parent, var_2, adj_joint_parent, adj_2, adj_4);
df::adj_load(var_joint_type, var_2, adj_joint_type, adj_2, adj_3);
df::adj_sub(var_0, var_1, adj_0, adj_1, adj_2);
df::adj_sub(var_joint_end, var_offset, adj_joint_end, adj_offset, adj_0);
return;
}
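// Semi-implicit (symplectic) Euler step for particles:
//   a     = f * inv_mass + gravity * step(0 - inv_mass)
//   v_new = v + a * dt
//   x_new = x + v_new * dt
// The step() gate appears to disable gravity for particles with zero inverse mass
// (i.e. fixed/kinematic particles).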
void integrate_particles_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
df::float3* var_f,
float* var_w,
df::float3* var_gravity,
float var_dt,
df::float3* var_x_new,
df::float3* var_v_new)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
float var_4;
const int var_5 = 0;
df::float3 var_6;
df::float3 var_7;
const float var_8 = 0.0;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_3 = df::load(var_f, var_0);
var_4 = df::load(var_w, var_0);
var_6 = df::load(var_gravity, var_5);
var_7 = df::mul(var_3, var_4);
var_9 = df::sub(var_8, var_4);
var_10 = df::step(var_9);
var_11 = df::mul(var_6, var_10);
var_12 = df::add(var_7, var_11);
var_13 = df::mul(var_12, var_dt);
var_14 = df::add(var_2, var_13);
var_15 = df::mul(var_14, var_dt);
var_16 = df::add(var_1, var_15);
df::store(var_x_new, var_0, var_16);
df::store(var_v_new, var_0, var_14);
}
void integrate_particles_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
df::float3* var_f,
float* var_w,
df::float3* var_gravity,
float var_dt,
df::float3* var_x_new,
df::float3* var_v_new,
df::float3* adj_x,
df::float3* adj_v,
df::float3* adj_f,
float* adj_w,
df::float3* adj_gravity,
float adj_dt,
df::float3* adj_x_new,
df::float3* adj_v_new)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
float var_4;
const int var_5 = 0;
df::float3 var_6;
df::float3 var_7;
const float var_8 = 0.0;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
df::float3 adj_11 = 0;
df::float3 adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_3 = df::load(var_f, var_0);
var_4 = df::load(var_w, var_0);
var_6 = df::load(var_gravity, var_5);
var_7 = df::mul(var_3, var_4);
var_9 = df::sub(var_8, var_4);
var_10 = df::step(var_9);
var_11 = df::mul(var_6, var_10);
var_12 = df::add(var_7, var_11);
var_13 = df::mul(var_12, var_dt);
var_14 = df::add(var_2, var_13);
var_15 = df::mul(var_14, var_dt);
var_16 = df::add(var_1, var_15);
df::store(var_x_new, var_0, var_16);
df::store(var_v_new, var_0, var_14);
//---------
// reverse
df::adj_store(var_v_new, var_0, var_14, adj_v_new, adj_0, adj_14);
df::adj_store(var_x_new, var_0, var_16, adj_x_new, adj_0, adj_16);
df::adj_add(var_1, var_15, adj_1, adj_15, adj_16);
df::adj_mul(var_14, var_dt, adj_14, adj_dt, adj_15);
df::adj_add(var_2, var_13, adj_2, adj_13, adj_14);
df::adj_mul(var_12, var_dt, adj_12, adj_dt, adj_13);
df::adj_add(var_7, var_11, adj_7, adj_11, adj_12);
df::adj_mul(var_6, var_10, adj_6, adj_10, adj_11);
df::adj_step(var_9, adj_9, adj_10);
df::adj_sub(var_8, var_4, adj_8, adj_4, adj_9);
df::adj_mul(var_3, var_4, adj_3, adj_4, adj_7);
df::adj_load(var_gravity, var_5, adj_gravity, adj_5, adj_6);
df::adj_load(var_w, var_0, adj_w, adj_0, adj_4);
df::adj_load(var_f, var_0, adj_f, adj_0, adj_3);
df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
return;
}
// Python entry points
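// These entry points emulate a kernel launch on the CPU: they loop over `dim`, set the
// global s_threadIdx so that df::tid() returns the current loop index, and call the kernel
// with raw pointers obtained from the torch tensors via cast<>().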
void integrate_particles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_particles_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<df::float3*>(var_f),
cast<float*>(var_w),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_x_new),
cast<df::float3*>(var_v_new));
}
}
void integrate_particles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_f,
torch::Tensor adj_w,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_x_new,
torch::Tensor adj_v_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_particles_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<df::float3*>(var_f),
cast<float*>(var_w),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_x_new),
cast<df::float3*>(var_v_new),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<df::float3*>(adj_f),
cast<float*>(adj_w),
cast<df::float3*>(adj_gravity),
adj_dt,
cast<df::float3*>(adj_x_new),
cast<df::float3*>(adj_v_new));
}
}
// Python entry points
void integrate_particles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new);
void integrate_particles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_f,
torch::Tensor adj_w,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_x_new,
torch::Tensor adj_v_new);
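// Semi-implicit Euler step for rigid bodies: the linear update mirrors the particle kernel
// (gravity gated by nonzero(inv_m)); the angular velocity is updated in body frame using the
// inverse inertia tensor and applied torque, and the orientation is integrated as
//   q_new = normalize(q + 0.5 * quat(w_new, 0) * q * dt).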
void integrate_rigids_cpu_kernel_forward(
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
df::float3* var_rigid_f,
df::float3* var_rigid_t,
float* var_inv_m,
mat33* var_inv_I,
df::float3* var_gravity,
float var_dt,
df::float3* var_rigid_x_new,
quat* var_rigid_r_new,
df::float3* var_rigid_v_new,
df::float3* var_rigid_w_new)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
quat var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
float var_7;
mat33 var_8;
const int var_9 = 0;
df::float3 var_10;
df::float3 var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
const float var_25 = 0.0;
quat var_26;
quat var_27;
const float var_28 = 0.5;
quat var_29;
quat var_30;
quat var_31;
quat var_32;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_rigid_x, var_0);
var_2 = df::load(var_rigid_r, var_0);
var_3 = df::load(var_rigid_v, var_0);
var_4 = df::load(var_rigid_w, var_0);
var_5 = df::load(var_rigid_f, var_0);
var_6 = df::load(var_rigid_t, var_0);
var_7 = df::load(var_inv_m, var_0);
var_8 = df::load(var_inv_I, var_0);
var_10 = df::load(var_gravity, var_9);
var_11 = df::mul(var_5, var_7);
var_12 = df::nonzero(var_7);
var_13 = df::mul(var_10, var_12);
var_14 = df::add(var_11, var_13);
var_15 = df::mul(var_14, var_dt);
var_16 = df::add(var_3, var_15);
var_17 = df::mul(var_16, var_dt);
var_18 = df::add(var_1, var_17);
var_19 = df::rotate_inv(var_2, var_4);
var_20 = df::rotate_inv(var_2, var_6);
var_21 = df::mul(var_8, var_20);
var_22 = df::mul(var_21, var_dt);
var_23 = df::add(var_19, var_22);
var_24 = df::rotate(var_2, var_23);
var_26 = df::quat(var_24, var_25);
var_27 = df::mul(var_26, var_2);
var_29 = df::mul(var_27, var_28);
var_30 = df::mul(var_29, var_dt);
var_31 = df::add(var_2, var_30);
var_32 = df::normalize(var_31);
df::store(var_rigid_x_new, var_0, var_18);
df::store(var_rigid_r_new, var_0, var_32);
df::store(var_rigid_v_new, var_0, var_16);
df::store(var_rigid_w_new, var_0, var_24);
}
void integrate_rigids_cpu_kernel_backward(
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
df::float3* var_rigid_f,
df::float3* var_rigid_t,
float* var_inv_m,
mat33* var_inv_I,
df::float3* var_gravity,
float var_dt,
df::float3* var_rigid_x_new,
quat* var_rigid_r_new,
df::float3* var_rigid_v_new,
df::float3* var_rigid_w_new,
df::float3* adj_rigid_x,
quat* adj_rigid_r,
df::float3* adj_rigid_v,
df::float3* adj_rigid_w,
df::float3* adj_rigid_f,
df::float3* adj_rigid_t,
float* adj_inv_m,
mat33* adj_inv_I,
df::float3* adj_gravity,
float adj_dt,
df::float3* adj_rigid_x_new,
quat* adj_rigid_r_new,
df::float3* adj_rigid_v_new,
df::float3* adj_rigid_w_new)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
quat var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
float var_7;
mat33 var_8;
const int var_9 = 0;
df::float3 var_10;
df::float3 var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
const float var_25 = 0.0;
quat var_26;
quat var_27;
const float var_28 = 0.5;
quat var_29;
quat var_30;
quat var_31;
quat var_32;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
quat adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
float adj_7 = 0;
mat33 adj_8 = 0;
int adj_9 = 0;
df::float3 adj_10 = 0;
df::float3 adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
df::float3 adj_23 = 0;
df::float3 adj_24 = 0;
float adj_25 = 0;
quat adj_26 = 0;
quat adj_27 = 0;
float adj_28 = 0;
quat adj_29 = 0;
quat adj_30 = 0;
quat adj_31 = 0;
quat adj_32 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_rigid_x, var_0);
var_2 = df::load(var_rigid_r, var_0);
var_3 = df::load(var_rigid_v, var_0);
var_4 = df::load(var_rigid_w, var_0);
var_5 = df::load(var_rigid_f, var_0);
var_6 = df::load(var_rigid_t, var_0);
var_7 = df::load(var_inv_m, var_0);
var_8 = df::load(var_inv_I, var_0);
var_10 = df::load(var_gravity, var_9);
var_11 = df::mul(var_5, var_7);
var_12 = df::nonzero(var_7);
var_13 = df::mul(var_10, var_12);
var_14 = df::add(var_11, var_13);
var_15 = df::mul(var_14, var_dt);
var_16 = df::add(var_3, var_15);
var_17 = df::mul(var_16, var_dt);
var_18 = df::add(var_1, var_17);
var_19 = df::rotate_inv(var_2, var_4);
var_20 = df::rotate_inv(var_2, var_6);
var_21 = df::mul(var_8, var_20);
var_22 = df::mul(var_21, var_dt);
var_23 = df::add(var_19, var_22);
var_24 = df::rotate(var_2, var_23);
var_26 = df::quat(var_24, var_25);
var_27 = df::mul(var_26, var_2);
var_29 = df::mul(var_27, var_28);
var_30 = df::mul(var_29, var_dt);
var_31 = df::add(var_2, var_30);
var_32 = df::normalize(var_31);
df::store(var_rigid_x_new, var_0, var_18);
df::store(var_rigid_r_new, var_0, var_32);
df::store(var_rigid_v_new, var_0, var_16);
df::store(var_rigid_w_new, var_0, var_24);
//---------
// reverse
df::adj_store(var_rigid_w_new, var_0, var_24, adj_rigid_w_new, adj_0, adj_24);
df::adj_store(var_rigid_v_new, var_0, var_16, adj_rigid_v_new, adj_0, adj_16);
df::adj_store(var_rigid_r_new, var_0, var_32, adj_rigid_r_new, adj_0, adj_32);
df::adj_store(var_rigid_x_new, var_0, var_18, adj_rigid_x_new, adj_0, adj_18);
df::adj_normalize(var_31, adj_31, adj_32);
df::adj_add(var_2, var_30, adj_2, adj_30, adj_31);
df::adj_mul(var_29, var_dt, adj_29, adj_dt, adj_30);
df::adj_mul(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_mul(var_26, var_2, adj_26, adj_2, adj_27);
df::adj_quat(var_24, var_25, adj_24, adj_25, adj_26);
df::adj_rotate(var_2, var_23, adj_2, adj_23, adj_24);
df::adj_add(var_19, var_22, adj_19, adj_22, adj_23);
df::adj_mul(var_21, var_dt, adj_21, adj_dt, adj_22);
df::adj_mul(var_8, var_20, adj_8, adj_20, adj_21);
df::adj_rotate_inv(var_2, var_6, adj_2, adj_6, adj_20);
df::adj_rotate_inv(var_2, var_4, adj_2, adj_4, adj_19);
df::adj_add(var_1, var_17, adj_1, adj_17, adj_18);
df::adj_mul(var_16, var_dt, adj_16, adj_dt, adj_17);
df::adj_add(var_3, var_15, adj_3, adj_15, adj_16);
df::adj_mul(var_14, var_dt, adj_14, adj_dt, adj_15);
df::adj_add(var_11, var_13, adj_11, adj_13, adj_14);
df::adj_mul(var_10, var_12, adj_10, adj_12, adj_13);
df::adj_nonzero(var_7, adj_7, adj_12);
df::adj_mul(var_5, var_7, adj_5, adj_7, adj_11);
df::adj_load(var_gravity, var_9, adj_gravity, adj_9, adj_10);
df::adj_load(var_inv_I, var_0, adj_inv_I, adj_0, adj_8);
df::adj_load(var_inv_m, var_0, adj_inv_m, adj_0, adj_7);
df::adj_load(var_rigid_t, var_0, adj_rigid_t, adj_0, adj_6);
df::adj_load(var_rigid_f, var_0, adj_rigid_f, adj_0, adj_5);
df::adj_load(var_rigid_w, var_0, adj_rigid_w, adj_0, adj_4);
df::adj_load(var_rigid_v, var_0, adj_rigid_v, adj_0, adj_3);
df::adj_load(var_rigid_r, var_0, adj_rigid_r, adj_0, adj_2);
df::adj_load(var_rigid_x, var_0, adj_rigid_x, adj_0, adj_1);
return;
}
// Python entry points
void integrate_rigids_cpu_forward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_rigids_cpu_kernel_forward(
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<df::float3*>(var_rigid_f),
cast<df::float3*>(var_rigid_t),
cast<float*>(var_inv_m),
cast<mat33*>(var_inv_I),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_rigid_x_new),
cast<quat*>(var_rigid_r_new),
cast<df::float3*>(var_rigid_v_new),
cast<df::float3*>(var_rigid_w_new));
}
}
void integrate_rigids_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t,
torch::Tensor adj_inv_m,
torch::Tensor adj_inv_I,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_rigid_x_new,
torch::Tensor adj_rigid_r_new,
torch::Tensor adj_rigid_v_new,
torch::Tensor adj_rigid_w_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_rigids_cpu_kernel_backward(
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<df::float3*>(var_rigid_f),
cast<df::float3*>(var_rigid_t),
cast<float*>(var_inv_m),
cast<mat33*>(var_inv_I),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_rigid_x_new),
cast<quat*>(var_rigid_r_new),
cast<df::float3*>(var_rigid_v_new),
cast<df::float3*>(var_rigid_w_new),
cast<df::float3*>(adj_rigid_x),
cast<quat*>(adj_rigid_r),
cast<df::float3*>(adj_rigid_v),
cast<df::float3*>(adj_rigid_w),
cast<df::float3*>(adj_rigid_f),
cast<df::float3*>(adj_rigid_t),
cast<float*>(adj_inv_m),
cast<mat33*>(adj_inv_I),
cast<df::float3*>(adj_gravity),
adj_dt,
cast<df::float3*>(adj_rigid_x_new),
cast<quat*>(adj_rigid_r_new),
cast<df::float3*>(adj_rigid_v_new),
cast<df::float3*>(adj_rigid_w_new));
}
}
// Python entry points
void integrate_rigids_cpu_forward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new);
void integrate_rigids_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t,
torch::Tensor adj_inv_m,
torch::Tensor adj_inv_I,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_rigid_x_new,
torch::Tensor adj_rigid_r_new,
torch::Tensor adj_rigid_v_new,
torch::Tensor adj_rigid_w_new);
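// Spring-damper force between two particles: with dir = (x_i - x_j) / |x_i - x_j|,
// C = |x_i - x_j| - rest_length and dC/dt = dot(dir, v_i - v_j), the force
//   f_s = dir * (ke * C + kd * dC/dt)
// is subtracted from particle i and added to particle j via atomic ops.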
void eval_springs_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
df::float3 var_28;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::mul(var_10, var_23);
var_26 = df::mul(var_11, var_24);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_22, var_27);
df::atomic_sub(var_f, var_5, var_28);
df::atomic_add(var_f, var_9, var_28);
}
void eval_springs_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_spring_indices,
float* adj_spring_rest_lengths,
float* adj_spring_stiffness,
float* adj_spring_damping,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
df::float3 var_28;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
df::float3 adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::mul(var_10, var_23);
var_26 = df::mul(var_11, var_24);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_22, var_27);
df::atomic_sub(var_f, var_5, var_28);
df::atomic_add(var_f, var_9, var_28);
//---------
// reverse
df::adj_atomic_add(var_f, var_9, var_28, adj_f, adj_9, adj_28);
df::adj_atomic_sub(var_f, var_5, var_28, adj_f, adj_5, adj_28);
df::adj_mul(var_22, var_27, adj_22, adj_27, adj_28);
df::adj_add(var_25, var_26, adj_25, adj_26, adj_27);
df::adj_mul(var_11, var_24, adj_11, adj_24, adj_26);
df::adj_mul(var_10, var_23, adj_10, adj_23, adj_25);
df::adj_dot(var_22, var_18, adj_22, adj_18, adj_24);
df::adj_sub(var_19, var_12, adj_19, adj_12, adj_23);
df::adj_mul(var_17, var_21, adj_17, adj_21, adj_22);
df::adj_div(var_20, var_19, adj_20, adj_19, adj_21);
df::adj_length(var_17, adj_17, adj_19);
df::adj_sub(var_15, var_16, adj_15, adj_16, adj_18);
df::adj_sub(var_13, var_14, adj_13, adj_14, adj_17);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_16);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_15);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_14);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_13);
df::adj_load(var_spring_rest_lengths, var_0, adj_spring_rest_lengths, adj_0, adj_12);
df::adj_load(var_spring_damping, var_0, adj_spring_damping, adj_0, adj_11);
df::adj_load(var_spring_stiffness, var_0, adj_spring_stiffness, adj_0, adj_10);
df::adj_load(var_spring_indices, var_8, adj_spring_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_spring_indices, var_4, adj_spring_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void eval_springs_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_springs_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_spring_indices),
cast<float*>(var_spring_rest_lengths),
cast<float*>(var_spring_stiffness),
cast<float*>(var_spring_damping),
cast<df::float3*>(var_f));
}
}
void eval_springs_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_spring_indices,
torch::Tensor adj_spring_rest_lengths,
torch::Tensor adj_spring_stiffness,
torch::Tensor adj_spring_damping,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_springs_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_spring_indices),
cast<float*>(var_spring_rest_lengths),
cast<float*>(var_spring_stiffness),
cast<float*>(var_spring_damping),
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_spring_indices),
cast<float*>(adj_spring_rest_lengths),
cast<float*>(adj_spring_stiffness),
cast<float*>(adj_spring_damping),
cast<df::float3*>(adj_f));
}
}
// Python entry points
void eval_springs_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f);
void eval_springs_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_spring_indices,
torch::Tensor adj_spring_rest_lengths,
torch::Tensor adj_spring_stiffness,
torch::Tensor adj_spring_damping,
torch::Tensor adj_f);
void eval_triangles_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 3;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
mat22 var_22;
float var_23;
const float var_24 = 2.0;
float var_25;
const float var_26 = 1.0;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
float var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
df::float3 var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
df::float3 var_42;
float var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
const float var_57 = 0.5;
float var_58;
float var_59;
float var_60;
float var_61;
float var_62;
df::float3 var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
df::float3 var_69;
float var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
float var_77;
float var_78;
df::float3 var_79;
df::float3 var_80;
float var_81;
df::float3 var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
const float var_87 = 0.3333;
df::float3 var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
const float var_96 = 1.57079;
float var_97;
float var_98;
float var_99;
float var_100;
df::float3 var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
df::float3 var_107;
df::float3 var_108;
df::float3 var_109;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::load(var_x, var_5);
var_15 = df::load(var_x, var_9);
var_16 = df::load(var_x, var_13);
var_17 = df::load(var_v, var_5);
var_18 = df::load(var_v, var_9);
var_19 = df::load(var_v, var_13);
var_20 = df::sub(var_15, var_14);
var_21 = df::sub(var_16, var_14);
var_22 = df::load(var_pose, var_0);
var_23 = df::determinant(var_22);
var_25 = df::mul(var_23, var_24);
var_27 = df::div(var_26, var_25);
var_28 = df::mul(var_k_mu, var_27);
var_29 = df::mul(var_k_lambda, var_27);
var_30 = df::mul(var_k_damp, var_27);
var_31 = df::index(var_22, var_3, var_3);
var_32 = df::mul(var_20, var_31);
var_33 = df::index(var_22, var_7, var_3);
var_34 = df::mul(var_21, var_33);
var_35 = df::add(var_32, var_34);
var_36 = df::index(var_22, var_3, var_7);
var_37 = df::mul(var_20, var_36);
var_38 = df::index(var_22, var_7, var_7);
var_39 = df::mul(var_21, var_38);
var_40 = df::add(var_37, var_39);
var_41 = df::index(var_22, var_3, var_3);
var_42 = df::mul(var_35, var_41);
var_43 = df::index(var_22, var_3, var_7);
var_44 = df::mul(var_40, var_43);
var_45 = df::add(var_42, var_44);
var_46 = df::mul(var_45, var_28);
var_47 = df::index(var_22, var_7, var_3);
var_48 = df::mul(var_35, var_47);
var_49 = df::index(var_22, var_7, var_7);
var_50 = df::mul(var_40, var_49);
var_51 = df::add(var_48, var_50);
var_52 = df::mul(var_51, var_28);
var_53 = df::div(var_28, var_29);
var_54 = df::add(var_26, var_53);
var_55 = df::cross(var_20, var_21);
var_56 = df::length(var_55);
var_58 = df::mul(var_56, var_57);
var_59 = df::load(var_activation, var_0);
var_60 = df::mul(var_58, var_25);
var_61 = df::sub(var_60, var_54);
var_62 = df::add(var_61, var_59);
var_63 = df::normalize(var_55);
var_64 = df::cross(var_21, var_63);
var_65 = df::mul(var_64, var_25);
var_66 = df::mul(var_65, var_57);
var_67 = df::cross(var_63, var_20);
var_68 = df::mul(var_67, var_25);
var_69 = df::mul(var_68, var_57);
var_70 = df::mul(var_29, var_62);
var_71 = df::dot(var_66, var_18);
var_72 = df::dot(var_69, var_19);
var_73 = df::add(var_71, var_72);
var_74 = df::add(var_66, var_69);
var_75 = df::dot(var_74, var_17);
var_76 = df::sub(var_73, var_75);
var_77 = df::mul(var_30, var_76);
var_78 = df::add(var_70, var_77);
var_79 = df::mul(var_66, var_78);
var_80 = df::add(var_46, var_79);
var_81 = df::add(var_70, var_77);
var_82 = df::mul(var_69, var_81);
var_83 = df::add(var_52, var_82);
var_84 = df::add(var_80, var_83);
var_85 = df::add(var_17, var_19);
var_86 = df::add(var_85, var_18);
var_88 = df::mul(var_86, var_87);
var_89 = df::normalize(var_88);
var_90 = df::mul(var_k_drag, var_58);
var_91 = df::dot(var_63, var_88);
var_92 = df::abs(var_91);
var_93 = df::mul(var_90, var_92);
var_94 = df::mul(var_88, var_93);
var_95 = df::mul(var_k_lift, var_58);
var_97 = df::dot(var_63, var_89);
var_98 = df::acos(var_97);
var_99 = df::sub(var_96, var_98);
var_100 = df::mul(var_95, var_99);
var_101 = df::mul(var_63, var_100);
var_102 = df::dot(var_88, var_88);
var_103 = df::mul(var_101, var_102);
var_104 = df::sub(var_84, var_94);
var_105 = df::sub(var_104, var_103);
var_106 = df::add(var_80, var_94);
var_107 = df::add(var_106, var_103);
var_108 = df::add(var_83, var_94);
var_109 = df::add(var_108, var_103);
df::atomic_add(var_f, var_5, var_105);
df::atomic_sub(var_f, var_9, var_107);
df::atomic_sub(var_f, var_13, var_109);
}
void eval_triangles_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
mat22* adj_pose,
float* adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 3;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
mat22 var_22;
float var_23;
const float var_24 = 2.0;
float var_25;
const float var_26 = 1.0;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
float var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
df::float3 var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
df::float3 var_42;
float var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
const float var_57 = 0.5;
float var_58;
float var_59;
float var_60;
float var_61;
float var_62;
df::float3 var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
df::float3 var_69;
float var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
float var_77;
float var_78;
df::float3 var_79;
df::float3 var_80;
float var_81;
df::float3 var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
const float var_87 = 0.3333;
df::float3 var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
const float var_96 = 1.57079;
float var_97;
float var_98;
float var_99;
float var_100;
df::float3 var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
df::float3 var_107;
df::float3 var_108;
df::float3 var_109;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
mat22 adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
df::float3 adj_32 = 0;
float adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
df::float3 adj_37 = 0;
float adj_38 = 0;
df::float3 adj_39 = 0;
df::float3 adj_40 = 0;
float adj_41 = 0;
df::float3 adj_42 = 0;
float adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
float adj_49 = 0;
df::float3 adj_50 = 0;
df::float3 adj_51 = 0;
df::float3 adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
df::float3 adj_63 = 0;
df::float3 adj_64 = 0;
df::float3 adj_65 = 0;
df::float3 adj_66 = 0;
df::float3 adj_67 = 0;
df::float3 adj_68 = 0;
df::float3 adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
df::float3 adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
float adj_78 = 0;
df::float3 adj_79 = 0;
df::float3 adj_80 = 0;
float adj_81 = 0;
df::float3 adj_82 = 0;
df::float3 adj_83 = 0;
df::float3 adj_84 = 0;
df::float3 adj_85 = 0;
df::float3 adj_86 = 0;
float adj_87 = 0;
df::float3 adj_88 = 0;
df::float3 adj_89 = 0;
float adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
df::float3 adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
float adj_97 = 0;
float adj_98 = 0;
float adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
df::float3 adj_103 = 0;
df::float3 adj_104 = 0;
df::float3 adj_105 = 0;
df::float3 adj_106 = 0;
df::float3 adj_107 = 0;
df::float3 adj_108 = 0;
df::float3 adj_109 = 0;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::load(var_x, var_5);
var_15 = df::load(var_x, var_9);
var_16 = df::load(var_x, var_13);
var_17 = df::load(var_v, var_5);
var_18 = df::load(var_v, var_9);
var_19 = df::load(var_v, var_13);
var_20 = df::sub(var_15, var_14);
var_21 = df::sub(var_16, var_14);
var_22 = df::load(var_pose, var_0);
var_23 = df::determinant(var_22);
var_25 = df::mul(var_23, var_24);
var_27 = df::div(var_26, var_25);
var_28 = df::mul(var_k_mu, var_27);
var_29 = df::mul(var_k_lambda, var_27);
var_30 = df::mul(var_k_damp, var_27);
var_31 = df::index(var_22, var_3, var_3);
var_32 = df::mul(var_20, var_31);
var_33 = df::index(var_22, var_7, var_3);
var_34 = df::mul(var_21, var_33);
var_35 = df::add(var_32, var_34);
var_36 = df::index(var_22, var_3, var_7);
var_37 = df::mul(var_20, var_36);
var_38 = df::index(var_22, var_7, var_7);
var_39 = df::mul(var_21, var_38);
var_40 = df::add(var_37, var_39);
var_41 = df::index(var_22, var_3, var_3);
var_42 = df::mul(var_35, var_41);
var_43 = df::index(var_22, var_3, var_7);
var_44 = df::mul(var_40, var_43);
var_45 = df::add(var_42, var_44);
var_46 = df::mul(var_45, var_28);
var_47 = df::index(var_22, var_7, var_3);
var_48 = df::mul(var_35, var_47);
var_49 = df::index(var_22, var_7, var_7);
var_50 = df::mul(var_40, var_49);
var_51 = df::add(var_48, var_50);
var_52 = df::mul(var_51, var_28);
var_53 = df::div(var_28, var_29);
var_54 = df::add(var_26, var_53);
var_55 = df::cross(var_20, var_21);
var_56 = df::length(var_55);
var_58 = df::mul(var_56, var_57);
var_59 = df::load(var_activation, var_0);
var_60 = df::mul(var_58, var_25);
var_61 = df::sub(var_60, var_54);
var_62 = df::add(var_61, var_59);
var_63 = df::normalize(var_55);
var_64 = df::cross(var_21, var_63);
var_65 = df::mul(var_64, var_25);
var_66 = df::mul(var_65, var_57);
var_67 = df::cross(var_63, var_20);
var_68 = df::mul(var_67, var_25);
var_69 = df::mul(var_68, var_57);
var_70 = df::mul(var_29, var_62);
var_71 = df::dot(var_66, var_18);
var_72 = df::dot(var_69, var_19);
var_73 = df::add(var_71, var_72);
var_74 = df::add(var_66, var_69);
var_75 = df::dot(var_74, var_17);
var_76 = df::sub(var_73, var_75);
var_77 = df::mul(var_30, var_76);
var_78 = df::add(var_70, var_77);
var_79 = df::mul(var_66, var_78);
var_80 = df::add(var_46, var_79);
var_81 = df::add(var_70, var_77);
var_82 = df::mul(var_69, var_81);
var_83 = df::add(var_52, var_82);
var_84 = df::add(var_80, var_83);
var_85 = df::add(var_17, var_19);
var_86 = df::add(var_85, var_18);
var_88 = df::mul(var_86, var_87);
var_89 = df::normalize(var_88);
var_90 = df::mul(var_k_drag, var_58);
var_91 = df::dot(var_63, var_88);
var_92 = df::abs(var_91);
var_93 = df::mul(var_90, var_92);
var_94 = df::mul(var_88, var_93);
var_95 = df::mul(var_k_lift, var_58);
var_97 = df::dot(var_63, var_89);
var_98 = df::acos(var_97);
var_99 = df::sub(var_96, var_98);
var_100 = df::mul(var_95, var_99);
var_101 = df::mul(var_63, var_100);
var_102 = df::dot(var_88, var_88);
var_103 = df::mul(var_101, var_102);
var_104 = df::sub(var_84, var_94);
var_105 = df::sub(var_104, var_103);
var_106 = df::add(var_80, var_94);
var_107 = df::add(var_106, var_103);
var_108 = df::add(var_83, var_94);
var_109 = df::add(var_108, var_103);
df::atomic_add(var_f, var_5, var_105);
df::atomic_sub(var_f, var_9, var_107);
df::atomic_sub(var_f, var_13, var_109);
//---------
// reverse
df::adj_atomic_sub(var_f, var_13, var_109, adj_f, adj_13, adj_109);
df::adj_atomic_sub(var_f, var_9, var_107, adj_f, adj_9, adj_107);
df::adj_atomic_add(var_f, var_5, var_105, adj_f, adj_5, adj_105);
df::adj_add(var_108, var_103, adj_108, adj_103, adj_109);
df::adj_add(var_83, var_94, adj_83, adj_94, adj_108);
df::adj_add(var_106, var_103, adj_106, adj_103, adj_107);
df::adj_add(var_80, var_94, adj_80, adj_94, adj_106);
df::adj_sub(var_104, var_103, adj_104, adj_103, adj_105);
df::adj_sub(var_84, var_94, adj_84, adj_94, adj_104);
df::adj_mul(var_101, var_102, adj_101, adj_102, adj_103);
df::adj_dot(var_88, var_88, adj_88, adj_88, adj_102);
df::adj_mul(var_63, var_100, adj_63, adj_100, adj_101);
df::adj_mul(var_95, var_99, adj_95, adj_99, adj_100);
df::adj_sub(var_96, var_98, adj_96, adj_98, adj_99);
df::adj_acos(var_97, adj_97, adj_98);
df::adj_dot(var_63, var_89, adj_63, adj_89, adj_97);
df::adj_mul(var_k_lift, var_58, adj_k_lift, adj_58, adj_95);
df::adj_mul(var_88, var_93, adj_88, adj_93, adj_94);
df::adj_mul(var_90, var_92, adj_90, adj_92, adj_93);
df::adj_abs(var_91, adj_91, adj_92);
df::adj_dot(var_63, var_88, adj_63, adj_88, adj_91);
df::adj_mul(var_k_drag, var_58, adj_k_drag, adj_58, adj_90);
df::adj_normalize(var_88, adj_88, adj_89);
df::adj_mul(var_86, var_87, adj_86, adj_87, adj_88);
df::adj_add(var_85, var_18, adj_85, adj_18, adj_86);
df::adj_add(var_17, var_19, adj_17, adj_19, adj_85);
df::adj_add(var_80, var_83, adj_80, adj_83, adj_84);
df::adj_add(var_52, var_82, adj_52, adj_82, adj_83);
df::adj_mul(var_69, var_81, adj_69, adj_81, adj_82);
df::adj_add(var_70, var_77, adj_70, adj_77, adj_81);
df::adj_add(var_46, var_79, adj_46, adj_79, adj_80);
df::adj_mul(var_66, var_78, adj_66, adj_78, adj_79);
df::adj_add(var_70, var_77, adj_70, adj_77, adj_78);
df::adj_mul(var_30, var_76, adj_30, adj_76, adj_77);
df::adj_sub(var_73, var_75, adj_73, adj_75, adj_76);
df::adj_dot(var_74, var_17, adj_74, adj_17, adj_75);
df::adj_add(var_66, var_69, adj_66, adj_69, adj_74);
df::adj_add(var_71, var_72, adj_71, adj_72, adj_73);
df::adj_dot(var_69, var_19, adj_69, adj_19, adj_72);
df::adj_dot(var_66, var_18, adj_66, adj_18, adj_71);
df::adj_mul(var_29, var_62, adj_29, adj_62, adj_70);
df::adj_mul(var_68, var_57, adj_68, adj_57, adj_69);
df::adj_mul(var_67, var_25, adj_67, adj_25, adj_68);
df::adj_cross(var_63, var_20, adj_63, adj_20, adj_67);
df::adj_mul(var_65, var_57, adj_65, adj_57, adj_66);
df::adj_mul(var_64, var_25, adj_64, adj_25, adj_65);
df::adj_cross(var_21, var_63, adj_21, adj_63, adj_64);
df::adj_normalize(var_55, adj_55, adj_63);
df::adj_add(var_61, var_59, adj_61, adj_59, adj_62);
df::adj_sub(var_60, var_54, adj_60, adj_54, adj_61);
df::adj_mul(var_58, var_25, adj_58, adj_25, adj_60);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_59);
df::adj_mul(var_56, var_57, adj_56, adj_57, adj_58);
df::adj_length(var_55, adj_55, adj_56);
df::adj_cross(var_20, var_21, adj_20, adj_21, adj_55);
df::adj_add(var_26, var_53, adj_26, adj_53, adj_54);
df::adj_div(var_28, var_29, adj_28, adj_29, adj_53);
df::adj_mul(var_51, var_28, adj_51, adj_28, adj_52);
df::adj_add(var_48, var_50, adj_48, adj_50, adj_51);
df::adj_mul(var_40, var_49, adj_40, adj_49, adj_50);
df::adj_index(var_22, var_7, var_7, adj_22, adj_7, adj_7, adj_49);
df::adj_mul(var_35, var_47, adj_35, adj_47, adj_48);
df::adj_index(var_22, var_7, var_3, adj_22, adj_7, adj_3, adj_47);
df::adj_mul(var_45, var_28, adj_45, adj_28, adj_46);
df::adj_add(var_42, var_44, adj_42, adj_44, adj_45);
df::adj_mul(var_40, var_43, adj_40, adj_43, adj_44);
df::adj_index(var_22, var_3, var_7, adj_22, adj_3, adj_7, adj_43);
df::adj_mul(var_35, var_41, adj_35, adj_41, adj_42);
df::adj_index(var_22, var_3, var_3, adj_22, adj_3, adj_3, adj_41);
df::adj_add(var_37, var_39, adj_37, adj_39, adj_40);
df::adj_mul(var_21, var_38, adj_21, adj_38, adj_39);
df::adj_index(var_22, var_7, var_7, adj_22, adj_7, adj_7, adj_38);
df::adj_mul(var_20, var_36, adj_20, adj_36, adj_37);
df::adj_index(var_22, var_3, var_7, adj_22, adj_3, adj_7, adj_36);
df::adj_add(var_32, var_34, adj_32, adj_34, adj_35);
df::adj_mul(var_21, var_33, adj_21, adj_33, adj_34);
df::adj_index(var_22, var_7, var_3, adj_22, adj_7, adj_3, adj_33);
df::adj_mul(var_20, var_31, adj_20, adj_31, adj_32);
df::adj_index(var_22, var_3, var_3, adj_22, adj_3, adj_3, adj_31);
df::adj_mul(var_k_damp, var_27, adj_k_damp, adj_27, adj_30);
df::adj_mul(var_k_lambda, var_27, adj_k_lambda, adj_27, adj_29);
df::adj_mul(var_k_mu, var_27, adj_k_mu, adj_27, adj_28);
df::adj_div(var_26, var_25, adj_26, adj_25, adj_27);
df::adj_mul(var_23, var_24, adj_23, adj_24, adj_25);
df::adj_determinant(var_22, adj_22, adj_23);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_22);
df::adj_sub(var_16, var_14, adj_16, adj_14, adj_21);
df::adj_sub(var_15, var_14, adj_15, adj_14, adj_20);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_19);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_18);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_17);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_16);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_15);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void eval_triangles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f));
}
}
void eval_triangles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<mat22*>(adj_pose),
cast<float*>(adj_activation),
adj_k_mu,
adj_k_lambda,
adj_k_damp,
adj_k_drag,
adj_k_lift,
cast<df::float3*>(adj_f));
}
}
// Python entry points
void eval_triangles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f);
void eval_triangles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f);
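// eval_triangles_contact_cpu_kernel_forward: one thread per (triangle, particle)
// pair (var_1 = tid / num_particles selects the triangle, var_2 = tid % num_particles
// the particle). Particles that belong to the triangle itself are skipped. The kernel
// finds the closest point on the triangle via triangle_closest_point_barycentric_cpu_func,
// forms the penalty term min(|d|^2 - 0.01, 0) scaled by 1e5 along the contact direction,
// and applies equal-and-opposite contributions to the particle force and to the three
// triangle vertices weighted by the barycentric coordinates.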
void eval_triangles_contact_cpu_kernel_forward(
int var_num_particles,
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
df::float3 var_3;
const int var_4 = 3;
int var_5;
const int var_6 = 0;
int var_7;
int var_8;
int var_9;
const int var_10 = 1;
int var_11;
int var_12;
int var_13;
const int var_14 = 2;
int var_15;
int var_16;
bool var_17;
bool var_18;
bool var_19;
bool var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
float var_25;
df::float3 var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
float var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
float var_34;
df::float3 var_35;
const float var_36 = 0.01;
float var_37;
const float var_38 = 0.0;
float var_39;
df::float3 var_40;
const float var_41 = 100000.0;
df::float3 var_42;
float var_43;
df::float3 var_44;
float var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_x, var_2);
var_5 = df::mul(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::load(var_indices, var_7);
var_9 = df::mul(var_1, var_4);
var_11 = df::add(var_9, var_10);
var_12 = df::load(var_indices, var_11);
var_13 = df::mul(var_1, var_4);
var_15 = df::add(var_13, var_14);
var_16 = df::load(var_indices, var_15);
var_17 = (var_8 == var_2);
var_18 = (var_12 == var_2);
var_19 = (var_16 == var_2);
var_20 = var_17 || var_18 || var_19;
if (var_20) {
return;
}
var_21 = df::load(var_x, var_8);
var_22 = df::load(var_x, var_12);
var_23 = df::load(var_x, var_16);
var_24 = triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3);
var_25 = df::index(var_24, var_6);
var_26 = df::mul(var_21, var_25);
var_27 = df::index(var_24, var_10);
var_28 = df::mul(var_22, var_27);
var_29 = df::add(var_26, var_28);
var_30 = df::index(var_24, var_14);
var_31 = df::mul(var_23, var_30);
var_32 = df::add(var_29, var_31);
var_33 = df::sub(var_3, var_32);
var_34 = df::dot(var_33, var_33);
var_35 = df::normalize(var_33);
var_37 = df::sub(var_34, var_36);
var_39 = df::min(var_37, var_38);
var_40 = df::mul(var_35, var_39);
var_42 = df::mul(var_40, var_41);
df::atomic_sub(var_f, var_2, var_42);
var_43 = df::index(var_24, var_6);
var_44 = df::mul(var_42, var_43);
df::atomic_add(var_f, var_8, var_44);
var_45 = df::index(var_24, var_10);
var_46 = df::mul(var_42, var_45);
df::atomic_add(var_f, var_12, var_46);
var_47 = df::index(var_24, var_14);
var_48 = df::mul(var_42, var_47);
df::atomic_add(var_f, var_16, var_48);
}
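// Backward (adjoint) kernels follow the same pattern throughout this file: the
// primal variables are re-declared, the forward pass is replayed to rebuild them,
// and gradients are then accumulated through the df::adj_* calls in reverse order
// of the primal statements. An early `return` in the forward pass becomes a
// `goto label0` so the reverse sweep can re-enter at the corresponding point.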
void eval_triangles_contact_cpu_kernel_backward(
int var_num_particles,
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f,
int adj_num_particles,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
mat22* adj_pose,
float* adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
df::float3 var_3;
const int var_4 = 3;
int var_5;
const int var_6 = 0;
int var_7;
int var_8;
int var_9;
const int var_10 = 1;
int var_11;
int var_12;
int var_13;
const int var_14 = 2;
int var_15;
int var_16;
bool var_17;
bool var_18;
bool var_19;
bool var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
float var_25;
df::float3 var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
float var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
float var_34;
df::float3 var_35;
const float var_36 = 0.01;
float var_37;
const float var_38 = 0.0;
float var_39;
df::float3 var_40;
const float var_41 = 100000.0;
df::float3 var_42;
float var_43;
df::float3 var_44;
float var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
df::float3 adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
bool adj_17 = 0;
bool adj_18 = 0;
bool adj_19 = 0;
bool adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
df::float3 adj_23 = 0;
df::float3 adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
float adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
float adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
df::float3 adj_40 = 0;
float adj_41 = 0;
df::float3 adj_42 = 0;
float adj_43 = 0;
df::float3 adj_44 = 0;
float adj_45 = 0;
df::float3 adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_x, var_2);
var_5 = df::mul(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::load(var_indices, var_7);
var_9 = df::mul(var_1, var_4);
var_11 = df::add(var_9, var_10);
var_12 = df::load(var_indices, var_11);
var_13 = df::mul(var_1, var_4);
var_15 = df::add(var_13, var_14);
var_16 = df::load(var_indices, var_15);
var_17 = (var_8 == var_2);
var_18 = (var_12 == var_2);
var_19 = (var_16 == var_2);
var_20 = var_17 || var_18 || var_19;
if (var_20) {
goto label0;
}
var_21 = df::load(var_x, var_8);
var_22 = df::load(var_x, var_12);
var_23 = df::load(var_x, var_16);
var_24 = triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3);
var_25 = df::index(var_24, var_6);
var_26 = df::mul(var_21, var_25);
var_27 = df::index(var_24, var_10);
var_28 = df::mul(var_22, var_27);
var_29 = df::add(var_26, var_28);
var_30 = df::index(var_24, var_14);
var_31 = df::mul(var_23, var_30);
var_32 = df::add(var_29, var_31);
var_33 = df::sub(var_3, var_32);
var_34 = df::dot(var_33, var_33);
var_35 = df::normalize(var_33);
var_37 = df::sub(var_34, var_36);
var_39 = df::min(var_37, var_38);
var_40 = df::mul(var_35, var_39);
var_42 = df::mul(var_40, var_41);
df::atomic_sub(var_f, var_2, var_42);
var_43 = df::index(var_24, var_6);
var_44 = df::mul(var_42, var_43);
df::atomic_add(var_f, var_8, var_44);
var_45 = df::index(var_24, var_10);
var_46 = df::mul(var_42, var_45);
df::atomic_add(var_f, var_12, var_46);
var_47 = df::index(var_24, var_14);
var_48 = df::mul(var_42, var_47);
df::atomic_add(var_f, var_16, var_48);
//---------
// reverse
df::adj_atomic_add(var_f, var_16, var_48, adj_f, adj_16, adj_48);
df::adj_mul(var_42, var_47, adj_42, adj_47, adj_48);
df::adj_index(var_24, var_14, adj_24, adj_14, adj_47);
df::adj_atomic_add(var_f, var_12, var_46, adj_f, adj_12, adj_46);
df::adj_mul(var_42, var_45, adj_42, adj_45, adj_46);
df::adj_index(var_24, var_10, adj_24, adj_10, adj_45);
df::adj_atomic_add(var_f, var_8, var_44, adj_f, adj_8, adj_44);
df::adj_mul(var_42, var_43, adj_42, adj_43, adj_44);
df::adj_index(var_24, var_6, adj_24, adj_6, adj_43);
df::adj_atomic_sub(var_f, var_2, var_42, adj_f, adj_2, adj_42);
df::adj_mul(var_40, var_41, adj_40, adj_41, adj_42);
df::adj_mul(var_35, var_39, adj_35, adj_39, adj_40);
df::adj_min(var_37, var_38, adj_37, adj_38, adj_39);
df::adj_sub(var_34, var_36, adj_34, adj_36, adj_37);
df::adj_normalize(var_33, adj_33, adj_35);
df::adj_dot(var_33, var_33, adj_33, adj_33, adj_34);
df::adj_sub(var_3, var_32, adj_3, adj_32, adj_33);
df::adj_add(var_29, var_31, adj_29, adj_31, adj_32);
df::adj_mul(var_23, var_30, adj_23, adj_30, adj_31);
df::adj_index(var_24, var_14, adj_24, adj_14, adj_30);
df::adj_add(var_26, var_28, adj_26, adj_28, adj_29);
df::adj_mul(var_22, var_27, adj_22, adj_27, adj_28);
df::adj_index(var_24, var_10, adj_24, adj_10, adj_27);
df::adj_mul(var_21, var_25, adj_21, adj_25, adj_26);
df::adj_index(var_24, var_6, adj_24, adj_6, adj_25);
adj_triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3, adj_21, adj_22, adj_23, adj_3, adj_24);
df::adj_load(var_x, var_16, adj_x, adj_16, adj_23);
df::adj_load(var_x, var_12, adj_x, adj_12, adj_22);
df::adj_load(var_x, var_8, adj_x, adj_8, adj_21);
if (var_20) {
label0:;
}
df::adj_load(var_indices, var_15, adj_indices, adj_15, adj_16);
df::adj_add(var_13, var_14, adj_13, adj_14, adj_15);
df::adj_mul(var_1, var_4, adj_1, adj_4, adj_13);
df::adj_load(var_indices, var_11, adj_indices, adj_11, adj_12);
df::adj_add(var_9, var_10, adj_9, adj_10, adj_11);
df::adj_mul(var_1, var_4, adj_1, adj_4, adj_9);
df::adj_load(var_indices, var_7, adj_indices, adj_7, adj_8);
df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_mul(var_1, var_4, adj_1, adj_4, adj_5);
df::adj_load(var_x, var_2, adj_x, adj_2, adj_3);
df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
return;
}
// Python entry points
void eval_triangles_contact_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_contact_cpu_kernel_forward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f));
}
}
void eval_triangles_contact_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_contact_cpu_kernel_backward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f),
adj_num_particles,
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<mat22*>(adj_pose),
cast<float*>(adj_activation),
adj_k_mu,
adj_k_lambda,
adj_k_damp,
adj_k_drag,
adj_k_lift,
cast<df::float3*>(adj_f));
}
}
// Python entry points
void eval_triangles_contact_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f);
void eval_triangles_contact_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f);
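// eval_triangles_rigid_contacts_cpu_kernel_forward: one thread per (triangle, rigid
// contact) pair. The contact point stored on the rigid body is rotated into world
// space and offset along its direction by contact_dist; its velocity is taken as
// rigid_v + rigid_w x r. The kernel then finds the closest point on the triangle,
// builds a normal penalty term plus a clamped friction term along two tangent
// directions, and scatters the total force onto the three triangle vertices via
// var_tri_f. Material coefficients are read with a stride of 4 from var_materials
// (presumably ke, kd, kf, mu); no reaction force is applied back to the rigid body
// in this kernel.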
void eval_triangles_rigid_contacts_cpu_kernel_forward(
int var_num_particles,
df::float3* var_x,
df::float3* var_v,
int* var_indices,
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
df::float3* var_tri_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
float var_5;
int var_6;
const int var_7 = 4;
int var_8;
const int var_9 = 0;
int var_10;
float var_11;
int var_12;
const int var_13 = 1;
int var_14;
float var_15;
int var_16;
const int var_17 = 2;
int var_18;
float var_19;
int var_20;
const int var_21 = 3;
int var_22;
float var_23;
df::float3 var_24;
quat var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
int var_36;
int var_37;
int var_38;
int var_39;
int var_40;
int var_41;
int var_42;
int var_43;
int var_44;
df::float3 var_45;
df::float3 var_46;
df::float3 var_47;
df::float3 var_48;
df::float3 var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
float var_54;
df::float3 var_55;
df::float3 var_56;
float var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
float var_61;
df::float3 var_62;
const float var_63 = 0.05;
float var_64;
const float var_65 = 0.0;
float var_66;
float var_67;
float var_68;
df::float3 var_69;
float var_70;
df::float3 var_71;
df::float3 var_72;
float var_73;
df::float3 var_74;
df::float3 var_75;
df::float3 var_76;
float var_77;
df::float3 var_78;
df::float3 var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
float var_85;
float var_86;
float var_87;
const float var_88 = 1.0;
df::float3 var_89;
df::float3 var_90;
df::float3 var_91;
df::float3 var_92;
df::float3 var_93;
float var_94;
float var_95;
df::float3 var_96;
float var_97;
float var_98;
df::float3 var_99;
df::float3 var_100;
df::float3 var_101;
float var_102;
float var_103;
df::float3 var_104;
float var_105;
df::float3 var_106;
df::float3 var_107;
float var_108;
df::float3 var_109;
float var_110;
df::float3 var_111;
float var_112;
df::float3 var_113;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_contact_body, var_2);
var_4 = df::load(var_contact_point, var_2);
var_5 = df::load(var_contact_dist, var_2);
var_6 = df::load(var_contact_mat, var_2);
var_8 = df::mul(var_6, var_7);
var_10 = df::add(var_8, var_9);
var_11 = df::load(var_materials, var_10);
var_12 = df::mul(var_6, var_7);
var_14 = df::add(var_12, var_13);
var_15 = df::load(var_materials, var_14);
var_16 = df::mul(var_6, var_7);
var_18 = df::add(var_16, var_17);
var_19 = df::load(var_materials, var_18);
var_20 = df::mul(var_6, var_7);
var_22 = df::add(var_20, var_21);
var_23 = df::load(var_materials, var_22);
var_24 = df::load(var_rigid_x, var_3);
var_25 = df::load(var_rigid_r, var_3);
var_26 = df::load(var_rigid_v, var_3);
var_27 = df::load(var_rigid_w, var_3);
var_28 = df::rotate(var_25, var_4);
var_29 = df::add(var_24, var_28);
var_30 = df::sub(var_29, var_24);
var_31 = df::normalize(var_30);
var_32 = df::mul(var_31, var_5);
var_33 = df::add(var_29, var_32);
var_34 = df::cross(var_27, var_30);
var_35 = df::add(var_26, var_34);
var_36 = df::mul(var_1, var_21);
var_37 = df::add(var_36, var_9);
var_38 = df::load(var_indices, var_37);
var_39 = df::mul(var_1, var_21);
var_40 = df::add(var_39, var_13);
var_41 = df::load(var_indices, var_40);
var_42 = df::mul(var_1, var_21);
var_43 = df::add(var_42, var_17);
var_44 = df::load(var_indices, var_43);
var_45 = df::load(var_x, var_38);
var_46 = df::load(var_x, var_41);
var_47 = df::load(var_x, var_44);
var_48 = df::load(var_v, var_38);
var_49 = df::load(var_v, var_41);
var_50 = df::load(var_v, var_44);
var_51 = triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33);
var_52 = df::index(var_51, var_9);
var_53 = df::mul(var_45, var_52);
var_54 = df::index(var_51, var_13);
var_55 = df::mul(var_46, var_54);
var_56 = df::add(var_53, var_55);
var_57 = df::index(var_51, var_17);
var_58 = df::mul(var_47, var_57);
var_59 = df::add(var_56, var_58);
var_60 = df::sub(var_33, var_59);
var_61 = df::dot(var_60, var_60);
var_62 = df::normalize(var_60);
var_64 = df::sub(var_61, var_63);
var_66 = df::min(var_64, var_65);
var_67 = df::mul(var_66, var_11);
var_68 = df::index(var_51, var_9);
var_69 = df::mul(var_48, var_68);
var_70 = df::index(var_51, var_13);
var_71 = df::mul(var_49, var_70);
var_72 = df::add(var_69, var_71);
var_73 = df::index(var_51, var_17);
var_74 = df::mul(var_50, var_73);
var_75 = df::add(var_72, var_74);
var_76 = df::sub(var_75, var_35);
var_77 = df::dot(var_62, var_76);
var_78 = df::mul(var_62, var_77);
var_79 = df::sub(var_76, var_78);
var_80 = df::max(var_77, var_65);
var_81 = df::mul(var_80, var_15);
var_82 = df::step(var_66);
var_83 = df::mul(var_81, var_82);
var_84 = df::sub(var_65, var_83);
var_85 = df::add(var_67, var_84);
var_86 = df::mul(var_23, var_85);
var_87 = df::sub(var_65, var_86);
var_89 = df::float3(var_65, var_65, var_88);
var_90 = df::cross(var_62, var_89);
var_91 = df::float3(var_88, var_65, var_65);
var_92 = df::cross(var_62, var_91);
var_93 = df::mul(var_90, var_19);
var_94 = df::dot(var_93, var_79);
var_95 = df::clamp(var_94, var_86, var_87);
var_96 = df::mul(var_92, var_19);
var_97 = df::dot(var_96, var_79);
var_98 = df::clamp(var_97, var_86, var_87);
var_99 = df::mul(var_90, var_95);
var_100 = df::mul(var_92, var_98);
var_101 = df::add(var_99, var_100);
var_102 = df::step(var_66);
var_103 = df::sub(var_65, var_102);
var_104 = df::mul(var_101, var_103);
var_105 = df::add(var_67, var_84);
var_106 = df::mul(var_62, var_105);
var_107 = df::add(var_106, var_104);
var_108 = df::index(var_51, var_9);
var_109 = df::mul(var_107, var_108);
df::atomic_add(var_tri_f, var_38, var_109);
var_110 = df::index(var_51, var_13);
var_111 = df::mul(var_107, var_110);
df::atomic_add(var_tri_f, var_41, var_111);
var_112 = df::index(var_51, var_17);
var_113 = df::mul(var_107, var_112);
df::atomic_add(var_tri_f, var_44, var_113);
}
void eval_triangles_rigid_contacts_cpu_kernel_backward(
int var_num_particles,
df::float3* var_x,
df::float3* var_v,
int* var_indices,
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
df::float3* var_tri_f,
int adj_num_particles,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
df::float3* adj_rigid_x,
quat* adj_rigid_r,
df::float3* adj_rigid_v,
df::float3* adj_rigid_w,
int* adj_contact_body,
df::float3* adj_contact_point,
float* adj_contact_dist,
int* adj_contact_mat,
float* adj_materials,
df::float3* adj_tri_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
float var_5;
int var_6;
const int var_7 = 4;
int var_8;
const int var_9 = 0;
int var_10;
float var_11;
int var_12;
const int var_13 = 1;
int var_14;
float var_15;
int var_16;
const int var_17 = 2;
int var_18;
float var_19;
int var_20;
const int var_21 = 3;
int var_22;
float var_23;
df::float3 var_24;
quat var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
int var_36;
int var_37;
int var_38;
int var_39;
int var_40;
int var_41;
int var_42;
int var_43;
int var_44;
df::float3 var_45;
df::float3 var_46;
df::float3 var_47;
df::float3 var_48;
df::float3 var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
float var_54;
df::float3 var_55;
df::float3 var_56;
float var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
float var_61;
df::float3 var_62;
const float var_63 = 0.05;
float var_64;
const float var_65 = 0.0;
float var_66;
float var_67;
float var_68;
df::float3 var_69;
float var_70;
df::float3 var_71;
df::float3 var_72;
float var_73;
df::float3 var_74;
df::float3 var_75;
df::float3 var_76;
float var_77;
df::float3 var_78;
df::float3 var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
float var_85;
float var_86;
float var_87;
const float var_88 = 1.0;
df::float3 var_89;
df::float3 var_90;
df::float3 var_91;
df::float3 var_92;
df::float3 var_93;
float var_94;
float var_95;
df::float3 var_96;
float var_97;
float var_98;
df::float3 var_99;
df::float3 var_100;
df::float3 var_101;
float var_102;
float var_103;
df::float3 var_104;
float var_105;
df::float3 var_106;
df::float3 var_107;
float var_108;
df::float3 var_109;
float var_110;
df::float3 var_111;
float var_112;
df::float3 var_113;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
df::float3 adj_4 = 0;
float adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
float adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
float adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
int adj_18 = 0;
float adj_19 = 0;
int adj_20 = 0;
int adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
quat adj_25 = 0;
df::float3 adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
int adj_36 = 0;
int adj_37 = 0;
int adj_38 = 0;
int adj_39 = 0;
int adj_40 = 0;
int adj_41 = 0;
int adj_42 = 0;
int adj_43 = 0;
int adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
df::float3 adj_47 = 0;
df::float3 adj_48 = 0;
df::float3 adj_49 = 0;
df::float3 adj_50 = 0;
df::float3 adj_51 = 0;
float adj_52 = 0;
df::float3 adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
df::float3 adj_56 = 0;
float adj_57 = 0;
df::float3 adj_58 = 0;
df::float3 adj_59 = 0;
df::float3 adj_60 = 0;
float adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
float adj_64 = 0;
float adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
df::float3 adj_69 = 0;
float adj_70 = 0;
df::float3 adj_71 = 0;
df::float3 adj_72 = 0;
float adj_73 = 0;
df::float3 adj_74 = 0;
df::float3 adj_75 = 0;
df::float3 adj_76 = 0;
float adj_77 = 0;
df::float3 adj_78 = 0;
df::float3 adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
float adj_83 = 0;
float adj_84 = 0;
float adj_85 = 0;
float adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
df::float3 adj_89 = 0;
df::float3 adj_90 = 0;
df::float3 adj_91 = 0;
df::float3 adj_92 = 0;
df::float3 adj_93 = 0;
float adj_94 = 0;
float adj_95 = 0;
df::float3 adj_96 = 0;
float adj_97 = 0;
float adj_98 = 0;
df::float3 adj_99 = 0;
df::float3 adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
float adj_103 = 0;
df::float3 adj_104 = 0;
float adj_105 = 0;
df::float3 adj_106 = 0;
df::float3 adj_107 = 0;
float adj_108 = 0;
df::float3 adj_109 = 0;
float adj_110 = 0;
df::float3 adj_111 = 0;
float adj_112 = 0;
df::float3 adj_113 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_contact_body, var_2);
var_4 = df::load(var_contact_point, var_2);
var_5 = df::load(var_contact_dist, var_2);
var_6 = df::load(var_contact_mat, var_2);
var_8 = df::mul(var_6, var_7);
var_10 = df::add(var_8, var_9);
var_11 = df::load(var_materials, var_10);
var_12 = df::mul(var_6, var_7);
var_14 = df::add(var_12, var_13);
var_15 = df::load(var_materials, var_14);
var_16 = df::mul(var_6, var_7);
var_18 = df::add(var_16, var_17);
var_19 = df::load(var_materials, var_18);
var_20 = df::mul(var_6, var_7);
var_22 = df::add(var_20, var_21);
var_23 = df::load(var_materials, var_22);
var_24 = df::load(var_rigid_x, var_3);
var_25 = df::load(var_rigid_r, var_3);
var_26 = df::load(var_rigid_v, var_3);
var_27 = df::load(var_rigid_w, var_3);
var_28 = df::rotate(var_25, var_4);
var_29 = df::add(var_24, var_28);
var_30 = df::sub(var_29, var_24);
var_31 = df::normalize(var_30);
var_32 = df::mul(var_31, var_5);
var_33 = df::add(var_29, var_32);
var_34 = df::cross(var_27, var_30);
var_35 = df::add(var_26, var_34);
var_36 = df::mul(var_1, var_21);
var_37 = df::add(var_36, var_9);
var_38 = df::load(var_indices, var_37);
var_39 = df::mul(var_1, var_21);
var_40 = df::add(var_39, var_13);
var_41 = df::load(var_indices, var_40);
var_42 = df::mul(var_1, var_21);
var_43 = df::add(var_42, var_17);
var_44 = df::load(var_indices, var_43);
var_45 = df::load(var_x, var_38);
var_46 = df::load(var_x, var_41);
var_47 = df::load(var_x, var_44);
var_48 = df::load(var_v, var_38);
var_49 = df::load(var_v, var_41);
var_50 = df::load(var_v, var_44);
var_51 = triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33);
var_52 = df::index(var_51, var_9);
var_53 = df::mul(var_45, var_52);
var_54 = df::index(var_51, var_13);
var_55 = df::mul(var_46, var_54);
var_56 = df::add(var_53, var_55);
var_57 = df::index(var_51, var_17);
var_58 = df::mul(var_47, var_57);
var_59 = df::add(var_56, var_58);
var_60 = df::sub(var_33, var_59);
var_61 = df::dot(var_60, var_60);
var_62 = df::normalize(var_60);
var_64 = df::sub(var_61, var_63);
var_66 = df::min(var_64, var_65);
var_67 = df::mul(var_66, var_11);
var_68 = df::index(var_51, var_9);
var_69 = df::mul(var_48, var_68);
var_70 = df::index(var_51, var_13);
var_71 = df::mul(var_49, var_70);
var_72 = df::add(var_69, var_71);
var_73 = df::index(var_51, var_17);
var_74 = df::mul(var_50, var_73);
var_75 = df::add(var_72, var_74);
var_76 = df::sub(var_75, var_35);
var_77 = df::dot(var_62, var_76);
var_78 = df::mul(var_62, var_77);
var_79 = df::sub(var_76, var_78);
var_80 = df::max(var_77, var_65);
var_81 = df::mul(var_80, var_15);
var_82 = df::step(var_66);
var_83 = df::mul(var_81, var_82);
var_84 = df::sub(var_65, var_83);
var_85 = df::add(var_67, var_84);
var_86 = df::mul(var_23, var_85);
var_87 = df::sub(var_65, var_86);
var_89 = df::float3(var_65, var_65, var_88);
var_90 = df::cross(var_62, var_89);
var_91 = df::float3(var_88, var_65, var_65);
var_92 = df::cross(var_62, var_91);
var_93 = df::mul(var_90, var_19);
var_94 = df::dot(var_93, var_79);
var_95 = df::clamp(var_94, var_86, var_87);
var_96 = df::mul(var_92, var_19);
var_97 = df::dot(var_96, var_79);
var_98 = df::clamp(var_97, var_86, var_87);
var_99 = df::mul(var_90, var_95);
var_100 = df::mul(var_92, var_98);
var_101 = df::add(var_99, var_100);
var_102 = df::step(var_66);
var_103 = df::sub(var_65, var_102);
var_104 = df::mul(var_101, var_103);
var_105 = df::add(var_67, var_84);
var_106 = df::mul(var_62, var_105);
var_107 = df::add(var_106, var_104);
var_108 = df::index(var_51, var_9);
var_109 = df::mul(var_107, var_108);
df::atomic_add(var_tri_f, var_38, var_109);
var_110 = df::index(var_51, var_13);
var_111 = df::mul(var_107, var_110);
df::atomic_add(var_tri_f, var_41, var_111);
var_112 = df::index(var_51, var_17);
var_113 = df::mul(var_107, var_112);
df::atomic_add(var_tri_f, var_44, var_113);
//---------
// reverse
df::adj_atomic_add(var_tri_f, var_44, var_113, adj_tri_f, adj_44, adj_113);
df::adj_mul(var_107, var_112, adj_107, adj_112, adj_113);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_112);
df::adj_atomic_add(var_tri_f, var_41, var_111, adj_tri_f, adj_41, adj_111);
df::adj_mul(var_107, var_110, adj_107, adj_110, adj_111);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_110);
df::adj_atomic_add(var_tri_f, var_38, var_109, adj_tri_f, adj_38, adj_109);
df::adj_mul(var_107, var_108, adj_107, adj_108, adj_109);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_108);
df::adj_add(var_106, var_104, adj_106, adj_104, adj_107);
df::adj_mul(var_62, var_105, adj_62, adj_105, adj_106);
df::adj_add(var_67, var_84, adj_67, adj_84, adj_105);
df::adj_mul(var_101, var_103, adj_101, adj_103, adj_104);
df::adj_sub(var_65, var_102, adj_65, adj_102, adj_103);
df::adj_step(var_66, adj_66, adj_102);
df::adj_add(var_99, var_100, adj_99, adj_100, adj_101);
df::adj_mul(var_92, var_98, adj_92, adj_98, adj_100);
df::adj_mul(var_90, var_95, adj_90, adj_95, adj_99);
df::adj_clamp(var_97, var_86, var_87, adj_97, adj_86, adj_87, adj_98);
df::adj_dot(var_96, var_79, adj_96, adj_79, adj_97);
df::adj_mul(var_92, var_19, adj_92, adj_19, adj_96);
df::adj_clamp(var_94, var_86, var_87, adj_94, adj_86, adj_87, adj_95);
df::adj_dot(var_93, var_79, adj_93, adj_79, adj_94);
df::adj_mul(var_90, var_19, adj_90, adj_19, adj_93);
df::adj_cross(var_62, var_91, adj_62, adj_91, adj_92);
df::adj_float3(var_88, var_65, var_65, adj_88, adj_65, adj_65, adj_91);
df::adj_cross(var_62, var_89, adj_62, adj_89, adj_90);
df::adj_float3(var_65, var_65, var_88, adj_65, adj_65, adj_88, adj_89);
df::adj_sub(var_65, var_86, adj_65, adj_86, adj_87);
df::adj_mul(var_23, var_85, adj_23, adj_85, adj_86);
df::adj_add(var_67, var_84, adj_67, adj_84, adj_85);
df::adj_sub(var_65, var_83, adj_65, adj_83, adj_84);
df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
df::adj_step(var_66, adj_66, adj_82);
df::adj_mul(var_80, var_15, adj_80, adj_15, adj_81);
df::adj_max(var_77, var_65, adj_77, adj_65, adj_80);
df::adj_sub(var_76, var_78, adj_76, adj_78, adj_79);
df::adj_mul(var_62, var_77, adj_62, adj_77, adj_78);
df::adj_dot(var_62, var_76, adj_62, adj_76, adj_77);
df::adj_sub(var_75, var_35, adj_75, adj_35, adj_76);
df::adj_add(var_72, var_74, adj_72, adj_74, adj_75);
df::adj_mul(var_50, var_73, adj_50, adj_73, adj_74);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_73);
df::adj_add(var_69, var_71, adj_69, adj_71, adj_72);
df::adj_mul(var_49, var_70, adj_49, adj_70, adj_71);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_70);
df::adj_mul(var_48, var_68, adj_48, adj_68, adj_69);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_68);
df::adj_mul(var_66, var_11, adj_66, adj_11, adj_67);
df::adj_min(var_64, var_65, adj_64, adj_65, adj_66);
df::adj_sub(var_61, var_63, adj_61, adj_63, adj_64);
df::adj_normalize(var_60, adj_60, adj_62);
df::adj_dot(var_60, var_60, adj_60, adj_60, adj_61);
df::adj_sub(var_33, var_59, adj_33, adj_59, adj_60);
df::adj_add(var_56, var_58, adj_56, adj_58, adj_59);
df::adj_mul(var_47, var_57, adj_47, adj_57, adj_58);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_57);
df::adj_add(var_53, var_55, adj_53, adj_55, adj_56);
df::adj_mul(var_46, var_54, adj_46, adj_54, adj_55);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_54);
df::adj_mul(var_45, var_52, adj_45, adj_52, adj_53);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_52);
adj_triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33, adj_45, adj_46, adj_47, adj_33, adj_51);
df::adj_load(var_v, var_44, adj_v, adj_44, adj_50);
df::adj_load(var_v, var_41, adj_v, adj_41, adj_49);
df::adj_load(var_v, var_38, adj_v, adj_38, adj_48);
df::adj_load(var_x, var_44, adj_x, adj_44, adj_47);
df::adj_load(var_x, var_41, adj_x, adj_41, adj_46);
df::adj_load(var_x, var_38, adj_x, adj_38, adj_45);
df::adj_load(var_indices, var_43, adj_indices, adj_43, adj_44);
df::adj_add(var_42, var_17, adj_42, adj_17, adj_43);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_42);
df::adj_load(var_indices, var_40, adj_indices, adj_40, adj_41);
df::adj_add(var_39, var_13, adj_39, adj_13, adj_40);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_39);
df::adj_load(var_indices, var_37, adj_indices, adj_37, adj_38);
df::adj_add(var_36, var_9, adj_36, adj_9, adj_37);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_36);
df::adj_add(var_26, var_34, adj_26, adj_34, adj_35);
df::adj_cross(var_27, var_30, adj_27, adj_30, adj_34);
df::adj_add(var_29, var_32, adj_29, adj_32, adj_33);
df::adj_mul(var_31, var_5, adj_31, adj_5, adj_32);
df::adj_normalize(var_30, adj_30, adj_31);
df::adj_sub(var_29, var_24, adj_29, adj_24, adj_30);
df::adj_add(var_24, var_28, adj_24, adj_28, adj_29);
df::adj_rotate(var_25, var_4, adj_25, adj_4, adj_28);
df::adj_load(var_rigid_w, var_3, adj_rigid_w, adj_3, adj_27);
df::adj_load(var_rigid_v, var_3, adj_rigid_v, adj_3, adj_26);
df::adj_load(var_rigid_r, var_3, adj_rigid_r, adj_3, adj_25);
df::adj_load(var_rigid_x, var_3, adj_rigid_x, adj_3, adj_24);
df::adj_load(var_materials, var_22, adj_materials, adj_22, adj_23);
df::adj_add(var_20, var_21, adj_20, adj_21, adj_22);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_20);
df::adj_load(var_materials, var_18, adj_materials, adj_18, adj_19);
df::adj_add(var_16, var_17, adj_16, adj_17, adj_18);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_16);
df::adj_load(var_materials, var_14, adj_materials, adj_14, adj_15);
df::adj_add(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_12);
df::adj_load(var_materials, var_10, adj_materials, adj_10, adj_11);
df::adj_add(var_8, var_9, adj_8, adj_9, adj_10);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_load(var_contact_mat, var_2, adj_contact_mat, adj_2, adj_6);
df::adj_load(var_contact_dist, var_2, adj_contact_dist, adj_2, adj_5);
df::adj_load(var_contact_point, var_2, adj_contact_point, adj_2, adj_4);
df::adj_load(var_contact_body, var_2, adj_contact_body, adj_2, adj_3);
df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
return;
}
// Python entry points
void eval_triangles_rigid_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_rigid_contacts_cpu_kernel_forward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<df::float3*>(var_tri_f));
}
}
void eval_triangles_rigid_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_tri_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_rigid_contacts_cpu_kernel_backward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<df::float3*>(var_tri_f),
adj_num_particles,
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<df::float3*>(adj_rigid_x),
cast<quat*>(adj_rigid_r),
cast<df::float3*>(adj_rigid_v),
cast<df::float3*>(adj_rigid_w),
cast<int*>(adj_contact_body),
cast<df::float3*>(adj_contact_point),
cast<float*>(adj_contact_dist),
cast<int*>(adj_contact_mat),
cast<float*>(adj_materials),
cast<df::float3*>(adj_tri_f));
}
}
// Python entry points
void eval_triangles_rigid_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f);
void eval_triangles_rigid_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_tri_f);
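// eval_bending_cpu_kernel_forward: dihedral bending element over four particles
// (two triangles sharing the edge between the third and fourth indexed particles).
// The kernel computes the signed angle between the two face normals, its analytic
// gradients with respect to the four particle positions, and applies a force
// proportional to var_ke*(angle - var_rest) plus a var_kd term on the angle rate
// along those gradients.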
void eval_bending_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
float* var_rest,
float var_ke,
float var_kd,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
df::float3 var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
float var_33;
float var_34;
const float var_35 = 1.0;
float var_36;
float var_37;
float var_38;
float var_39;
float var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
float var_50;
float var_51;
float var_52;
df::float3 var_53;
df::float3 var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
df::float3 var_58;
float var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
float var_66;
df::float3 var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
float var_78;
const float var_79 = 0.0;
float var_80;
float var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_rest, var_0);
var_19 = df::load(var_x, var_5);
var_20 = df::load(var_x, var_9);
var_21 = df::load(var_x, var_13);
var_22 = df::load(var_x, var_17);
var_23 = df::load(var_v, var_5);
var_24 = df::load(var_v, var_9);
var_25 = df::load(var_v, var_13);
var_26 = df::load(var_v, var_17);
var_27 = df::sub(var_21, var_19);
var_28 = df::sub(var_22, var_19);
var_29 = df::cross(var_27, var_28);
var_30 = df::sub(var_22, var_20);
var_31 = df::sub(var_21, var_20);
var_32 = df::cross(var_30, var_31);
var_33 = df::length(var_29);
var_34 = df::length(var_32);
var_36 = df::div(var_35, var_33);
var_37 = df::div(var_35, var_34);
var_38 = df::dot(var_29, var_32);
var_39 = df::mul(var_38, var_36);
var_40 = df::mul(var_39, var_37);
var_41 = df::mul(var_29, var_36);
var_42 = df::mul(var_41, var_36);
var_43 = df::mul(var_32, var_37);
var_44 = df::mul(var_43, var_37);
var_45 = df::sub(var_22, var_21);
var_46 = df::normalize(var_45);
var_47 = df::length(var_45);
var_48 = df::cross(var_44, var_42);
var_49 = df::dot(var_48, var_46);
var_50 = df::sign(var_49);
var_51 = df::acos(var_40);
var_52 = df::mul(var_51, var_50);
var_53 = df::mul(var_42, var_47);
var_54 = df::mul(var_44, var_47);
var_55 = df::sub(var_19, var_22);
var_56 = df::dot(var_55, var_46);
var_57 = df::mul(var_42, var_56);
var_58 = df::sub(var_20, var_22);
var_59 = df::dot(var_58, var_46);
var_60 = df::mul(var_44, var_59);
var_61 = df::add(var_57, var_60);
var_62 = df::sub(var_21, var_19);
var_63 = df::dot(var_62, var_46);
var_64 = df::mul(var_42, var_63);
var_65 = df::sub(var_21, var_20);
var_66 = df::dot(var_65, var_46);
var_67 = df::mul(var_44, var_66);
var_68 = df::add(var_64, var_67);
var_69 = df::sub(var_52, var_18);
var_70 = df::mul(var_ke, var_69);
var_71 = df::dot(var_53, var_23);
var_72 = df::dot(var_54, var_24);
var_73 = df::add(var_71, var_72);
var_74 = df::dot(var_61, var_25);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_68, var_26);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_kd, var_77);
var_80 = df::add(var_70, var_78);
var_81 = df::mul(var_47, var_80);
var_82 = df::sub(var_79, var_81);
var_83 = df::mul(var_53, var_82);
df::atomic_add(var_f, var_5, var_83);
var_84 = df::mul(var_54, var_82);
df::atomic_add(var_f, var_9, var_84);
var_85 = df::mul(var_61, var_82);
df::atomic_add(var_f, var_13, var_85);
var_86 = df::mul(var_68, var_82);
df::atomic_add(var_f, var_17, var_86);
}
void eval_bending_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
float* var_rest,
float var_ke,
float var_kd,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
float* adj_rest,
float adj_ke,
float adj_kd,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
df::float3 var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
float var_33;
float var_34;
const float var_35 = 1.0;
float var_36;
float var_37;
float var_38;
float var_39;
float var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
float var_50;
float var_51;
float var_52;
df::float3 var_53;
df::float3 var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
df::float3 var_58;
float var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
float var_66;
df::float3 var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
float var_78;
const float var_79 = 0.0;
float var_80;
float var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
df::float3 adj_23 = 0;
df::float3 adj_24 = 0;
df::float3 adj_25 = 0;
df::float3 adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
float adj_33 = 0;
float adj_34 = 0;
float adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
df::float3 adj_41 = 0;
df::float3 adj_42 = 0;
df::float3 adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
df::float3 adj_53 = 0;
df::float3 adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
df::float3 adj_57 = 0;
df::float3 adj_58 = 0;
float adj_59 = 0;
df::float3 adj_60 = 0;
df::float3 adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
df::float3 adj_65 = 0;
float adj_66 = 0;
df::float3 adj_67 = 0;
df::float3 adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
float adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
df::float3 adj_83 = 0;
df::float3 adj_84 = 0;
df::float3 adj_85 = 0;
df::float3 adj_86 = 0;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_rest, var_0);
var_19 = df::load(var_x, var_5);
var_20 = df::load(var_x, var_9);
var_21 = df::load(var_x, var_13);
var_22 = df::load(var_x, var_17);
var_23 = df::load(var_v, var_5);
var_24 = df::load(var_v, var_9);
var_25 = df::load(var_v, var_13);
var_26 = df::load(var_v, var_17);
var_27 = df::sub(var_21, var_19);
var_28 = df::sub(var_22, var_19);
var_29 = df::cross(var_27, var_28);
var_30 = df::sub(var_22, var_20);
var_31 = df::sub(var_21, var_20);
var_32 = df::cross(var_30, var_31);
var_33 = df::length(var_29);
var_34 = df::length(var_32);
var_36 = df::div(var_35, var_33);
var_37 = df::div(var_35, var_34);
var_38 = df::dot(var_29, var_32);
var_39 = df::mul(var_38, var_36);
var_40 = df::mul(var_39, var_37);
var_41 = df::mul(var_29, var_36);
var_42 = df::mul(var_41, var_36);
var_43 = df::mul(var_32, var_37);
var_44 = df::mul(var_43, var_37);
var_45 = df::sub(var_22, var_21);
var_46 = df::normalize(var_45);
var_47 = df::length(var_45);
var_48 = df::cross(var_44, var_42);
var_49 = df::dot(var_48, var_46);
var_50 = df::sign(var_49);
var_51 = df::acos(var_40);
var_52 = df::mul(var_51, var_50);
var_53 = df::mul(var_42, var_47);
var_54 = df::mul(var_44, var_47);
var_55 = df::sub(var_19, var_22);
var_56 = df::dot(var_55, var_46);
var_57 = df::mul(var_42, var_56);
var_58 = df::sub(var_20, var_22);
var_59 = df::dot(var_58, var_46);
var_60 = df::mul(var_44, var_59);
var_61 = df::add(var_57, var_60);
var_62 = df::sub(var_21, var_19);
var_63 = df::dot(var_62, var_46);
var_64 = df::mul(var_42, var_63);
var_65 = df::sub(var_21, var_20);
var_66 = df::dot(var_65, var_46);
var_67 = df::mul(var_44, var_66);
var_68 = df::add(var_64, var_67);
var_69 = df::sub(var_52, var_18);
var_70 = df::mul(var_ke, var_69);
var_71 = df::dot(var_53, var_23);
var_72 = df::dot(var_54, var_24);
var_73 = df::add(var_71, var_72);
var_74 = df::dot(var_61, var_25);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_68, var_26);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_kd, var_77);
var_80 = df::add(var_70, var_78);
var_81 = df::mul(var_47, var_80);
var_82 = df::sub(var_79, var_81);
var_83 = df::mul(var_53, var_82);
df::atomic_add(var_f, var_5, var_83);
var_84 = df::mul(var_54, var_82);
df::atomic_add(var_f, var_9, var_84);
var_85 = df::mul(var_61, var_82);
df::atomic_add(var_f, var_13, var_85);
var_86 = df::mul(var_68, var_82);
df::atomic_add(var_f, var_17, var_86);
//---------
// reverse
df::adj_atomic_add(var_f, var_17, var_86, adj_f, adj_17, adj_86);
df::adj_mul(var_68, var_82, adj_68, adj_82, adj_86);
df::adj_atomic_add(var_f, var_13, var_85, adj_f, adj_13, adj_85);
df::adj_mul(var_61, var_82, adj_61, adj_82, adj_85);
df::adj_atomic_add(var_f, var_9, var_84, adj_f, adj_9, adj_84);
df::adj_mul(var_54, var_82, adj_54, adj_82, adj_84);
df::adj_atomic_add(var_f, var_5, var_83, adj_f, adj_5, adj_83);
df::adj_mul(var_53, var_82, adj_53, adj_82, adj_83);
df::adj_sub(var_79, var_81, adj_79, adj_81, adj_82);
df::adj_mul(var_47, var_80, adj_47, adj_80, adj_81);
df::adj_add(var_70, var_78, adj_70, adj_78, adj_80);
df::adj_mul(var_kd, var_77, adj_kd, adj_77, adj_78);
df::adj_add(var_75, var_76, adj_75, adj_76, adj_77);
df::adj_dot(var_68, var_26, adj_68, adj_26, adj_76);
df::adj_add(var_73, var_74, adj_73, adj_74, adj_75);
df::adj_dot(var_61, var_25, adj_61, adj_25, adj_74);
df::adj_add(var_71, var_72, adj_71, adj_72, adj_73);
df::adj_dot(var_54, var_24, adj_54, adj_24, adj_72);
df::adj_dot(var_53, var_23, adj_53, adj_23, adj_71);
df::adj_mul(var_ke, var_69, adj_ke, adj_69, adj_70);
df::adj_sub(var_52, var_18, adj_52, adj_18, adj_69);
df::adj_add(var_64, var_67, adj_64, adj_67, adj_68);
df::adj_mul(var_44, var_66, adj_44, adj_66, adj_67);
df::adj_dot(var_65, var_46, adj_65, adj_46, adj_66);
df::adj_sub(var_21, var_20, adj_21, adj_20, adj_65);
df::adj_mul(var_42, var_63, adj_42, adj_63, adj_64);
df::adj_dot(var_62, var_46, adj_62, adj_46, adj_63);
df::adj_sub(var_21, var_19, adj_21, adj_19, adj_62);
df::adj_add(var_57, var_60, adj_57, adj_60, adj_61);
df::adj_mul(var_44, var_59, adj_44, adj_59, adj_60);
df::adj_dot(var_58, var_46, adj_58, adj_46, adj_59);
df::adj_sub(var_20, var_22, adj_20, adj_22, adj_58);
df::adj_mul(var_42, var_56, adj_42, adj_56, adj_57);
df::adj_dot(var_55, var_46, adj_55, adj_46, adj_56);
df::adj_sub(var_19, var_22, adj_19, adj_22, adj_55);
df::adj_mul(var_44, var_47, adj_44, adj_47, adj_54);
df::adj_mul(var_42, var_47, adj_42, adj_47, adj_53);
df::adj_mul(var_51, var_50, adj_51, adj_50, adj_52);
df::adj_acos(var_40, adj_40, adj_51);
df::adj_sign(var_49, adj_49, adj_50);
df::adj_dot(var_48, var_46, adj_48, adj_46, adj_49);
df::adj_cross(var_44, var_42, adj_44, adj_42, adj_48);
df::adj_length(var_45, adj_45, adj_47);
df::adj_normalize(var_45, adj_45, adj_46);
df::adj_sub(var_22, var_21, adj_22, adj_21, adj_45);
df::adj_mul(var_43, var_37, adj_43, adj_37, adj_44);
df::adj_mul(var_32, var_37, adj_32, adj_37, adj_43);
df::adj_mul(var_41, var_36, adj_41, adj_36, adj_42);
df::adj_mul(var_29, var_36, adj_29, adj_36, adj_41);
df::adj_mul(var_39, var_37, adj_39, adj_37, adj_40);
df::adj_mul(var_38, var_36, adj_38, adj_36, adj_39);
df::adj_dot(var_29, var_32, adj_29, adj_32, adj_38);
df::adj_div(var_35, var_34, adj_35, adj_34, adj_37);
df::adj_div(var_35, var_33, adj_35, adj_33, adj_36);
df::adj_length(var_32, adj_32, adj_34);
df::adj_length(var_29, adj_29, adj_33);
df::adj_cross(var_30, var_31, adj_30, adj_31, adj_32);
df::adj_sub(var_21, var_20, adj_21, adj_20, adj_31);
df::adj_sub(var_22, var_20, adj_22, adj_20, adj_30);
df::adj_cross(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_sub(var_22, var_19, adj_22, adj_19, adj_28);
df::adj_sub(var_21, var_19, adj_21, adj_19, adj_27);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_26);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_25);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_24);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_23);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_22);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_21);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_20);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_19);
df::adj_load(var_rest, var_0, adj_rest, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void eval_bending_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_bending_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<float*>(var_rest),
var_ke,
var_kd,
cast<df::float3*>(var_f));
}
}
void eval_bending_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rest,
float adj_ke,
float adj_kd,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_bending_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<float*>(var_rest),
var_ke,
var_kd,
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<float*>(adj_rest),
adj_ke,
adj_kd,
cast<df::float3*>(adj_f));
}
}
// Python entry points
void eval_bending_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f);
void eval_bending_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rest,
float adj_ke,
float adj_kd,
torch::Tensor adj_f);
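// eval_tetrahedra_cpu_kernel_forward: FEM force for a single tetrahedron. The kernel
// builds the deformation gradient F = Ds * var_pose (var_58) and its time derivative
// (var_60) from the current edge vectors, where var_pose presumably stores the inverse
// rest-shape matrix, and evaluates an isotropic (Neo-Hookean style) elastic response
// using three per-element coefficients (presumably k_mu, k_lambda, k_damp) read from
// var_materials with a stride of 3, each scaled by 1/(6*det(var_pose)) -- the element
// rest volume under that interpretation of var_pose.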
void eval_tetrahedra_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
df::float3 var_38;
df::float3 var_39;
df::float3 var_40;
df::float3 var_41;
mat33 var_42;
mat33 var_43;
float var_44;
const float var_45 = 6.0;
float var_46;
const float var_47 = 1.0;
float var_48;
float var_49;
float var_50;
const float var_51 = 4.0;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
mat33 var_58;
mat33 var_59;
mat33 var_60;
float var_61;
float var_62;
float var_63;
df::float3 var_64;
float var_65;
float var_66;
float var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
df::float3 var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
mat33 var_78;
float var_79;
float var_80;
float var_81;
mat33 var_82;
mat33 var_83;
mat33 var_84;
mat33 var_85;
mat33 var_86;
float var_87;
float var_88;
float var_89;
df::float3 var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
float var_96;
float var_97;
df::float3 var_98;
float var_99;
float var_100;
df::float3 var_101;
df::float3 var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
df::float3 var_117;
df::float3 var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
df::float3 var_123;
df::float3 var_124;
const float var_125 = 0.0;
float var_126;
df::float3 var_127;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::sub(var_29, var_28);
var_37 = df::sub(var_30, var_28);
var_38 = df::sub(var_31, var_28);
var_39 = df::sub(var_33, var_32);
var_40 = df::sub(var_34, var_32);
var_41 = df::sub(var_35, var_32);
var_42 = df::mat33(var_36, var_37, var_38);
var_43 = df::load(var_pose, var_0);
var_44 = df::determinant(var_43);
var_46 = df::mul(var_44, var_45);
var_48 = df::div(var_47, var_46);
var_49 = df::div(var_21, var_24);
var_50 = df::add(var_47, var_49);
var_52 = df::mul(var_51, var_24);
var_53 = df::div(var_21, var_52);
var_54 = df::sub(var_50, var_53);
var_55 = df::mul(var_21, var_48);
var_56 = df::mul(var_24, var_48);
var_57 = df::mul(var_27, var_48);
var_58 = df::mul(var_42, var_43);
var_59 = df::mat33(var_39, var_40, var_41);
var_60 = df::mul(var_59, var_43);
var_61 = df::index(var_58, var_3, var_3);
var_62 = df::index(var_58, var_7, var_3);
var_63 = df::index(var_58, var_11, var_3);
var_64 = df::float3(var_61, var_62, var_63);
var_65 = df::index(var_58, var_3, var_7);
var_66 = df::index(var_58, var_7, var_7);
var_67 = df::index(var_58, var_11, var_7);
var_68 = df::float3(var_65, var_66, var_67);
var_69 = df::index(var_58, var_3, var_11);
var_70 = df::index(var_58, var_7, var_11);
var_71 = df::index(var_58, var_11, var_11);
var_72 = df::float3(var_69, var_70, var_71);
var_73 = df::dot(var_64, var_64);
var_74 = df::dot(var_68, var_68);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_72, var_72);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_58, var_55);
var_79 = df::add(var_77, var_47);
var_80 = df::div(var_47, var_79);
var_81 = df::sub(var_47, var_80);
var_82 = df::mul(var_78, var_81);
var_83 = df::mul(var_60, var_57);
var_84 = df::add(var_82, var_83);
var_85 = df::transpose(var_43);
var_86 = df::mul(var_84, var_85);
var_87 = df::index(var_86, var_3, var_3);
var_88 = df::index(var_86, var_7, var_3);
var_89 = df::index(var_86, var_11, var_3);
var_90 = df::float3(var_87, var_88, var_89);
var_91 = df::index(var_86, var_3, var_7);
var_92 = df::index(var_86, var_7, var_7);
var_93 = df::index(var_86, var_11, var_7);
var_94 = df::float3(var_91, var_92, var_93);
var_95 = df::index(var_86, var_3, var_11);
var_96 = df::index(var_86, var_7, var_11);
var_97 = df::index(var_86, var_11, var_11);
var_98 = df::float3(var_95, var_96, var_97);
var_99 = df::determinant(var_58);
var_100 = df::div(var_46, var_45);
var_101 = df::cross(var_37, var_38);
var_102 = df::mul(var_101, var_100);
var_103 = df::cross(var_38, var_36);
var_104 = df::mul(var_103, var_100);
var_105 = df::cross(var_36, var_37);
var_106 = df::mul(var_105, var_100);
var_107 = df::sub(var_99, var_54);
var_108 = df::add(var_107, var_18);
var_109 = df::mul(var_108, var_56);
var_110 = df::dot(var_102, var_33);
var_111 = df::dot(var_104, var_34);
var_112 = df::add(var_110, var_111);
var_113 = df::dot(var_106, var_35);
var_114 = df::add(var_112, var_113);
var_115 = df::mul(var_114, var_57);
var_116 = df::add(var_109, var_115);
var_117 = df::mul(var_102, var_116);
var_118 = df::add(var_90, var_117);
var_119 = df::mul(var_104, var_116);
var_120 = df::add(var_94, var_119);
var_121 = df::mul(var_106, var_116);
var_122 = df::add(var_98, var_121);
var_123 = df::add(var_118, var_120);
var_124 = df::add(var_123, var_122);
var_126 = df::sub(var_125, var_47);
var_127 = df::mul(var_124, var_126);
df::atomic_sub(var_f, var_5, var_127);
df::atomic_sub(var_f, var_9, var_118);
df::atomic_sub(var_f, var_13, var_120);
df::atomic_sub(var_f, var_17, var_122);
}
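// Each *_cpu_kernel_backward follows the same reverse-mode pattern: it
// re-declares the primal vars, zero-initializes one adjoint local (adj_N) per
// primal value, replays the forward pass, and then, in the "// reverse"
// section, walks the recorded operations in reverse order calling the matching
// df::adj_* overloads, which accumulate the adjoints of each operation's
// inputs from the adjoint of its output. For a scalar product
// var_y = df::mul(a, b), for example, df::adj_mul(a, b, adj_a, adj_b, adj_y)
// roughly accumulates adj_a += b * adj_y and adj_b += a * adj_y; the vector
// and matrix overloads presumably apply the corresponding transposed products.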
void eval_tetrahedra_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
mat33* adj_pose,
float* adj_activation,
float* adj_materials,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
df::float3 var_38;
df::float3 var_39;
df::float3 var_40;
df::float3 var_41;
mat33 var_42;
mat33 var_43;
float var_44;
const float var_45 = 6.0;
float var_46;
const float var_47 = 1.0;
float var_48;
float var_49;
float var_50;
const float var_51 = 4.0;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
mat33 var_58;
mat33 var_59;
mat33 var_60;
float var_61;
float var_62;
float var_63;
df::float3 var_64;
float var_65;
float var_66;
float var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
df::float3 var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
mat33 var_78;
float var_79;
float var_80;
float var_81;
mat33 var_82;
mat33 var_83;
mat33 var_84;
mat33 var_85;
mat33 var_86;
float var_87;
float var_88;
float var_89;
df::float3 var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
float var_96;
float var_97;
df::float3 var_98;
float var_99;
float var_100;
df::float3 var_101;
df::float3 var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
df::float3 var_117;
df::float3 var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
df::float3 var_123;
df::float3 var_124;
const float var_125 = 0.0;
float var_126;
df::float3 var_127;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
int adj_23 = 0;
float adj_24 = 0;
int adj_25 = 0;
int adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
df::float3 adj_36 = 0;
df::float3 adj_37 = 0;
df::float3 adj_38 = 0;
df::float3 adj_39 = 0;
df::float3 adj_40 = 0;
df::float3 adj_41 = 0;
mat33 adj_42 = 0;
mat33 adj_43 = 0;
float adj_44 = 0;
float adj_45 = 0;
float adj_46 = 0;
float adj_47 = 0;
float adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
mat33 adj_58 = 0;
mat33 adj_59 = 0;
mat33 adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
float adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
df::float3 adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
df::float3 adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
mat33 adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
mat33 adj_82 = 0;
mat33 adj_83 = 0;
mat33 adj_84 = 0;
mat33 adj_85 = 0;
mat33 adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
float adj_89 = 0;
df::float3 adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
df::float3 adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
float adj_97 = 0;
df::float3 adj_98 = 0;
float adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
df::float3 adj_102 = 0;
df::float3 adj_103 = 0;
df::float3 adj_104 = 0;
df::float3 adj_105 = 0;
df::float3 adj_106 = 0;
float adj_107 = 0;
float adj_108 = 0;
float adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
float adj_112 = 0;
float adj_113 = 0;
float adj_114 = 0;
float adj_115 = 0;
float adj_116 = 0;
df::float3 adj_117 = 0;
df::float3 adj_118 = 0;
df::float3 adj_119 = 0;
df::float3 adj_120 = 0;
df::float3 adj_121 = 0;
df::float3 adj_122 = 0;
df::float3 adj_123 = 0;
df::float3 adj_124 = 0;
float adj_125 = 0;
float adj_126 = 0;
df::float3 adj_127 = 0;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::sub(var_29, var_28);
var_37 = df::sub(var_30, var_28);
var_38 = df::sub(var_31, var_28);
var_39 = df::sub(var_33, var_32);
var_40 = df::sub(var_34, var_32);
var_41 = df::sub(var_35, var_32);
var_42 = df::mat33(var_36, var_37, var_38);
var_43 = df::load(var_pose, var_0);
var_44 = df::determinant(var_43);
var_46 = df::mul(var_44, var_45);
var_48 = df::div(var_47, var_46);
var_49 = df::div(var_21, var_24);
var_50 = df::add(var_47, var_49);
var_52 = df::mul(var_51, var_24);
var_53 = df::div(var_21, var_52);
var_54 = df::sub(var_50, var_53);
var_55 = df::mul(var_21, var_48);
var_56 = df::mul(var_24, var_48);
var_57 = df::mul(var_27, var_48);
var_58 = df::mul(var_42, var_43);
var_59 = df::mat33(var_39, var_40, var_41);
var_60 = df::mul(var_59, var_43);
var_61 = df::index(var_58, var_3, var_3);
var_62 = df::index(var_58, var_7, var_3);
var_63 = df::index(var_58, var_11, var_3);
var_64 = df::float3(var_61, var_62, var_63);
var_65 = df::index(var_58, var_3, var_7);
var_66 = df::index(var_58, var_7, var_7);
var_67 = df::index(var_58, var_11, var_7);
var_68 = df::float3(var_65, var_66, var_67);
var_69 = df::index(var_58, var_3, var_11);
var_70 = df::index(var_58, var_7, var_11);
var_71 = df::index(var_58, var_11, var_11);
var_72 = df::float3(var_69, var_70, var_71);
var_73 = df::dot(var_64, var_64);
var_74 = df::dot(var_68, var_68);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_72, var_72);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_58, var_55);
var_79 = df::add(var_77, var_47);
var_80 = df::div(var_47, var_79);
var_81 = df::sub(var_47, var_80);
var_82 = df::mul(var_78, var_81);
var_83 = df::mul(var_60, var_57);
var_84 = df::add(var_82, var_83);
var_85 = df::transpose(var_43);
var_86 = df::mul(var_84, var_85);
var_87 = df::index(var_86, var_3, var_3);
var_88 = df::index(var_86, var_7, var_3);
var_89 = df::index(var_86, var_11, var_3);
var_90 = df::float3(var_87, var_88, var_89);
var_91 = df::index(var_86, var_3, var_7);
var_92 = df::index(var_86, var_7, var_7);
var_93 = df::index(var_86, var_11, var_7);
var_94 = df::float3(var_91, var_92, var_93);
var_95 = df::index(var_86, var_3, var_11);
var_96 = df::index(var_86, var_7, var_11);
var_97 = df::index(var_86, var_11, var_11);
var_98 = df::float3(var_95, var_96, var_97);
var_99 = df::determinant(var_58);
var_100 = df::div(var_46, var_45);
var_101 = df::cross(var_37, var_38);
var_102 = df::mul(var_101, var_100);
var_103 = df::cross(var_38, var_36);
var_104 = df::mul(var_103, var_100);
var_105 = df::cross(var_36, var_37);
var_106 = df::mul(var_105, var_100);
var_107 = df::sub(var_99, var_54);
var_108 = df::add(var_107, var_18);
var_109 = df::mul(var_108, var_56);
var_110 = df::dot(var_102, var_33);
var_111 = df::dot(var_104, var_34);
var_112 = df::add(var_110, var_111);
var_113 = df::dot(var_106, var_35);
var_114 = df::add(var_112, var_113);
var_115 = df::mul(var_114, var_57);
var_116 = df::add(var_109, var_115);
var_117 = df::mul(var_102, var_116);
var_118 = df::add(var_90, var_117);
var_119 = df::mul(var_104, var_116);
var_120 = df::add(var_94, var_119);
var_121 = df::mul(var_106, var_116);
var_122 = df::add(var_98, var_121);
var_123 = df::add(var_118, var_120);
var_124 = df::add(var_123, var_122);
var_126 = df::sub(var_125, var_47);
var_127 = df::mul(var_124, var_126);
df::atomic_sub(var_f, var_5, var_127);
df::atomic_sub(var_f, var_9, var_118);
df::atomic_sub(var_f, var_13, var_120);
df::atomic_sub(var_f, var_17, var_122);
//---------
// reverse
df::adj_atomic_sub(var_f, var_17, var_122, adj_f, adj_17, adj_122);
df::adj_atomic_sub(var_f, var_13, var_120, adj_f, adj_13, adj_120);
df::adj_atomic_sub(var_f, var_9, var_118, adj_f, adj_9, adj_118);
df::adj_atomic_sub(var_f, var_5, var_127, adj_f, adj_5, adj_127);
df::adj_mul(var_124, var_126, adj_124, adj_126, adj_127);
df::adj_sub(var_125, var_47, adj_125, adj_47, adj_126);
df::adj_add(var_123, var_122, adj_123, adj_122, adj_124);
df::adj_add(var_118, var_120, adj_118, adj_120, adj_123);
df::adj_add(var_98, var_121, adj_98, adj_121, adj_122);
df::adj_mul(var_106, var_116, adj_106, adj_116, adj_121);
df::adj_add(var_94, var_119, adj_94, adj_119, adj_120);
df::adj_mul(var_104, var_116, adj_104, adj_116, adj_119);
df::adj_add(var_90, var_117, adj_90, adj_117, adj_118);
df::adj_mul(var_102, var_116, adj_102, adj_116, adj_117);
df::adj_add(var_109, var_115, adj_109, adj_115, adj_116);
df::adj_mul(var_114, var_57, adj_114, adj_57, adj_115);
df::adj_add(var_112, var_113, adj_112, adj_113, adj_114);
df::adj_dot(var_106, var_35, adj_106, adj_35, adj_113);
df::adj_add(var_110, var_111, adj_110, adj_111, adj_112);
df::adj_dot(var_104, var_34, adj_104, adj_34, adj_111);
df::adj_dot(var_102, var_33, adj_102, adj_33, adj_110);
df::adj_mul(var_108, var_56, adj_108, adj_56, adj_109);
df::adj_add(var_107, var_18, adj_107, adj_18, adj_108);
df::adj_sub(var_99, var_54, adj_99, adj_54, adj_107);
df::adj_mul(var_105, var_100, adj_105, adj_100, adj_106);
df::adj_cross(var_36, var_37, adj_36, adj_37, adj_105);
df::adj_mul(var_103, var_100, adj_103, adj_100, adj_104);
df::adj_cross(var_38, var_36, adj_38, adj_36, adj_103);
df::adj_mul(var_101, var_100, adj_101, adj_100, adj_102);
df::adj_cross(var_37, var_38, adj_37, adj_38, adj_101);
df::adj_div(var_46, var_45, adj_46, adj_45, adj_100);
df::adj_determinant(var_58, adj_58, adj_99);
df::adj_float3(var_95, var_96, var_97, adj_95, adj_96, adj_97, adj_98);
df::adj_index(var_86, var_11, var_11, adj_86, adj_11, adj_11, adj_97);
df::adj_index(var_86, var_7, var_11, adj_86, adj_7, adj_11, adj_96);
df::adj_index(var_86, var_3, var_11, adj_86, adj_3, adj_11, adj_95);
df::adj_float3(var_91, var_92, var_93, adj_91, adj_92, adj_93, adj_94);
df::adj_index(var_86, var_11, var_7, adj_86, adj_11, adj_7, adj_93);
df::adj_index(var_86, var_7, var_7, adj_86, adj_7, adj_7, adj_92);
df::adj_index(var_86, var_3, var_7, adj_86, adj_3, adj_7, adj_91);
df::adj_float3(var_87, var_88, var_89, adj_87, adj_88, adj_89, adj_90);
df::adj_index(var_86, var_11, var_3, adj_86, adj_11, adj_3, adj_89);
df::adj_index(var_86, var_7, var_3, adj_86, adj_7, adj_3, adj_88);
df::adj_index(var_86, var_3, var_3, adj_86, adj_3, adj_3, adj_87);
df::adj_mul(var_84, var_85, adj_84, adj_85, adj_86);
df::adj_transpose(var_43, adj_43, adj_85);
df::adj_add(var_82, var_83, adj_82, adj_83, adj_84);
df::adj_mul(var_60, var_57, adj_60, adj_57, adj_83);
df::adj_mul(var_78, var_81, adj_78, adj_81, adj_82);
df::adj_sub(var_47, var_80, adj_47, adj_80, adj_81);
df::adj_div(var_47, var_79, adj_47, adj_79, adj_80);
df::adj_add(var_77, var_47, adj_77, adj_47, adj_79);
df::adj_mul(var_58, var_55, adj_58, adj_55, adj_78);
df::adj_add(var_75, var_76, adj_75, adj_76, adj_77);
df::adj_dot(var_72, var_72, adj_72, adj_72, adj_76);
df::adj_add(var_73, var_74, adj_73, adj_74, adj_75);
df::adj_dot(var_68, var_68, adj_68, adj_68, adj_74);
df::adj_dot(var_64, var_64, adj_64, adj_64, adj_73);
df::adj_float3(var_69, var_70, var_71, adj_69, adj_70, adj_71, adj_72);
df::adj_index(var_58, var_11, var_11, adj_58, adj_11, adj_11, adj_71);
df::adj_index(var_58, var_7, var_11, adj_58, adj_7, adj_11, adj_70);
df::adj_index(var_58, var_3, var_11, adj_58, adj_3, adj_11, adj_69);
df::adj_float3(var_65, var_66, var_67, adj_65, adj_66, adj_67, adj_68);
df::adj_index(var_58, var_11, var_7, adj_58, adj_11, adj_7, adj_67);
df::adj_index(var_58, var_7, var_7, adj_58, adj_7, adj_7, adj_66);
df::adj_index(var_58, var_3, var_7, adj_58, adj_3, adj_7, adj_65);
df::adj_float3(var_61, var_62, var_63, adj_61, adj_62, adj_63, adj_64);
df::adj_index(var_58, var_11, var_3, adj_58, adj_11, adj_3, adj_63);
df::adj_index(var_58, var_7, var_3, adj_58, adj_7, adj_3, adj_62);
df::adj_index(var_58, var_3, var_3, adj_58, adj_3, adj_3, adj_61);
df::adj_mul(var_59, var_43, adj_59, adj_43, adj_60);
df::adj_mat33(var_39, var_40, var_41, adj_39, adj_40, adj_41, adj_59);
df::adj_mul(var_42, var_43, adj_42, adj_43, adj_58);
df::adj_mul(var_27, var_48, adj_27, adj_48, adj_57);
df::adj_mul(var_24, var_48, adj_24, adj_48, adj_56);
df::adj_mul(var_21, var_48, adj_21, adj_48, adj_55);
df::adj_sub(var_50, var_53, adj_50, adj_53, adj_54);
df::adj_div(var_21, var_52, adj_21, adj_52, adj_53);
df::adj_mul(var_51, var_24, adj_51, adj_24, adj_52);
df::adj_add(var_47, var_49, adj_47, adj_49, adj_50);
df::adj_div(var_21, var_24, adj_21, adj_24, adj_49);
df::adj_div(var_47, var_46, adj_47, adj_46, adj_48);
df::adj_mul(var_44, var_45, adj_44, adj_45, adj_46);
df::adj_determinant(var_43, adj_43, adj_44);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_43);
df::adj_mat33(var_36, var_37, var_38, adj_36, adj_37, adj_38, adj_42);
df::adj_sub(var_35, var_32, adj_35, adj_32, adj_41);
df::adj_sub(var_34, var_32, adj_34, adj_32, adj_40);
df::adj_sub(var_33, var_32, adj_33, adj_32, adj_39);
df::adj_sub(var_31, var_28, adj_31, adj_28, adj_38);
df::adj_sub(var_30, var_28, adj_30, adj_28, adj_37);
df::adj_sub(var_29, var_28, adj_29, adj_28, adj_36);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_35);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_34);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_33);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_32);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_31);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_30);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_29);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_28);
df::adj_load(var_materials, var_26, adj_materials, adj_26, adj_27);
df::adj_add(var_25, var_11, adj_25, adj_11, adj_26);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_25);
df::adj_load(var_materials, var_23, adj_materials, adj_23, adj_24);
df::adj_add(var_22, var_7, adj_22, adj_7, adj_23);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_19, var_3, adj_19, adj_3, adj_20);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_19);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void eval_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_tetrahedra_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
cast<df::float3*>(var_f));
}
}
void eval_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_tetrahedra_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<mat33*>(adj_pose),
cast<float*>(adj_activation),
cast<float*>(adj_materials),
cast<df::float3*>(adj_f));
}
}
// Python entry point declarations
void eval_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f);
void eval_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
torch::Tensor adj_f);
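// eval_contacts: per-particle penalty contact against a ground plane with
// normal n = (0, 1, 0) and a 0.01 contact offset. The penetration depth
// c = min(dot(n, x) - 0.01, 0) drives a normal force ke * c * n and a damping
// force kd * min(dot(n, v), 0) * n; tangential friction along x and z scales
// the tangential velocity by kf and clamps it to [mu * c * ke, -mu * c * ke].
// Damping and friction are gated by step(c) so they act only while in contact,
// and the total force is subtracted from f[tid] with atomic_sub.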
void eval_contacts_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
const float var_3 = 0.0;
const float var_4 = 1.0;
df::float3 var_5;
float var_6;
const float var_7 = 0.01;
float var_8;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
df::float3 var_16;
df::float3 var_17;
float var_18;
float var_19;
float var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
float var_25;
float var_26;
df::float3 var_27;
df::float3 var_28;
float var_29;
df::float3 var_30;
df::float3 var_31;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_5 = df::float3(var_3, var_4, var_3);
var_6 = df::dot(var_5, var_1);
var_8 = df::sub(var_6, var_7);
var_9 = df::min(var_8, var_3);
var_10 = df::dot(var_5, var_2);
var_11 = df::mul(var_5, var_10);
var_12 = df::sub(var_2, var_11);
var_13 = df::mul(var_5, var_9);
var_14 = df::mul(var_13, var_ke);
var_15 = df::min(var_10, var_3);
var_16 = df::mul(var_5, var_15);
var_17 = df::mul(var_16, var_kd);
var_18 = df::mul(var_mu, var_9);
var_19 = df::mul(var_18, var_ke);
var_20 = df::sub(var_3, var_19);
var_21 = df::float3(var_kf, var_3, var_3);
var_22 = df::dot(var_21, var_12);
var_23 = df::clamp(var_22, var_19, var_20);
var_24 = df::float3(var_3, var_3, var_kf);
var_25 = df::dot(var_24, var_12);
var_26 = df::clamp(var_25, var_19, var_20);
var_27 = df::float3(var_23, var_3, var_26);
var_28 = df::add(var_17, var_27);
var_29 = df::step(var_9);
var_30 = df::mul(var_28, var_29);
var_31 = df::add(var_14, var_30);
df::atomic_sub(var_f, var_0, var_31);
}
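// Note for the backward kernel below: min, clamp and step are only piecewise
// differentiable, so their adjoints presumably route the incoming gradient to
// the active argument (step typically contributing nothing). Also, the scalar
// adjoints adj_ke, adj_kd, adj_kf and adj_mu are passed by value, so gradients
// with respect to the material constants do not propagate out of this CPU
// entry point.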
void eval_contacts_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
const float var_3 = 0.0;
const float var_4 = 1.0;
df::float3 var_5;
float var_6;
const float var_7 = 0.01;
float var_8;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
df::float3 var_16;
df::float3 var_17;
float var_18;
float var_19;
float var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
float var_25;
float var_26;
df::float3 var_27;
df::float3 var_28;
float var_29;
df::float3 var_30;
df::float3 var_31;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
df::float3 adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
df::float3 adj_11 = 0;
df::float3 adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
float adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
df::float3 adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
float adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_5 = df::float3(var_3, var_4, var_3);
var_6 = df::dot(var_5, var_1);
var_8 = df::sub(var_6, var_7);
var_9 = df::min(var_8, var_3);
var_10 = df::dot(var_5, var_2);
var_11 = df::mul(var_5, var_10);
var_12 = df::sub(var_2, var_11);
var_13 = df::mul(var_5, var_9);
var_14 = df::mul(var_13, var_ke);
var_15 = df::min(var_10, var_3);
var_16 = df::mul(var_5, var_15);
var_17 = df::mul(var_16, var_kd);
var_18 = df::mul(var_mu, var_9);
var_19 = df::mul(var_18, var_ke);
var_20 = df::sub(var_3, var_19);
var_21 = df::float3(var_kf, var_3, var_3);
var_22 = df::dot(var_21, var_12);
var_23 = df::clamp(var_22, var_19, var_20);
var_24 = df::float3(var_3, var_3, var_kf);
var_25 = df::dot(var_24, var_12);
var_26 = df::clamp(var_25, var_19, var_20);
var_27 = df::float3(var_23, var_3, var_26);
var_28 = df::add(var_17, var_27);
var_29 = df::step(var_9);
var_30 = df::mul(var_28, var_29);
var_31 = df::add(var_14, var_30);
df::atomic_sub(var_f, var_0, var_31);
//---------
// reverse
df::adj_atomic_sub(var_f, var_0, var_31, adj_f, adj_0, adj_31);
df::adj_add(var_14, var_30, adj_14, adj_30, adj_31);
df::adj_mul(var_28, var_29, adj_28, adj_29, adj_30);
df::adj_step(var_9, adj_9, adj_29);
df::adj_add(var_17, var_27, adj_17, adj_27, adj_28);
df::adj_float3(var_23, var_3, var_26, adj_23, adj_3, adj_26, adj_27);
df::adj_clamp(var_25, var_19, var_20, adj_25, adj_19, adj_20, adj_26);
df::adj_dot(var_24, var_12, adj_24, adj_12, adj_25);
df::adj_float3(var_3, var_3, var_kf, adj_3, adj_3, adj_kf, adj_24);
df::adj_clamp(var_22, var_19, var_20, adj_22, adj_19, adj_20, adj_23);
df::adj_dot(var_21, var_12, adj_21, adj_12, adj_22);
df::adj_float3(var_kf, var_3, var_3, adj_kf, adj_3, adj_3, adj_21);
df::adj_sub(var_3, var_19, adj_3, adj_19, adj_20);
df::adj_mul(var_18, var_ke, adj_18, adj_ke, adj_19);
df::adj_mul(var_mu, var_9, adj_mu, adj_9, adj_18);
df::adj_mul(var_16, var_kd, adj_16, adj_kd, adj_17);
df::adj_mul(var_5, var_15, adj_5, adj_15, adj_16);
df::adj_min(var_10, var_3, adj_10, adj_3, adj_15);
df::adj_mul(var_13, var_ke, adj_13, adj_ke, adj_14);
df::adj_mul(var_5, var_9, adj_5, adj_9, adj_13);
df::adj_sub(var_2, var_11, adj_2, adj_11, adj_12);
df::adj_mul(var_5, var_10, adj_5, adj_10, adj_11);
df::adj_dot(var_5, var_2, adj_5, adj_2, adj_10);
df::adj_min(var_8, var_3, adj_8, adj_3, adj_9);
df::adj_sub(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_dot(var_5, var_1, adj_5, adj_1, adj_6);
df::adj_float3(var_3, var_4, var_3, adj_3, adj_4, adj_3, adj_5);
df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
return;
}
// Python entry points
void eval_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_contacts_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
var_ke,
var_kd,
var_kf,
var_mu,
cast<df::float3*>(var_f));
}
}
void eval_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_contacts_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
var_ke,
var_kd,
var_kf,
var_mu,
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
adj_ke,
adj_kd,
adj_kf,
adj_mu,
cast<df::float3*>(adj_f));
}
}
// Python entry point declarations
void eval_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f);
void eval_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_f);
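// eval_soft_contacts: contacts between particles and collision shapes. The
// thread id is split as shape = tid / num_particles and
// particle = tid % num_particles. The particle position is transformed into
// the shape's local frame, a signed distance and its gradient are evaluated
// for the shape's geometry type (0: sphere, 1: box, 2: capsule, each with a
// 0.01 contact margin), and the gradient is rotated back to world space to act
// as the contact normal. The contact force uses the same penalty normal /
// damping / clamped-friction construction as the plane contact above; it is
// subtracted from the particle force and, when the shape is attached to a body
// (shape_body[shape] >= 0), the corresponding spatial wrench
// (cross(x, f), f) is also subtracted from body_f.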
void eval_soft_contacts_cpu_kernel_forward(
int var_num_particles,
df::float3* var_particle_x,
df::float3* var_particle_v,
spatial_transform* var_body_X_sc,
spatial_vector* var_body_v_sc,
spatial_transform* var_shape_X_co,
int* var_shape_body,
int* var_shape_geo_type,
int* var_shape_geo_src,
df::float3* var_shape_geo_scale,
float* var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_particle_f,
spatial_vector* var_body_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
df::float3 var_5;
spatial_transform var_6;
const int var_7 = 0;
bool var_8;
spatial_transform var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_transform var_12;
spatial_transform var_13;
df::float3 var_14;
int var_15;
df::float3 var_16;
const float var_17 = 0.01;
const float var_18 = 0.0;
df::float3 var_19;
bool var_20;
df::float3 var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
float var_30;
df::float3 var_31;
const int var_32 = 1;
bool var_33;
float var_34;
float var_35;
float var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
df::float3 var_40;
const int var_41 = 2;
bool var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
spatial_vector var_54;
bool var_55;
spatial_vector var_56;
spatial_vector var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
float var_68;
df::float3 var_69;
df::float3 var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
df::float3 var_77;
float var_78;
float var_79;
df::float3 var_80;
df::float3 var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
bool var_86;
spatial_vector var_87;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_shape_body, var_1);
var_4 = df::load(var_particle_x, var_2);
var_5 = df::load(var_particle_v, var_2);
var_6 = df::spatial_transform_identity();
var_8 = (var_3 >= var_7);
if (var_8) {
var_9 = df::load(var_body_X_sc, var_3);
}
var_10 = df::select(var_8, var_6, var_9);
var_11 = df::load(var_shape_X_co, var_1);
var_12 = df::spatial_transform_multiply(var_10, var_11);
var_13 = spatial_transform_inverse_cpu_func(var_12);
var_14 = df::spatial_transform_point(var_13, var_4);
var_15 = df::load(var_shape_geo_type, var_1);
var_16 = df::load(var_shape_geo_scale, var_1);
var_19 = df::float3(var_18, var_18, var_18);
var_20 = (var_15 == var_7);
if (var_20) {
var_21 = df::float3(var_18, var_18, var_18);
var_22 = df::index(var_16, var_7);
var_23 = sphere_sdf_cpu_func(var_21, var_22, var_14);
var_24 = df::sub(var_23, var_17);
var_25 = df::min(var_24, var_18);
var_26 = df::float3(var_18, var_18, var_18);
var_27 = df::index(var_16, var_7);
var_28 = sphere_sdf_grad_cpu_func(var_26, var_27, var_14);
var_29 = df::spatial_transform_vector(var_12, var_28);
}
var_30 = df::select(var_20, var_18, var_25);
var_31 = df::select(var_20, var_19, var_29);
var_33 = (var_15 == var_32);
if (var_33) {
var_34 = box_sdf_cpu_func(var_16, var_14);
var_35 = df::sub(var_34, var_17);
var_36 = df::min(var_35, var_18);
var_37 = box_sdf_grad_cpu_func(var_16, var_14);
var_38 = df::spatial_transform_vector(var_12, var_37);
}
var_39 = df::select(var_33, var_30, var_36);
var_40 = df::select(var_33, var_31, var_38);
var_42 = (var_15 == var_41);
if (var_42) {
var_43 = df::index(var_16, var_7);
var_44 = df::index(var_16, var_32);
var_45 = capsule_sdf_cpu_func(var_43, var_44, var_14);
var_46 = df::sub(var_45, var_17);
var_47 = df::min(var_46, var_18);
var_48 = df::index(var_16, var_7);
var_49 = df::index(var_16, var_32);
var_50 = capsule_sdf_grad_cpu_func(var_48, var_49, var_14);
var_51 = df::spatial_transform_vector(var_12, var_50);
}
var_52 = df::select(var_42, var_39, var_47);
var_53 = df::select(var_42, var_40, var_51);
var_54 = df::spatial_vector();
var_55 = (var_3 >= var_7);
if (var_55) {
var_56 = df::load(var_body_v_sc, var_3);
}
var_57 = df::select(var_55, var_54, var_56);
var_58 = df::spatial_top(var_57);
var_59 = df::spatial_bottom(var_57);
var_60 = df::cross(var_58, var_4);
var_61 = df::add(var_59, var_60);
var_62 = df::sub(var_5, var_61);
var_63 = df::dot(var_53, var_62);
var_64 = df::mul(var_53, var_63);
var_65 = df::sub(var_62, var_64);
var_66 = df::mul(var_53, var_52);
var_67 = df::mul(var_66, var_ke);
var_68 = df::min(var_63, var_18);
var_69 = df::mul(var_53, var_68);
var_70 = df::mul(var_69, var_kd);
var_71 = df::mul(var_mu, var_52);
var_72 = df::mul(var_71, var_ke);
var_73 = df::sub(var_18, var_72);
var_74 = df::float3(var_kf, var_18, var_18);
var_75 = df::dot(var_74, var_65);
var_76 = df::clamp(var_75, var_72, var_73);
var_77 = df::float3(var_18, var_18, var_kf);
var_78 = df::dot(var_77, var_65);
var_79 = df::clamp(var_78, var_72, var_73);
var_80 = df::float3(var_76, var_18, var_79);
var_81 = df::add(var_70, var_80);
var_82 = df::step(var_52);
var_83 = df::mul(var_81, var_82);
var_84 = df::add(var_67, var_83);
var_85 = df::cross(var_4, var_84);
df::atomic_sub(var_particle_f, var_2, var_84);
var_86 = (var_3 >= var_7);
if (var_86) {
var_87 = df::spatial_vector(var_85, var_84);
df::atomic_sub(var_body_f, var_3, var_87);
}
}
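// The backward kernel below replays the geometry-type branches and reverses
// through df::adj_select for each branch result, calling the adjoint SDF
// helpers (adj_sphere_sdf_cpu_func, adj_box_sdf_cpu_func,
// adj_capsule_sdf_cpu_func and their *_grad counterparts) only for the branch
// that was actually taken, before propagating back through the spatial
// transforms into adj_particle_x, adj_particle_v, adj_body_X_sc and
// adj_body_v_sc.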
void eval_soft_contacts_cpu_kernel_backward(
int var_num_particles,
df::float3* var_particle_x,
df::float3* var_particle_v,
spatial_transform* var_body_X_sc,
spatial_vector* var_body_v_sc,
spatial_transform* var_shape_X_co,
int* var_shape_body,
int* var_shape_geo_type,
int* var_shape_geo_src,
df::float3* var_shape_geo_scale,
float* var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_particle_f,
spatial_vector* var_body_f,
int adj_num_particles,
df::float3* adj_particle_x,
df::float3* adj_particle_v,
spatial_transform* adj_body_X_sc,
spatial_vector* adj_body_v_sc,
spatial_transform* adj_shape_X_co,
int* adj_shape_body,
int* adj_shape_geo_type,
int* adj_shape_geo_src,
df::float3* adj_shape_geo_scale,
float* adj_shape_materials,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
df::float3* adj_particle_f,
spatial_vector* adj_body_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
df::float3 var_5;
spatial_transform var_6;
const int var_7 = 0;
bool var_8;
spatial_transform var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_transform var_12;
spatial_transform var_13;
df::float3 var_14;
int var_15;
df::float3 var_16;
const float var_17 = 0.01;
const float var_18 = 0.0;
df::float3 var_19;
bool var_20;
df::float3 var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
float var_30;
df::float3 var_31;
const int var_32 = 1;
bool var_33;
float var_34;
float var_35;
float var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
df::float3 var_40;
const int var_41 = 2;
bool var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
spatial_vector var_54;
bool var_55;
spatial_vector var_56;
spatial_vector var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
float var_68;
df::float3 var_69;
df::float3 var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
df::float3 var_77;
float var_78;
float var_79;
df::float3 var_80;
df::float3 var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
bool var_86;
spatial_vector var_87;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
spatial_transform adj_6 = 0;
int adj_7 = 0;
bool adj_8 = 0;
spatial_transform adj_9 = 0;
spatial_transform adj_10 = 0;
spatial_transform adj_11 = 0;
spatial_transform adj_12 = 0;
spatial_transform adj_13 = 0;
df::float3 adj_14 = 0;
int adj_15 = 0;
df::float3 adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
bool adj_20 = 0;
df::float3 adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
float adj_30 = 0;
df::float3 adj_31 = 0;
int adj_32 = 0;
bool adj_33 = 0;
float adj_34 = 0;
float adj_35 = 0;
float adj_36 = 0;
df::float3 adj_37 = 0;
df::float3 adj_38 = 0;
float adj_39 = 0;
df::float3 adj_40 = 0;
int adj_41 = 0;
bool adj_42 = 0;
float adj_43 = 0;
float adj_44 = 0;
float adj_45 = 0;
float adj_46 = 0;
float adj_47 = 0;
float adj_48 = 0;
float adj_49 = 0;
df::float3 adj_50 = 0;
df::float3 adj_51 = 0;
float adj_52 = 0;
df::float3 adj_53 = 0;
spatial_vector adj_54 = 0;
bool adj_55 = 0;
spatial_vector adj_56 = 0;
spatial_vector adj_57 = 0;
df::float3 adj_58 = 0;
df::float3 adj_59 = 0;
df::float3 adj_60 = 0;
df::float3 adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
df::float3 adj_65 = 0;
df::float3 adj_66 = 0;
df::float3 adj_67 = 0;
float adj_68 = 0;
df::float3 adj_69 = 0;
df::float3 adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
df::float3 adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
df::float3 adj_77 = 0;
float adj_78 = 0;
float adj_79 = 0;
df::float3 adj_80 = 0;
df::float3 adj_81 = 0;
float adj_82 = 0;
df::float3 adj_83 = 0;
df::float3 adj_84 = 0;
df::float3 adj_85 = 0;
bool adj_86 = 0;
spatial_vector adj_87 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_shape_body, var_1);
var_4 = df::load(var_particle_x, var_2);
var_5 = df::load(var_particle_v, var_2);
var_6 = df::spatial_transform_identity();
var_8 = (var_3 >= var_7);
if (var_8) {
var_9 = df::load(var_body_X_sc, var_3);
}
var_10 = df::select(var_8, var_6, var_9);
var_11 = df::load(var_shape_X_co, var_1);
var_12 = df::spatial_transform_multiply(var_10, var_11);
var_13 = spatial_transform_inverse_cpu_func(var_12);
var_14 = df::spatial_transform_point(var_13, var_4);
var_15 = df::load(var_shape_geo_type, var_1);
var_16 = df::load(var_shape_geo_scale, var_1);
var_19 = df::float3(var_18, var_18, var_18);
var_20 = (var_15 == var_7);
if (var_20) {
var_21 = df::float3(var_18, var_18, var_18);
var_22 = df::index(var_16, var_7);
var_23 = sphere_sdf_cpu_func(var_21, var_22, var_14);
var_24 = df::sub(var_23, var_17);
var_25 = df::min(var_24, var_18);
var_26 = df::float3(var_18, var_18, var_18);
var_27 = df::index(var_16, var_7);
var_28 = sphere_sdf_grad_cpu_func(var_26, var_27, var_14);
var_29 = df::spatial_transform_vector(var_12, var_28);
}
var_30 = df::select(var_20, var_18, var_25);
var_31 = df::select(var_20, var_19, var_29);
var_33 = (var_15 == var_32);
if (var_33) {
var_34 = box_sdf_cpu_func(var_16, var_14);
var_35 = df::sub(var_34, var_17);
var_36 = df::min(var_35, var_18);
var_37 = box_sdf_grad_cpu_func(var_16, var_14);
var_38 = df::spatial_transform_vector(var_12, var_37);
}
var_39 = df::select(var_33, var_30, var_36);
var_40 = df::select(var_33, var_31, var_38);
var_42 = (var_15 == var_41);
if (var_42) {
var_43 = df::index(var_16, var_7);
var_44 = df::index(var_16, var_32);
var_45 = capsule_sdf_cpu_func(var_43, var_44, var_14);
var_46 = df::sub(var_45, var_17);
var_47 = df::min(var_46, var_18);
var_48 = df::index(var_16, var_7);
var_49 = df::index(var_16, var_32);
var_50 = capsule_sdf_grad_cpu_func(var_48, var_49, var_14);
var_51 = df::spatial_transform_vector(var_12, var_50);
}
var_52 = df::select(var_42, var_39, var_47);
var_53 = df::select(var_42, var_40, var_51);
var_54 = df::spatial_vector();
var_55 = (var_3 >= var_7);
if (var_55) {
var_56 = df::load(var_body_v_sc, var_3);
}
var_57 = df::select(var_55, var_54, var_56);
var_58 = df::spatial_top(var_57);
var_59 = df::spatial_bottom(var_57);
var_60 = df::cross(var_58, var_4);
var_61 = df::add(var_59, var_60);
var_62 = df::sub(var_5, var_61);
var_63 = df::dot(var_53, var_62);
var_64 = df::mul(var_53, var_63);
var_65 = df::sub(var_62, var_64);
var_66 = df::mul(var_53, var_52);
var_67 = df::mul(var_66, var_ke);
var_68 = df::min(var_63, var_18);
var_69 = df::mul(var_53, var_68);
var_70 = df::mul(var_69, var_kd);
var_71 = df::mul(var_mu, var_52);
var_72 = df::mul(var_71, var_ke);
var_73 = df::sub(var_18, var_72);
var_74 = df::float3(var_kf, var_18, var_18);
var_75 = df::dot(var_74, var_65);
var_76 = df::clamp(var_75, var_72, var_73);
var_77 = df::float3(var_18, var_18, var_kf);
var_78 = df::dot(var_77, var_65);
var_79 = df::clamp(var_78, var_72, var_73);
var_80 = df::float3(var_76, var_18, var_79);
var_81 = df::add(var_70, var_80);
var_82 = df::step(var_52);
var_83 = df::mul(var_81, var_82);
var_84 = df::add(var_67, var_83);
var_85 = df::cross(var_4, var_84);
df::atomic_sub(var_particle_f, var_2, var_84);
var_86 = (var_3 >= var_7);
if (var_86) {
var_87 = df::spatial_vector(var_85, var_84);
df::atomic_sub(var_body_f, var_3, var_87);
}
//---------
// reverse
if (var_86) {
df::adj_atomic_sub(var_body_f, var_3, var_87, adj_body_f, adj_3, adj_87);
df::adj_spatial_vector(var_85, var_84, adj_85, adj_84, adj_87);
}
df::adj_atomic_sub(var_particle_f, var_2, var_84, adj_particle_f, adj_2, adj_84);
df::adj_cross(var_4, var_84, adj_4, adj_84, adj_85);
df::adj_add(var_67, var_83, adj_67, adj_83, adj_84);
df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
df::adj_step(var_52, adj_52, adj_82);
df::adj_add(var_70, var_80, adj_70, adj_80, adj_81);
df::adj_float3(var_76, var_18, var_79, adj_76, adj_18, adj_79, adj_80);
df::adj_clamp(var_78, var_72, var_73, adj_78, adj_72, adj_73, adj_79);
df::adj_dot(var_77, var_65, adj_77, adj_65, adj_78);
df::adj_float3(var_18, var_18, var_kf, adj_18, adj_18, adj_kf, adj_77);
df::adj_clamp(var_75, var_72, var_73, adj_75, adj_72, adj_73, adj_76);
df::adj_dot(var_74, var_65, adj_74, adj_65, adj_75);
df::adj_float3(var_kf, var_18, var_18, adj_kf, adj_18, adj_18, adj_74);
df::adj_sub(var_18, var_72, adj_18, adj_72, adj_73);
df::adj_mul(var_71, var_ke, adj_71, adj_ke, adj_72);
df::adj_mul(var_mu, var_52, adj_mu, adj_52, adj_71);
df::adj_mul(var_69, var_kd, adj_69, adj_kd, adj_70);
df::adj_mul(var_53, var_68, adj_53, adj_68, adj_69);
df::adj_min(var_63, var_18, adj_63, adj_18, adj_68);
df::adj_mul(var_66, var_ke, adj_66, adj_ke, adj_67);
df::adj_mul(var_53, var_52, adj_53, adj_52, adj_66);
df::adj_sub(var_62, var_64, adj_62, adj_64, adj_65);
df::adj_mul(var_53, var_63, adj_53, adj_63, adj_64);
df::adj_dot(var_53, var_62, adj_53, adj_62, adj_63);
df::adj_sub(var_5, var_61, adj_5, adj_61, adj_62);
df::adj_add(var_59, var_60, adj_59, adj_60, adj_61);
df::adj_cross(var_58, var_4, adj_58, adj_4, adj_60);
df::adj_spatial_bottom(var_57, adj_57, adj_59);
df::adj_spatial_top(var_57, adj_57, adj_58);
df::adj_select(var_55, var_54, var_56, adj_55, adj_54, adj_56, adj_57);
if (var_55) {
df::adj_load(var_body_v_sc, var_3, adj_body_v_sc, adj_3, adj_56);
}
df::adj_select(var_42, var_40, var_51, adj_42, adj_40, adj_51, adj_53);
df::adj_select(var_42, var_39, var_47, adj_42, adj_39, adj_47, adj_52);
if (var_42) {
df::adj_spatial_transform_vector(var_12, var_50, adj_12, adj_50, adj_51);
adj_capsule_sdf_grad_cpu_func(var_48, var_49, var_14, adj_48, adj_49, adj_14, adj_50);
df::adj_index(var_16, var_32, adj_16, adj_32, adj_49);
df::adj_index(var_16, var_7, adj_16, adj_7, adj_48);
df::adj_min(var_46, var_18, adj_46, adj_18, adj_47);
df::adj_sub(var_45, var_17, adj_45, adj_17, adj_46);
adj_capsule_sdf_cpu_func(var_43, var_44, var_14, adj_43, adj_44, adj_14, adj_45);
df::adj_index(var_16, var_32, adj_16, adj_32, adj_44);
df::adj_index(var_16, var_7, adj_16, adj_7, adj_43);
}
df::adj_select(var_33, var_31, var_38, adj_33, adj_31, adj_38, adj_40);
df::adj_select(var_33, var_30, var_36, adj_33, adj_30, adj_36, adj_39);
if (var_33) {
df::adj_spatial_transform_vector(var_12, var_37, adj_12, adj_37, adj_38);
adj_box_sdf_grad_cpu_func(var_16, var_14, adj_16, adj_14, adj_37);
df::adj_min(var_35, var_18, adj_35, adj_18, adj_36);
df::adj_sub(var_34, var_17, adj_34, adj_17, adj_35);
adj_box_sdf_cpu_func(var_16, var_14, adj_16, adj_14, adj_34);
}
df::adj_select(var_20, var_19, var_29, adj_20, adj_19, adj_29, adj_31);
df::adj_select(var_20, var_18, var_25, adj_20, adj_18, adj_25, adj_30);
if (var_20) {
df::adj_spatial_transform_vector(var_12, var_28, adj_12, adj_28, adj_29);
adj_sphere_sdf_grad_cpu_func(var_26, var_27, var_14, adj_26, adj_27, adj_14, adj_28);
df::adj_index(var_16, var_7, adj_16, adj_7, adj_27);
df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_26);
df::adj_min(var_24, var_18, adj_24, adj_18, adj_25);
df::adj_sub(var_23, var_17, adj_23, adj_17, adj_24);
adj_sphere_sdf_cpu_func(var_21, var_22, var_14, adj_21, adj_22, adj_14, adj_23);
df::adj_index(var_16, var_7, adj_16, adj_7, adj_22);
df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_21);
}
df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_19);
df::adj_load(var_shape_geo_scale, var_1, adj_shape_geo_scale, adj_1, adj_16);
df::adj_load(var_shape_geo_type, var_1, adj_shape_geo_type, adj_1, adj_15);
df::adj_spatial_transform_point(var_13, var_4, adj_13, adj_4, adj_14);
adj_spatial_transform_inverse_cpu_func(var_12, adj_12, adj_13);
df::adj_spatial_transform_multiply(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_load(var_shape_X_co, var_1, adj_shape_X_co, adj_1, adj_11);
df::adj_select(var_8, var_6, var_9, adj_8, adj_6, adj_9, adj_10);
if (var_8) {
df::adj_load(var_body_X_sc, var_3, adj_body_X_sc, adj_3, adj_9);
}
df::adj_load(var_particle_v, var_2, adj_particle_v, adj_2, adj_5);
df::adj_load(var_particle_x, var_2, adj_particle_x, adj_2, adj_4);
df::adj_load(var_shape_body, var_1, adj_shape_body, adj_1, adj_3);
df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
return;
}
// Python entry points
void eval_soft_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_particle_x,
torch::Tensor var_particle_v,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_v_sc,
torch::Tensor var_shape_X_co,
torch::Tensor var_shape_body,
torch::Tensor var_shape_geo_type,
torch::Tensor var_shape_geo_src,
torch::Tensor var_shape_geo_scale,
torch::Tensor var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_particle_f,
torch::Tensor var_body_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_soft_contacts_cpu_kernel_forward(
var_num_particles,
cast<df::float3*>(var_particle_x),
cast<df::float3*>(var_particle_v),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_vector*>(var_body_v_sc),
cast<spatial_transform*>(var_shape_X_co),
cast<int*>(var_shape_body),
cast<int*>(var_shape_geo_type),
cast<int*>(var_shape_geo_src),
cast<df::float3*>(var_shape_geo_scale),
cast<float*>(var_shape_materials),
var_ke,
var_kd,
var_kf,
var_mu,
cast<df::float3*>(var_particle_f),
cast<spatial_vector*>(var_body_f));
}
}
void eval_soft_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_particle_x,
torch::Tensor var_particle_v,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_v_sc,
torch::Tensor var_shape_X_co,
torch::Tensor var_shape_body,
torch::Tensor var_shape_geo_type,
torch::Tensor var_shape_geo_src,
torch::Tensor var_shape_geo_scale,
torch::Tensor var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_particle_f,
torch::Tensor var_body_f,
int adj_num_particles,
torch::Tensor adj_particle_x,
torch::Tensor adj_particle_v,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_v_sc,
torch::Tensor adj_shape_X_co,
torch::Tensor adj_shape_body,
torch::Tensor adj_shape_geo_type,
torch::Tensor adj_shape_geo_src,
torch::Tensor adj_shape_geo_scale,
torch::Tensor adj_shape_materials,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_particle_f,
torch::Tensor adj_body_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_soft_contacts_cpu_kernel_backward(
var_num_particles,
cast<df::float3*>(var_particle_x),
cast<df::float3*>(var_particle_v),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_vector*>(var_body_v_sc),
cast<spatial_transform*>(var_shape_X_co),
cast<int*>(var_shape_body),
cast<int*>(var_shape_geo_type),
cast<int*>(var_shape_geo_src),
cast<df::float3*>(var_shape_geo_scale),
cast<float*>(var_shape_materials),
var_ke,
var_kd,
var_kf,
var_mu,
cast<df::float3*>(var_particle_f),
cast<spatial_vector*>(var_body_f),
adj_num_particles,
cast<df::float3*>(adj_particle_x),
cast<df::float3*>(adj_particle_v),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_vector*>(adj_body_v_sc),
cast<spatial_transform*>(adj_shape_X_co),
cast<int*>(adj_shape_body),
cast<int*>(adj_shape_geo_type),
cast<int*>(adj_shape_geo_src),
cast<df::float3*>(adj_shape_geo_scale),
cast<float*>(adj_shape_materials),
adj_ke,
adj_kd,
adj_kf,
adj_mu,
cast<df::float3*>(adj_particle_f),
cast<spatial_vector*>(adj_body_f));
}
}
// Python entry point declarations
void eval_soft_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_particle_x,
torch::Tensor var_particle_v,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_v_sc,
torch::Tensor var_shape_X_co,
torch::Tensor var_shape_body,
torch::Tensor var_shape_geo_type,
torch::Tensor var_shape_geo_src,
torch::Tensor var_shape_geo_scale,
torch::Tensor var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_particle_f,
torch::Tensor var_body_f);
void eval_soft_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_particle_x,
torch::Tensor var_particle_v,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_v_sc,
torch::Tensor var_shape_X_co,
torch::Tensor var_shape_body,
torch::Tensor var_shape_geo_type,
torch::Tensor var_shape_geo_src,
torch::Tensor var_shape_geo_scale,
torch::Tensor var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_particle_f,
torch::Tensor var_body_f,
int adj_num_particles,
torch::Tensor adj_particle_x,
torch::Tensor adj_particle_v,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_v_sc,
torch::Tensor adj_shape_X_co,
torch::Tensor adj_shape_body,
torch::Tensor adj_shape_geo_type,
torch::Tensor adj_shape_geo_src,
torch::Tensor adj_shape_geo_scale,
torch::Tensor adj_shape_materials,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_particle_f,
torch::Tensor adj_body_f);
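// eval_rigid_contacts: one thread per precomputed contact. Each contact stores
// a body index, a body-local contact point, a contact distance and a material
// index; `materials` holds 4 floats per material (likely ke, kd, kf, mu). The
// contact point is rotated into world space and offset along the ground normal
// n = (0, 1, 0) by the contact distance, the contact-point velocity is
// v + cross(w, r) with r the moment arm from the body origin, and the same
// penalty normal / damping / clamped-friction force as above is formed. The
// force and the torque cross(r, f) are then subtracted from rigid_f and
// rigid_t for the body.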
void eval_rigid_contacts_cpu_kernel_forward(
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
df::float3* var_rigid_f,
df::float3* var_rigid_t)
{
//---------
// primal vars
int var_0;
int var_1;
df::float3 var_2;
float var_3;
int var_4;
const int var_5 = 4;
int var_6;
const int var_7 = 0;
int var_8;
float var_9;
int var_10;
const int var_11 = 1;
int var_12;
float var_13;
int var_14;
const int var_15 = 2;
int var_16;
float var_17;
int var_18;
const int var_19 = 3;
int var_20;
float var_21;
df::float3 var_22;
quat var_23;
df::float3 var_24;
df::float3 var_25;
const float var_26 = 0.0;
const float var_27 = 1.0;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
float var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
float var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
df::float3 var_49;
float var_50;
float var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
float var_58;
df::float3 var_59;
df::float3 var_60;
df::float3 var_61;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_contact_body, var_0);
var_2 = df::load(var_contact_point, var_0);
var_3 = df::load(var_contact_dist, var_0);
var_4 = df::load(var_contact_mat, var_0);
var_6 = df::mul(var_4, var_5);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_materials, var_8);
var_10 = df::mul(var_4, var_5);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_materials, var_12);
var_14 = df::mul(var_4, var_5);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_materials, var_16);
var_18 = df::mul(var_4, var_5);
var_20 = df::add(var_18, var_19);
var_21 = df::load(var_materials, var_20);
var_22 = df::load(var_rigid_x, var_1);
var_23 = df::load(var_rigid_r, var_1);
var_24 = df::load(var_rigid_v, var_1);
var_25 = df::load(var_rigid_w, var_1);
var_28 = df::float3(var_26, var_27, var_26);
var_29 = df::rotate(var_23, var_2);
var_30 = df::add(var_22, var_29);
var_31 = df::mul(var_28, var_3);
var_32 = df::sub(var_30, var_31);
var_33 = df::sub(var_32, var_22);
var_34 = df::cross(var_25, var_33);
var_35 = df::add(var_24, var_34);
var_36 = df::dot(var_28, var_32);
var_37 = df::min(var_36, var_26);
var_38 = df::dot(var_28, var_35);
var_39 = df::mul(var_28, var_38);
var_40 = df::sub(var_35, var_39);
var_41 = df::mul(var_37, var_9);
var_42 = df::min(var_38, var_26);
var_43 = df::mul(var_42, var_13);
var_44 = df::step(var_37);
var_45 = df::mul(var_43, var_44);
var_46 = df::add(var_41, var_45);
var_47 = df::mul(var_21, var_46);
var_48 = df::sub(var_26, var_47);
var_49 = df::float3(var_17, var_26, var_26);
var_50 = df::dot(var_49, var_40);
var_51 = df::clamp(var_50, var_47, var_48);
var_52 = df::float3(var_26, var_26, var_17);
var_53 = df::dot(var_52, var_40);
var_54 = df::clamp(var_53, var_47, var_48);
var_55 = df::float3(var_51, var_26, var_54);
var_56 = df::step(var_37);
var_57 = df::mul(var_55, var_56);
var_58 = df::add(var_41, var_45);
var_59 = df::mul(var_28, var_58);
var_60 = df::add(var_59, var_57);
var_61 = df::cross(var_33, var_60);
df::atomic_sub(var_rigid_f, var_1, var_60);
df::atomic_sub(var_rigid_t, var_1, var_61);
}
void eval_rigid_contacts_cpu_kernel_backward(
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
df::float3* var_rigid_f,
df::float3* var_rigid_t,
df::float3* adj_rigid_x,
quat* adj_rigid_r,
df::float3* adj_rigid_v,
df::float3* adj_rigid_w,
int* adj_contact_body,
df::float3* adj_contact_point,
float* adj_contact_dist,
int* adj_contact_mat,
float* adj_materials,
df::float3* adj_rigid_f,
df::float3* adj_rigid_t)
{
//---------
// primal vars
int var_0;
int var_1;
df::float3 var_2;
float var_3;
int var_4;
const int var_5 = 4;
int var_6;
const int var_7 = 0;
int var_8;
float var_9;
int var_10;
const int var_11 = 1;
int var_12;
float var_13;
int var_14;
const int var_15 = 2;
int var_16;
float var_17;
int var_18;
const int var_19 = 3;
int var_20;
float var_21;
df::float3 var_22;
quat var_23;
df::float3 var_24;
df::float3 var_25;
const float var_26 = 0.0;
const float var_27 = 1.0;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
float var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
float var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
df::float3 var_49;
float var_50;
float var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
float var_58;
df::float3 var_59;
df::float3 var_60;
df::float3 var_61;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
float adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
float adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
float adj_17 = 0;
int adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
df::float3 adj_22 = 0;
quat adj_23 = 0;
df::float3 adj_24 = 0;
df::float3 adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
df::float3 adj_39 = 0;
df::float3 adj_40 = 0;
float adj_41 = 0;
float adj_42 = 0;
float adj_43 = 0;
float adj_44 = 0;
float adj_45 = 0;
float adj_46 = 0;
float adj_47 = 0;
float adj_48 = 0;
df::float3 adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
df::float3 adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
df::float3 adj_57 = 0;
float adj_58 = 0;
df::float3 adj_59 = 0;
df::float3 adj_60 = 0;
df::float3 adj_61 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_contact_body, var_0);
var_2 = df::load(var_contact_point, var_0);
var_3 = df::load(var_contact_dist, var_0);
var_4 = df::load(var_contact_mat, var_0);
var_6 = df::mul(var_4, var_5);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_materials, var_8);
var_10 = df::mul(var_4, var_5);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_materials, var_12);
var_14 = df::mul(var_4, var_5);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_materials, var_16);
var_18 = df::mul(var_4, var_5);
var_20 = df::add(var_18, var_19);
var_21 = df::load(var_materials, var_20);
var_22 = df::load(var_rigid_x, var_1);
var_23 = df::load(var_rigid_r, var_1);
var_24 = df::load(var_rigid_v, var_1);
var_25 = df::load(var_rigid_w, var_1);
var_28 = df::float3(var_26, var_27, var_26);
var_29 = df::rotate(var_23, var_2);
var_30 = df::add(var_22, var_29);
var_31 = df::mul(var_28, var_3);
var_32 = df::sub(var_30, var_31);
var_33 = df::sub(var_32, var_22);
var_34 = df::cross(var_25, var_33);
var_35 = df::add(var_24, var_34);
var_36 = df::dot(var_28, var_32);
var_37 = df::min(var_36, var_26);
var_38 = df::dot(var_28, var_35);
var_39 = df::mul(var_28, var_38);
var_40 = df::sub(var_35, var_39);
var_41 = df::mul(var_37, var_9);
var_42 = df::min(var_38, var_26);
var_43 = df::mul(var_42, var_13);
var_44 = df::step(var_37);
var_45 = df::mul(var_43, var_44);
var_46 = df::add(var_41, var_45);
var_47 = df::mul(var_21, var_46);
var_48 = df::sub(var_26, var_47);
var_49 = df::float3(var_17, var_26, var_26);
var_50 = df::dot(var_49, var_40);
var_51 = df::clamp(var_50, var_47, var_48);
var_52 = df::float3(var_26, var_26, var_17);
var_53 = df::dot(var_52, var_40);
var_54 = df::clamp(var_53, var_47, var_48);
var_55 = df::float3(var_51, var_26, var_54);
var_56 = df::step(var_37);
var_57 = df::mul(var_55, var_56);
var_58 = df::add(var_41, var_45);
var_59 = df::mul(var_28, var_58);
var_60 = df::add(var_59, var_57);
var_61 = df::cross(var_33, var_60);
df::atomic_sub(var_rigid_f, var_1, var_60);
df::atomic_sub(var_rigid_t, var_1, var_61);
//---------
// reverse
df::adj_atomic_sub(var_rigid_t, var_1, var_61, adj_rigid_t, adj_1, adj_61);
df::adj_atomic_sub(var_rigid_f, var_1, var_60, adj_rigid_f, adj_1, adj_60);
df::adj_cross(var_33, var_60, adj_33, adj_60, adj_61);
df::adj_add(var_59, var_57, adj_59, adj_57, adj_60);
df::adj_mul(var_28, var_58, adj_28, adj_58, adj_59);
df::adj_add(var_41, var_45, adj_41, adj_45, adj_58);
df::adj_mul(var_55, var_56, adj_55, adj_56, adj_57);
df::adj_step(var_37, adj_37, adj_56);
df::adj_float3(var_51, var_26, var_54, adj_51, adj_26, adj_54, adj_55);
df::adj_clamp(var_53, var_47, var_48, adj_53, adj_47, adj_48, adj_54);
df::adj_dot(var_52, var_40, adj_52, adj_40, adj_53);
df::adj_float3(var_26, var_26, var_17, adj_26, adj_26, adj_17, adj_52);
df::adj_clamp(var_50, var_47, var_48, adj_50, adj_47, adj_48, adj_51);
df::adj_dot(var_49, var_40, adj_49, adj_40, adj_50);
df::adj_float3(var_17, var_26, var_26, adj_17, adj_26, adj_26, adj_49);
df::adj_sub(var_26, var_47, adj_26, adj_47, adj_48);
df::adj_mul(var_21, var_46, adj_21, adj_46, adj_47);
df::adj_add(var_41, var_45, adj_41, adj_45, adj_46);
df::adj_mul(var_43, var_44, adj_43, adj_44, adj_45);
df::adj_step(var_37, adj_37, adj_44);
df::adj_mul(var_42, var_13, adj_42, adj_13, adj_43);
df::adj_min(var_38, var_26, adj_38, adj_26, adj_42);
df::adj_mul(var_37, var_9, adj_37, adj_9, adj_41);
df::adj_sub(var_35, var_39, adj_35, adj_39, adj_40);
df::adj_mul(var_28, var_38, adj_28, adj_38, adj_39);
df::adj_dot(var_28, var_35, adj_28, adj_35, adj_38);
df::adj_min(var_36, var_26, adj_36, adj_26, adj_37);
df::adj_dot(var_28, var_32, adj_28, adj_32, adj_36);
df::adj_add(var_24, var_34, adj_24, adj_34, adj_35);
df::adj_cross(var_25, var_33, adj_25, adj_33, adj_34);
df::adj_sub(var_32, var_22, adj_32, adj_22, adj_33);
df::adj_sub(var_30, var_31, adj_30, adj_31, adj_32);
df::adj_mul(var_28, var_3, adj_28, adj_3, adj_31);
df::adj_add(var_22, var_29, adj_22, adj_29, adj_30);
df::adj_rotate(var_23, var_2, adj_23, adj_2, adj_29);
df::adj_float3(var_26, var_27, var_26, adj_26, adj_27, adj_26, adj_28);
df::adj_load(var_rigid_w, var_1, adj_rigid_w, adj_1, adj_25);
df::adj_load(var_rigid_v, var_1, adj_rigid_v, adj_1, adj_24);
df::adj_load(var_rigid_r, var_1, adj_rigid_r, adj_1, adj_23);
df::adj_load(var_rigid_x, var_1, adj_rigid_x, adj_1, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_18, var_19, adj_18, adj_19, adj_20);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_18);
df::adj_load(var_materials, var_16, adj_materials, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_14);
df::adj_load(var_materials, var_12, adj_materials, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_10);
df::adj_load(var_materials, var_8, adj_materials, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_6);
df::adj_load(var_contact_mat, var_0, adj_contact_mat, adj_0, adj_4);
df::adj_load(var_contact_dist, var_0, adj_contact_dist, adj_0, adj_3);
df::adj_load(var_contact_point, var_0, adj_contact_point, adj_0, adj_2);
df::adj_load(var_contact_body, var_0, adj_contact_body, adj_0, adj_1);
return;
}
// Python entry points
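// These CPU wrappers run the kernel once per logical thread: the loop sets the
// global s_threadIdx to i before each call (this is what df::tid() returns
// inside the kernel), and cast<T*>() exposes each torch::Tensor's data buffer
// as the kernel's element type. The same pattern repeats for every kernel in
// this file.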
void eval_rigid_contacts_cpu_forward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_cpu_kernel_forward(
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<df::float3*>(var_rigid_f),
cast<df::float3*>(var_rigid_t));
}
}
void eval_rigid_contacts_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_cpu_kernel_backward(
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<df::float3*>(var_rigid_f),
cast<df::float3*>(var_rigid_t),
cast<df::float3*>(adj_rigid_x),
cast<quat*>(adj_rigid_r),
cast<df::float3*>(adj_rigid_v),
cast<df::float3*>(adj_rigid_w),
cast<int*>(adj_contact_body),
cast<df::float3*>(adj_contact_point),
cast<float*>(adj_contact_dist),
cast<int*>(adj_contact_mat),
cast<float*>(adj_materials),
cast<df::float3*>(adj_rigid_f),
cast<df::float3*>(adj_rigid_t));
}
}
// Python entry points
void eval_rigid_contacts_cpu_forward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t);
void eval_rigid_contacts_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t);
void eval_rigid_contacts_art_cpu_kernel_forward(
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
spatial_vector* var_body_f_s)
{
//---------
// primal vars
int var_0;
int var_1;
df::float3 var_2;
float var_3;
int var_4;
const int var_5 = 4;
int var_6;
const int var_7 = 0;
int var_8;
float var_9;
int var_10;
const int var_11 = 1;
int var_12;
float var_13;
int var_14;
const int var_15 = 2;
int var_16;
float var_17;
int var_18;
const int var_19 = 3;
int var_20;
float var_21;
spatial_transform var_22;
spatial_vector var_23;
const float var_24 = 0.0;
const float var_25 = 1.0;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
float var_34;
bool var_35;
float var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
df::float3 var_49;
float var_50;
float var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
float var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
float var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
spatial_vector var_69;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_contact_body, var_0);
var_2 = df::load(var_contact_point, var_0);
var_3 = df::load(var_contact_dist, var_0);
var_4 = df::load(var_contact_mat, var_0);
var_6 = df::mul(var_4, var_5);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_materials, var_8);
var_10 = df::mul(var_4, var_5);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_materials, var_12);
var_14 = df::mul(var_4, var_5);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_materials, var_16);
var_18 = df::mul(var_4, var_5);
var_20 = df::add(var_18, var_19);
var_21 = df::load(var_materials, var_20);
var_22 = df::load(var_body_X_s, var_1);
var_23 = df::load(var_body_v_s, var_1);
var_26 = df::float3(var_24, var_25, var_24);
var_27 = df::spatial_transform_point(var_22, var_2);
var_28 = df::mul(var_26, var_3);
var_29 = df::sub(var_27, var_28);
var_30 = df::spatial_top(var_23);
var_31 = df::spatial_bottom(var_23);
var_32 = df::cross(var_30, var_29);
var_33 = df::add(var_31, var_32);
var_34 = df::dot(var_26, var_29);
var_35 = (var_34 >= var_24);
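// var_34 is the contact point's height along the world up axis (var_26);
// non-penetrating contacts produce no force, so this thread exits early.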
if (var_35) {
return;
}
var_36 = df::dot(var_26, var_33);
var_37 = df::mul(var_26, var_36);
var_38 = df::sub(var_33, var_37);
var_39 = df::mul(var_34, var_9);
var_40 = df::min(var_36, var_24);
var_41 = df::mul(var_40, var_13);
var_42 = df::step(var_34);
var_43 = df::mul(var_41, var_42);
var_44 = df::sub(var_24, var_34);
var_45 = df::mul(var_43, var_44);
var_46 = df::add(var_39, var_45);
var_47 = df::mul(var_21, var_46);
var_48 = df::sub(var_24, var_47);
var_49 = df::float3(var_17, var_24, var_24);
var_50 = df::dot(var_49, var_38);
var_51 = df::clamp(var_50, var_47, var_48);
var_52 = df::float3(var_24, var_24, var_17);
var_53 = df::dot(var_52, var_38);
var_54 = df::clamp(var_53, var_47, var_48);
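// The box-clamped friction components var_51 / var_54 above are not used in the
// applied force; the friction actually accumulated below follows the tangential
// velocity direction (var_55) with magnitude min(kf*|vt|, -mu*ke*penetration).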
var_55 = df::normalize(var_38);
var_56 = df::length(var_38);
var_57 = df::mul(var_17, var_56);
var_58 = df::mul(var_21, var_34);
var_59 = df::mul(var_58, var_9);
var_60 = df::sub(var_24, var_59);
var_61 = df::min(var_57, var_60);
var_62 = df::mul(var_55, var_61);
var_63 = df::step(var_34);
var_64 = df::mul(var_62, var_63);
var_65 = df::add(var_39, var_45);
var_66 = df::mul(var_26, var_65);
var_67 = df::add(var_66, var_64);
var_68 = df::cross(var_29, var_67);
var_69 = df::spatial_vector(var_68, var_67);
df::atomic_add(var_body_f_s, var_1, var_69);
}
void eval_rigid_contacts_art_cpu_kernel_backward(
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
spatial_vector* var_body_f_s,
spatial_transform* adj_body_X_s,
spatial_vector* adj_body_v_s,
int* adj_contact_body,
df::float3* adj_contact_point,
float* adj_contact_dist,
int* adj_contact_mat,
float* adj_materials,
spatial_vector* adj_body_f_s)
{
//---------
// primal vars
int var_0;
int var_1;
df::float3 var_2;
float var_3;
int var_4;
const int var_5 = 4;
int var_6;
const int var_7 = 0;
int var_8;
float var_9;
int var_10;
const int var_11 = 1;
int var_12;
float var_13;
int var_14;
const int var_15 = 2;
int var_16;
float var_17;
int var_18;
const int var_19 = 3;
int var_20;
float var_21;
spatial_transform var_22;
spatial_vector var_23;
const float var_24 = 0.0;
const float var_25 = 1.0;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
float var_34;
bool var_35;
float var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
df::float3 var_49;
float var_50;
float var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
float var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
float var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
spatial_vector var_69;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
float adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
float adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
float adj_17 = 0;
int adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
spatial_transform adj_22 = 0;
spatial_vector adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
float adj_34 = 0;
bool adj_35 = 0;
float adj_36 = 0;
df::float3 adj_37 = 0;
df::float3 adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
float adj_41 = 0;
float adj_42 = 0;
float adj_43 = 0;
float adj_44 = 0;
float adj_45 = 0;
float adj_46 = 0;
float adj_47 = 0;
float adj_48 = 0;
df::float3 adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
df::float3 adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
float adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
float adj_65 = 0;
df::float3 adj_66 = 0;
df::float3 adj_67 = 0;
df::float3 adj_68 = 0;
spatial_vector adj_69 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_contact_body, var_0);
var_2 = df::load(var_contact_point, var_0);
var_3 = df::load(var_contact_dist, var_0);
var_4 = df::load(var_contact_mat, var_0);
var_6 = df::mul(var_4, var_5);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_materials, var_8);
var_10 = df::mul(var_4, var_5);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_materials, var_12);
var_14 = df::mul(var_4, var_5);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_materials, var_16);
var_18 = df::mul(var_4, var_5);
var_20 = df::add(var_18, var_19);
var_21 = df::load(var_materials, var_20);
var_22 = df::load(var_body_X_s, var_1);
var_23 = df::load(var_body_v_s, var_1);
var_26 = df::float3(var_24, var_25, var_24);
var_27 = df::spatial_transform_point(var_22, var_2);
var_28 = df::mul(var_26, var_3);
var_29 = df::sub(var_27, var_28);
var_30 = df::spatial_top(var_23);
var_31 = df::spatial_bottom(var_23);
var_32 = df::cross(var_30, var_29);
var_33 = df::add(var_31, var_32);
var_34 = df::dot(var_26, var_29);
var_35 = (var_34 >= var_24);
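// The forward early-return is replayed here as a jump: for non-penetrating
// contacts control skips straight to label0 in the reverse section below, so
// only the adjoints of the statements that actually ran are accumulated.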
if (var_35) {
goto label0;
}
var_36 = df::dot(var_26, var_33);
var_37 = df::mul(var_26, var_36);
var_38 = df::sub(var_33, var_37);
var_39 = df::mul(var_34, var_9);
var_40 = df::min(var_36, var_24);
var_41 = df::mul(var_40, var_13);
var_42 = df::step(var_34);
var_43 = df::mul(var_41, var_42);
var_44 = df::sub(var_24, var_34);
var_45 = df::mul(var_43, var_44);
var_46 = df::add(var_39, var_45);
var_47 = df::mul(var_21, var_46);
var_48 = df::sub(var_24, var_47);
var_49 = df::float3(var_17, var_24, var_24);
var_50 = df::dot(var_49, var_38);
var_51 = df::clamp(var_50, var_47, var_48);
var_52 = df::float3(var_24, var_24, var_17);
var_53 = df::dot(var_52, var_38);
var_54 = df::clamp(var_53, var_47, var_48);
var_55 = df::normalize(var_38);
var_56 = df::length(var_38);
var_57 = df::mul(var_17, var_56);
var_58 = df::mul(var_21, var_34);
var_59 = df::mul(var_58, var_9);
var_60 = df::sub(var_24, var_59);
var_61 = df::min(var_57, var_60);
var_62 = df::mul(var_55, var_61);
var_63 = df::step(var_34);
var_64 = df::mul(var_62, var_63);
var_65 = df::add(var_39, var_45);
var_66 = df::mul(var_26, var_65);
var_67 = df::add(var_66, var_64);
var_68 = df::cross(var_29, var_67);
var_69 = df::spatial_vector(var_68, var_67);
df::atomic_add(var_body_f_s, var_1, var_69);
//---------
// reverse
df::adj_atomic_add(var_body_f_s, var_1, var_69, adj_body_f_s, adj_1, adj_69);
df::adj_spatial_vector(var_68, var_67, adj_68, adj_67, adj_69);
df::adj_cross(var_29, var_67, adj_29, adj_67, adj_68);
df::adj_add(var_66, var_64, adj_66, adj_64, adj_67);
df::adj_mul(var_26, var_65, adj_26, adj_65, adj_66);
df::adj_add(var_39, var_45, adj_39, adj_45, adj_65);
df::adj_mul(var_62, var_63, adj_62, adj_63, adj_64);
df::adj_step(var_34, adj_34, adj_63);
df::adj_mul(var_55, var_61, adj_55, adj_61, adj_62);
df::adj_min(var_57, var_60, adj_57, adj_60, adj_61);
df::adj_sub(var_24, var_59, adj_24, adj_59, adj_60);
df::adj_mul(var_58, var_9, adj_58, adj_9, adj_59);
df::adj_mul(var_21, var_34, adj_21, adj_34, adj_58);
df::adj_mul(var_17, var_56, adj_17, adj_56, adj_57);
df::adj_length(var_38, adj_38, adj_56);
df::adj_normalize(var_38, adj_38, adj_55);
df::adj_clamp(var_53, var_47, var_48, adj_53, adj_47, adj_48, adj_54);
df::adj_dot(var_52, var_38, adj_52, adj_38, adj_53);
df::adj_float3(var_24, var_24, var_17, adj_24, adj_24, adj_17, adj_52);
df::adj_clamp(var_50, var_47, var_48, adj_50, adj_47, adj_48, adj_51);
df::adj_dot(var_49, var_38, adj_49, adj_38, adj_50);
df::adj_float3(var_17, var_24, var_24, adj_17, adj_24, adj_24, adj_49);
df::adj_sub(var_24, var_47, adj_24, adj_47, adj_48);
df::adj_mul(var_21, var_46, adj_21, adj_46, adj_47);
df::adj_add(var_39, var_45, adj_39, adj_45, adj_46);
df::adj_mul(var_43, var_44, adj_43, adj_44, adj_45);
df::adj_sub(var_24, var_34, adj_24, adj_34, adj_44);
df::adj_mul(var_41, var_42, adj_41, adj_42, adj_43);
df::adj_step(var_34, adj_34, adj_42);
df::adj_mul(var_40, var_13, adj_40, adj_13, adj_41);
df::adj_min(var_36, var_24, adj_36, adj_24, adj_40);
df::adj_mul(var_34, var_9, adj_34, adj_9, adj_39);
df::adj_sub(var_33, var_37, adj_33, adj_37, adj_38);
df::adj_mul(var_26, var_36, adj_26, adj_36, adj_37);
df::adj_dot(var_26, var_33, adj_26, adj_33, adj_36);
if (var_35) {
label0:;
}
df::adj_dot(var_26, var_29, adj_26, adj_29, adj_34);
df::adj_add(var_31, var_32, adj_31, adj_32, adj_33);
df::adj_cross(var_30, var_29, adj_30, adj_29, adj_32);
df::adj_spatial_bottom(var_23, adj_23, adj_31);
df::adj_spatial_top(var_23, adj_23, adj_30);
df::adj_sub(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_mul(var_26, var_3, adj_26, adj_3, adj_28);
df::adj_spatial_transform_point(var_22, var_2, adj_22, adj_2, adj_27);
df::adj_float3(var_24, var_25, var_24, adj_24, adj_25, adj_24, adj_26);
df::adj_load(var_body_v_s, var_1, adj_body_v_s, adj_1, adj_23);
df::adj_load(var_body_X_s, var_1, adj_body_X_s, adj_1, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_18, var_19, adj_18, adj_19, adj_20);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_18);
df::adj_load(var_materials, var_16, adj_materials, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_14);
df::adj_load(var_materials, var_12, adj_materials, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_10);
df::adj_load(var_materials, var_8, adj_materials, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_4, var_5, adj_4, adj_5, adj_6);
df::adj_load(var_contact_mat, var_0, adj_contact_mat, adj_0, adj_4);
df::adj_load(var_contact_dist, var_0, adj_contact_dist, adj_0, adj_3);
df::adj_load(var_contact_point, var_0, adj_contact_point, adj_0, adj_2);
df::adj_load(var_contact_body, var_0, adj_contact_body, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_contacts_art_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_art_cpu_kernel_forward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<spatial_vector*>(var_body_f_s));
}
}
void eval_rigid_contacts_art_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_art_cpu_kernel_backward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_transform*>(adj_body_X_s),
cast<spatial_vector*>(adj_body_v_s),
cast<int*>(adj_contact_body),
cast<df::float3*>(adj_contact_point),
cast<float*>(adj_contact_dist),
cast<int*>(adj_contact_mat),
cast<float*>(adj_materials),
cast<spatial_vector*>(adj_body_f_s));
}
}
// Python entry points
void eval_rigid_contacts_art_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s);
void eval_rigid_contacts_art_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_body_f_s);
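// eval_muscles: one logical thread per muscle. It walks the muscle's point
// segments in [muscle_start[tid], muscle_start[tid+1] - 1) and lets
// compute_muscle_force_cpu_func apply the activated fiber force of each segment
// to the linked bodies, accumulated into body_f_s.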
void eval_muscles_cpu_kernel_forward(
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_start,
float* var_muscle_params,
int* var_muscle_links,
df::float3* var_muscle_points,
float* var_muscle_activation,
spatial_vector* var_body_f_s)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
float var_6;
int var_7;
int var_8;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_muscle_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_muscle_start, var_3);
var_5 = df::sub(var_4, var_2);
var_6 = df::load(var_muscle_activation, var_0);
for (var_7=var_1; var_7 < var_5; ++var_7) {
var_8 = compute_muscle_force_cpu_func(var_7, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, var_6, var_body_f_s);
}
}
void eval_muscles_cpu_kernel_backward(
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_start,
float* var_muscle_params,
int* var_muscle_links,
df::float3* var_muscle_points,
float* var_muscle_activation,
spatial_vector* var_body_f_s,
spatial_transform* adj_body_X_s,
spatial_vector* adj_body_v_s,
int* adj_muscle_start,
float* adj_muscle_params,
int* adj_muscle_links,
df::float3* adj_muscle_points,
float* adj_muscle_activation,
spatial_vector* adj_body_f_s)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
float var_6;
int var_7;
int var_8;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_muscle_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_muscle_start, var_3);
var_5 = df::sub(var_4, var_2);
var_6 = df::load(var_muscle_activation, var_0);
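// The original loop body is kept inside an unreachable `if (false)` guard and is
// never executed here; the gradient work happens in the reverse-order loop
// below, which calls the adjoint of the per-segment force function.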
if (false) {
var_8 = compute_muscle_force_cpu_func(var_7, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, var_6, var_body_f_s);
}
//---------
// reverse
for (var_7=var_5-1; var_7 >= var_1; --var_7) {
adj_compute_muscle_force_cpu_func(var_7, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, var_6, var_body_f_s, adj_7, adj_body_X_s, adj_body_v_s, adj_muscle_links, adj_muscle_points, adj_6, adj_body_f_s, adj_8);
}
df::adj_load(var_muscle_activation, var_0, adj_muscle_activation, adj_0, adj_6);
df::adj_sub(var_4, var_2, adj_4, adj_2, adj_5);
df::adj_load(var_muscle_start, var_3, adj_muscle_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_muscle_start, var_0, adj_muscle_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_muscles_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_muscles_cpu_kernel_forward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_muscle_start),
cast<float*>(var_muscle_params),
cast<int*>(var_muscle_links),
cast<df::float3*>(var_muscle_points),
cast<float*>(var_muscle_activation),
cast<spatial_vector*>(var_body_f_s));
}
}
void eval_muscles_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_muscle_start,
torch::Tensor adj_muscle_params,
torch::Tensor adj_muscle_links,
torch::Tensor adj_muscle_points,
torch::Tensor adj_muscle_activation,
torch::Tensor adj_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_muscles_cpu_kernel_backward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_muscle_start),
cast<float*>(var_muscle_params),
cast<int*>(var_muscle_links),
cast<df::float3*>(var_muscle_points),
cast<float*>(var_muscle_activation),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_transform*>(adj_body_X_s),
cast<spatial_vector*>(adj_body_v_s),
cast<int*>(adj_muscle_start),
cast<float*>(adj_muscle_params),
cast<int*>(adj_muscle_links),
cast<df::float3*>(adj_muscle_points),
cast<float*>(adj_muscle_activation),
cast<spatial_vector*>(adj_body_f_s));
}
}
// Python entry points
void eval_muscles_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s);
void eval_muscles_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_muscle_start,
torch::Tensor adj_muscle_params,
torch::Tensor adj_muscle_links,
torch::Tensor adj_muscle_points,
torch::Tensor adj_muscle_activation,
torch::Tensor adj_body_f_s);
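// eval_rigid_fk: forward kinematics. One logical thread per articulation walks
// its joint range [articulation_start[tid], articulation_start[tid+1]) and
// computes each link's spatial transforms (body_X_sc, body_X_sm) via
// compute_link_transform_cpu_func.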
void eval_rigid_fk_cpu_kernel_forward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
spatial_transform* var_joint_X_pj,
spatial_transform* var_joint_X_cm,
df::float3* var_joint_axis,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
for (var_5=var_1; var_5 < var_4; ++var_5) {
var_6 = compute_link_transform_cpu_func(var_5, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm);
}
}
void eval_rigid_fk_cpu_kernel_backward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
spatial_transform* var_joint_X_pj,
spatial_transform* var_joint_X_cm,
df::float3* var_joint_axis,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
int* adj_articulation_start,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
spatial_transform* adj_joint_X_pj,
spatial_transform* adj_joint_X_cm,
df::float3* adj_joint_axis,
spatial_transform* adj_body_X_sc,
spatial_transform* adj_body_X_sm)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
if (false) {
var_6 = compute_link_transform_cpu_func(var_5, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm);
}
//---------
// reverse
for (var_5=var_4-1; var_5 >= var_1; --var_5) {
adj_compute_link_transform_cpu_func(var_5, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm, adj_5, adj_joint_type, adj_joint_parent, adj_joint_q_start, adj_joint_qd_start, adj_joint_q, adj_joint_X_pj, adj_joint_X_cm, adj_joint_axis, adj_body_X_sc, adj_body_X_sm, adj_6);
}
df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_fk_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_fk_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<spatial_transform*>(var_joint_X_pj),
cast<spatial_transform*>(var_joint_X_cm),
cast<df::float3*>(var_joint_axis),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm));
}
}
void eval_rigid_fk_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_joint_X_cm,
torch::Tensor adj_joint_axis,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_fk_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<spatial_transform*>(var_joint_X_pj),
cast<spatial_transform*>(var_joint_X_cm),
cast<df::float3*>(var_joint_axis),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<int*>(adj_articulation_start),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<spatial_transform*>(adj_joint_X_pj),
cast<spatial_transform*>(adj_joint_X_cm),
cast<df::float3*>(adj_joint_axis),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_transform*>(adj_body_X_sm));
}
}
// Python entry points
void eval_rigid_fk_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm);
void eval_rigid_fk_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_joint_X_cm,
torch::Tensor adj_joint_axis,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm);
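// eval_rigid_id: per articulation, compute_link_velocity_cpu_func is invoked for
// each link to fill in the joint motion subspaces (joint_S_s), spatial inertias
// (body_I_s), spatial velocities (body_v_s) and the body force / acceleration
// buffers (body_f_s, body_a_s).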
void eval_rigid_id_cpu_kernel_forward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
df::float3* var_joint_axis,
float* var_joint_target_ke,
float* var_joint_target_kd,
spatial_matrix* var_body_I_m,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
spatial_transform* var_joint_X_pj,
df::float3* var_gravity,
spatial_vector* var_joint_S_s,
spatial_matrix* var_body_I_s,
spatial_vector* var_body_v_s,
spatial_vector* var_body_f_s,
spatial_vector* var_body_a_s)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
int var_7;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
for (var_6=var_1; var_6 < var_4; ++var_6) {
var_7 = compute_link_velocity_cpu_func(var_6, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s);
}
}
void eval_rigid_id_cpu_kernel_backward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
df::float3* var_joint_axis,
float* var_joint_target_ke,
float* var_joint_target_kd,
spatial_matrix* var_body_I_m,
spatial_transform* var_body_X_sc,
spatial_transform* var_body_X_sm,
spatial_transform* var_joint_X_pj,
df::float3* var_gravity,
spatial_vector* var_joint_S_s,
spatial_matrix* var_body_I_s,
spatial_vector* var_body_v_s,
spatial_vector* var_body_f_s,
spatial_vector* var_body_a_s,
int* adj_articulation_start,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
float* adj_joint_qd,
df::float3* adj_joint_axis,
float* adj_joint_target_ke,
float* adj_joint_target_kd,
spatial_matrix* adj_body_I_m,
spatial_transform* adj_body_X_sc,
spatial_transform* adj_body_X_sm,
spatial_transform* adj_joint_X_pj,
df::float3* adj_gravity,
spatial_vector* adj_joint_S_s,
spatial_matrix* adj_body_I_s,
spatial_vector* adj_body_v_s,
spatial_vector* adj_body_f_s,
spatial_vector* adj_body_a_s)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
int var_7;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
if (false) {
var_7 = compute_link_velocity_cpu_func(var_6, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s);
}
//---------
// reverse
for (var_6=var_4-1; var_6 >= var_1; --var_6) {
adj_compute_link_velocity_cpu_func(var_6, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s, adj_6, adj_joint_type, adj_joint_parent, adj_joint_qd_start, adj_joint_qd, adj_joint_axis, adj_body_I_m, adj_body_X_sc, adj_body_X_sm, adj_joint_X_pj, adj_gravity, adj_joint_S_s, adj_body_I_s, adj_body_v_s, adj_body_f_s, adj_body_a_s, adj_7);
}
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_id_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_id_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<df::float3*>(var_joint_axis),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<spatial_matrix*>(var_body_I_m),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<spatial_transform*>(var_joint_X_pj),
cast<df::float3*>(var_gravity),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_matrix*>(var_body_I_s),
cast<spatial_vector*>(var_body_v_s),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_vector*>(var_body_a_s));
}
}
void eval_rigid_id_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_body_I_m,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_gravity,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_I_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_body_f_s,
torch::Tensor adj_body_a_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_id_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<df::float3*>(var_joint_axis),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<spatial_matrix*>(var_body_I_m),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<spatial_transform*>(var_joint_X_pj),
cast<df::float3*>(var_gravity),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_matrix*>(var_body_I_s),
cast<spatial_vector*>(var_body_v_s),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_vector*>(var_body_a_s),
cast<int*>(adj_articulation_start),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<float*>(adj_joint_qd),
cast<df::float3*>(adj_joint_axis),
cast<float*>(adj_joint_target_ke),
cast<float*>(adj_joint_target_kd),
cast<spatial_matrix*>(adj_body_I_m),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_transform*>(adj_body_X_sm),
cast<spatial_transform*>(adj_joint_X_pj),
cast<df::float3*>(adj_gravity),
cast<spatial_vector*>(adj_joint_S_s),
cast<spatial_matrix*>(adj_body_I_s),
cast<spatial_vector*>(adj_body_v_s),
cast<spatial_vector*>(adj_body_f_s),
cast<spatial_vector*>(adj_body_a_s));
}
}
// Python entry points
void eval_rigid_id_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s);
void eval_rigid_id_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_body_I_m,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_gravity,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_I_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_body_f_s,
torch::Tensor adj_body_a_s);
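// eval_rigid_tau: per articulation, compute_link_tau_cpu_func is evaluated for
// each link offset in [0, link_count) and accumulates the joint-space torques
// tau from the spatial body forces together with PD joint targets and joint
// limit penalties.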
void eval_rigid_tau_cpu_kernel_forward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_target_ke,
float* var_joint_target_kd,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
float* var_joint_limit_ke,
float* var_joint_limit_kd,
df::float3* var_joint_axis,
spatial_vector* var_joint_S_s,
spatial_vector* var_body_fb_s,
spatial_vector* var_body_ft_s,
float* var_tau)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
const int var_6 = 0;
int var_7;
int var_8;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
for (var_7=var_6; var_7 < var_5; ++var_7) {
var_8 = compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau);
}
}
void eval_rigid_tau_cpu_kernel_backward(
int* var_articulation_start,
int* var_joint_type,
int* var_joint_parent,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_target_ke,
float* var_joint_target_kd,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
float* var_joint_limit_ke,
float* var_joint_limit_kd,
df::float3* var_joint_axis,
spatial_vector* var_joint_S_s,
spatial_vector* var_body_fb_s,
spatial_vector* var_body_ft_s,
float* var_tau,
int* adj_articulation_start,
int* adj_joint_type,
int* adj_joint_parent,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_act,
float* adj_joint_target,
float* adj_joint_target_ke,
float* adj_joint_target_kd,
float* adj_joint_limit_lower,
float* adj_joint_limit_upper,
float* adj_joint_limit_ke,
float* adj_joint_limit_kd,
df::float3* adj_joint_axis,
spatial_vector* adj_joint_S_s,
spatial_vector* adj_body_fb_s,
spatial_vector* adj_body_ft_s,
float* adj_tau)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
const int var_6 = 0;
int var_7;
int var_8;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
if (false) {
var_8 = compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau);
}
//---------
// reverse
for (var_7=var_5-1; var_7 >= var_6; --var_7) {
adj_compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau, adj_7, adj_4, adj_joint_type, adj_joint_parent, adj_joint_q_start, adj_joint_qd_start, adj_joint_q, adj_joint_qd, adj_joint_act, adj_joint_target, adj_joint_target_ke, adj_joint_target_kd, adj_joint_limit_lower, adj_joint_limit_upper, adj_joint_limit_ke, adj_joint_limit_kd, adj_joint_S_s, adj_body_fb_s, adj_body_ft_s, adj_tau, adj_8);
}
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_tau_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_tau_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<float*>(var_joint_act),
cast<float*>(var_joint_target),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<float*>(var_joint_limit_lower),
cast<float*>(var_joint_limit_upper),
cast<float*>(var_joint_limit_ke),
cast<float*>(var_joint_limit_kd),
cast<df::float3*>(var_joint_axis),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_vector*>(var_body_fb_s),
cast<spatial_vector*>(var_body_ft_s),
cast<float*>(var_tau));
}
}
void eval_rigid_tau_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_act,
torch::Tensor adj_joint_target,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_joint_limit_lower,
torch::Tensor adj_joint_limit_upper,
torch::Tensor adj_joint_limit_ke,
torch::Tensor adj_joint_limit_kd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_fb_s,
torch::Tensor adj_body_ft_s,
torch::Tensor adj_tau)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_tau_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<float*>(var_joint_act),
cast<float*>(var_joint_target),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<float*>(var_joint_limit_lower),
cast<float*>(var_joint_limit_upper),
cast<float*>(var_joint_limit_ke),
cast<float*>(var_joint_limit_kd),
cast<df::float3*>(var_joint_axis),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_vector*>(var_body_fb_s),
cast<spatial_vector*>(var_body_ft_s),
cast<float*>(var_tau),
cast<int*>(adj_articulation_start),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<float*>(adj_joint_qd),
cast<float*>(adj_joint_act),
cast<float*>(adj_joint_target),
cast<float*>(adj_joint_target_ke),
cast<float*>(adj_joint_target_kd),
cast<float*>(adj_joint_limit_lower),
cast<float*>(adj_joint_limit_upper),
cast<float*>(adj_joint_limit_ke),
cast<float*>(adj_joint_limit_kd),
cast<df::float3*>(adj_joint_axis),
cast<spatial_vector*>(adj_joint_S_s),
cast<spatial_vector*>(adj_body_fb_s),
cast<spatial_vector*>(adj_body_ft_s),
cast<float*>(adj_tau));
}
}
// Python entry points
void eval_rigid_tau_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau);
void eval_rigid_tau_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_act,
torch::Tensor adj_joint_target,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_joint_limit_lower,
torch::Tensor adj_joint_limit_upper,
torch::Tensor adj_joint_limit_ke,
torch::Tensor adj_joint_limit_kd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_fb_s,
torch::Tensor adj_body_ft_s,
torch::Tensor adj_tau);
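// eval_rigid_jacobian: builds the dense articulation Jacobian J from the per-joint
// motion subspaces (joint_S_s) and the kinematic tree (joint_parent) via
// df::spatial_jacobian, one articulation per thread index.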
void eval_rigid_jacobian_cpu_kernel_forward(
int* var_articulation_start,
int* var_articulation_J_start,
int* var_joint_parent,
int* var_joint_qd_start,
spatial_vector* var_joint_S_s,
float* var_J)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::load(var_articulation_J_start, var_0);
df::spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J);
}
void eval_rigid_jacobian_cpu_kernel_backward(
int* var_articulation_start,
int* var_articulation_J_start,
int* var_joint_parent,
int* var_joint_qd_start,
spatial_vector* var_joint_S_s,
float* var_J,
int* adj_articulation_start,
int* adj_articulation_J_start,
int* adj_joint_parent,
int* adj_joint_qd_start,
spatial_vector* adj_joint_S_s,
float* adj_J)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::load(var_articulation_J_start, var_0);
df::spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J);
//---------
// reverse
df::adj_spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J, adj_joint_S_s, adj_joint_parent, adj_joint_qd_start, adj_1, adj_5, adj_6, adj_J);
df::adj_load(var_articulation_J_start, var_0, adj_articulation_J_start, adj_0, adj_6);
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_jacobian_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_jacobian_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_articulation_J_start),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_qd_start),
cast<spatial_vector*>(var_joint_S_s),
cast<float*>(var_J));
}
}
void eval_rigid_jacobian_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_J_start,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_J)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_jacobian_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_articulation_J_start),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_qd_start),
cast<spatial_vector*>(var_joint_S_s),
cast<float*>(var_J),
cast<int*>(adj_articulation_start),
cast<int*>(adj_articulation_J_start),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_qd_start),
cast<spatial_vector*>(adj_joint_S_s),
cast<float*>(adj_J));
}
}
// Python entry points
void eval_rigid_jacobian_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J);
void eval_rigid_jacobian_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_J_start,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_J);
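// eval_rigid_mass: assembles each articulation's dense mass matrix M from the
// per-body spatial inertias (body_I_s) via df::spatial_mass.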
void eval_rigid_mass_cpu_kernel_forward(
int* var_articulation_start,
int* var_articulation_M_start,
spatial_matrix* var_body_I_s,
float* var_M)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::load(var_articulation_M_start, var_0);
df::spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M);
}
void eval_rigid_mass_cpu_kernel_backward(
int* var_articulation_start,
int* var_articulation_M_start,
spatial_matrix* var_body_I_s,
float* var_M,
int* adj_articulation_start,
int* adj_articulation_M_start,
spatial_matrix* adj_body_I_s,
float* adj_M)
{
//---------
// primal vars
int var_0;
int var_1;
const int var_2 = 1;
int var_3;
int var_4;
int var_5;
int var_6;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_articulation_start, var_0);
var_3 = df::add(var_0, var_2);
var_4 = df::load(var_articulation_start, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::load(var_articulation_M_start, var_0);
df::spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M);
//---------
// reverse
df::adj_spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M, adj_body_I_s, adj_1, adj_5, adj_6, adj_M);
df::adj_load(var_articulation_M_start, var_0, adj_articulation_M_start, adj_0, adj_6);
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_mass_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_mass_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_articulation_M_start),
cast<spatial_matrix*>(var_body_I_s),
cast<float*>(var_M));
}
}
void eval_rigid_mass_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_M_start,
torch::Tensor adj_body_I_s,
torch::Tensor adj_M)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_mass_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_articulation_M_start),
cast<spatial_matrix*>(var_body_I_s),
cast<float*>(var_M),
cast<int*>(adj_articulation_start),
cast<int*>(adj_articulation_M_start),
cast<spatial_matrix*>(adj_body_I_s),
cast<float*>(adj_M));
}
}
// Python entry points
void eval_rigid_mass_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M);
void eval_rigid_mass_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_M_start,
torch::Tensor adj_body_I_s,
torch::Tensor adj_M);
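// eval_dense_gemm: dense matrix-matrix product C = A * B with sizes m/n/p and
// optional transpose flags t1/t2; a batched variant with per-problem sizes and
// start offsets follows below.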
void eval_dense_gemm_cpu_kernel_forward(
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
float* var_A,
float* var_B,
float* var_C)
{
//---------
// primal vars
//---------
// forward
df::dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C);
}
void eval_dense_gemm_cpu_kernel_backward(
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
float* var_A,
float* var_B,
float* var_C,
int adj_m,
int adj_n,
int adj_p,
int adj_t1,
int adj_t2,
float* adj_A,
float* adj_B,
float* adj_C)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C);
//---------
// reverse
df::adj_dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C, adj_m, adj_n, adj_p, adj_t1, adj_t2, adj_A, adj_B, adj_C);
return;
}
// Python entry points
void eval_dense_gemm_cpu_forward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_gemm_cpu_kernel_forward(
var_m,
var_n,
var_p,
var_t1,
var_t2,
cast<float*>(var_A),
cast<float*>(var_B),
cast<float*>(var_C));
}
}
void eval_dense_gemm_cpu_backward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
int adj_m,
int adj_n,
int adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_gemm_cpu_kernel_backward(
var_m,
var_n,
var_p,
var_t1,
var_t2,
cast<float*>(var_A),
cast<float*>(var_B),
cast<float*>(var_C),
adj_m,
adj_n,
adj_p,
adj_t1,
adj_t2,
cast<float*>(adj_A),
cast<float*>(adj_B),
cast<float*>(adj_C));
}
}
// Python entry points
void eval_dense_gemm_cpu_forward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C);
void eval_dense_gemm_cpu_backward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
int adj_m,
int adj_n,
int adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C);
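// Batched eval_dense_gemm: m/n/p and the A/B/C start offsets are per-problem arrays.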
void eval_dense_gemm_batched_cpu_kernel_forward(
int* var_m,
int* var_n,
int* var_p,
int var_t1,
int var_t2,
int* var_A_start,
int* var_B_start,
int* var_C_start,
float* var_A,
float* var_B,
float* var_C)
{
//---------
// primal vars
//---------
// forward
df::dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C);
}
void eval_dense_gemm_batched_cpu_kernel_backward(
int* var_m,
int* var_n,
int* var_p,
int var_t1,
int var_t2,
int* var_A_start,
int* var_B_start,
int* var_C_start,
float* var_A,
float* var_B,
float* var_C,
int* adj_m,
int* adj_n,
int* adj_p,
int adj_t1,
int adj_t2,
int* adj_A_start,
int* adj_B_start,
int* adj_C_start,
float* adj_A,
float* adj_B,
float* adj_C)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C);
//---------
// reverse
df::adj_dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C, adj_m, adj_n, adj_p, adj_t1, adj_t2, adj_A_start, adj_B_start, adj_C_start, adj_A, adj_B, adj_C);
return;
}
// Python entry points
void eval_dense_gemm_batched_cpu_forward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_gemm_batched_cpu_kernel_forward(
cast<int*>(var_m),
cast<int*>(var_n),
cast<int*>(var_p),
var_t1,
var_t2,
cast<int*>(var_A_start),
cast<int*>(var_B_start),
cast<int*>(var_C_start),
cast<float*>(var_A),
cast<float*>(var_B),
cast<float*>(var_C));
}
}
void eval_dense_gemm_batched_cpu_backward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
torch::Tensor adj_m,
torch::Tensor adj_n,
torch::Tensor adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A_start,
torch::Tensor adj_B_start,
torch::Tensor adj_C_start,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_gemm_batched_cpu_kernel_backward(
cast<int*>(var_m),
cast<int*>(var_n),
cast<int*>(var_p),
var_t1,
var_t2,
cast<int*>(var_A_start),
cast<int*>(var_B_start),
cast<int*>(var_C_start),
cast<float*>(var_A),
cast<float*>(var_B),
cast<float*>(var_C),
cast<int*>(adj_m),
cast<int*>(adj_n),
cast<int*>(adj_p),
adj_t1,
adj_t2,
cast<int*>(adj_A_start),
cast<int*>(adj_B_start),
cast<int*>(adj_C_start),
cast<float*>(adj_A),
cast<float*>(adj_B),
cast<float*>(adj_C));
}
}
// Python entry points
void eval_dense_gemm_batched_cpu_forward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C);
void eval_dense_gemm_batched_cpu_backward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
torch::Tensor adj_m,
torch::Tensor adj_n,
torch::Tensor adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A_start,
torch::Tensor adj_B_start,
torch::Tensor adj_C_start,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C);
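// eval_dense_cholesky: factors the dense matrix A into a lower-triangular L via
// df::dense_chol, taking a regularization input for numerical robustness.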
void eval_dense_cholesky_cpu_kernel_forward(
int var_n,
float* var_A,
float* var_regularization,
float* var_L)
{
//---------
// primal vars
//---------
// forward
df::dense_chol(var_n, var_A, var_regularization, var_L);
}
void eval_dense_cholesky_cpu_kernel_backward(
int var_n,
float* var_A,
float* var_regularization,
float* var_L,
int adj_n,
float* adj_A,
float* adj_regularization,
float* adj_L)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_chol(var_n, var_A, var_regularization, var_L);
//---------
// reverse
df::adj_dense_chol(var_n, var_A, var_regularization, var_L, adj_n, adj_A, adj_regularization, adj_L);
return;
}
// Python entry points
void eval_dense_cholesky_cpu_forward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_cholesky_cpu_kernel_forward(
var_n,
cast<float*>(var_A),
cast<float*>(var_regularization),
cast<float*>(var_L));
}
}
void eval_dense_cholesky_cpu_backward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L,
int adj_n,
torch::Tensor adj_A,
torch::Tensor adj_regularization,
torch::Tensor adj_L)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_cholesky_cpu_kernel_backward(
var_n,
cast<float*>(var_A),
cast<float*>(var_regularization),
cast<float*>(var_L),
adj_n,
cast<float*>(adj_A),
cast<float*>(adj_regularization),
cast<float*>(adj_L));
}
}
// Python entry points
void eval_dense_cholesky_cpu_forward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L);
void eval_dense_cholesky_cpu_backward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L,
int adj_n,
torch::Tensor adj_A,
torch::Tensor adj_regularization,
torch::Tensor adj_L);
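// Batched Cholesky: A_start/A_dim give each problem's offset and dimension.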
void eval_dense_cholesky_batched_cpu_kernel_forward(
int* var_A_start,
int* var_A_dim,
float* var_A,
float* var_regularization,
float* var_L)
{
//---------
// primal vars
//---------
// forward
df::dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L);
}
void eval_dense_cholesky_batched_cpu_kernel_backward(
int* var_A_start,
int* var_A_dim,
float* var_A,
float* var_regularization,
float* var_L,
int* adj_A_start,
int* adj_A_dim,
float* adj_A,
float* adj_regularization,
float* adj_L)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L);
//---------
// reverse
df::adj_dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L, adj_A_start, adj_A_dim, adj_A, adj_regularization, adj_L);
return;
}
// Python entry points
void eval_dense_cholesky_batched_cpu_forward(int dim,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_cholesky_batched_cpu_kernel_forward(
cast<int*>(var_A_start),
cast<int*>(var_A_dim),
cast<float*>(var_A),
cast<float*>(var_regularization),
cast<float*>(var_L));
}
}
void eval_dense_cholesky_batched_cpu_backward(int dim,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L,
torch::Tensor adj_A_start,
torch::Tensor adj_A_dim,
torch::Tensor adj_A,
torch::Tensor adj_regularization,
torch::Tensor adj_L)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_cholesky_batched_cpu_kernel_backward(
cast<int*>(var_A_start),
cast<int*>(var_A_dim),
cast<float*>(var_A),
cast<float*>(var_regularization),
cast<float*>(var_L),
cast<int*>(adj_A_start),
cast<int*>(adj_A_dim),
cast<float*>(adj_A),
cast<float*>(adj_regularization),
cast<float*>(adj_L));
}
}
// Python entry points
void eval_dense_cholesky_batched_cpu_forward(int dim,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L);
void eval_dense_cholesky_batched_cpu_backward(int dim,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L,
torch::Tensor adj_A_start,
torch::Tensor adj_A_dim,
torch::Tensor adj_A,
torch::Tensor adj_regularization,
torch::Tensor adj_L);
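// eval_dense_subs: forward/back substitution with the factor L, solving for x given b.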
void eval_dense_subs_cpu_kernel_forward(
int var_n,
float* var_L,
float* var_b,
float* var_x)
{
//---------
// primal vars
//---------
// forward
df::dense_subs(var_n, var_L, var_b, var_x);
}
void eval_dense_subs_cpu_kernel_backward(
int var_n,
float* var_L,
float* var_b,
float* var_x,
int adj_n,
float* adj_L,
float* adj_b,
float* adj_x)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_subs(var_n, var_L, var_b, var_x);
//---------
// reverse
df::adj_dense_subs(var_n, var_L, var_b, var_x, adj_n, adj_L, adj_b, adj_x);
return;
}
// Python entry points
void eval_dense_subs_cpu_forward(int dim,
int var_n,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_subs_cpu_kernel_forward(
var_n,
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_x));
}
}
void eval_dense_subs_cpu_backward(int dim,
int var_n,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_x,
int adj_n,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_subs_cpu_kernel_backward(
var_n,
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_x),
adj_n,
cast<float*>(adj_L),
cast<float*>(adj_b),
cast<float*>(adj_x));
}
}
// Python entry points
void eval_dense_subs_cpu_forward(int dim,
int var_n,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_x);
void eval_dense_subs_cpu_backward(int dim,
int var_n,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_x,
int adj_n,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_x);
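// eval_dense_solve: solves the dense system A x = b using the Cholesky factor L;
// tmp is scratch space for the substitution passes.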
void eval_dense_solve_cpu_kernel_forward(
int var_n,
float* var_A,
float* var_L,
float* var_b,
float* var_tmp,
float* var_x)
{
//---------
// primal vars
//---------
// forward
df::dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x);
}
void eval_dense_solve_cpu_kernel_backward(
int var_n,
float* var_A,
float* var_L,
float* var_b,
float* var_tmp,
float* var_x,
int adj_n,
float* adj_A,
float* adj_L,
float* adj_b,
float* adj_tmp,
float* adj_x)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x);
//---------
// reverse
df::adj_dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x, adj_n, adj_A, adj_L, adj_b, adj_tmp, adj_x);
return;
}
// Python entry points
void eval_dense_solve_cpu_forward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_solve_cpu_kernel_forward(
var_n,
cast<float*>(var_A),
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_tmp),
cast<float*>(var_x));
}
}
void eval_dense_solve_cpu_backward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x,
int adj_n,
torch::Tensor adj_A,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_tmp,
torch::Tensor adj_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_solve_cpu_kernel_backward(
var_n,
cast<float*>(var_A),
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_tmp),
cast<float*>(var_x),
adj_n,
cast<float*>(adj_A),
cast<float*>(adj_L),
cast<float*>(adj_b),
cast<float*>(adj_tmp),
cast<float*>(adj_x));
}
}
// Python entry points
void eval_dense_solve_cpu_forward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x);
void eval_dense_solve_cpu_backward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x,
int adj_n,
torch::Tensor adj_A,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_tmp,
torch::Tensor adj_x);
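// Batched solve: b_start/A_start/A_dim index each problem's right-hand side and factor.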
void eval_dense_solve_batched_cpu_kernel_forward(
int* var_b_start,
int* var_A_start,
int* var_A_dim,
float* var_A,
float* var_L,
float* var_b,
float* var_tmp,
float* var_x)
{
//---------
// primal vars
//---------
// forward
df::dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x);
}
void eval_dense_solve_batched_cpu_kernel_backward(
int* var_b_start,
int* var_A_start,
int* var_A_dim,
float* var_A,
float* var_L,
float* var_b,
float* var_tmp,
float* var_x,
int* adj_b_start,
int* adj_A_start,
int* adj_A_dim,
float* adj_A,
float* adj_L,
float* adj_b,
float* adj_tmp,
float* adj_x)
{
//---------
// primal vars
//---------
// dual vars
//---------
// forward
df::dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x);
//---------
// reverse
df::adj_dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x, adj_b_start, adj_A_start, adj_A_dim, adj_A, adj_L, adj_b, adj_tmp, adj_x);
return;
}
// Python entry points
void eval_dense_solve_batched_cpu_forward(int dim,
torch::Tensor var_b_start,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_solve_batched_cpu_kernel_forward(
cast<int*>(var_b_start),
cast<int*>(var_A_start),
cast<int*>(var_A_dim),
cast<float*>(var_A),
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_tmp),
cast<float*>(var_x));
}
}
void eval_dense_solve_batched_cpu_backward(int dim,
torch::Tensor var_b_start,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x,
torch::Tensor adj_b_start,
torch::Tensor adj_A_start,
torch::Tensor adj_A_dim,
torch::Tensor adj_A,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_tmp,
torch::Tensor adj_x)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_dense_solve_batched_cpu_kernel_backward(
cast<int*>(var_b_start),
cast<int*>(var_A_start),
cast<int*>(var_A_dim),
cast<float*>(var_A),
cast<float*>(var_L),
cast<float*>(var_b),
cast<float*>(var_tmp),
cast<float*>(var_x),
cast<int*>(adj_b_start),
cast<int*>(adj_A_start),
cast<int*>(adj_A_dim),
cast<float*>(adj_A),
cast<float*>(adj_L),
cast<float*>(adj_b),
cast<float*>(adj_tmp),
cast<float*>(adj_x));
}
}
// Python entry points
void eval_dense_solve_batched_cpu_forward(int dim,
torch::Tensor var_b_start,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x);
void eval_dense_solve_batched_cpu_backward(int dim,
torch::Tensor var_b_start,
torch::Tensor var_A_start,
torch::Tensor var_A_dim,
torch::Tensor var_A,
torch::Tensor var_L,
torch::Tensor var_b,
torch::Tensor var_tmp,
torch::Tensor var_x,
torch::Tensor adj_b_start,
torch::Tensor adj_A_start,
torch::Tensor adj_A_dim,
torch::Tensor adj_A,
torch::Tensor adj_L,
torch::Tensor adj_b,
torch::Tensor adj_tmp,
torch::Tensor adj_x);
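// eval_rigid_integrate: advances each joint's coordinates by one time step dt,
// reading joint_q/joint_qd/joint_qdd and writing joint_q_new/joint_qd_new via
// jcalc_integrate (dispatched on joint_type).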
void eval_rigid_integrate_cpu_kernel_forward(
int* var_joint_type,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_qdd,
float var_dt,
float* var_joint_q_new,
float* var_joint_qd_new)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
int var_4;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_joint_type, var_0);
var_2 = df::load(var_joint_q_start, var_0);
var_3 = df::load(var_joint_qd_start, var_0);
var_4 = jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new);
}
void eval_rigid_integrate_cpu_kernel_backward(
int* var_joint_type,
int* var_joint_q_start,
int* var_joint_qd_start,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_qdd,
float var_dt,
float* var_joint_q_new,
float* var_joint_qd_new,
int* adj_joint_type,
int* adj_joint_q_start,
int* adj_joint_qd_start,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_qdd,
float adj_dt,
float* adj_joint_q_new,
float* adj_joint_qd_new)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
int var_4;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_joint_type, var_0);
var_2 = df::load(var_joint_q_start, var_0);
var_3 = df::load(var_joint_qd_start, var_0);
var_4 = jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new);
//---------
// reverse
adj_jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new, adj_1, adj_joint_q, adj_joint_qd, adj_joint_qdd, adj_2, adj_3, adj_dt, adj_joint_q_new, adj_joint_qd_new, adj_4);
df::adj_load(var_joint_qd_start, var_0, adj_joint_qd_start, adj_0, adj_3);
df::adj_load(var_joint_q_start, var_0, adj_joint_q_start, adj_0, adj_2);
df::adj_load(var_joint_type, var_0, adj_joint_type, adj_0, adj_1);
return;
}
// Python entry points
void eval_rigid_integrate_cpu_forward(int dim,
torch::Tensor var_joint_type,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_qdd,
float var_dt,
torch::Tensor var_joint_q_new,
torch::Tensor var_joint_qd_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_integrate_cpu_kernel_forward(
cast<int*>(var_joint_type),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<float*>(var_joint_qdd),
var_dt,
cast<float*>(var_joint_q_new),
cast<float*>(var_joint_qd_new));
}
}
void eval_rigid_integrate_cpu_backward(int dim,
torch::Tensor var_joint_type,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_qdd,
float var_dt,
torch::Tensor var_joint_q_new,
torch::Tensor var_joint_qd_new,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_qdd,
float adj_dt,
torch::Tensor adj_joint_q_new,
torch::Tensor adj_joint_qd_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_integrate_cpu_kernel_backward(
cast<int*>(var_joint_type),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<float*>(var_joint_qdd),
var_dt,
cast<float*>(var_joint_q_new),
cast<float*>(var_joint_qd_new),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<float*>(adj_joint_qd),
cast<float*>(adj_joint_qdd),
adj_dt,
cast<float*>(adj_joint_q_new),
cast<float*>(adj_joint_qd_new));
}
}
// Python entry points
void eval_rigid_integrate_cpu_forward(int dim,
torch::Tensor var_joint_type,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_qdd,
float var_dt,
torch::Tensor var_joint_q_new,
torch::Tensor var_joint_qd_new);
void eval_rigid_integrate_cpu_backward(int dim,
torch::Tensor var_joint_type,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_qdd,
float var_dt,
torch::Tensor var_joint_q_new,
torch::Tensor var_joint_qd_new,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_qdd,
float adj_dt,
torch::Tensor adj_joint_q_new,
torch::Tensor adj_joint_qd_new);
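// solve_springs: per-spring position projection. Computes the stretch error
// (current length minus rest length), splits the correction by the endpoints'
// inverse masses, and atomically accumulates +/- delta on the two particles.
// The damping and 1/(k*dt^2) intermediates (var_11, var_24, var_30) are computed
// but not applied to the correction in this generated version.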
void solve_springs_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float* var_invmass,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
float var_dt,
df::float3* var_delta)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::load(var_invmass, var_5);
var_26 = df::load(var_invmass, var_9);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_10, var_dt);
var_29 = df::mul(var_28, var_dt);
var_30 = df::div(var_20, var_29);
var_31 = df::div(var_23, var_27);
var_32 = df::mul(var_22, var_31);
var_33 = df::mul(var_32, var_25);
df::atomic_sub(var_delta, var_5, var_33);
var_34 = df::mul(var_32, var_26);
df::atomic_add(var_delta, var_9, var_34);
}
void solve_springs_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float* var_invmass,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
float var_dt,
df::float3* var_delta,
df::float3* adj_x,
df::float3* adj_v,
float* adj_invmass,
int* adj_spring_indices,
float* adj_spring_rest_lengths,
float* adj_spring_stiffness,
float* adj_spring_damping,
float adj_dt,
df::float3* adj_delta)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
df::float3 adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::load(var_invmass, var_5);
var_26 = df::load(var_invmass, var_9);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_10, var_dt);
var_29 = df::mul(var_28, var_dt);
var_30 = df::div(var_20, var_29);
var_31 = df::div(var_23, var_27);
var_32 = df::mul(var_22, var_31);
var_33 = df::mul(var_32, var_25);
df::atomic_sub(var_delta, var_5, var_33);
var_34 = df::mul(var_32, var_26);
df::atomic_add(var_delta, var_9, var_34);
//---------
// reverse
df::adj_atomic_add(var_delta, var_9, var_34, adj_delta, adj_9, adj_34);
df::adj_mul(var_32, var_26, adj_32, adj_26, adj_34);
df::adj_atomic_sub(var_delta, var_5, var_33, adj_delta, adj_5, adj_33);
df::adj_mul(var_32, var_25, adj_32, adj_25, adj_33);
df::adj_mul(var_22, var_31, adj_22, adj_31, adj_32);
df::adj_div(var_23, var_27, adj_23, adj_27, adj_31);
df::adj_div(var_20, var_29, adj_20, adj_29, adj_30);
df::adj_mul(var_28, var_dt, adj_28, adj_dt, adj_29);
df::adj_mul(var_10, var_dt, adj_10, adj_dt, adj_28);
df::adj_add(var_25, var_26, adj_25, adj_26, adj_27);
df::adj_load(var_invmass, var_9, adj_invmass, adj_9, adj_26);
df::adj_load(var_invmass, var_5, adj_invmass, adj_5, adj_25);
df::adj_dot(var_22, var_18, adj_22, adj_18, adj_24);
df::adj_sub(var_19, var_12, adj_19, adj_12, adj_23);
df::adj_mul(var_17, var_21, adj_17, adj_21, adj_22);
df::adj_div(var_20, var_19, adj_20, adj_19, adj_21);
df::adj_length(var_17, adj_17, adj_19);
df::adj_sub(var_15, var_16, adj_15, adj_16, adj_18);
df::adj_sub(var_13, var_14, adj_13, adj_14, adj_17);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_16);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_15);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_14);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_13);
df::adj_load(var_spring_rest_lengths, var_0, adj_spring_rest_lengths, adj_0, adj_12);
df::adj_load(var_spring_damping, var_0, adj_spring_damping, adj_0, adj_11);
df::adj_load(var_spring_stiffness, var_0, adj_spring_stiffness, adj_0, adj_10);
df::adj_load(var_spring_indices, var_8, adj_spring_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_spring_indices, var_4, adj_spring_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void solve_springs_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_invmass,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
float var_dt,
torch::Tensor var_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_springs_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_invmass),
cast<int*>(var_spring_indices),
cast<float*>(var_spring_rest_lengths),
cast<float*>(var_spring_stiffness),
cast<float*>(var_spring_damping),
var_dt,
cast<df::float3*>(var_delta));
}
}
void solve_springs_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_invmass,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_invmass,
torch::Tensor adj_spring_indices,
torch::Tensor adj_spring_rest_lengths,
torch::Tensor adj_spring_stiffness,
torch::Tensor adj_spring_damping,
float adj_dt,
torch::Tensor adj_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_springs_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_invmass),
cast<int*>(var_spring_indices),
cast<float*>(var_spring_rest_lengths),
cast<float*>(var_spring_stiffness),
cast<float*>(var_spring_damping),
var_dt,
cast<df::float3*>(var_delta),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<float*>(adj_invmass),
cast<int*>(adj_spring_indices),
cast<float*>(adj_spring_rest_lengths),
cast<float*>(adj_spring_stiffness),
cast<float*>(adj_spring_damping),
adj_dt,
cast<df::float3*>(adj_delta));
}
}
// Python entry points
void solve_springs_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_invmass,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
float var_dt,
torch::Tensor var_delta);
void solve_springs_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_invmass,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_invmass,
torch::Tensor adj_spring_indices,
torch::Tensor adj_spring_rest_lengths,
torch::Tensor adj_spring_stiffness,
torch::Tensor adj_spring_damping,
float adj_dt,
torch::Tensor adj_delta);
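// solve_tetrahedra: per-tetrahedron constraint projection. Forms the deformation
// gradient F = Dm * pose from the three edge vectors, projects a deviatoric
// constraint sqrt(tr(F^T F) - 3) and a volume constraint det(F) - (1 + m0/m1),
// where m0/m1 are the tet's first two material parameters, then atomically
// accumulates the four vertex corrections scaled by `relaxation`.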
void solve_tetrahedra_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
float var_dt,
float var_relaxation,
df::float3* var_delta)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
float var_37;
float var_38;
float var_39;
df::float3 var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
mat33 var_46;
mat33 var_47;
float var_48;
const float var_49 = 6.0;
float var_50;
const float var_51 = 1.0;
float var_52;
mat33 var_53;
float var_54;
float var_55;
float var_56;
df::float3 var_57;
float var_58;
float var_59;
float var_60;
df::float3 var_61;
float var_62;
float var_63;
float var_64;
df::float3 var_65;
float var_66;
float var_67;
float var_68;
float var_69;
float var_70;
const float var_71 = 3.0;
float var_72;
float var_73;
float var_74;
const float var_75 = 0.0;
bool var_76;
bool var_77;
float var_78;
float var_79;
mat33 var_80;
mat33 var_81;
float var_82;
mat33 var_83;
float var_84;
float var_85;
float var_86;
float var_87;
float var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
df::float3 var_93;
float var_94;
float var_95;
float var_96;
df::float3 var_97;
df::float3 var_98;
df::float3 var_99;
float var_100;
df::float3 var_101;
float var_102;
float var_103;
float var_104;
float var_105;
float var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
float var_117;
float var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
float var_123;
float var_124;
float var_125;
df::float3 var_126;
df::float3 var_127;
df::float3 var_128;
df::float3 var_129;
df::float3 var_130;
df::float3 var_131;
df::float3 var_132;
df::float3 var_133;
float var_134;
df::float3 var_135;
float var_136;
float var_137;
float var_138;
float var_139;
float var_140;
float var_141;
float var_142;
float var_143;
float var_144;
float var_145;
float var_146;
float var_147;
float var_148;
float var_149;
float var_150;
float var_151;
float var_152;
df::float3 var_153;
df::float3 var_154;
df::float3 var_155;
df::float3 var_156;
df::float3 var_157;
df::float3 var_158;
df::float3 var_159;
df::float3 var_160;
df::float3 var_161;
df::float3 var_162;
df::float3 var_163;
df::float3 var_164;
df::float3 var_165;
df::float3 var_166;
df::float3 var_167;
df::float3 var_168;
//---------
// forward
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::load(var_inv_mass, var_5);
var_37 = df::load(var_inv_mass, var_9);
var_38 = df::load(var_inv_mass, var_13);
var_39 = df::load(var_inv_mass, var_17);
var_40 = df::sub(var_29, var_28);
var_41 = df::sub(var_30, var_28);
var_42 = df::sub(var_31, var_28);
var_43 = df::sub(var_33, var_32);
var_44 = df::sub(var_34, var_32);
var_45 = df::sub(var_35, var_32);
var_46 = df::mat33(var_40, var_41, var_42);
var_47 = df::load(var_pose, var_0);
var_48 = df::determinant(var_47);
var_50 = df::mul(var_48, var_49);
var_52 = df::div(var_51, var_50);
var_53 = df::mul(var_46, var_47);
var_54 = df::index(var_53, var_3, var_3);
var_55 = df::index(var_53, var_7, var_3);
var_56 = df::index(var_53, var_11, var_3);
var_57 = df::float3(var_54, var_55, var_56);
var_58 = df::index(var_53, var_3, var_7);
var_59 = df::index(var_53, var_7, var_7);
var_60 = df::index(var_53, var_11, var_7);
var_61 = df::float3(var_58, var_59, var_60);
var_62 = df::index(var_53, var_3, var_11);
var_63 = df::index(var_53, var_7, var_11);
var_64 = df::index(var_53, var_11, var_11);
var_65 = df::float3(var_62, var_63, var_64);
var_66 = df::dot(var_57, var_57);
var_67 = df::dot(var_61, var_61);
var_68 = df::add(var_66, var_67);
var_69 = df::dot(var_65, var_65);
var_70 = df::add(var_68, var_69);
var_72 = df::sub(var_70, var_71);
var_73 = df::abs(var_72);
var_74 = df::sqrt(var_73);
var_76 = (var_74 == var_75);
if (var_76) {
return;
}
var_77 = (var_70 < var_71);
if (var_77) {
var_78 = df::sub(var_75, var_74);
}
var_79 = df::select(var_77, var_74, var_78);
var_80 = df::transpose(var_47);
var_81 = df::mul(var_53, var_80);
var_82 = df::div(var_51, var_79);
var_83 = df::mul(var_81, var_82);
var_84 = df::div(var_21, var_24);
var_85 = df::add(var_51, var_84);
var_86 = df::index(var_83, var_3, var_3);
var_87 = df::index(var_83, var_7, var_3);
var_88 = df::index(var_83, var_11, var_3);
var_89 = df::float3(var_86, var_87, var_88);
var_90 = df::index(var_83, var_3, var_7);
var_91 = df::index(var_83, var_7, var_7);
var_92 = df::index(var_83, var_11, var_7);
var_93 = df::float3(var_90, var_91, var_92);
var_94 = df::index(var_83, var_3, var_11);
var_95 = df::index(var_83, var_7, var_11);
var_96 = df::index(var_83, var_11, var_11);
var_97 = df::float3(var_94, var_95, var_96);
var_98 = df::add(var_89, var_93);
var_99 = df::add(var_98, var_97);
var_100 = df::sub(var_75, var_51);
var_101 = df::mul(var_99, var_100);
var_102 = df::dot(var_101, var_101);
var_103 = df::mul(var_102, var_36);
var_104 = df::dot(var_89, var_89);
var_105 = df::mul(var_104, var_37);
var_106 = df::add(var_103, var_105);
var_107 = df::dot(var_93, var_93);
var_108 = df::mul(var_107, var_38);
var_109 = df::add(var_106, var_108);
var_110 = df::dot(var_97, var_97);
var_111 = df::mul(var_110, var_39);
var_112 = df::add(var_109, var_111);
var_113 = df::mul(var_21, var_dt);
var_114 = df::mul(var_113, var_dt);
var_115 = df::mul(var_114, var_52);
var_116 = df::div(var_51, var_115);
var_117 = df::add(var_112, var_116);
var_118 = df::div(var_74, var_117);
var_119 = df::mul(var_101, var_118);
var_120 = df::mul(var_89, var_118);
var_121 = df::mul(var_93, var_118);
var_122 = df::mul(var_97, var_118);
var_123 = df::determinant(var_53);
var_124 = df::sub(var_123, var_85);
var_125 = df::div(var_50, var_49);
var_126 = df::cross(var_41, var_42);
var_127 = df::mul(var_126, var_125);
var_128 = df::cross(var_42, var_40);
var_129 = df::mul(var_128, var_125);
var_130 = df::cross(var_40, var_41);
var_131 = df::mul(var_130, var_125);
var_132 = df::add(var_127, var_129);
var_133 = df::add(var_132, var_131);
var_134 = df::sub(var_75, var_51);
var_135 = df::mul(var_133, var_134);
var_136 = df::dot(var_135, var_135);
var_137 = df::mul(var_136, var_36);
var_138 = df::dot(var_127, var_127);
var_139 = df::mul(var_138, var_37);
var_140 = df::add(var_137, var_139);
var_141 = df::dot(var_129, var_129);
var_142 = df::mul(var_141, var_38);
var_143 = df::add(var_140, var_142);
var_144 = df::dot(var_131, var_131);
var_145 = df::mul(var_144, var_39);
var_146 = df::add(var_143, var_145);
var_147 = df::mul(var_24, var_dt);
var_148 = df::mul(var_147, var_dt);
var_149 = df::mul(var_148, var_52);
var_150 = df::div(var_51, var_149);
var_151 = df::add(var_146, var_150);
var_152 = df::div(var_124, var_151);
var_153 = df::mul(var_135, var_152);
var_154 = df::add(var_119, var_153);
var_155 = df::mul(var_127, var_152);
var_156 = df::add(var_120, var_155);
var_157 = df::mul(var_129, var_152);
var_158 = df::add(var_121, var_157);
var_159 = df::mul(var_131, var_152);
var_160 = df::add(var_122, var_159);
var_161 = df::mul(var_154, var_36);
var_162 = df::mul(var_161, var_relaxation);
df::atomic_sub(var_delta, var_5, var_162);
var_163 = df::mul(var_156, var_37);
var_164 = df::mul(var_163, var_relaxation);
df::atomic_sub(var_delta, var_9, var_164);
var_165 = df::mul(var_158, var_38);
var_166 = df::mul(var_165, var_relaxation);
df::atomic_sub(var_delta, var_13, var_166);
var_167 = df::mul(var_160, var_39);
var_168 = df::mul(var_167, var_relaxation);
df::atomic_sub(var_delta, var_17, var_168);
}
void solve_tetrahedra_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
float var_dt,
float var_relaxation,
df::float3* var_delta,
df::float3* adj_x,
df::float3* adj_v,
float* adj_inv_mass,
int* adj_indices,
mat33* adj_pose,
float* adj_activation,
float* adj_materials,
float adj_dt,
float adj_relaxation,
df::float3* adj_delta)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
float var_37;
float var_38;
float var_39;
df::float3 var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
mat33 var_46;
mat33 var_47;
float var_48;
const float var_49 = 6.0;
float var_50;
const float var_51 = 1.0;
float var_52;
mat33 var_53;
float var_54;
float var_55;
float var_56;
df::float3 var_57;
float var_58;
float var_59;
float var_60;
df::float3 var_61;
float var_62;
float var_63;
float var_64;
df::float3 var_65;
float var_66;
float var_67;
float var_68;
float var_69;
float var_70;
const float var_71 = 3.0;
float var_72;
float var_73;
float var_74;
const float var_75 = 0.0;
bool var_76;
bool var_77;
float var_78;
float var_79;
mat33 var_80;
mat33 var_81;
float var_82;
mat33 var_83;
float var_84;
float var_85;
float var_86;
float var_87;
float var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
df::float3 var_93;
float var_94;
float var_95;
float var_96;
df::float3 var_97;
df::float3 var_98;
df::float3 var_99;
float var_100;
df::float3 var_101;
float var_102;
float var_103;
float var_104;
float var_105;
float var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
float var_117;
float var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
float var_123;
float var_124;
float var_125;
df::float3 var_126;
df::float3 var_127;
df::float3 var_128;
df::float3 var_129;
df::float3 var_130;
df::float3 var_131;
df::float3 var_132;
df::float3 var_133;
float var_134;
df::float3 var_135;
float var_136;
float var_137;
float var_138;
float var_139;
float var_140;
float var_141;
float var_142;
float var_143;
float var_144;
float var_145;
float var_146;
float var_147;
float var_148;
float var_149;
float var_150;
float var_151;
float var_152;
df::float3 var_153;
df::float3 var_154;
df::float3 var_155;
df::float3 var_156;
df::float3 var_157;
df::float3 var_158;
df::float3 var_159;
df::float3 var_160;
df::float3 var_161;
df::float3 var_162;
df::float3 var_163;
df::float3 var_164;
df::float3 var_165;
df::float3 var_166;
df::float3 var_167;
df::float3 var_168;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
int adj_23 = 0;
float adj_24 = 0;
int adj_25 = 0;
int adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
df::float3 adj_40 = 0;
df::float3 adj_41 = 0;
df::float3 adj_42 = 0;
df::float3 adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
mat33 adj_46 = 0;
mat33 adj_47 = 0;
float adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
mat33 adj_53 = 0;
float adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
df::float3 adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
df::float3 adj_61 = 0;
float adj_62 = 0;
float adj_63 = 0;
float adj_64 = 0;
df::float3 adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
bool adj_76 = 0;
bool adj_77 = 0;
float adj_78 = 0;
float adj_79 = 0;
mat33 adj_80 = 0;
mat33 adj_81 = 0;
float adj_82 = 0;
mat33 adj_83 = 0;
float adj_84 = 0;
float adj_85 = 0;
float adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
df::float3 adj_89 = 0;
float adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
df::float3 adj_93 = 0;
float adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
df::float3 adj_97 = 0;
df::float3 adj_98 = 0;
df::float3 adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
float adj_103 = 0;
float adj_104 = 0;
float adj_105 = 0;
float adj_106 = 0;
float adj_107 = 0;
float adj_108 = 0;
float adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
float adj_112 = 0;
float adj_113 = 0;
float adj_114 = 0;
float adj_115 = 0;
float adj_116 = 0;
float adj_117 = 0;
float adj_118 = 0;
df::float3 adj_119 = 0;
df::float3 adj_120 = 0;
df::float3 adj_121 = 0;
df::float3 adj_122 = 0;
float adj_123 = 0;
float adj_124 = 0;
float adj_125 = 0;
df::float3 adj_126 = 0;
df::float3 adj_127 = 0;
df::float3 adj_128 = 0;
df::float3 adj_129 = 0;
df::float3 adj_130 = 0;
df::float3 adj_131 = 0;
df::float3 adj_132 = 0;
df::float3 adj_133 = 0;
float adj_134 = 0;
df::float3 adj_135 = 0;
float adj_136 = 0;
float adj_137 = 0;
float adj_138 = 0;
float adj_139 = 0;
float adj_140 = 0;
float adj_141 = 0;
float adj_142 = 0;
float adj_143 = 0;
float adj_144 = 0;
float adj_145 = 0;
float adj_146 = 0;
float adj_147 = 0;
float adj_148 = 0;
float adj_149 = 0;
float adj_150 = 0;
float adj_151 = 0;
float adj_152 = 0;
df::float3 adj_153 = 0;
df::float3 adj_154 = 0;
df::float3 adj_155 = 0;
df::float3 adj_156 = 0;
df::float3 adj_157 = 0;
df::float3 adj_158 = 0;
df::float3 adj_159 = 0;
df::float3 adj_160 = 0;
df::float3 adj_161 = 0;
df::float3 adj_162 = 0;
df::float3 adj_163 = 0;
df::float3 adj_164 = 0;
df::float3 adj_165 = 0;
df::float3 adj_166 = 0;
df::float3 adj_167 = 0;
df::float3 adj_168 = 0;
//---------
// forward
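// Primal replay: the forward computation is re-executed so that every intermediate
// needed by the reverse (adjoint) sweep further below is available.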
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::load(var_inv_mass, var_5);
var_37 = df::load(var_inv_mass, var_9);
var_38 = df::load(var_inv_mass, var_13);
var_39 = df::load(var_inv_mass, var_17);
var_40 = df::sub(var_29, var_28);
var_41 = df::sub(var_30, var_28);
var_42 = df::sub(var_31, var_28);
var_43 = df::sub(var_33, var_32);
var_44 = df::sub(var_34, var_32);
var_45 = df::sub(var_35, var_32);
var_46 = df::mat33(var_40, var_41, var_42);
var_47 = df::load(var_pose, var_0);
var_48 = df::determinant(var_47);
var_50 = df::mul(var_48, var_49);
var_52 = df::div(var_51, var_50);
var_53 = df::mul(var_46, var_47);
var_54 = df::index(var_53, var_3, var_3);
var_55 = df::index(var_53, var_7, var_3);
var_56 = df::index(var_53, var_11, var_3);
var_57 = df::float3(var_54, var_55, var_56);
var_58 = df::index(var_53, var_3, var_7);
var_59 = df::index(var_53, var_7, var_7);
var_60 = df::index(var_53, var_11, var_7);
var_61 = df::float3(var_58, var_59, var_60);
var_62 = df::index(var_53, var_3, var_11);
var_63 = df::index(var_53, var_7, var_11);
var_64 = df::index(var_53, var_11, var_11);
var_65 = df::float3(var_62, var_63, var_64);
var_66 = df::dot(var_57, var_57);
var_67 = df::dot(var_61, var_61);
var_68 = df::add(var_66, var_67);
var_69 = df::dot(var_65, var_65);
var_70 = df::add(var_68, var_69);
var_72 = df::sub(var_70, var_71);
var_73 = df::abs(var_72);
var_74 = df::sqrt(var_73);
var_76 = (var_74 == var_75);
if (var_76) {
goto label0;
}
var_77 = (var_70 < var_71);
if (var_77) {
var_78 = df::sub(var_75, var_74);
}
var_79 = df::select(var_77, var_74, var_78);
var_80 = df::transpose(var_47);
var_81 = df::mul(var_53, var_80);
var_82 = df::div(var_51, var_79);
var_83 = df::mul(var_81, var_82);
var_84 = df::div(var_21, var_24);
var_85 = df::add(var_51, var_84);
var_86 = df::index(var_83, var_3, var_3);
var_87 = df::index(var_83, var_7, var_3);
var_88 = df::index(var_83, var_11, var_3);
var_89 = df::float3(var_86, var_87, var_88);
var_90 = df::index(var_83, var_3, var_7);
var_91 = df::index(var_83, var_7, var_7);
var_92 = df::index(var_83, var_11, var_7);
var_93 = df::float3(var_90, var_91, var_92);
var_94 = df::index(var_83, var_3, var_11);
var_95 = df::index(var_83, var_7, var_11);
var_96 = df::index(var_83, var_11, var_11);
var_97 = df::float3(var_94, var_95, var_96);
var_98 = df::add(var_89, var_93);
var_99 = df::add(var_98, var_97);
var_100 = df::sub(var_75, var_51);
var_101 = df::mul(var_99, var_100);
var_102 = df::dot(var_101, var_101);
var_103 = df::mul(var_102, var_36);
var_104 = df::dot(var_89, var_89);
var_105 = df::mul(var_104, var_37);
var_106 = df::add(var_103, var_105);
var_107 = df::dot(var_93, var_93);
var_108 = df::mul(var_107, var_38);
var_109 = df::add(var_106, var_108);
var_110 = df::dot(var_97, var_97);
var_111 = df::mul(var_110, var_39);
var_112 = df::add(var_109, var_111);
var_113 = df::mul(var_21, var_dt);
var_114 = df::mul(var_113, var_dt);
var_115 = df::mul(var_114, var_52);
var_116 = df::div(var_51, var_115);
var_117 = df::add(var_112, var_116);
var_118 = df::div(var_74, var_117);
var_119 = df::mul(var_101, var_118);
var_120 = df::mul(var_89, var_118);
var_121 = df::mul(var_93, var_118);
var_122 = df::mul(var_97, var_118);
var_123 = df::determinant(var_53);
var_124 = df::sub(var_123, var_85);
var_125 = df::div(var_50, var_49);
var_126 = df::cross(var_41, var_42);
var_127 = df::mul(var_126, var_125);
var_128 = df::cross(var_42, var_40);
var_129 = df::mul(var_128, var_125);
var_130 = df::cross(var_40, var_41);
var_131 = df::mul(var_130, var_125);
var_132 = df::add(var_127, var_129);
var_133 = df::add(var_132, var_131);
var_134 = df::sub(var_75, var_51);
var_135 = df::mul(var_133, var_134);
var_136 = df::dot(var_135, var_135);
var_137 = df::mul(var_136, var_36);
var_138 = df::dot(var_127, var_127);
var_139 = df::mul(var_138, var_37);
var_140 = df::add(var_137, var_139);
var_141 = df::dot(var_129, var_129);
var_142 = df::mul(var_141, var_38);
var_143 = df::add(var_140, var_142);
var_144 = df::dot(var_131, var_131);
var_145 = df::mul(var_144, var_39);
var_146 = df::add(var_143, var_145);
var_147 = df::mul(var_24, var_dt);
var_148 = df::mul(var_147, var_dt);
var_149 = df::mul(var_148, var_52);
var_150 = df::div(var_51, var_149);
var_151 = df::add(var_146, var_150);
var_152 = df::div(var_124, var_151);
var_153 = df::mul(var_135, var_152);
var_154 = df::add(var_119, var_153);
var_155 = df::mul(var_127, var_152);
var_156 = df::add(var_120, var_155);
var_157 = df::mul(var_129, var_152);
var_158 = df::add(var_121, var_157);
var_159 = df::mul(var_131, var_152);
var_160 = df::add(var_122, var_159);
var_161 = df::mul(var_154, var_36);
var_162 = df::mul(var_161, var_relaxation);
df::atomic_sub(var_delta, var_5, var_162);
var_163 = df::mul(var_156, var_37);
var_164 = df::mul(var_163, var_relaxation);
df::atomic_sub(var_delta, var_9, var_164);
var_165 = df::mul(var_158, var_38);
var_166 = df::mul(var_165, var_relaxation);
df::atomic_sub(var_delta, var_13, var_166);
var_167 = df::mul(var_160, var_39);
var_168 = df::mul(var_167, var_relaxation);
df::atomic_sub(var_delta, var_17, var_168);
//---------
// reverse
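// Adjoint sweep: gradients are accumulated into the adj_* variables by visiting the
// primal statements above in reverse order.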
df::adj_atomic_sub(var_delta, var_17, var_168, adj_delta, adj_17, adj_168);
df::adj_mul(var_167, var_relaxation, adj_167, adj_relaxation, adj_168);
df::adj_mul(var_160, var_39, adj_160, adj_39, adj_167);
df::adj_atomic_sub(var_delta, var_13, var_166, adj_delta, adj_13, adj_166);
df::adj_mul(var_165, var_relaxation, adj_165, adj_relaxation, adj_166);
df::adj_mul(var_158, var_38, adj_158, adj_38, adj_165);
df::adj_atomic_sub(var_delta, var_9, var_164, adj_delta, adj_9, adj_164);
df::adj_mul(var_163, var_relaxation, adj_163, adj_relaxation, adj_164);
df::adj_mul(var_156, var_37, adj_156, adj_37, adj_163);
df::adj_atomic_sub(var_delta, var_5, var_162, adj_delta, adj_5, adj_162);
df::adj_mul(var_161, var_relaxation, adj_161, adj_relaxation, adj_162);
df::adj_mul(var_154, var_36, adj_154, adj_36, adj_161);
df::adj_add(var_122, var_159, adj_122, adj_159, adj_160);
df::adj_mul(var_131, var_152, adj_131, adj_152, adj_159);
df::adj_add(var_121, var_157, adj_121, adj_157, adj_158);
df::adj_mul(var_129, var_152, adj_129, adj_152, adj_157);
df::adj_add(var_120, var_155, adj_120, adj_155, adj_156);
df::adj_mul(var_127, var_152, adj_127, adj_152, adj_155);
df::adj_add(var_119, var_153, adj_119, adj_153, adj_154);
df::adj_mul(var_135, var_152, adj_135, adj_152, adj_153);
df::adj_div(var_124, var_151, adj_124, adj_151, adj_152);
df::adj_add(var_146, var_150, adj_146, adj_150, adj_151);
df::adj_div(var_51, var_149, adj_51, adj_149, adj_150);
df::adj_mul(var_148, var_52, adj_148, adj_52, adj_149);
df::adj_mul(var_147, var_dt, adj_147, adj_dt, adj_148);
df::adj_mul(var_24, var_dt, adj_24, adj_dt, adj_147);
df::adj_add(var_143, var_145, adj_143, adj_145, adj_146);
df::adj_mul(var_144, var_39, adj_144, adj_39, adj_145);
df::adj_dot(var_131, var_131, adj_131, adj_131, adj_144);
df::adj_add(var_140, var_142, adj_140, adj_142, adj_143);
df::adj_mul(var_141, var_38, adj_141, adj_38, adj_142);
df::adj_dot(var_129, var_129, adj_129, adj_129, adj_141);
df::adj_add(var_137, var_139, adj_137, adj_139, adj_140);
df::adj_mul(var_138, var_37, adj_138, adj_37, adj_139);
df::adj_dot(var_127, var_127, adj_127, adj_127, adj_138);
df::adj_mul(var_136, var_36, adj_136, adj_36, adj_137);
df::adj_dot(var_135, var_135, adj_135, adj_135, adj_136);
df::adj_mul(var_133, var_134, adj_133, adj_134, adj_135);
df::adj_sub(var_75, var_51, adj_75, adj_51, adj_134);
df::adj_add(var_132, var_131, adj_132, adj_131, adj_133);
df::adj_add(var_127, var_129, adj_127, adj_129, adj_132);
df::adj_mul(var_130, var_125, adj_130, adj_125, adj_131);
df::adj_cross(var_40, var_41, adj_40, adj_41, adj_130);
df::adj_mul(var_128, var_125, adj_128, adj_125, adj_129);
df::adj_cross(var_42, var_40, adj_42, adj_40, adj_128);
df::adj_mul(var_126, var_125, adj_126, adj_125, adj_127);
df::adj_cross(var_41, var_42, adj_41, adj_42, adj_126);
df::adj_div(var_50, var_49, adj_50, adj_49, adj_125);
df::adj_sub(var_123, var_85, adj_123, adj_85, adj_124);
df::adj_determinant(var_53, adj_53, adj_123);
df::adj_mul(var_97, var_118, adj_97, adj_118, adj_122);
df::adj_mul(var_93, var_118, adj_93, adj_118, adj_121);
df::adj_mul(var_89, var_118, adj_89, adj_118, adj_120);
df::adj_mul(var_101, var_118, adj_101, adj_118, adj_119);
df::adj_div(var_74, var_117, adj_74, adj_117, adj_118);
df::adj_add(var_112, var_116, adj_112, adj_116, adj_117);
df::adj_div(var_51, var_115, adj_51, adj_115, adj_116);
df::adj_mul(var_114, var_52, adj_114, adj_52, adj_115);
df::adj_mul(var_113, var_dt, adj_113, adj_dt, adj_114);
df::adj_mul(var_21, var_dt, adj_21, adj_dt, adj_113);
df::adj_add(var_109, var_111, adj_109, adj_111, adj_112);
df::adj_mul(var_110, var_39, adj_110, adj_39, adj_111);
df::adj_dot(var_97, var_97, adj_97, adj_97, adj_110);
df::adj_add(var_106, var_108, adj_106, adj_108, adj_109);
df::adj_mul(var_107, var_38, adj_107, adj_38, adj_108);
df::adj_dot(var_93, var_93, adj_93, adj_93, adj_107);
df::adj_add(var_103, var_105, adj_103, adj_105, adj_106);
df::adj_mul(var_104, var_37, adj_104, adj_37, adj_105);
df::adj_dot(var_89, var_89, adj_89, adj_89, adj_104);
df::adj_mul(var_102, var_36, adj_102, adj_36, adj_103);
df::adj_dot(var_101, var_101, adj_101, adj_101, adj_102);
df::adj_mul(var_99, var_100, adj_99, adj_100, adj_101);
df::adj_sub(var_75, var_51, adj_75, adj_51, adj_100);
df::adj_add(var_98, var_97, adj_98, adj_97, adj_99);
df::adj_add(var_89, var_93, adj_89, adj_93, adj_98);
df::adj_float3(var_94, var_95, var_96, adj_94, adj_95, adj_96, adj_97);
df::adj_index(var_83, var_11, var_11, adj_83, adj_11, adj_11, adj_96);
df::adj_index(var_83, var_7, var_11, adj_83, adj_7, adj_11, adj_95);
df::adj_index(var_83, var_3, var_11, adj_83, adj_3, adj_11, adj_94);
df::adj_float3(var_90, var_91, var_92, adj_90, adj_91, adj_92, adj_93);
df::adj_index(var_83, var_11, var_7, adj_83, adj_11, adj_7, adj_92);
df::adj_index(var_83, var_7, var_7, adj_83, adj_7, adj_7, adj_91);
df::adj_index(var_83, var_3, var_7, adj_83, adj_3, adj_7, adj_90);
df::adj_float3(var_86, var_87, var_88, adj_86, adj_87, adj_88, adj_89);
df::adj_index(var_83, var_11, var_3, adj_83, adj_11, adj_3, adj_88);
df::adj_index(var_83, var_7, var_3, adj_83, adj_7, adj_3, adj_87);
df::adj_index(var_83, var_3, var_3, adj_83, adj_3, adj_3, adj_86);
df::adj_add(var_51, var_84, adj_51, adj_84, adj_85);
df::adj_div(var_21, var_24, adj_21, adj_24, adj_84);
df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
df::adj_div(var_51, var_79, adj_51, adj_79, adj_82);
df::adj_mul(var_53, var_80, adj_53, adj_80, adj_81);
df::adj_transpose(var_47, adj_47, adj_80);
df::adj_select(var_77, var_74, var_78, adj_77, adj_74, adj_78, adj_79);
if (var_77) {
df::adj_sub(var_75, var_74, adj_75, adj_74, adj_78);
}
if (var_76) {
label0:;
}
df::adj_sqrt(var_73, adj_73, adj_74);
df::adj_abs(var_72, adj_72, adj_73);
df::adj_sub(var_70, var_71, adj_70, adj_71, adj_72);
df::adj_add(var_68, var_69, adj_68, adj_69, adj_70);
df::adj_dot(var_65, var_65, adj_65, adj_65, adj_69);
df::adj_add(var_66, var_67, adj_66, adj_67, adj_68);
df::adj_dot(var_61, var_61, adj_61, adj_61, adj_67);
df::adj_dot(var_57, var_57, adj_57, adj_57, adj_66);
df::adj_float3(var_62, var_63, var_64, adj_62, adj_63, adj_64, adj_65);
df::adj_index(var_53, var_11, var_11, adj_53, adj_11, adj_11, adj_64);
df::adj_index(var_53, var_7, var_11, adj_53, adj_7, adj_11, adj_63);
df::adj_index(var_53, var_3, var_11, adj_53, adj_3, adj_11, adj_62);
df::adj_float3(var_58, var_59, var_60, adj_58, adj_59, adj_60, adj_61);
df::adj_index(var_53, var_11, var_7, adj_53, adj_11, adj_7, adj_60);
df::adj_index(var_53, var_7, var_7, adj_53, adj_7, adj_7, adj_59);
df::adj_index(var_53, var_3, var_7, adj_53, adj_3, adj_7, adj_58);
df::adj_float3(var_54, var_55, var_56, adj_54, adj_55, adj_56, adj_57);
df::adj_index(var_53, var_11, var_3, adj_53, adj_11, adj_3, adj_56);
df::adj_index(var_53, var_7, var_3, adj_53, adj_7, adj_3, adj_55);
df::adj_index(var_53, var_3, var_3, adj_53, adj_3, adj_3, adj_54);
df::adj_mul(var_46, var_47, adj_46, adj_47, adj_53);
df::adj_div(var_51, var_50, adj_51, adj_50, adj_52);
df::adj_mul(var_48, var_49, adj_48, adj_49, adj_50);
df::adj_determinant(var_47, adj_47, adj_48);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_47);
df::adj_mat33(var_40, var_41, var_42, adj_40, adj_41, adj_42, adj_46);
df::adj_sub(var_35, var_32, adj_35, adj_32, adj_45);
df::adj_sub(var_34, var_32, adj_34, adj_32, adj_44);
df::adj_sub(var_33, var_32, adj_33, adj_32, adj_43);
df::adj_sub(var_31, var_28, adj_31, adj_28, adj_42);
df::adj_sub(var_30, var_28, adj_30, adj_28, adj_41);
df::adj_sub(var_29, var_28, adj_29, adj_28, adj_40);
df::adj_load(var_inv_mass, var_17, adj_inv_mass, adj_17, adj_39);
df::adj_load(var_inv_mass, var_13, adj_inv_mass, adj_13, adj_38);
df::adj_load(var_inv_mass, var_9, adj_inv_mass, adj_9, adj_37);
df::adj_load(var_inv_mass, var_5, adj_inv_mass, adj_5, adj_36);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_35);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_34);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_33);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_32);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_31);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_30);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_29);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_28);
df::adj_load(var_materials, var_26, adj_materials, adj_26, adj_27);
df::adj_add(var_25, var_11, adj_25, adj_11, adj_26);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_25);
df::adj_load(var_materials, var_23, adj_materials, adj_23, adj_24);
df::adj_add(var_22, var_7, adj_22, adj_7, adj_23);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_19, var_3, adj_19, adj_3, adj_20);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_19);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
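// CPU launch wrappers: each wrapper runs its kernel serially for i in [0, dim),
// setting the global s_threadIdx so that df::tid() returns the loop index
// (emulating a CUDA grid launch on the host).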
void solve_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_tetrahedra_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
var_dt,
var_relaxation,
cast<df::float3*>(var_delta));
}
}
void solve_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
float adj_dt,
float adj_relaxation,
torch::Tensor adj_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_tetrahedra_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
var_dt,
var_relaxation,
cast<df::float3*>(var_delta),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<float*>(adj_inv_mass),
cast<int*>(adj_indices),
cast<mat33*>(adj_pose),
cast<float*>(adj_activation),
cast<float*>(adj_materials),
adj_dt,
adj_relaxation,
cast<df::float3*>(adj_delta));
}
}
// Python entry points (forward declarations)
void solve_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta);
void solve_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
float adj_dt,
float adj_relaxation,
torch::Tensor adj_delta);
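// solve_contacts: XPBD-style ground-contact projection. For each particle the kernel
// measures the signed distance to the ground plane along the (0, 1, 0) normal (with a
// 0.01 offset), returns early when there is no penetration, and otherwise accumulates
// a normal correction plus a friction-limited tangential correction into var_delta.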
void solve_contacts_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
float var_mu,
float var_dt,
df::float3* var_delta)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
float var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
df::float3 var_6;
float var_7;
const float var_8 = 0.01;
float var_9;
bool var_10;
df::float3 var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_3 = df::load(var_inv_mass, var_0);
var_6 = df::float3(var_4, var_5, var_4);
var_7 = df::dot(var_6, var_1);
var_9 = df::sub(var_7, var_8);
var_10 = (var_9 > var_4);
if (var_10) {
return;
}
var_11 = df::mul(var_6, var_9);
var_12 = df::dot(var_6, var_2);
var_13 = df::mul(var_6, var_12);
var_14 = df::sub(var_2, var_13);
var_15 = df::mul(var_mu, var_9);
var_16 = df::length(var_14);
var_17 = df::mul(var_16, var_dt);
var_18 = df::sub(var_4, var_17);
var_19 = df::max(var_15, var_18);
var_20 = df::normalize(var_14);
var_21 = df::mul(var_20, var_19);
var_22 = df::sub(var_21, var_11);
df::atomic_add(var_delta, var_0, var_22);
}
void solve_contacts_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
float var_mu,
float var_dt,
df::float3* var_delta,
df::float3* adj_x,
df::float3* adj_v,
float* adj_inv_mass,
float adj_mu,
float adj_dt,
df::float3* adj_delta)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
float var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
df::float3 var_6;
float var_7;
const float var_8 = 0.01;
float var_9;
bool var_10;
df::float3 var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
float adj_5 = 0;
df::float3 adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
bool adj_10 = 0;
df::float3 adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_3 = df::load(var_inv_mass, var_0);
var_6 = df::float3(var_4, var_5, var_4);
var_7 = df::dot(var_6, var_1);
var_9 = df::sub(var_7, var_8);
var_10 = (var_9 > var_4);
if (var_10) {
goto label0;
}
var_11 = df::mul(var_6, var_9);
var_12 = df::dot(var_6, var_2);
var_13 = df::mul(var_6, var_12);
var_14 = df::sub(var_2, var_13);
var_15 = df::mul(var_mu, var_9);
var_16 = df::length(var_14);
var_17 = df::mul(var_16, var_dt);
var_18 = df::sub(var_4, var_17);
var_19 = df::max(var_15, var_18);
var_20 = df::normalize(var_14);
var_21 = df::mul(var_20, var_19);
var_22 = df::sub(var_21, var_11);
df::atomic_add(var_delta, var_0, var_22);
//---------
// reverse
df::adj_atomic_add(var_delta, var_0, var_22, adj_delta, adj_0, adj_22);
df::adj_sub(var_21, var_11, adj_21, adj_11, adj_22);
df::adj_mul(var_20, var_19, adj_20, adj_19, adj_21);
df::adj_normalize(var_14, adj_14, adj_20);
df::adj_max(var_15, var_18, adj_15, adj_18, adj_19);
df::adj_sub(var_4, var_17, adj_4, adj_17, adj_18);
df::adj_mul(var_16, var_dt, adj_16, adj_dt, adj_17);
df::adj_length(var_14, adj_14, adj_16);
df::adj_mul(var_mu, var_9, adj_mu, adj_9, adj_15);
df::adj_sub(var_2, var_13, adj_2, adj_13, adj_14);
df::adj_mul(var_6, var_12, adj_6, adj_12, adj_13);
df::adj_dot(var_6, var_2, adj_6, adj_2, adj_12);
df::adj_mul(var_6, var_9, adj_6, adj_9, adj_11);
if (var_10) {
label0:;
}
df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_dot(var_6, var_1, adj_6, adj_1, adj_7);
df::adj_float3(var_4, var_5, var_4, adj_4, adj_5, adj_4, adj_6);
df::adj_load(var_inv_mass, var_0, adj_inv_mass, adj_0, adj_3);
df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
return;
}
// Python entry points
void solve_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_contacts_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
var_mu,
var_dt,
cast<df::float3*>(var_delta));
}
}
void solve_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
float adj_mu,
float adj_dt,
torch::Tensor adj_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_contacts_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
var_mu,
var_dt,
cast<df::float3*>(var_delta),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<float*>(adj_inv_mass),
adj_mu,
adj_dt,
cast<df::float3*>(adj_delta));
}
}
// Python entry points (forward declarations)
void solve_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta);
void solve_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
float adj_mu,
float adj_dt,
torch::Tensor adj_delta);
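// apply_deltas: adds the accumulated constraint delta to the predicted position
// (x_out = x_pred + delta) and recomputes the velocity from the positional change
// over the step (v_out = (x_out - x_orig) / dt).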
void apply_deltas_cpu_kernel_forward(
df::float3* var_x_orig,
df::float3* var_v_orig,
df::float3* var_x_pred,
df::float3* var_delta,
float var_dt,
df::float3* var_x_out,
df::float3* var_v_out)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x_orig, var_0);
var_2 = df::load(var_x_pred, var_0);
var_3 = df::load(var_delta, var_0);
var_4 = df::add(var_2, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::div(var_5, var_dt);
df::store(var_x_out, var_0, var_4);
df::store(var_v_out, var_0, var_6);
}
void apply_deltas_cpu_kernel_backward(
df::float3* var_x_orig,
df::float3* var_v_orig,
df::float3* var_x_pred,
df::float3* var_delta,
float var_dt,
df::float3* var_x_out,
df::float3* var_v_out,
df::float3* adj_x_orig,
df::float3* adj_v_orig,
df::float3* adj_x_pred,
df::float3* adj_delta,
float adj_dt,
df::float3* adj_x_out,
df::float3* adj_v_out)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
//---------
// dual vars
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x_orig, var_0);
var_2 = df::load(var_x_pred, var_0);
var_3 = df::load(var_delta, var_0);
var_4 = df::add(var_2, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::div(var_5, var_dt);
df::store(var_x_out, var_0, var_4);
df::store(var_v_out, var_0, var_6);
//---------
// reverse
df::adj_store(var_v_out, var_0, var_6, adj_v_out, adj_0, adj_6);
df::adj_store(var_x_out, var_0, var_4, adj_x_out, adj_0, adj_4);
df::adj_div(var_5, var_dt, adj_5, adj_dt, adj_6);
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_load(var_delta, var_0, adj_delta, adj_0, adj_3);
df::adj_load(var_x_pred, var_0, adj_x_pred, adj_0, adj_2);
df::adj_load(var_x_orig, var_0, adj_x_orig, adj_0, adj_1);
return;
}
// Python entry points
void apply_deltas_cpu_forward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
apply_deltas_cpu_kernel_forward(
cast<df::float3*>(var_x_orig),
cast<df::float3*>(var_v_orig),
cast<df::float3*>(var_x_pred),
cast<df::float3*>(var_delta),
var_dt,
cast<df::float3*>(var_x_out),
cast<df::float3*>(var_v_out));
}
}
void apply_deltas_cpu_backward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out,
torch::Tensor adj_x_orig,
torch::Tensor adj_v_orig,
torch::Tensor adj_x_pred,
torch::Tensor adj_delta,
float adj_dt,
torch::Tensor adj_x_out,
torch::Tensor adj_v_out)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
apply_deltas_cpu_kernel_backward(
cast<df::float3*>(var_x_orig),
cast<df::float3*>(var_v_orig),
cast<df::float3*>(var_x_pred),
cast<df::float3*>(var_delta),
var_dt,
cast<df::float3*>(var_x_out),
cast<df::float3*>(var_v_out),
cast<df::float3*>(adj_x_orig),
cast<df::float3*>(adj_v_orig),
cast<df::float3*>(adj_x_pred),
cast<df::float3*>(adj_delta),
adj_dt,
cast<df::float3*>(adj_x_out),
cast<df::float3*>(adj_v_out));
}
}
// Python entry points (forward declarations)
void apply_deltas_cpu_forward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out);
void apply_deltas_cpu_backward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out,
torch::Tensor adj_x_orig,
torch::Tensor adj_v_orig,
torch::Tensor adj_x_pred,
torch::Tensor adj_delta,
float adj_dt,
torch::Tensor adj_x_out,
torch::Tensor adj_v_out);
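// Module registration: exposes every generated CPU forward/backward entry point to
// Python via pybind11.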
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("integrate_particles_cpu_forward", integrate_particles_cpu_forward, "integrate_particles_cpu_forward");
m.def("integrate_particles_cpu_backward", integrate_particles_cpu_backward, "integrate_particles_cpu_backward");
m.def("integrate_rigids_cpu_forward", integrate_rigids_cpu_forward, "integrate_rigids_cpu_forward");
m.def("integrate_rigids_cpu_backward", integrate_rigids_cpu_backward, "integrate_rigids_cpu_backward");
m.def("eval_springs_cpu_forward", eval_springs_cpu_forward, "eval_springs_cpu_forward");
m.def("eval_springs_cpu_backward", eval_springs_cpu_backward, "eval_springs_cpu_backward");
m.def("eval_triangles_cpu_forward", eval_triangles_cpu_forward, "eval_triangles_cpu_forward");
m.def("eval_triangles_cpu_backward", eval_triangles_cpu_backward, "eval_triangles_cpu_backward");
m.def("eval_triangles_contact_cpu_forward", eval_triangles_contact_cpu_forward, "eval_triangles_contact_cpu_forward");
m.def("eval_triangles_contact_cpu_backward", eval_triangles_contact_cpu_backward, "eval_triangles_contact_cpu_backward");
m.def("eval_triangles_rigid_contacts_cpu_forward", eval_triangles_rigid_contacts_cpu_forward, "eval_triangles_rigid_contacts_cpu_forward");
m.def("eval_triangles_rigid_contacts_cpu_backward", eval_triangles_rigid_contacts_cpu_backward, "eval_triangles_rigid_contacts_cpu_backward");
m.def("eval_bending_cpu_forward", eval_bending_cpu_forward, "eval_bending_cpu_forward");
m.def("eval_bending_cpu_backward", eval_bending_cpu_backward, "eval_bending_cpu_backward");
m.def("eval_tetrahedra_cpu_forward", eval_tetrahedra_cpu_forward, "eval_tetrahedra_cpu_forward");
m.def("eval_tetrahedra_cpu_backward", eval_tetrahedra_cpu_backward, "eval_tetrahedra_cpu_backward");
m.def("eval_contacts_cpu_forward", eval_contacts_cpu_forward, "eval_contacts_cpu_forward");
m.def("eval_contacts_cpu_backward", eval_contacts_cpu_backward, "eval_contacts_cpu_backward");
m.def("eval_soft_contacts_cpu_forward", eval_soft_contacts_cpu_forward, "eval_soft_contacts_cpu_forward");
m.def("eval_soft_contacts_cpu_backward", eval_soft_contacts_cpu_backward, "eval_soft_contacts_cpu_backward");
m.def("eval_rigid_contacts_cpu_forward", eval_rigid_contacts_cpu_forward, "eval_rigid_contacts_cpu_forward");
m.def("eval_rigid_contacts_cpu_backward", eval_rigid_contacts_cpu_backward, "eval_rigid_contacts_cpu_backward");
m.def("eval_rigid_contacts_art_cpu_forward", eval_rigid_contacts_art_cpu_forward, "eval_rigid_contacts_art_cpu_forward");
m.def("eval_rigid_contacts_art_cpu_backward", eval_rigid_contacts_art_cpu_backward, "eval_rigid_contacts_art_cpu_backward");
m.def("eval_muscles_cpu_forward", eval_muscles_cpu_forward, "eval_muscles_cpu_forward");
m.def("eval_muscles_cpu_backward", eval_muscles_cpu_backward, "eval_muscles_cpu_backward");
m.def("eval_rigid_fk_cpu_forward", eval_rigid_fk_cpu_forward, "eval_rigid_fk_cpu_forward");
m.def("eval_rigid_fk_cpu_backward", eval_rigid_fk_cpu_backward, "eval_rigid_fk_cpu_backward");
m.def("eval_rigid_id_cpu_forward", eval_rigid_id_cpu_forward, "eval_rigid_id_cpu_forward");
m.def("eval_rigid_id_cpu_backward", eval_rigid_id_cpu_backward, "eval_rigid_id_cpu_backward");
m.def("eval_rigid_tau_cpu_forward", eval_rigid_tau_cpu_forward, "eval_rigid_tau_cpu_forward");
m.def("eval_rigid_tau_cpu_backward", eval_rigid_tau_cpu_backward, "eval_rigid_tau_cpu_backward");
m.def("eval_rigid_jacobian_cpu_forward", eval_rigid_jacobian_cpu_forward, "eval_rigid_jacobian_cpu_forward");
m.def("eval_rigid_jacobian_cpu_backward", eval_rigid_jacobian_cpu_backward, "eval_rigid_jacobian_cpu_backward");
m.def("eval_rigid_mass_cpu_forward", eval_rigid_mass_cpu_forward, "eval_rigid_mass_cpu_forward");
m.def("eval_rigid_mass_cpu_backward", eval_rigid_mass_cpu_backward, "eval_rigid_mass_cpu_backward");
m.def("eval_dense_gemm_cpu_forward", eval_dense_gemm_cpu_forward, "eval_dense_gemm_cpu_forward");
m.def("eval_dense_gemm_cpu_backward", eval_dense_gemm_cpu_backward, "eval_dense_gemm_cpu_backward");
m.def("eval_dense_gemm_batched_cpu_forward", eval_dense_gemm_batched_cpu_forward, "eval_dense_gemm_batched_cpu_forward");
m.def("eval_dense_gemm_batched_cpu_backward", eval_dense_gemm_batched_cpu_backward, "eval_dense_gemm_batched_cpu_backward");
m.def("eval_dense_cholesky_cpu_forward", eval_dense_cholesky_cpu_forward, "eval_dense_cholesky_cpu_forward");
m.def("eval_dense_cholesky_cpu_backward", eval_dense_cholesky_cpu_backward, "eval_dense_cholesky_cpu_backward");
m.def("eval_dense_cholesky_batched_cpu_forward", eval_dense_cholesky_batched_cpu_forward, "eval_dense_cholesky_batched_cpu_forward");
m.def("eval_dense_cholesky_batched_cpu_backward", eval_dense_cholesky_batched_cpu_backward, "eval_dense_cholesky_batched_cpu_backward");
m.def("eval_dense_subs_cpu_forward", eval_dense_subs_cpu_forward, "eval_dense_subs_cpu_forward");
m.def("eval_dense_subs_cpu_backward", eval_dense_subs_cpu_backward, "eval_dense_subs_cpu_backward");
m.def("eval_dense_solve_cpu_forward", eval_dense_solve_cpu_forward, "eval_dense_solve_cpu_forward");
m.def("eval_dense_solve_cpu_backward", eval_dense_solve_cpu_backward, "eval_dense_solve_cpu_backward");
m.def("eval_dense_solve_batched_cpu_forward", eval_dense_solve_batched_cpu_forward, "eval_dense_solve_batched_cpu_forward");
m.def("eval_dense_solve_batched_cpu_backward", eval_dense_solve_batched_cpu_backward, "eval_dense_solve_batched_cpu_backward");
m.def("eval_rigid_integrate_cpu_forward", eval_rigid_integrate_cpu_forward, "eval_rigid_integrate_cpu_forward");
m.def("eval_rigid_integrate_cpu_backward", eval_rigid_integrate_cpu_backward, "eval_rigid_integrate_cpu_backward");
m.def("solve_springs_cpu_forward", solve_springs_cpu_forward, "solve_springs_cpu_forward");
m.def("solve_springs_cpu_backward", solve_springs_cpu_backward, "solve_springs_cpu_backward");
m.def("solve_tetrahedra_cpu_forward", solve_tetrahedra_cpu_forward, "solve_tetrahedra_cpu_forward");
m.def("solve_tetrahedra_cpu_backward", solve_tetrahedra_cpu_backward, "solve_tetrahedra_cpu_backward");
m.def("solve_contacts_cpu_forward", solve_contacts_cpu_forward, "solve_contacts_cpu_forward");
m.def("solve_contacts_cpu_backward", solve_contacts_cpu_backward, "solve_contacts_cpu_backward");
m.def("apply_deltas_cpu_forward", apply_deltas_cpu_forward, "apply_deltas_cpu_forward");
m.def("apply_deltas_cpu_backward", apply_deltas_cpu_backward, "apply_deltas_cpu_backward");
} | 532,217 | C++ | 29.40377 | 700 | 0.581105 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_humanoid.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 16
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 1024
train_rate = 0.05
ground = True
name = "humanoid"
regularization = 1.e-3
phase_count = 8
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# humanoid
test_util.urdf_load(
builder,
"assets/humanoid.urdf",
df.transform((0.0, 1.35, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
#df.transform((0.0, 0.65, 0.0), df.quat_identity()),
floating=True,
shape_ke=1.e+3*5.0,
shape_kd=1.e+2*2.0,
shape_kf=1.e+2,
shape_mu=0.5)
# set pd-stiffness
for i in range(len(builder.joint_target_ke)):
builder.joint_target_ke[i] = 10.0
builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
#self.actions = torch.zeros((self.episode_frames, len(self.model.joint_qd)), dtype=torch.float32, device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
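# Open-loop controller: a single linear layer + tanh maps the 8 sinusoidal phase
# features to one target value per joint coordinate. Note that action_strength is
# set to 0.0 below, so the computed actions have no effect in this configuration.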
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, len(self.model.joint_qd), bias=False), torch.nn.Tanh()).to(adapter)
self.action_strength = 0.0
self.action_penalty = 0.01
self.balance_reward = 15.0
self.forward_reward = 1.0
self.discount_scale = 3.0
self.discount_factor = 0.5
self.target = torch.tensor((0.0, 0.65, 0.0, 0.0, 0.0, 0.0, 1.0), dtype=torch.float32, device=adapter, requires_grad=False)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, dtype=torch.float32, device=self.model.adapter)
for f in range(0, self.episode_frames):
# df.config.no_grad = True
#df.config.verify_fp = True
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
# build sinusoidal phase inputs
for p in range(self.phase_count):
phases[p] = math.sin(10.0 * (self.sim_time + 0.5 * p * math.pi))
actions = self.network(phases)*self.action_strength
for i in range(0, self.sim_substeps):
self.state.joint_act = actions
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
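# Reward (negative loss): the squared height of the floating-base root, weighted by
# balance_reward and discounted over time, which encourages keeping the torso up.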
loss = loss - self.state.joint_q[1]*self.state.joint_q[1]*self.balance_reward*discount #torch.norm(actions)*self.action_penalty
# loss = loss + self.state.joint_qd[5]*self.state.joint_q[1]# + torch.norm(actions)*self.action_penalty
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
if (self.render):
try:
self.stage.Save()
except:
print("USD save error")
return loss
def run(self):
df.config.no_grad = True
#with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
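# NOTE: verify() relies on the per-frame self.actions tensor that is commented out in
# __init__, so it would fail if called; the script below only calls run().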
frame = 60
params = self.actions[frame]
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.actions.grad[frame].numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = self.network.parameters()
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward", detailed=True):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward", detailed=True):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
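# NOTE: params is the generator returned by self.network.parameters(), so indexing
# params[0] below would raise a TypeError. Iterating over the parameter list (as
# test_ant.py does) is presumably what was intended; train() is not invoked by the
# default script at the bottom of this file.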
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
def load(self):
self.network = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
robot.run()
#robot.load()
#robot.train(mode='adam')
| 9,237 | Python | 28.514377 | 144 | 0.533831 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_bending.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Bending:
sim_duration = 10.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 200
train_rate = 0.01
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
if (True):
mesh = Usd.Stage.Open("assets/icosphere_open.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Shell/Mesh"))
#mesh = Usd.Stage.Open("assets/cylinder_long_open.usda")
#geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/CylinderLong/CylinderLong"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
linear_vel = np.array((1.0, 0.0, 0.0))
angular_vel = np.array((0.0, 0.0, 0.0))
center = np.array((0.0, 1.6, 0.0))
radius = 0.5
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5))
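# NOTE: the rotation r computed above is not passed to add_cloth_mesh below, which
# uses the identity quaternion instead.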
builder.add_cloth_mesh(pos=center, rot=(0.0, 0.0, 0.0, 1.0), scale=radius, vel=(0.0, 0.0, 0.0), vertices=points, indices=indices, density=10.0)
for i in range(len(builder.particle_qd)):
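# NOTE: the cross product below uses the whole particle array rather than
# builder.particle_q[i]; with angular_vel = (0, 0, 0) the rotational term is zero,
# but a per-particle index is presumably what was intended.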
v = np.cross(np.array(builder.particle_q) - center, angular_vel)
builder.particle_qd[i] = v + linear_vel
self.model = builder.finalize(adapter)
self.model.tri_ke = 2000.0
self.model.tri_ka = 2000.0
self.model.tri_kd = 3.0
self.model.tri_lift = 0.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 0.3
self.model.gravity = torch.tensor((0.0, -10.0, 0.0), device=adapter)
else:
builder.add_particle(pos=(1.0, 2.0, 1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(1.0, 2.0, -1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(-1.0, 2.0, -1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(-1.0, 2.0, 1.0), vel=(0.0, 0.0, 0.0), mass=1.0)
builder.add_triangle(0, 1, 2)
builder.add_triangle(0, 2, 3)
builder.add_edge(1, 3, 2, 0)
builder.edge_rest_angle[0] = -math.pi * 0.6
self.model = builder.finalize(adapter)
self.model.tri_ke = 2000.0
self.model.tri_ka = 2000.0
self.model.tri_kd = 3.0
self.model.tri_lift = 0.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 1.7
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# contact params
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 5.0
self.model.particle_radius = 0.01
self.model.ground = True
# training params
self.target_pos = torch.tensor((4.0, 2.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/bending.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True)
for i in range(0, self.sim_steps):
# forward dynamics
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
if (render and (i % self.sim_substeps == 0)):
self.sim_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.sim_time)
# loss
#com_loss = torch.mean(self.state.particle_qd*self.model.particle_mass[:, None], 0)
#act_loss = torch.norm(activation)*self.activation_penalty
#loss = loss - com_loss[1]
return loss
def run(self):
with torch.no_grad():
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
l = self.loss(render)
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
if (mode == 'gd'):
# simple Gradient Descent
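# NOTE: no trainable tensor named param is defined in this class, so this branch
# would raise a NameError; only run() is exercised by the script at the bottom of
# the file.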
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (render):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
bending = Bending(adapter='cpu')
bending.run()
#bending.train('lbfgs')
#bending.train('sgd')
| 7,170 | Python | 28.755187 | 156 | 0.54212 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_ant.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
import xml.etree.ElementTree as ET
class Robot:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 32
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 1024
train_rate = 0.001
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 6.0
ground = True
name = "humanoid"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
self.parse_mjcf("./assets/" + self.name + ".xml", builder,
stiffness=0.0,
damping=0.0,
contact_ke=1.e+3,
contact_kd=1.e+3,
contact_kf=1.e+2,
contact_mu=0.75,
limit_ke=1.e+2,
limit_kd=1.e+1)
# base transform
# set joint targets to rest pose in mjcf
if (self.name == "ant"):
builder.joint_q[0:3] = [0.0, 0.70, 0.0]
builder.joint_q[3:7] = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
builder.joint_q[7:] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_target[7:] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
if (self.name == "humanoid"):
builder.joint_q[0:3] = [0.0, 1.70, 0.0]
builder.joint_q[3:7] = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
# width = 0.1
# radius = 0.05
# builder.add_articulation()
# body = -1
# for i in range(1):
# body = builder.add_link(
# parent=body,
# X_pj=df.transform((2.0*width, 0.0, 0.0), df.quat_identity()),
# axis=(0.0, 0.0, 1.0),
# damping=0.0,
# stiffness=0.0,
# limit_lower=np.deg2rad(-30.0),
# limit_upper=np.deg2rad(30.0),
# limit_ke=100.0,
# limit_kd=10.0,
# type=df.JOINT_REVOLUTE)
# shape = builder.add_shape_capsule(body, pos=(width, 0.0, 0.0), half_width=width, radius=radius)
# self.ground = False
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, len(self.model.joint_qd)-6, bias=False), torch.nn.Tanh()).to(adapter)
self.action_strength = 150.0
self.action_penalty = 0.01
self.balance_reward = 15.0
self.forward_reward = 1.0
self.discount_scale = 1.0
self.discount_factor = 0.5
self.target = torch.tensor((0.0, 0.65, 0.0, 0.0, 0.0, 0.0, 1.0), dtype=torch.float32, device=adapter, requires_grad=False)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
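# Minimal MJCF importer: recursively walks <worldbody>, adding one dflex link per
# <joint> and one collision shape per <geom>; only sphere and capsule geoms are handled.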
def parse_mjcf(
self,
filename,
builder,
density=1000.0,
stiffness=0.0,
damping=0.0,
contact_ke=1000.0,
contact_kd=100.0,
contact_kf=100.0,
contact_mu=0.5,
limit_ke=100.0,
limit_kd=10.0):
file = ET.parse(filename)
root = file.getroot()
# map node names to link indices
self.node_map = {}
self.xform_map = {}
self.mesh_map = {}
type_map = {
"ball": df.JOINT_BALL,
"hinge": df.JOINT_REVOLUTE,
"slide": df.JOINT_PRISMATIC,
"free": df.JOINT_FREE,
"fixed": df.JOINT_FIXED
}
def parse_float(node, key, default):
if key in node.attrib:
return float(node.attrib[key])
else:
return default
def parse_vec(node, key, default):
if key in node.attrib:
return np.fromstring(node.attrib[key], sep=" ")
else:
return np.array(default)
def parse_body(body, parent):
body_name = body.attrib["name"]
body_pos = np.fromstring(body.attrib["pos"], sep=" ")
#-----------------
# add body for each joint
for joint in body.findall("joint"):
joint_name = joint.attrib["name"],
joint_type = type_map[joint.attrib["type"]]
joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
joint_range = parse_vec(joint, "range", (-3.0, 3.0))
joint_armature = parse_float(joint, "armature", 0.0)
joint_stiffness = parse_float(joint, "stiffness", stiffness)
joint_damping = parse_float(joint, "damping", damping)
joint_axis = df.normalize(joint_axis)
if (parent == -1):
body_pos = np.array((0.0, 0.0, 0.0))
link = builder.add_link(
parent,
X_pj=df.transform(body_pos, df.quat_identity()),
axis=joint_axis,
type=joint_type,
limit_lower=np.deg2rad(joint_range[0]),
limit_upper=np.deg2rad(joint_range[1]),
limit_ke=limit_ke,
limit_kd=limit_kd,
stiffness=joint_stiffness,
damping=joint_damping,
armature=joint_armature)
parent = link
body_pos = [0.0, 0.0, 0.0] # todo: assumes that serial joints are all aligned at the same point
#-----------------
# add shapes
for geom in body.findall("geom"):
geom_name = geom.attrib["name"]
geom_type = geom.attrib["type"]
geom_size = parse_vec(geom, "size", [1.0])
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
if (geom_type == "sphere"):
builder.add_shape_sphere(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_size[0],
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
elif (geom_type == "capsule"):
if ("fromto" in geom.attrib):
geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = geom_fromto[0:3]
end = geom_fromto[3:6]
# compute the rotation that aligns the dflex capsule (which lies along the x-axis) with the mjcf fromto direction
axis = df.normalize(end-start)
angle = math.acos(np.dot(axis, (1.0, 0.0, 0.0)))
axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
geom_pos = (start + end)*0.5
geom_rot = df.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_width = np.linalg.norm(end-start)*0.5
else:
geom_radius = geom_size[0]
geom_width = geom_size[1]
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
builder.add_shape_capsule(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_radius,
half_width=geom_width,
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
else:
print("Type: " + geom_type + " unsupported")
#-----------------
# recurse
for child in body.findall("body"):
parse_body(child, link)
#-----------------
# start articulation
builder.add_articulation()
world = root.find("worldbody")
for body in world.findall("body"):
parse_body(body, -1)
def set_target(self, x, name):
self.target = torch.tensor(x, device=self.adapter)
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, dtype=torch.float32, device=self.model.adapter)
for f in range(0, self.episode_frames):
# build sinusoidal input phases
# with df.ScopedTimer("inference", False):
# phases = torch.zeros(self.phase_count, device=self.model.adapter)
# for p in range(self.phase_count):
# phases[p] = math.sin(self.phase_freq * self.sim_time + p * self.phase_step)
# # compute activations (joint torques)
# actions = self.network(phases) * self.action_strength
# simulate
with df.ScopedTimer("simulate", detailed=False, active=True):
for i in range(0, self.sim_substeps):
# apply actions
#self.state.joint_act[6:] = actions
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, i==0)
self.sim_time += self.sim_dt
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
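# Reward: discounted forward (x) velocity of the floating base, extracted from the
# root spatial twist at the current root position.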
pos = self.state.joint_q[0:3]
vel = df.get_body_linear_velocity(self.state.joint_qd[0:6], pos)
loss = loss - discount*vel[0] # + torch.norm(self.state.joint_q[1]-0.5)
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
try:
self.stage.Save()
except:
print("USD save error")
return loss
def run(self):
df.config.no_grad = True
with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.actions[frame]
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.actions.grad[frame].numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
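                # central difference: dL/dp ~ (L(p+eps) - L(p-eps)) / (2*eps), accurate to
                # O(eps^2); compared against the autograd gradient printed below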
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
        params = list(self.network.parameters())  # materialize once so the gd loop can reuse the parameters each iteration
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
#df.util.mem_report()
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
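                    # plain gradient descent: p <- p - train_rate * dL/dp, with gradients
                    # zeroed manually since no torch optimizer object is used in this mode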
with torch.no_grad():
for p in list(params):
p -= self.train_rate * p.grad
p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
def load(self):
self.network = torch.load("outputs/" + self.name + ".pt")
#---------
#robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#robot.load()
#robot.run()
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#robot.load()
#robot.train(mode='adam')
robot.run() | 16,157 | Python | 29.486792 | 146 | 0.47744 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_jelly.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import timeit
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Bending:
sim_duration = 5.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 200
train_rate = 0.01
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 2.5
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5))
builder = df.sim.ModelBuilder()
mesh = Usd.Stage.Open("assets/jellyfish.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Icosphere/Icosphere"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
face_materials = [-1] * len(counts)
face_subsets = UsdGeom.Subset.GetAllGeomSubsets(geom)
for i, s in enumerate(face_subsets):
face_subset_indices = s.GetIndicesAttr().Get()
for f in face_subset_indices:
face_materials[f] = i
active_material = 0
active_scale = []
def add_edge(f0, f1):
if (face_materials[f0] == active_material and face_materials[f1] == active_material):
active_scale.append(1.0)
else:
active_scale.append(0.0)
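        # add_edge is passed to add_cloth_mesh as edge_callback; it records a 1.0/0.0
        # mask per bending edge depending on whether both adjacent faces belong to the
        # active material subset, and the mask later gates the per-edge activations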
builder.add_cloth_mesh(pos=(0.0, 2.5, 0.0),
rot=r,
scale=1.0,
vel=(0.0, 0.0, 0.0),
vertices=points,
indices=indices,
edge_callback=add_edge,
density=100.0)
self.model = builder.finalize(adapter)
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 1000.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 1.0 #2.5
self.model.contact_ke = 1.e+4
self.model.contact_kd = 0.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
self.model.ground = False
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# training params
self.target_pos = torch.tensor((4.0, 2.0, 0.0), device=adapter)
self.rest_angle = self.model.edge_rest_angle
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.edge_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = math.pi * 0.3
self.activation_scale = torch.tensor(active_scale, device=adapter)
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/jelly.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# build sinusoidal input phases
phases = torch.zeros(self.phase_count, device=self.model.adapter)
for p in range(self.phase_count):
phases[p] = math.sin(self.phase_freq*self.sim_time + p * self.phase_step)
# compute activations (rest angles)
activation = (self.network(phases)) * self.activation_strength * self.activation_scale
self.model.edge_rest_angle = self.rest_angle + activation
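            # the network maps the sinusoidal phase vector to per-edge rest-angle offsets,
            # bounded by tanh, scaled by activation_strength and masked by activation_scale
            # so that only edges of the active material subset actuate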
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.sim_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.sim_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_qd * self.model.particle_mass[:, None], 0)
act_loss = torch.norm(activation) * self.activation_penalty
loss = loss - com_loss[1] - act_loss
return loss
def run(self):
with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
            if (optimizer):
                optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
l = self.loss(render)
with df.ScopedTimer("backward"):
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
                    with torch.no_grad():
                        for p in self.network.parameters():
                            p -= self.train_rate * p.grad
                            p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
            if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
bending = Bending(adapter='cpu')
#bending.load('jelly_10358.net')
#bending.run()
#bending.train('lbfgs')
bending.train('sgd')
| 8,059 | Python | 30.119691 | 160 | 0.540514 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_ball_joint.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class JointTest:
sim_duration = 4.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = False
name = "ball_joint"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
width = 0.1
radius = 0.05
builder.add_articulation()
body = -1
for i in range(8):
body = builder.add_link(body, df.transform((2.0*width, 0.0, 0.0), df.quat_identity()), axis=(0.0, 0.0, 1.0), damping=1.0, stiffness=500.0, type=df.JOINT_BALL)
shape = builder.add_shape_capsule(body, pos=(width, 0.0, 0.0), half_width=width, radius=radius)
builder.joint_qd[1] = 1.0
self.pole_angle_penalty = 10.0
self.pole_velocity_penalty = 0.5
self.cart_action_penalty = 1.e-7
self.cart_velocity_penalty = 1.0
self.cart_position_penalty = 2.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# simulate
with df.ScopedTimer("fd", active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", active=False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
return loss
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
test = JointTest(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
test.run()
| 7,581 | Python | 25.982206 | 170 | 0.514312 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_urdf.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
t = torch.tensor((1.0), requires_grad=True)
i = t.item()
print(i)
from urdfpy import URDF
#robot = URDF.load("assets/trifinger/urdf/trifinger_with_stage.urdf")
#robot = URDF.load("assets/franka_description/robots/franka_panda.urdf")
#robot = URDF.load("assets/anymal_b_simple_description/urdf/anymal.urdf")
#robot = URDF.load("assets/kinova_description/urdf/kinova.urdf")
#robot = URDF.load("assets/ur5/urdf/ur5_robot.urdf")
#robot = URDF.load("assets/kuka_allegro_description/allegro.urdf")
robot = URDF.load("assets/allegro_hand_description/allegro_hand_description_left.urdf")
for link in robot.links:
dir(link)
print(link)
for joint in robot.joints:
print(joint)
robot.show()
| 1,141 | Python | 29.052631 | 87 | 0.765118 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_cloth.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = True
class Cloth:
sim_duration = 5.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps # 60 frames per second, 16 updates between frames,
# sim_steps = int(sim_duration/sim_dt)
sim_steps = int(sim_duration/sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 100
train_rate = 0.01
phase_count = 4
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 2.5
builder = df.sim.ModelBuilder()
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 1.04),
vel=(0.0, 0.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
attach0 = 0
attach1 = 20
anchor0 = builder.add_particle(pos=builder.particle_q[attach0] - (1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
anchor1 = builder.add_particle(pos=builder.particle_q[attach1] + (1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_spring(anchor0, attach0, 10000.0, 1000.0, 0)
builder.add_spring(anchor1, attach1, 10000.0, 1000.0, 0)
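        # two zero-mass anchor particles are tied by stiff springs to cloth particles 0 and 20;
        # spring_rest_length is made differentiable below, so training can change the rest
        # lengths and reshape how the cloth hangs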
self.model = builder.finalize(adapter)
self.model.tri_ke = 10000.0
self.model.tri_ka = 10000.0
self.model.tri_kd = 100.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
self.model.ground = False
self.model.tri_collisions = False
# set optimization targets
self.model.spring_rest_length.requires_grad_()
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
#self.integrator = df.sim.SemiImplicitIntegrator()
self.integrator = df.sim.XPBDIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
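                # a value is NaN iff it compares unequal to itself, so this check catches
                # solver divergence before it silently propagates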
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
            # the final CoM height is the time-integral of its vertical velocity,
            # so maximizing com_pos[1] rewards sustained upward motion over the run
            loss = loss - com_pos[1]
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
def closure():
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward", False):
l = self.loss(render)
with df.ScopedTimer("backward", False):
l.backward()
with df.ScopedTimer("save", False):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
param = self.model.spring_rest_length
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.data.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
lr=0.01,
tolerance_grad=1.e-5,
tolerance_change=0.01,
line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.zero_grad()
optimizer.step(closure)
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cuda')
cloth.run()
#cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
| 7,006 | Python | 31.142202 | 157 | 0.515273 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_fem_contact.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import test_util
from pxr import Usd, UsdGeom, Gf
class FEMContact:
sim_duration = 10.0 # seconds
sim_substeps = 64
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 16
train_rate = 0.01 #1.0/(sim_dt*sim_dt)
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 2.5
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
solid = True
if (solid):
builder.add_soft_grid(
pos=(0.5, 1.0, 0.0),
rot=(0.0, 0.0, 0.0, 1.0),
vel=(0.0, 0.0, 0.0),
dim_x=3,
dim_y=10,
dim_z=3,
cell_x=0.1,
cell_y=0.1,
cell_z=0.1,
density=1000.0,
k_mu=10000.0,
k_lambda=10000.0,
k_damp=1.0)
else:
builder.add_cloth_grid(
pos=(-0.7, 1.0, -0.7),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi*0.5),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=20,
cell_x=0.075,
cell_y=0.075,
mass=0.2)
# art = builder.add_articulation()
# link = builder.add_link(
# parent=-1,
# X_pj=df.transform_identity(),
# axis=(0.0, 0.0, 0.0),
# type=df.JOINT_FREE,
# armature=0.0)
# builder.add_shape_sphere(
# body=link,
# pos=(0.0, 0.5, 0.0),
# rot=df.quat_identity(),
# radius=0.25)
# builder.add_shape_box(
# body=link,
# pos=(0.0, 0.5, 0.0),
# rot=df.quat_identity(),
# hx=0.5,
# hy=0.25,
# hz=0.5)
builder.add_articulation()
test_util.build_tree(builder, angle=0.0, length=0.25, width=0.1, max_depth=3, joint_stiffness=10000.0, joint_damping=100.0)
builder.joint_X_pj[0] = df.transform((-0.5, 0.5, 0.0), df.quat_identity())
# mesh = Usd.Stage.Open("assets/torus.stl.usda")
# geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/mesh"))
# points = geom.GetPointsAttr().Get()
# tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
# tri_indices = geom.GetFaceVertexIndicesAttr().Get()
# tri_counts = geom.GetFaceVertexCountsAttr().Get()
# r = df.quat_multiply(df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.5), df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.0))
# builder.add_soft_mesh(pos=(0.0, 2.0, 0.0),
# rot=r,
# scale=0.25,
# vel=(1.5, 0.0, 0.0),
# vertices=points,
# indices=tet_indices,
# density=10.0,
# k_mu=1000.0,
# k_lambda=1000.0,
# k_damp=1.0)
self.model = builder.finalize(adapter)
#self.model.tet_kl = 1000.0
#self.model.tet_km = 1000.0
#self.model.tet_kd = 1.0
# disable triangle dynamics (just used for rendering)
if (solid):
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
else:
self.model.tri_ke = 1000.0
self.model.tri_ka = 1000.0
self.model.tri_kd = 10.0
self.model.tri_kb = 0.0
self.model.edge_ke = 100.0
self.model.edge_kd = 0.1
self.model.contact_ke = 1.e+4*2.0
self.model.contact_kd = 10.0
self.model.contact_kf = 10.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.05
self.model.ground = True
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.3
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/fem_contact.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
#self.renderer.add_sphere((0.0, 0.5, 0.0), 0.25, "collider")
#self.renderer.add_box((0.0, 0.5, 0.0), (0.25, 0.25, 0.25), "collider")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
return loss
def run(self, profile=False, render=True):
df.config.no_grad = True
with torch.no_grad():
with df.ScopedTimer("run"):
if profile:
cp = cProfile.Profile()
cp.clear()
cp.enable()
# run forward dynamics
if profile:
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
else:
l = self.loss(render)
if profile:
cp.disable()
cp.print_stats(sort='tottime')
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
            if (optimizer):
                optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
                    with torch.no_grad():
                        for p in self.network.parameters():
                            p -= self.train_rate * p.grad
                            p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
            if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
fem = FEMContact(adapter='cuda')
fem.run(profile=False, render=True)
#fem.train('lbfgs')
#fem.train('sgd')
| 9,689 | Python | 29.28125 | 160 | 0.484467 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_ballistic.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
import math
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Ballistic:
sim_duration = 2.0 # seconds
sim_substeps = 10
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 5
train_rate = 0.1 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
builder = df.sim.ModelBuilder()
builder.add_particle((0, 1.0, 0.0), (0.1, 0.0, 0.0), 1.0)
self.model = builder.finalize(adapter)
self.target = torch.tensor((2.0, 1.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/ballistic.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
loss = torch.norm(self.state.particle_q[0] - self.target)
return loss
def train(self, mode='gd'):
# Gradient Descent
if (mode == 'gd'):
for i in range(self.train_iters):
l = self.loss()
l.backward()
print(l)
with torch.no_grad():
self.model.particle_v -= self.train_rate * self.model.particle_v.grad
self.model.particle_v.grad.zero_()
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.particle_v], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")
def closure():
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
return l
for i in range(self.train_iters):
optimizer.step(closure)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.particle_v], lr=self.train_rate, momentum=0.8)
for i in range(self.train_iters):
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
optimizer.step()
self.stage.Save()
#---------
ballistic = Ballistic(adapter='cpu')
ballistic.train('lbfgs')
| 3,437 | Python | 25.446154 | 152 | 0.566773 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_paper.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Paper:
sim_duration = 10.0 # seconds
sim_substeps = 64
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 200
train_rate = 0.01
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
np.random.seed(42)
builder = df.sim.ModelBuilder()
mesh = Usd.Stage.Open("assets/paper.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Grid/Grid"))
# mesh = Usd.Stage.Open("assets/dart.usda")
# geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/planes_001/planes_001"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
center = np.array((0.0, 10.0, 0.0))
radius = 5.0
for i in range(1):
center = np.array([0.0, 5.0, 0.0]) + np.random.ranf((3, )) * 10.0
axis = df.normalize(np.random.ranf((3, )))
angle = np.random.ranf(1, ) * math.pi
builder.add_cloth_mesh(pos=center,
rot=df.quat_from_axis_angle(axis, angle),
scale=radius,
vel=(0.0, 0.0, 0.0),
vertices=points,
indices=indices,
density=100.0)
self.model = builder.finalize(adapter)
self.model.tri_ke = 2000.0
self.model.tri_ka = 2000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 50.0
self.model.tri_drag = 0.5
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 2000.0
self.model.contact_mu = 0.5
self.model.edge_ke = 20.0
self.model.edge_kd = 0.3
self.model.gravity = torch.tensor((0.0, -10.0, 0.0), device=adapter)
self.model.particle_radius = 0.01
self.model.ground = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/paper.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True)
for i in range(0, self.sim_steps):
# forward dynamics
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
if (render and (i % self.sim_substeps == 0)):
self.sim_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.sim_time)
return loss
def run(self):
with torch.no_grad():
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
l = self.loss(render)
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
            if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
paper = Paper(adapter='cpu')
paper.run()
#bending.train('lbfgs')
#bending.train('sgd')
| 5,705 | Python | 26.432692 | 156 | 0.533567 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_muscle.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class MuscleTest:
sim_duration = 4.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = False
name = "muscle"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
length = 0.5
width = 0.1
radius = 0.05
builder.add_articulation()
body = -1
for i in range(2):
if (i == 0):
body = builder.add_link(body, df.transform((2.0*length, 0.0, 0.0), df.quat_identity()), axis=(0.0, 0.0, 1.0), damping=1.0, stiffness=0.0, type=df.JOINT_FIXED)
else:
body = builder.add_link(body, df.transform((2.0*length, 0.0, 0.0), df.quat_identity()), axis=(0.0, 0.0, 1.0), damping=1.0, stiffness=0.0, type=df.JOINT_BALL)
shape = builder.add_shape_box(body, pos=(length, 0.0, 0.0), hx=length, hy=width, hz=width)
builder.add_muscle([0, 1], [(length*2.0 - 0.25, width + 0.05, 0.0), (0.25, width + 0.05, 0.0)], 1.0, 1.0, 1.0, 1.0, 1.0)
builder.add_muscle([0, 1], [(length*2.0 - 0.25, -width - 0.05, 0.0), (0.25, -width - 0.05, 0.0)], 1.0, 1.0, 1.0, 1.0, 1.0)
builder.muscle_activation[0] = 1000.0
builder.muscle_activation[1] = 0.0
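        # two antagonistic muscle paths routed above and below the links; the positional
        # arguments appear to map to f0, lm, lt, lmax and pen_angle (the keyword names
        # used with builder.add_muscle in test_util.py), and only the first muscle gets a
        # non-zero activation here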
self.pole_angle_penalty = 10.0
self.pole_velocity_penalty = 0.5
self.cart_action_penalty = 1.e-7
self.cart_velocity_penalty = 1.0
self.cart_position_penalty = 2.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.activations = torch.zeros((self.sim_steps, 2), dtype=torch.float32, device=adapter, requires_grad=True)
self.model.joint_q.requires_grad = True
self.model.joint_qd.requires_grad = True
self.model.muscle_activation.requires_grad = True
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
#self.model.muscle_activation = torch.zeros_like(self.model.muscle_activation)
for i in range(0, self.sim_steps):
# apply actions
#for m in range(self.model.muscle_count):
#self.model.muscle_activation = self.activations[i]
# simulate
with df.ScopedTimer("fd", active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", active=False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
for m in range(self.model.muscle_count):
start = self.model.muscle_start[m]
end = self.model.muscle_start[m+1]
points = []
for w in range(start, end):
link = self.model.muscle_links[w]
point = self.model.muscle_points[w]
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=("muscle_0" + str(m)), radius=0.02, color=(self.activations[i][m]/1000.0 + 0.5, 0.2, 0.5), time=self.render_time)
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
loss = loss + self.state.joint_q[2]*self.state.joint_q[2]
return loss
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.activations]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.activations, "outputs/" + self.name + ".pt")
def load(self):
self.activations = torch.load("outputs/" + self.name + ".pt")
#---------
test = MuscleTest(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#test.load()
test.run()
#test.train('lbfgs')
| 9,648 | Python | 28.965838 | 179 | 0.516998 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_franka.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = False
name = "franka"
regularization = 1.e-3
env_count = 1
env_dofs = 2
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# cartpole
for i in range(self.env_count):
test_util.urdf_load(
builder,
"assets/franka_description/robots/franka_panda.urdf",
df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
floating=False,
limit_ke=1.e+3,
limit_kd=1.e+2)
for i in range(len(builder.joint_target_kd)):
builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
return loss
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
robot.run()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 8,591 | Python | 27.54485 | 165 | 0.505762 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_util.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import xml.etree.ElementTree as ET
import dflex as df
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
# add geometry
for collision in collisions:
origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
geo = collision.geometry
if (geo.box):
builder.add_shape_box(
link,
pos,
rot,
geo.box.size[0]*0.5,
geo.box.size[1]*0.5,
geo.box.size[2]*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.sphere):
builder.add_shape_sphere(
link,
pos,
rot,
geo.sphere.radius,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.cylinder):
# cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
builder.add_shape_capsule(
link,
pos,
df.quat_multiply(rot, r),
geo.cylinder.radius,
geo.cylinder.length*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.mesh):
for m in geo.mesh.meshes:
faces = []
vertices = []
for v in m.vertices:
vertices.append(np.array(v))
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
mesh = df.Mesh(vertices, faces)
builder.add_shape_mesh(
link,
pos,
rot,
mesh,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
def urdf_load(
builder,
filename,
xform,
floating=False,
armature=0.0,
shape_ke=1.e+4,
shape_kd=1.e+3,
shape_kf=1.e+2,
shape_mu=0.25,
limit_ke=100.0,
limit_kd=10.0):
robot = urdfpy.URDF.load(filename)
# maps from link name -> link index
link_index = {}
builder.add_articulation()
# add base
if (floating):
root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
# set dofs to transform
start = builder.joint_q_start[root]
builder.joint_q[start + 0] = xform[0][0]
builder.joint_q[start + 1] = xform[0][1]
builder.joint_q[start + 2] = xform[0][2]
builder.joint_q[start + 3] = xform[1][0]
builder.joint_q[start + 4] = xform[1][1]
builder.joint_q[start + 5] = xform[1][2]
builder.joint_q[start + 6] = xform[1][3]
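        # for a floating base the first 7 joint coordinates hold the root pose:
        # q[0:3] is the position from xform[0] and q[3:7] the orientation quaternion
        # from xform[1]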
else:
root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
link_index[robot.links[0].name] = root
# add children
for joint in robot.joints:
type = None
axis = (0.0, 0.0, 0.0)
if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
type = df.JOINT_REVOLUTE
axis = joint.axis
if (joint.joint_type == "prismatic"):
type = df.JOINT_PRISMATIC
axis = joint.axis
if (joint.joint_type == "fixed"):
type = df.JOINT_FIXED
if (joint.joint_type == "floating"):
type = df.JOINT_FREE
parent = -1
if joint.parent in link_index:
parent = link_index[joint.parent]
origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
lower = -1.e+3
upper = 1.e+3
damping = 0.0
# limits
if (joint.limit):
if (joint.limit.lower != None):
lower = joint.limit.lower
if (joint.limit.upper != None):
upper = joint.limit.upper
# damping
if (joint.dynamics):
if (joint.dynamics.damping):
damping = joint.dynamics.damping
# add link
link = builder.add_link(
parent=parent,
X_pj=df.transform(pos, rot),
axis=axis,
type=type,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
damping=damping)
# add collisions
urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
# add ourselves to the index
link_index[joint.child] = link
# build an articulated tree
def build_tree(
builder,
angle,
max_depth,
width=0.05,
length=0.25,
density=1000.0,
joint_stiffness=0.0,
joint_damping=0.0,
shape_ke = 1.e+4,
shape_kd = 1.e+3,
shape_kf = 1.e+2,
shape_mu = 0.5,
floating=False):
def build_recursive(parent, depth):
if (depth >= max_depth):
return
X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
type = df.JOINT_REVOLUTE
axis = (0.0, 0.0, 1.0)
if (depth == 0 and floating == True):
X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
type = df.JOINT_FREE
link = builder.add_link(
parent,
X_pj,
axis,
type,
stiffness=joint_stiffness,
damping=joint_damping)
# box
# shape = builder.add_shape_box(
# link,
# pos=(length, 0.0, 0.0),
# hx=length,
# hy=width,
# hz=width,
# ke=shape_ke,
# kd=shape_kd,
# kf=shape_kf,
# mu=shape_mu)
# capsule
shape = builder.add_shape_capsule(
link,
pos=(length, 0.0, 0.0),
radius=width,
half_width=length,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
# recurse
#build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating)
build_recursive(link, depth + 1)
#
build_recursive(-1, 0)
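# example usage (mirrors the call in test_fem_contact.py):
#   builder.add_articulation()
#   build_tree(builder, angle=0.0, length=0.25, width=0.1, max_depth=3,
#              joint_stiffness=10000.0, joint_damping=100.0)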
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
class Skeleton:
def __init__(self, skeleton_file, muscle_file, builder, filter):
self.parse_skeleton(skeleton_file, builder, filter)
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
self.mesh_map = {} # map mesh names to link indices objects
self.coord_start = len(builder.joint_q)
self.dof_start = len(builder.joint_qd)
type_map = {
"Ball": df.JOINT_BALL,
"Revolute": df.JOINT_REVOLUTE,
"Prismatic": df.JOINT_PRISMATIC,
"Free": df.JOINT_FREE,
"Fixed": df.JOINT_FIXED
}
builder.add_articulation()
for child in root:
if (child.tag == "Node"):
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = df.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
body_type = body.attrib["type"]
body_mass = body.attrib["mass"]
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = np.array([-1.e+3])
joint_upper = np.array([1.e+3])
try:
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")
except:
pass
if ("axis" in joint.attrib):
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
mesh_file = mesh_base + ".usd"
#-----------------------------------
# one time conversion, put meshes into local body space (and meter units)
# stage = Usd.Stage.Open("./assets/snu/OBJ/" + mesh_file)
# geom = UsdGeom.Mesh.Get(stage, "/" + mesh_base + "_obj/defaultobject/defaultobject")
# body_X_bs = df.transform_inverse(body_X_s)
# joint_X_bs = df.transform_inverse(joint_X_s)
# points = geom.GetPointsAttr().Get()
# for i in range(len(points)):
# p = df.transform_point(joint_X_bs, points[i]*0.01)
# points[i] = Gf.Vec3f(p.tolist()) # cm -> meters
# geom.GetPointsAttr().Set(points)
# extent = UsdGeom.Boundable.ComputeExtentFromPlugins(geom, 0.0)
# geom.GetExtentAttr().Set(extent)
# stage.Save()
#--------------------------------------
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)
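                    # joint_X_p expresses this joint relative to the parent joint frame and
                    # body_X_c expresses the body relative to its own joint, both obtained by
                    # composing the world-space transforms with the appropriate inverses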
if (parent_link == -1):
joint_X_p = df.transform_identity()
# add link
link = builder.add_link(
parent=parent_link,
X_pj=joint_X_p,
axis=joint_axis,
type=joint_type,
damping=2.0,
stiffness=10.0,
limit_lower=joint_lower[0],
limit_upper=joint_upper[0])
# add shape
shape = builder.add_shape_box(
body=link,
pos=body_X_c[0],
rot=body_X_c[1],
hx=body_size[0]*0.5,
hy=body_size[1]*0.5,
hz=body_size[2]*0.5,
ke=1.e+3*5.0,
kd=1.e+2*2.0,
kf=1.e+2,
mu=0.5)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
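                # Every node gets an entry here even when it is filtered out (link stays -1), so
                # parse_muscles below can detect and skip muscle units that cross excluded bodies.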
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if (child.tag == "Unit"):
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if (way_link == -1):
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)
self.muscles = muscles
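# Hypothetical usage sketch (paths follow the SNU assets used elsewhere in this repo;
# an empty filter imports every node, a set of node names keeps only that subset):
#
#   builder = df.sim.ModelBuilder()
#   skel = Skeleton("assets/snu/arm.xml", "assets/snu/muscle284.xml", builder, filter=set())
#   model = builder.finalize('cpu')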
| 14,815 | Python | 29.802495 | 130 | 0.468512 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_contact.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Contact:
sim_duration = 2.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 16
train_rate = 0.1 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
builder = df.sim.ModelBuilder()
builder.add_particle((0.0, 1.5, 0.0), (0.0, 0.0, 0.0), 0.25)
self.target_pos = torch.tensor((3.0, 0.0, 0.0), device=adapter)
self.target_index = 0
self.model = builder.finalize(adapter)
self.model.contact_ke = 1.e+3
self.model.contact_kf = 10.0
self.model.contact_kd = 10.0
self.model.contact_mu = 0.25
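        # Contact model constants as they are named throughout the dflex examples: ke/kd are the
        # normal penalty stiffness and damping, kf the tangential (friction) stiffness and mu the
        # friction coefficient; the exact force law lives in the integrator.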
self.model.particle_qd.requires_grad = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/contact.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
self.stage.Save()
loss = torch.norm(self.state.particle_q[self.target_index] - self.target_pos)
return loss
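    # The objective is simply the distance between the particle's final position and the target
    # sphere; because the semi-implicit integrator is differentiable, gradients of this loss flow
    # back through every substep to the initial particle velocity being optimized.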
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
param = self.model.particle_qd
# Gradient Descent
if (mode == 'gd'):
for i in range(self.train_iters):
l = self.loss()
l.backward()
print("loss: " + str(l.item()))
print("v: " + str(param))
print("vgrad: " + str(param.grad))
print("--------------------")
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.zero_()
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([param], 1.0, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")
def closure():
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
return l
for i in range(self.train_iters):
optimizer.step(closure)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)
for i in range(self.train_iters):
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
print(param)
optimizer.step()
self.stage.Save()
#---------
contact = Contact(adapter='cpu')
contact.train('lbfgs')
| 3,942 | Python | 25.112583 | 124 | 0.54693 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/kit_walker.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
use_omni = False
if use_omni:
import omni.usd
class Experiment:
name = "kit_walker"
network_file = None
record = True
render_time = 0.0
render_enabled = True
def __init__(self):
pass
def reset(self, adapter='cuda'):
self.episode_duration = 5.0 # seconds
self.frame_dt = 1.0/60.0
self.frame_count = int(self.episode_duration/self.frame_dt)
self.sim_substeps = 64
self.sim_dt = self.frame_dt / self.sim_substeps
self.sim_time = 0.0
self.train_max_iters = 10000
self.train_iter = 0
self.train_rate = 0.025
self.train_loss = []
self.train_loss_best = math.inf
self.phase_count = 8
self.phase_step = math.pi / self.phase_count * 2.0
self.phase_freq = 5.0
self.render_time = 0.0
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.train_loss = []
self.optimizer = None
#mesh = Usd.Stage.Open("assets/prop.usda")
if use_omni == False:
stage = Usd.Stage.Open("kit_walker.usda")
else:
stage = omni.usd.get_context().get_stage()
# ostrich
# geom = UsdGeom.Mesh(stage.GetPrimAtPath("/ostrich"))
# points = geom.GetPointsAttr().Get()
# builder.add_soft_mesh(pos=(0.0, 0.0, 0.0),
# rot=df.quat_identity(),
# scale=2.0,
# vel=(0.0, 0.0, 0.0),
# vertices=points,
# indices=tet_indices,
# density=1.0,
# k_mu=2000.0,
# k_lambda=2000.0,
# k_damp=1.0)
# bear
geom = UsdGeom.Mesh(stage.GetPrimAtPath("/bear"))
points = geom.GetPointsAttr().Get()
xform = geom.ComputeLocalToWorldTransform(0.0)
for i in range(len(points)):
points[i] = xform.Transform(points[i])
tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
tri_indices = geom.GetFaceVertexIndicesAttr().Get()
tri_counts = geom.GetFaceVertexCountsAttr().Get()
builder.add_soft_mesh(pos=(0.0, 0.0, 0.0),
rot=df.quat_identity(),
scale=2.0,
vel=(0.0, 0.0, 0.0),
vertices=points,
indices=tet_indices,
density=1.0,
k_mu=2000.0,
k_lambda=2000.0,
k_damp=2.0)
# # table
# geom = UsdGeom.Mesh(stage.GetPrimAtPath("/table"))
# points = geom.GetPointsAttr().Get()
# builder.add_soft_mesh(pos=(0.0, 0.0, 0.0),
# rot=df.quat_identity(),
# scale=1.0,
# vel=(0.0, 0.0, 0.0),
# vertices=points,
# indices=tet_indices,
# density=1.0,
# k_mu=1000.0,
# k_lambda=1000.0,
# k_damp=1.0)
#builder.add_soft_grid(pos=(0.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0), vel=(0.0, 0.0, 0.0), dim_x=1, dim_y=2, dim_z=1, cell_x=0.5, cell_y=0.5, cell_z=0.5, density=1.0)
# s = 2.0
# builder.add_particle((0.0, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((s, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, 0.5, s), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, s + 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_tetrahedron(1, 3, 0, 2)
self.model = builder.finalize(adapter)
#self.model.tet_kl = 1000.0
#self.model.tet_km = 1000.0
#self.model.tet_kd = 1.0
# disable triangle dynamics (just used for rendering)
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
self.model.contact_ke = 1.e+3*2.0
self.model.contact_kd = 0.1
self.model.contact_kf = 10.0
self.model.contact_mu = 0.7
self.model.particle_radius = 0.05
self.model.ground = True
#self.model.gravity = torch.tensor((0.0, -1.0, 0.0), device=adapter)
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.3
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
        self.stage = stage  # Usd.Stage.CreateNew("outputs/fem.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if self.network_file:
self.load(self.network_file)
def inference(self):
# build sinusoidal input phases
with df.ScopedTimer("inference", False):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
for p in range(self.phase_count):
phases[p] = math.sin(self.phase_freq*self.sim_time + p * self.phase_step)
# compute activations
self.model.tet_activations = self.network(phases) * self.activation_strength
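            # Open-loop phase controller: phases[p] = sin(phase_freq * t + p * phase_step), and the
            # per-tetrahedron activations are tanh(W @ phases) * activation_strength, i.e. a single
            # linear layer mapping a sinusoidal clock signal to bounded volumetric muscle activations.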
def simulate(self, no_grad=False):
# set grad mode
df.config.no_grad = no_grad
for i in range(self.sim_substeps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
def render(self):
with df.ScopedTimer("render", False):
if (self.record):
self.render_time += self.frame_dt
if (self.stage):
self.renderer.update(self.state, self.render_time)
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for f in range(self.frame_count):
self.inference()
self.simulate()
if (self.render_enabled):
self.render()
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_qd, 0)
#act_loss = torch.norm(self.model.tet_activations)*self.activation_penalty
loss = loss - com_loss[0] + torch.norm(com_loss[1]) + torch.norm(com_loss[2])# + act_loss
return loss
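    # com_loss is the mean particle velocity (the soft body's COM velocity); the loss rewards
    # forward motion along +x and penalizes the magnitude of the vertical and lateral components,
    # accumulated over all simulated frames.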
def run(self, profile=False):
self.inference()
self.simulate(no_grad=True)
if (self.render_enabled):
self.render()
def train(self, mode='gd'):
# create optimizer if requested
if (self.optimizer == None):
# L-BFGS
if (mode == 'lbfgs'):
self.optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
self.optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5, nesterov=True)
# closure for evaluating loss (called by optimizers)
def closure():
if (self.optimizer):
self.optimizer.zero_grad()
# render every N steps
render = True
with df.ScopedTimer("forward"):
l = self.loss(render)
# save best network so far
if (l < self.train_loss_best):
self.train_loss_best = float(l)
self.save()
self.train_loss.append(float(l))
df.log("Iteration: {} Loss: {}".format(len(self.train_loss), l.item()))
# save USD file
if use_omni == False:
try:
self.stage.Save()
except:
print("Usd save error")
# calculate gradient
with df.ScopedTimer("backward"):
l.backward()
return l
# perform optimization step
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
closure()
with torch.no_grad():
params = self.network.parameters()
for p in params:
if p.grad is None:
continue
p -= self.train_rate * p.grad
p.grad.zero_()
else:
self.optimizer.step(closure)
self.train_iter += 1
def save(self):
torch.save(self.network, self.name + str(self.train_iter) + ".pt")
def load(self, file):
self.network = torch.load(file)
self.network.eval()
df.log("Loaded pretrained network: " + file)
#---------
experiment = Experiment()
if use_omni == False:
experiment.reset(adapter='cuda')
#experiment.load("kit_walker19.pt")
#experiment.train_iter = 19
# with df.ScopedTimer("update", detailed=False):
# for i in range(experiment.frame_count):
# experiment.run()
# experiment.stage.Save()
experiment.render_enabled = False
#with torch.autograd.profiler.profile() as prof:
with df.ScopedTimer("train", detailed=True):
#for i in range(experiment.train_max_iters):
experiment.train('adam')
#print(prof.key_averages().table()) | 11,009 | Python | 29.414365 | 174 | 0.512762 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_lift_drag.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
from torch.utils.tensorboard import SummaryWriter
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = False
class Cloth:
sim_duration = 2.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 4
train_rate = 0.01 / sim_substeps
phase_count = 4
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
height = 2.5
builder = df.sim.ModelBuilder()
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.5, 0.0), math.pi * 0.5),
vel=(0.0, 0.0, 0.0),
dim_x=16,
dim_y=16,
cell_x=0.125,
cell_y=0.125,
mass=1.0) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
self.model = builder.finalize(adapter)
self.model.tri_ke = 10000.0
self.model.tri_ka = 10000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 10.0
self.model.tri_drag = 5.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
self.model.ground = False
self.target = torch.tensor((8.0, 0.0, 0.0), device=adapter)
self.initial_velocity = torch.tensor((1.0, 0.0, 0.0), requires_grad=True, device=adapter)
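        # The trainable parameter is the cloth's initial velocity; tri_lift / tri_drag add
        # aerodynamic forces on each triangle, so the optimizer must find a throw velocity whose
        # lift-and-drag-affected trajectory brings the cloth's COM to the target.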
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/drag.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
self.state.particle_qd = self.state.particle_qd + self.initial_velocity
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss + torch.norm(com_pos - self.target)
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
writer = SummaryWriter()
writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
param = self.initial_velocity
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
with df.ScopedTimer("forward"):
l = self.loss(render)
with df.ScopedTimer("backward"):
l.backward()
with df.ScopedTimer("save"):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
writer.add_scalar("loss", l.item(), self.step_count)
writer.flush()
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([param], lr=0.1, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([param], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([param], lr=self.train_rate * (1.0 / 32.0), momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.step(closure)
writer.close()
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cpu')
cloth.train('lbfgs')
#cloth.run()
| 6,236 | Python | 28.559242 | 156 | 0.533355 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_cartpole.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = True
name = "cartpole"
regularization = 1.e-3
env_count = 16
env_dofs = 2
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
link_width = 0.5
max_depth = depth
# cartpole
for i in range(self.env_count):
test_util.urdf_load(builder, "assets/" + self.name + ".urdf", df.transform((0.0, 2.5, -2.0 + i*2.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)), floating=False)
builder.joint_q[i*2 + 0] = 0
            builder.joint_q[i*2 + 1] = -math.pi*0.5  # + i*0.25
self.pole_angle_penalty = 10.0
self.pole_velocity_penalty = 0.5
self.cart_action_penalty = 1.e-7
self.cart_velocity_penalty = 1.0
self.cart_position_penalty = 2.0
if self.name == "cartpole":
self.marker_body = 2
self.marker_offset = 1.0
self.discount_scale = 2.0
self.discount_factor = 0.5
if self.name == "cartpole_double":
self.marker_body = 3
self.marker_offset = 0.5
self.discount_scale = 6.0
self.discount_factor = 0.5
# # humanoid
# test_util.urdf_load(
# builder,
# "assets/humanoid.urdf",
# df.transform((0.0, 1.5, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
# floating=True,
# shape_ke=1.e+3*5.0,
# shape_kd=1.e+3,
# shape_kf=1.e+2,
# shape_mu=0.5)
# # set pd-stiffness
# for i in range(len(builder.joint_target_ke)):
# builder.joint_target_ke[i] = 10.0
# builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# apply actions
self.state.joint_act[::2] = self.actions[:, i] # assign actions to cart DOF 0, 2, 4, etc
#self.state.joint_act = self.state.joint_q*-50.0 - self.state.joint_qd*1.0
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, update_mass_matrix=(i%1==0))
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# reward
reward_start = 2.0
if self.sim_time > reward_start:
discount_time = (self.sim_time - reward_start)
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
pole_rot = self.state.joint_q[1::2] # 1,3,5
pole_vel = self.state.joint_qd[1::2] # 1,3,5
cart_pos = self.state.joint_q[0::2] # 0,2,4
cart_vel = self.state.joint_qd[0::2] # 0,2,4
actions = self.actions.view(-1)
loss = loss + (torch.dot(pole_rot, pole_rot)*self.pole_angle_penalty +
torch.dot(pole_vel, pole_vel)*self.pole_velocity_penalty +
torch.dot(cart_pos, cart_pos)*self.cart_position_penalty +
torch.dot(cart_vel, cart_vel)*self.cart_velocity_penalty)*discount
loss = loss + torch.dot(actions, actions)*self.cart_action_penalty
return loss
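    # After reward_start the per-step cost is a discounted quadratic penalty on pole angle and
    # velocity and on cart position and velocity, with discount(t) = discount_factor ** (discount_scale * t_since_start);
    # a quadratic penalty on the whole action trajectory is added on top of that.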
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            # 'render' is only defined inside closure(); use the instance flag instead
            if (self.render):
                self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
#df.config.no_grad = False
#robot.run()
robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 10,960 | Python | 29.030137 | 185 | 0.506296 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_snu.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class HumanoidSNU:
sim_duration = 1.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 0.01
ground = False
name = "humanoid_snu_neck"
regularization = 1.e-3
env_count = 16
env_dofs = 2
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(41)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
self.filter = {}
if self.name == "humanoid_snu_arm":
self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
self.ground = False
if self.name == "humanoid_snu_neck":
self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL"}
self.ground = False
self.node_map, self.xform_map, self.mesh_map = test_util.parse_skeleton("assets/snu/arm.xml", builder, self.filter)
self.muscles = test_util.parse_muscles("assets/snu/muscle284.xml", builder, self.node_map, self.xform_map)
# set initial position 1m off the ground
if self.name == "humanoid_snu":
builder.joint_q[1] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
#self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
self.model.joint_q.requires_grad = True
self.model.joint_qd.requires_grad = True
self.model.muscle_activation.requires_grad = True
self.target_penalty = 1.0
self.velocity_penalty = 0.1
self.action_penalty = 0.0
self.muscle_strength = 40.0
self.discount_scale = 2.0
self.discount_factor = 1.0
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
else:
self.renderer = None
self.set_target((-0.1, 0.1, 0.5), "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, dtype=torch.float32, device=self.adapter)
if (self.renderer):
self.renderer.add_sphere(self.target.tolist(), 0.05, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
self.model.collide(self.state)
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# apply actions
self.model.muscle_activation = (torch.tanh(4.0*self.activations[0] - 2.0)*0.5 + 0.5)*self.muscle_strength
#self.model.muscle_activation = self.activations[0]*self.muscle_strength
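            # tanh(4a - 2)*0.5 + 0.5 squashes the unconstrained parameters into (0, 1) before
            # scaling by muscle_strength, keeping the optimized muscle activations bounded.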
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
for mesh, link in self.mesh_map.items():
if link != -1:
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
#self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
for m in range(self.model.muscle_count):
start = self.model.muscle_start[m]
end = self.model.muscle_start[m+1]
points = []
for w in range(start, end):
link = self.model.muscle_links[w]
point = self.model.muscle_points[w].cpu()
X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=self.muscles[m].name, radius=0.0075, color=(self.model.muscle_activation[m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# loss
if self.name == "humanoid_snu_arm":
hand_pos = self.state.body_X_sc[self.node_map["HandR"]][0:3]
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
# loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
# torch.norm(self.state.joint_qd)*self.velocity_penalty +
# torch.norm(self.model.muscle_activation)*self.action_penalty)*discount
#loss = loss + torch.norm(self.state.joint_qd)
loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty
if self.name == "humanoid_snu_neck":
# rotate a vector
def transform_vector_torch(t, x):
axis = t[3:6]
w = t[6]
return x * (2.0 *w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0
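                # Rotates x by the quaternion part (axis = t[3:6], w = t[6]) of a dflex transform:
                # x' = x*(2w^2 - 1) + 2w*(axis x x) + 2*axis*(axis . x), the standard
                # quaternion-vector rotation identity written without building a rotation matrix.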
forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)
target_dir = torch.tensor((1.0, 0.0, 0.1), dtype=torch.float32, device=self.adapter)
head_forward = transform_vector_torch(self.state.body_X_sc[self.node_map["Head"]], forward_dir)
head_up = transform_vector_torch(self.state.body_X_sc[self.node_map["Head"]], up_dir)
loss = loss - torch.dot(head_forward, target_dir)*self.target_penalty - torch.dot(head_up, up_dir)*self.target_penalty
return loss
def run(self):
df.config.no_grad = True
with torch.no_grad():
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
        params = self.activations  # this class stores its trainable parameters in self.activations
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.activations]
def closure():
if (optimizer):
optimizer.zero_grad()
            # render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
#print(self.activations.grad)
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)#, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            # 'render' is only defined inside closure(); use the instance flag instead
            if (self.render):
                self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.activations, "outputs/" + self.name + ".pt")
def load(self):
self.activations = torch.load("outputs/" + self.name + ".pt")
#---------
env = HumanoidSNU(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
#env.run()
#env.load()
env.train(mode='adam')
#robot.verify(eps=1.e+1)
| 12,760 | Python | 31.306329 | 201 | 0.518103 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_walker.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Walker:
sim_duration = 5.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 50
train_rate = 0.0001
def __init__(self, mode="walker", adapter='cpu'):
self.phase_count = 8
self.phase_step = math.pi / self.phase_count * 2.0
self.phase_freq = 20.0
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
walker = Usd.Stage.Open("assets/walker.usda")
mesh = UsdGeom.Mesh(walker.GetPrimAtPath("/Grid/Grid"))
points = mesh.GetPointsAttr().Get()
indices = mesh.GetFaceVertexIndicesAttr().Get()
for p in points:
builder.add_particle(tuple(p), (0.0, 0.0, 0.0), 1.0)
for t in range(0, len(indices), 3):
i = indices[t + 0]
j = indices[t + 1]
k = indices[t + 2]
builder.add_triangle(i, j, k)
self.model = builder.finalize(adapter)
self.model.tri_ke = 10000.0
self.model.tri_ka = 10000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 0.0
self.model.tri_drag = 0.0
self.edge_ke = 0.0
self.edge_kd = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tri_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.2
self.activation_penalty = 0.1
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/walker.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
# build sinusoidal phase inputs
for p in range(self.phase_count):
phases[p] = math.cos(4.0*self.sim_time*math.pi/(2.0*self.phase_count)*(2.0*p + 1.0)) #self.phase_freq*self.sim_time + p * self.phase_step)
self.model.tri_activations = self.network(phases) * self.activation_strength
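            # Each phase input is a cosine at a different harmonic of the simulation time; the single
            # linear layer + tanh maps these to per-triangle activations in [-1, 1], scaled by
            # activation_strength, giving a periodic open-loop gait signal for the walker mesh.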
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_vel[0] + torch.norm(self.model.tri_activations) * self.activation_penalty
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
            if (optimizer):
                optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
l = self.loss(render)
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
                with torch.no_grad():
                    # 'gd' mode has no optimizer object; update the network weights directly
                    for p in self.network.parameters():
                        if p.grad is None:
                            continue
                        p -= self.train_rate * p.grad
                        p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=0.1, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.25)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (render):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
walker = Walker(adapter='cpu')
walker.train('lbfgs')
#walker.run()
| 6,109 | Python | 27.685446 | 158 | 0.560485 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_cage.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Cage:
sim_duration = 2.0 # seconds
sim_substeps = 8
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 20
train_rate = 0.1 #1.0/(sim_dt*sim_dt)
def __init__(self, mode="quad", adapter='cpu'):
builder = df.sim.ModelBuilder()
if (mode == "quad"):
# anchors
builder.add_particle((-1.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, -1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((-1.0, -1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# ball
builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
ke = 1.e+2
kd = 10.0
# springs
builder.add_spring(0, 4, ke, kd, 0)
builder.add_spring(1, 4, ke, kd, 0)
builder.add_spring(2, 4, ke, kd, 0)
builder.add_spring(3, 4, ke, kd, 0)
self.target_pos = torch.tensor((0.85, 0.5, 0.0), device=adapter)
self.target_index = 4
if (mode == "box"):
# anchors
builder.add_particle((-1.0, -1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((-1.0, -1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((-1.0, 1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((-1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, -1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, -1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, 1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
builder.add_particle((1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
# ball
builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
ke = 1.e+2
kd = 10.0
target = 8
# springs
builder.add_spring(0, target, ke, kd, 0)
builder.add_spring(1, target, ke, kd, 0)
builder.add_spring(2, target, ke, kd, 0)
builder.add_spring(3, target, ke, kd, 0)
builder.add_spring(4, target, ke, kd, 0)
builder.add_spring(5, target, ke, kd, 0)
builder.add_spring(6, target, ke, kd, 0)
builder.add_spring(7, target, ke, kd, 0)
self.target_pos = torch.tensor((0.85, 0.5, -0.75), device=adapter)
self.target_index = target
if (mode == "chain"):
# anchor
builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 0.0)
segments = 4
segment_length = 1.0
ke = 1.e+2
kd = 10.0
for i in range(1, segments + 1):
builder.add_particle((segment_length * i, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i - 1, i, ke, kd, 0)
# bending spring
if (i > 1):
builder.add_spring(i - 2, i, ke * 4.0, kd, 0)
self.target_pos = torch.tensor((3.0, 0.0, 0.0), device=adapter)
self.target_index = segments
self.model = builder.finalize(adapter)
self.model.particle_radius = 0.05
self.model.ground = False
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# set optimization targets
self.model.spring_rest_length.requires_grad_()
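        # The optimization variable is the rest length of every spring: gradient steps shorten or
        # lengthen the cage springs until the suspended ball settles at target_pos.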
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cage.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# print("state: ", self.state.particle_q[self.target_index])
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
# print(self.state.particle_q[self.target_index])
loss = torch.norm(self.state.particle_q[self.target_index] - self.target_pos)
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
param = self.model.spring_rest_length
# Gradient Descent
if (mode == 'gd'):
for i in range(self.train_iters):
# with torch.autograd.detect_anomaly():
l = self.loss()
print(l)
l.backward()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.zero_()
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([param], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")
def closure():
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
return l
for i in range(self.train_iters):
optimizer.step(closure)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)
for i in range(self.train_iters):
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
optimizer.step()
self.stage.Save()
#---------
cage = Cage("box", adapter='cpu')
cage.train('gd')
#cage.run()
| 6,656 | Python | 29.122172 | 136 | 0.510968 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_hopper.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 16
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 1024
train_rate = 0.001
ground = True
name = "hopper"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# humanoid
test_util.urdf_load(
builder,
"assets/hopper.urdf",
#df.transform((0.0, 1.35, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
df.transform((0.0, 0.65, 0.0), df.quat_identity()),
floating=True,
shape_ke=1.e+3*2.0,
shape_kd=1.e+2,
shape_kf=1.e+2,
shape_mu=0.5)
# set pd-stiffness
for i in range(len(builder.joint_target_ke)):
builder.joint_target_ke[i] = 10.0
builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros((self.episode_frames, len(self.model.joint_qd)), dtype=torch.float32, device=adapter, requires_grad=True)
self.action_strength = 20.0
self.action_penalty = 0.01
self.balance_reward = 15.0
self.forward_reward = 1.0
self.discount_scale = 3.0
self.discount_factor = 0.5
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, dtype=torch.float32, device=self.model.adapter)
for f in range(0, self.episode_frames):
# df.config.no_grad = True
#df.config.verify_fp = True
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
for i in range(0, self.sim_substeps):
self.state.joint_act = self.actions[f]
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# discount_time = self.sim_time
# discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
loss = loss - self.state.joint_q[1]*self.state.joint_q[1]*self.balance_reward
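            # Note: joint_q[1] is the second coordinate of the floating base (its height above the
            # ground in this setup), so subtracting balance_reward * height^2 makes larger heights
            # reduce the loss -- presumably encouraging the hopper to stay up rather than fall.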
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
if (self.render):
try:
self.stage.Save()
except:
print("USD save error")
return loss
def run(self):
df.config.no_grad = True
#with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.actions[frame]
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.actions.grad[frame].numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
            # 'render' is only defined inside closure(); use the instance flag instead
            if (self.render):
                self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.check_grad = True
#df.config.no_grad = True
#robot.run()
#df.config.verify_fp = True
#robot.load()
robot.train(mode='lbfgs')
#robot.verify(eps=1.e-3)
| 8,553 | Python | 26.593548 | 140 | 0.519116 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/rl_swing_up.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = True
name = "cartpole"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
link_width = 0.5
max_depth = depth
# if (True):
# # create a branched tree
# builder.add_articulation()
# test_util.build_tree(builder, angle=0.0, width=link_width, max_depth=max_depth)
# self.ground = False
# # add weight
# if (False):
# radius = 0.5
# X_pj = df.transform((link_width * 2.0, 0.0, 0.0), df.quat_from_axis_angle( (0.0, 0.0, 1.0), 0.0))
# X_cm = df.transform((radius, 0.0, 0.0), df.quat_identity())
# parent = len(builder.body_mass)-1
# link = builder.add_link(parent, X_pj, (0.0, 0.0, 1.0), df.JOINT_REVOLUTE)
# shape = builder.add_shape_sphere(link, pos=(0.0, 0.0, 0.0), radius=radius)
# builder.joint_q[0] = -math.pi*0.45
# cartpole
test_util.urdf_load(builder, "assets/" + self.name + ".urdf", df.transform((0.0, 2.5, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)), floating=False)
builder.joint_q[1] = -math.pi
self.pole_angle_penalty = 10.0
self.pole_velocity_penalty = 0.5
self.cart_action_penalty = 1.e-7
self.cart_velocity_penalty = 1.0
self.cart_position_penalty = 2.0
if self.name == "cartpole":
self.marker_body = 2
self.marker_offset = 1.0
self.discount_scale = 2.0
self.discount_factor = 0.5
if self.name == "cartpole_double":
self.marker_body = 3
self.marker_offset = 0.5
self.discount_scale = 6.0
self.discount_factor = 0.5
# # humanoid
# test_util.urdf_load(
# builder,
# "assets/humanoid.urdf",
# df.transform((0.0, 1.5, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
# floating=True,
# shape_ke=1.e+3*5.0,
# shape_kd=1.e+3,
# shape_kf=1.e+2,
# shape_mu=0.5)
# # set pd-stiffness
# for i in range(len(builder.joint_target_ke)):
# builder.joint_target_ke[i] = 10.0
# builder.joint_target_kd[i] = 1.0
#builder.joint_q[0] = -math.pi*0.45
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros(self.sim_steps, device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
traj = []
for i in range(0, self.sim_steps):
# df.config.no_grad = True
#df.config.verify_fp = True
self.state.joint_act[0] = self.actions[i]
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
traj.append((X_pole[0], X_pole[1], X_pole[2]))
self.renderer.add_line_strip(traj, (1.0, 1.0, 1.0), self.render_time)
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# reward
if self.sim_time > 2.0:
discount_time = (self.sim_time - 2.0)
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
if self.name == "cartpole":
loss = loss + (torch.pow(self.state.joint_q[1], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[1], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[0], 2.0)*self.cart_position_penalty +
torch.pow(self.state.joint_qd[0], 2.0)*self.cart_velocity_penalty)*discount
if self.name == "cartpole_double":
loss = loss + (torch.pow(self.state.joint_q[1], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[1], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[2], 2.0)*self.pole_angle_penalty +
torch.pow(self.state.joint_qd[2], 2.0)*self.pole_velocity_penalty +
torch.pow(self.state.joint_q[0], 2.0)*self.cart_position_penalty +
torch.pow(self.state.joint_qd[0], 2.0)*self.cart_velocity_penalty)*discount
return loss + torch.dot(self.actions, self.actions)*self.cart_action_penalty
def run(self):
#with torch.no_grad():
l = self.loss()
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
robot.load()
robot.run()
#robot.train(mode='lbfgs')
#df.config.verify_fp = True
#robot.verify(eps=1.e-2)
| 11,631 | Python | 30.523035 | 172 | 0.509414 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_articulation_fk.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 10.0 # seconds
sim_substeps = 4
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 0.05 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
x = 0.0
w = 0.5
max_depth = 3
# create a branched tree
builder.add_articulation()
test_util.build_tree(builder, angle=0.0, width=w, max_depth=max_depth)
# add weight
if (True):
radius = 0.1
X_pj = df.transform((w * 2.0, 0.0, 0.0), df.quat_from_axis_angle( (0.0, 0.0, 1.0), 0.0))
X_cm = df.transform((radius, 0.0, 0.0), df.quat_identity())
parent = len(builder.body_mass)-1
link = builder.add_link(parent, X_pj, (0.0, 0.0, 1.0), df.JOINT_REVOLUTE)
shape = builder.add_shape_sphere(link, pos=(0.0, 0.0, 0.0), radius=radius)
self.model = builder.finalize(adapter)
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 100.0
self.model.contact_mu = 0.75
self.model.ground = False
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# base state
self.state = self.model.state()
self.state.joint_q.requires_grad_()
# ik target
self.target = torch.tensor((1.0, 2.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/articulation_fk.usda")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
if (True):
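# launch the forward-kinematics kernel to compute body transforms from the current joint coordinates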
self.state.body_X_sc, self.state.body_X_sm = df.adjoint.launch(df.eval_rigid_fk,
1,
[ # inputs
self.model.articulation_start,
self.model.joint_type,
self.model.joint_parent,
self.model.joint_q_start,
self.model.joint_qd_start,
self.state.joint_q,
self.model.joint_X_pj,
self.model.joint_X_cm,
self.model.joint_axis
],
[ # outputs
self.state.body_X_sc,
self.state.body_X_sm
],
adapter='cpu',
preserve_output=True)
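# IK-style objective: distance from body 3's centre-of-mass position to the target point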
p = self.state.body_X_sm[3][0:3]
err = torch.norm(p - self.target)
# try:
# art_start = self.art.articulation_start.clone()
# art_end = self.art.articulation_end.clone()
# joint_type = self.art.joint_type.clone()
# joint_parent = self.art.joint_parent.clone()
# joint_q_start = self.art.joint_q_start.clone()
# joint_qd_start = self.art.joint_qd_start.clone()
# joint_q = self.art.joint_q.clone()
# joint_X_pj = self.art.joint_X_pj.clone()
# joint_X_cm = self.art.joint_X_cm.clone()
# joint_axis = self.art.joint_axis.clone()
# torch.autograd.gradcheck(df.EvalRigidFowardKinematicsFunc.apply, (
# art_start,
# art_end,
# joint_type,
# joint_parent,
# joint_q_start,
# joint_qd_start,
# joint_q,
# joint_X_pj,
# joint_X_cm,
# joint_axis,
# 'cpu'), eps=1e-3, atol=1e-3, raise_exception=True)
# except Exception as e:
# print("failed: " + str(e))
# render
with df.ScopedTimer("render", False):
if (self.stage):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
#self.stage.Save()
self.sim_time += self.sim_dt
return err
def run(self):
#with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [self.state.joint_q]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=0.2, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
robot = Robot(adapter='cpu')
#robot.run()
mode = 'lbfgs'
robot.set_target((1.0, 2.0, 0.0), "target_1")
robot.train(mode)
robot.set_target((1.0, -2.0, 0.0), "target_2")
robot.train(mode)
robot.set_target((-1.0, -2.0, 0.0), "target_3")
robot.train(mode)
robot.set_target((-2.0, 2.0, 0.0), "target_4")
robot.train(mode)
#rigid.stage.Save()
| 8,503 | Python | 28.425605 | 141 | 0.49359 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_fem.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class FEM:
sim_duration = 5.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 16
train_rate = 0.01 #1.0/(sim_dt*sim_dt)
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 2.5
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
mesh = Usd.Stage.Open("assets/prop.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/mesh"))
points = geom.GetPointsAttr().Get()
tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
tri_indices = geom.GetFaceVertexIndicesAttr().Get()
tri_counts = geom.GetFaceVertexCountsAttr().Get()
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.0))
builder.add_soft_mesh(pos=(0.0, 2.0, 0.0),
rot=r,
scale=1.0,
vel=(1.5, 0.0, 0.0),
vertices=points,
indices=tet_indices,
density=1.0,
k_mu=1000.0,
k_lambda=1000.0,
k_damp=1.0)
#builder.add_soft_grid(pos=(0.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0), vel=(0.0, 0.0, 0.0), dim_x=1, dim_y=2, dim_z=1, cell_x=0.5, cell_y=0.5, cell_z=0.5, density=1.0)
# s = 2.0
# builder.add_particle((0.0, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((s, 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, 0.5, s), (0.0, 0.0, 0.0), 1.0)
# builder.add_particle((0.0, s + 0.5, 0.0), (0.0, 0.0, 0.0), 1.0)
# builder.add_tetrahedron(1, 3, 0, 2)
self.model = builder.finalize(adapter)
#self.model.tet_kl = 1000.0
#self.model.tet_km = 1000.0
#self.model.tet_kd = 1.0
# disable triangle dynamics (just used for rendering)
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1.0
self.model.contact_kf = 10.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.05
self.model.ground = True
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.3
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/fem.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# build sinusoidal input phases
with df.ScopedTimer("inference", False):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
for p in range(self.phase_count):
phases[p] = math.sin(self.phase_freq * self.sim_time + p * self.phase_step)
# compute activations (rest angles)
self.model.tet_activations = self.network(phases) * self.activation_strength
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_qd, 0)
#act_loss = torch.norm(self.model.tet_activations)*self.activation_penalty
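# com_loss[0] is the mean particle x-velocity; negating it makes the optimizer maximize forward motion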
loss = loss - com_loss[0] # - act_loss
return loss
def run(self, profile=False, render=True):
df.config.no_grad = True
with torch.no_grad():
with df.ScopedTimer("run"):
if profile:
cp = cProfile.Profile()
cp.clear()
cp.enable()
# run forward dynamics
if profile:
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
else:
l = self.loss(render)
if profile:
cp.disable()
cp.print_stats(sort='tottime')
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
if (optimizer):
    optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
for param in self.network.parameters():
    param -= self.train_rate * param.grad
    param.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
fem = FEM(adapter='cuda')
fem.run(profile=False, render=True)
#fem.train('lbfgs')
#fem.train('sgd')
| 8,649 | Python | 30.918819 | 174 | 0.513239 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_chain.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Chain:
sim_duration = 10.0 # seconds
sim_substeps = 2
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 20
train_rate = 0.1 #1.0/(sim_dt*sim_dt)
def __init__(self, adapter='cpu'):
builder = df.sim.ModelBuilder()
# anchor
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
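# chain of unit-mass particles, each joined to its predecessor by a stiff, undamped spring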
for i in range(1, 10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0., 0.0), 1.0)
builder.add_spring(i - 1, i, 1.e+6, 0.0, 0)
self.model = builder.finalize(adapter)
self.model.ground = False
self.impulse = torch.tensor((0.0, 0.0, 0.0), requires_grad=True, device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/chain.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
#self.integrator = df.sim.SemiImplicitIntegrator()
self.integrator = df.sim.XPBDIntegrator()
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
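# the trainable parameter is the initial velocity (impulse) given to the first dynamic particle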
self.state.particle_qd[1] = self.impulse
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
target = torch.tensor((0.0, 2.0, 0.0), device=self.model.adapter)
loss = torch.norm(self.state.particle_q[1] - target)
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
param = self.impulse
# Gradient Descent
if (mode == 'gd'):
for i in range(self.train_iters):
l = self.loss()
l.backward()
print(l)
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.zero_()
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([param], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")
def closure():
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
return l
for i in range(self.train_iters):
optimizer.step(closure)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)
for i in range(self.train_iters):
optimizer.zero_grad()
l = self.loss()
l.backward()
print(l)
optimizer.step()
self.stage.Save()
#---------
chain = Chain(adapter='cpu')
#cloth.train('lbfgs')
chain.run()
| 3,794 | Python | 24.993151 | 136 | 0.552188 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_cloth_collisions.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = True
class Cloth:
sim_duration = 10.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps # 60 frames per second, 16 updates between frames,
# sim_steps = int(sim_duration/sim_dt)
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 100
train_rate = 0.01
phase_count = 4
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 4.0
builder = df.sim.ModelBuilder()
# builder.add_particle(pos=(2.5, 3.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
# builder.add_particle(pos=(2.5, 4.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
# builder.add_particle(pos=(2.5, 5.0, 2.5), vel=(0.0, 0.0, 0.0), mass=1.0)
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi / 2),
vel=(0, 5.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
usd = Usd.Stage.Open("assets/box.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Cube/Cube"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
mesh = df.sim.Mesh(points, indices)
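# dynamic rigid box (from box.usda) for the cloth to collide with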
rigid = builder.add_rigid_body(pos=(2.5, 3.0, 2.5),
rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0),
vel=(0.0, 0.0, 0.0),
omega=(0.0, 0.0, 0.0))
shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(0.5, 0.5, 0.5), density=100.0, ke=1.e+5, kd=1000.0, kf=1000.0, mu=0.5)
# rigid = builder.add_rigid_body(pos=(2.5, 5.0, 2.5), rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi*0.0), vel=(0.0, 0.0, 0.0), omega=(0.0, 0.0, 0.0))
# shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(0.5, 0.5, 0.5), density=100.0, ke=1.e+5, kd=1000.0, kf=1000.0, mu=0.5)
# attach0 = 1
# attach1 = 21
# attach2 = 423
# attach3 = 443
# anchor0 = builder.add_particle(pos=builder.particle_x[attach0]-(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor1 = builder.add_particle(pos=builder.particle_x[attach1]+(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor2 = builder.add_particle(pos=builder.particle_x[attach2]-(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# anchor3 = builder.add_particle(pos=builder.particle_x[attach3]+(1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
# builder.add_spring(anchor0, attach0, 500.0, 1000.0, 0)
# builder.add_spring(anchor1, attach1, 10000.0, 1000.0, 0)
# builder.add_spring(anchor2, attach2, 10000.0, 1000.0, 0)
# builder.add_spring(anchor3, attach3, 10000.0, 1000.0, 0)
self.model = builder.finalize(adapter)
# self.model.tri_ke = 10000.0
# self.model.tri_ka = 10000.0
# self.model.tri_kd = 100.0
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 50.0
self.model.tri_drag = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.1
self.model.ground = True
self.model.tri_collisions = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth_collision.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_pos[1]
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
def closure():
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward", False):
l = self.loss(render)
with df.ScopedTimer("backward", False):
l.backward()
with df.ScopedTimer("save", False):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
param = self.model.spring_rest_length
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.data.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
lr=0.01,
tolerance_grad=1.e-5,
tolerance_change=0.01,
line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.zero_grad()
optimizer.step(closure)
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cuda')
cloth.run()
# cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
| 8,834 | Python | 34.199203 | 164 | 0.514716 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_cloth_sphere_collisions.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
df.ScopedTimer.enabled = True
class Cloth:
sim_duration = 10.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps # 60 frames per second, 16 updates between frames,
# sim_steps = int(sim_duration/sim_dt)
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
train_iters = 100
train_rate = 0.01
phase_count = 4
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 4.0
builder = df.sim.ModelBuilder()
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi / 2),
vel=(0, 5.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
usd = Usd.Stage.Open("assets/sphere.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Sphere/Sphere"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
mesh = df.sim.Mesh(points, indices)
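# sphere collider from sphere.usda; zero density gives it zero mass, so it acts as a fixed obstacle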
rigid = builder.add_rigid_body(pos=(2.5, 1.0, 2.5),
rot=df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0),
vel=(0.0, 0.0, 0.0),
omega=(0.0, 0.0, 0.0))
shape = builder.add_shape_mesh(rigid, mesh=mesh, scale=(1 / 100, 1 / 100, 1 / 100), density=0.0, ke=1e3, kd=1e3, kf=1e3, mu=1.0)
self.model = builder.finalize(adapter)
# self.model.tri_ke = 10000.0
# self.model.tri_ka = 10000.0
# self.model.tri_kd = 100.0
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 50.0
self.model.tri_drag = 0.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.1
self.model.ground = False
self.model.tri_collisions = True
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth_sphere.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_pos[1]
return loss
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.render_steps = 1
optimizer = None
def closure():
# render every N steps
render = False
if ((self.step_count % self.render_steps) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward", False):
l = self.loss(render)
with df.ScopedTimer("backward", False):
l.backward()
with df.ScopedTimer("save", False):
if (render):
self.stage.Save()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
param = self.model.spring_rest_length
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
param.grad.data.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
lr=0.01,
tolerance_grad=1.e-5,
tolerance_change=0.01,
line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)
# train
for i in range(self.train_iters):
optimizer.zero_grad()
optimizer.step(closure)
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
cloth = Cloth(adapter='cuda')
cloth.run()
# cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
| 7,413 | Python | 31.234782 | 154 | 0.507757 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_beam.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Beam:
sim_duration = 3.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 1.0
def __init__(self, device='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
builder.add_soft_grid(pos=(0.0, 0.0, 0.0),
rot=df.quat_identity(),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=2,
dim_z=2,
cell_x=0.1,
cell_y=0.1,
cell_z=0.1,
density=10.0,
k_mu=1000.0,
k_lambda=1000.0,
k_damp=5.0,
fix_left=True,
fix_right=True)
self.model = builder.finalize(device)
# disable triangle dynamics (just used for rendering)
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
self.model.particle_radius = 0.05
self.model.ground = False
self.target = torch.tensor((-0.5)).to(device)
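# trainable material parameters (k_mu, k_lambda, k_damp); clamped and broadcast to every tet inside loss()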
self.material = torch.tensor((100.0, 50.0, 5.0), requires_grad=True, device=device)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/beam.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# clamp material params to reasonable range
mat_min = torch.tensor((1.e+1, 1.e+1, 5.0), device=self.model.adapter)
mat_max = torch.tensor((1.e+5, 1.e+5, 5.0), device=self.model.adapter)
mat_val = torch.max(torch.min(mat_max, self.material), mat_min)
# broadcast stiffness params to all tets
self.model.tet_materials = mat_val.expand((self.model.tet_count, 3)).contiguous()
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_q, 0)
# minimize y
loss = loss - torch.norm(com_loss[1] - self.target)
return loss
def run(self):
with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [
self.material,
]
def closure():
if optimizer:
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
# with torch.autograd.detect_anomaly():
with df.ScopedTimer("forward"):
l = self.loss(render)
with df.ScopedTimer("backward"):
l.backward()
print(self.material)
print(self.material.grad)
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
for param in params:
param -= self.train_rate * param.grad
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.5, nesterov=True)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
beam = Beam(device='cpu')
#beam.run()
#beam.train('lbfgs')
beam.train('gd')
| 6,623 | Python | 28.704036 | 141 | 0.495697 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_rigid_bounce.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class RigidBounce:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 16
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 0.01
ground = True
name = "rigid_bounce"
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
builder.add_articulation()
# add sphere
link = builder.add_link(-1, df.transform((0.0, 0.0, 0.0), df.quat_identity()), (0,0,0), df.JOINT_FREE)
shape = builder.add_shape_sphere(
link,
(0.0, 0.0, 0.0),
df.quat_identity(),
radius=0.1,
ke=1.e+4,
kd=10.0,
kf=1.e+2,
mu=0.25)
builder.joint_q[1] = 1.0
#v_s = df.get_body_twist((0.0, 0.0, 0.0), (1.0, -1.0, 0.0), builder.joint_q[0:3])
w_m = (0.0, 0.0, 3.0) # angular velocity (expressed in world space)
v_m = (0.0, 0.0, 0.0) # linear velocity at center of mass (expressed in world space)
p_m = builder.joint_q[0:3] # position of the center of mass (expressed in world space)
# set body0 twist
builder.joint_qd[0:6] = df.get_body_twist(w_m, v_m, p_m)
# get decomposed velocities
print(df.get_body_angular_velocity(builder.joint_qd[0:6]))
print(df.get_body_linear_velocity(builder.joint_qd[0:6], p_m))
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
# initial velocity
#self.model.joint_qd[3] = 0.5
#self.model.joint_qd[4] = -0.5
#self.model.joint_qd[2] = 1.0
self.model.joint_qd.requires_grad_()
self.target = torch.tensor((1.0, 1.0, 0.0), dtype=torch.float32, device=adapter)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for f in range(0, self.episode_frames):
# df.config.no_grad = True
#df.config.verify_fp = True
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
for i in range(0, self.sim_substeps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
try:
self.stage.Save()
except:
print("USD save error")
#loss = loss + torch.dot(self.state.joint_qd[3:6], self.state.joint_qd[3:6])*self.balance_penalty*discount
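# distance between the ball's position and the target marker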
pos = self.state.joint_q[0:3]
loss = torch.norm(pos-self.target)
return loss
def run(self):
df.config.no_grad = True
#with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.model.joint_qd
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.model.joint_qd.grad.tolist()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.model.joint_qd]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.model.joint_qd, "outputs/" + self.name + ".pt")
def load(self):
self.model.joint_qd = torch.load("outputs/" + self.name + ".pt")
#---------
robot = RigidBounce(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.check_grad = True
#df.config.no_grad = True
robot.run()
#df.config.verify_fp = True
#robot.load()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e-3)
| 8,881 | Python | 27.196825 | 118 | 0.516158 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_rigid_slide.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
from pxr import Usd, UsdGeom, Gf
class RigidSlide:
sim_duration = 3.0 # seconds
sim_substeps = 16
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 64
train_rate = 0.1
discount_scale = 1.0
discount_factor = 0.5
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
# load mesh
usd = Usd.Stage.Open("assets/suzanne.usda")
geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Suzanne/Suzanne"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
builder = df.sim.ModelBuilder()
mesh = df.sim.Mesh(points, indices)
articulation = builder.add_articulation()
rigid = builder.add_link(
parent=-1,
X_pj=df.transform((0.0, 0.0, 0.0), df.quat_identity()),
axis=(0.0, 0.0, 0.0),
type=df.JOINT_FREE)
ke = 1.e+4
kd = 1.e+3
kf = 1.e+3
mu = 0.5
# shape = builder.add_shape_mesh(
# rigid,
# mesh=mesh,
# scale=(0.2, 0.2, 0.2),
# density=1000.0,
# ke=1.e+4,
# kd=1000.0,
# kf=1000.0,
# mu=0.75)
radius = 0.1
#shape = builder.add_shape_sphere(rigid, pos=(0.0, 0.0, 0.0), ke=ke, kd=kd, kf=kf, mu=mu, radius=radius)
#shape = builder.add_shape_capsule(rigid, pos=(0.0, 0.0, 0.0), radius=radius, half_width=0.5)
shape = builder.add_shape_box(rigid, pos=(0.0, 0.0, 0.0), hx=radius, hy=radius, hz=radius, ke=ke, kd=kd, kf=kf, mu=mu)
builder.joint_q[1] = radius
self.model = builder.finalize(adapter)
self.model.joint_qd.requires_grad = True
self.vel = torch.tensor((1.0, 0.0, 0.0), dtype=torch.float32, device=adapter, requires_grad=True)
self.target = torch.tensor((3.0, 0.2, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/rigid_slide.usda")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#---------------
# run simulation
# construct contacts once at startup
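# the first three twist components (angular velocity) are zero; the last three are the trainable launch velocity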
self.model.joint_qd = torch.cat((torch.tensor((0.0, 0.0, 0.0), dtype=torch.float32, device=self.model.adapter), self.vel))
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
#com = self.state.joint_q[0:3]
com = self.state.body_X_sm[0, 0:3]
loss = loss + torch.norm(com - self.target)
return loss
def run(self):
#with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
params = [self.vel]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
print("vel: " + str(params[0]))
print("grad: " + str(params[0].grad))
print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (self.stage):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
rigid = RigidSlide(adapter='cpu')
#rigid.run()
rigid.train('adam')
| 7,018 | Python | 28.124481 | 141 | 0.526503 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_snu_mlp.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class MultiLayerPerceptron(nn.Module):
def __init__(self, n_in, n_out, n_hd, adapter, inference=False):
super(MultiLayerPerceptron,self).__init__()
self.n_in = n_in
self.n_out = n_out
self.n_hd = n_hd
#self.ll = nn.Linear(n_in, n_out)
self.fc1 = nn.Linear(n_in, n_hd).to(adapter)
self.fc2 = nn.Linear(n_hd, n_hd).to(adapter)
self.fc3 = nn.Linear(n_hd, n_out).to(adapter)
self.bn1 = nn.LayerNorm(n_in, elementwise_affine=False).to(adapter)
self.bn2 = nn.LayerNorm(n_hd, elementwise_affine=False).to(adapter)
self.bn3 = nn.LayerNorm(n_out, elementwise_affine=False).to(adapter)
def forward(self, x: torch.Tensor):
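# two hidden layers with LayerNorm + leaky ReLU; the -2.0 shift biases the tanh output towards -1 (low activation)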
x = F.leaky_relu(self.bn2(self.fc1(x)))
x = F.leaky_relu(self.bn2(self.fc2(x)))
x = torch.tanh(self.bn3(self.fc3(x))-2.0)
return x
class HumanoidSNU:
train_iters = 100000000
train_rate = 0.001
train_size = 128
train_batch_size = 4
train_batch_iters = 128
train_batch_count = int(train_size/train_batch_size)
train_data = None
ground = True
name = "humanoid_snu_lower"
regularization = 1.e-3
inference = False
initial_y = 1.0
def __init__(self, depth=1, mode='numpy', render=True, sim_duration=1.0, adapter='cpu', inference=False):
self.sim_duration = sim_duration # seconds
self.sim_substeps = 16
self.sim_dt = (1.0 / 60.0) / self.sim_substeps
self.sim_steps = int(self.sim_duration / self.sim_dt)
self.sim_time = 0.0
torch.manual_seed(41)
np.random.seed(41)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
self.filter = {}
if self.name == "humanoid_snu_arm":
self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
self.ground = False
if self.name == "humanoid_snu_neck":
self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL" }
self.ground = False
if self.name == "humanoid_snu_lower":
self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}
self.ground = True
self.initial_y = 1.0
if self.name == "humanoid_snu":
self.filter = {}
self.ground = True
self.skeletons = []
self.inference = inference
# if (self.inference):
# self.train_batch_size = 1
for i in range(self.train_batch_size):
skeleton = test_util.Skeleton("assets/snu/arm.xml", "assets/snu/muscle284.xml", builder, self.filter)
# set initial position 1m off the ground
builder.joint_q[skeleton.coord_start + 0] = i*1.5
builder.joint_q[skeleton.coord_start + 1] = self.initial_y
# offset on z-axis
#builder.joint_q[skeleton.coord_start + 2] = 10.0
# initial velocity
#builder.joint_qd[skeleton.dof_start + 5] = 3.0
self.skeletons.append(skeleton)
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
#self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
#self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
self.network = MultiLayerPerceptron(3, len(self.skeletons[0].muscles), 128, adapter)
self.model.joint_q.requires_grad = True
self.model.joint_qd.requires_grad = True
self.model.muscle_activation.requires_grad = True
self.target_penalty = 1.0
self.velocity_penalty = 0.1
self.action_penalty = 0.0
self.muscle_strength = 40.0
self.discount_scale = 2.0
self.discount_factor = 1.0
# generate training data
targets = []
for i in range(self.train_size):
# generate a random point in -1, 1 away from the head
t = np.random.rand(2)*2.0 - 1.0
t[1] += 0.5
targets.append((t[0], t[1] + 0.5, 1.0))
self.train_data = torch.tensor(targets, dtype=torch.float32, device=self.adapter)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
else:
self.renderer = None
self.set_target(torch.tensor((0.75, 0.4, 0.5), dtype=torch.float32, device=self.adapter), "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = x
if (self.renderer):
self.renderer.add_sphere(self.target.tolist(), 0.05, name, self.render_time)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
# apply actions
#self.model.muscle_activation = self.activations[0]*self.muscle_strength
# compute activations for each target in the batch
targets = self.train_data[0:self.train_batch_size]
activations = torch.flatten(self.network(targets))
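# map tanh outputs from [-1, 1] into [0, 1] and scale by the maximum muscle strength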
self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength
# one time collision
self.model.collide(self.state)
for i in range(self.sim_steps):
# apply random actions per-frame
#self.model.muscle_activation = (activations*0.5 + 0.5 + torch.rand_like(activations,dtype=torch.float32, device=self.model.adapter))*self.muscle_strength
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
#if self.inference:
#x = math.cos(self.sim_time*0.5)*0.5
#y = math.sin(self.sim_time*0.5)*0.5
# t = self.sim_time*0.5
# x = math.sin(t)*0.5
# y = math.sin(t)*math.cos(t)*0.5
# self.set_target(torch.tensor((x, y + 0.5, 1.0), dtype=torch.float32, device=self.adapter), "target")
# activations = self.network(self.target)
# self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
muscle_start = 0
skel_index = 0
for s in self.skeletons:
for mesh, link in s.mesh_map.items():
if link != -1:
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
#self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
for m in range(len(s.muscles)):#.self.model.muscle_count):
start = self.model.muscle_start[muscle_start + m].item()
end = self.model.muscle_start[muscle_start + m + 1].item()
points = []
for w in range(start, end):
link = self.model.muscle_links[w].item()
point = self.model.muscle_points[w].cpu().numpy()
X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)
muscle_start += len(s.muscles)
skel_index += 1
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
# loss
if self.name == "humanoid_snu_arm":
hand_pos = self.state.body_X_sc[self.skeletons[0].node_map["HandR"]][0:3]
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
# loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
# torch.norm(self.state.joint_qd)*self.velocity_penalty +
# torch.norm(self.model.muscle_activation)*self.action_penalty)*discount
#loss = loss + torch.norm(self.state.joint_qd)
loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty
if self.name == "humanoid_snu_neck":
# rotate a vector
def transform_vector_torch(t, x):
axis = t[3:6]
w = t[6]
return x * (2.0 *w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0
forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)
for i in range(self.train_batch_size):
skel = self.skeletons[i]
head_pos = self.state.body_X_sc[skel.node_map["Head"]][0:3]
head_forward = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], forward_dir)
head_up = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], up_dir)
target_dir = self.train_data[i] - head_pos
loss_forward = torch.dot(head_forward, target_dir)*self.target_penalty
loss_up = torch.dot(head_up, up_dir)*self.target_penalty*0.5
loss_penalty = torch.dot(activations, activations)*self.action_penalty
loss = loss - loss_forward - loss_up + loss_penalty
#self.writer.add_scalar("loss_forward", loss_forward.item(), self.step_count)
#self.writer.add_scalar("loss_up", loss_up.item(), self.step_count)
#self.writer.add_scalar("loss_penalty", loss_penalty.item(), self.step_count)
return loss
def run(self):
df.config.no_grad = True
self.inference = True
with torch.no_grad():
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
self.writer = SummaryWriter()
self.writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})
# param to train
self.step_count = 0
self.best_loss = math.inf
optimizer = None
scheduler = None
# materialize the parameters as a list so the plain gradient-descent branch below can index params[0]
params = list(self.network.parameters()) #[self.activations]
def closure():
batch = int(self.step_count/self.train_batch_iters)%self.train_batch_count
print("Batch: " + str(batch) + " Iter: " + str(self.step_count%self.train_batch_iters))
if (optimizer):
optimizer.zero_grad()
# compute loss on all examples
with df.ScopedTimer("forward"):#, detailed=True):
l = self.loss()
# compute gradient
with df.ScopedTimer("backward"):#, detailed=True):
l.backward()
# batch stats
self.writer.add_scalar("loss_batch", l.item(), self.step_count)
self.writer.flush()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
self.stage.Save()
except:
print("USD save error")
# save network
if (l < self.best_loss):
self.save()
self.best_loss = l
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)#, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
last_LR = 1e-5
init_LR = 1e-3
decay_LR_steps = 2000
gamma = math.exp(math.log(last_LR/init_LR)/decay_LR_steps)
optimizer = torch.optim.Adam(params, lr=self.train_rate, weight_decay=1e-5)
#scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = gamma)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
if optimizer:
optimizer.step(closure)
if scheduler:
scheduler.step()
# final save
try:
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
def load(self, suffix=""):
self.network = torch.load("outputs/" + self.name + suffix + ".pt")
if self.inference:
self.network.eval()
else:
self.network.train()
#---------
#env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda')
#env.train(mode='adam')
env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda', inference=True)
#env.load()
env.run()
| 17,357 | Python | 32.445087 | 235 | 0.526358 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_allegro.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
sim_duration = 4.0 # seconds
sim_substeps = 64
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 128
train_rate = 10.0
ground = False
name = "allegro"
regularization = 1.e-3
env_count = 1
env_dofs = 2
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# allegro
for i in range(self.env_count):
test_util.urdf_load(
builder,
#"assets/franka_description/robots/franka_panda.urdf",
"assets/allegro_hand_description/allegro_hand_description_right.urdf",
df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi*0.5)),
floating=False,
limit_ke=0.0,#1.e+3,
limit_kd=0.0)#1.e+2)
# set fingers to mid-range of their limits
for i in range(len(builder.joint_q_start)):
if (builder.joint_type[i] == df.JOINT_REVOLUTE):
dof = builder.joint_q_start[i]
mid = (builder.joint_limit_lower[dof] + builder.joint_limit_upper[dof])*0.5
builder.joint_q[dof] = mid
builder.joint_target[dof] = mid
builder.joint_target_kd[i] = 0.02
builder.joint_target_ke[i] = 1.0
solid = False
# create FEM block
if (solid):
builder.add_soft_grid(
pos=(-0.05, 0.2, 0.0),
rot=(0.0, 0.0, 0.0, 1.0),
vel=(0.0, 0.0, 0.0),
dim_x=10,
dim_y=5,
dim_z=5,
cell_x=0.01,
cell_y=0.01,
cell_z=0.01,
density=1000.0,
k_mu=500.0,
k_lambda=1000.0,
k_damp=1.0)
else:
builder.add_cloth_grid(
pos=(-0.1, 0.2, -0.1),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi*0.5),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=20,
cell_x=0.01,
cell_y=0.01,
mass=0.0125)
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.contact_ke = 1.e+3
self.model.contact_kd = 2.0
self.model.contact_kf = 0.1
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
if (solid):
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
else:
self.model.tri_ke = 100.0
self.model.tri_ka = 100.0
self.model.tri_kd = 1.0
self.model.tri_kb = 0.0
self.model.edge_ke = 0.01
self.model.edge_kd = 0.001
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
return loss
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = [self.actions]
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss()
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
# for e in range(self.env_count):
# print(self.actions.grad[e][0:20])
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
with df.ScopedTimer("save"):
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
if (self.render):
self.stage.Save()
except:
print("USD save error")
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
robot.run()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 10,608 | Python | 27.986339 | 165 | 0.488028 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/test_adjoint.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
| 626 | Python | 25.124999 | 82 | 0.785942 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/assets/humanoid.xml | <!-- ======================================================
This file is part of MuJoCo.
Copyright 2009-2015 Roboti LLC.
Model :: Humanoid
Mujoco :: Advanced physics simulation engine
Source : www.roboti.us
Version : 1.31
Released : 23Apr16
Author :: Vikash Kumar
Contacts : [email protected]
Last edits : 30Apr'16, 30Nov'15, 26Sept'15
====================================================== -->
<mujoco model='humanoid (v1.31)'>
<compiler inertiafromgeom='true' angle='degree'/>
<default>
<joint limited='true' damping='1' armature='0' />
<geom contype='1' conaffinity='1' condim='1' rgba='0.8 0.6 .4 1'
margin="0.001" solref=".02 1" solimp=".8 .8 .01" material="geom"/>
<motor ctrlrange='-.4 .4' ctrllimited='true'/>
</default>
<option timestep='0.002' iterations="50" solver="PGS">
<flag energy="enable"/>
</option>
<size nkey='5'/>
<visual>
<map fogstart="3" fogend="5" force="0.1"/>
<quality shadowsize="2048"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" width="100" height="100" rgb1=".4 .6 .8"
rgb2="0 0 0"/>
<texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278"
rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
<texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2"
width="100" height="100"/>
<material name='MatPlane' reflectance='0.5' texture="texplane" texrepeat="1 1" texuniform="true"/>
<material name='geom' texture="texgeom" texuniform="true"/>
</asset>
<worldbody>
<geom name='floor' pos='0 0 0' size='10 10 0.125' type='plane' material="MatPlane" condim='3'/>
<body name='torso' pos='0 0 1.4'>
<light mode='trackcom' directional='false' diffuse='.8 .8 .8' specular='0.3 0.3 0.3' pos='0 0 4.0' dir='0 0 -1'/>
<joint name='root' type='free' pos='0 0 0' limited='false' damping='0' armature='0' stiffness='0'/>
<geom name='torso1' type='capsule' fromto='0 -.07 0 0 .07 0' size='0.07' />
<geom name='head' type='sphere' pos='0 0 .19' size='.09'/>
<geom name='uwaist' type='capsule' fromto='-.01 -.06 -.12 -.01 .06 -.12' size='0.06'/>
<body name='lwaist' pos='-.01 0 -0.260' quat='1.000 0 -0.002 0' >
<geom name='lwaist' type='capsule' fromto='0 -.06 0 0 .06 0' size='0.06' />
<joint name='abdomen_z' type='hinge' pos='0 0 0.065' axis='0 0 1' range='-45 45' damping='5' stiffness='20' armature='0.02' />
<joint name='abdomen_y' type='hinge' pos='0 0 0.065' axis='0 1 0' range='-75 30' damping='5' stiffness='10' armature='0.02' />
<body name='pelvis' pos='0 0 -0.165' quat='1.000 0 -0.002 0' >
<joint name='abdomen_x' type='hinge' pos='0 0 0.1' axis='1 0 0' range='-35 35' damping='5' stiffness='10' armature='0.02' />
<geom name='butt' type='capsule' fromto='-.02 -.07 0 -.02 .07 0' size='0.09' />
<body name='right_thigh' pos='0 -0.1 -0.04' >
<joint name='right_hip_x' type='hinge' pos='0 0 0' axis='1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_z' type='hinge' pos='0 0 0' axis='0 0 1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='right_thigh1' type='capsule' fromto='0 0 0 0 0.01 -.34' size='0.06' />
<body name='right_shin' pos='0 0.01 -0.403' >
<joint name='right_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='right_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='right_foot' pos='0 0 -.39' >
<joint name='right_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='right_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='right_foot_cap1' type='capsule' fromto='-.07 -0.02 0 0.14 -0.04 0' size='0.027' />
<geom name='right_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 0.02 0' size='0.027' />
</body>
</body>
</body>
<body name='left_thigh' pos='0 0.1 -0.04' >
<joint name='left_hip_x' type='hinge' pos='0 0 0' axis='-1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_z' type='hinge' pos='0 0 0' axis='0 0 -1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='left_thigh1' type='capsule' fromto='0 0 0 0 -0.01 -.34' size='0.06' />
<body name='left_shin' pos='0 -0.01 -0.403' >
<joint name='left_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='left_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='left_foot' pos='0 0 -.39' >
<joint name='left_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='left_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='left_foot_cap1' type='capsule' fromto='-.07 0.02 0 0.14 0.04 0' size='0.027' />
<geom name='left_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 -0.02 0' size='0.027' />
</body>
</body>
</body>
</body>
</body>
<body name='right_upper_arm' pos='0 -0.17 0.06' >
<joint name='right_shoulder1' type='hinge' pos='0 0 0' axis='2 1 1' range='-85 60' stiffness='1' armature='0.0068' />
<joint name='right_shoulder2' type='hinge' pos='0 0 0' axis='0 -1 1' range='-85 60' stiffness='1' armature='0.0051' />
<geom name='right_uarm1' type='capsule' fromto='0 0 0 .16 -.16 -.16' size='0.04 0.16' />
<body name='right_lower_arm' pos='.18 -.18 -.18' >
<joint name='right_elbow' type='hinge' pos='0 0 0' axis='0 -1 1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='right_larm' type='capsule' fromto='0.01 0.01 0.01 .17 .17 .17' size='0.031' />
<geom name='right_hand' type='sphere' pos='.18 .18 .18' size='0.04'/>
</body>
</body>
<body name='left_upper_arm' pos='0 0.17 0.06' >
<joint name='left_shoulder1' type='hinge' pos='0 0 0' axis='2 -1 1' range='-60 85' stiffness='1' armature='0.0068' />
<joint name='left_shoulder2' type='hinge' pos='0 0 0' axis='0 1 1' range='-60 85' stiffness='1' armature='0.0051' />
<geom name='left_uarm1' type='capsule' fromto='0 0 0 .16 .16 -.16' size='0.04 0.16' />
<body name='left_lower_arm' pos='.18 .18 -.18' >
<joint name='left_elbow' type='hinge' pos='0 0 0' axis='0 -1 -1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='left_larm' type='capsule' fromto='0.01 -0.01 0.01 .17 -.17 .17' size='0.031' />
<geom name='left_hand' type='sphere' pos='.18 -.18 .18' size='0.04'/>
</body>
</body>
</body>
</worldbody>
<tendon>
<fixed name='left_hipknee'>
<joint joint='left_hip_y' coef='-1'/>
<joint joint='left_knee' coef='1'/>
</fixed>
<fixed name='right_hipknee'>
<joint joint='right_hip_y' coef='-1'/>
<joint joint='right_knee' coef='1'/>
</fixed>
</tendon>
<keyframe>
<key qpos='-0.0233227 0.00247283 0.0784829 0.728141 0.00223397 -0.685422 -0.00181805 -0.000580139 -0.245119 0.0329713 -0.0461148 0.0354257 0.252234 -0.0347763 -0.4663 -0.0313013 0.0285638 0.0147285 0.264063 -0.0346441 -0.559198 0.021724 -0.0333332 -0.718563 0.872778 0.000260393 0.733088 0.872748' />
<key qpos='0.0168601 -0.00192002 0.127167 0.762693 0.00191588 0.646754 -0.00210291 -0.000199049 0.0573113 -4.05731e-005 0.0134177 -0.00468944 0.0985945 -0.282695 -0.0469067 0.00874203 0.0263262 -0.00295056 0.0984851 -0.282098 -0.044293 0.00475795 0.127371 -0.42895 0.882402 -0.0980573 0.428506 0.88193' />
<key qpos='0.000471586 0.0317577 0.210587 0.758805 -0.583984 0.254155 0.136322 -0.0811633 0.0870309 -0.0935227 0.0904958 -0.0278004 -0.00978614 -0.359193 0.139761 -0.240168 0.060149 0.237062 -0.00622109 -0.252598 -0.00376874 -0.160597 0.25253 -0.278634 0.834376 -0.990444 -0.169065 0.652876' />
<key qpos='-0.0602175 0.048078 0.194579 -0.377418 -0.119412 -0.675073 -0.622553 0.139093 0.0710746 -0.0506027 0.0863461 0.196165 -0.0276685 -0.521954 -0.267784 0.179051 0.0371897 0.0560134 -0.032595 -0.0480022 0.0357436 0.108502 0.963806 0.157805 0.873092 -1.01145 -0.796409 0.24736' />
</keyframe>
<actuator>
<motor name='abdomen_y' gear='200' joint='abdomen_y' />
<motor name='abdomen_z' gear='200' joint='abdomen_z' />
<motor name='abdomen_x' gear='200' joint='abdomen_x' />
<motor name='right_hip_x' gear='200' joint='right_hip_x' />
<motor name='right_hip_z' gear='200' joint='right_hip_z' />
<motor name='right_hip_y' gear='600' joint='right_hip_y' />
<motor name='right_knee' gear='400' joint='right_knee' />
<motor name='right_ankle_x' gear='100' joint='right_ankle_x' />
<motor name='right_ankle_y' gear='100' joint='right_ankle_y' />
<motor name='left_hip_x' gear='200' joint='left_hip_x' />
<motor name='left_hip_z' gear='200' joint='left_hip_z' />
<motor name='left_hip_y' gear='600' joint='left_hip_y' />
<motor name='left_knee' gear='400' joint='left_knee' />
<motor name='left_ankle_x' gear='100' joint='left_ankle_x' />
<motor name='left_ankle_y' gear='100' joint='left_ankle_y' />
<motor name='right_shoulder1' gear='100' joint='right_shoulder1' />
<motor name='right_shoulder2' gear='100' joint='right_shoulder2' />
<motor name='right_elbow' gear='200' joint='right_elbow' />
<motor name='left_shoulder1' gear='100' joint='left_shoulder1' />
<motor name='left_shoulder2' gear='100' joint='left_shoulder2' />
<motor name='left_elbow' gear='200' joint='left_elbow' />
</actuator>
</mujoco>
| 11,517 | XML | 68.385542 | 314 | 0.528784 |
vstrozzi/FRL-SHAC-Extension/dflex/tests/assets/ant.xml | <mujoco model="ant">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.001" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<worldbody>
<body name="torso" pos="0 0 0.75">
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="150"/>
</actuator>
</mujoco> | 4,043 | XML | 61.215384 | 125 | 0.550829 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/index.rst | Welcome to dFlex's documentation!
==================================
dFlex is a differentiable multiphysics engine for PyTorch. It is written entirely in Python and supports reverse-mode differentiation with respect to any simulation inputs.
It includes a USD-based visualization module (:class:`dflex.render`), which can generate time-sampled USD files, or update an existing stage on-the-fly.
Prerequisites
-------------
* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)
Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once they are downloaded, follow the instructions to add them to your PYTHONPATH environment variable.
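For example, assuming the pre-built package was extracted to ``~/usd`` (an illustrative path; the exact layout depends on the downloaded archive), the bindings can also be added at runtime before importing ``pxr``:
.. code-block::
    import os, sys
    sys.path.append(os.path.expanduser("~/usd/lib/python"))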
.. toctree::
:maxdepth: 3
:caption: Contents:
modules/model
modules/sim
modules/render
Quick Start
-----------------
First ensure that the package is installed in your local Python environment (use the -e option if you will be doing development):
.. code-block::
pip install -e dflex
Then, to use the engine you can import the simulation module as follows:
.. code-block::
import dflex
To build physical models there is a helper class available in :class:`dflex.model.ModelBuilder`. This can be used to create models programmatically from Python. For example, to create a chain of particles:
.. code-block::
builder = dflex.model.ModelBuilder()
# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# build chain
for i in range(1,10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
# add ground plane
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)
Once you have built your model you must convert it to a finalized PyTorch simulation data structure using :func:`dflex.model.ModelBuilder.finalize()`:
.. code-block::
model = builder.finalize('cpu')
The model object represents static (non-time varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.
Time Stepping
-------------
To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently provides a semi-implicit integrator (a fully implicit integrator is planned) via the :class:`dflex.sim.SemiImplicitIntegrator` class, used as follows:
.. code-block::
sim_dt = 1.0/60.0
sim_steps = 100
integrator = dflex.sim.SemiImplicitIntegrator()
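# initial simulation state, created from the finalized model (as in the bundled examples)
state = model.state()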
for i in range(0, sim_steps):
state = integrator.forward(model, state, sim_dt)
Rendering
---------
To visualize the scene dFlex supports USD-based rendering via the :class:`dflex.render.UsdRenderer` class, which updates a USD stage in place. To create a renderer you must first create the USD stage and the physical model.
.. code-block::
import dflex.render
stage = Usd.Stage.CreateNew("test.usda")
renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True
Each frame the renderer should be updated with the current model state and the current elapsed simulation time:
.. code-block::
renderer.update(state, sim_time)
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 3,311 | reStructuredText | 27.8 | 228 | 0.700393 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../dflex'))
# -- Project information -----------------------------------------------------
project = 'dFlex'
copyright = '2020, NVIDIA'
author = 'NVIDIA'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
# 'sphinx.ext.autosummary',
'sphinx.ext.todo',
'autodocsumm'
]
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = 'description'
# document class *and* __init__ methods
autoclass_content = 'both'
todo_include_todos = True
intersphinx_mapping = {
'python': ("https://docs.python.org/3", None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'PyTorch': ('http://pytorch.org/docs/master/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| 2,515 | Python | 32.105263 | 81 | 0.659245 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/sim.rst | dflex.sim
===========
.. currentmodule:: dflex.sim
.. toctree::
:maxdepth: 2
.. automodule:: dflex.sim
:members:
:undoc-members:
:show-inheritance:
| 171 | reStructuredText | 12.230768 | 28 | 0.567251 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/model.rst | dflex.model
===========
.. currentmodule:: dflex.model
.. toctree::
:maxdepth: 2
model.modelbuilder
model.model
model.state
| 151 | reStructuredText | 10.692307 | 30 | 0.569536 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/model.model.rst | dflex.model.Model
========================
.. autoclasssumm:: dflex.model.Model
.. autoclass:: dflex.model.Model
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/render.rst | dflex.render
============
.. currentmodule:: dflex.render
.. toctree::
:maxdepth: 2
.. automodule:: dflex.render
:members:
:undoc-members:
:show-inheritance:
| 178 | reStructuredText | 11.785713 | 31 | 0.595506 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/model.state.rst | dflex.model.State
========================
.. autoclasssumm:: dflex.model.State
.. autoclass:: dflex.model.State
:members:
:undoc-members:
:show-inheritance:
| 173 | reStructuredText | 14.81818 | 36 | 0.583815 |
vstrozzi/FRL-SHAC-Extension/dflex/docs/modules/model.modelbuilder.rst | dflex.model.ModelBuilder
========================
.. autoclasssumm:: dflex.model.ModelBuilder
.. autoclass:: dflex.model.ModelBuilder
:members:
:undoc-members:
:show-inheritance:
| 194 | reStructuredText | 16.727271 | 43 | 0.628866 |
vstrozzi/FRL-SHAC-Extension/utils/common.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
# if there's overlap between args_list and commandline input, use commandline input
def solve_argv_conflict(args_list):
arguments_to_be_removed = []
arguments_size = []
for argv in sys.argv[1:]:
if argv.startswith('-'):
size_count = 1
for i, args in enumerate(args_list):
if args == argv:
arguments_to_be_removed.append(args)
for more_args in args_list[i+1:]:
if not more_args.startswith('-'):
size_count += 1
else:
break
arguments_size.append(size_count)
break
for args, size in zip(arguments_to_be_removed, arguments_size):
args_index = args_list.index(args)
for _ in range(size):
args_list.pop(args_index)
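# Example (illustrative): with sys.argv = ['train.py', '--seed', '7'],
# solve_argv_conflict(['--seed', '0', '--render']) removes '--seed 0' from the
# list, so the value given on the command line wins when both are passed on.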
def print_error(*message):
print('\033[91m', 'ERROR ', *message, '\033[0m')
raise RuntimeError
def print_ok(*message):
print('\033[92m', *message, '\033[0m')
def print_warning(*message):
print('\033[93m', *message, '\033[0m')
def print_info(*message):
print('\033[96m', *message, '\033[0m')
from datetime import datetime
def get_time_stamp():
now = datetime.now()
year = now.strftime('%Y')
month = now.strftime('%m')
day = now.strftime('%d')
hour = now.strftime('%H')
minute = now.strftime('%M')
second = now.strftime('%S')
return '{}-{}-{}-{}-{}-{}'.format(month, day, year, hour, minute, second)
import argparse
def parse_model_args(model_args_path):
fp = open(model_args_path, 'r')
model_args = eval(fp.read())
model_args = argparse.Namespace(**model_args)
return model_args
import torch
import numpy as np
import random
import os
def seeding(seed=0, torch_deterministic=False):
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed | 2,965 | Python | 31.23913 | 91 | 0.629005 |
vstrozzi/FRL-SHAC-Extension/utils/torch_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
print(s)
global log_output
log_output = log_output + s + "\n"
# short hands
# torch quat/vector utils
def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False):
return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad)
@torch.jit.script
def quat_mul(a, b):
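# multiply two batches of quaternions stored in (x, y, z, w) order (scalar part last)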
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([x, y, z, w], dim=-1).view(shape)
return quat
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, :3]
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape)
@torch.jit.script
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
shape = q.shape
q_w = q[:, -1]
q_vec = q[:, :3]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * \
torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
shape[0], 3, 1)).squeeze(-1) * 2.0
return a - b + c
@torch.jit.script
def quat_axis(q, axis=0):
# type: (Tensor, int) -> Tensor
basis_vec = torch.zeros(q.shape[0], 3, device=q.device)
basis_vec[:, axis] = 1
return quat_rotate(q, basis_vec)
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([xyz, w], dim=-1))
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def tf_inverse(q, t):
q_inv = quat_conjugate(q)
return q_inv, -quat_apply(q_inv, t)
@torch.jit.script
def tf_apply(q, t, v):
return quat_apply(q, v) + t
@torch.jit.script
def tf_vector(q, v):
return quat_apply(q, v)
@torch.jit.script
def tf_combine(q1, t1, q2, t2):
return quat_mul(q1, q2), quat_apply(q1, t2) + t1
@torch.jit.script
def get_basis_vector(q, v):
return quat_rotate(q, v)
def mem_report():
'''Report the memory usage of tensor.storage objects in PyTorch.
Both CPU- and GPU-resident tensors are reported.'''
def _mem_report(tensors, mem_type):
'''Print a summary of the selected tensors of the given storage type.
There are two storage types of major concern:
- GPU: tensors transferred to CUDA devices
- CPU: tensors remaining on the system memory (usually unimportant)
Args:
- tensors: the tensors of specified type
- mem_type: 'CPU' or 'GPU' in current implementation '''
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
gc.collect()
LEN = 65
objects = gc.get_objects()
#print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, 'GPU')
_mem_report(host_tensors, 'CPU')
print('='*LEN)
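# Example (illustrative): call mem_report() between training iterations to print
# the number and total size of live CPU- and GPU-resident tensors.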
def grad_norm(params):
grad_norm = 0.
for p in params:
if p.grad is not None:
grad_norm += torch.sum(p.grad ** 2)
return torch.sqrt(grad_norm)
def print_leaf_nodes(grad_fn, id_set):
if grad_fn is None:
return
if hasattr(grad_fn, 'variable'):
mem_id = id(grad_fn.variable)
if not(mem_id in id_set):
print('is leaf:', grad_fn.variable.is_leaf)
print(grad_fn.variable)
id_set.add(mem_id)
# print(grad_fn)
for i in range(len(grad_fn.next_functions)):
print_leaf_nodes(grad_fn.next_functions[i][0], id_set)
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
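# closed-form KL divergence KL(p0 || p1) between diagonal Gaussian policies:
# log(sigma1/sigma0) + (sigma0^2 + (mu1 - mu0)^2) / (2 * sigma1^2) - 1/2,
# summed over action dimensions and averaged over the batch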
c1 = torch.log(p1_sigma/p0_sigma + 1e-5)
c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = kl.sum(dim=-1) # sum over action dimensions; the mean over all samples is returned below
return kl.mean() | 6,536 | Python | 27.176724 | 114 | 0.568696 |
vstrozzi/FRL-SHAC-Extension/utils/average_meter.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import numpy as np
class AverageMeter(nn.Module):
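# Maintains a running mean whose effective sample count is capped at max_size,
# so old data is gradually replaced; the mean is registered as a buffer and is
# therefore saved/restored with the module's state_dict.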
def __init__(self, in_shape, max_size):
super(AverageMeter, self).__init__()
self.max_size = max_size
self.current_size = 0
self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))
def update(self, values):
size = values.size()[0]
if size == 0:
return
new_mean = torch.mean(values.float(), dim=0)
size = np.clip(size, 0, self.max_size)
old_size = min(self.max_size - size, self.current_size)
size_sum = old_size + size
self.current_size = size_sum
self.mean = (self.mean * old_size + new_mean * size) / size_sum
def clear(self):
self.current_size = 0
self.mean.fill_(0)
def __len__(self):
return self.current_size
def get_mean(self):
return self.mean.squeeze(0).cpu().numpy() | 1,368 | Python | 34.102563 | 82 | 0.65424 |
vstrozzi/FRL-SHAC-Extension/utils/load_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import torch
import random
import xml.etree.ElementTree as ET
import dflex as df
def set_np_formatting():
np.set_printoptions(edgeitems=30, infstr='inf',
linewidth=4000, nanstr='nan', precision=2,
suppress=False, threshold=10000, formatter=None)
def set_seed(seed, torch_deterministic=False):
if seed == -1 and torch_deterministic:
seed = 42
elif seed == -1:
seed = np.random.randint(0, 10000)
print("Setting seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch_deterministic:
# refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
return seed
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
# add geometry
for collision in collisions:
origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
geo = collision.geometry
if (geo.box):
builder.add_shape_box(
link,
pos,
rot,
geo.box.size[0]*0.5,
geo.box.size[1]*0.5,
geo.box.size[2]*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.sphere):
builder.add_shape_sphere(
link,
pos,
rot,
geo.sphere.radius,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.cylinder):
# cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
builder.add_shape_capsule(
link,
pos,
df.quat_multiply(rot, r),
geo.cylinder.radius,
geo.cylinder.length*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.mesh):
for m in geo.mesh.meshes:
faces = []
vertices = []
for v in m.vertices:
vertices.append(np.array(v))
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
mesh = df.Mesh(vertices, faces)
builder.add_shape_mesh(
link,
pos,
rot,
mesh,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
def urdf_load(
builder,
filename,
xform,
floating=False,
armature=0.0,
shape_ke=1.e+4,
shape_kd=1.e+4,
shape_kf=1.e+2,
shape_mu=0.25,
limit_ke=100.0,
limit_kd=1.0):
robot = urdfpy.URDF.load(filename)
# maps from link name -> link index
link_index = {}
builder.add_articulation()
# add base
if (floating):
root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
# set dofs to transform
start = builder.joint_q_start[root]
builder.joint_q[start + 0] = xform[0][0]
builder.joint_q[start + 1] = xform[0][1]
builder.joint_q[start + 2] = xform[0][2]
builder.joint_q[start + 3] = xform[1][0]
builder.joint_q[start + 4] = xform[1][1]
builder.joint_q[start + 5] = xform[1][2]
builder.joint_q[start + 6] = xform[1][3]
else:
root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
link_index[robot.links[0].name] = root
# add children
for joint in robot.joints:
type = None
axis = (0.0, 0.0, 0.0)
if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
type = df.JOINT_REVOLUTE
axis = joint.axis
if (joint.joint_type == "prismatic"):
type = df.JOINT_PRISMATIC
axis = joint.axis
if (joint.joint_type == "fixed"):
type = df.JOINT_FIXED
if (joint.joint_type == "floating"):
type = df.JOINT_FREE
parent = -1
if joint.parent in link_index:
parent = link_index[joint.parent]
origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
lower = -1.e+3
upper = 1.e+3
damping = 0.0
# limits
if (joint.limit):
if (joint.limit.lower != None):
lower = joint.limit.lower
if (joint.limit.upper != None):
upper = joint.limit.upper
# damping
if (joint.dynamics):
if (joint.dynamics.damping):
damping = joint.dynamics.damping
# add link
link = builder.add_link(
parent=parent,
X_pj=df.transform(pos, rot),
axis=axis,
type=type,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
damping=damping)
# add collisions
urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
# add ourselves to the index
link_index[joint.child] = link
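# Example usage (illustrative; asset path borrowed from the dflex tests):
#   builder = df.sim.ModelBuilder()
#   urdf_load(builder,
#             "assets/allegro_hand_description/allegro_hand_description_right.urdf",
#             df.transform((0.0, 0.0, 0.0), df.quat_identity()),
#             floating=False)
#   model = builder.finalize('cuda')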
# build an articulated tree
def build_tree(
builder,
angle,
max_depth,
width=0.05,
length=0.25,
density=1000.0,
joint_stiffness=0.0,
joint_damping=0.0,
shape_ke = 1.e+4,
shape_kd = 1.e+3,
shape_kf = 1.e+2,
shape_mu = 0.5,
floating=False):
def build_recursive(parent, depth):
if (depth >= max_depth):
return
X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
type = df.JOINT_REVOLUTE
axis = (0.0, 0.0, 1.0)
if (depth == 0 and floating == True):
X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
type = df.JOINT_FREE
link = builder.add_link(
parent,
X_pj,
axis,
type,
stiffness=joint_stiffness,
damping=joint_damping)
# capsule
shape = builder.add_shape_capsule(
link,
pos=(length, 0.0, 0.0),
radius=width,
half_width=length,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
# recurse
#build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating)
build_recursive(link, depth + 1)
#
build_recursive(-1, 0)
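# Example usage (illustrative): a three-link chain of capsules joined by revolute joints
#   builder = df.sim.ModelBuilder()
#   build_tree(builder, angle=0.0, max_depth=3)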
# Mujoco file format parser
def parse_mjcf(
filename,
builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=1e4,
contact_kd=1e4,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=100.0,
limit_kd=10.0,
armature=0.01,
radians=False,
load_stiffness=False,
load_armature=False):
file = ET.parse(filename)
root = file.getroot()
type_map = {
"ball": df.JOINT_BALL,
"hinge": df.JOINT_REVOLUTE,
"slide": df.JOINT_PRISMATIC,
"free": df.JOINT_FREE,
"fixed": df.JOINT_FIXED
}
def parse_float(node, key, default):
if key in node.attrib:
return float(node.attrib[key])
else:
return default
def parse_bool(node, key, default):
if key in node.attrib:
if node.attrib[key] == "true":
return True
else:
return False
else:
return default
def parse_vec(node, key, default):
if key in node.attrib:
return np.fromstring(node.attrib[key], sep=" ")
else:
return np.array(default)
def parse_body(body, parent, last_joint_pos):
body_name = body.attrib["name"]
body_pos = np.fromstring(body.attrib["pos"], sep=" ")
# last_joint_pos = np.zeros(3)
#-----------------
# add a body for each joint; we assume the joints attached to one body share the same joint_pos
for i, joint in enumerate(body.findall("joint")):
joint_name = joint.attrib["name"]
joint_type = type_map[joint.attrib.get("type", 'hinge')]
joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
joint_limited = parse_bool(joint, "limited", True)
if joint_limited:
if radians:
joint_range = parse_vec(joint, "range", (np.deg2rad(-170.), np.deg2rad(170.)))
else:
joint_range = np.deg2rad(parse_vec(joint, "range", (-170.0, 170.0)))
else:
joint_range = np.array([-1.e+6, 1.e+6])
if load_stiffness:
joint_stiffness = parse_float(joint, 'stiffness', stiffness)
else:
joint_stiffness = stiffness
joint_damping = parse_float(joint, 'damping', damping)
if load_armature:
joint_armature = parse_float(joint, "armature", armature)
else:
joint_armature = armature
joint_axis = df.normalize(joint_axis)
if (parent == -1):
body_pos = np.array((0.0, 0.0, 0.0))
#-----------------
# add body
link = builder.add_link(
parent,
X_pj=df.transform(body_pos + joint_pos - last_joint_pos, df.quat_identity()),
axis=joint_axis,
type=joint_type,
limit_lower=joint_range[0],
limit_upper=joint_range[1],
limit_ke=limit_ke,
limit_kd=limit_kd,
stiffness=joint_stiffness,
damping=joint_damping,
armature=joint_armature)
# assume that each joint is one body in simulation
parent = link
body_pos = [0.0, 0.0, 0.0]
last_joint_pos = joint_pos
#-----------------
# add shapes to the last joint in the body
for geom in body.findall("geom"):
geom_name = geom.attrib["name"]
geom_type = geom.attrib["type"]
geom_size = parse_vec(geom, "size", [1.0])
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
if (geom_type == "sphere"):
builder.add_shape_sphere(
link,
pos=geom_pos - last_joint_pos, # position relative to the parent frame
rot=geom_rot,
radius=geom_size[0],
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
elif (geom_type == "capsule"):
if ("fromto" in geom.attrib):
geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = geom_fromto[0:3]
end = geom_fromto[3:6]
# compute rotation to align the dflex capsule (along the x-axis) with the mjcf fromto direction
axis = df.normalize(end-start)
angle = math.acos(np.dot(axis, (1.0, 0.0, 0.0)))
axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
geom_pos = (start + end)*0.5
geom_rot = df.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_width = np.linalg.norm(end-start)*0.5
else:
geom_radius = geom_size[0]
geom_width = geom_size[1]
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
if ("axisangle" in geom.attrib):
axis_angle = parse_vec(geom, "axisangle", (0.0, 1.0, 0.0, 0.0))
geom_rot = df.quat_from_axis_angle(axis_angle[0:3], axis_angle[3])
if ("quat" in geom.attrib):
q = parse_vec(geom, "quat", df.quat_identity())
geom_rot = q
geom_rot = df.quat_multiply(geom_rot, df.quat_from_axis_angle((0.0, 1.0, 0.0), -math.pi*0.5))
builder.add_shape_capsule(
link,
pos=geom_pos - last_joint_pos,
rot=geom_rot,
radius=geom_radius,
half_width=geom_width,
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
else:
print("Type: " + geom_type + " unsupported")
#-----------------
# recurse
for child in body.findall("body"):
parse_body(child, link, last_joint_pos)
#-----------------
# start articulation
builder.add_articulation()
world = root.find("worldbody")
for body in world.findall("body"):
parse_body(body, -1, np.zeros(3))
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
self.muscle_strength = 0.0
class Skeleton:
def __init__(self, skeleton_file, muscle_file, builder,
filter={},
visualize_shapes=True,
stiffness=5.0,
damping=2.0,
contact_ke=5000.0,
contact_kd=2000.0,
contact_kf=1000.0,
contact_mu=0.5,
limit_ke=1000.0,
limit_kd=10.0,
armature = 0.05):
self.armature = armature
self.stiffness = stiffness
self.damping = damping
self.contact_ke = contact_ke
self.contact_kd = contact_kd
self.contact_kf = contact_kf
self.limit_ke = limit_ke
self.limit_kd = limit_kd
self.contact_mu = contact_mu
self.visualize_shapes = visualize_shapes
self.parse_skeleton(skeleton_file, builder, filter)
if muscle_file != None:
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
self.mesh_map = {} # map mesh names to link indices objects
self.coord_start = len(builder.joint_q)
self.dof_start = len(builder.joint_qd)
type_map = {
"Ball": df.JOINT_BALL,
"Revolute": df.JOINT_REVOLUTE,
"Prismatic": df.JOINT_PRISMATIC,
"Free": df.JOINT_FREE,
"Fixed": df.JOINT_FIXED
}
builder.add_articulation()
for child in root:
if (child.tag == "Node"):
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = df.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
body_type = body.attrib["type"]
body_mass = float(body.attrib["mass"])
x=body_size[0]
y=body_size[1]
z=body_size[2]
density = body_mass / (x*y*z)
max_body_mass = 15.0
mass_scale = body_mass / max_body_mass
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = -1.e+3
joint_upper = 1.e+3
if (joint_type == type_map["Revolute"]):
if ("lower" in joint.attrib):
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")[0]
if ("upper" in joint.attrib):
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")[0]
# print(joint_type, joint_lower, joint_upper)
if ("axis" in joint.attrib):
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
mesh_file = mesh_base + ".usd"
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)
if (parent_link == -1):
joint_X_p = df.transform_identity()
# add link
link = builder.add_link(
parent=parent_link,
X_pj=joint_X_p,
axis=joint_axis,
type=joint_type,
limit_lower=joint_lower,
limit_upper=joint_upper,
limit_ke=self.limit_ke * mass_scale,
limit_kd=self.limit_kd * mass_scale,
damping=self.damping,
stiffness=self.stiffness * math.sqrt(mass_scale),
armature=self.armature)
# armature=self.armature * math.sqrt(mass_scale))
# add shape
shape = builder.add_shape_box(
body=link,
pos=body_X_c[0],
rot=body_X_c[1],
hx=x*0.5,
hy=y*0.5,
hz=z*0.5,
density=density,
ke=self.contact_ke,
kd=self.contact_kd,
kf=self.contact_kf,
mu=self.contact_mu)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if (child.tag == "Unit"):
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
m.muscle_strength = unit_f0
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if (way_link == -1):
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)
self.muscles = muscles
| 22,759 | Python | 30.523546 | 130 | 0.482622 |
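The muscle parser above reads one `Unit` element per muscle, each with a list of `Waypoint` children. Below is a minimal sketch of an input fragment it would accept, inferred purely from the attribute reads in `parse_muscles`; the element names match the code, but the values are illustrative and not taken from the repository's data files.
```
import xml.etree.ElementTree as ET

# Hypothetical fragment; attribute names mirror the reads in parse_muscles.
snippet = """
<Muscle>
    <Unit name="L_Biceps" f0="300.0" lm="0.12" lt="0.16" lmax="1.4" pen_angle="0.0">
        <Waypoint body="ShoulderL" p="0.0 0.05 0.02"/>
        <Waypoint body="ForeArmL" p="0.0 -0.04 0.01"/>
    </Unit>
</Muscle>
"""

root = ET.fromstring(snippet)
for child in root:
    if child.tag == "Unit":
        waypoints = [w.attrib["body"] for w in child.iter("Waypoint")]
        print(child.attrib["name"], float(child.attrib["f0"]), waypoints)
```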
vstrozzi/FRL-SHAC-Extension/utils/dataset.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
class CriticDataset:
def __init__(self, batch_size, obs, target_values, shuffle = False, drop_last = False):
self.obs = obs.view(-1, obs.shape[-1])
self.target_values = target_values.view(-1)
self.batch_size = batch_size
if shuffle:
self.shuffle()
if drop_last:
self.length = self.obs.shape[0] // self.batch_size
else:
self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1
def shuffle(self):
index = np.random.permutation(self.obs.shape[0])
self.obs = self.obs[index, :]
self.target_values = self.target_values[index]
def __len__(self):
return self.length
def __getitem__(self, index):
start_idx = index * self.batch_size
end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
return {'obs': self.obs[start_idx:end_idx, :], 'target_values': self.target_values[start_idx:end_idx]}
| 1,420 | Python | 37.405404 | 110 | 0.645775 |
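A minimal usage sketch for `CriticDataset`, assuming observations shaped `[steps, envs, obs_dim]` with matching per-step value targets; all shapes below are illustrative.
```
import torch

from utils.dataset import CriticDataset

obs = torch.randn(64, 4, 11)            # [steps, envs, obs_dim]
target_values = torch.randn(64, 4)      # [steps, envs]

dataset = CriticDataset(16, obs, target_values, shuffle=True, drop_last=False)
for i in range(len(dataset)):
    batch = dataset[i]                  # dict with flattened 'obs' and 'target_values'
    print(batch['obs'].shape, batch['target_values'].shape)
```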
vstrozzi/FRL-SHAC-Extension/utils/time_report.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import time
from utils.common import *
class Timer:
def __init__(self, name):
self.name = name
self.start_time = None
self.time_total = 0.
def on(self):
assert self.start_time is None, "Timer {} is already turned on!".format(self.name)
self.start_time = time.time()
def off(self):
assert self.start_time is not None, "Timer {} not started yet!".format(self.name)
self.time_total += time.time() - self.start_time
self.start_time = None
def report(self):
print_info('Time report [{}]: {:.2f} seconds'.format(self.name, self.time_total))
def clear(self):
self.start_time = None
self.time_total = 0.
class TimeReport:
def __init__(self):
self.timers = {}
def add_timer(self, name):
assert name not in self.timers, "Timer {} already exists!".format(name)
self.timers[name] = Timer(name = name)
def start_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].on()
def end_timer(self, name):
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].off()
def report(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
else:
print_info("------------Time Report------------")
for timer_name in self.timers.keys():
self.timers[timer_name].report()
print_info("-----------------------------------")
def clear_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].clear()
else:
for timer_name in self.timers.keys():
self.timers[timer_name].clear()
def pop_timer(self, name = None):
if name is not None:
assert name in self.timers, "Timer {} does not exist!".format(name)
self.timers[name].report()
del self.timers[name]
else:
self.report()
self.timers = {}
| 2,688 | Python | 34.853333 | 90 | 0.58631 |
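A usage sketch for `TimeReport`, assuming the `utils.common` module it imports provides the `print_info` helper:
```
import time

from utils.time_report import TimeReport

time_report = TimeReport()
time_report.add_timer("algorithm")
time_report.add_timer("forward simulation")

time_report.start_timer("algorithm")
time_report.start_timer("forward simulation")
time.sleep(0.05)                         # stand-in for real work
time_report.end_timer("forward simulation")
time_report.end_timer("algorithm")

time_report.report()                     # prints the accumulated time of every timer
time_report.pop_timer()                  # reports everything, then clears all timers
```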
vstrozzi/FRL-SHAC-Extension/utils/running_mean_std.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Tuple
import torch
class RunningMeanStd(object):
def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = (), device = 'cuda:0'):
"""
        Calculates the running mean and std of a data stream
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param epsilon: helps with arithmetic issues
:param shape: the shape of the data stream's output
"""
self.mean = torch.zeros(shape, dtype = torch.float32, device = device)
self.var = torch.ones(shape, dtype = torch.float32, device = device)
self.count = epsilon
def to(self, device):
rms = RunningMeanStd(device = device)
rms.mean = self.mean.to(device).clone()
rms.var = self.var.to(device).clone()
rms.count = self.count
return rms
@torch.no_grad()
def update(self, arr: torch.tensor) -> None:
batch_mean = torch.mean(arr, dim = 0)
batch_var = torch.var(arr, dim = 0, unbiased = False)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean: torch.tensor, batch_var: torch.tensor, batch_count: int) -> None:
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + torch.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
def normalize(self, arr:torch.tensor, un_norm = False) -> torch.tensor:
if not un_norm:
result = (arr - self.mean) / torch.sqrt(self.var + 1e-5)
else:
result = arr * torch.sqrt(self.var + 1e-5) + self.mean
return result | 2,462 | Python | 40.745762 | 111 | 0.638099 |
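A usage sketch for the observation normalizer, run on CPU here for portability (the class defaults to `cuda:0`):
```
import torch

from utils.running_mean_std import RunningMeanStd

obs_rms = RunningMeanStd(shape=(11,), device='cpu')

for _ in range(10):                           # ten synthetic batches of observations
    batch = torch.randn(256, 11) * 3.0 + 1.5
    obs_rms.update(batch)

obs = torch.randn(32, 11) * 3.0 + 1.5
obs_norm = obs_rms.normalize(obs)                      # roughly zero-mean, unit-variance
obs_back = obs_rms.normalize(obs_norm, un_norm=True)   # inverse transform
```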
profK/Worldwizards-Export-Tools/README.md | # Extension Project Template
This project was automatically generated.
- `app` - a folder link to the location of your *Omniverse Kit* based app.
- `exts` - a folder where you can add new extensions. It was automatically added to the extension search path (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions that improve the Python development experience.
Look for the "worldwizards.export.tools" extension in the Extension Manager and enable it. Try applying changes to any Python files; the extension hot-reloads, and you can observe the results immediately.
Alternatively, you can launch your app from the console with this folder added to the extension search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable worldwizards.export.tools
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script is included.
Run:
```
> link_app.bat
```
If successful, you should see an `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
The link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder containing the extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developer manual.
To add the link to your *Omniverse Kit* based app, go to: Extension Manager -> Gear Icon -> Extension Search Path.
| 2,049 | Markdown | 37.679245 | 258 | 0.757931 |
profK/Worldwizards-Export-Tools/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |