RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/ivecenv.py
class IVecEnv:
    def step(self, actions):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError

    def has_action_masks(self):
        return False

    def get_number_of_agents(self):
        return 1

    def get_env_info(self):
        pass

    def set_train_info(self, env_frames, *args, **kwargs):
        """
        Send the information in the direction algo->environment.
        Most common use case: tell the environment how far along we are in the training process.
        This is useful for implementing curriculums and things such as that.
        """
        pass

    def get_env_state(self):
        """
        Return serializable environment state to be saved to checkpoint.
        Can be used for stateful training sessions, i.e. with adaptive curriculums.
        """
        return None

    def set_env_state(self, env_state):
        pass
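A minimal sketch of how this interface is typically consumed: a custom vectorized environment subclasses `IVecEnv` and overrides `step`/`reset`. The `MyVecEnv` class and its `env_fns` argument below are hypothetical, for illustration only.

```python
# Hypothetical sketch: implementing the IVecEnv interface over a list of
# simple gym-style environments. MyVecEnv and env_fns are illustrative,
# not part of rl_games.
import numpy as np
from rl_games.common.ivecenv import IVecEnv

class MyVecEnv(IVecEnv):
    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]

    def step(self, actions):
        results = [env.step(a) for env, a in zip(self.envs, actions)]
        obs, rewards, dones, infos = map(list, zip(*results))
        return np.stack(obs), np.stack(rewards), np.stack(dones), infos

    def reset(self):
        return np.stack([env.reset() for env in self.envs])
```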
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/vecenv.py
import ray
from rl_games.common.ivecenv import IVecEnv
from rl_games.common.env_configurations import configurations
from rl_games.common.tr_helpers import dicts_to_dict_with_arrays
import numpy as np
import gym

from time import sleep


class RayWorker:
    def __init__(self, config_name, config):
        self.env = configurations[config_name]['env_creator'](**config)
        #self.obs = self.env.reset()

    def step(self, action):
        next_state, reward, is_done, info = self.env.step(action)
        if np.isscalar(is_done):
            episode_done = is_done
        else:
            episode_done = is_done.all()
        if episode_done:
            next_state = self.reset()
        if isinstance(next_state, dict):
            for k, v in next_state.items():
                if isinstance(v, dict):
                    for dk, dv in v.items():
                        if dv.dtype == np.float64:
                            v[dk] = dv.astype(np.float32)
                else:
                    if v.dtype == np.float64:
                        next_state[k] = v.astype(np.float32)
        else:
            if next_state.dtype == np.float64:
                next_state = next_state.astype(np.float32)
        return next_state, reward, is_done, info

    def render(self):
        self.env.render()

    def reset(self):
        self.obs = self.env.reset()
        return self.obs

    def get_action_mask(self):
        return self.env.get_action_mask()

    def get_number_of_agents(self):
        if hasattr(self.env, 'get_number_of_agents'):
            return self.env.get_number_of_agents()
        else:
            return 1

    def set_weights(self, weights):
        self.env.update_weights(weights)

    def can_concat_infos(self):
        if hasattr(self.env, 'concat_infos'):
            return self.env.concat_infos
        else:
            return False

    def get_env_info(self):
        info = {}
        observation_space = self.env.observation_space
        #if isinstance(observation_space, gym.spaces.dict.Dict):
        #    observation_space = observation_space['observations']
        info['action_space'] = self.env.action_space
        info['observation_space'] = observation_space
        info['state_space'] = None
        info['use_global_observations'] = False
        info['agents'] = self.get_number_of_agents()
        info['value_size'] = 1
        if hasattr(self.env, 'use_central_value'):
            info['use_global_observations'] = self.env.use_central_value
        if hasattr(self.env, 'value_size'):
            info['value_size'] = self.env.value_size
        if hasattr(self.env, 'state_space'):
            info['state_space'] = self.env.state_space
        return info


class RayVecEnv(IVecEnv):
    def __init__(self, config_name, num_actors, **kwargs):
        self.config_name = config_name
        self.num_actors = num_actors
        self.use_torch = False

        self.remote_worker = ray.remote(RayWorker)
        self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]

        res = self.workers[0].get_number_of_agents.remote()
        self.num_agents = ray.get(res)

        res = self.workers[0].get_env_info.remote()
        env_info = ray.get(res)
        res = self.workers[0].can_concat_infos.remote()
        can_concat_infos = ray.get(res)
        self.use_global_obs = env_info['use_global_observations']
        self.concat_infos = can_concat_infos
        self.obs_type_dict = type(env_info.get('observation_space')) is gym.spaces.Dict
        self.state_type_dict = type(env_info.get('state_space')) is gym.spaces.Dict
        if self.num_agents == 1:
            self.concat_func = np.stack
        else:
            self.concat_func = np.concatenate

    def step(self, actions):
        newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
        res_obs = []
        if self.num_agents == 1:
            for (action, worker) in zip(actions, self.workers):
                res_obs.append(worker.step.remote(action))
        else:
            for num, worker in enumerate(self.workers):
                res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))

        all_res = ray.get(res_obs)
        for res in all_res:
            cobs, crewards, cdones, cinfos = res
            if self.use_global_obs:
                newobs.append(cobs["obs"])
                newstates.append(cobs["state"])
            else:
                newobs.append(cobs)
            newrewards.append(crewards)
            newdones.append(cdones)
            newinfos.append(cinfos)

        if self.obs_type_dict:
            ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
        else:
            ret_obs = self.concat_func(newobs)

        if self.use_global_obs:
            newobsdict = {}
            newobsdict["obs"] = ret_obs
            if self.state_type_dict:
                newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
            else:
                newobsdict["states"] = np.stack(newstates)
            ret_obs = newobsdict
        if self.concat_infos:
            newinfos = dicts_to_dict_with_arrays(newinfos, False)
        return ret_obs, self.concat_func(newrewards), self.concat_func(newdones), newinfos

    def get_env_info(self):
        res = self.workers[0].get_env_info.remote()
        return ray.get(res)

    def set_weights(self, indices, weights):
        res = []
        for ind in indices:
            res.append(self.workers[ind].set_weights.remote(weights))
        ray.get(res)

    def has_action_masks(self):
        return True

    def get_action_masks(self):
        mask = [worker.get_action_mask.remote() for worker in self.workers]
        return np.asarray(ray.get(mask), dtype=np.int32)

    def reset(self):
        res_obs = [worker.reset.remote() for worker in self.workers]
        newobs, newstates = [], []
        for res in res_obs:
            cobs = ray.get(res)
            if self.use_global_obs:
                newobs.append(cobs["obs"])
                newstates.append(cobs["state"])
            else:
                newobs.append(cobs)

        if self.obs_type_dict:
            ret_obs = dicts_to_dict_with_arrays(newobs, self.num_agents == 1)
        else:
            ret_obs = self.concat_func(newobs)

        if self.use_global_obs:
            newobsdict = {}
            newobsdict["obs"] = ret_obs
            if self.state_type_dict:
                newobsdict["states"] = dicts_to_dict_with_arrays(newstates, True)
            else:
                newobsdict["states"] = np.stack(newstates)
            ret_obs = newobsdict
        return ret_obs


# todo rename multi-agent
class RayVecSMACEnv(IVecEnv):
    def __init__(self, config_name, num_actors, **kwargs):
        self.config_name = config_name
        self.num_actors = num_actors

        self.remote_worker = ray.remote(RayWorker)
        self.workers = [self.remote_worker.remote(self.config_name, kwargs) for i in range(self.num_actors)]

        res = self.workers[0].get_number_of_agents.remote()
        self.num_agents = ray.get(res)

        res = self.workers[0].get_env_info.remote()
        env_info = ray.get(res)
        self.use_global_obs = env_info['use_global_observations']

    def get_env_info(self):
        res = self.workers[0].get_env_info.remote()
        return ray.get(res)

    def get_number_of_agents(self):
        return self.num_agents

    def step(self, actions):
        newobs, newstates, newrewards, newdones, newinfos = [], [], [], [], []
        newobsdict = {}
        res_obs, res_state = [], []
        for num, worker in enumerate(self.workers):
            res_obs.append(worker.step.remote(actions[self.num_agents * num: self.num_agents * num + self.num_agents]))

        for res in res_obs:
            cobs, crewards, cdones, cinfos = ray.get(res)
            if self.use_global_obs:
                newobs.append(cobs["obs"])
                newstates.append(cobs["state"])
            else:
                newobs.append(cobs)
            newrewards.append(crewards)
            newdones.append(cdones)
            newinfos.append(cinfos)

        if self.use_global_obs:
            newobsdict["obs"] = np.concatenate(newobs, axis=0)
            newobsdict["states"] = np.asarray(newstates)
            ret_obs = newobsdict
        else:
            ret_obs = np.concatenate(newobs, axis=0)
        return ret_obs, np.concatenate(newrewards, axis=0), np.concatenate(newdones, axis=0), newinfos

    def has_action_masks(self):
        return True

    def get_action_masks(self):
        mask = [worker.get_action_mask.remote() for worker in self.workers]
        masks = ray.get(mask)
        return np.concatenate(masks, axis=0)

    def reset(self):
        res_obs = [worker.reset.remote() for worker in self.workers]
        if self.use_global_obs:
            newobs, newstates = [], []
            for res in res_obs:
                cobs = ray.get(res)
                if self.use_global_obs:
                    newobs.append(cobs["obs"])
                    newstates.append(cobs["state"])
                else:
                    newobs.append(cobs)
            newobsdict = {}
            newobsdict["obs"] = np.concatenate(newobs, axis=0)
            newobsdict["states"] = np.asarray(newstates)
            ret_obs = newobsdict
        else:
            ret_obs = ray.get(res_obs)
            ret_obs = np.concatenate(ret_obs, axis=0)
        return ret_obs


vecenv_config = {}

def register(config_name, func):
    vecenv_config[config_name] = func

def create_vec_env(config_name, num_actors, **kwargs):
    vec_env_name = configurations[config_name]['vecenv_type']
    return vecenv_config[vec_env_name](config_name, num_actors, **kwargs)

register('RAY', lambda config_name, num_actors, **kwargs: RayVecEnv(config_name, num_actors, **kwargs))
register('RAY_SMAC', lambda config_name, num_actors, **kwargs: RayVecSMACEnv(config_name, num_actors, **kwargs))

from rl_games.envs.brax import BraxEnv
register('BRAX', lambda config_name, num_actors, **kwargs: BraxEnv(config_name, num_actors, **kwargs))
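The `register`/`create_vec_env` pair at the bottom is the extension point for new backends. A hedged usage sketch: `MyCustomVecEnv` is a hypothetical class implementing `IVecEnv`, and the sketch assumes a config named `'MyEnv'` exists in `rl_games.common.env_configurations` with its `vecenv_type` set to `'CUSTOM'`.

```python
# Hypothetical sketch: registering a custom vectorized-env backend and
# instantiating it by name. MyCustomVecEnv is illustrative and must follow
# the IVecEnv interface.
from rl_games.common import vecenv

vecenv.register('CUSTOM', lambda config_name, num_actors, **kwargs:
                MyCustomVecEnv(config_name, num_actors, **kwargs))

env = vecenv.create_vec_env('MyEnv', num_actors=8)
obs = env.reset()
```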
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/tr_helpers.py
import numpy as np
from collections import defaultdict


class LinearValueProcessor:
    def __init__(self, start_eps, end_eps, end_eps_frames):
        self.start_eps = start_eps
        self.end_eps = end_eps
        self.end_eps_frames = end_eps_frames

    def __call__(self, frame):
        if frame >= self.end_eps_frames:
            return self.end_eps
        df = frame / self.end_eps_frames
        return df * self.end_eps + (1.0 - df) * self.start_eps


class DefaultRewardsShaper:
    def __init__(self, scale_value=1, shift_value=0, min_val=-np.inf, max_val=np.inf, is_torch=True):
        self.scale_value = scale_value
        self.shift_value = shift_value
        self.min_val = min_val
        self.max_val = max_val
        self.is_torch = is_torch

    def __call__(self, reward):
        reward = reward + self.shift_value
        reward = reward * self.scale_value
        if self.is_torch:
            import torch
            reward = torch.clamp(reward, self.min_val, self.max_val)
        else:
            reward = np.clip(reward, self.min_val, self.max_val)
        return reward


def dicts_to_dict_with_arrays(dicts, add_batch_dim=True):
    def stack(v):
        if len(np.shape(v)) == 1:
            return np.array(v)
        else:
            return np.stack(v)

    def concatenate(v):
        if len(np.shape(v)) == 1:
            return np.array(v)
        else:
            return np.concatenate(v)

    dicts_len = len(dicts)
    if dicts_len <= 1:
        return dicts
    res = defaultdict(list)
    {res[key].append(sub[key]) for sub in dicts for key in sub}
    if add_batch_dim:
        concat_func = stack
    else:
        concat_func = concatenate
    res = {k: concat_func(v) for k, v in res.items()}
    return res


def unsqueeze_obs(obs):
    if type(obs) is dict:
        for k, v in obs.items():
            obs[k] = unsqueeze_obs(v)
    else:
        obs = obs.unsqueeze(0)
    return obs


def flatten_first_two_dims(arr):
    if arr.ndim > 2:
        return arr.reshape(-1, *arr.shape[-(arr.ndim - 2):])
    else:
        return arr.reshape(-1)


def free_mem():
    import ctypes
    ctypes.CDLL('libc.so.6').malloc_trim(0)
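`DefaultRewardsShaper` applies `(reward + shift) * scale` and then clamps. A minimal sketch, with illustrative values:

```python
# Sketch: shaping rewards as (reward + shift) * scale, then clamping to
# [min_val, max_val]. The numbers below are illustrative.
import torch
from rl_games.common.tr_helpers import DefaultRewardsShaper

shaper = DefaultRewardsShaper(scale_value=0.1, shift_value=0.0,
                              min_val=-1.0, max_val=1.0, is_torch=True)
shaped = shaper(torch.tensor([5.0, -20.0, 0.3]))  # -> [0.5, -1.0, 0.03]
```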
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/object_factory.py
class ObjectFactory:
    def __init__(self):
        self._builders = {}

    def register_builder(self, name, builder):
        self._builders[name] = builder

    def set_builders(self, builders):
        self._builders = builders

    def create(self, name, **kwargs):
        builder = self._builders.get(name)
        if not builder:
            raise ValueError(name)
        return builder(**kwargs)
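The factory maps string names to builder callables; `create()` looks up the builder and forwards keyword arguments to it, raising `ValueError` for unknown names. A self-contained sketch:

```python
# Sketch: registering a builder under a name and constructing by name.
factory = ObjectFactory()
factory.register_builder('dict', lambda **kwargs: dict(**kwargs))
obj = factory.create('dict', lr=3e-4)   # -> {'lr': 0.0003}
# factory.create('missing') would raise ValueError('missing')
```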
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/transforms/soft_augmentation.py
from rl_games.common.transforms import transforms
import torch


class SoftAugmentation():
    def __init__(self, **kwargs):
        self.transform_config = kwargs.pop('transform')
        self.aug_coef = kwargs.pop('aug_coef', 0.001)
        print('aug coef:', self.aug_coef)
        self.name = self.transform_config['name']

        #TODO: remove hardcode
        self.transform = transforms.ImageDatasetTransform(**self.transform_config)

    def get_coef(self):
        return self.aug_coef

    def get_loss(self, p_dict, model, input_dict, loss_type='both'):
        '''
        loss_type: 'critic', 'policy', 'both'
        '''
        if self.transform:
            input_dict = self.transform(input_dict)
        loss = 0
        q_dict = model(input_dict)
        if loss_type == 'policy' or loss_type == 'both':
            p_dict['logits'] = p_dict['logits'].detach()
            loss = model.kl(p_dict, q_dict)
        if loss_type == 'critic' or loss_type == 'both':
            p_value = p_dict['value'].detach()
            q_value = q_dict['value']
            loss = loss + (0.5 * (p_value - q_value)**2).sum(dim=-1)
        return loss
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/common/transforms/transforms.py
import torch
from torch import nn


class DatasetTransform(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, dataset):
        return dataset


class ImageDatasetTransform(DatasetTransform):
    def __init__(self, **kwargs):
        super().__init__()
        import kornia
        self.transform = torch.nn.Sequential(
            nn.ReplicationPad2d(4),
            kornia.augmentation.RandomCrop((84, 84))
            #kornia.augmentation.RandomErasing(p=0.2),
            #kornia.augmentation.RandomAffine(degrees=0, translate=(2.0/84, 2.0/84), p=1),
            #kornia.augmentation.RandomCrop((84,84))
        )

    def forward(self, dataset):
        dataset['obs'] = self.transform(dataset['obs'])
        return dataset
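A quick sketch of what this transform does to a batch: it replication-pads an 84x84 image batch by 4 pixels and takes a random 84x84 crop (a common DrQ-style augmentation). The batch contents below are illustrative; kornia must be installed.

```python
# Sketch: applying ImageDatasetTransform to an illustrative image batch.
import torch

aug = ImageDatasetTransform()
batch = {'obs': torch.rand(32, 3, 84, 84)}  # hypothetical observation batch
batch = aug(batch)                          # batch['obs'] keeps shape (32, 3, 84, 84)
```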
RoboticExplorationLab/CGAC/dflex/setup.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import setuptools

setuptools.setup(
    name="dflex",
    version="0.0.1",
    author="NVIDIA",
    author_email="[email protected]",
    description="Differentiable Multiphysics for Python",
    long_description="",
    long_description_content_type="text/markdown",
    # url="https://github.com/pypa/sampleproject",
    packages=setuptools.find_packages(),
    package_data={"": ["*.h"]},
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    install_requires=["ninja", "torch"],
)
RoboticExplorationLab/CGAC/dflex/README.md
# A Differentiable Multiphysics Engine for PyTorch

dFlex is a physics engine for Python. It is written entirely in PyTorch and supports reverse-mode differentiation w.r.t. any simulation inputs.

It includes a USD-based visualization library (`dflex.render`), which can generate time-sampled USD files, or update an existing stage on-the-fly.

## Prerequisites

* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)

Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once downloaded, follow the instructions to add them to your PYTHONPATH environment variable.

## Using the built-in backend

By default dFlex uses the built-in PyTorch cpp-extensions mechanism to compile auto-generated simulation kernels.

- Windows users should ensure they have Visual Studio 2019 installed

## Setup and Running

To use the engine, first import the simulation module:

```python
import dflex.sim
```

To build physical models there is a helper class available in `dflex.sim.ModelBuilder`. This can be used to create models programmatically from Python. For example, to create a chain of particles:

```python
builder = dflex.sim.ModelBuilder()

# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)

# build chain
for i in range(1, 10):
    builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    builder.add_spring(i - 1, i, 1.e+3, 0.0, 0)

# add ground plane
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)
```

Once you have built your model you must convert it to a finalized PyTorch simulation data structure using `finalize()`:

```python
model = builder.finalize('cpu')
```

The model object represents static (non-time-varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.

## Time Stepping

To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers semi-implicit integration, with fully implicit integration planned, via the `dflex.sim.ExplicitIntegrator` and `dflex.sim.ImplicitIntegrator` classes, as follows:

```python
sim_dt = 1.0 / 60.0
sim_steps = 100

integrator = dflex.sim.ExplicitIntegrator()

for i in range(0, sim_steps):
    state = integrator.forward(model, state, sim_dt)
```

## Rendering

To visualize the scene dFlex supports a USD-based update via the `dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage, and the physical model.

```python
import dflex.render

stage = Usd.Stage.CreateNew("test.usda")

renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True
```

Each frame the renderer should be updated with the current model state and the current elapsed simulation time:

```python
renderer.update(state, sim_time)
```

## Contact

Miles Macklin ([email protected])
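Putting the README's snippets together, a minimal end-to-end sketch of building, stepping, and rendering the chain model might look as follows. This assumes `model.state()` returns the initial simulation state (as in the dFlex examples); adjust to your version if it differs.

```python
# Minimal end-to-end sketch combining the README steps above.
import dflex.sim
import dflex.render
from pxr import Usd

builder = dflex.sim.ModelBuilder()
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)  # anchor (zero mass)
for i in range(1, 10):
    builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    builder.add_spring(i - 1, i, 1.e+3, 0.0, 0)

model = builder.finalize('cpu')
state = model.state()  # assumed initial-state accessor

stage = Usd.Stage.CreateNew("chain.usda")
renderer = dflex.render.UsdRenderer(model, stage)

integrator = dflex.sim.ExplicitIntegrator()
sim_dt = 1.0 / 60.0
sim_time = 0.0

for i in range(100):
    state = integrator.forward(model, state, sim_dt)
    sim_time += sim_dt
    renderer.update(state, sim_time)

stage.Save()
```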
RoboticExplorationLab/CGAC/dflex/extension/dflex.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""dFlex Kit extension

Allows setting up, training, and running inference on dFlex optimization environments.
"""

import os
import subprocess

import carb
import carb.input

import math
import numpy as np

import omni.kit.ui
import omni.appwindow
import omni.kit.editor
import omni.timeline
import omni.usd
import omni.ui as ui

from pathlib import Path

ICON_PATH = Path(__file__).parent.parent.joinpath("icons")

from pxr import Usd, UsdGeom, Sdf, Gf

import torch

from omni.kit.settings import create_setting_widget, create_setting_widget_combo, SettingType, get_settings_interface

KIT_GREEN = 0xFF8A8777
LABEL_PADDING = 120

DARK_WINDOW_STYLE = {
    "Button": {"background_color": 0xFF292929, "margin": 3, "padding": 3, "border_radius": 2},
    "Button.Label": {"color": 0xFFCCCCCC},
    "Button:hovered": {"background_color": 0xFF9E9E9E},
    "Button:pressed": {"background_color": 0xC22A8778},
    "VStack::main_v_stack": {"secondary_color": 0x0, "margin_width": 10, "margin_height": 0},
    "VStack::frame_v_stack": {"margin_width": 15, "margin_height": 10},
    "Rectangle::frame_background": {"background_color": 0xFF343432, "border_radius": 5},
    "Field::models": {"background_color": 0xFF23211F, "font_size": 14, "color": 0xFFAAAAAA, "border_radius": 4.0},
    "Frame": {"background_color": 0xFFAAAAAA},
    "Label": {"font_size": 14, "color": 0xFF8A8777},
    "Label::status": {"font_size": 14, "color": 0xFF8AFF77}
}

CollapsableFrame_style = {
    "CollapsableFrame": {
        "background_color": 0xFF343432,
        "secondary_color": 0xFF343432,
        "color": 0xFFAAAAAA,
        "border_radius": 4.0,
        "border_color": 0x0,
        "border_width": 0,
        "font_size": 14,
        "padding": 0,
    },
    "HStack::header": {"margin": 5},
    "CollapsableFrame:hovered": {"secondary_color": 0xFF3A3A3A},
    "CollapsableFrame:pressed": {"secondary_color": 0xFF343432},
}

experiment = None


class Extension:
    def __init__(self):
        self.MENU_SHOW_WINDOW = "Window/dFlex"
        self.MENU_INSERT_REFERENCE = "Utilities/Insert Reference"

        self._editor_window = None
        self._window_Frame = None

        self.time = 0.0

        self.plot = None
        self.log = None
        self.status = None

        self.mode = 'stopped'

        self.properties = {}

        # add some helper menus
        self.menus = []

    def on_shutdown(self):
        self._editor_window = None
        self.menus = []
        #self.input.unsubscribe_to_keyboard_events(self.appwindow.get_keyboard(), self.key_sub)

    def on_startup(self):
        self.appwindow = omni.appwindow.get_default_app_window()
        self.editor = omni.kit.editor.get_editor_interface()
        self.input = carb.input.acquire_input_interface()
        self.timeline = omni.timeline.get_timeline_interface()
        self.usd_context = omni.usd.get_context()

        # event subscriptions
        self.stage_sub = self.usd_context.get_stage_event_stream().create_subscription_to_pop(self.on_stage, name="dFlex")
        self.update_sub = self.editor.subscribe_to_update_events(self.on_update)
        #self.key_sub = self.input.subscribe_to_keyboard_events(self.appwindow.get_keyboard(), self.on_key)

        self.menus.append(omni.kit.ui.get_editor_menu().add_item(self.MENU_SHOW_WINDOW, self.ui_on_menu, True, 11))
        self.menus.append(omni.kit.ui.get_editor_menu().add_item(self.MENU_INSERT_REFERENCE, self.ui_on_menu))

        self.reload()
        self.build_ui()

    def format(self, s):
        return s.replace("_", " ").title()

    def add_float_field(self, label, x, low=0.0, high=1.0):
        with ui.HStack():
            ui.Label(self.format(label), width=120)
            self.add_property(label, ui.FloatSlider(name="value", width=150, min=low, max=high), x)

    def add_int_field(self, label, x, low=0, high=100):
        with ui.HStack():
            ui.Label(self.format(label), width=120)
            self.add_property(label, ui.IntSlider(name="value", width=150, min=low, max=high), x)

    def add_combo_field(self, label, i, options):
        with ui.HStack():
            ui.Label(self.format(label), width=120)
            ui.ComboBox(i, *options, width=150)
            # todo: how does the model work for combo boxes in omni.ui

    def add_bool_field(self, label, b):
        with ui.HStack():
            ui.Label(self.format(label), width=120)
            self.add_property(label, ui.CheckBox(width=10), b)

    def add_property(self, label, widget, value):
        self.properties[label] = widget
        widget.model.set_value(value)

    def ui_on_menu(self, menu, value):
        if menu == self.MENU_SHOW_WINDOW:
            if self.window:
                if value:
                    self.window.show()
                else:
                    self.window.hide()
            omni.kit.ui.get_editor_menu().set_value(self.STAGE_SCRIPT_WINDOW_MENU, value)

        if menu == self.MENU_INSERT_REFERENCE:
            self.file_pick = omni.kit.ui.FilePicker("Select USD File", file_type=omni.kit.ui.FileDialogSelectType.FILE)
            self.file_pick.set_file_selected_fn(self.ui_on_select_ref_fn)
            self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)

    def ui_on_select_ref_fn(self, real_path):
        file = os.path.normpath(real_path)
        name = os.path.basename(file)
        stem = os.path.splitext(name)[0]

        stage = self.usd_context.get_stage()
        stage_path = stage.GetRootLayer().realPath

        base = os.path.commonpath([real_path, stage_path])
        rel_path = os.path.relpath(real_path, base)

        over = stage.OverridePrim('/' + stem)
        over.GetReferences().AddReference(rel_path)

    def ui_on_select_script_fn(self):
        # file picker
        self.file_pick = omni.kit.ui.FilePicker("Select Python Script", file_type=omni.kit.ui.FileDialogSelectType.FILE)
        self.file_pick.set_file_selected_fn(self.set_stage_script)
        self.file_pick.add_filter("Python Files (*.py)", ".*.py")
        self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)

    def ui_on_clear_script_fn(self, widget):
        self.clear_stage_script()

    def ui_on_select_network_fn(self):
        # file picker
        self.file_pick = omni.kit.ui.FilePicker("Select Model", file_type=omni.kit.ui.FileDialogSelectType.FILE)
        self.file_pick.set_file_selected_fn(self.set_network)
        self.file_pick.add_filter("PyTorch Files (*.pt)", ".*.pt")
        self.file_pick.show(omni.kit.ui.FileDialogDataSource.LOCAL)

    # build panel
    def build_ui(self):
        stage = self.usd_context.get_stage()

        self._editor_window = ui.Window("dFlex", width=450, height=800)
        self._editor_window.frame.set_style(DARK_WINDOW_STYLE)

        with self._editor_window.frame:
            with ui.VStack():
                self._window_Frame = ui.ScrollingFrame(
                    name="canvas",
                    horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
                )
                with self._window_Frame:
                    with ui.VStack(spacing=6, name="main_v_stack"):
                        ui.Spacer(height=5)
                        with ui.CollapsableFrame(title="Experiment", height=60, style=CollapsableFrame_style):
                            with ui.VStack(spacing=4, name="frame_v_stack"):
                                with ui.HStack():
                                    ui.Label("Script", name="label", width=120)
                                    s = ""
                                    if (self.get_stage_script() != None):
                                        s = self.get_stage_script()
                                    ui.StringField(name="models", tooltip="Training Python script").model.set_value(self.get_stage_script())
                                    ui.Button("", image_url="resources/icons/folder.png", width=15, image_width=15, clicked_fn=self.ui_on_select_script_fn)
                                    ui.Button("Clear", width=15, clicked_fn=self.clear_stage_script)
                                    ui.Button("Reload", width=15, clicked_fn=self.reload)
                                with ui.HStack():
                                    ui.Label("Hot Reload", width=100)
                                    ui.CheckBox(width=10).model.set_value(False)
                        if (experiment):
                            with ui.CollapsableFrame(height=60, title="Simulation Settings", style=CollapsableFrame_style):
                                with ui.VStack(spacing=4, name="frame_v_stack"):
                                    self.add_int_field("sim_substeps", 4, 1, 100)
                                    self.add_float_field("sim_duration", 5.0, 0.0, 30.0)
                            with ui.CollapsableFrame(title="Training Settings", height=60, style=CollapsableFrame_style):
                                with ui.VStack(spacing=4, name="frame_v_stack"):
                                    self.add_int_field("train_iters", 64, 1, 100)
                                    self.add_float_field("train_rate", 0.1, 0.0, 10.0)
                                    self.add_combo_field("train_optimizer", 0, ["GD", "SGD", "L-BFGS"])
                            with ui.CollapsableFrame(title="Actions", height=10, style=CollapsableFrame_style):
                                with ui.VStack(spacing=4, name="frame_v_stack"):
                                    with ui.HStack():
                                        ui.Label("Network", name="label", width=120)
                                        s = ""
                                        if (self.get_network() != None):
                                            s = self.get_network()
                                        ui.StringField(name="models", tooltip="Pretrained PyTorch network").model.set_value(s)
                                        ui.Button("", image_url="resources/icons/folder.png", width=15, image_width=15, clicked_fn=self.ui_on_select_network_fn)
                                        ui.Button("Clear", width=15, clicked_fn=self.clear_network)
                                    with ui.HStack():
                                        p = (1.0 / 6.0) * 100.0
                                        ui.Button("Run", width=ui.Percent(p), clicked_fn=self.run)
                                        ui.Button("Train", width=ui.Percent(p), clicked_fn=self.train)
                                        ui.Button("Stop", width=ui.Percent(p), clicked_fn=self.stop)
                                        ui.Button("Reset", width=ui.Percent(p), clicked_fn=self.reset)
                                    self.add_bool_field("record", True)
                                    with ui.HStack():
                                        ui.Label("Status: ", width=120)
                                        self.status = ui.Label("", name="status", width=200)
                            with ui.CollapsableFrame(title="Loss", style=CollapsableFrame_style):
                                data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]
                                self.plot = ui.Plot(ui.Type.LINE, -1.0, 1.0, *data, height=200, style={"color": 0xff00ffFF})
                            # with ui.ScrollingFrame(
                            #     name="log",
                            #     horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
                            #     height=200,
                            #     width=ui.Percent(95)
                            # ):
                            with ui.CollapsableFrame(title="Log", style=CollapsableFrame_style):
                                with ui.VStack(spacing=4, name="frame_v_stack"):
                                    self.log = ui.Label("", height=200)

    def reload(self):
        path = self.get_stage_script()
        if (path):
            # read code to string
            file = open(path)
            code = file.read()
            file.close()

            # run it in the local environment
            exec(code, globals(), globals())

            self.build_ui()

    # methods for storing script in stage metadata
    def get_stage_script(self):
        stage = self.usd_context.get_stage()
        custom_data = stage.GetEditTarget().GetLayer().customLayerData

        print(custom_data)

        if "script" in custom_data:
            return custom_data["script"]
        else:
            return None

    def set_stage_script(self, real_path):
        path = os.path.normpath(real_path)
        print("Setting stage script to: " + str(path))

        stage = self.usd_context.get_stage()
        with Sdf.ChangeBlock():
            custom_data = stage.GetEditTarget().GetLayer().customLayerData
            custom_data["script"] = path
            stage.GetEditTarget().GetLayer().customLayerData = custom_data

        # rebuild ui
        self.build_ui()

    def clear_stage_script(self):
        stage = self.usd_context.get_stage()
        with Sdf.ChangeBlock():
            custom_data = stage.GetEditTarget().GetLayer().customLayerData
            if "script" in custom_data:
                del custom_data["script"]
            stage.GetEditTarget().GetLayer().customLayerData = custom_data

        self.build_ui()

    def set_network(self, real_path):
        path = os.path.normpath(real_path)
        experiment.network_file = path
        self.build_ui()

    def get_network(self):
        return experiment.network_file

    def clear_network(self):
        experiment.network_file = None
        self.build_ui()

    def on_key(self, event, *args, **kwargs):
        # if event.keyboard == self.appwindow.get_keyboard():
        #     if event.type == carb.input.KeyboardEventType.KEY_PRESS:
        #         if event.input == carb.input.KeyboardInput.ESCAPE:
        #             self.stop()
        # return True
        pass

    def on_stage(self, stage_event):
        if stage_event.type == int(omni.usd.StageEventType.OPENED):
            self.build_ui()
            self.reload()

    def on_update(self, dt):
        if (experiment):
            stage = self.usd_context.get_stage()
            stage.SetStartTimeCode(0.0)
            stage.SetEndTimeCode(experiment.render_time * 60.0)
            stage.SetTimeCodesPerSecond(60.0)

            # pass parameters to the experiment
            if ('record' in self.properties):
                experiment.record = self.properties['record'].model.get_value_as_bool()

            # experiment.train_rate = self.get_property('train_rate')
            # experiment.train_iters = self.get_property('train_iters')
            # experiment.sim_duration = self.get_property('sim_duration')
            # experiment.sim_substeps = self.get_property('sim_substeps')

            if (self.mode == 'training'):
                experiment.train()

                # update error plot
                if (self.plot):
                    self.plot.scale_min = np.min(experiment.train_loss)
                    self.plot.scale_max = np.max(experiment.train_loss)
                    self.plot.set_data(*experiment.train_loss)
            elif (self.mode == 'inference'):
                experiment.run()

            # update stage time (allow scrubbing while stopped)
            if (self.mode != 'stopped'):
                self.timeline.set_current_time(experiment.render_time * 60.0)

            # update log
            if (self.log):
                self.log.text = df.util.log_output

    def set_status(self, str):
        self.status.text = str

    def train(self):
        experiment.reset()
        self.mode = 'training'

        # update status
        self.set_status('Training in progress, press [ESC] to cancel')

    def run(self):
        experiment.reset()
        self.mode = 'inference'

        # update status
        self.set_status('Inference in progress, press [ESC] to cancel')

    def stop(self):
        self.mode = 'stopped'

        # update status
        self.set_status('Stopped')

    def reset(self):
        experiment.reset()
        self.stop()


def get_extension():
    return Extension()
RoboticExplorationLab/CGAC/dflex/dflex/mat33.h
#pragma once

//----------------------------------------------------------
// mat33

struct mat33
{
    inline CUDA_CALLABLE mat33(float3 c0, float3 c1, float3 c2)
    {
        data[0][0] = c0.x; data[1][0] = c0.y; data[2][0] = c0.z;
        data[0][1] = c1.x; data[1][1] = c1.y; data[2][1] = c1.z;
        data[0][2] = c2.x; data[1][2] = c2.y; data[2][2] = c2.z;
    }

    inline CUDA_CALLABLE mat33(float m00=0.0f, float m01=0.0f, float m02=0.0f,
                               float m10=0.0f, float m11=0.0f, float m12=0.0f,
                               float m20=0.0f, float m21=0.0f, float m22=0.0f)
    {
        data[0][0] = m00; data[1][0] = m10; data[2][0] = m20;
        data[0][1] = m01; data[1][1] = m11; data[2][1] = m21;
        data[0][2] = m02; data[1][2] = m12; data[2][2] = m22;
    }

    CUDA_CALLABLE float3 get_row(int index) const
    {
        return (float3&)data[index];
    }

    CUDA_CALLABLE void set_row(int index, const float3& v)
    {
        (float3&)data[index] = v;
    }

    CUDA_CALLABLE float3 get_col(int index) const
    {
        return float3(data[0][index], data[1][index], data[2][index]);
    }

    CUDA_CALLABLE void set_col(int index, const float3& v)
    {
        data[0][index] = v.x;
        data[1][index] = v.y;
        data[2][index] = v.z;
    }

    // row major storage assumed to be compatible with PyTorch
    float data[3][3];
};

#ifdef CUDA
inline __device__ void atomic_add(mat33* addr, mat33 value)
{
    atomicAdd(&((addr->data)[0][0]), value.data[0][0]);
    atomicAdd(&((addr->data)[1][0]), value.data[1][0]);
    atomicAdd(&((addr->data)[2][0]), value.data[2][0]);
    atomicAdd(&((addr->data)[0][1]), value.data[0][1]);
    atomicAdd(&((addr->data)[1][1]), value.data[1][1]);
    atomicAdd(&((addr->data)[2][1]), value.data[2][1]);
    atomicAdd(&((addr->data)[0][2]), value.data[0][2]);
    atomicAdd(&((addr->data)[1][2]), value.data[1][2]);
    atomicAdd(&((addr->data)[2][2]), value.data[2][2]);
}
#endif

inline CUDA_CALLABLE void adj_mat33(float3 c0, float3 c1, float3 c2,
                                    float3& a0, float3& a1, float3& a2,
                                    const mat33& adj_ret)
{
    // column constructor
    a0 += adj_ret.get_col(0);
    a1 += adj_ret.get_col(1);
    a2 += adj_ret.get_col(2);
}

inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
                                    float m10, float m11, float m12,
                                    float m20, float m21, float m22,
                                    float& a00, float& a01, float& a02,
                                    float& a10, float& a11, float& a12,
                                    float& a20, float& a21, float& a22,
                                    const mat33& adj_ret)
{
    printf("todo\n");
}

inline CUDA_CALLABLE float index(const mat33& m, int row, int col)
{
    return m.data[row][col];
}

inline CUDA_CALLABLE mat33 add(const mat33& a, const mat33& b)
{
    mat33 t;
    for (int i=0; i < 3; ++i)
        for (int j=0; j < 3; ++j)
            t.data[i][j] = a.data[i][j] + b.data[i][j];
    return t;
}

inline CUDA_CALLABLE mat33 mul(const mat33& a, float b)
{
    mat33 t;
    for (int i=0; i < 3; ++i)
        for (int j=0; j < 3; ++j)
            t.data[i][j] = a.data[i][j]*b;
    return t;
}

inline CUDA_CALLABLE float3 mul(const mat33& a, const float3& b)
{
    float3 r = a.get_col(0)*b.x + a.get_col(1)*b.y + a.get_col(2)*b.z;
    return r;
}

inline CUDA_CALLABLE mat33 mul(const mat33& a, const mat33& b)
{
    mat33 t;
    for (int i=0; i < 3; ++i)
        for (int j=0; j < 3; ++j)
            for (int k=0; k < 3; ++k)
                t.data[i][j] += a.data[i][k]*b.data[k][j];
    return t;
}

inline CUDA_CALLABLE mat33 transpose(const mat33& a)
{
    mat33 t;
    for (int i=0; i < 3; ++i)
        for (int j=0; j < 3; ++j)
            t.data[i][j] = a.data[j][i];
    return t;
}

inline CUDA_CALLABLE float determinant(const mat33& m)
{
    return dot(float3(m.data[0]), cross(float3(m.data[1]), float3(m.data[2])));
}

inline CUDA_CALLABLE mat33 outer(const float3& a, const float3& b)
{
    return mat33(a*b.x, a*b.y, a*b.z);
}

inline CUDA_CALLABLE mat33 skew(const float3& a)
{
    mat33 out(0.0f, -a.z,  a.y,
              a.z,  0.0f, -a.x,
             -a.y,  a.x,  0.0f);
    return out;
}

inline void CUDA_CALLABLE adj_index(const mat33& m, int row, int col, mat33& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
    adj_m.data[row][col] += adj_ret;
}

inline CUDA_CALLABLE void adj_add(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adj_a.data[i][j] += adj_ret.data[i][j];
            adj_b.data[i][j] += adj_ret.data[i][j];
        }
    }
}

inline CUDA_CALLABLE void adj_mul(const mat33& a, float b, mat33& adj_a, float& adj_b, const mat33& adj_ret)
{
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adj_a.data[i][j] += b*adj_ret.data[i][j];
            adj_b += a.data[i][j]*adj_ret.data[i][j];
        }
    }
}

inline CUDA_CALLABLE void adj_mul(const mat33& a, const float3& b, mat33& adj_a, float3& adj_b, const float3& adj_ret)
{
    adj_a += outer(adj_ret, b);
    adj_b += mul(transpose(a), adj_ret);
}

inline CUDA_CALLABLE void adj_mul(const mat33& a, const mat33& b, mat33& adj_a, mat33& adj_b, const mat33& adj_ret)
{
    adj_a += mul(adj_ret, transpose(b));
    adj_b += mul(transpose(a), adj_ret);
}

inline CUDA_CALLABLE void adj_transpose(const mat33& a, mat33& adj_a, const mat33& adj_ret)
{
    adj_a += transpose(adj_ret);
}

inline CUDA_CALLABLE void adj_determinant(const mat33& m, mat33& adj_m, float adj_ret)
{
    (float3&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
    (float3&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
    (float3&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
}

inline CUDA_CALLABLE void adj_skew(const float3& a, float3& adj_a, const mat33& adj_ret)
{
    mat33 out(0.0f, -a.z,  a.y,
              a.z,  0.0f, -a.x,
             -a.y,  a.x,  0.0f);

    adj_a.x += adj_ret.data[2][1] - adj_ret.data[1][2];
    adj_a.y += adj_ret.data[0][2] - adj_ret.data[2][0];
    adj_a.z += adj_ret.data[1][0] - adj_ret.data[0][1];
}
RoboticExplorationLab/CGAC/dflex/dflex/spatial.h
#pragma once

//---------------------------------------------------------------------------------
// Represents a twist in se(3)

struct spatial_vector
{
    float3 w;
    float3 v;

    CUDA_CALLABLE inline spatial_vector(float a, float b, float c, float d, float e, float f) : w(a, b, c), v(d, e, f) {}
    CUDA_CALLABLE inline spatial_vector(float3 w=float3(), float3 v=float3()) : w(w), v(v) {}
    CUDA_CALLABLE inline spatial_vector(float a) : w(a, a, a), v(a, a, a) {}

    CUDA_CALLABLE inline float operator[](int index) const
    {
        assert(index < 6);
        return (&w.x)[index];
    }

    CUDA_CALLABLE inline float& operator[](int index)
    {
        assert(index < 6);
        return (&w.x)[index];
    }
};

CUDA_CALLABLE inline spatial_vector operator - (spatial_vector a)
{
    return spatial_vector(-a.w, -a.v);
}

CUDA_CALLABLE inline spatial_vector add(const spatial_vector& a, const spatial_vector& b)
{
    return { a.w + b.w, a.v + b.v };
}

CUDA_CALLABLE inline spatial_vector sub(const spatial_vector& a, const spatial_vector& b)
{
    return { a.w - b.w, a.v - b.v };
}

CUDA_CALLABLE inline spatial_vector mul(const spatial_vector& a, float s)
{
    return { a.w*s, a.v*s };
}

CUDA_CALLABLE inline float spatial_dot(const spatial_vector& a, const spatial_vector& b)
{
    return dot(a.w, b.w) + dot(a.v, b.v);
}

CUDA_CALLABLE inline spatial_vector spatial_cross(const spatial_vector& a, const spatial_vector& b)
{
    float3 w = cross(a.w, b.w);
    float3 v = cross(a.v, b.w) + cross(a.w, b.v);
    return spatial_vector(w, v);
}

CUDA_CALLABLE inline spatial_vector spatial_cross_dual(const spatial_vector& a, const spatial_vector& b)
{
    float3 w = cross(a.w, b.w) + cross(a.v, b.v);
    float3 v = cross(a.w, b.v);
    return spatial_vector(w, v);
}

CUDA_CALLABLE inline float3 spatial_top(const spatial_vector& a)
{
    return a.w;
}

CUDA_CALLABLE inline float3 spatial_bottom(const spatial_vector& a)
{
    return a.v;
}

// adjoint methods
CUDA_CALLABLE inline void adj_spatial_vector(
    float a, float b, float c,
    float d, float e, float f,
    float& adj_a, float& adj_b, float& adj_c,
    float& adj_d, float& adj_e, float& adj_f,
    const spatial_vector& adj_ret)
{
    adj_a += adj_ret.w.x;
    adj_b += adj_ret.w.y;
    adj_c += adj_ret.w.z;

    adj_d += adj_ret.v.x;
    adj_e += adj_ret.v.y;
    adj_f += adj_ret.v.z;
}

CUDA_CALLABLE inline void adj_spatial_vector(const float3& w, const float3& v, float3& adj_w, float3& adj_v, const spatial_vector& adj_ret)
{
    adj_w += adj_ret.w;
    adj_v += adj_ret.v;
}

CUDA_CALLABLE inline void adj_add(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
    adj_add(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
    adj_add(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}

CUDA_CALLABLE inline void adj_sub(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
    adj_sub(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
    adj_sub(a.v, b.v, adj_a.v, adj_b.v, adj_ret.v);
}

CUDA_CALLABLE inline void adj_mul(const spatial_vector& a, float s, spatial_vector& adj_a, float& adj_s, const spatial_vector& adj_ret)
{
    adj_mul(a.w, s, adj_a.w, adj_s, adj_ret.w);
    adj_mul(a.v, s, adj_a.v, adj_s, adj_ret.v);
}

CUDA_CALLABLE inline void adj_spatial_dot(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const float& adj_ret)
{
    adj_dot(a.w, b.w, adj_a.w, adj_b.w, adj_ret);
    adj_dot(a.v, b.v, adj_a.v, adj_b.v, adj_ret);
}

CUDA_CALLABLE inline void adj_spatial_cross(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
    adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);

    adj_cross(a.v, b.w, adj_a.v, adj_b.w, adj_ret.v);
    adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}

CUDA_CALLABLE inline void adj_spatial_cross_dual(const spatial_vector& a, const spatial_vector& b, spatial_vector& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
    adj_cross(a.w, b.w, adj_a.w, adj_b.w, adj_ret.w);
    adj_cross(a.v, b.v, adj_a.v, adj_b.v, adj_ret.w);

    adj_cross(a.w, b.v, adj_a.w, adj_b.v, adj_ret.v);
}

CUDA_CALLABLE inline void adj_spatial_top(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
    adj_a.w += adj_ret;
}

CUDA_CALLABLE inline void adj_spatial_bottom(const spatial_vector& a, spatial_vector& adj_a, const float3& adj_ret)
{
    adj_a.v += adj_ret;
}

#ifdef CUDA
inline __device__ void atomic_add(spatial_vector* addr, const spatial_vector& value)
{
    atomic_add(&addr->w, value.w);
    atomic_add(&addr->v, value.v);
}
#endif

//---------------------------------------------------------------------------------
// Represents a rigid body transformation

struct spatial_transform
{
    float3 p;
    quat q;

    CUDA_CALLABLE inline spatial_transform(float3 p=float3(), quat q=quat()) : p(p), q(q) {}
    CUDA_CALLABLE inline spatial_transform(float) {}  // helps uniform initialization
};

CUDA_CALLABLE inline spatial_transform spatial_transform_identity()
{
    return spatial_transform(float3(), quat_identity());
}

CUDA_CALLABLE inline float3 spatial_transform_get_translation(const spatial_transform& t)
{
    return t.p;
}

CUDA_CALLABLE inline quat spatial_transform_get_rotation(const spatial_transform& t)
{
    return t.q;
}

CUDA_CALLABLE inline spatial_transform spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b)
{
    return { rotate(a.q, b.p) + a.p, mul(a.q, b.q) };
}

/*
CUDA_CALLABLE inline spatial_transform spatial_transform_inverse(const spatial_transform& t)
{
    quat q_inv = inverse(t.q);
    return spatial_transform(-rotate(q_inv, t.p), q_inv);
}
*/

CUDA_CALLABLE inline float3 spatial_transform_vector(const spatial_transform& t, const float3& x)
{
    return rotate(t.q, x);
}

CUDA_CALLABLE inline float3 spatial_transform_point(const spatial_transform& t, const float3& x)
{
    return t.p + rotate(t.q, x);
}

// Frank & Park definition 3.20, pg 100
CUDA_CALLABLE inline spatial_vector spatial_transform_twist(const spatial_transform& t, const spatial_vector& x)
{
    float3 w = rotate(t.q, x.w);
    float3 v = rotate(t.q, x.v) + cross(t.p, w);

    return spatial_vector(w, v);
}

CUDA_CALLABLE inline spatial_vector spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x)
{
    float3 v = rotate(t.q, x.v);
    float3 w = rotate(t.q, x.w) + cross(t.p, v);

    return spatial_vector(w, v);
}

CUDA_CALLABLE inline spatial_transform add(const spatial_transform& a, const spatial_transform& b)
{
    return { a.p + b.p, a.q + b.q };
}

CUDA_CALLABLE inline spatial_transform sub(const spatial_transform& a, const spatial_transform& b)
{
    return { a.p - b.p, a.q - b.q };
}

CUDA_CALLABLE inline spatial_transform mul(const spatial_transform& a, float s)
{
    return { a.p*s, a.q*s };
}

// adjoint methods
CUDA_CALLABLE inline void adj_add(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
    adj_add(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
    adj_add(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}

CUDA_CALLABLE inline void adj_sub(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
    adj_sub(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p);
    adj_sub(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}

CUDA_CALLABLE inline void adj_mul(const spatial_transform& a, float s, spatial_transform& adj_a, float& adj_s, const spatial_transform& adj_ret)
{
    adj_mul(a.p, s, adj_a.p, adj_s, adj_ret.p);
    adj_mul(a.q, s, adj_a.q, adj_s, adj_ret.q);
}

#ifdef CUDA
inline __device__ void atomic_add(spatial_transform* addr, const spatial_transform& value)
{
    atomic_add(&addr->p, value.p);
    atomic_add(&addr->q, value.q);
}
#endif

CUDA_CALLABLE inline void adj_spatial_transform(const float3& p, const quat& q, float3& adj_p, quat& adj_q, const spatial_transform& adj_ret)
{
    adj_p += adj_ret.p;
    adj_q += adj_ret.q;
}

CUDA_CALLABLE inline void adj_spatial_transform_identity(const spatial_transform& adj_ret)
{
    // nop
}

CUDA_CALLABLE inline void adj_spatial_transform_get_translation(const spatial_transform& t, spatial_transform& adj_t, const float3& adj_ret)
{
    adj_t.p += adj_ret;
}

CUDA_CALLABLE inline void adj_spatial_transform_get_rotation(const spatial_transform& t, spatial_transform& adj_t, const quat& adj_ret)
{
    adj_t.q += adj_ret;
}

/*
CUDA_CALLABLE inline void adj_spatial_transform_inverse(const spatial_transform& t, spatial_transform& adj_t, const spatial_transform& adj_ret)
{
    //quat q_inv = inverse(t.q);
    //return spatial_transform(-rotate(q_inv, t.p), q_inv);

    quat q_inv = inverse(t.q);
    float3 p = rotate(q_inv, t.p);
    float3 np = -p;

    quat adj_q_inv = 0.0f;
    quat adj_q = 0.0f;
    float3 adj_p = 0.0f;
    float3 adj_np = 0.0f;

    adj_spatial_transform(np, q_inv, adj_np, adj_q_inv, adj_ret);
    adj_p = -adj_np;

    adj_rotate(q_inv, t.p, adj_q_inv, adj_t.p, adj_p);
    adj_inverse(t.q, adj_t.q, adj_q_inv);
}
*/

CUDA_CALLABLE inline void adj_spatial_transform_multiply(const spatial_transform& a, const spatial_transform& b, spatial_transform& adj_a, spatial_transform& adj_b, const spatial_transform& adj_ret)
{
    // translational part
    adj_rotate(a.q, b.p, adj_a.q, adj_b.p, adj_ret.p);
    adj_a.p += adj_ret.p;

    // rotational part
    adj_mul(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q);
}

CUDA_CALLABLE inline void adj_spatial_transform_vector(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
    adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
}

CUDA_CALLABLE inline void adj_spatial_transform_point(const spatial_transform& t, const float3& x, spatial_transform& adj_t, float3& adj_x, const float3& adj_ret)
{
    adj_rotate(t.q, x, adj_t.q, adj_x, adj_ret);
    adj_t.p += adj_ret;
}

CUDA_CALLABLE inline void adj_spatial_transform_twist(const spatial_transform& a, const spatial_vector& s, spatial_transform& adj_a, spatial_vector& adj_s, const spatial_vector& adj_ret)
{
    printf("todo, %s, %d\n", __FILE__, __LINE__);

    // float3 w = rotate(t.q, x.w);
    // float3 v = rotate(t.q, x.v) + cross(t.p, w);
    // return spatial_vector(w, v);
}

CUDA_CALLABLE inline void adj_spatial_transform_wrench(const spatial_transform& t, const spatial_vector& x, spatial_transform& adj_t, spatial_vector& adj_x, const spatial_vector& adj_ret)
{
    printf("todo, %s, %d\n", __FILE__, __LINE__);

    // float3 v = rotate(t.q, x.v);
    // float3 w = rotate(t.q, x.w) + cross(t.p, v);
    // return spatial_vector(w, v);
}

/*
// should match model.py
#define JOINT_PRISMATIC 0
#define JOINT_REVOLUTE 1
#define JOINT_FIXED 2
#define JOINT_FREE 3

CUDA_CALLABLE inline spatial_transform spatial_jcalc(int type, float* joint_q, float3 axis, int start)
{
    if (type == JOINT_REVOLUTE)
    {
        float q = joint_q[start];
        spatial_transform X_jc = spatial_transform(float3(), quat_from_axis_angle(axis, q));
        return X_jc;
    }
    else if (type == JOINT_PRISMATIC)
    {
        float q = joint_q[start];
        spatial_transform X_jc = spatial_transform(axis*q, quat_identity());
        return X_jc;
    }
    else if (type == JOINT_FREE)
    {
        float px = joint_q[start+0];
        float py = joint_q[start+1];
        float pz = joint_q[start+2];

        float qx = joint_q[start+3];
        float qy = joint_q[start+4];
        float qz = joint_q[start+5];
        float qw = joint_q[start+6];

        spatial_transform X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw));
        return X_jc;
    }

    // JOINT_FIXED
    return spatial_transform(float3(), quat_identity());
}

CUDA_CALLABLE inline void adj_spatial_jcalc(int type, float* q, float3 axis, int start,
                                            int& adj_type, float* adj_q, float3& adj_axis, int& adj_start,
                                            const spatial_transform& adj_ret)
{
    if (type == JOINT_REVOLUTE)
    {
        adj_quat_from_axis_angle(axis, q[start], adj_axis, adj_q[start], adj_ret.q);
    }
    else if (type == JOINT_PRISMATIC)
    {
        adj_mul(axis, q[start], adj_axis, adj_q[start], adj_ret.p);
    }
    else if (type == JOINT_FREE)
    {
        adj_q[start+0] += adj_ret.p.x;
        adj_q[start+1] += adj_ret.p.y;
        adj_q[start+2] += adj_ret.p.z;

        adj_q[start+3] += adj_ret.q.x;
        adj_q[start+4] += adj_ret.q.y;
        adj_q[start+5] += adj_ret.q.z;
        adj_q[start+6] += adj_ret.q.w;
    }
}
*/

struct spatial_matrix
{
    float data[6][6] = { { 0 } };

    CUDA_CALLABLE inline spatial_matrix(float f=0.0f)
    {
    }

    CUDA_CALLABLE inline spatial_matrix(
        float a00, float a01, float a02, float a03, float a04, float a05,
        float a10, float a11, float a12, float a13, float a14, float a15,
        float a20, float a21, float a22, float a23, float a24, float a25,
        float a30, float a31, float a32, float a33, float a34, float a35,
        float a40, float a41, float a42, float a43, float a44, float a45,
        float a50, float a51, float a52, float a53, float a54, float a55)
    {
        data[0][0] = a00; data[0][1] = a01; data[0][2] = a02; data[0][3] = a03; data[0][4] = a04; data[0][5] = a05;
        data[1][0] = a10; data[1][1] = a11; data[1][2] = a12; data[1][3] = a13; data[1][4] = a14; data[1][5] = a15;
        data[2][0] = a20; data[2][1] = a21; data[2][2] = a22; data[2][3] = a23; data[2][4] = a24; data[2][5] = a25;
        data[3][0] = a30; data[3][1] = a31; data[3][2] = a32; data[3][3] = a33; data[3][4] = a34; data[3][5] = a35;
        data[4][0] = a40; data[4][1] = a41; data[4][2] = a42; data[4][3] = a43; data[4][4] = a44; data[4][5] = a45;
        data[5][0] = a50; data[5][1] = a51; data[5][2] = a52; data[5][3] = a53; data[5][4] = a54; data[5][5] = a55;
    }
};

inline CUDA_CALLABLE float index(const spatial_matrix& m, int row, int col)
{
    return m.data[row][col];
}

inline CUDA_CALLABLE spatial_matrix add(const spatial_matrix& a, const spatial_matrix& b)
{
    spatial_matrix out;
    for (int i=0; i < 6; ++i)
        for (int j=0; j < 6; ++j)
            out.data[i][j] = a.data[i][j] + b.data[i][j];
    return out;
}

inline CUDA_CALLABLE spatial_vector mul(const spatial_matrix& a, const spatial_vector& b)
{
    spatial_vector out;
    for (int i=0; i < 6; ++i)
        for (int j=0; j < 6; ++j)
            out[i] += a.data[i][j]*b[j];
    return out;
}

inline CUDA_CALLABLE spatial_matrix mul(const spatial_matrix& a, const spatial_matrix& b)
{
    spatial_matrix out;
    for (int i=0; i < 6; ++i)
        for (int j=0; j < 6; ++j)
            for (int k=0; k < 6; ++k)
                out.data[i][j] += a.data[i][k]*b.data[k][j];
    return out;
}

inline CUDA_CALLABLE spatial_matrix transpose(const spatial_matrix& a)
{
    spatial_matrix out;
    for (int i=0; i < 6; i++)
        for (int j=0; j < 6; j++)
            out.data[i][j] = a.data[j][i];
    return out;
}

inline CUDA_CALLABLE spatial_matrix outer(const spatial_vector& a, const spatial_vector& b)
{
    spatial_matrix out;
    for (int i=0; i < 6; i++)
        for (int j=0; j < 6; j++)
            out.data[i][j] = a[i]*b[j];
    return out;
}

CUDA_CALLABLE void print(spatial_transform t);
CUDA_CALLABLE void print(spatial_matrix m);

inline CUDA_CALLABLE spatial_matrix spatial_adjoint(const mat33& R, const mat33& S)
{
    spatial_matrix adT;

    // T = [R          0]
    //     [skew(p)*R  R]

    // diagonal blocks
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adT.data[i][j] = R.data[i][j];
            adT.data[i+3][j+3] = R.data[i][j];
        }
    }

    // lower off diagonal
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adT.data[i+3][j] = S.data[i][j];
        }
    }

    return adT;
}

inline CUDA_CALLABLE void adj_spatial_adjoint(const mat33& R, const mat33& S, mat33& adj_R, mat33& adj_S, const spatial_matrix& adj_ret)
{
    // diagonal blocks
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adj_R.data[i][j] += adj_ret.data[i][j];
            adj_R.data[i][j] += adj_ret.data[i+3][j+3];
        }
    }

    // lower off diagonal
    for (int i=0; i < 3; ++i)
    {
        for (int j=0; j < 3; ++j)
        {
            adj_S.data[i][j] += adj_ret.data[i+3][j];
        }
    }
}

/*
// computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
inline CUDA_CALLABLE spatial_matrix spatial_transform_inertia(const spatial_transform& t, const spatial_matrix& I)
{
    spatial_transform t_inv = spatial_transform_inverse(t);

    float3 r1 = rotate(t_inv.q, float3(1.0, 0.0, 0.0));
    float3 r2 = rotate(t_inv.q, float3(0.0, 1.0, 0.0));
    float3 r3 = rotate(t_inv.q, float3(0.0, 0.0, 1.0));

    mat33 R(r1, r2, r3);
    mat33 S = mul(skew(t_inv.p), R);

    spatial_matrix T = spatial_adjoint(R, S);

    // first quadratic form, for derivation of the adjoint see https://people.maths.ox.ac.uk/gilesm/files/AD2008.pdf, section 2.3.2
    return mul(mul(transpose(T), I), T);
}
*/

inline CUDA_CALLABLE void adj_add(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
    adj_a += adj_ret;
    adj_b += adj_ret;
}

inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_vector& b, spatial_matrix& adj_a, spatial_vector& adj_b, const spatial_vector& adj_ret)
{
    adj_a += outer(adj_ret, b);
    adj_b += mul(transpose(a), adj_ret);
}

inline CUDA_CALLABLE void adj_mul(const spatial_matrix& a, const spatial_matrix& b, spatial_matrix& adj_a, spatial_matrix& adj_b, const spatial_matrix& adj_ret)
{
    adj_a += mul(adj_ret, transpose(b));
    adj_b += mul(transpose(a), adj_ret);
}

inline CUDA_CALLABLE void adj_transpose(const spatial_matrix& a, spatial_matrix& adj_a, const spatial_matrix& adj_ret)
{
    adj_a += transpose(adj_ret);
}

inline CUDA_CALLABLE void adj_spatial_transform_inertia(
    const spatial_transform& xform, const spatial_matrix& I,
    const spatial_transform& adj_xform, const spatial_matrix& adj_I,
    spatial_matrix& adj_ret)
{
    //printf("todo, %s, %d\n", __FILE__, __LINE__);
}

inline void CUDA_CALLABLE adj_index(const spatial_matrix& m, int row, int col, spatial_matrix& adj_m, int& adj_row, int& adj_col, float adj_ret)
{
    adj_m.data[row][col] += adj_ret;
}

#ifdef CUDA
inline __device__ void atomic_add(spatial_matrix* addr, const spatial_matrix& value)
{
    for (int i=0; i < 6; ++i)
    {
        for (int j=0; j < 6; ++j)
        {
            atomicAdd(&addr->data[i][j], value.data[i][j]);
        }
    }
}
#endif

CUDA_CALLABLE inline int row_index(int stride, int i, int j)
{
    return i*stride + j;
}

// builds spatial Jacobian J which is an (joint_count*6)x(dof_count) matrix
CUDA_CALLABLE inline void spatial_jacobian(
    const spatial_vector* S,
    const int* joint_parents,
    const int* joint_qd_start,
    int joint_start,    // offset of the first joint for the articulation
    int joint_count,
    int J_start,
    float* J)
{
    const int articulation_dof_start = joint_qd_start[joint_start];
    const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
    const int articulation_dof_count = articulation_dof_end-articulation_dof_start;

    // shift output pointers
    const int S_start = articulation_dof_start;

    S += S_start;
    J += J_start;

    for (int i=0; i < joint_count; ++i)
    {
        const int row_start = i * 6;

        int j = joint_start + i;
        while (j != -1)
        {
            const int joint_dof_start = joint_qd_start[j];
            const int joint_dof_end = joint_qd_start[j+1];
            const int joint_dof_count = joint_dof_end-joint_dof_start;

            // fill out each row of the Jacobian walking up the tree
            //for (int col=dof_start; col < dof_end; ++col)
            for (int dof=0; dof < joint_dof_count; ++dof)
            {
                const int col = (joint_dof_start-articulation_dof_start) + dof;

                J[row_index(articulation_dof_count, row_start+0, col)] = S[col].w.x;
                J[row_index(articulation_dof_count, row_start+1, col)] = S[col].w.y;
                J[row_index(articulation_dof_count, row_start+2, col)] = S[col].w.z;
                J[row_index(articulation_dof_count, row_start+3, col)] = S[col].v.x;
                J[row_index(articulation_dof_count, row_start+4, col)] = S[col].v.y;
                J[row_index(articulation_dof_count, row_start+5, col)] = S[col].v.z;
            }

            j = joint_parents[j];
        }
    }
}

CUDA_CALLABLE inline void adj_spatial_jacobian(
    const spatial_vector* S,
    const int* joint_parents,
    const int* joint_qd_start,
    const int joint_start,
    const int joint_count,
    const int J_start,
    const float* J,
    // adjs
    spatial_vector* adj_S,
    int* adj_joint_parents,
    int* adj_joint_qd_start,
    int& adj_joint_start,
    int& adj_joint_count,
    int& adj_J_start,
    const float* adj_J)
{
    const int articulation_dof_start = joint_qd_start[joint_start];
    const int articulation_dof_end = joint_qd_start[joint_start + joint_count];
    const int articulation_dof_count = articulation_dof_end-articulation_dof_start;

    // shift output pointers
    const int S_start = articulation_dof_start;

    S += S_start;
    J += J_start;

    adj_S += S_start;
    adj_J += J_start;

    for (int i=0; i < joint_count; ++i)
    {
        const int row_start = i * 6;

        int j = joint_start + i;
        while (j != -1)
        {
            const int joint_dof_start = joint_qd_start[j];
            const int joint_dof_end = joint_qd_start[j+1];
            const int joint_dof_count = joint_dof_end-joint_dof_start;

            // fill out each row of the Jacobian walking up the tree
            for (int dof=0; dof < joint_dof_count; ++dof)
            {
                const int col = (joint_dof_start-articulation_dof_start) + dof;

                adj_S[col].w.x += adj_J[row_index(articulation_dof_count, row_start+0, col)];
                adj_S[col].w.y += adj_J[row_index(articulation_dof_count, row_start+1, col)];
                adj_S[col].w.z += adj_J[row_index(articulation_dof_count, row_start+2, col)];
                adj_S[col].v.x += adj_J[row_index(articulation_dof_count, row_start+3, col)];
                adj_S[col].v.y += adj_J[row_index(articulation_dof_count, row_start+4, col)];
                adj_S[col].v.z += adj_J[row_index(articulation_dof_count, row_start+5, col)];
            }

            j = joint_parents[j];
        }
    }
}

CUDA_CALLABLE inline void spatial_mass(const spatial_matrix* I_s, int joint_start, int joint_count, int M_start, float* M)
{
    const int stride = joint_count*6;

    for (int l=0; l < joint_count; ++l)
    {
        for (int i=0; i < 6; ++i)
        {
            for (int j=0; j < 6; ++j)
            {
                M[M_start + row_index(stride, l*6 + i, l*6 + j)] = I_s[joint_start + l].data[i][j];
            }
        }
    }
}

CUDA_CALLABLE inline void adj_spatial_mass(
    const spatial_matrix* I_s,
    const int joint_start,
    const int joint_count,
    const int M_start,
    const float* M,
    spatial_matrix* adj_I_s,
    int& adj_joint_start,
    int& adj_joint_count,
    int& adj_M_start,
    const float* adj_M)
{
    const int stride = joint_count*6;

    for (int l=0; l < joint_count; ++l)
    {
        for (int i=0; i < 6; ++i)
        {
            for (int j=0; j < 6; ++j)
            {
                adj_I_s[joint_start + l].data[i][j] += adj_M[M_start + row_index(stride, l*6 + i, l*6 + j)];
            }
        }
    }
}
24,501
C
28.099762
198
0.594057
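A note on spatial_jacobian above: it fills a dense row-major Jacobian with 6 rows per joint and one column per articulation degree of freedom, accumulating the motion subspace of each joint while walking up the parent chain. Below is a minimal numpy sketch of the same fill pattern, assuming row_index(stride, row, col) == row*stride + col and a hypothetical two-joint serial chain with one DOF per joint; it is an illustration, not part of the library.

    import numpy as np

    joint_parents = [-1, 0]            # joint 1 is the child of joint 0
    joint_qd_start = [0, 1, 2]         # per-joint DOF offsets; last entry = total DOF count
    S = np.random.randn(2, 6)          # one 6D motion subspace per DOF (w, v)

    dof_count = joint_qd_start[-1]
    J = np.zeros((6 * len(joint_parents), dof_count))
    for i in range(len(joint_parents)):
        j = i
        while j != -1:                 # walk up the tree, as in the kernel
            for dof in range(joint_qd_start[j], joint_qd_start[j + 1]):
                J[i * 6:(i + 1) * 6, dof] = S[dof]
            j = joint_parents[j]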
RoboticExplorationLab/CGAC/dflex/dflex/mat22.h
#pragma once //---------------------------------------------------------- // mat22 struct mat22 { inline CUDA_CALLABLE mat22(float m00=0.0f, float m01=0.0f, float m10=0.0f, float m11=0.0f) { data[0][0] = m00; data[1][0] = m10; data[0][1] = m01; data[1][1] = m11; } // row major storage assumed to be compatible with PyTorch float data[2][2]; }; #ifdef CUDA inline __device__ void atomic_add(mat22 * addr, mat22 value) { // *addr += value; atomicAdd(&((addr -> data)[0][0]), value.data[0][0]); atomicAdd(&((addr -> data)[0][1]), value.data[0][1]); atomicAdd(&((addr -> data)[1][0]), value.data[1][0]); atomicAdd(&((addr -> data)[1][1]), value.data[1][1]); } #endif inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret) { printf("todo\n"); } inline CUDA_CALLABLE float index(const mat22& m, int row, int col) { return m.data[row][col]; } inline CUDA_CALLABLE mat22 add(const mat22& a, const mat22& b) { mat22 t; for (int i=0; i < 2; ++i) { for (int j=0; j < 2; ++j) { t.data[i][j] = a.data[i][j] + b.data[i][j]; } } return t; } inline CUDA_CALLABLE mat22 mul(const mat22& a, float b) { mat22 t; for (int i=0; i < 2; ++i) { for (int j=0; j < 2; ++j) { t.data[i][j] = a.data[i][j]*b; } } return t; } inline CUDA_CALLABLE mat22 mul(const mat22& a, const mat22& b) { mat22 t; for (int i=0; i < 2; ++i) { for (int j=0; j < 2; ++j) { for (int k=0; k < 2; ++k) { t.data[i][j] += a.data[i][k]*b.data[k][j]; } } } return t; } inline CUDA_CALLABLE mat22 transpose(const mat22& a) { mat22 t; for (int i=0; i < 2; ++i) { for (int j=0; j < 2; ++j) { t.data[i][j] = a.data[j][i]; } } return t; } inline CUDA_CALLABLE float determinant(const mat22& m) { return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1]; } inline void CUDA_CALLABLE adj_index(const mat22& m, int row, int col, mat22& adj_m, int& adj_row, int& adj_col, float adj_ret) { adj_m.data[row][col] += adj_ret; } inline CUDA_CALLABLE void adj_add(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret) { for (int i=0; i < 2; ++i) { for (int j=0; j < 2; ++j) { adj_a.data[i][j] += adj_ret.data[i][j]; adj_b.data[i][j] += adj_ret.data[i][j]; // accumulate, do not overwrite, matching the adjoint convention used elsewhere } } } inline CUDA_CALLABLE void adj_mul(const mat22& a, const mat22& b, mat22& adj_a, mat22& adj_b, const mat22& adj_ret) { printf("todo\n"); } inline CUDA_CALLABLE void adj_transpose(const mat22& a, mat22& adj_a, const mat22& adj_ret) { printf("todo\n"); } inline CUDA_CALLABLE void adj_determinant(const mat22& m, mat22& adj_m, float adj_ret) { adj_m.data[0][0] += m.data[1][1]*adj_ret; adj_m.data[1][1] += m.data[0][0]*adj_ret; adj_m.data[0][1] -= m.data[1][0]*adj_ret; adj_m.data[1][0] -= m.data[0][1]*adj_ret; }
3,206
C
21.744681
165
0.515908
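The adj_determinant rule above is the cofactor formula for det(m) = m00*m11 - m10*m01. A standalone numpy sketch checking it against central finite differences (illustrative only, not library code):

    import numpy as np

    def adj_determinant(m, adj_ret):   # mirrors the header's accumulation rule
        adj = np.zeros((2, 2))
        adj[0, 0] += m[1, 1] * adj_ret
        adj[1, 1] += m[0, 0] * adj_ret
        adj[0, 1] -= m[1, 0] * adj_ret
        adj[1, 0] -= m[0, 1] * adj_ret
        return adj

    m = np.random.randn(2, 2)
    eps = 1e-6
    num = np.zeros((2, 2))
    for i in range(2):
        for j in range(2):
            dm = np.zeros((2, 2))
            dm[i, j] = eps
            num[i, j] = (np.linalg.det(m + dm) - np.linalg.det(m - dm)) / (2 * eps)
    assert np.allclose(num, adj_determinant(m, 1.0), atol=1e-4)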
RoboticExplorationLab/CGAC/dflex/dflex/vec2.h
#pragma once struct float2 { float x; float y; };
58
C
7.42857
13
0.586207
RoboticExplorationLab/CGAC/dflex/dflex/util.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import timeit import math import numpy as np import gc import torch import cProfile log_output = "" def log(s): print(s) global log_output log_output = log_output + s + "\n" # short hands def length(a): return np.linalg.norm(a) def length_sq(a): return np.dot(a, a) # NumPy has no normalize() method.. def normalize(v): norm = np.linalg.norm(v) if norm == 0.0: return v return v / norm def skew(v): return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) # math utils def quat(i, j, k, w): return np.array([i, j, k, w]) def quat_identity(): return np.array((0.0, 0.0, 0.0, 1.0)) def quat_inverse(q): return np.array((-q[0], -q[1], -q[2], q[3])) def quat_from_axis_angle(axis, angle): v = np.array(axis) half = angle * 0.5 w = math.cos(half) sin_theta_over_two = math.sin(half) v *= sin_theta_over_two return np.array((v[0], v[1], v[2], w)) # rotate a vector def quat_rotate(q, x): x = np.array(x) axis = np.array((q[0], q[1], q[2])) return x * (2.0 * q[3] * q[3] - 1.0) + np.cross(axis, x) * q[3] * 2.0 + axis * np.dot(axis, x) * 2.0 # multiply two quats def quat_multiply(a, b): return np.array((a[3] * b[0] + b[3] * a[0] + a[1] * b[2] - b[1] * a[2], a[3] * b[1] + b[3] * a[1] + a[2] * b[0] - b[2] * a[0], a[3] * b[2] + b[3] * a[2] + a[0] * b[1] - b[0] * a[1], a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2])) # convert to mat33 def quat_to_matrix(q): c1 = quat_rotate(q, np.array((1.0, 0.0, 0.0))) c2 = quat_rotate(q, np.array((0.0, 1.0, 0.0))) c3 = quat_rotate(q, np.array((0.0, 0.0, 1.0))) return np.array([c1, c2, c3]).T def quat_rpy(roll, pitch, yaw): cy = math.cos(yaw * 0.5) sy = math.sin(yaw * 0.5) cr = math.cos(roll * 0.5) sr = math.sin(roll * 0.5) cp = math.cos(pitch * 0.5) sp = math.sin(pitch * 0.5) w = (cy * cr * cp + sy * sr * sp) x = (cy * sr * cp - sy * cr * sp) y = (cy * cr * sp + sy * sr * cp) z = (sy * cr * cp - cy * sr * sp) return (x, y, z, w) def quat_from_matrix(m): tr = m[0, 0] + m[1, 1] + m[2, 2] h = 0.0 if(tr >= 0.0): h = math.sqrt(tr + 1.0) w = 0.5 * h h = 0.5 / h x = (m[2, 1] - m[1, 2]) * h y = (m[0, 2] - m[2, 0]) * h z = (m[1, 0] - m[0, 1]) * h else: i = 0; if(m[1, 1] > m[0, 0]): i = 1; if(m[2, 2] > m[i, i]): i = 2; if (i == 0): h = math.sqrt((m[0, 0] - (m[1, 1] + m[2, 2])) + 1.0) x = 0.5 * h h = 0.5 / h y = (m[0, 1] + m[1, 0]) * h z = (m[2, 0] + m[0, 2]) * h w = (m[2, 1] - m[1, 2]) * h elif (i == 1): h = math.sqrt((m[1, 1] - (m[2, 2] + m[0, 0])) + 1.0) y = 0.5 * h h = 0.5 / h z = (m[1, 2] + m[2, 1]) * h x = (m[0, 1] + m[1, 0]) * h w = (m[0, 2] - m[2, 0]) * h elif (i == 2): h = math.sqrt((m[2, 2] - (m[0, 0] + m[1, 1])) + 1.0) z = 0.5 * h h = 0.5 / h x = (m[2, 0] + m[0, 2]) * h y = (m[1, 2] + m[2, 1]) * h w = (m[1, 0] - m[0, 1]) * h return normalize(quat(x, y, z, w)) # rigid body transform def transform(x, r): return (np.array(x), np.array(r)) def transform_identity(): return (np.array((0.0, 0.0, 0.0)), quat_identity()) # se(3) -> SE(3), Park & Lynch pg. 
105, screw in [w, v] normalized form def transform_exp(s, angle): w = np.array(s[0:3]) v = np.array(s[3:6]) if (length(w) < 1.0): r = quat_identity() else: r = quat_from_axis_angle(w, angle) t = v * angle + (1.0 - math.cos(angle)) * np.cross(w, v) + (angle - math.sin(angle)) * np.cross(w, np.cross(w, v)) return (t, r) def transform_inverse(t): q_inv = quat_inverse(t[1]) return (-quat_rotate(q_inv, t[0]), q_inv) def transform_vector(t, v): return quat_rotate(t[1], v) def transform_point(t, p): return np.array(t[0]) + quat_rotate(t[1], p) def transform_multiply(t, u): return (quat_rotate(t[1], u[0]) + t[0], quat_multiply(t[1], u[1])) # flatten an array of transforms (p,q) format to a 7-vector def transform_flatten(t): return np.array([*t[0], *t[1]]) # expand a 7-vec to a tuple of arrays def transform_expand(t): return (np.array(t[0:3]), np.array(t[3:7])) # convert array of transforms to an array of 7-vecs def transform_flatten_list(xforms): exp = lambda t: transform_flatten(t) return list(map(exp, xforms)) def transform_expand_list(xforms): exp = lambda t: transform_expand(t) return list(map(exp, xforms)) def transform_inertia(m, I, p, q): R = quat_to_matrix(q) # Steiner's theorem (matrix products, not element-wise) return np.dot(np.dot(R, I), R.T) + m * (np.dot(p, p) * np.eye(3) - np.outer(p, p)) # spatial operators # AdT def spatial_adjoint(t): R = quat_to_matrix(t[1]) w = skew(t[0]) A = np.zeros((6, 6)) A[0:3, 0:3] = R A[3:6, 0:3] = np.dot(w, R) A[3:6, 3:6] = R return A # (AdT)^-T def spatial_adjoint_dual(t): R = quat_to_matrix(t[1]) w = skew(t[0]) A = np.zeros((6, 6)) A[0:3, 0:3] = R A[0:3, 3:6] = np.dot(w, R) A[3:6, 3:6] = R return A # AdT*s def transform_twist(t_ab, s_b): return np.dot(spatial_adjoint(t_ab), s_b) # AdT^{-T}*s def transform_wrench(t_ab, f_b): return np.dot(spatial_adjoint_dual(t_ab), f_b) # transform spatial inertia (6x6) in b frame to a frame def transform_spatial_inertia(t_ab, I_b): t_ba = transform_inverse(t_ab) # todo: write specialized method I_a = np.dot(np.dot(spatial_adjoint(t_ba).T, I_b), spatial_adjoint(t_ba)) return I_a def translate_twist(p_ab, s_b): w = s_b[0:3] v = np.cross(p_ab, s_b[0:3]) + s_b[3:6] return np.array((*w, *v)) def translate_wrench(p_ab, s_b): w = s_b[0:3] + np.cross(p_ab, s_b[3:6]) v = s_b[3:6] return np.array((*w, *v)) def spatial_vector(v=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)): return np.array(v) # ad_V pg. 289 L&P, pg. 25 Featherstone def spatial_cross(a, b): w = np.cross(a[0:3], b[0:3]) v = np.cross(a[3:6], b[0:3]) + np.cross(a[0:3], b[3:6]) return np.array((*w, *v)) # ad_V^T pg. 290 L&P, pg. 
25 Featherstone; note this does not include the sign flip in the definition def spatial_cross_dual(a, b): w = np.cross(a[0:3], b[0:3]) + np.cross(a[3:6], b[3:6]) v = np.cross(a[0:3], b[3:6]) return np.array((*w, *v)) def spatial_dot(a, b): return np.dot(a, b) def spatial_outer(a, b): return np.outer(a, b) def spatial_matrix(): return np.zeros((6, 6)) def spatial_matrix_from_inertia(I, m): G = spatial_matrix() G[0:3, 0:3] = I G[3, 3] = m G[4, 4] = m G[5, 5] = m return G # solves x = I^(-1)b def spatial_solve(I, b): return np.dot(np.linalg.inv(I), b) def rpy2quat(roll, pitch, yaw): cy = math.cos(yaw * 0.5) sy = math.sin(yaw * 0.5) cr = math.cos(roll * 0.5) sr = math.sin(roll * 0.5) cp = math.cos(pitch * 0.5) sp = math.sin(pitch * 0.5) w = cy * cr * cp + sy * sr * sp x = cy * sr * cp - sy * cr * sp y = cy * cr * sp + sy * sr * cp z = sy * cr * cp - cy * sr * sp return (x, y, z, w) # helper to retrieve body angular velocity from a twist v_s in se(3) def get_body_angular_velocity(v_s): return v_s[0:3] # helper to compute velocity of a point p on a body given its spatial twist v_s def get_body_linear_velocity(v_s, p): dpdt = v_s[3:6] + torch.cross(v_s[0:3], p) return dpdt # helper to build a body twist given the angular and linear velocity of # the center of mass specified in the world frame, returns the body # twist with respect to the origin (v_s) def get_body_twist(w_m, v_m, p_m): lin = v_m + torch.cross(p_m, w_m) return (*w_m, *lin) # timer utils class ScopedTimer: indent = -1 enabled = True def __init__(self, name, active=True, detailed=False): self.name = name self.active = active and self.enabled self.detailed = detailed def __enter__(self): if (self.active): self.start = timeit.default_timer() ScopedTimer.indent += 1 if (self.detailed): self.cp = cProfile.Profile() self.cp.clear() self.cp.enable() def __exit__(self, exc_type, exc_value, traceback): if (self.detailed): self.cp.disable() self.cp.print_stats(sort='tottime') if (self.active): elapsed = (timeit.default_timer() - self.start) * 1000.0 indent = "" for i in range(ScopedTimer.indent): indent += "\t" log("{}{} took {:.2f} ms".format(indent, self.name, elapsed)) ScopedTimer.indent -= 1 # code snippet for invoking cProfile # cp = cProfile.Profile() # cp.enable() # for i in range(1000): # self.state = self.integrator.forward(self.model, self.state, self.sim_dt) # cp.disable() # cp.print_stats(sort='tottime') # exit(0) # represent an edge between v0, v1 with connected faces f0, f1, and opposite vertex o0, and o1 # winding is such that first tri can be reconstructed as {v0, v1, o0}, and second tri as { v1, v0, o1 } class MeshEdge: def __init__(self, v0, v1, o0, o1, f0, f1): self.v0 = v0 # vertex 0 self.v1 = v1 # vertex 1 self.o0 = o0 # opposite vertex 0 self.o1 = o1 # opposite vertex 1 self.f0 = f0 # index of tri1 self.f1 = f1 # index of tri2 class MeshAdjacency: def __init__(self, indices, num_tris): # map edges (v0, v1) to faces (f0, f1) self.edges = {} self.indices = indices for index, tri in enumerate(indices): self.add_edge(tri[0], tri[1], tri[2], index) self.add_edge(tri[1], tri[2], tri[0], index) self.add_edge(tri[2], tri[0], tri[1], index) def add_edge(self, i0, i1, o, f): # index1, index2, index3, index of triangle key = (min(i0, i1), max(i0, i1)) edge = None if key in self.edges: edge = self.edges[key] if (edge.f1 != -1): print("Detected non-manifold edge") return else: # update other side of the edge edge.o1 = o edge.f1 = f else: # create new edge with opposite yet to be filled edge = MeshEdge(i0, i1, o, -1, f, -1) 
self.edges[key] = edge def opposite_vertex(self, edge): pass def mem_report(): '''Report the memory usage of the tensor.storage in pytorch Both on CPUs and GPUs are reported''' def _mem_report(tensors, mem_type): '''Print the selected tensors of type There are two major storage types in our major concern: - GPU: tensors transferred to CUDA devices - CPU: tensors remaining on the system memory (usually unimportant) Args: - tensors: the tensors of specified type - mem_type: 'CPU' or 'GPU' in current implementation ''' total_numel = 0 total_mem = 0 visited_data = [] for tensor in tensors: if tensor.is_sparse: continue # a data_ptr indicates a memory block allocated data_ptr = tensor.storage().data_ptr() if data_ptr in visited_data: continue visited_data.append(data_ptr) numel = tensor.storage().size() total_numel += numel element_size = tensor.storage().element_size() mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte total_mem += mem element_type = type(tensor).__name__ size = tuple(tensor.size()) # print('%s\t\t%s\t\t%.2f' % ( # element_type, # size, # mem) ) print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) ) gc.collect() LEN = 65 objects = gc.get_objects() #print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') ) tensors = [obj for obj in objects if torch.is_tensor(obj)] cuda_tensors = [t for t in tensors if t.is_cuda] host_tensors = [t for t in tensors if not t.is_cuda] _mem_report(cuda_tensors, 'GPU') _mem_report(host_tensors, 'CPU') print('='*LEN)
13,134
Python
23.056777
118
0.522994
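A short usage sketch for the quaternion helpers in util.py, assuming the module is importable as dflex.util; it round-trips an RPY rotation through matrix form (a unit quaternion and its negation encode the same rotation, so both signs are accepted):

    import numpy as np
    from dflex.util import quat_rpy, quat_to_matrix, quat_from_matrix

    q = np.array(quat_rpy(0.3, -0.2, 0.9))   # (x, y, z, w), arbitrary test angles
    m = quat_to_matrix(q)
    q2 = quat_from_matrix(m)
    assert np.allclose(q, q2, atol=1e-6) or np.allclose(q, -q2, atol=1e-6)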
RoboticExplorationLab/CGAC/dflex/dflex/adjoint.h
#pragma once #include <cmath> #include <stdio.h> #ifdef CPU #define CUDA_CALLABLE #define __device__ #define __host__ #define __constant__ #elif defined(CUDA) #define CUDA_CALLABLE __device__ #include <cuda.h> #include <cuda_runtime_api.h> #define check_cuda(code) { check_cuda_impl(code, __FILE__, __LINE__); } void check_cuda_impl(cudaError_t code, const char* file, int line) { if (code != cudaSuccess) { printf("CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line); } } void print_device() { int currentDevice; cudaError_t err = cudaGetDevice(&currentDevice); cudaDeviceProp props; err = cudaGetDeviceProperties(&props, currentDevice); if (err != cudaSuccess) printf("CUDA error: %d\n", err); else printf("%s\n", props.name); } #endif #ifdef _WIN32 #define __restrict__ __restrict #endif #define FP_CHECK 0 namespace df { template <typename T> CUDA_CALLABLE float cast_float(T x) { return (float)(x); } template <typename T> CUDA_CALLABLE int cast_int(T x) { return (int)(x); } template <typename T> CUDA_CALLABLE void adj_cast_float(T x, T& adj_x, float adj_ret) { adj_x += adj_ret; } template <typename T> CUDA_CALLABLE void adj_cast_int(T x, T& adj_x, int adj_ret) { adj_x += adj_ret; } // avoid namespacing of float type for casting to float type, this is to avoid wp::float(x), which is not valid in C++ #define float(x) cast_float(x) #define adj_float(x, adj_x, adj_ret) adj_cast_float(x, adj_x, adj_ret) #define int(x) cast_int(x) #define adj_int(x, adj_x, adj_ret) adj_cast_int(x, adj_x, adj_ret) #define kEps 0.0f // basic ops for integer types inline CUDA_CALLABLE int mul(int a, int b) { return a*b; } inline CUDA_CALLABLE int div(int a, int b) { return a/b; } inline CUDA_CALLABLE int add(int a, int b) { return a+b; } inline CUDA_CALLABLE int sub(int a, int b) { return a-b; } inline CUDA_CALLABLE int mod(int a, int b) { return a % b; } inline CUDA_CALLABLE void adj_mul(int a, int b, int& adj_a, int& adj_b, int adj_ret) { } inline CUDA_CALLABLE void adj_div(int a, int b, int& adj_a, int& adj_b, int adj_ret) { } inline CUDA_CALLABLE void adj_add(int a, int b, int& adj_a, int& adj_b, int adj_ret) { } inline CUDA_CALLABLE void adj_sub(int a, int b, int& adj_a, int& adj_b, int adj_ret) { } inline CUDA_CALLABLE void adj_mod(int a, int b, int& adj_a, int& adj_b, int adj_ret) { } // basic ops for float types inline CUDA_CALLABLE float mul(float a, float b) { return a*b; } inline CUDA_CALLABLE float div(float a, float b) { return a/b; } inline CUDA_CALLABLE float add(float a, float b) { return a+b; } inline CUDA_CALLABLE float sub(float a, float b) { return a-b; } inline CUDA_CALLABLE float min(float a, float b) { return a<b?a:b; } inline CUDA_CALLABLE float max(float a, float b) { return a>b?a:b; } inline CUDA_CALLABLE float leaky_min(float a, float b, float r) { return min(a, b); } inline CUDA_CALLABLE float leaky_max(float a, float b, float r) { return max(a, b); } inline CUDA_CALLABLE float clamp(float x, float a, float b) { return min(max(a, x), b); } inline CUDA_CALLABLE float step(float x) { return x < 0.0 ? 1.0 : 0.0; } inline CUDA_CALLABLE float sign(float x) { return x < 0.0 ? -1.0 : 1.0; } inline CUDA_CALLABLE float abs(float x) { return fabsf(x); } inline CUDA_CALLABLE float nonzero(float x) { return x == 0.0 ? 
0.0 : 1.0; } inline CUDA_CALLABLE float acos(float x) { return std::acos(std::min(std::max(x, -1.0f), 1.0f)); } inline CUDA_CALLABLE float sin(float x) { return std::sin(x); } inline CUDA_CALLABLE float cos(float x) { return std::cos(x); } inline CUDA_CALLABLE float sqrt(float x) { return std::sqrt(x); } inline CUDA_CALLABLE void adj_mul(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; } inline CUDA_CALLABLE void adj_div(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret/b; adj_b -= adj_ret*(a/b)/b; } inline CUDA_CALLABLE void adj_add(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } inline CUDA_CALLABLE void adj_sub(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } // inline CUDA_CALLABLE bool lt(float a, float b) { return a < b; } // inline CUDA_CALLABLE bool gt(float a, float b) { return a > b; } // inline CUDA_CALLABLE bool lte(float a, float b) { return a <= b; } // inline CUDA_CALLABLE bool gte(float a, float b) { return a >= b; } // inline CUDA_CALLABLE bool eq(float a, float b) { return a == b; } // inline CUDA_CALLABLE bool neq(float a, float b) { return a != b; } // inline CUDA_CALLABLE bool adj_lt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } // inline CUDA_CALLABLE bool adj_gt(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } // inline CUDA_CALLABLE bool adj_lte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } // inline CUDA_CALLABLE bool adj_gte(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } // inline CUDA_CALLABLE bool adj_eq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } // inline CUDA_CALLABLE bool adj_neq(float a, float b, float & adj_a, float & adj_b, bool & adj_ret) { } inline CUDA_CALLABLE void adj_min(float a, float b, float& adj_a, float& adj_b, float adj_ret) { if (a < b) adj_a += adj_ret; else adj_b += adj_ret; } inline CUDA_CALLABLE void adj_max(float a, float b, float& adj_a, float& adj_b, float adj_ret) { if (a > b) adj_a += adj_ret; else adj_b += adj_ret; } inline CUDA_CALLABLE void adj_leaky_min(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret) { if (a < b) adj_a += adj_ret; else { adj_a += r*adj_ret; adj_b += adj_ret; } } inline CUDA_CALLABLE void adj_leaky_max(float a, float b, float r, float& adj_a, float& adj_b, float& adj_r, float adj_ret) { if (a > b) adj_a += adj_ret; else { adj_a += r*adj_ret; adj_b += adj_ret; } } inline CUDA_CALLABLE void adj_clamp(float x, float a, float b, float& adj_x, float& adj_a, float& adj_b, float adj_ret) { if (x < a) adj_a += adj_ret; else if (x > b) adj_b += adj_ret; else adj_x += adj_ret; } inline CUDA_CALLABLE void adj_step(float x, float& adj_x, float adj_ret) { // nop } inline CUDA_CALLABLE void adj_nonzero(float x, float& adj_x, float adj_ret) { // nop } inline CUDA_CALLABLE void adj_sign(float x, float& adj_x, float adj_ret) { // nop } inline CUDA_CALLABLE void adj_abs(float x, float& adj_x, float adj_ret) { if (x < 0.0) adj_x -= adj_ret; else adj_x += adj_ret; } inline CUDA_CALLABLE void adj_acos(float x, float& adj_x, float adj_ret) { float d = sqrt(1.0-x*x); if (d > 0.0) adj_x -= (1.0/d)*adj_ret; } inline CUDA_CALLABLE void adj_sin(float x, float& adj_x, float adj_ret) { adj_x += std::cos(x)*adj_ret; } inline CUDA_CALLABLE void adj_cos(float x, float& adj_x, float adj_ret) { adj_x -= 
std::sin(x)*adj_ret; } inline CUDA_CALLABLE void adj_sqrt(float x, float& adj_x, float adj_ret) { adj_x += 0.5f*(1.0/std::sqrt(x))*adj_ret; } template <typename T> CUDA_CALLABLE inline T select(bool cond, const T& a, const T& b) { return cond?b:a; } template <typename T> CUDA_CALLABLE inline void adj_select(bool cond, const T& a, const T& b, bool& adj_cond, T& adj_a, T& adj_b, const T& adj_ret) { if (cond) adj_b += adj_ret; else adj_a += adj_ret; } // some helpful operator overloads (just for C++ use, these are not adjointed) template <typename T> CUDA_CALLABLE T& operator += (T& a, const T& b) { a = add(a, b); return a; } template <typename T> CUDA_CALLABLE T& operator -= (T& a, const T& b) { a = sub(a, b); return a; } template <typename T> CUDA_CALLABLE T operator*(const T& a, float s) { return mul(a, s); } template <typename T> CUDA_CALLABLE T operator/(const T& a, float s) { return div(a, s); } template <typename T> CUDA_CALLABLE T operator+(const T& a, const T& b) { return add(a, b); } template <typename T> CUDA_CALLABLE T operator-(const T& a, const T& b) { return sub(a, b); } // for single thread CPU only static int s_threadIdx; inline CUDA_CALLABLE int tid() { #ifdef CPU return s_threadIdx; #elif defined(CUDA) return blockDim.x * blockIdx.x + threadIdx.x; #endif } #include "vec2.h" #include "vec3.h" #include "mat22.h" #include "mat33.h" #include "matnn.h" #include "quat.h" #include "spatial.h" //-------------- template<typename T> inline CUDA_CALLABLE T load(T* buf, int index) { assert(buf); return buf[index]; } template<typename T> inline CUDA_CALLABLE void store(T* buf, int index, T value) { // allow NULL buffers for case where gradients are not required if (buf) { buf[index] = value; } } #ifdef CUDA template<typename T> inline __device__ void atomic_add(T* buf, T value) { atomicAdd(buf, value); } #endif template<typename T> inline __device__ void atomic_add(T* buf, int index, T value) { if (buf) { // CPU mode is sequential so just add #ifdef CPU buf[index] += value; #elif defined(CUDA) atomic_add(buf + index, value); #endif } } template<typename T> inline __device__ void atomic_sub(T* buf, int index, T value) { if (buf) { // CPU mode is sequential so just add #ifdef CPU buf[index] -= value; #elif defined(CUDA) atomic_add(buf + index, -value); #endif } } template <typename T> inline CUDA_CALLABLE void adj_load(T* buf, int index, T* adj_buf, int& adj_index, const T& adj_output) { // allow NULL buffers for case where gradients are not required if (adj_buf) { #ifdef CPU adj_buf[index] += adj_output; // does not need to be atomic if single-threaded #elif defined(CUDA) atomic_add(adj_buf, index, adj_output); #endif } } template <typename T> inline CUDA_CALLABLE void adj_store(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value) { adj_value += adj_buf[index]; // doesn't need to be atomic because it's used to load from a buffer onto the stack } template<typename T> inline CUDA_CALLABLE void adj_atomic_add(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value) { if (adj_buf) { // cannot be atomic because used locally adj_value += adj_buf[index]; } } template<typename T> inline CUDA_CALLABLE void adj_atomic_sub(T* buf, int index, T value, T* adj_buf, int& adj_index, T& adj_value) { if (adj_buf) { // cannot be atomic because used locally adj_value -= adj_buf[index]; } } //------------------------- // Texture methods inline CUDA_CALLABLE float sdf_sample(float3 x) { return 0.0; } inline CUDA_CALLABLE float3 sdf_grad(float3 x) { return float3(); } inline 
CUDA_CALLABLE void adj_sdf_sample(float3 x, float3& adj_x, float adj_ret) { } inline CUDA_CALLABLE void adj_sdf_grad(float3 x, float3& adj_x, float3& adj_ret) { } inline CUDA_CALLABLE void print(int i) { printf("%d\n", i); } inline CUDA_CALLABLE void print(float i) { printf("%f\n", i); } inline CUDA_CALLABLE void print(float3 i) { printf("%f %f %f\n", i.x, i.y, i.z); } inline CUDA_CALLABLE void print(quat i) { printf("%f %f %f %f\n", i.x, i.y, i.z, i.w); } inline CUDA_CALLABLE void print(mat22 m) { printf("%f %f\n%f %f\n", m.data[0][0], m.data[0][1], m.data[1][0], m.data[1][1]); } inline CUDA_CALLABLE void print(mat33 m) { printf("%f %f %f\n%f %f %f\n%f %f %f\n", m.data[0][0], m.data[0][1], m.data[0][2], m.data[1][0], m.data[1][1], m.data[1][2], m.data[2][0], m.data[2][1], m.data[2][2]); } inline CUDA_CALLABLE void print(spatial_transform t) { printf("(%f %f %f) (%f %f %f %f)\n", t.p.x, t.p.y, t.p.z, t.q.x, t.q.y, t.q.z, t.q.w); } inline CUDA_CALLABLE void print(spatial_vector v) { printf("(%f %f %f) (%f %f %f)\n", v.w.x, v.w.y, v.w.z, v.v.x, v.v.y, v.v.z); } inline CUDA_CALLABLE void print(spatial_matrix m) { printf("%f %f %f %f %f %f\n" "%f %f %f %f %f %f\n" "%f %f %f %f %f %f\n" "%f %f %f %f %f %f\n" "%f %f %f %f %f %f\n" "%f %f %f %f %f %f\n", m.data[0][0], m.data[0][1], m.data[0][2], m.data[0][3], m.data[0][4], m.data[0][5], m.data[1][0], m.data[1][1], m.data[1][2], m.data[1][3], m.data[1][4], m.data[1][5], m.data[2][0], m.data[2][1], m.data[2][2], m.data[2][3], m.data[2][4], m.data[2][5], m.data[3][0], m.data[3][1], m.data[3][2], m.data[3][3], m.data[3][4], m.data[3][5], m.data[4][0], m.data[4][1], m.data[4][2], m.data[4][3], m.data[4][4], m.data[4][5], m.data[5][0], m.data[5][1], m.data[5][2], m.data[5][3], m.data[5][4], m.data[5][5]); } inline CUDA_CALLABLE void adj_print(int i, int& adj_i) { printf("%d adj: %d\n", i, adj_i); } inline CUDA_CALLABLE void adj_print(float i, float& adj_i) { printf("%f adj: %f\n", i, adj_i); } inline CUDA_CALLABLE void adj_print(float3 i, float3& adj_i) { printf("%f %f %f adj: %f %f %f \n", i.x, i.y, i.z, adj_i.x, adj_i.y, adj_i.z); } inline CUDA_CALLABLE void adj_print(quat i, quat& adj_i) { } inline CUDA_CALLABLE void adj_print(mat22 m, mat22& adj_m) { } inline CUDA_CALLABLE void adj_print(mat33 m, mat33& adj_m) { } inline CUDA_CALLABLE void adj_print(spatial_transform t, spatial_transform& adj_t) {} inline CUDA_CALLABLE void adj_print(spatial_vector t, spatial_vector& adj_t) {} inline CUDA_CALLABLE void adj_print(spatial_matrix t, spatial_matrix& adj_t) {} } // namespace df
13,946
C
29.05819
144
0.608992
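All scalar adjoints in adjoint.h follow the same reverse-mode convention: adj_x accumulates (df/dx) * adj_ret and is never overwritten. A tiny standalone Python sketch of the adj_mul rule, checked against central differences (illustrative only):

    a, b, adj_ret = 1.7, -0.4, 2.0
    adj_a = adj_b = 0.0
    adj_a += b * adj_ret               # mirrors adj_mul: d(a*b)/da = b
    adj_b += a * adj_ret               # d(a*b)/db = a

    eps = 1e-6
    assert abs(((a + eps) * b - (a - eps) * b) / (2 * eps) * adj_ret - adj_a) < 1e-6
    assert abs((a * (b + eps) - a * (b - eps)) / (2 * eps) * adj_ret - adj_b) < 1e-6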
RoboticExplorationLab/CGAC/dflex/dflex/config.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import os no_grad = False # disable adjoint tracking check_grad = False # will perform numeric gradient checking after each launch verify_fp = False # verify inputs and outputs are finite after each launch
650
Python
49.076919
82
0.783077
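A usage sketch for these flags, assuming the package is importable; per the comments above they are consulted around each kernel launch, so set them before running a simulation:

    import dflex.config

    dflex.config.no_grad = True      # forward-only rollout, skip adjoint tracking
    dflex.config.verify_fp = True    # flag non-finite inputs/outputs after each launch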
RoboticExplorationLab/CGAC/dflex/dflex/quat.h
#pragma once struct quat { // imaginary part float x; float y; float z; // real part float w; inline CUDA_CALLABLE quat(float x=0.0f, float y=0.0f, float z=0.0f, float w=0.0f) : x(x), y(y), z(z), w(w) {} explicit inline CUDA_CALLABLE quat(const float3& v, float w=0.0f) : x(v.x), y(v.y), z(v.z), w(w) {} }; #ifdef CUDA inline __device__ void atomic_add(quat * addr, quat value) { atomicAdd(&(addr -> x), value.x); atomicAdd(&(addr -> y), value.y); atomicAdd(&(addr -> z), value.z); atomicAdd(&(addr -> w), value.w); } #endif inline CUDA_CALLABLE void adj_quat(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, quat adj_ret) { adj_x += adj_ret.x; adj_y += adj_ret.y; adj_z += adj_ret.z; adj_w += adj_ret.w; } inline CUDA_CALLABLE void adj_quat(const float3& v, float w, float3& adj_v, float& adj_w, quat adj_ret) { adj_v.x += adj_ret.x; adj_v.y += adj_ret.y; adj_v.z += adj_ret.z; adj_w += adj_ret.w; } // forward methods inline CUDA_CALLABLE quat quat_from_axis_angle(const float3& axis, float angle) { float half = angle*0.5f; float w = cosf(half); float sin_theta_over_two = sinf(half); float3 v = axis*sin_theta_over_two; return quat(v.x, v.y, v.z, w); } inline CUDA_CALLABLE quat quat_identity() { return quat(0.0f, 0.0f, 0.0f, 1.0f); } inline CUDA_CALLABLE float dot(const quat& a, const quat& b) { return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; } inline CUDA_CALLABLE float length(const quat& q) { return sqrtf(dot(q, q)); } inline CUDA_CALLABLE quat normalize(const quat& q) { float l = length(q); if (l > kEps) { float inv_l = 1.0f/l; return quat(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l); } else { return quat(0.0f, 0.0f, 0.0f, 1.0f); } } inline CUDA_CALLABLE quat inverse(const quat& q) { return quat(-q.x, -q.y, -q.z, q.w); } inline CUDA_CALLABLE quat add(const quat& a, const quat& b) { return quat(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); } inline CUDA_CALLABLE quat sub(const quat& a, const quat& b) { return quat(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);} inline CUDA_CALLABLE quat mul(const quat& a, const quat& b) { return quat(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z, a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x, a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y, a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z); } inline CUDA_CALLABLE quat mul(const quat& a, float s) { return quat(a.x*s, a.y*s, a.z*s, a.w*s); } inline CUDA_CALLABLE float3 rotate(const quat& q, const float3& x) { return x*(2.0f*q.w*q.w-1.0f) + cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f; } inline CUDA_CALLABLE float3 rotate_inv(const quat& q, const float3& x) { return x*(2.0f*q.w*q.w-1.0f) - cross(float3(&q.x), x)*q.w*2.0f + float3(&q.x)*dot(float3(&q.x), x)*2.0f; } inline CUDA_CALLABLE float index(const quat& a, int idx) { #if FP_CHECK if (idx < 0 || idx > 3) { printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__); exit(1); } #endif return (&a.x)[idx]; } inline CUDA_CALLABLE void adj_index(const quat& a, int idx, quat& adj_a, int & adj_idx, float & adj_ret) { #if FP_CHECK if (idx < 0 || idx > 3) { printf("quat index %d out of bounds at %s %d", idx, __FILE__, __LINE__); exit(1); } #endif (&adj_a.x)[idx] += adj_ret; } // backward methods inline CUDA_CALLABLE void adj_quat_from_axis_angle(const float3& axis, float angle, float3& adj_axis, float& adj_angle, const quat& adj_ret) { float3 v = float3(adj_ret.x, adj_ret.y, adj_ret.z); float s = sinf(angle*0.5f); float c = cosf(angle*0.5f); quat dqda = quat(axis.x*c, axis.y*c, axis.z*c, -s)*0.5f; adj_axis += v*s; adj_angle += dot(dqda, adj_ret); } 
inline CUDA_CALLABLE void adj_quat_identity(const quat& adj_ret) { // nop } inline CUDA_CALLABLE void adj_dot(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const float adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; } inline CUDA_CALLABLE void adj_length(const quat& a, quat& adj_a, const float adj_ret) { adj_a += normalize(a)*adj_ret; } inline CUDA_CALLABLE void adj_normalize(const quat& q, quat& adj_q, const quat& adj_ret) { float l = length(q); if (l > kEps) { float l_inv = 1.0f/l; adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret)); } } inline CUDA_CALLABLE void adj_inverse(const quat& q, quat& adj_q, const quat& adj_ret) { adj_q.x -= adj_ret.x; adj_q.y -= adj_ret.y; adj_q.z -= adj_ret.z; adj_q.w += adj_ret.w; } // inline void adj_normalize(const quat& a, quat& adj_a, const quat& adj_ret) // { // float d = length(a); // if (d > kEps) // { // float invd = 1.0f/d; // quat ahat = normalize(a); // adj_a += (adj_ret - ahat*(dot(ahat, adj_ret))*invd); // //if (!isfinite(adj_a)) // // printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z); // } // } inline CUDA_CALLABLE void adj_add(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } inline CUDA_CALLABLE void adj_sub(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } inline CUDA_CALLABLE void adj_mul(const quat& a, const quat& b, quat& adj_a, quat& adj_b, const quat& adj_ret) { // shorthand const quat& r = adj_ret; adj_a += quat(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y, b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x, b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w, b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z); adj_b += quat(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y, a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x, a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w, a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z); } inline CUDA_CALLABLE void adj_mul(const quat& a, float s, quat& adj_a, float& adj_s, const quat& adj_ret) { adj_a += adj_ret*s; adj_s += dot(a, adj_ret); } inline CUDA_CALLABLE void adj_rotate(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret) { const float3& r = adj_ret; { float t2 = p.z*q.z*2.0f; float t3 = p.y*q.w*2.0f; float t4 = p.x*q.w*2.0f; float t5 = p.x*q.x*2.0f; float t6 = p.y*q.y*2.0f; float t7 = p.z*q.y*2.0f; float t8 = p.x*q.z*2.0f; float t9 = p.x*q.y*2.0f; float t10 = p.y*q.x*2.0f; adj_q.x += r.z*(t3+t8)+r.x*(t2+t6+p.x*q.x*4.0f)+r.y*(t9-p.z*q.w*2.0f); adj_q.y += r.y*(t2+t5+p.y*q.y*4.0f)+r.x*(t10+p.z*q.w*2.0f)-r.z*(t4-p.y*q.z*2.0f); adj_q.z += r.y*(t4+t7)+r.z*(t5+t6+p.z*q.z*4.0f)-r.x*(t3-p.z*q.x*2.0f); adj_q.w += r.x*(t7+p.x*q.w*4.0f-p.y*q.z*2.0f)+r.y*(t8+p.y*q.w*4.0f-p.z*q.x*2.0f)+r.z*(-t9+t10+p.z*q.w*4.0f); } { float t2 = q.w*q.w; float t3 = t2*2.0f; float t4 = q.w*q.z*2.0f; float t5 = q.x*q.y*2.0f; float t6 = q.w*q.y*2.0f; float t7 = q.w*q.x*2.0f; float t8 = q.y*q.z*2.0f; adj_p.x += r.y*(t4+t5)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.z*(t6-q.x*q.z*2.0f); adj_p.y += r.z*(t7+t8)-r.x*(t4-t5)+r.y*(t3+(q.y*q.y)*2.0f-1.0f); adj_p.z += -r.y*(t7-t8)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.x*(t6+q.x*q.z*2.0f); } } inline CUDA_CALLABLE void adj_rotate_inv(const quat& q, const float3& p, quat& adj_q, float3& adj_p, const float3& adj_ret) { const float3& r = adj_ret; { float t2 = p.z*q.w*2.0f; float t3 = p.z*q.z*2.0f; float t4 = p.y*q.w*2.0f; float t5 = p.x*q.w*2.0f; float t6 = p.x*q.x*2.0f; float t7 = p.y*q.y*2.0f; 
float t8 = p.y*q.z*2.0f; float t9 = p.z*q.x*2.0f; float t10 = p.x*q.y*2.0f; adj_q.x += r.y*(t2+t10)+r.x*(t3+t7+p.x*q.x*4.0f)-r.z*(t4-p.x*q.z*2.0f); adj_q.y += r.z*(t5+t8)+r.y*(t3+t6+p.y*q.y*4.0f)-r.x*(t2-p.y*q.x*2.0f); adj_q.z += r.x*(t4+t9)+r.z*(t6+t7+p.z*q.z*4.0f)-r.y*(t5-p.z*q.y*2.0f); adj_q.w += r.x*(t8+p.x*q.w*4.0f-p.z*q.y*2.0f)+r.y*(t9+p.y*q.w*4.0f-p.x*q.z*2.0f)+r.z*(t10-p.y*q.x*2.0f+p.z*q.w*4.0f); } { float t2 = q.w*q.w; float t3 = t2*2.0f; float t4 = q.w*q.z*2.0f; float t5 = q.w*q.y*2.0f; float t6 = q.x*q.z*2.0f; float t7 = q.w*q.x*2.0f; adj_p.x += r.z*(t5+t6)+r.x*(t3+(q.x*q.x)*2.0f-1.0f)-r.y*(t4-q.x*q.y*2.0f); adj_p.y += r.y*(t3+(q.y*q.y)*2.0f-1.0f)+r.x*(t4+q.x*q.y*2.0f)-r.z*(t7-q.y*q.z*2.0f); adj_p.z += -r.x*(t5-t6)+r.z*(t3+(q.z*q.z)*2.0f-1.0f)+r.y*(t7+q.y*q.z*2.0f); } }
8,985
C
26.993769
184
0.522315
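adj_quat_from_axis_angle above uses dq/dangle = 0.5 * (axis*cos(angle/2), -sin(angle/2)). A standalone numpy check of that derivative against central differences (illustrative only):

    import numpy as np

    def quat_from_axis_angle(axis, angle):     # numpy mirror of the header function
        h = 0.5 * angle
        return np.array([*(axis * np.sin(h)), np.cos(h)])

    axis = np.array([0.0, 1.0, 0.0])
    angle = 0.7
    eps = 1e-6
    num = (quat_from_axis_angle(axis, angle + eps)
           - quat_from_axis_angle(axis, angle - eps)) / (2 * eps)
    h = 0.5 * angle
    dqda = 0.5 * np.array([*(axis * np.cos(h)), -np.sin(h)])   # as in the adjoint
    assert np.allclose(num, dqda, atol=1e-6)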
RoboticExplorationLab/CGAC/dflex/dflex/vec3.h
#pragma once struct float3 { float x; float y; float z; inline CUDA_CALLABLE float3(float x=0.0f, float y=0.0f, float z=0.0f) : x(x), y(y), z(z) {} explicit inline CUDA_CALLABLE float3(const float* p) : x(p[0]), y(p[1]), z(p[2]) {} }; //-------------- // float3 methods inline CUDA_CALLABLE float3 operator - (float3 a) { return { -a.x, -a.y, -a.z }; } inline CUDA_CALLABLE float3 mul(float3 a, float s) { return { a.x*s, a.y*s, a.z*s }; } inline CUDA_CALLABLE float3 div(float3 a, float s) { return { a.x/s, a.y/s, a.z/s }; } inline CUDA_CALLABLE float3 add(float3 a, float3 b) { return { a.x+b.x, a.y+b.y, a.z+b.z }; } inline CUDA_CALLABLE float3 add(float3 a, float s) { return { a.x + s, a.y + s, a.z + s }; } inline CUDA_CALLABLE float3 sub(float3 a, float3 b) { return { a.x-b.x, a.y-b.y, a.z-b.z }; } inline CUDA_CALLABLE float dot(float3 a, float3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; } inline CUDA_CALLABLE float3 cross(float3 a, float3 b) { float3 c; c.x = a.y*b.z - a.z*b.y; c.y = a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } inline CUDA_CALLABLE float index(const float3 & a, int idx) { #if FP_CHECK if (idx < 0 || idx > 2) { printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); exit(1); } #endif return (&a.x)[idx]; } inline CUDA_CALLABLE void adj_index(const float3 & a, int idx, float3 & adj_a, int & adj_idx, float & adj_ret) { #if FP_CHECK if (idx < 0 || idx > 2) { printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); exit(1); } #endif (&adj_a.x)[idx] += adj_ret; } inline CUDA_CALLABLE float length(float3 a) { return sqrtf(dot(a, a)); } inline CUDA_CALLABLE float3 normalize(float3 a) { float l = length(a); if (l > kEps) return div(a,l); else return float3(); } inline bool CUDA_CALLABLE isfinite(float3 x) { return std::isfinite(x.x) && std::isfinite(x.y) && std::isfinite(x.z); } // adjoint float3 constructor inline CUDA_CALLABLE void adj_float3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const float3& adj_ret) { adj_x += adj_ret.x; adj_y += adj_ret.y; adj_z += adj_ret.z; } inline CUDA_CALLABLE void adj_mul(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret) { adj_a.x += s*adj_ret.x; adj_a.y += s*adj_ret.y; adj_a.z += s*adj_ret.z; adj_s += dot(a, adj_ret); #if FP_CHECK if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret)) printf("adj_mul((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z); #endif } inline CUDA_CALLABLE void adj_div(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret) { adj_s += dot(- a / (s * s), adj_ret); // - a / s^2 adj_a.x += adj_ret.x / s; adj_a.y += adj_ret.y / s; adj_a.z += adj_ret.z / s; #if FP_CHECK if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret)) printf("adj_div((%f %f %f), %f, (%f %f %f), %f, (%f %f %f)\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z); #endif } inline CUDA_CALLABLE void adj_add(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } inline CUDA_CALLABLE void adj_add(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret) { adj_a += adj_ret; adj_s += adj_ret.x + adj_ret.y + adj_ret.z; } inline CUDA_CALLABLE void adj_sub(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } inline CUDA_CALLABLE void 
adj_dot(float3 a, float3 b, float3& adj_a, float3& adj_b, const float adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; #if FP_CHECK if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret)) printf("adj_dot((%f %f %f), (%f %f %f), (%f %f %f), (%f %f %f), %f)\n", a.x, a.y, a.z, b.x, b.y, b.z, adj_a.x, adj_a.y, adj_a.z, adj_b.x, adj_b.y, adj_b.z, adj_ret); #endif } inline CUDA_CALLABLE void adj_cross(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret) { // todo: sign check adj_a += cross(b, adj_ret); adj_b -= cross(a, adj_ret); } #ifdef CUDA inline __device__ void atomic_add(float3 * addr, float3 value) { // *addr += value; atomicAdd(&(addr -> x), value.x); atomicAdd(&(addr -> y), value.y); atomicAdd(&(addr -> z), value.z); } #endif inline CUDA_CALLABLE void adj_length(float3 a, float3& adj_a, const float adj_ret) { adj_a += normalize(a)*adj_ret; #if FP_CHECK if (!isfinite(adj_a)) printf("%s:%d - adj_length((%f %f %f), (%f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret); #endif } inline CUDA_CALLABLE void adj_normalize(float3 a, float3& adj_a, const float3& adj_ret) { float d = length(a); if (d > kEps) { float invd = 1.0f/d; float3 ahat = normalize(a); adj_a += (adj_ret*invd - ahat*(dot(ahat, adj_ret))*invd); #if FP_CHECK if (!isfinite(adj_a)) printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z); #endif } }
5,542
C
23.745536
179
0.560628
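adj_normalize above applies the vector-Jacobian product of a/|a|, whose Jacobian is (I - ahat*ahat^T)/|a|. A standalone numpy sketch verifying the formula numerically (illustrative only):

    import numpy as np

    a = np.array([0.3, -1.2, 2.0])
    adj_ret = np.array([1.0, 0.5, -0.7])
    d = np.linalg.norm(a)
    ahat = a / d
    adj_a = (adj_ret - ahat * np.dot(ahat, adj_ret)) / d   # mirrors adj_normalize

    eps = 1e-6
    num = np.zeros(3)
    for i in range(3):
        da = np.zeros(3)
        da[i] = eps
        fp = (a + da) / np.linalg.norm(a + da)
        fm = (a - da) / np.linalg.norm(a - da)
        num[i] = np.dot((fp - fm) / (2 * eps), adj_ret)
    assert np.allclose(num, adj_a, atol=1e-5)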
RoboticExplorationLab/CGAC/dflex/dflex/sim.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. """This module contains time-integration objects for simulating models + state forward in time. """ import math import torch import numpy as np import dflex.util import dflex.adjoint as df import dflex.config from dflex.model import * import time # Todo #----- # # [x] Spring model # [x] 2D FEM model # [x] 3D FEM model # [x] Cloth # [x] Wind/Drag model # [x] Bending model # [x] Triangle collision # [x] Rigid body model # [x] Rigid shape contact # [x] Sphere # [x] Capsule # [x] Box # [ ] Convex # [ ] SDF # [ ] Implicit solver # [x] USD import # [x] USD export # ----- # externally compiled kernels module (C++/CUDA code with PyBind entry points) kernels = None @df.func def test(c: float): x = 1.0 y = float(2) z = int(3.0) print(y) print(z) if (c < 3.0): x = 2.0 return x*6.0 def kernel_init(): global kernels kernels = df.compile() @df.kernel def integrate_particles(x: df.tensor(df.float3), v: df.tensor(df.float3), f: df.tensor(df.float3), w: df.tensor(float), gravity: df.tensor(df.float3), dt: float, x_new: df.tensor(df.float3), v_new: df.tensor(df.float3)): tid = df.tid() x0 = df.load(x, tid) v0 = df.load(v, tid) f0 = df.load(f, tid) inv_mass = df.load(w, tid) g = df.load(gravity, 0) # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt v1 = v0 + (f0 * inv_mass + g * df.step(0.0 - inv_mass)) * dt x1 = x0 + v1 * dt df.store(x_new, tid, x1) df.store(v_new, tid, v1) # semi-implicit Euler integration @df.kernel def integrate_rigids(rigid_x: df.tensor(df.float3), rigid_r: df.tensor(df.quat), rigid_v: df.tensor(df.float3), rigid_w: df.tensor(df.float3), rigid_f: df.tensor(df.float3), rigid_t: df.tensor(df.float3), inv_m: df.tensor(float), inv_I: df.tensor(df.mat33), gravity: df.tensor(df.float3), dt: float, rigid_x_new: df.tensor(df.float3), rigid_r_new: df.tensor(df.quat), rigid_v_new: df.tensor(df.float3), rigid_w_new: df.tensor(df.float3)): tid = df.tid() # positions x0 = df.load(rigid_x, tid) r0 = df.load(rigid_r, tid) # velocities v0 = df.load(rigid_v, tid) w0 = df.load(rigid_w, tid) # angular velocity # forces f0 = df.load(rigid_f, tid) t0 = df.load(rigid_t, tid) # masses inv_mass = df.load(inv_m, tid) # 1 / mass inv_inertia = df.load(inv_I, tid) # inverse of 3x3 inertia matrix g = df.load(gravity, 0) # linear part v1 = v0 + (f0 * inv_mass + g * df.nonzero(inv_mass)) * dt # linear integral (linear position/velocity) x1 = x0 + v1 * dt # angular part # so reverse multiplication by r0 takes you from global coordinates into local coordinates # because it's a covector and thus gets pulled back rather than pushed forward wb = df.rotate_inv(r0, w0) # angular integral (angular velocity and rotation), rotate into object reference frame tb = df.rotate_inv(r0, t0) # also rotate torques into local coordinates # I^{-1} torque = angular acceleration and inv_inertia is always going to be in the object frame. # So we need to rotate into that frame, and then back into global. 
w1 = df.rotate(r0, wb + inv_inertia * tb * dt) # I^-1 * torque * dt., then go back into global coordinates r1 = df.normalize(r0 + df.quat(w1, 0.0) * r0 * 0.5 * dt) # rotate around w1 by dt df.store(rigid_x_new, tid, x1) df.store(rigid_r_new, tid, r1) df.store(rigid_v_new, tid, v1) df.store(rigid_w_new, tid, w1) @df.kernel def eval_springs(x: df.tensor(df.float3), v: df.tensor(df.float3), spring_indices: df.tensor(int), spring_rest_lengths: df.tensor(float), spring_stiffness: df.tensor(float), spring_damping: df.tensor(float), f: df.tensor(df.float3)): tid = df.tid() i = df.load(spring_indices, tid * 2 + 0) j = df.load(spring_indices, tid * 2 + 1) ke = df.load(spring_stiffness, tid) kd = df.load(spring_damping, tid) rest = df.load(spring_rest_lengths, tid) xi = df.load(x, i) xj = df.load(x, j) vi = df.load(v, i) vj = df.load(v, j) xij = xi - xj vij = vi - vj l = length(xij) l_inv = 1.0 / l # normalized spring direction dir = xij * l_inv c = l - rest dcdt = dot(dir, vij) # damping based on relative velocity. fs = dir * (ke * c + kd * dcdt) df.atomic_sub(f, i, fs) df.atomic_add(f, j, fs) @df.kernel def eval_triangles(x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), pose: df.tensor(df.mat22), activation: df.tensor(float), k_mu: float, k_lambda: float, k_damp: float, k_drag: float, k_lift: float, f: df.tensor(df.float3)): tid = df.tid() i = df.load(indices, tid * 3 + 0) j = df.load(indices, tid * 3 + 1) k = df.load(indices, tid * 3 + 2) p = df.load(x, i) # point zero q = df.load(x, j) # point one r = df.load(x, k) # point two vp = df.load(v, i) # vel zero vq = df.load(v, j) # vel one vr = df.load(v, k) # vel two qp = q - p # barycentric coordinates (centered at p) rp = r - p Dm = df.load(pose, tid) inv_rest_area = df.determinant(Dm) * 2.0 # 1 / det(A) = det(A^-1) rest_area = 1.0 / inv_rest_area # scale stiffness coefficients to account for area k_mu = k_mu * rest_area k_lambda = k_lambda * rest_area k_damp = k_damp * rest_area # F = Xs*Xm^-1 f1 = qp * Dm[0, 0] + rp * Dm[1, 0] f2 = qp * Dm[0, 1] + rp * Dm[1, 1] #----------------------------- # St. 
Venant-Kirchhoff # # Green strain, F'*F-I # e00 = dot(f1, f1) - 1.0 # e10 = dot(f2, f1) # e01 = dot(f1, f2) # e11 = dot(f2, f2) - 1.0 # E = df.mat22(e00, e01, # e10, e11) # # local forces (deviatoric part) # T = df.mul(E, df.transpose(Dm)) # # spatial forces, F*T # fq = (f1*T[0,0] + f2*T[1,0])*k_mu*2.0 # fr = (f1*T[0,1] + f2*T[1,1])*k_mu*2.0 # alpha = 1.0 #----------------------------- # Baraff & Witkin, note this model is not isotropic # c1 = length(f1) - 1.0 # c2 = length(f2) - 1.0 # f1 = normalize(f1)*c1*k1 # f2 = normalize(f2)*c2*k1 # fq = f1*Dm[0,0] + f2*Dm[0,1] # fr = f1*Dm[1,0] + f2*Dm[1,1] #----------------------------- # Neo-Hookean (with rest stability) # force = mu*F*Dm' fq = (f1 * Dm[0, 0] + f2 * Dm[0, 1]) * k_mu fr = (f1 * Dm[1, 0] + f2 * Dm[1, 1]) * k_mu alpha = 1.0 + k_mu / k_lambda #----------------------------- # Area Preservation n = df.cross(qp, rp) area = df.length(n) * 0.5 # actuation act = df.load(activation, tid) # J-alpha c = area * inv_rest_area - alpha + act # dJdx n = df.normalize(n) dcdq = df.cross(rp, n) * inv_rest_area * 0.5 dcdr = df.cross(n, qp) * inv_rest_area * 0.5 f_area = k_lambda * c #----------------------------- # Area Damping dcdt = dot(dcdq, vq) + dot(dcdr, vr) - dot(dcdq + dcdr, vp) f_damp = k_damp * dcdt fq = fq + dcdq * (f_area + f_damp) fr = fr + dcdr * (f_area + f_damp) fp = fq + fr #----------------------------- # Lift + Drag vmid = (vp + vr + vq) * 0.3333 vdir = df.normalize(vmid) f_drag = vmid * (k_drag * area * df.abs(df.dot(n, vmid))) f_lift = n * (k_lift * area * (1.57079 - df.acos(df.dot(n, vdir)))) * dot(vmid, vmid) # note reversed sign due to atomic_add below.. need to write the unary op - fp = fp - f_drag - f_lift fq = fq + f_drag + f_lift fr = fr + f_drag + f_lift # apply forces df.atomic_add(f, i, fp) df.atomic_sub(f, j, fq) df.atomic_sub(f, k, fr) @df.func def triangle_closest_point_barycentric(a: df.float3, b: df.float3, c: df.float3, p: df.float3): ab = b - a ac = c - a ap = p - a d1 = df.dot(ab, ap) d2 = df.dot(ac, ap) if (d1 <= 0.0 and d2 <= 0.0): return float3(1.0, 0.0, 0.0) bp = p - b d3 = df.dot(ab, bp) d4 = df.dot(ac, bp) if (d3 >= 0.0 and d4 <= d3): return float3(0.0, 1.0, 0.0) vc = d1 * d4 - d3 * d2 v = d1 / (d1 - d3) if (vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0): return float3(1.0 - v, v, 0.0) cp = p - c d5 = dot(ab, cp) d6 = dot(ac, cp) if (d6 >= 0.0 and d5 <= d6): return float3(0.0, 0.0, 1.0) vb = d5 * d2 - d1 * d6 w = d2 / (d2 - d6) if (vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0): return float3(1.0 - w, 0.0, w) va = d3 * d6 - d5 * d4 w = (d4 - d3) / ((d4 - d3) + (d5 - d6)) if (va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0): return float3(0.0, w, 1.0 - w) denom = 1.0 / (va + vb + vc) v = vb * denom w = vc * denom return float3(1.0 - v - w, v, w) @df.kernel def eval_triangles_contact( # idx : df.tensor(int), # list of indices for colliding particles num_particles: int, # size of particles x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), pose: df.tensor(df.mat22), activation: df.tensor(float), k_mu: float, k_lambda: float, k_damp: float, k_drag: float, k_lift: float, f: df.tensor(df.float3)): tid = df.tid() face_no = tid // num_particles # which face particle_no = tid % num_particles # which particle # index = df.load(idx, tid) pos = df.load(x, particle_no) # at the moment, just one particle # vel0 = df.load(v, 0) i = df.load(indices, face_no * 3 + 0) j = df.load(indices, face_no * 3 + 1) k = df.load(indices, face_no * 3 + 2) if (i == particle_no or j == particle_no or k == particle_no): return p = 
df.load(x, i) # point zero q = df.load(x, j) # point one r = df.load(x, k) # point two # vp = df.load(v, i) # vel zero # vq = df.load(v, j) # vel one # vr = df.load(v, k) # vel two # qp = q-p # barycentric coordinates (centered at p) # rp = r-p bary = triangle_closest_point_barycentric(p, q, r, pos) closest = p * bary[0] + q * bary[1] + r * bary[2] diff = pos - closest dist = df.dot(diff, diff) n = df.normalize(diff) c = df.min(dist - 0.01, 0.0) # 0 unless within 0.01 of surface #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0) fn = n * c * 1e5 df.atomic_sub(f, particle_no, fn) # # apply forces (could do - f / 3 here) df.atomic_add(f, i, fn * bary[0]) df.atomic_add(f, j, fn * bary[1]) df.atomic_add(f, k, fn * bary[2]) @df.kernel def eval_triangles_rigid_contacts( num_particles: int, # number of particles (size of contact_point) x: df.tensor(df.float3), # position of particles v: df.tensor(df.float3), indices: df.tensor(int), # triangle indices rigid_x: df.tensor(df.float3), # rigid body positions rigid_r: df.tensor(df.quat), rigid_v: df.tensor(df.float3), rigid_w: df.tensor(df.float3), contact_body: df.tensor(int), contact_point: df.tensor(df.float3), # position of contact points relative to body contact_dist: df.tensor(float), contact_mat: df.tensor(int), materials: df.tensor(float), # rigid_f : df.tensor(df.float3), # rigid_t : df.tensor(df.float3), tri_f: df.tensor(df.float3)): tid = df.tid() face_no = tid // num_particles # which face particle_no = tid % num_particles # which particle # ----------------------- # load rigid body point c_body = df.load(contact_body, particle_no) c_point = df.load(contact_point, particle_no) c_dist = df.load(contact_dist, particle_no) c_mat = df.load(contact_mat, particle_no) # hard coded surface parameter tensor layout (ke, kd, kf, mu) ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient kd = df.load(materials, c_mat * 4 + 1) # damping coefficient kf = df.load(materials, c_mat * 4 + 2) # friction coefficient mu = df.load(materials, c_mat * 4 + 3) # coulomb friction x0 = df.load(rigid_x, c_body) # position of colliding body r0 = df.load(rigid_r, c_body) # orientation of colliding body v0 = df.load(rigid_v, c_body) w0 = df.load(rigid_w, c_body) # transform point to world space pos = x0 + df.rotate(r0, c_point) # use x0 as center, everything is offset from center of mass # moment arm r = pos - x0 # basically just c_point in the new coordinates rhat = df.normalize(r) pos = pos + rhat * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule # contact point velocity dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point. 
# ----------------------- # load triangle i = df.load(indices, face_no * 3 + 0) j = df.load(indices, face_no * 3 + 1) k = df.load(indices, face_no * 3 + 2) p = df.load(x, i) # point zero q = df.load(x, j) # point one r = df.load(x, k) # point two vp = df.load(v, i) # vel zero vq = df.load(v, j) # vel one vr = df.load(v, k) # vel two bary = triangle_closest_point_barycentric(p, q, r, pos) closest = p * bary[0] + q * bary[1] + r * bary[2] diff = pos - closest # vector from tri to point dist = df.dot(diff, diff) # squared distance n = df.normalize(diff) # points into the object c = df.min(dist - 0.05, 0.0) # 0 unless within 0.05 of surface #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0) # fn = n * c * 1e6 # points towards cloth (both n and c are negative) # df.atomic_sub(tri_f, particle_no, fn) fn = c * ke # normal force (restitution coefficient * how far inside for ground) (negative) vtri = vp * bary[0] + vq * bary[1] + vr * bary[2] # bad approximation for centroid velocity vrel = vtri - dpdt vn = dot(n, vrel) # velocity component of rigid in negative normal direction vt = vrel - n * vn # velocity component not in normal direction # contact damping fd = 0.0 - df.max(vn, 0.0) * kd * df.step(c) # again, negative, into the ground # # viscous friction # ft = vt*kf # Coulomb friction (box) lower = mu * (fn + fd) upper = 0.0 - lower # workaround because no unary ops yet nx = cross(n, float3(0.0, 0.0, 1.0)) # basis vectors for tangent nz = cross(n, float3(1.0, 0.0, 0.0)) vx = df.clamp(dot(nx * kf, vt), lower, upper) vz = df.clamp(dot(nz * kf, vt), lower, upper) ft = (nx * vx + nz * vz) * (0.0 - df.step(c)) # df.float3(vx, 0.0, vz)*df.step(c) # # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0) # #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) f_total = n * (fn + fd) + ft df.atomic_add(tri_f, i, f_total * bary[0]) df.atomic_add(tri_f, j, f_total * bary[1]) df.atomic_add(tri_f, k, f_total * bary[2]) @df.kernel def eval_bending( x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), rest: df.tensor(float), ke: float, kd: float, f: df.tensor(df.float3)): tid = df.tid() i = df.load(indices, tid * 4 + 0) j = df.load(indices, tid * 4 + 1) k = df.load(indices, tid * 4 + 2) l = df.load(indices, tid * 4 + 3) rest_angle = df.load(rest, tid) x1 = df.load(x, i) x2 = df.load(x, j) x3 = df.load(x, k) x4 = df.load(x, l) v1 = df.load(v, i) v2 = df.load(v, j) v3 = df.load(v, k) v4 = df.load(v, l) n1 = df.cross(x3 - x1, x4 - x1) # normal to face 1 n2 = df.cross(x4 - x2, x3 - x2) # normal to face 2 n1_length = df.length(n1) n2_length = df.length(n2) rcp_n1 = 1.0 / n1_length rcp_n2 = 1.0 / n2_length cos_theta = df.dot(n1, n2) * rcp_n1 * rcp_n2 n1 = n1 * rcp_n1 * rcp_n1 n2 = n2 * rcp_n2 * rcp_n2 e = x4 - x3 e_hat = df.normalize(e) e_length = df.length(e) s = df.sign(df.dot(df.cross(n2, n1), e_hat)) angle = df.acos(cos_theta) * s d1 = n1 * e_length d2 = n2 * e_length d3 = n1 * df.dot(x1 - x4, e_hat) + n2 * df.dot(x2 - x4, e_hat) d4 = n1 * df.dot(x3 - x1, e_hat) + n2 * df.dot(x3 - x2, e_hat) # elastic f_elastic = ke * (angle - rest_angle) # damping f_damp = kd * (df.dot(d1, v1) + df.dot(d2, v2) + df.dot(d3, v3) + df.dot(d4, v4)) # total force, proportional to edge length f_total = 0.0 - e_length * (f_elastic + f_damp) df.atomic_add(f, i, d1 * f_total) df.atomic_add(f, j, d2 * f_total) df.atomic_add(f, k, d3 * f_total) df.atomic_add(f, l, d4 * f_total) @df.kernel def eval_tetrahedra(x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), 
pose: df.tensor(df.mat33), activation: df.tensor(float), materials: df.tensor(float), f: df.tensor(df.float3)): tid = df.tid() i = df.load(indices, tid * 4 + 0) j = df.load(indices, tid * 4 + 1) k = df.load(indices, tid * 4 + 2) l = df.load(indices, tid * 4 + 3) act = df.load(activation, tid) k_mu = df.load(materials, tid * 3 + 0) k_lambda = df.load(materials, tid * 3 + 1) k_damp = df.load(materials, tid * 3 + 2) x0 = df.load(x, i) x1 = df.load(x, j) x2 = df.load(x, k) x3 = df.load(x, l) v0 = df.load(v, i) v1 = df.load(v, j) v2 = df.load(v, k) v3 = df.load(v, l) x10 = x1 - x0 x20 = x2 - x0 x30 = x3 - x0 v10 = v1 - v0 v20 = v2 - v0 v30 = v3 - v0 Ds = df.mat33(x10, x20, x30) Dm = df.load(pose, tid) inv_rest_volume = df.determinant(Dm) * 6.0 rest_volume = 1.0 / inv_rest_volume alpha = 1.0 + k_mu / k_lambda - k_mu / (4.0 * k_lambda) # scale stiffness coefficients to account for area k_mu = k_mu * rest_volume k_lambda = k_lambda * rest_volume k_damp = k_damp * rest_volume # F = Xs*Xm^-1 F = Ds * Dm dFdt = df.mat33(v10, v20, v30) * Dm col1 = df.float3(F[0, 0], F[1, 0], F[2, 0]) col2 = df.float3(F[0, 1], F[1, 1], F[2, 1]) col3 = df.float3(F[0, 2], F[1, 2], F[2, 2]) #----------------------------- # Neo-Hookean (with rest stability [Smith et al 2018]) Ic = dot(col1, col1) + dot(col2, col2) + dot(col3, col3) # deviatoric part P = F * k_mu * (1.0 - 1.0 / (Ic + 1.0)) + dFdt * k_damp H = P * df.transpose(Dm) f1 = df.float3(H[0, 0], H[1, 0], H[2, 0]) f2 = df.float3(H[0, 1], H[1, 1], H[2, 1]) f3 = df.float3(H[0, 2], H[1, 2], H[2, 2]) #----------------------------- # C_spherical # r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3)) # r_s_inv = 1.0/r_s # C = r_s - df.sqrt(3.0) # dCdx = F*df.transpose(Dm)*r_s_inv # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0]) # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1]) # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2]) # f1 = grad1*C*k_mu # f2 = grad2*C*k_mu # f3 = grad3*C*k_mu #---------------------------- # C_D # r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3)) # C = r_s*r_s - 3.0 # dCdx = F*df.transpose(Dm)*2.0 # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0]) # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1]) # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2]) # f1 = grad1*C*k_mu # f2 = grad2*C*k_mu # f3 = grad3*C*k_mu # hydrostatic part J = df.determinant(F) #print(J) s = inv_rest_volume / 6.0 dJdx1 = df.cross(x20, x30) * s dJdx2 = df.cross(x30, x10) * s dJdx3 = df.cross(x10, x20) * s f_volume = (J - alpha + act) * k_lambda f_damp = (df.dot(dJdx1, v1) + df.dot(dJdx2, v2) + df.dot(dJdx3, v3)) * k_damp f_total = f_volume + f_damp f1 = f1 + dJdx1 * f_total f2 = f2 + dJdx2 * f_total f3 = f3 + dJdx3 * f_total f0 = (f1 + f2 + f3) * (0.0 - 1.0) # apply forces df.atomic_sub(f, i, f0) df.atomic_sub(f, j, f1) df.atomic_sub(f, k, f2) df.atomic_sub(f, l, f3) @df.kernel def eval_contacts(x: df.tensor(df.float3), v: df.tensor(df.float3), ke: float, kd: float, kf: float, mu: float, f: df.tensor(df.float3)): tid = df.tid() # this just handles contact of particles with the ground plane, nothing else. x0 = df.load(x, tid) v0 = df.load(v, tid) n = float3(0.0, 1.0, 0.0) # why is the normal always y? 
Ground is always (0, 1, 0) normal c = df.min(dot(n, x0) - 0.01, 0.0) # 0 unless within 0.01 of surface #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0) vn = dot(n, v0) vt = v0 - n * vn fn = n * c * ke # contact damping fd = n * df.min(vn, 0.0) * kd # viscous friction #ft = vt*kf # Coulomb friction (box) lower = mu * c * ke upper = 0.0 - lower vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper) vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper) ft = df.float3(vx, 0.0, vz) # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0) #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) ftotal = fn + (fd + ft) * df.step(c) df.atomic_sub(f, tid, ftotal) @df.func def sphere_sdf(center: df.float3, radius: float, p: df.float3): return df.length(p-center) - radius @df.func def sphere_sdf_grad(center: df.float3, radius: float, p: df.float3): return df.normalize(p-center) @df.func def box_sdf(upper: df.float3, p: df.float3): # adapted from https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm qx = abs(p[0])-upper[0] qy = abs(p[1])-upper[1] qz = abs(p[2])-upper[2] e = df.float3(df.max(qx, 0.0), df.max(qy, 0.0), df.max(qz, 0.0)) return df.length(e) + df.min(df.max(qx, df.max(qy, qz)), 0.0) @df.func def box_sdf_grad(upper: df.float3, p: df.float3): qx = abs(p[0])-upper[0] qy = abs(p[1])-upper[1] qz = abs(p[2])-upper[2] # exterior case if (qx > 0.0 or qy > 0.0 or qz > 0.0): x = df.clamp(p[0], 0.0-upper[0], upper[0]) y = df.clamp(p[1], 0.0-upper[1], upper[1]) z = df.clamp(p[2], 0.0-upper[2], upper[2]) return df.normalize(p - df.float3(x, y, z)) sx = df.sign(p[0]) sy = df.sign(p[1]) sz = df.sign(p[2]) # x projection if (qx > qy and qx > qz): return df.float3(sx, 0.0, 0.0) # y projection if (qy > qx and qy > qz): return df.float3(0.0, sy, 0.0) # z projection if (qz > qx and qz > qy): return df.float3(0.0, 0.0, sz) @df.func def capsule_sdf(radius: float, half_width: float, p: df.float3): if (p[0] > half_width): return length(df.float3(p[0] - half_width, p[1], p[2])) - radius if (p[0] < 0.0 - half_width): return length(df.float3(p[0] + half_width, p[1], p[2])) - radius return df.length(df.float3(0.0, p[1], p[2])) - radius @df.func def capsule_sdf_grad(radius: float, half_width: float, p: df.float3): if (p[0] > half_width): return normalize(df.float3(p[0] - half_width, p[1], p[2])) if (p[0] < 0.0 - half_width): return normalize(df.float3(p[0] + half_width, p[1], p[2])) return normalize(df.float3(0.0, p[1], p[2])) @df.kernel def eval_soft_contacts( num_particles: int, particle_x: df.tensor(df.float3), particle_v: df.tensor(df.float3), body_X_sc: df.tensor(df.spatial_transform), body_v_sc: df.tensor(df.spatial_vector), shape_X_co: df.tensor(df.spatial_transform), shape_body: df.tensor(int), shape_geo_type: df.tensor(int), shape_geo_src: df.tensor(int), shape_geo_scale: df.tensor(df.float3), shape_materials: df.tensor(float), ke: float, kd: float, kf: float, mu: float, # outputs particle_f: df.tensor(df.float3), body_f: df.tensor(df.spatial_vector)): tid = df.tid() shape_index = tid // num_particles # which shape particle_index = tid % num_particles # which particle rigid_index = df.load(shape_body, shape_index) px = df.load(particle_x, particle_index) pv = df.load(particle_v, particle_index) #center = float3(0.0, 0.5, 0.0) #radius = 0.25 #margin = 0.01 # sphere collider # c = df.min(sphere_sdf(center, radius, x0)-margin, 0.0) # n = sphere_sdf_grad(center, radius, x0) # box collider #c = df.min(box_sdf(df.float3(radius, radius, radius), x0-center)-margin, 
0.0) #n = box_sdf_grad(df.float3(radius, radius, radius), x0-center) X_sc = df.spatial_transform_identity() if (rigid_index >= 0): X_sc = df.load(body_X_sc, rigid_index) X_co = df.load(shape_X_co, shape_index) X_so = df.spatial_transform_multiply(X_sc, X_co) X_os = df.spatial_transform_inverse(X_so) # transform particle position to shape local space x_local = df.spatial_transform_point(X_os, px) # geo description geo_type = df.load(shape_geo_type, shape_index) geo_scale = df.load(shape_geo_scale, shape_index) margin = 0.01 # evaluate shape sdf c = 0.0 n = df.float3(0.0, 0.0, 0.0) # GEO_SPHERE (0) if (geo_type == 0): c = df.min(sphere_sdf(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local)-margin, 0.0) n = df.spatial_transform_vector(X_so, sphere_sdf_grad(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local)) # GEO_BOX (1) if (geo_type == 1): c = df.min(box_sdf(geo_scale, x_local)-margin, 0.0) n = df.spatial_transform_vector(X_so, box_sdf_grad(geo_scale, x_local)) # GEO_CAPSULE (2) if (geo_type == 2): c = df.min(capsule_sdf(geo_scale[0], geo_scale[1], x_local)-margin, 0.0) n = df.spatial_transform_vector(X_so, capsule_sdf_grad(geo_scale[0], geo_scale[1], x_local)) # rigid velocity rigid_v_s = df.spatial_vector() if (rigid_index >= 0): rigid_v_s = df.load(body_v_sc, rigid_index) rigid_w = df.spatial_top(rigid_v_s) rigid_v = df.spatial_bottom(rigid_v_s) # compute the body velocity at the particle position bv = rigid_v + df.cross(rigid_w, px) # relative velocity v = pv - bv # decompose relative velocity vn = dot(n, v) vt = v - n * vn # contact elastic fn = n * c * ke # contact damping fd = n * df.min(vn, 0.0) * kd # viscous friction #ft = vt*kf # Coulomb friction (box) lower = mu * c * ke upper = 0.0 - lower vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper) vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper) ft = df.float3(vx, 0.0, vz) # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0) #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) f_total = fn + (fd + ft) * df.step(c) t_total = df.cross(px, f_total) df.atomic_sub(particle_f, particle_index, f_total) if (rigid_index >= 0): df.atomic_sub(body_f, rigid_index, df.spatial_vector(t_total, f_total)) @df.kernel def eval_rigid_contacts(rigid_x: df.tensor(df.float3), rigid_r: df.tensor(df.quat), rigid_v: df.tensor(df.float3), rigid_w: df.tensor(df.float3), contact_body: df.tensor(int), contact_point: df.tensor(df.float3), contact_dist: df.tensor(float), contact_mat: df.tensor(int), materials: df.tensor(float), rigid_f: df.tensor(df.float3), rigid_t: df.tensor(df.float3)): tid = df.tid() c_body = df.load(contact_body, tid) c_point = df.load(contact_point, tid) c_dist = df.load(contact_dist, tid) c_mat = df.load(contact_mat, tid) # hard coded surface parameter tensor layout (ke, kd, kf, mu) ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient kd = df.load(materials, c_mat * 4 + 1) # damping coefficient kf = df.load(materials, c_mat * 4 + 2) # friction coefficient mu = df.load(materials, c_mat * 4 + 3) # coulomb friction x0 = df.load(rigid_x, c_body) # position of colliding body r0 = df.load(rigid_r, c_body) # orientation of colliding body v0 = df.load(rigid_v, c_body) w0 = df.load(rigid_w, c_body) n = float3(0.0, 1.0, 0.0) # transform point to world space p = x0 + df.rotate(r0, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule # use x0 as center, everything is offset from center of mass # moment arm r = p - x0 # basically just c_point in the new coordinates # 
contact point velocity dpdt = v0 + df.cross(w0, r) # this is rigid velocity cross offset, so it's the velocity of the contact point. # check ground contact c = df.min(dot(n, p), 0.0) # check if we're inside the ground vn = dot(n, dpdt) # velocity component out of the ground vt = dpdt - n * vn # velocity component not into the ground fn = c * ke # normal force (restitution coefficient * how far inside for ground) # contact damping fd = df.min(vn, 0.0) * kd * df.step(c) # again, velocity into the ground, negative # viscous friction #ft = vt*kf # Coulomb friction (box) lower = mu * (fn + fd) # negative upper = 0.0 - lower # positive, workaround for no unary ops vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper) vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper) ft = df.float3(vx, 0.0, vz) * df.step(c) # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0) #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) f_total = n * (fn + fd) + ft t_total = df.cross(r, f_total) df.atomic_sub(rigid_f, c_body, f_total) df.atomic_sub(rigid_t, c_body, t_total) # # Frank & Park definition 3.20, pg 100 @df.func def spatial_transform_twist(t: df.spatial_transform, x: df.spatial_vector): q = spatial_transform_get_rotation(t) p = spatial_transform_get_translation(t) w = spatial_top(x) v = spatial_bottom(x) w = rotate(q, w) v = rotate(q, v) + cross(p, w) return spatial_vector(w, v) @df.func def spatial_transform_wrench(t: df.spatial_transform, x: df.spatial_vector): q = spatial_transform_get_rotation(t) p = spatial_transform_get_translation(t) w = spatial_top(x) v = spatial_bottom(x) v = rotate(q, v) w = rotate(q, w) + cross(p, v) return spatial_vector(w, v) @df.func def spatial_transform_inverse(t: df.spatial_transform): p = spatial_transform_get_translation(t) q = spatial_transform_get_rotation(t) q_inv = inverse(q) return spatial_transform(rotate(q_inv, p)*(0.0 - 1.0), q_inv); # computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290 @df.func def spatial_transform_inertia(t: df.spatial_transform, I: df.spatial_matrix): t_inv = spatial_transform_inverse(t) q = spatial_transform_get_rotation(t_inv) p = spatial_transform_get_translation(t_inv) r1 = rotate(q, float3(1.0, 0.0, 0.0)) r2 = rotate(q, float3(0.0, 1.0, 0.0)) r3 = rotate(q, float3(0.0, 0.0, 1.0)) R = mat33(r1, r2, r3) S = mul(skew(p), R) T = spatial_adjoint(R, S) return mul(mul(transpose(T), I), T) @df.kernel def eval_rigid_contacts_art( body_X_s: df.tensor(df.spatial_transform), body_v_s: df.tensor(df.spatial_vector), contact_body: df.tensor(int), contact_point: df.tensor(df.float3), contact_dist: df.tensor(float), contact_mat: df.tensor(int), materials: df.tensor(float), body_f_s: df.tensor(df.spatial_vector)): tid = df.tid() c_body = df.load(contact_body, tid) c_point = df.load(contact_point, tid) c_dist = df.load(contact_dist, tid) c_mat = df.load(contact_mat, tid) # hard coded surface parameter tensor layout (ke, kd, kf, mu) ke = df.load(materials, c_mat * 4 + 0) # restitution coefficient kd = df.load(materials, c_mat * 4 + 1) # damping coefficient kf = df.load(materials, c_mat * 4 + 2) # friction coefficient mu = df.load(materials, c_mat * 4 + 3) # coulomb friction X_s = df.load(body_X_s, c_body) # position of colliding body v_s = df.load(body_v_s, c_body) # orientation of colliding body n = float3(0.0, 1.0, 0.0) # transform point to world space p = df.spatial_transform_point(X_s, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of 
sphere/capsule w = df.spatial_top(v_s) v = df.spatial_bottom(v_s) # contact point velocity dpdt = v + df.cross(w, p) # check ground contact c = df.dot(n, p) # check if we're inside the ground if (c >= 0.0): return vn = dot(n, dpdt) # velocity component out of the ground vt = dpdt - n * vn # velocity component not into the ground fn = c * ke # normal force (restitution coefficient * how far inside for ground) # contact damping fd = df.min(vn, 0.0) * kd * df.step(c) * (0.0 - c) # viscous friction #ft = vt*kf # Coulomb friction (box) lower = mu * (fn + fd) # negative upper = 0.0 - lower # positive, workaround for no unary ops vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper) vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper) # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0) ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) * df.step(c) f_total = n * (fn + fd) + ft t_total = df.cross(p, f_total) df.atomic_add(body_f_s, c_body, df.spatial_vector(t_total, f_total)) @df.func def compute_muscle_force( i: int, body_X_s: df.tensor(df.spatial_transform), body_v_s: df.tensor(df.spatial_vector), muscle_links: df.tensor(int), muscle_points: df.tensor(df.float3), muscle_activation: float, body_f_s: df.tensor(df.spatial_vector)): link_0 = df.load(muscle_links, i) link_1 = df.load(muscle_links, i+1) if (link_0 == link_1): return 0 r_0 = df.load(muscle_points, i) r_1 = df.load(muscle_points, i+1) xform_0 = df.load(body_X_s, link_0) xform_1 = df.load(body_X_s, link_1) pos_0 = df.spatial_transform_point(xform_0, r_0) pos_1 = df.spatial_transform_point(xform_1, r_1) n = df.normalize(pos_1 - pos_0) # todo: add passive elastic and viscosity terms f = n * muscle_activation df.atomic_sub(body_f_s, link_0, df.spatial_vector(df.cross(pos_0, f), f)) df.atomic_add(body_f_s, link_1, df.spatial_vector(df.cross(pos_1, f), f)) return 0 @df.kernel def eval_muscles( body_X_s: df.tensor(df.spatial_transform), body_v_s: df.tensor(df.spatial_vector), muscle_start: df.tensor(int), muscle_params: df.tensor(float), muscle_links: df.tensor(int), muscle_points: df.tensor(df.float3), muscle_activation: df.tensor(float), # output body_f_s: df.tensor(df.spatial_vector)): tid = df.tid() m_start = df.load(muscle_start, tid) m_end = df.load(muscle_start, tid+1) - 1 activation = df.load(muscle_activation, tid) for i in range(m_start, m_end): compute_muscle_force(i, body_X_s, body_v_s, muscle_links, muscle_points, activation, body_f_s) # compute transform across a joint @df.func def jcalc_transform(type: int, axis: df.float3, joint_q: df.tensor(float), start: int): # prismatic if (type == 0): q = df.load(joint_q, start) X_jc = spatial_transform(axis * q, quat_identity()) return X_jc # revolute if (type == 1): q = df.load(joint_q, start) X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat_from_axis_angle(axis, q)) return X_jc # ball if (type == 2): qx = df.load(joint_q, start + 0) qy = df.load(joint_q, start + 1) qz = df.load(joint_q, start + 2) qw = df.load(joint_q, start + 3) X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat(qx, qy, qz, qw)) return X_jc # fixed if (type == 3): X_jc = spatial_transform_identity() return X_jc # free if (type == 4): px = df.load(joint_q, start + 0) py = df.load(joint_q, start + 1) pz = df.load(joint_q, start + 2) qx = df.load(joint_q, start + 3) qy = df.load(joint_q, start + 4) qz = df.load(joint_q, start + 5) qw = df.load(joint_q, start + 6) X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw)) return X_jc # default case return 
spatial_transform_identity() # compute motion subspace and velocity for a joint @df.func def jcalc_motion(type: int, axis: df.float3, X_sc: df.spatial_transform, joint_S_s: df.tensor(df.spatial_vector), joint_qd: df.tensor(float), joint_start: int): # prismatic if (type == 0): S_s = df.spatial_transform_twist(X_sc, spatial_vector(float3(0.0, 0.0, 0.0), axis)) v_j_s = S_s * df.load(joint_qd, joint_start) df.store(joint_S_s, joint_start, S_s) return v_j_s # revolute if (type == 1): S_s = df.spatial_transform_twist(X_sc, spatial_vector(axis, float3(0.0, 0.0, 0.0))) v_j_s = S_s * df.load(joint_qd, joint_start) df.store(joint_S_s, joint_start, S_s) return v_j_s # ball if (type == 2): w = float3(df.load(joint_qd, joint_start + 0), df.load(joint_qd, joint_start + 1), df.load(joint_qd, joint_start + 2)) S_0 = df.spatial_transform_twist(X_sc, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0)) S_1 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0)) S_2 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0)) # write motion subspace df.store(joint_S_s, joint_start + 0, S_0) df.store(joint_S_s, joint_start + 1, S_1) df.store(joint_S_s, joint_start + 2, S_2) return S_0*w[0] + S_1*w[1] + S_2*w[2] # fixed if (type == 3): return spatial_vector() # free if (type == 4): v_j_s = spatial_vector(df.load(joint_qd, joint_start + 0), df.load(joint_qd, joint_start + 1), df.load(joint_qd, joint_start + 2), df.load(joint_qd, joint_start + 3), df.load(joint_qd, joint_start + 4), df.load(joint_qd, joint_start + 5)) # write motion subspace df.store(joint_S_s, joint_start + 0, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0)) df.store(joint_S_s, joint_start + 1, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0)) df.store(joint_S_s, joint_start + 2, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0)) df.store(joint_S_s, joint_start + 3, spatial_vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)) df.store(joint_S_s, joint_start + 4, spatial_vector(0.0, 0.0, 0.0, 0.0, 1.0, 0.0)) df.store(joint_S_s, joint_start + 5, spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 1.0)) return v_j_s # default case return spatial_vector() # # compute the velocity across a joint # #@df.func # def jcalc_velocity(self, type, S_s, joint_qd, start): # # prismatic # if (type == 0): # v_j_s = df.load(S_s, start)*df.load(joint_qd, start) # return v_j_s # # revolute # if (type == 1): # v_j_s = df.load(S_s, start)*df.load(joint_qd, start) # return v_j_s # # fixed # if (type == 2): # v_j_s = spatial_vector() # return v_j_s # # free # if (type == 3): # v_j_s = S_s[start+0]*joint_qd[start+0] # v_j_s += S_s[start+1]*joint_qd[start+1] # v_j_s += S_s[start+2]*joint_qd[start+2] # v_j_s += S_s[start+3]*joint_qd[start+3] # v_j_s += S_s[start+4]*joint_qd[start+4] # v_j_s += S_s[start+5]*joint_qd[start+5] # return v_j_s # computes joint space forces/torques in tau @df.func def jcalc_tau( type: int, target_k_e: float, target_k_d: float, limit_k_e: float, limit_k_d: float, joint_S_s: df.tensor(spatial_vector), joint_q: df.tensor(float), joint_qd: df.tensor(float), joint_act: df.tensor(float), joint_target: df.tensor(float), joint_limit_lower: df.tensor(float), joint_limit_upper: df.tensor(float), coord_start: int, dof_start: int, body_f_s: spatial_vector, tau: df.tensor(float)): # prismatic / revolute if (type == 0 or type == 1): S_s = df.load(joint_S_s, dof_start) q = df.load(joint_q, coord_start) qd = df.load(joint_qd, dof_start) act = df.load(joint_act, dof_start) target = df.load(joint_target, coord_start) lower = df.load(joint_limit_lower, 
coord_start) upper = df.load(joint_limit_upper, coord_start) limit_f = 0.0 # compute limit forces, damping only active when limit is violated if (q < lower): limit_f = limit_k_e*(lower-q) if (q > upper): limit_f = limit_k_e*(upper-q) damping_f = (0.0 - limit_k_d) * qd # total torque / force on the joint t = 0.0 - spatial_dot(S_s, body_f_s) - target_k_e*(q - target) - target_k_d*qd + act + limit_f + damping_f df.store(tau, dof_start, t) # ball if (type == 2): # elastic term.. this is proportional to the # imaginary part of the relative quaternion r_j = float3(df.load(joint_q, coord_start + 0), df.load(joint_q, coord_start + 1), df.load(joint_q, coord_start + 2)) # angular velocity for damping w_j = float3(df.load(joint_qd, dof_start + 0), df.load(joint_qd, dof_start + 1), df.load(joint_qd, dof_start + 2)) for i in range(0, 3): S_s = df.load(joint_S_s, dof_start+i) w = w_j[i] r = r_j[i] df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s) - w*target_k_d - r*target_k_e) # fixed # if (type == 3) # pass # free if (type == 4): for i in range(0, 6): S_s = df.load(joint_S_s, dof_start+i) df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s)) return 0 @df.func def jcalc_integrate( type: int, joint_q: df.tensor(float), joint_qd: df.tensor(float), joint_qdd: df.tensor(float), coord_start: int, dof_start: int, dt: float, joint_q_new: df.tensor(float), joint_qd_new: df.tensor(float)): # prismatic / revolute if (type == 0 or type == 1): qdd = df.load(joint_qdd, dof_start) qd = df.load(joint_qd, dof_start) q = df.load(joint_q, coord_start) qd_new = qd + qdd*dt q_new = q + qd_new*dt df.store(joint_qd_new, dof_start, qd_new) df.store(joint_q_new, coord_start, q_new) # ball if (type == 2): m_j = float3(df.load(joint_qdd, dof_start + 0), df.load(joint_qdd, dof_start + 1), df.load(joint_qdd, dof_start + 2)) w_j = float3(df.load(joint_qd, dof_start + 0), df.load(joint_qd, dof_start + 1), df.load(joint_qd, dof_start + 2)) r_j = quat(df.load(joint_q, coord_start + 0), df.load(joint_q, coord_start + 1), df.load(joint_q, coord_start + 2), df.load(joint_q, coord_start + 3)) # symplectic Euler w_j_new = w_j + m_j*dt drdt_j = mul(quat(w_j_new, 0.0), r_j) * 0.5 # new orientation (normalized) r_j_new = normalize(r_j + drdt_j * dt) # update joint coords df.store(joint_q_new, coord_start + 0, r_j_new[0]) df.store(joint_q_new, coord_start + 1, r_j_new[1]) df.store(joint_q_new, coord_start + 2, r_j_new[2]) df.store(joint_q_new, coord_start + 3, r_j_new[3]) # update joint vel df.store(joint_qd_new, dof_start + 0, w_j_new[0]) df.store(joint_qd_new, dof_start + 1, w_j_new[1]) df.store(joint_qd_new, dof_start + 2, w_j_new[2]) # fixed joint #if (type == 3) # pass # free joint if (type == 4): # dofs: qd = (omega_x, omega_y, omega_z, vel_x, vel_y, vel_z) # coords: q = (trans_x, trans_y, trans_z, quat_x, quat_y, quat_z, quat_w) # angular and linear acceleration m_s = float3(df.load(joint_qdd, dof_start + 0), df.load(joint_qdd, dof_start + 1), df.load(joint_qdd, dof_start + 2)) a_s = float3(df.load(joint_qdd, dof_start + 3), df.load(joint_qdd, dof_start + 4), df.load(joint_qdd, dof_start + 5)) # angular and linear velocity w_s = float3(df.load(joint_qd, dof_start + 0), df.load(joint_qd, dof_start + 1), df.load(joint_qd, dof_start + 2)) v_s = float3(df.load(joint_qd, dof_start + 3), df.load(joint_qd, dof_start + 4), df.load(joint_qd, dof_start + 5)) # symplectic Euler w_s = w_s + m_s*dt v_s = v_s + a_s*dt # translation of origin p_s = float3(df.load(joint_q, coord_start + 0), df.load(joint_q, coord_start + 1), 
df.load(joint_q, coord_start + 2)) # linear vel of origin (note q/qd switch order of linear angular elements) # note we are converting the body twist in the space frame (w_s, v_s) to compute center of mass velcity dpdt_s = v_s + cross(w_s, p_s) # quat and quat derivative r_s = quat(df.load(joint_q, coord_start + 3), df.load(joint_q, coord_start + 4), df.load(joint_q, coord_start + 5), df.load(joint_q, coord_start + 6)) drdt_s = mul(quat(w_s, 0.0), r_s) * 0.5 # new orientation (normalized) p_s_new = p_s + dpdt_s * dt r_s_new = normalize(r_s + drdt_s * dt) # update transform df.store(joint_q_new, coord_start + 0, p_s_new[0]) df.store(joint_q_new, coord_start + 1, p_s_new[1]) df.store(joint_q_new, coord_start + 2, p_s_new[2]) df.store(joint_q_new, coord_start + 3, r_s_new[0]) df.store(joint_q_new, coord_start + 4, r_s_new[1]) df.store(joint_q_new, coord_start + 5, r_s_new[2]) df.store(joint_q_new, coord_start + 6, r_s_new[3]) # update joint_twist df.store(joint_qd_new, dof_start + 0, w_s[0]) df.store(joint_qd_new, dof_start + 1, w_s[1]) df.store(joint_qd_new, dof_start + 2, w_s[2]) df.store(joint_qd_new, dof_start + 3, v_s[0]) df.store(joint_qd_new, dof_start + 4, v_s[1]) df.store(joint_qd_new, dof_start + 5, v_s[2]) return 0 @df.func def compute_link_transform(i: int, joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_q_start: df.tensor(int), joint_qd_start: df.tensor(int), joint_q: df.tensor(float), joint_X_pj: df.tensor(df.spatial_transform), joint_X_cm: df.tensor(df.spatial_transform), joint_axis: df.tensor(df.float3), body_X_sc: df.tensor(df.spatial_transform), body_X_sm: df.tensor(df.spatial_transform)): # parent transform parent = load(joint_parent, i) # parent transform in spatial coordinates X_sp = spatial_transform_identity() if (parent >= 0): X_sp = load(body_X_sc, parent) type = load(joint_type, i) axis = load(joint_axis, i) coord_start = load(joint_q_start, i) dof_start = load(joint_qd_start, i) # compute transform across joint X_jc = jcalc_transform(type, axis, joint_q, coord_start) X_pj = load(joint_X_pj, i) X_sc = spatial_transform_multiply(X_sp, spatial_transform_multiply(X_pj, X_jc)) # compute transform of center of mass X_cm = load(joint_X_cm, i) X_sm = spatial_transform_multiply(X_sc, X_cm) # store geometry transforms store(body_X_sc, i, X_sc) store(body_X_sm, i, X_sm) return 0 @df.kernel def eval_rigid_fk(articulation_start: df.tensor(int), joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_q_start: df.tensor(int), joint_qd_start: df.tensor(int), joint_q: df.tensor(float), joint_X_pj: df.tensor(df.spatial_transform), joint_X_cm: df.tensor(df.spatial_transform), joint_axis: df.tensor(df.float3), body_X_sc: df.tensor(df.spatial_transform), body_X_sm: df.tensor(df.spatial_transform)): # one thread per-articulation index = tid() start = df.load(articulation_start, index) end = df.load(articulation_start, index+1) for i in range(start, end): compute_link_transform(i, joint_type, joint_parent, joint_q_start, joint_qd_start, joint_q, joint_X_pj, joint_X_cm, joint_axis, body_X_sc, body_X_sm) @df.func def compute_link_velocity(i: int, joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_qd_start: df.tensor(int), joint_qd: df.tensor(float), joint_axis: df.tensor(df.float3), body_I_m: df.tensor(df.spatial_matrix), body_X_sc: df.tensor(df.spatial_transform), body_X_sm: df.tensor(df.spatial_transform), joint_X_pj: df.tensor(df.spatial_transform), gravity: df.tensor(df.float3), # outputs joint_S_s: df.tensor(df.spatial_vector), body_I_s: 
df.tensor(df.spatial_matrix), body_v_s: df.tensor(df.spatial_vector), body_f_s: df.tensor(df.spatial_vector), body_a_s: df.tensor(df.spatial_vector)): type = df.load(joint_type, i) axis = df.load(joint_axis, i) parent = df.load(joint_parent, i) dof_start = df.load(joint_qd_start, i) X_sc = df.load(body_X_sc, i) # parent transform in spatial coordinates X_sp = spatial_transform_identity() if (parent >= 0): X_sp = load(body_X_sc, parent) X_pj = load(joint_X_pj, i) X_sj = spatial_transform_multiply(X_sp, X_pj) # compute motion subspace and velocity across the joint (also stores S_s to global memory) v_j_s = jcalc_motion(type, axis, X_sj, joint_S_s, joint_qd, dof_start) # parent velocity v_parent_s = spatial_vector() a_parent_s = spatial_vector() if (parent >= 0): v_parent_s = df.load(body_v_s, parent) a_parent_s = df.load(body_a_s, parent) # body velocity, acceleration v_s = v_parent_s + v_j_s a_s = a_parent_s + spatial_cross(v_s, v_j_s) # + self.joint_S_s[i]*self.joint_qdd[i] # compute body forces X_sm = df.load(body_X_sm, i) I_m = df.load(body_I_m, i) # gravity and external forces (expressed in frame aligned with s but centered at body mass) g = df.load(gravity, 0) m = I_m[3, 3] f_g_m = spatial_vector(float3(), g) * m f_g_s = spatial_transform_wrench(spatial_transform(spatial_transform_get_translation(X_sm), quat_identity()), f_g_m) #f_ext_s = df.load(body_f_s, i) + f_g_s # body forces I_s = spatial_transform_inertia(X_sm, I_m) f_b_s = df.mul(I_s, a_s) + spatial_cross_dual(v_s, df.mul(I_s, v_s)) df.store(body_v_s, i, v_s) df.store(body_a_s, i, a_s) df.store(body_f_s, i, f_b_s - f_g_s) df.store(body_I_s, i, I_s) return 0 @df.func def compute_link_tau(offset: int, joint_end: int, joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_q_start: df.tensor(int), joint_qd_start: df.tensor(int), joint_q: df.tensor(float), joint_qd: df.tensor(float), joint_act: df.tensor(float), joint_target: df.tensor(float), joint_target_ke: df.tensor(float), joint_target_kd: df.tensor(float), joint_limit_lower: df.tensor(float), joint_limit_upper: df.tensor(float), joint_limit_ke: df.tensor(float), joint_limit_kd: df.tensor(float), joint_S_s: df.tensor(df.spatial_vector), body_fb_s: df.tensor(df.spatial_vector), # outputs body_ft_s: df.tensor(df.spatial_vector), tau: df.tensor(float)): # for backwards traversal i = joint_end-offset-1 type = df.load(joint_type, i) parent = df.load(joint_parent, i) dof_start = df.load(joint_qd_start, i) coord_start = df.load(joint_q_start, i) target_k_e = df.load(joint_target_ke, i) target_k_d = df.load(joint_target_kd, i) limit_k_e = df.load(joint_limit_ke, i) limit_k_d = df.load(joint_limit_kd, i) # total forces on body f_b_s = df.load(body_fb_s, i) f_t_s = df.load(body_ft_s, i) f_s = f_b_s + f_t_s # compute joint-space forces, writes out tau jcalc_tau(type, target_k_e, target_k_d, limit_k_e, limit_k_d, joint_S_s, joint_q, joint_qd, joint_act, joint_target, joint_limit_lower, joint_limit_upper, coord_start, dof_start, f_s, tau) # update parent forces, todo: check that this is valid for the backwards pass if (parent >= 0): df.atomic_add(body_ft_s, parent, f_s) return 0 @df.kernel def eval_rigid_id(articulation_start: df.tensor(int), joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_q_start: df.tensor(int), joint_qd_start: df.tensor(int), joint_q: df.tensor(float), joint_qd: df.tensor(float), joint_axis: df.tensor(df.float3), joint_target_ke: df.tensor(float), joint_target_kd: df.tensor(float), body_I_m: df.tensor(df.spatial_matrix), body_X_sc: 
df.tensor(df.spatial_transform), body_X_sm: df.tensor(df.spatial_transform), joint_X_pj: df.tensor(df.spatial_transform), gravity: df.tensor(df.float3), # outputs joint_S_s: df.tensor(df.spatial_vector), body_I_s: df.tensor(df.spatial_matrix), body_v_s: df.tensor(df.spatial_vector), body_f_s: df.tensor(df.spatial_vector), body_a_s: df.tensor(df.spatial_vector)): # one thread per-articulation index = tid() start = df.load(articulation_start, index) end = df.load(articulation_start, index+1) count = end-start # compute link velocities and coriolis forces for i in range(start, end): compute_link_velocity( i, joint_type, joint_parent, joint_qd_start, joint_qd, joint_axis, body_I_m, body_X_sc, body_X_sm, joint_X_pj, gravity, joint_S_s, body_I_s, body_v_s, body_f_s, body_a_s) @df.kernel def eval_rigid_tau(articulation_start: df.tensor(int), joint_type: df.tensor(int), joint_parent: df.tensor(int), joint_q_start: df.tensor(int), joint_qd_start: df.tensor(int), joint_q: df.tensor(float), joint_qd: df.tensor(float), joint_act: df.tensor(float), joint_target: df.tensor(float), joint_target_ke: df.tensor(float), joint_target_kd: df.tensor(float), joint_limit_lower: df.tensor(float), joint_limit_upper: df.tensor(float), joint_limit_ke: df.tensor(float), joint_limit_kd: df.tensor(float), joint_axis: df.tensor(df.float3), joint_S_s: df.tensor(df.spatial_vector), body_fb_s: df.tensor(df.spatial_vector), # outputs body_ft_s: df.tensor(df.spatial_vector), tau: df.tensor(float)): # one thread per-articulation index = tid() start = df.load(articulation_start, index) end = df.load(articulation_start, index+1) count = end-start # compute joint forces for i in range(0, count): compute_link_tau( i, end, joint_type, joint_parent, joint_q_start, joint_qd_start, joint_q, joint_qd, joint_act, joint_target, joint_target_ke, joint_target_kd, joint_limit_lower, joint_limit_upper, joint_limit_ke, joint_limit_kd, joint_S_s, body_fb_s, body_ft_s, tau) @df.kernel def eval_rigid_jacobian( articulation_start: df.tensor(int), articulation_J_start: df.tensor(int), joint_parent: df.tensor(int), joint_qd_start: df.tensor(int), joint_S_s: df.tensor(spatial_vector), # outputs J: df.tensor(float)): # one thread per-articulation index = tid() joint_start = df.load(articulation_start, index) joint_end = df.load(articulation_start, index+1) joint_count = joint_end-joint_start J_offset = df.load(articulation_J_start, index) # in spatial.h spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_start, joint_count, J_offset, J) # @df.kernel # def eval_rigid_jacobian( # articulation_start: df.tensor(int), # articulation_J_start: df.tensor(int), # joint_parent: df.tensor(int), # joint_qd_start: df.tensor(int), # joint_S_s: df.tensor(spatial_vector), # # outputs # J: df.tensor(float)): # # one thread per-articulation # index = tid() # joint_start = df.load(articulation_start, index) # joint_end = df.load(articulation_start, index+1) # joint_count = joint_end-joint_start # dof_start = df.load(joint_qd_start, joint_start) # dof_end = df.load(joint_qd_start, joint_end) # dof_count = dof_end-dof_start # #(const spatial_vector* S, const int* joint_parents, const int* joint_qd_start, int num_links, int num_dofs, float* J) # spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_count, dof_count, J) @df.kernel def eval_rigid_mass( articulation_start: df.tensor(int), articulation_M_start: df.tensor(int), body_I_s: df.tensor(spatial_matrix), # outputs M: df.tensor(float)): # one thread per-articulation index = tid() joint_start = 
df.load(articulation_start, index)
    joint_end = df.load(articulation_start, index+1)
    joint_count = joint_end-joint_start

    M_offset = df.load(articulation_M_start, index)

    # in spatial.h
    spatial_mass(body_I_s, joint_start, joint_count, M_offset, M)


@df.kernel
def eval_dense_gemm(m: int, n: int, p: int, t1: int, t2: int, A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
    dense_gemm(m, n, p, t1, t2, A, B, C)


@df.kernel
def eval_dense_gemm_batched(m: df.tensor(int), n: df.tensor(int), p: df.tensor(int), t1: int, t2: int, A_start: df.tensor(int), B_start: df.tensor(int), C_start: df.tensor(int), A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
    dense_gemm_batched(m, n, p, t1, t2, A_start, B_start, C_start, A, B, C)


@df.kernel
def eval_dense_cholesky(n: int, A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
    dense_chol(n, A, regularization, L)


@df.kernel
def eval_dense_cholesky_batched(A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
    dense_chol_batched(A_start, A_dim, A, regularization, L)


@df.kernel
def eval_dense_subs(n: int, L: df.tensor(float), b: df.tensor(float), x: df.tensor(float)):
    dense_subs(n, L, b, x)


# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve(n: int, A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
    dense_solve(n, A, L, b, tmp, x)


# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve_batched(b_start: df.tensor(int), A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
    dense_solve_batched(b_start, A_start, A_dim, A, L, b, tmp, x)


@df.kernel
def eval_rigid_integrate(
    joint_type: df.tensor(int),
    joint_q_start: df.tensor(int),
    joint_qd_start: df.tensor(int),
    joint_q: df.tensor(float),
    joint_qd: df.tensor(float),
    joint_qdd: df.tensor(float),
    dt: float,
    # outputs
    joint_q_new: df.tensor(float),
    joint_qd_new: df.tensor(float)):

    # one thread per-joint (launched with dim=model.link_count)
    index = tid()

    type = df.load(joint_type, index)
    coord_start = df.load(joint_q_start, index)
    dof_start = df.load(joint_qd_start, index)

    jcalc_integrate(
        type,
        joint_q,
        joint_qd,
        joint_qdd,
        coord_start,
        dof_start,
        dt,
        joint_q_new,
        joint_qd_new)


g_state_out = None


# define PyTorch autograd op to wrap simulate func
class SimulateFunc(torch.autograd.Function):
    """PyTorch autograd function representing a simulation step

    Note:

        This node will be inserted into the computation graph whenever
        `forward()` is called on an integrator object. It should not be
        called directly by the user.

        The forward pass records every kernel launch on a `df.Tape`; the
        backward pass replays the tape in reverse, accumulating adjoints
        for each input tensor.
    """

    @staticmethod
    def forward(ctx, integrator, model, state_in, dt, substeps, mass_matrix_freq, *tensors):

        # record launches
        ctx.tape = df.Tape()
        ctx.inputs = tensors
        #ctx.outputs = df.to_weak_list(state_out.flatten())

        actuation = state_in.joint_act

        # simulate
        for i in range(substeps):

            # ensure actuation is set on all substeps
            state_in.joint_act = actuation
            state_out = model.state()

            integrator._simulate(ctx.tape, model, state_in, state_out, dt/float(substeps), update_mass_matrix=((i%mass_matrix_freq)==0))

            # swap states
            state_in = state_out

        # use global to pass state object back to caller
        global g_state_out
        g_state_out = state_out

        ctx.outputs = df.to_weak_list(state_out.flatten())
        return tuple(state_out.flatten())

    @staticmethod
    def backward(ctx, *grads):

        # ensure grads are contiguous in memory
        adj_outputs = df.make_contiguous(grads)

        # register outputs with tape
        outputs = df.to_strong_list(ctx.outputs)
        for o in range(len(outputs)):
            ctx.tape.adjoints[outputs[o]] = adj_outputs[o]

        # replay launches backwards
        ctx.tape.replay()

        # find adjoint of inputs
        adj_inputs = []
        for i in ctx.inputs:

            if i in ctx.tape.adjoints:
                adj_inputs.append(ctx.tape.adjoints[i])
            else:
                adj_inputs.append(None)

        # free the tape
        ctx.tape.reset()

        # filter grads to replace empty tensors / no grad / constant params with None
        return (None, None, None, None, None, None, *df.filter_grads(adj_inputs))


class SemiImplicitIntegrator:
    """A semi-implicit integrator using symplectic Euler

    After constructing `Model` and `State` objects this time-integrator
    may be used to advance the simulation state forward in time.

    Semi-implicit time integration is a variational integrator that
    approximately conserves energy; however, it is not unconditionally
    stable, and requires a time-step small enough to support the required
    stiffness and damping forces.

    See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method

    Example:

        >>> integrator = df.SemiImplicitIntegrator()
        >>>
        >>> # simulation loop
        >>> for i in range(100):
        >>>     state = integrator.forward(model, state, dt)

    """

    def __init__(self):
        pass

    def forward(self, model: Model, state_in: State, dt: float, substeps: int, mass_matrix_freq: int) -> State:
        """Performs a single integration step forward in time

        This method inserts a node into the PyTorch computational graph with
        references to all model and state tensors such that gradients
        can be propagated back through the simulation step. When
        `dflex.config.no_grad` is set, the update is instead performed
        in-place on `state_in` and no autograd node is created.
Args: model: Simulation model state: Simulation state at the start the time-step dt: The simulation time-step (usually in seconds) Returns: The state of the system at the end of the time-step """ if dflex.config.no_grad: # if no gradient required then do inplace update for i in range(substeps): self._simulate(df.Tape(), model, state_in, state_in, dt/float(substeps), update_mass_matrix=(i%mass_matrix_freq)==0) return state_in else: # get list of inputs and outputs for PyTorch tensor tracking inputs = [*state_in.flatten(), *model.flatten()] # run sim as a PyTorch op tensors = SimulateFunc.apply(self, model, state_in, dt, substeps, mass_matrix_freq, *inputs) global g_state_out state_out = g_state_out g_state_out = None # null reference return state_out def _simulate(self, tape, model, state_in, state_out, dt, update_mass_matrix=True): with dflex.util.ScopedTimer("simulate", False): # alloc particle force buffer if (model.particle_count): state_out.particle_f.zero_() if (model.link_count): state_out.body_ft_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True) state_out.body_f_ext_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True) # damped springs if (model.spring_count): tape.launch(func=eval_springs, dim=model.spring_count, inputs=[state_in.particle_q, state_in.particle_qd, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping], outputs=[state_out.particle_f], adapter=model.adapter) # triangle elastic and lift/drag forces if (model.tri_count and model.tri_ke > 0.0): tape.launch(func=eval_triangles, dim=model.tri_count, inputs=[ state_in.particle_q, state_in.particle_qd, model.tri_indices, model.tri_poses, model.tri_activations, model.tri_ke, model.tri_ka, model.tri_kd, model.tri_drag, model.tri_lift ], outputs=[state_out.particle_f], adapter=model.adapter) # triangle/triangle contacts if (model.enable_tri_collisions and model.tri_count and model.tri_ke > 0.0): tape.launch(func=eval_triangles_contact, dim=model.tri_count * model.particle_count, inputs=[ model.particle_count, state_in.particle_q, state_in.particle_qd, model.tri_indices, model.tri_poses, model.tri_activations, model.tri_ke, model.tri_ka, model.tri_kd, model.tri_drag, model.tri_lift ], outputs=[state_out.particle_f], adapter=model.adapter) # triangle bending if (model.edge_count): tape.launch(func=eval_bending, dim=model.edge_count, inputs=[state_in.particle_q, state_in.particle_qd, model.edge_indices, model.edge_rest_angle, model.edge_ke, model.edge_kd], outputs=[state_out.particle_f], adapter=model.adapter) # particle ground contact if (model.ground and model.particle_count): tape.launch(func=eval_contacts, dim=model.particle_count, inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu], outputs=[state_out.particle_f], adapter=model.adapter) # tetrahedral FEM if (model.tet_count): tape.launch(func=eval_tetrahedra, dim=model.tet_count, inputs=[state_in.particle_q, state_in.particle_qd, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials], outputs=[state_out.particle_f], adapter=model.adapter) #---------------------------- # articulations if (model.link_count): # evaluate body transforms tape.launch( func=eval_rigid_fk, dim=model.articulation_count, inputs=[ model.articulation_joint_start, model.joint_type, model.joint_parent, model.joint_q_start, model.joint_qd_start, state_in.joint_q, 
model.joint_X_pj, model.joint_X_cm, model.joint_axis ], outputs=[ state_out.body_X_sc, state_out.body_X_sm ], adapter=model.adapter, preserve_output=True) # evaluate joint inertias, motion vectors, and forces tape.launch( func=eval_rigid_id, dim=model.articulation_count, inputs=[ model.articulation_joint_start, model.joint_type, model.joint_parent, model.joint_q_start, model.joint_qd_start, state_in.joint_q, state_in.joint_qd, model.joint_axis, model.joint_target_ke, model.joint_target_kd, model.body_I_m, state_out.body_X_sc, state_out.body_X_sm, model.joint_X_pj, model.gravity ], outputs=[ state_out.joint_S_s, state_out.body_I_s, state_out.body_v_s, state_out.body_f_s, state_out.body_a_s, ], adapter=model.adapter, preserve_output=True) if (model.ground and model.contact_count > 0): # evaluate contact forces tape.launch( func=eval_rigid_contacts_art, dim=model.contact_count, inputs=[ state_out.body_X_sc, state_out.body_v_s, model.contact_body0, model.contact_point0, model.contact_dist, model.contact_material, model.shape_materials ], outputs=[ state_out.body_f_s ], adapter=model.adapter, preserve_output=True) # particle shape contact if (model.particle_count): # tape.launch(func=eval_soft_contacts, # dim=model.particle_count*model.shape_count, # inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu], # outputs=[state_out.particle_f], # adapter=model.adapter) tape.launch(func=eval_soft_contacts, dim=model.particle_count*model.shape_count, inputs=[ model.particle_count, state_in.particle_q, state_in.particle_qd, state_in.body_X_sc, state_in.body_v_s, model.shape_transform, model.shape_body, model.shape_geo_type, torch.Tensor(), model.shape_geo_scale, model.shape_materials, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu], # outputs outputs=[ state_out.particle_f, state_out.body_f_s], adapter=model.adapter) # evaluate muscle actuation tape.launch( func=eval_muscles, dim=model.muscle_count, inputs=[ state_out.body_X_sc, state_out.body_v_s, model.muscle_start, model.muscle_params, model.muscle_links, model.muscle_points, model.muscle_activation ], outputs=[ state_out.body_f_s ], adapter=model.adapter, preserve_output=True) # evaluate joint torques tape.launch( func=eval_rigid_tau, dim=model.articulation_count, inputs=[ model.articulation_joint_start, model.joint_type, model.joint_parent, model.joint_q_start, model.joint_qd_start, state_in.joint_q, state_in.joint_qd, state_in.joint_act, model.joint_target, model.joint_target_ke, model.joint_target_kd, model.joint_limit_lower, model.joint_limit_upper, model.joint_limit_ke, model.joint_limit_kd, model.joint_axis, state_out.joint_S_s, state_out.body_f_s ], outputs=[ state_out.body_ft_s, state_out.joint_tau ], adapter=model.adapter, preserve_output=True) if (update_mass_matrix): model.alloc_mass_matrix() # build J tape.launch( func=eval_rigid_jacobian, dim=model.articulation_count, inputs=[ # inputs model.articulation_joint_start, model.articulation_J_start, model.joint_parent, model.joint_qd_start, state_out.joint_S_s ], outputs=[ model.J ], adapter=model.adapter, preserve_output=True) # build M tape.launch( func=eval_rigid_mass, dim=model.articulation_count, inputs=[ # inputs model.articulation_joint_start, model.articulation_M_start, state_out.body_I_s ], outputs=[ model.M ], adapter=model.adapter, preserve_output=True) # form P = M*J df.matmul_batched( tape, model.articulation_count, model.articulation_M_rows, model.articulation_J_cols, 
model.articulation_J_rows, 0, 0, model.articulation_M_start, model.articulation_J_start, model.articulation_J_start, # P start is the same as J start since it has the same dims as J model.M, model.J, model.P, adapter=model.adapter) # form H = J^T*P df.matmul_batched( tape, model.articulation_count, model.articulation_J_cols, model.articulation_J_cols, model.articulation_J_rows, # P rows is the same as J rows 1, 0, model.articulation_J_start, model.articulation_J_start, # P start is the same as J start since it has the same dims as J model.articulation_H_start, model.J, model.P, model.H, adapter=model.adapter) # compute decomposition tape.launch( func=eval_dense_cholesky_batched, dim=model.articulation_count, inputs=[ model.articulation_H_start, model.articulation_H_rows, model.H, model.joint_armature ], outputs=[ model.L ], adapter=model.adapter, skip_check_grad=True) tmp = torch.zeros_like(state_out.joint_tau) # solve for qdd tape.launch( func=eval_dense_solve_batched, dim=model.articulation_count, inputs=[ model.articulation_dof_start, model.articulation_H_start, model.articulation_H_rows, model.H, model.L, state_out.joint_tau, tmp ], outputs=[ state_out.joint_qdd ], adapter=model.adapter, skip_check_grad=True) # integrate joint dofs -> joint coords tape.launch( func=eval_rigid_integrate, dim=model.link_count, inputs=[ model.joint_type, model.joint_q_start, model.joint_qd_start, state_in.joint_q, state_in.joint_qd, state_out.joint_qdd, dt ], outputs=[ state_out.joint_q, state_out.joint_qd ], adapter=model.adapter) #---------------------------- # integrate particles if (model.particle_count): tape.launch(func=integrate_particles, dim=model.particle_count, inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt], outputs=[state_out.particle_q, state_out.particle_qd], adapter=model.adapter) return state_out @df.kernel def solve_springs(x: df.tensor(df.float3), v: df.tensor(df.float3), invmass: df.tensor(float), spring_indices: df.tensor(int), spring_rest_lengths: df.tensor(float), spring_stiffness: df.tensor(float), spring_damping: df.tensor(float), dt: float, delta: df.tensor(df.float3)): tid = df.tid() i = df.load(spring_indices, tid * 2 + 0) j = df.load(spring_indices, tid * 2 + 1) ke = df.load(spring_stiffness, tid) kd = df.load(spring_damping, tid) rest = df.load(spring_rest_lengths, tid) xi = df.load(x, i) xj = df.load(x, j) vi = df.load(v, i) vj = df.load(v, j) xij = xi - xj vij = vi - vj l = length(xij) l_inv = 1.0 / l # normalized spring direction dir = xij * l_inv c = l - rest dcdt = dot(dir, vij) # damping based on relative velocity. 
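
    # XPBD-style projection of the distance constraint C = |xij| - rest:
    # each endpoint is moved along the spring direction by a share of C
    # proportional to its inverse mass (wi, wj below). `alpha` is the XPBD
    # compliance 1/(ke*dt^2); note the solve currently comments it out of
    # the denominator, making the constraint effectively rigid.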
#fs = dir * (ke * c + kd * dcdt) wi = df.load(invmass, i) wj = df.load(invmass, j) denom = wi + wj alpha = 1.0/(ke*dt*dt) multiplier = c / (denom)# + alpha) xd = dir*multiplier df.atomic_sub(delta, i, xd*wi) df.atomic_add(delta, j, xd*wj) @df.kernel def solve_tetrahedra(x: df.tensor(df.float3), v: df.tensor(df.float3), inv_mass: df.tensor(float), indices: df.tensor(int), pose: df.tensor(df.mat33), activation: df.tensor(float), materials: df.tensor(float), dt: float, relaxation: float, delta: df.tensor(df.float3)): tid = df.tid() i = df.load(indices, tid * 4 + 0) j = df.load(indices, tid * 4 + 1) k = df.load(indices, tid * 4 + 2) l = df.load(indices, tid * 4 + 3) act = df.load(activation, tid) k_mu = df.load(materials, tid * 3 + 0) k_lambda = df.load(materials, tid * 3 + 1) k_damp = df.load(materials, tid * 3 + 2) x0 = df.load(x, i) x1 = df.load(x, j) x2 = df.load(x, k) x3 = df.load(x, l) v0 = df.load(v, i) v1 = df.load(v, j) v2 = df.load(v, k) v3 = df.load(v, l) w0 = df.load(inv_mass, i) w1 = df.load(inv_mass, j) w2 = df.load(inv_mass, k) w3 = df.load(inv_mass, l) x10 = x1 - x0 x20 = x2 - x0 x30 = x3 - x0 v10 = v1 - v0 v20 = v2 - v0 v30 = v3 - v0 Ds = df.mat33(x10, x20, x30) Dm = df.load(pose, tid) inv_rest_volume = df.determinant(Dm) * 6.0 rest_volume = 1.0 / inv_rest_volume # F = Xs*Xm^-1 F = Ds * Dm f1 = df.float3(F[0, 0], F[1, 0], F[2, 0]) f2 = df.float3(F[0, 1], F[1, 1], F[2, 1]) f3 = df.float3(F[0, 2], F[1, 2], F[2, 2]) # C_sqrt tr = dot(f1, f1) + dot(f2, f2) + dot(f3, f3) r_s = df.sqrt(abs(tr - 3.0)) C = r_s if (r_s == 0.0): return if (tr < 3.0): r_s = 0.0 - r_s dCdx = F*df.transpose(Dm)*(1.0/r_s) alpha = 1.0 + k_mu / k_lambda # C_Neo # r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3)) # r_s_inv = 1.0/r_s # C = r_s # dCdx = F*df.transpose(Dm)*r_s_inv # alpha = 1.0 + k_mu / k_lambda # C_Spherical # r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3)) # r_s_inv = 1.0/r_s # C = r_s - df.sqrt(3.0) # dCdx = F*df.transpose(Dm)*r_s_inv # alpha = 1.0 # C_D #r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3)) #C = r_s*r_s - 3.0 #dCdx = F*df.transpose(Dm)*2.0 #alpha = 1.0 grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0]) grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1]) grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2]) grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0) denom = dot(grad0,grad0)*w0 + dot(grad1,grad1)*w1 + dot(grad2,grad2)*w2 + dot(grad3,grad3)*w3 multiplier = C/(denom + 1.0/(k_mu*dt*dt*rest_volume)) delta0 = grad0*multiplier delta1 = grad1*multiplier delta2 = grad2*multiplier delta3 = grad3*multiplier # hydrostatic part J = df.determinant(F) C_vol = J - alpha # dCdx = df.mat33(cross(f2, f3), cross(f3, f1), cross(f1, f2))*df.transpose(Dm) # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0]) # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1]) # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2]) # grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0) s = inv_rest_volume / 6.0 grad1 = df.cross(x20, x30) * s grad2 = df.cross(x30, x10) * s grad3 = df.cross(x10, x20) * s grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0) denom = dot(grad0, grad0)*w0 + dot(grad1, grad1)*w1 + dot(grad2, grad2)*w2 + dot(grad3, grad3)*w3 multiplier = C_vol/(denom + 1.0/(k_lambda*dt*dt*rest_volume)) delta0 = delta0 + grad0 * multiplier delta1 = delta1 + grad1 * multiplier delta2 = delta2 + grad2 * multiplier delta3 = delta3 + grad3 * multiplier # apply forces df.atomic_sub(delta, i, delta0*w0*relaxation) df.atomic_sub(delta, j, delta1*w1*relaxation) df.atomic_sub(delta, k, delta2*w2*relaxation) df.atomic_sub(delta, l, 
delta3*w3*relaxation)


@df.kernel
def solve_contacts(
    x: df.tensor(df.float3),
    v: df.tensor(df.float3),
    inv_mass: df.tensor(float),
    mu: float,
    dt: float,
    delta: df.tensor(df.float3)):

    tid = df.tid()

    x0 = df.load(x, tid)
    v0 = df.load(v, tid)
    w0 = df.load(inv_mass, tid)

    n = df.float3(0.0, 1.0, 0.0)
    c = df.dot(n, x0) - 0.01

    if (c > 0.0):
        return

    # normal
    lambda_n = c
    delta_n = n*lambda_n

    # friction
    vn = df.dot(n, v0)
    vt = v0 - n * vn

    lambda_f = df.max(mu*lambda_n, 0.0 - df.length(vt)*dt)
    delta_f = df.normalize(vt)*lambda_f

    df.atomic_add(delta, tid, delta_f - delta_n)


@df.kernel
def apply_deltas(x_orig: df.tensor(df.float3),
                 v_orig: df.tensor(df.float3),
                 x_pred: df.tensor(df.float3),
                 delta: df.tensor(df.float3),
                 dt: float,
                 x_out: df.tensor(df.float3),
                 v_out: df.tensor(df.float3)):

    tid = df.tid()

    x0 = df.load(x_orig, tid)
    xp = df.load(x_pred, tid)

    # constraint deltas
    d = df.load(delta, tid)

    x_new = xp + d
    v_new = (x_new - x0)/dt

    df.store(x_out, tid, x_new)
    df.store(v_out, tid, v_new)


class XPBDIntegrator:
    """An implicit integrator using XPBD (extended position-based dynamics)

    After constructing `Model` and `State` objects this time-integrator
    may be used to advance the simulation state forward in time.

    XPBD treats stiffness through a compliance term in the constraint
    projection, which keeps the solve stable at time-steps where explicit
    force-based integration would diverge.

    See: Macklin et al., "XPBD: Position-Based Simulation of Compliant
    Constrained Dynamics", 2016.

    Example:

        >>> integrator = df.XPBDIntegrator()
        >>>
        >>> # simulation loop
        >>> for i in range(100):
        >>>     state = integrator.forward(model, state, dt)

    """

    def __init__(self):
        pass

    def forward(self, model: Model, state_in: State, dt: float) -> State:
        """Performs a single integration step forward in time

        This method inserts a node into the PyTorch computational graph with
        references to all model and state tensors such that gradients
        can be propagated back through the simulation step.
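
        Internally the step predicts new particle positions with explicit
        integration, accumulates constraint deltas from contacts, springs,
        and tetrahedra, and then applies them via `apply_deltas` to produce
        the corrected positions and velocities of the output state.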
Args:

            model: Simulation model
            state: Simulation state at the start of the time-step
            dt: The simulation time-step (usually in seconds)

        Returns:

            The state of the system at the end of the time-step

        """

        if dflex.config.no_grad:

            # if no gradient required then do inplace update
            self._simulate(df.Tape(), model, state_in, state_in, dt)
            return state_in

        else:

            # get list of inputs and outputs for PyTorch tensor tracking
            inputs = [*state_in.flatten(), *model.flatten()]

            # allocate new output
            state_out = model.state()

            # run sim as a PyTorch op
            # NOTE: `SimulateFunc.forward` expects (integrator, model, state_in,
            # dt, substeps, mass_matrix_freq, *tensors); this call site appears
            # to predate that signature
            tensors = SimulateFunc.apply(self, model, state_in, state_out, dt, *inputs)

            return state_out

    def _simulate(self, tape, model, state_in, state_out, dt):

        with dflex.util.ScopedTimer("simulate", False):

            # alloc particle force buffer
            if (model.particle_count):
                state_out.particle_f.zero_()

                q_pred = torch.zeros_like(state_in.particle_q)
                qd_pred = torch.zeros_like(state_in.particle_qd)

            #----------------------------
            # integrate particles

            if (model.particle_count):
                tape.launch(func=integrate_particles,
                            dim=model.particle_count,
                            inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
                            outputs=[q_pred, qd_pred],
                            adapter=model.adapter)

            # contacts
            if (model.particle_count and model.ground):
                tape.launch(func=solve_contacts,
                            dim=model.particle_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.contact_mu, dt],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # damped springs
            if (model.spring_count):
                tape.launch(func=solve_springs,
                            dim=model.spring_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping, dt],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # tetrahedral FEM
            if (model.tet_count):
                tape.launch(func=solve_tetrahedra,
                            dim=model.tet_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials, dt, model.relaxation],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # apply updates
            tape.launch(func=apply_deltas,
                        dim=model.particle_count,
                        inputs=[state_in.particle_q, state_in.particle_qd, q_pred, state_out.particle_f, dt],
                        outputs=[state_out.particle_q, state_out.particle_qd],
                        adapter=model.adapter)

        return state_out
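

# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# dflex source). It shows how the XPBD integrator is driven over multiple
# steps; `model` is assumed to be a dflex `Model` built elsewhere, and only
# `model.state()` and `XPBDIntegrator.forward()` from this module are used.

def _example_xpbd_rollout(model, num_steps: int, dt: float):

    integrator = XPBDIntegrator()
    state = model.state()

    # advance the simulation, feeding each output state back in as input
    for _ in range(num_steps):
        state = integrator.forward(model, state, dt)

    return state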
97150
Python
31.329784
241
0.512527
RoboticExplorationLab/CGAC/dflex/dflex/matnn.h
#pragma once

CUDA_CALLABLE inline int dense_index(int stride, int i, int j)
{
    return i*stride + j;
}

template <bool transpose>
CUDA_CALLABLE inline int dense_index(int rows, int cols, int i, int j)
{
    if (transpose)
        return j*rows + i;
    else
        return i*cols + j;
}

#ifdef CPU

const int kNumThreadsPerBlock = 1;

template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    for (int i=0; i < m; i++)
    {
        for (int j=0; j < n; ++j)
        {
            float sum = 0.0f;

            for (int k=0; k < p; ++k)
            {
                sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
            }

            if (add)
                C[i*n + j] += sum;
            else
                C[i*n + j] = sum;
        }
    }
}

#else

const int kNumThreadsPerBlock = 256;

template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    // each thread in the block calculates an output (or more if output dim > block dim)
    for (int e=threadIdx.x; e < m*n; e += blockDim.x)
    {
        const int i = e/n;
        const int j = e%n;

        float sum = 0.0f;

        for (int k=0; k < p; ++k)
        {
            sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
        }

        if (add)
            C[i*n + j] += sum;
        else
            C[i*n + j] = sum;
    }
}

#endif

template <bool add=false>
CUDA_CALLABLE inline void dense_gemm(int m, int n, int p, int t1, int t2, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    if (t1 == 0 && t2 == 0)
        dense_gemm_impl<false, false, add>(m, n, p, A, B, C);
    else if (t1 == 1 && t2 == 0)
        dense_gemm_impl<true, false, add>(m, n, p, A, B, C);
    else if (t1 == 0 && t2 == 1)
        dense_gemm_impl<false, true, add>(m, n, p, A, B, C);
    else if (t1 == 1 && t2 == 1)
        dense_gemm_impl<true, true, add>(m, n, p, A, B, C);
}

template <bool add=false>
CUDA_CALLABLE inline void dense_gemm_batched(
    const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p,
    int t1, int t2,
    const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
    const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    // on the CPU each thread computes the whole matrix multiply
    // on the GPU each block computes the multiply with one output per-thread
    const int batch = tid()/kNumThreadsPerBlock;

    dense_gemm<add>(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch]);
}

// computes c = b^T*a*b, with a and b being stored in row-major layout
CUDA_CALLABLE inline void dense_quadratic()
{
}

// CUDA_CALLABLE inline void dense_chol(int n, const float* A, float* L)
// {
//     // for each column
//     for (int j=0; j < n; ++j)
//     {
//         for (int i=j; i < n; ++i)
//         {
//             L[dense_index(n, i, j)] = A[dense_index(n, i, j)];
//         }
//
//         for (int k = 0; k < j; ++k)
//         {
//             const float p = L[dense_index(n, j, k)];
//
//             for (int i=j; i < n; ++i)
//             {
//                 L[dense_index(n, i, j)] -= p*L[dense_index(n, i, k)];
//             }
//         }
//
//         // scale
//         const float d = L[dense_index(n, j, j)];
//         const float s = 1.0f/sqrtf(d);
//
//         for (int i=j; i < n; ++i)
//         {
//             L[dense_index(n, i, j)] *= s;
//         }
//     }
// }

void CUDA_CALLABLE inline dense_chol(int n, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
    for (int j=0; j < n; ++j)
    {
        float s = A[dense_index(n, j, j)] + regularization[j];

        for (int k=0; k < j; ++k)
        {
            float r = L[dense_index(n, j, k)];
            s -= r*r;
        }

        s = sqrtf(s);
        const float invS = 1.0f/s;

        L[dense_index(n, j, j)] = s;

        for (int i=j+1; i < n; ++i)
        {
            s = A[dense_index(n, i, j)];

            for (int k=0; k < j; ++k)
            {
                s -= L[dense_index(n, i, k)]*L[dense_index(n, j, k)];
            }

            L[dense_index(n, i, j)] = s*invS;
        }
    }
}

void CUDA_CALLABLE inline dense_chol_batched(const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
    const int batch = tid();

    const int n = A_dim[batch];
    const int offset = A_start[batch];

    dense_chol(n, A + offset, regularization + n*batch, L + offset);
}

// Solves (L*L^T)x = b given the Cholesky factor L
CUDA_CALLABLE inline void dense_subs(int n, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ x)
{
    // forward substitution
    for (int i=0; i < n; ++i)
    {
        float s = b[i];

        for (int j=0; j < i; ++j)
        {
            s -= L[dense_index(n, i, j)]*x[j];
        }

        x[i] = s/L[dense_index(n, i, i)];
    }

    // backward substitution
    for (int i=n-1; i >= 0; --i)
    {
        float s = x[i];

        for (int j=i+1; j < n; ++j)
        {
            s -= L[dense_index(n, j, i)]*x[j];
        }

        x[i] = s/L[dense_index(n, i, i)];
    }
}

CUDA_CALLABLE inline void dense_solve(int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
    dense_subs(n, L, b, x);
}

CUDA_CALLABLE inline void dense_solve_batched(
    const int* __restrict__ b_start, const int* A_start, const int* A_dim,
    const float* __restrict__ A, const float* __restrict__ L,
    const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
    const int batch = tid();

    dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], NULL, x + b_start[batch]);
}

CUDA_CALLABLE inline void print_matrix(const char* name, int m, int n, const float* data)
{
    printf("%s = [", name);

    for (int i=0; i < m; ++i)
    {
        for (int j=0; j < n; ++j)
        {
            printf("%f ", data[dense_index(n, i, j)]);
        }

        printf(";\n");
    }

    printf("]\n");
}

// adjoint methods
CUDA_CALLABLE inline void adj_dense_gemm(
    int m, int n, int p, int t1, int t2, const float* A, const float* B, float* C,
    int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2, float* adj_A, float* adj_B, const float* adj_C)
{
    // print_matrix("A", m, p, A);
    // print_matrix("B", p, n, B);
    // printf("t1: %d t2: %d\n", t1, t2);

    if (t1)
    {
        dense_gemm<true>(p, m, n, 0, 1, B, adj_C, adj_A);
        dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
    }
    else
    {
        dense_gemm<true>(m, p, n, 0, int(!t2), adj_C, B, adj_A);
        dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
    }
}

CUDA_CALLABLE inline void adj_dense_gemm_batched(
    const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p,
    int t1, int t2,
    const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
    const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C,
    // adj
    int* __restrict__ adj_m, int* __restrict__ adj_n, int* __restrict__ adj_p,
    int adj_t1, int adj_t2,
    int* __restrict__ adj_A_start, int* __restrict__ adj_B_start, int* __restrict__ adj_C_start,
    float* __restrict__ adj_A, float* __restrict__ adj_B, const float* __restrict__ adj_C)
{
    const int batch = tid()/kNumThreadsPerBlock;

    adj_dense_gemm(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch],
                   0, 0, 0, 0, 0, adj_A+A_start[batch], adj_B+B_start[batch], adj_C+C_start[batch]);
}

CUDA_CALLABLE inline void adj_dense_chol(
    int n, const float* A, const float* __restrict__ regularization, float* L,
    int adj_n, const float* adj_A, const float* __restrict__ adj_regularization, float* adj_L)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}

CUDA_CALLABLE inline void adj_dense_chol_batched(
    const int* __restrict__ A_start, const int* __restrict__ A_dim,
    const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L,
    const int* __restrict__ adj_A_start, const int* __restrict__ adj_A_dim,
    const float* __restrict__ adj_A, const float* __restrict__ adj_regularization, float* __restrict__ adj_L)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}

CUDA_CALLABLE inline void adj_dense_subs(
    int n, const float* L, const float* b, float* x,
    int adj_n, const float* adj_L, const float* adj_b, float* adj_x)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}

CUDA_CALLABLE inline void adj_dense_solve(
    int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b,
    float* __restrict__ tmp, const float* __restrict__ x,
    int adj_n, float* __restrict__ adj_A, float* __restrict__ adj_L, float* __restrict__ adj_b,
    float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
    for (int i=0; i < n; ++i)
    {
        tmp[i] = 0.0f;
    }

    dense_subs(n, L, adj_x, tmp);

    for (int i=0; i < n; ++i)
    {
        adj_b[i] += tmp[i];
    }

    //dense_subs(n, L, adj_x, adj_b);

    // A* = -adj_b*x^T
    for (int i=0; i < n; ++i)
    {
        for (int j=0; j < n; ++j)
        {
            adj_A[dense_index(n, i, j)] += -tmp[i]*x[j];
        }
    }
}

CUDA_CALLABLE inline void adj_dense_solve_batched(
    const int* __restrict__ b_start, const int* A_start, const int* A_dim,
    const float* __restrict__ A, const float* __restrict__ L,
    const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x,
    // adj
    int* __restrict__ adj_b_start, int* __restrict__ adj_A_start, int* __restrict__ adj_A_dim,
    float* __restrict__ adj_A, float* __restrict__ adj_L, float* __restrict__ adj_b,
    float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
    const int batch = tid();

    adj_dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], tmp + b_start[batch], x + b_start[batch],
                    0, adj_A + A_start[batch], adj_L + A_start[batch], adj_b + b_start[batch], tmp + b_start[batch], adj_x + b_start[batch]);
}
10,723
C
29.379603
202
0.531847
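Note on the adjoint above: `adj_dense_solve` backpropagates through x = A^{-1} b using the standard rule for a symmetric A: reuse the same Cholesky factor to solve y = A^{-1} adj_x, then accumulate adj_b += y and adj_A -= y x^T. A minimal NumPy sketch (not part of the repository) that checks this rule against finite differences:

import numpy as np

rng = np.random.default_rng(0)
n = 4
M = rng.standard_normal((n, n))
A = M @ M.T + n * np.eye(n)          # symmetric positive definite, as dense_chol assumes
b = rng.standard_normal(n)

L = np.linalg.cholesky(A)            # analogue of dense_chol (with zero regularization)
x = np.linalg.solve(A, b)            # analogue of dense_subs applied to L

adj_x = rng.standard_normal(n)       # incoming gradient dLoss/dx
y = np.linalg.solve(A, adj_x)        # one extra solve with the same factor
adj_b = y
adj_A = -np.outer(y, x)              # the "A* = -adj_b*x^T" rule from adj_dense_solve

# finite-difference check, perturbing the symmetric pair A[0,1], A[1,0]
eps = 1e-6
dA = np.zeros_like(A); dA[0, 1] = dA[1, 0] = eps
fd = (adj_x @ np.linalg.solve(A + dA, b) - adj_x @ np.linalg.solve(A - dA, b)) / (2 * eps)
print(fd, adj_A[0, 1] + adj_A[1, 0])   # should agree closely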
RoboticExplorationLab/CGAC/dflex/dflex/__init__.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from dflex.sim import *
from dflex.render import *
from dflex.adjoint import compile
from dflex.util import *

# compiles kernels
kernel_init()
569
Python
34.624998
76
0.804921
RoboticExplorationLab/CGAC/dflex/dflex/render.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""This optional module contains a built-in renderer for the USD data
format that can be used to visualize time-sampled simulation data.

Users should create a simulation model and integrator and periodically
call :func:`UsdRenderer.update()` to write time-sampled simulation data to the USD stage.

Example:

    >>> # construct a new USD stage
    >>> stage = Usd.Stage.CreateNew("my_stage.usda")
    >>> renderer = df.render.UsdRenderer(model, stage)
    >>>
    >>> time = 0.0
    >>>
    >>> for i in range(100):
    >>>
    >>>     # update simulation here
    >>>     # ....
    >>>
    >>>     # update renderer
    >>>     renderer.update(state, time)
    >>>     time += dt
    >>>
    >>> # write stage to file
    >>> stage.Save()

Note:
    You must have the Pixar USD bindings installed to use this module.
    Please see https://developer.nvidia.com/usd to obtain precompiled
    USD binaries and installation instructions.
"""

try:
    from pxr import Usd, UsdGeom, Gf, Sdf
except ModuleNotFoundError:
    print("No pxr package")

import dflex.sim
import dflex.util

import math


def _usd_add_xform(prim):

    prim.ClearXformOpOrder()

    t = prim.AddTranslateOp()
    r = prim.AddOrientOp()
    s = prim.AddScaleOp()


def _usd_set_xform(xform, transform, scale, time):

    xform_ops = xform.GetOrderedXformOps()

    pos = tuple(transform[0])
    rot = tuple(transform[1])

    xform_ops[0].Set(Gf.Vec3d(pos), time)
    xform_ops[1].Set(Gf.Quatf(rot[3], rot[0], rot[1], rot[2]), time)
    xform_ops[2].Set(Gf.Vec3d(scale), time)


# transforms a cylinder such that it connects the two points pos0, pos1
def _compute_segment_xform(pos0, pos1):

    mid = (pos0 + pos1) * 0.5
    height = (pos1 - pos0).GetLength()

    dir = (pos1 - pos0) / height

    rot = Gf.Rotation()
    rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir))

    scale = Gf.Vec3f(1.0, 1.0, height)

    return (mid, Gf.Quath(rot.GetQuat()), scale)


class UsdRenderer:
    """A USD renderer
    """

    def __init__(self, model: dflex.model.Model, stage):
        """Construct a UsdRenderer object

        Args:
            model: A simulation model
            stage (Usd.Stage): A USD stage (either in memory or on disk)
        """

        self.stage = stage
        self.model = model

        self.draw_points = True
        self.draw_springs = False
        self.draw_triangles = False

        if (stage.GetPrimAtPath("/root")):
            stage.RemovePrim("/root")

        self.root = UsdGeom.Xform.Define(stage, '/root')

        # add sphere instancer for particles
        self.particle_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("particle_instancer"))

        self.particle_instancer_sphere = UsdGeom.Sphere.Define(stage, self.particle_instancer.GetPath().AppendChild("sphere"))
        self.particle_instancer_sphere.GetRadiusAttr().Set(model.particle_radius)

        self.particle_instancer.CreatePrototypesRel().SetTargets([self.particle_instancer_sphere.GetPath()])
        self.particle_instancer.CreateProtoIndicesAttr().Set([0] * model.particle_count)

        # add line instancer
        if (self.model.spring_count > 0):
            self.spring_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("spring_instancer"))

            self.spring_instancer_cylinder = UsdGeom.Capsule.Define(stage, self.spring_instancer.GetPath().AppendChild("cylinder"))
            self.spring_instancer_cylinder.GetRadiusAttr().Set(0.01)

            self.spring_instancer.CreatePrototypesRel().SetTargets([self.spring_instancer_cylinder.GetPath()])
            self.spring_instancer.CreateProtoIndicesAttr().Set([0] * model.spring_count)

        self.stage.SetDefaultPrim(self.root.GetPrim())

        # time codes
        try:
            self.stage.SetStartTimeCode(0.0)
            self.stage.SetEndTimeCode(0.0)
            self.stage.SetTimeCodesPerSecond(1.0)
        except:
            pass

        # add dynamic cloth mesh
        if (model.tri_count > 0):

            self.cloth_mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("cloth"))

            self.cloth_remap = {}
            self.cloth_verts = []
            self.cloth_indices = []

            # USD needs a contiguous vertex buffer, use a dict to map from simulation indices->render indices
            indices = self.model.tri_indices.flatten().tolist()

            for i in indices:

                if i not in self.cloth_remap:

                    # copy vertex
                    new_index = len(self.cloth_verts)

                    self.cloth_verts.append(self.model.particle_q[i].tolist())
                    self.cloth_indices.append(new_index)

                    self.cloth_remap[i] = new_index

                else:
                    self.cloth_indices.append(self.cloth_remap[i])

            self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts)
            self.cloth_mesh.GetFaceVertexIndicesAttr().Set(self.cloth_indices)
            self.cloth_mesh.GetFaceVertexCountsAttr().Set([3] * model.tri_count)

        else:
            self.cloth_mesh = None

        # built-in ground plane
        if (model.ground):

            size = 10.0

            mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("plane_0"))
            mesh.CreateDoubleSidedAttr().Set(True)

            points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
            normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
            counts = (4, )
            indices = [0, 1, 2, 3]

            mesh.GetPointsAttr().Set(points)
            mesh.GetNormalsAttr().Set(normals)
            mesh.GetFaceVertexCountsAttr().Set(counts)
            mesh.GetFaceVertexIndicesAttr().Set(indices)

        # add rigid bodies xform root
        for b in range(model.link_count):
            xform = UsdGeom.Xform.Define(stage, self.root.GetPath().AppendChild("body_" + str(b)))
            _usd_add_xform(xform)

        # add rigid body shapes
        for s in range(model.shape_count):

            parent_path = self.root.GetPath()
            if model.shape_body[s] >= 0:
                parent_path = parent_path.AppendChild("body_" + str(model.shape_body[s].item()))

            geo_type = model.shape_geo_type[s].item()
            geo_scale = model.shape_geo_scale[s].tolist()
            geo_src = model.shape_geo_src[s]

            # shape transform in body frame
            X_bs = dflex.util.transform_expand(model.shape_transform[s].tolist())

            if (geo_type == dflex.sim.GEO_PLANE):

                # plane mesh
                size = 1000.0

                mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("plane_" + str(s)))
                mesh.CreateDoubleSidedAttr().Set(True)

                points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
                normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
                counts = (4, )
                indices = [0, 1, 2, 3]

                mesh.GetPointsAttr().Set(points)
                mesh.GetNormalsAttr().Set(normals)
                mesh.GetFaceVertexCountsAttr().Set(counts)
                mesh.GetFaceVertexIndicesAttr().Set(indices)

            elif (geo_type == dflex.sim.GEO_SPHERE):

                mesh = UsdGeom.Sphere.Define(stage, parent_path.AppendChild("sphere_" + str(s)))
                mesh.GetRadiusAttr().Set(geo_scale[0])

                _usd_add_xform(mesh)
                _usd_set_xform(mesh, X_bs, (1.0, 1.0, 1.0), 0.0)

            elif (geo_type == dflex.sim.GEO_CAPSULE):

                mesh = UsdGeom.Capsule.Define(stage, parent_path.AppendChild("capsule_" + str(s)))
                mesh.GetRadiusAttr().Set(geo_scale[0])
                mesh.GetHeightAttr().Set(geo_scale[1] * 2.0)

                # geometry transform w.r.t shape, convert USD geometry to physics engine convention
                X_sg = dflex.util.transform((0.0, 0.0, 0.0), dflex.util.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
                X_bg = dflex.util.transform_multiply(X_bs, X_sg)

                _usd_add_xform(mesh)
                _usd_set_xform(mesh, X_bg, (1.0, 1.0, 1.0), 0.0)

            elif (geo_type == dflex.sim.GEO_BOX):

                mesh = UsdGeom.Cube.Define(stage, parent_path.AppendChild("box_" + str(s)))
                #mesh.GetSizeAttr().Set((geo_scale[0], geo_scale[1], geo_scale[2]))

                _usd_add_xform(mesh)
                _usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)

            elif (geo_type == dflex.sim.GEO_MESH):

                mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("mesh_" + str(s)))
                mesh.GetPointsAttr().Set(geo_src.vertices)
                mesh.GetFaceVertexIndicesAttr().Set(geo_src.indices)
                mesh.GetFaceVertexCountsAttr().Set([3] * int(len(geo_src.indices) / 3))

                _usd_add_xform(mesh)
                _usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)

            elif (geo_type == dflex.sim.GEO_SDF):
                pass

    def update(self, state: dflex.model.State, time: float):
        """Update the USD stage with latest simulation data

        Args:
            state: Current state of the simulation
            time: The current time to update at in seconds
        """

        try:
            self.stage.SetEndTimeCode(time)
        except:
            pass

        # convert to list
        if self.model.particle_count:

            particle_q = state.particle_q.tolist()
            particle_orientations = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * self.model.particle_count

            self.particle_instancer.GetPositionsAttr().Set(particle_q, time)
            self.particle_instancer.GetOrientationsAttr().Set(particle_orientations, time)

        # update cloth
        if (self.cloth_mesh):

            for k, v in self.cloth_remap.items():
                self.cloth_verts[v] = particle_q[k]

            self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts, time)

        # update springs
        if (self.model.spring_count > 0):

            line_positions = []
            line_rotations = []
            line_scales = []

            for i in range(self.model.spring_count):

                index0 = self.model.spring_indices[i * 2 + 0]
                index1 = self.model.spring_indices[i * 2 + 1]

                pos0 = particle_q[index0]
                pos1 = particle_q[index1]

                (pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))

                line_positions.append(pos)
                line_rotations.append(rot)
                line_scales.append(scale)

            self.spring_instancer.GetPositionsAttr().Set(line_positions, time)
            self.spring_instancer.GetOrientationsAttr().Set(line_rotations, time)
            self.spring_instancer.GetScalesAttr().Set(line_scales, time)

        # rigids
        for b in range(self.model.link_count):

            #xform = UsdGeom.Xform.Define(self.stage, self.root.GetPath().AppendChild("body_" + str(b)))
            node = UsdGeom.Xform(self.stage.GetPrimAtPath(self.root.GetPath().AppendChild("body_" + str(b))))

            # unpack rigid spatial_transform
            X_sb = dflex.util.transform_expand(state.body_X_sc[b].tolist())

            _usd_set_xform(node, X_sb, (1.0, 1.0, 1.0), time)

    def add_sphere(self, pos: tuple, radius: float, name: str, time: float=0.0):
        """Debug helper to add a sphere for visualization

        Args:
            pos: The position of the sphere
            radius: The radius of the sphere
            name: A name for the USD prim on the stage
            time: The time to set the transform at
        """

        sphere_path = self.root.GetPath().AppendChild(name)
        sphere = UsdGeom.Sphere.Get(self.stage, sphere_path)
        if not sphere:
            sphere = UsdGeom.Sphere.Define(self.stage, sphere_path)

        sphere.GetRadiusAttr().Set(radius, time)

        mat = Gf.Matrix4d()
        mat.SetIdentity()
        mat.SetTranslateOnly(Gf.Vec3d(pos))

        op = sphere.MakeMatrixXform()
        op.Set(mat, time)

    def add_box(self, pos: tuple, extents: float, name: str, time: float=0.0):
        """Debug helper to add a box for visualization

        Args:
            pos: The position of the box
            extents: The half-extents of the box
            name: A name for the USD prim on the stage
            time: The time to set the transform at
        """

        sphere_path = self.root.GetPath().AppendChild(name)
        sphere = UsdGeom.Cube.Get(self.stage, sphere_path)
        if not sphere:
            sphere = UsdGeom.Cube.Define(self.stage, sphere_path)

        #sphere.GetSizeAttr().Set((extents[0]*2.0, extents[1]*2.0, extents[2]*2.0), time)

        mat = Gf.Matrix4d()
        mat.SetIdentity()
        mat.SetScale(extents)
        mat.SetTranslateOnly(Gf.Vec3d(pos))

        op = sphere.MakeMatrixXform()
        op.Set(mat, time)

    def add_mesh(self, name: str, path: str, transform, scale, time: float):

        ref_path = "/root/" + name

        ref = UsdGeom.Xform.Get(self.stage, ref_path)
        if not ref:
            ref = UsdGeom.Xform.Define(self.stage, ref_path)
            ref.GetPrim().GetReferences().AddReference(path)
            _usd_add_xform(ref)

        # update transform
        _usd_set_xform(ref, transform, scale, time)

    def add_line_list(self, vertices, color, time, name, radius):
        """Debug helper to add a line list as a set of capsules

        Args:
            vertices: The vertices of the line list, 2 per-segment
            color: The color of the lines
            time: The time to update at
            name: A name for the USD prim on the stage
            radius: The radius of the capsules
        """

        num_lines = int(len(vertices)/2)

        if (num_lines < 1):
            return

        # look up rope point instancer
        instancer_path = self.root.GetPath().AppendChild(name)
        instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)

        if not instancer:
            instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
            instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
            instancer_capsule.GetRadiusAttr().Set(radius)
            instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
            instancer.CreatePrimvar("displayColor", Sdf.ValueTypeNames.Float3Array, "constant", 1)

        line_positions = []
        line_rotations = []
        line_scales = []
        # line_colors = []

        for i in range(num_lines):

            pos0 = vertices[i*2+0]
            pos1 = vertices[i*2+1]

            (pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))

            line_positions.append(pos)
            line_rotations.append(rot)
            line_scales.append(scale)
            #line_colors.append(Gf.Vec3f((float(i)/num_lines, 0.5, 0.5)))

        instancer.GetPositionsAttr().Set(line_positions, time)
        instancer.GetOrientationsAttr().Set(line_rotations, time)
        instancer.GetScalesAttr().Set(line_scales, time)
        instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)
        # instancer.GetPrimvar("displayColor").Set(line_colors, time)

    def add_line_strip(self, vertices: dflex.sim.List[dflex.sim.Vec3], color: tuple, time: float, name: str, radius: float=0.01):
        """Debug helper to add a line strip as a connected list of capsules

        Args:
            vertices: The vertices of the line-strip
            color: The color of the line
            time: The time to update at
            name: A name for the USD prim on the stage
            radius: The radius of the capsules
        """

        num_lines = int(len(vertices)-1)

        if (num_lines < 1):
            return

        # look up rope point instancer
        instancer_path = self.root.GetPath().AppendChild(name)
        instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)

        if not instancer:
            instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)
            instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
            instancer_capsule.GetRadiusAttr().Set(radius)
            instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])

        line_positions = []
        line_rotations = []
        line_scales = []

        for i in range(num_lines):

            pos0 = vertices[i]
            pos1 = vertices[i+1]

            (pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))

            line_positions.append(pos)
            line_rotations.append(rot)
            line_scales.append(scale)

        instancer.GetPositionsAttr().Set(line_positions, time)
        instancer.GetOrientationsAttr().Set(line_rotations, time)
        instancer.GetScalesAttr().Set(line_scales, time)
        instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)

        instancer_capsule = UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule"))
        instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], time)
17,760
Python
34.808468
131
0.586768
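A minimal sketch of the intended render loop (not part of the repository); `model` and `state` are assumed to come from a dflex simulation built elsewhere, and `vertices` is a hypothetical list of points to visualize:

from pxr import Usd
import dflex as df

stage = Usd.Stage.CreateNew("rollout.usda")
renderer = df.render.UsdRenderer(model, stage)   # `model` built elsewhere

dt = 1.0 / 60.0
sim_time = 0.0

for i in range(120):
    # state = integrator.forward(model, state, dt)   # advance the simulation here
    renderer.update(state, sim_time)

    # overlay a debug line strip (e.g. a joint chain) as capsules
    renderer.add_line_strip(vertices, color=(1.0, 0.0, 0.0), time=sim_time, name="debug", radius=0.01)

    sim_time += dt

stage.Save()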
RoboticExplorationLab/CGAC/dflex/dflex/model.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""A module for building simulation models and state.
"""

import math
import torch
import numpy as np

from typing import Tuple
from typing import List

Vec3 = List[float]
Vec4 = List[float]
Quat = List[float]
Mat33 = List[float]
Transform = Tuple[Vec3, Quat]

from dflex.util import *

# shape geometry types
GEO_SPHERE = 0
GEO_BOX = 1
GEO_CAPSULE = 2
GEO_MESH = 3
GEO_SDF = 4
GEO_PLANE = 5
GEO_NONE = 6

# body joint types
JOINT_PRISMATIC = 0
JOINT_REVOLUTE = 1
JOINT_BALL = 2
JOINT_FIXED = 3
JOINT_FREE = 4


class Mesh:
    """Describes a triangle collision mesh for simulation

    Attributes:

        vertices (List[Vec3]): Mesh vertices
        indices (List[int]): Mesh indices
        I (Mat33): Inertia tensor of the mesh assuming density of 1.0 (around the center of mass)
        mass (float): The total mass of the body assuming density of 1.0
        com (Vec3): The center of mass of the body
    """

    def __init__(self, vertices: List[Vec3], indices: List[int]):
        """Construct a Mesh object from a triangle mesh

        The mesh center of mass and inertia tensor will automatically be
        calculated using a density of 1.0. This computation is only valid
        if the mesh is closed (two-manifold).

        Args:
            vertices: List of vertices in the mesh
            indices: List of triangle indices, 3 per-element
        """

        self.vertices = vertices
        self.indices = indices

        # compute com and inertia (using density=1.0)
        com = np.mean(vertices, 0)

        num_tris = int(len(indices) / 3)

        # compute signed inertia for each tetrahedron
        # formed with the interior point, using an order-2
        # quadrature: https://www.sciencedirect.com/science/article/pii/S0377042712001604#br000040

        weight = 0.25
        alpha = math.sqrt(5.0) / 5.0

        I = np.zeros((3, 3))
        mass = 0.0

        for i in range(num_tris):

            p = np.array(vertices[indices[i * 3 + 0]])
            q = np.array(vertices[indices[i * 3 + 1]])
            r = np.array(vertices[indices[i * 3 + 2]])

            mid = (com + p + q + r) / 4.0

            pcom = p - com
            qcom = q - com
            rcom = r - com

            Dm = np.matrix((pcom, qcom, rcom)).T
            volume = np.linalg.det(Dm) / 6.0

            # quadrature points lie on the line between the
            # centroid and each vertex of the tetrahedron
            quads = (mid + (p - mid) * alpha, mid + (q - mid) * alpha, mid + (r - mid) * alpha, mid + (com - mid) * alpha)

            for j in range(4):

                # displacement of quadrature point from COM
                d = quads[j] - com

                I += weight * volume * (length_sq(d) * np.eye(3, 3) - np.outer(d, d))
                mass += weight * volume

        self.I = I
        self.mass = mass
        self.com = com
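# A minimal usage sketch (illustrative only, not part of the original source):
# a closed unit tetrahedron with faces wound counter-clockwise when viewed from
# outside; with density 1.0 the computed mass equals the enclosed volume (1/6).
#
#   vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
#   indices = [0, 2, 1,   # bottom (z = 0)
#              0, 1, 3,   # front  (y = 0)
#              0, 3, 2,   # left   (x = 0)
#              1, 2, 3]   # slanted face
#
#   mesh = Mesh(vertices, indices)
#   print(mesh.mass)   # ~0.1667
#   print(mesh.com)    # centroid of the vertices
#   print(mesh.I)      # 3x3 inertia tensor about the center of mass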
class State:
    """The State object holds all *time-varying* data for a model.

    Time-varying data includes particle positions, velocities, rigid body states, and
    anything that is output from the integrator as derived data, e.g.: forces.

    The exact attributes depend on the contents of the model. State objects should
    generally be created using the :func:`Model.state()` function.

    Attributes:

        particle_q (torch.Tensor): Tensor of particle positions
        particle_qd (torch.Tensor): Tensor of particle velocities

        joint_q (torch.Tensor): Tensor of joint coordinates
        joint_qd (torch.Tensor): Tensor of joint velocities
        joint_act (torch.Tensor): Tensor of joint actuation values
    """

    def __init__(self):
        self.particle_count = 0
        self.link_count = 0

#     def flatten(self):
#         """Returns a list of Tensors stored by the state

#         This function is intended to be used internal-only but can be used to obtain
#         a set of all tensors owned by the state.
#         """

#         tensors = []

#         # particles
#         if (self.particle_count):
#             tensors.append(self.particle_q)
#             tensors.append(self.particle_qd)

#         # articulations
#         if (self.link_count):
#             tensors.append(self.joint_q)
#             tensors.append(self.joint_qd)
#             tensors.append(self.joint_act)

#         return tensors

    def flatten(self):
        """Returns a list of Tensors stored by the state

        This function is intended to be used internal-only but can be used to obtain
        a set of all tensors owned by the state.
        """

        tensors = []

        # build a list of all tensor attributes
        for attr, value in self.__dict__.items():
            if (torch.is_tensor(value)):
                tensors.append(value)

        return tensors


class Model:
    """Holds the definition of the simulation model

    This class holds the non-time varying description of the system, i.e.:
    all geometry, constraints, and parameters used to describe the simulation.

    Attributes:
        particle_q (torch.Tensor): Particle positions, shape [particle_count, 3], float
        particle_qd (torch.Tensor): Particle velocities, shape [particle_count, 3], float
        particle_mass (torch.Tensor): Particle mass, shape [particle_count], float
        particle_inv_mass (torch.Tensor): Particle inverse mass, shape [particle_count], float

        shape_transform (torch.Tensor): Rigid shape transforms, shape [shape_count, 7], float
        shape_body (torch.Tensor): Rigid shape body index, shape [shape_count], int
        shape_geo_type (torch.Tensor): Rigid shape geometry type, [shape_count], int
        shape_geo_src (torch.Tensor): Rigid shape geometry source, shape [shape_count], int
        shape_geo_scale (torch.Tensor): Rigid shape geometry scale, shape [shape_count, 3], float
        shape_materials (torch.Tensor): Rigid shape contact materials, shape [shape_count, 4], float

        spring_indices (torch.Tensor): Particle spring indices, shape [spring_count*2], int
        spring_rest_length (torch.Tensor): Particle spring rest length, shape [spring_count], float
        spring_stiffness (torch.Tensor): Particle spring stiffness, shape [spring_count], float
        spring_damping (torch.Tensor): Particle spring damping, shape [spring_count], float
        spring_control (torch.Tensor): Particle spring activation, shape [spring_count], float

        tri_indices (torch.Tensor): Triangle element indices, shape [tri_count*3], int
        tri_poses (torch.Tensor): Triangle element rest pose, shape [tri_count, 2, 2], float
        tri_activations (torch.Tensor): Triangle element activations, shape [tri_count], float

        edge_indices (torch.Tensor): Bending edge indices, shape [edge_count*2], int
        edge_rest_angle (torch.Tensor): Bending edge rest angle, shape [edge_count], float

        tet_indices (torch.Tensor): Tetrahedral element indices, shape [tet_count*4], int
        tet_poses (torch.Tensor): Tetrahedral rest poses, shape [tet_count, 3, 3], float
        tet_activations (torch.Tensor): Tetrahedral volumetric activations, shape [tet_count], float
        tet_materials (torch.Tensor): Tetrahedral elastic parameters in form :math:`k_{mu}, k_{lambda}, k_{damp}`, shape [tet_count, 3]

        body_X_cm (torch.Tensor): Rigid body center of mass (in local frame), shape [link_count, 7], float
        body_I_m (torch.Tensor): Rigid body inertia tensor (relative to COM), shape [link_count, 3, 3], float

        articulation_start (torch.Tensor): Articulation start offset, shape [num_articulations], int

        joint_q (torch.Tensor): Joint coordinate, shape [joint_coord_count], float
        joint_qd (torch.Tensor): Joint velocity, shape [joint_dof_count], float
        joint_type (torch.Tensor): Joint type, shape [joint_count], int
        joint_parent (torch.Tensor): Joint parent, shape [joint_count], int
        joint_X_pj (torch.Tensor): Joint transform in parent frame, shape [joint_count, 7], float
        joint_X_cm (torch.Tensor): Joint mass frame in child frame, shape [joint_count, 7], float
        joint_axis (torch.Tensor): Joint axis in child frame, shape [joint_count, 3], float
        joint_q_start (torch.Tensor): Joint coordinate offset, shape [joint_count], int
        joint_qd_start (torch.Tensor): Joint velocity offset, shape [joint_count], int
        joint_armature (torch.Tensor): Armature for each joint, shape [joint_count], float
        joint_target_ke (torch.Tensor): Joint stiffness, shape [joint_count], float
        joint_target_kd (torch.Tensor): Joint damping, shape [joint_count], float
        joint_target (torch.Tensor): Joint target, shape [joint_count], float

        particle_count (int): Total number of particles in the system
        joint_coord_count (int): Total number of joint coordinates in the system
        joint_dof_count (int): Total number of joint dofs in the system
        link_count (int): Total number of links in the system
        shape_count (int): Total number of shapes in the system
        tri_count (int): Total number of triangles in the system
        tet_count (int): Total number of tetrahedra in the system
        edge_count (int): Total number of edges in the system
        spring_count (int): Total number of springs in the system
        contact_count (int): Total number of contacts in the system

    Note:
        It is strongly recommended to use the ModelBuilder to construct a simulation rather
        than creating your own Model object directly, however it is possible to do so if
        desired.
    """

    def __init__(self, adapter):

        self.particle_q = None
        self.particle_qd = None
        self.particle_mass = None
        self.particle_inv_mass = None

        self.shape_transform = None
        self.shape_body = None
        self.shape_geo_type = None
        self.shape_geo_src = None
        self.shape_geo_scale = None
        self.shape_materials = None

        self.spring_indices = None
        self.spring_rest_length = None
        self.spring_stiffness = None
        self.spring_damping = None
        self.spring_control = None

        self.tri_indices = None
        self.tri_poses = None
        self.tri_activations = None

        self.edge_indices = None
        self.edge_rest_angle = None

        self.tet_indices = None
        self.tet_poses = None
        self.tet_activations = None
        self.tet_materials = None

        self.body_X_cm = None
        self.body_I_m = None

        self.articulation_start = None

        self.joint_q = None
        self.joint_qd = None
        self.joint_type = None
        self.joint_parent = None
        self.joint_X_pj = None
        self.joint_X_cm = None
        self.joint_axis = None
        self.joint_q_start = None
        self.joint_qd_start = None
        self.joint_armature = None
        self.joint_target_ke = None
        self.joint_target_kd = None
        self.joint_target = None

        self.particle_count = 0
        self.joint_coord_count = 0
        self.joint_dof_count = 0
        self.link_count = 0
        self.shape_count = 0
        self.tri_count = 0
        self.tet_count = 0
        self.edge_count = 0
        self.spring_count = 0
        self.contact_count = 0

        self.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)

        self.contact_distance = 0.1
        self.contact_ke = 1.e+3
        self.contact_kd = 0.0
        self.contact_kf = 1.e+3
        self.contact_mu = 0.5

        self.tri_ke = 100.0
        self.tri_ka = 100.0
        self.tri_kd = 10.0
        self.tri_kb = 100.0
        self.tri_drag = 0.0
        self.tri_lift = 0.0

        self.edge_ke = 100.0
        self.edge_kd = 0.0

        self.particle_radius = 0.1

        self.adapter = adapter

    def state(self) -> State:
        """Returns a state object for the model

        The returned state will be initialized with the initial configuration given in
        the model description.
        """

        s = State()

        s.particle_count = self.particle_count
        s.link_count = self.link_count

        #--------------------------------
        # dynamic state (input, output)

        # particles
        if (self.particle_count):
            s.particle_q = torch.clone(self.particle_q)
            s.particle_qd = torch.clone(self.particle_qd)

        # articulations
        if (self.link_count):
            s.joint_q = torch.clone(self.joint_q)
            s.joint_qd = torch.clone(self.joint_qd)
            s.joint_act = torch.zeros_like(self.joint_qd)

            s.joint_q.requires_grad = True
            s.joint_qd.requires_grad = True

        #--------------------------------
        # derived state (output only)

        if (self.particle_count):
            s.particle_f = torch.empty_like(self.particle_qd, requires_grad=True)

        if (self.link_count):

            # joints
            s.joint_qdd = torch.empty_like(self.joint_qd, requires_grad=True)
            s.joint_tau = torch.empty_like(self.joint_qd, requires_grad=True)
            s.joint_S_s = torch.empty((self.joint_dof_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

            # derived rigid body data (maximal coordinates)
            s.body_X_sc = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_X_sm = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_I_s = torch.empty((self.link_count, 6, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_v_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_a_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_f_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

            #s.body_ft_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            #s.body_f_ext_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

        return s

    def alloc_mass_matrix(self):

        if (self.link_count):

            # system matrices
            self.M = torch.zeros(self.M_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.J = torch.zeros(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.P = torch.empty(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.H = torch.empty(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)

            # zero since only upper triangle is set which can trigger NaN detection
            self.L = torch.zeros(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)

    def flatten(self):
        """Returns a list of Tensors stored by the model

        This function is intended to be used internal-only but can be used to obtain
        a set of all tensors owned by the model.
        """

        tensors = []

        # build a list of all tensor attributes
        for attr, value in self.__dict__.items():
            if (torch.is_tensor(value)):
                tensors.append(value)

        return tensors

    # builds contacts
    def collide(self, state: State):
        """Constructs a set of contacts between rigid bodies and ground

        This method performs collision detection between rigid body vertices in the
        scene and updates the model's set of contacts stored as the following attributes:

            * **contact_body0**: Tensor of ints with first rigid body index
            * **contact_body1**: Tensor of ints with second rigid body index (currently always -1 to indicate ground)
            * **contact_point0**: Tensor of Vec3 representing contact point in local frame of body0
            * **contact_dist**: Tensor of float values representing the distance to maintain
            * **contact_material**: Tensor of contact material indices

        Args:
            state: The state of the simulation at which to perform collision detection

        Note:
            Currently this method uses an 'all pairs' approach to contact generation that is
            state independent. In the future this will change and will create a node
            in the computational graph to propagate gradients as a function of state.

        Todo:
            Only ground-plane collision is currently implemented. Since the ground is static
            it is acceptable to call this method once at initialization time.
        """

        body0 = []
        body1 = []
        point = []
        dist = []
        mat = []

        def add_contact(b0, b1, t, p0, d, m):
            body0.append(b0)
            body1.append(b1)
            point.append(transform_point(t, np.array(p0)))
            dist.append(d)
            mat.append(m)

        for i in range(self.shape_count):

            # transform from shape to body
            X_bs = transform_expand(self.shape_transform[i].tolist())

            geo_type = self.shape_geo_type[i].item()

            if (geo_type == GEO_SPHERE):

                radius = self.shape_geo_scale[i][0].item()

                add_contact(self.shape_body[i], -1, X_bs, (0.0, 0.0, 0.0), radius, i)

            elif (geo_type == GEO_CAPSULE):

                radius = self.shape_geo_scale[i][0].item()
                half_width = self.shape_geo_scale[i][1].item()

                add_contact(self.shape_body[i], -1, X_bs, (-half_width, 0.0, 0.0), radius, i)
                add_contact(self.shape_body[i], -1, X_bs, (half_width, 0.0, 0.0), radius, i)

            elif (geo_type == GEO_BOX):

                edges = self.shape_geo_scale[i].tolist()

                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, ( edges[0], -edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0],  edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, ( edges[0],  edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1],  edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, ( edges[0], -edges[1],  edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0],  edges[1],  edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, ( edges[0],  edges[1],  edges[2]), 0.0, i)

            elif (geo_type == GEO_MESH):

                mesh = self.shape_geo_src[i]
                scale = self.shape_geo_scale[i]

                for v in mesh.vertices:

                    p = (v[0] * scale[0], v[1] * scale[1], v[2] * scale[2])

                    add_contact(self.shape_body[i], -1, X_bs, p, 0.0, i)

        # send to torch
        self.contact_body0 = torch.tensor(body0, dtype=torch.int32, device=self.adapter)
        self.contact_body1 = torch.tensor(body1, dtype=torch.int32, device=self.adapter)
        self.contact_point0 = torch.tensor(point, dtype=torch.float32, device=self.adapter)
        self.contact_dist = torch.tensor(dist, dtype=torch.float32, device=self.adapter)
        self.contact_material = torch.tensor(mat, dtype=torch.int32, device=self.adapter)

        self.contact_count = len(body0)
class ModelBuilder:
    """A helper class for building simulation models at runtime.

    Use the ModelBuilder to construct a simulation scene. The ModelBuilder
    is independent of PyTorch and builds the scene representation using
    standard Python data structures, this means it is not differentiable. Once
    :func:`finalize()` has been called the ModelBuilder transfers all data to
    Torch tensors and returns an object that may be used for simulation.

    Example:

        >>> import dflex as df
        >>>
        >>> builder = df.ModelBuilder()
        >>>
        >>> # anchor point (zero mass)
        >>> builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
        >>>
        >>> # build chain
        >>> for i in range(1,10):
        >>>     builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
        >>>     builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
        >>>
        >>> # create model
        >>> model = builder.finalize()

    Note:
        It is strongly recommended to use the ModelBuilder to construct a simulation rather
        than creating your own Model object directly, however it is possible to do so if
        desired.
    """

    def __init__(self):

        # particles
        self.particle_q = []
        self.particle_qd = []
        self.particle_mass = []

        # shapes
        self.shape_transform = []
        self.shape_body = []
        self.shape_geo_type = []
        self.shape_geo_scale = []
        self.shape_geo_src = []
        self.shape_materials = []

        # geometry
        self.geo_meshes = []
        self.geo_sdfs = []

        # springs
        self.spring_indices = []
        self.spring_rest_length = []
        self.spring_stiffness = []
        self.spring_damping = []
        self.spring_control = []

        # triangles
        self.tri_indices = []
        self.tri_poses = []
        self.tri_activations = []

        # edges (bending)
        self.edge_indices = []
        self.edge_rest_angle = []

        # tetrahedra
        self.tet_indices = []
        self.tet_poses = []
        self.tet_activations = []
        self.tet_materials = []

        # muscles
        self.muscle_start = []
        self.muscle_params = []
        self.muscle_activation = []
        self.muscle_links = []
        self.muscle_points = []

        # rigid bodies
        self.joint_parent = []         # index of the parent body (constant)
        self.joint_child = []          # index of the child body (constant)
        self.joint_axis = []           # joint axis in child joint frame (constant)
        self.joint_X_pj = []           # frame of joint in parent (constant)
        self.joint_X_cm = []           # frame of child com (in child coordinates) (constant)
        self.joint_q_start = []        # joint offset in the q array
        self.joint_qd_start = []       # joint offset in the qd array
        self.joint_type = []
        self.joint_armature = []
        self.joint_target_ke = []
        self.joint_target_kd = []
        self.joint_target = []
        self.joint_limit_lower = []
        self.joint_limit_upper = []
        self.joint_limit_ke = []
        self.joint_limit_kd = []

        self.joint_q = []              # generalized coordinates (input)
        self.joint_qd = []             # generalized velocities (input)
        self.joint_qdd = []            # generalized accelerations (id,fd)
        self.joint_tau = []            # generalized actuation (input)
        self.joint_u = []              # generalized total torque (fd)

        self.body_mass = []
        self.body_inertia = []
        self.body_com = []

        self.articulation_start = []

    def add_articulation(self) -> int:
        """Add an articulation object, all subsequently added links (see: :func:`add_link`)
        will belong to this articulation object. Calling this method multiple times
        'closes' any previous articulations and begins a new one.

        Returns:
            The index of the articulation
        """

        self.articulation_start.append(len(self.joint_type))
        return len(self.articulation_start)-1

    # rigids, register a rigid body and return its index.
    def add_link(
        self,
        parent : int,
        X_pj : Transform,
        axis : Vec3,
        type : int,
        armature: float=0.01,
        stiffness: float=0.0,
        damping: float=0.0,
        limit_lower: float=-1.e+3,
        limit_upper: float=1.e+3,
        limit_ke: float=100.0,
        limit_kd: float=10.0,
        com: Vec3=np.zeros(3),
        I_m: Mat33=np.zeros((3, 3)),
        m: float=0.0) -> int:
        """Adds a rigid body to the model.

        Args:
            parent: The index of the parent body
            X_pj: The location of the joint in the parent's local frame connecting this body
            axis: The joint axis
            type: The type of joint, should be one of: JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_BALL, JOINT_FIXED, or JOINT_FREE
            armature: Additional inertia around the joint axis
            stiffness: Spring stiffness that attempts to return joint to zero position
            damping: Spring damping that attempts to remove joint velocity
            limit_lower: Lower bound of the joint limit
            limit_upper: Upper bound of the joint limit
            limit_ke: Stiffness of the joint limit
            limit_kd: Damping of the joint limit
            com: The center of mass of the body w.r.t its origin
            I_m: The 3x3 inertia tensor of the body (specified relative to the center of mass)
            m: The mass of the body

        Returns:
            The index of the body in the model

        Note:
            If the mass (m) is zero then the body is treated as kinematic with no dynamics
        """

        # joint data
        self.joint_type.append(type)
        self.joint_axis.append(np.array(axis))
        self.joint_parent.append(parent)
        self.joint_X_pj.append(X_pj)

        self.joint_target_ke.append(stiffness)
        self.joint_target_kd.append(damping)
        self.joint_limit_ke.append(limit_ke)
        self.joint_limit_kd.append(limit_kd)

        self.joint_q_start.append(len(self.joint_q))
        self.joint_qd_start.append(len(self.joint_qd))

        if (type == JOINT_PRISMATIC):
            self.joint_q.append(0.0)
            self.joint_qd.append(0.0)
            self.joint_target.append(0.0)
            self.joint_armature.append(armature)
            self.joint_limit_lower.append(limit_lower)
            self.joint_limit_upper.append(limit_upper)

        elif (type == JOINT_REVOLUTE):
            self.joint_q.append(0.0)
            self.joint_qd.append(0.0)
            self.joint_target.append(0.0)
            self.joint_armature.append(armature)
            self.joint_limit_lower.append(limit_lower)
            self.joint_limit_upper.append(limit_upper)

        elif (type == JOINT_BALL):

            # quaternion
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)
            self.joint_q.append(1.0)

            # angular velocity
            self.joint_qd.append(0.0)
            self.joint_qd.append(0.0)
            self.joint_qd.append(0.0)

            # pd targets
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)

            self.joint_armature.append(armature)
            self.joint_armature.append(armature)
            self.joint_armature.append(armature)

            self.joint_limit_lower.append(limit_lower)
            self.joint_limit_lower.append(limit_lower)
            self.joint_limit_lower.append(limit_lower)
            self.joint_limit_lower.append(0.0)

            self.joint_limit_upper.append(limit_upper)
            self.joint_limit_upper.append(limit_upper)
            self.joint_limit_upper.append(limit_upper)
            self.joint_limit_upper.append(0.0)

        elif (type == JOINT_FIXED):
            pass

        elif (type == JOINT_FREE):

            # translation
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)

            # quaternion
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)
            self.joint_q.append(0.0)
            self.joint_q.append(1.0)

            # note armature for free joints should always be zero, better to modify the body inertia directly
            self.joint_armature.append(0.0)
            self.joint_armature.append(0.0)
            self.joint_armature.append(0.0)
            self.joint_armature.append(0.0)
            self.joint_armature.append(0.0)
            self.joint_armature.append(0.0)

            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)
            self.joint_target.append(0.0)

            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)
            self.joint_limit_lower.append(0.0)

            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)
            self.joint_limit_upper.append(0.0)

            # joint velocities
            for i in range(6):
                self.joint_qd.append(0.0)

        self.body_inertia.append(np.zeros((3, 3)))
        self.body_mass.append(0.0)
        self.body_com.append(np.zeros(3))

        # return index of body
        return len(self.joint_type) - 1
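    # A minimal usage sketch (illustrative only, not part of the original source):
    # a two-link pendulum; each JOINT_REVOLUTE link adds one generalized coordinate.
    # Assumes the transform() helper from dflex.util builds a (pos, quat) pair.
    #
    #   builder = ModelBuilder()
    #   builder.add_articulation()
    #   parent = -1
    #   for i in range(2):
    #       X_pj = transform((0.0, 0.0, 0.0) if i == 0 else (1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))
    #       parent = builder.add_link(parent, X_pj, axis=(0.0, 0.0, 1.0), type=JOINT_REVOLUTE, damping=0.1)
    #       builder.add_shape_capsule(parent, pos=(0.5, 0.0, 0.0), radius=0.1, half_width=0.5)
    #   print(len(builder.joint_q))   # 2 revolute coordinates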
    # muscles
    def add_muscle(self, links: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float) -> int:
        """Adds a muscle-tendon activation unit

        Args:
            links: A list of link indices for each waypoint
            positions: A list of positions of each waypoint in the link's local frame
            f0: Force scaling
            lm: Muscle length
            lt: Tendon length
            lmax: Maximally efficient muscle length

        Returns:
            The index of the muscle in the model
        """

        n = len(links)

        self.muscle_start.append(len(self.muscle_links))
        self.muscle_params.append((f0, lm, lt, lmax, pen))
        self.muscle_activation.append(0.0)

        for i in range(n):
            self.muscle_links.append(links[i])
            self.muscle_points.append(positions[i])

        # return the index of the muscle
        return len(self.muscle_start)-1

    # shapes
    def add_shape_plane(self, plane: Vec4=(0.0, 1.0, 0.0, 0.0), ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
        """Adds a plane collision shape

        Args:
            plane: The plane equation in form a*x + b*y + c*z + d = 0
            ke: The contact elastic stiffness
            kd: The contact damping stiffness
            kf: The contact friction stiffness
            mu: The coefficient of friction
        """

        self._add_shape(-1, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), GEO_PLANE, plane, None, 0.0, ke, kd, kf, mu)

    def add_shape_sphere(self, body, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
        """Adds a sphere collision shape to a link.

        Args:
            body: The index of the parent link this shape belongs to
            pos: The location of the shape with respect to the parent frame
            rot: The rotation of the shape with respect to the parent frame
            radius: The radius of the sphere
            density: The density of the shape
            ke: The contact elastic stiffness
            kd: The contact damping stiffness
            kf: The contact friction stiffness
            mu: The coefficient of friction
        """

        self._add_shape(body, pos, rot, GEO_SPHERE, (radius, 0.0, 0.0, 0.0), None, density, ke, kd, kf, mu)

    def add_shape_box(self,
                      body : int,
                      pos: Vec3=(0.0, 0.0, 0.0),
                      rot: Quat=(0.0, 0.0, 0.0, 1.0),
                      hx: float=0.5,
                      hy: float=0.5,
                      hz: float=0.5,
                      density: float=1000.0,
                      ke: float=1.e+5,
                      kd: float=1000.0,
                      kf: float=1000.0,
                      mu: float=0.5):
        """Adds a box collision shape to a link.

        Args:
            body: The index of the parent link this shape belongs to
            pos: The location of the shape with respect to the parent frame
            rot: The rotation of the shape with respect to the parent frame
            hx: The half-extents along the x-axis
            hy: The half-extents along the y-axis
            hz: The half-extents along the z-axis
            density: The density of the shape
            ke: The contact elastic stiffness
            kd: The contact damping stiffness
            kf: The contact friction stiffness
            mu: The coefficient of friction
        """

        self._add_shape(body, pos, rot, GEO_BOX, (hx, hy, hz, 0.0), None, density, ke, kd, kf, mu)

    def add_shape_capsule(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, half_width: float=0.5, density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
        """Adds a capsule collision shape to a link.

        Args:
            body: The index of the parent link this shape belongs to
            pos: The location of the shape with respect to the parent frame
            rot: The rotation of the shape with respect to the parent frame
            radius: The radius of the capsule
            half_width: The half length of the center cylinder along the x-axis
            density: The density of the shape
            ke: The contact elastic stiffness
            kd: The contact damping stiffness
            kf: The contact friction stiffness
            mu: The coefficient of friction
        """

        self._add_shape(body, pos, rot, GEO_CAPSULE, (radius, half_width, 0.0, 0.0), None, density, ke, kd, kf, mu)

    def add_shape_mesh(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), mesh: Mesh=None, scale: Vec3=(1.0, 1.0, 1.0), density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
        """Adds a triangle mesh collision shape to a link.

        Args:
            body: The index of the parent link this shape belongs to
            pos: The location of the shape with respect to the parent frame
            rot: The rotation of the shape with respect to the parent frame
            mesh: The mesh object
            scale: Scale to use for the collider
            density: The density of the shape
            ke: The contact elastic stiffness
            kd: The contact damping stiffness
            kf: The contact friction stiffness
            mu: The coefficient of friction
        """

        self._add_shape(body, pos, rot, GEO_MESH, (scale[0], scale[1], scale[2], 0.0), mesh, density, ke, kd, kf, mu)

    def _add_shape(self, body, pos, rot, type, scale, src, density, ke, kd, kf, mu):
        self.shape_body.append(body)
        self.shape_transform.append(transform(pos, rot))
        self.shape_geo_type.append(type)
        self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
        self.shape_geo_src.append(src)
        self.shape_materials.append((ke, kd, kf, mu))

        (m, I) = self._compute_shape_mass(type, scale, src, density)

        self._update_body_mass(body, m, I, np.array(pos), np.array(rot))

    # particles
    def add_particle(self, pos : Vec3, vel : Vec3, mass : float) -> int:
        """Adds a single particle to the model

        Args:
            pos: The initial position of the particle
            vel: The initial velocity of the particle
            mass: The mass of the particle

        Note:
            Set the mass equal to zero to create a 'kinematic' particle
            that is not subject to dynamics.

        Returns:
            The index of the particle in the system
        """

        self.particle_q.append(pos)
        self.particle_qd.append(vel)
        self.particle_mass.append(mass)

        return len(self.particle_q) - 1

    def add_spring(self, i : int, j, ke : float, kd : float, control: float):
        """Adds a spring between two particles in the system

        Args:
            i: The index of the first particle
            j: The index of the second particle
            ke: The elastic stiffness of the spring
            kd: The damping stiffness of the spring
            control: The actuation level of the spring

        Note:
            The spring is created with a rest-length based on the distance
            between the particles in their initial configuration.
        """

        self.spring_indices.append(i)
        self.spring_indices.append(j)
        self.spring_stiffness.append(ke)
        self.spring_damping.append(kd)
        self.spring_control.append(control)

        # compute rest length
        p = self.particle_q[i]
        q = self.particle_q[j]

        delta = np.subtract(p, q)
        l = np.sqrt(np.dot(delta, delta))

        self.spring_rest_length.append(l)

    def add_triangle(self, i : int, j : int, k : int) -> float:
        """Adds a triangular FEM element between three particles in the system.

        Triangles are modeled as viscoelastic elements with elastic stiffness and
        damping parameters specified on the model. See model.tri_ke, model.tri_kd.

        Args:
            i: The index of the first particle
            j: The index of the second particle
            k: The index of the third particle

        Return:
            The area of the triangle

        Note:
            The triangle is created with a rest pose based on the particles'
            initial configuration.

        Todo:
            * Expose elastic parameters on a per-element basis
        """

        # compute basis for 2D rest pose
        p = np.array(self.particle_q[i])
        q = np.array(self.particle_q[j])
        r = np.array(self.particle_q[k])

        qp = q - p
        rp = r - p

        # construct basis aligned with the triangle
        n = normalize(np.cross(qp, rp))
        e1 = normalize(qp)
        e2 = normalize(np.cross(n, e1))

        R = np.matrix((e1, e2))
        M = np.matrix((qp, rp))

        D = R * M.T
        inv_D = np.linalg.inv(D)

        area = np.linalg.det(D) / 2.0

        if (area < 0.0):
            print("inverted triangle element")

        self.tri_indices.append((i, j, k))
        self.tri_poses.append(inv_D.tolist())
        self.tri_activations.append(0.0)

        return area

    def add_tetrahedron(self, i: int, j: int, k: int, l: int, k_mu: float=1.e+3, k_lambda: float=1.e+3, k_damp: float=0.0) -> float:
        """Adds a tetrahedral FEM element between four particles in the system.

        Tetrahedra are modeled as viscoelastic elements with a Neo-Hookean energy
        density based on [Smith et al. 2018].

        Args:
            i: The index of the first particle
            j: The index of the second particle
            k: The index of the third particle
            l: The index of the fourth particle
            k_mu: The first elastic Lame parameter
            k_lambda: The second elastic Lame parameter
            k_damp: The element's damping stiffness

        Return:
            The volume of the tetrahedron

        Note:
            The tetrahedron is created with a rest-pose based on the particles'
            initial configuration
        """

        # compute basis for the rest pose
        p = np.array(self.particle_q[i])
        q = np.array(self.particle_q[j])
        r = np.array(self.particle_q[k])
        s = np.array(self.particle_q[l])

        qp = q - p
        rp = r - p
        sp = s - p

        Dm = np.matrix((qp, rp, sp)).T
        volume = np.linalg.det(Dm) / 6.0

        if (volume <= 0.0):
            print("inverted tetrahedral element")
        else:

            inv_Dm = np.linalg.inv(Dm)

            self.tet_indices.append((i, j, k, l))
            self.tet_poses.append(inv_Dm.tolist())
            self.tet_activations.append(0.0)
            self.tet_materials.append((k_mu, k_lambda, k_damp))

        return volume
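    # A minimal usage sketch (illustrative only, not part of the original source):
    # add_triangle() returns the rest area computed from the initial positions.
    #
    #   builder = ModelBuilder()
    #   i = builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   j = builder.add_particle((1.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   k = builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   area = builder.add_triangle(i, j, k)
    #   print(area)   # 0.5 for this right triangle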
Bending elements are designed to be between two connected triangles. Then bending energy is based of [Bridson et al. 2002]. Bending stiffness is controlled by the `model.tri_kb` parameter. Args: i: The index of the first particle j: The index of the second particle k: The index of the third particle l: The index of the fourth particle rest: The rest angle across the edge in radians, if not specified it will be computed Note: The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise winding: (i, k, l), (j, l, k). """ # compute rest angle if (rest == None): x1 = np.array(self.particle_q[i]) x2 = np.array(self.particle_q[j]) x3 = np.array(self.particle_q[k]) x4 = np.array(self.particle_q[l]) n1 = normalize(np.cross(x3 - x1, x4 - x1)) n2 = normalize(np.cross(x4 - x2, x3 - x2)) e = normalize(x4 - x3) d = np.clip(np.dot(n2, n1), -1.0, 1.0) angle = math.acos(d) sign = np.sign(np.dot(np.cross(n2, n1), e)) rest = angle * sign self.edge_indices.append((i, j, k, l)) self.edge_rest_angle.append(rest) def add_cloth_grid(self, pos: Vec3, rot: Quat, vel: Vec3, dim_x: int, dim_y: int, cell_x: float, cell_y: float, mass: float, reverse_winding: bool=False, fix_left: bool=False, fix_right: bool=False, fix_top: bool=False, fix_bottom: bool=False): """Helper to create a regular planar cloth grid Creates a rectangular grid of particles with FEM triangles and bending elements automatically. Args: pos: The position of the cloth in world space rot: The orientation of the cloth in world space vel: The velocity of the cloth in world space dim_x_: The number of rectangular cells along the x-axis dim_y: The number of rectangular cells along the y-axis cell_x: The width of each cell in the x-direction cell_y: The width of each cell in the y-direction mass: The mass of each particle reverse_winding: Flip the winding of the mesh fix_left: Make the left-most edge of particles kinematic (fixed in place) fix_right: Make the right-most edge of particles kinematic fix_top: Make the top-most edge of particles kinematic fix_bottom: Make the bottom-most edge of particles kinematic """ def grid_index(x, y, dim_x): return y * dim_x + x start_vertex = len(self.particle_q) start_tri = len(self.tri_indices) for y in range(0, dim_y + 1): for x in range(0, dim_x + 1): g = np.array((x * cell_x, y * cell_y, 0.0)) p = quat_rotate(rot, g) + pos m = mass if (x == 0 and fix_left): m = 0.0 elif (x == dim_x and fix_right): m = 0.0 elif (y == 0 and fix_bottom): m = 0.0 elif (y == dim_y and fix_top): m = 0.0 self.add_particle(p, vel, m) if (x > 0 and y > 0): if (reverse_winding): tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1), start_vertex + grid_index(x, y - 1, dim_x + 1), start_vertex + grid_index(x, y, dim_x + 1)) tri2 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1), start_vertex + grid_index(x, y, dim_x + 1), start_vertex + grid_index(x - 1, y, dim_x + 1)) self.add_triangle(*tri1) self.add_triangle(*tri2) else: tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1), start_vertex + grid_index(x, y - 1, dim_x + 1), start_vertex + grid_index(x - 1, y, dim_x + 1)) tri2 = (start_vertex + grid_index(x, y - 1, dim_x + 1), start_vertex + grid_index(x, y, dim_x + 1), start_vertex + grid_index(x - 1, y, dim_x + 1)) self.add_triangle(*tri1) self.add_triangle(*tri2) end_vertex = len(self.particle_q) end_tri = len(self.tri_indices) # bending constraints, could create these explicitly for a grid but this # is a 
good test of the adjacency structure adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri) for k, e in adj.edges.items(): # skip open edges if (e.f0 == -1 or e.f1 == -1): continue self.add_edge(e.o0, e.o1, e.v0, e.v1) # opposite 0, opposite 1, vertex 0, vertex 1 def add_cloth_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, edge_callback=None, face_callback=None): """Helper to create a cloth model from a regular triangle mesh Creates one FEM triangle element and one bending element for every face and edge in the input triangle mesh Args: pos: The position of the cloth in world space rot: The orientation of the cloth in world space scale: The scaling applied to the mesh vertices vel: The velocity of the cloth in world space vertices: A list of vertex positions indices: A list of triangle indices, 3 entries per-face density: The density per-area of the mesh edge_callback: A user callback when an edge is created face_callback: A user callback when a face is created Note: The mesh should be two-manifold. """ num_tris = int(len(indices) / 3) start_vertex = len(self.particle_q) start_tri = len(self.tri_indices) # particles for i, v in enumerate(vertices): p = quat_rotate(rot, v * scale) + pos self.add_particle(p, vel, 0.0) # triangles for t in range(num_tris): i = start_vertex + indices[t * 3 + 0] j = start_vertex + indices[t * 3 + 1] k = start_vertex + indices[t * 3 + 2] if (face_callback): face_callback(i, j, k) area = self.add_triangle(i, j, k) # add area fraction to particles if (area > 0.0): self.particle_mass[i] += density * area / 3.0 self.particle_mass[j] += density * area / 3.0 self.particle_mass[k] += density * area / 3.0 end_vertex = len(self.particle_q) end_tri = len(self.tri_indices) adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri) # bend constraints for k, e in adj.edges.items(): # skip open edges if (e.f0 == -1 or e.f1 == -1): continue if (edge_callback): edge_callback(e.f0, e.f1) self.add_edge(e.o0, e.o1, e.v0, e.v1) def add_soft_grid(self, pos: Vec3, rot: Quat, vel: Vec3, dim_x: int, dim_y: int, dim_z: int, cell_x: float, cell_y: float, cell_z: float, density: float, k_mu: float, k_lambda: float, k_damp: float, fix_left: bool=False, fix_right: bool=False, fix_top: bool=False, fix_bottom: bool=False): """Helper to create a rectangular tetrahedral FEM grid Creates a regular grid of FEM tetrahedra and surface triangles. Useful, for example, to create beams and sheets. Each hexahedral cell is decomposed into 5 tetrahedral elements.
Args: pos: The position of the solid in world space rot: The orientation of the solid in world space vel: The velocity of the solid in world space dim_x: The number of rectangular cells along the x-axis dim_y: The number of rectangular cells along the y-axis dim_z: The number of rectangular cells along the z-axis cell_x: The width of each cell in the x-direction cell_y: The width of each cell in the y-direction cell_z: The width of each cell in the z-direction density: The density of the solid k_mu: The first elastic Lame parameter k_lambda: The second elastic Lame parameter k_damp: The damping stiffness fix_left: Make the left-most edge of particles kinematic (fixed in place) fix_right: Make the right-most edge of particles kinematic fix_top: Make the top-most edge of particles kinematic fix_bottom: Make the bottom-most edge of particles kinematic """ start_vertex = len(self.particle_q) mass = cell_x * cell_y * cell_z * density for z in range(dim_z + 1): for y in range(dim_y + 1): for x in range(dim_x + 1): v = np.array((x * cell_x, y * cell_y, z * cell_z)) m = mass if (fix_left and x == 0): m = 0.0 if (fix_right and x == dim_x): m = 0.0 if (fix_top and y == dim_y): m = 0.0 if (fix_bottom and y == 0): m = 0.0 p = quat_rotate(rot, v) + pos self.add_particle(p, vel, m) # dict of open faces faces = {} def add_face(i: int, j: int, k: int): key = tuple(sorted((i, j, k))) if key not in faces: faces[key] = (i, j, k) else: del faces[key] def add_tet(i: int, j: int, k: int, l: int): self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp) add_face(i, k, j) add_face(j, k, l) add_face(i, j, l) add_face(i, l, k) def grid_index(x, y, z): return (dim_x + 1) * (dim_y + 1) * z + (dim_x + 1) * y + x for z in range(dim_z): for y in range(dim_y): for x in range(dim_x): v0 = grid_index(x, y, z) + start_vertex v1 = grid_index(x + 1, y, z) + start_vertex v2 = grid_index(x + 1, y, z + 1) + start_vertex v3 = grid_index(x, y, z + 1) + start_vertex v4 = grid_index(x, y + 1, z) + start_vertex v5 = grid_index(x + 1, y + 1, z) + start_vertex v6 = grid_index(x + 1, y + 1, z + 1) + start_vertex v7 = grid_index(x, y + 1, z + 1) + start_vertex # flip the 5-tet decomposition in a checkerboard pattern so that diagonals on shared cell faces line up if (((x & 1) ^ (y & 1) ^ (z & 1))): add_tet(v0, v1, v4, v3) add_tet(v2, v3, v6, v1) add_tet(v5, v4, v1, v6) add_tet(v7, v6, v3, v4) add_tet(v4, v1, v6, v3) else: add_tet(v1, v2, v5, v0) add_tet(v3, v0, v7, v2) add_tet(v4, v7, v0, v5) add_tet(v6, v5, v2, v7) add_tet(v5, v2, v7, v0) # add triangles for k, v in faces.items(): self.add_triangle(v[0], v[1], v[2]) def add_soft_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, k_mu: float, k_lambda: float, k_damp: float): """Helper to create a tetrahedral model from an input tetrahedral mesh Args: pos: The position of the solid in world space rot: The orientation of the solid in world space scale: The scaling applied to the mesh vertices vel: The velocity of the solid in world space vertices: A list of vertex positions indices: A list of tetrahedron indices, 4 entries per-element density: The density per-volume of the mesh k_mu: The first elastic Lame parameter k_lambda: The second elastic Lame parameter k_damp: The damping stiffness """ num_tets = int(len(indices) / 4) start_vertex = len(self.particle_q) start_tri = len(self.tri_indices) # dict of open faces faces = {} def add_face(i, j, k): key = tuple(sorted((i, j, k))) if key not in faces: faces[key] = (i, j, k) else: del faces[key] # add particles for v in vertices: p = quat_rotate(rot, v * scale) + pos self.add_particle(p, vel, 0.0) # add tetrahedra for t in
range(num_tets): v0 = start_vertex + indices[t * 4 + 0] v1 = start_vertex + indices[t * 4 + 1] v2 = start_vertex + indices[t * 4 + 2] v3 = start_vertex + indices[t * 4 + 3] volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp) # distribute volume fraction to particles if (volume > 0.0): self.particle_mass[v0] += density * volume / 4.0 self.particle_mass[v1] += density * volume / 4.0 self.particle_mass[v2] += density * volume / 4.0 self.particle_mass[v3] += density * volume / 4.0 # build open faces add_face(v0, v2, v1) add_face(v1, v2, v3) add_face(v0, v1, v3) add_face(v0, v3, v2) # add triangles for k, v in faces.items(): try: self.add_triangle(v[0], v[1], v[2]) except np.linalg.LinAlgError: continue def compute_sphere_inertia(self, density: float, r: float) -> tuple: """Helper to compute mass and inertia of a sphere Args: density: The sphere density r: The sphere radius Returns: A tuple of (mass, inertia) with inertia specified around the origin """ v = 4.0 / 3.0 * math.pi * r * r * r m = density * v Ia = 2.0 / 5.0 * m * r * r I = np.array([[Ia, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]]) return (m, I) def compute_capsule_inertia(self, density: float, r: float, l: float) -> tuple: """Helper to compute mass and inertia of a capsule Args: density: The capsule density r: The capsule radius l: The capsule length (full width of the interior cylinder) Returns: A tuple of (mass, inertia) with inertia specified around the origin """ ms = density * (4.0 / 3.0) * math.pi * r * r * r mc = density * math.pi * r * r * l # total mass m = ms + mc # adapted from ODE Ia = mc * (0.25 * r * r + (1.0 / 12.0) * l * l) + ms * (0.4 * r * r + 0.375 * r * l + 0.25 * l * l) Ib = (mc * 0.5 + ms * 0.4) * r * r I = np.array([[Ib, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]]) return (m, I) def compute_box_inertia(self, density: float, w: float, h: float, d: float) -> tuple: """Helper to compute mass and inertia of a box Args: density: The box density w: The box width along the x-axis h: The box height along the y-axis d: The box depth along the z-axis Returns: A tuple of (mass, inertia) with inertia specified around the origin """ v = w * h * d m = density * v Ia = 1.0 / 12.0 * m * (h * h + d * d) Ib = 1.0 / 12.0 * m * (w * w + d * d) Ic = 1.0 / 12.0 * m * (w * w + h * h) I = np.array([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ic]]) return (m, I) def _compute_shape_mass(self, type, scale, src, density): if density == 0: # zero density means fixed return 0, np.zeros((3, 3)) if (type == GEO_SPHERE): return self.compute_sphere_inertia(density, scale[0]) elif (type == GEO_BOX): return self.compute_box_inertia(density, scale[0] * 2.0, scale[1] * 2.0, scale[2] * 2.0) elif (type == GEO_CAPSULE): return self.compute_capsule_inertia(density, scale[0], scale[1] * 2.0) elif (type == GEO_MESH): #todo: non-uniform scale of inertia tensor s = scale[0] # eventually want to compute moment of inertia for mesh. 
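# note: under a uniform scale factor s, mass scales with volume (s^3) while the
# inertia tensor scales as mass times length squared (s^5), hence the powers below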
return (density * src.mass * s * s * s, density * src.I * s * s * s * s * s) # incrementally updates rigid body mass with additional mass and inertia expressed at a point local to the body def _update_body_mass(self, i, m, I, p, q): if (i == -1): return # find new COM new_mass = self.body_mass[i] + m if new_mass == 0.0: # no mass return new_com = (self.body_com[i] * self.body_mass[i] + p * m) / new_mass # shift inertia to new COM com_offset = new_com - self.body_com[i] shape_offset = new_com - p new_inertia = transform_inertia(self.body_mass[i], self.body_inertia[i], com_offset, quat_identity()) + transform_inertia( m, I, shape_offset, q) self.body_mass[i] = new_mass self.body_inertia[i] = new_inertia self.body_com[i] = new_com # returns a finalized model given the description def finalize(self, adapter: str) -> Model: """Convert this builder object to a concrete model for simulation. After building simulation elements, this method should be called to transfer all data to PyTorch tensors ready for simulation. Args: adapter: The simulation adapter to use, e.g.: 'cpu', 'cuda' Returns: A model object. """ # construct particle inv masses particle_inv_mass = [] for m in self.particle_mass: if (m > 0.0): particle_inv_mass.append(1.0 / m) else: particle_inv_mass.append(0.0) #------------------------------------- # construct Model (non-time varying) data m = Model(adapter) #--------------------- # particles # state (initial) m.particle_q = torch.tensor(self.particle_q, dtype=torch.float32, device=adapter) m.particle_qd = torch.tensor(self.particle_qd, dtype=torch.float32, device=adapter) # model m.particle_mass = torch.tensor(self.particle_mass, dtype=torch.float32, device=adapter) m.particle_inv_mass = torch.tensor(particle_inv_mass, dtype=torch.float32, device=adapter) #--------------------- # collision geometry m.shape_transform = torch.tensor(np.array(transform_flatten_list(self.shape_transform)), dtype=torch.float32, device=adapter) m.shape_body = torch.tensor(self.shape_body, dtype=torch.int32, device=adapter) m.shape_geo_type = torch.tensor(self.shape_geo_type, dtype=torch.int32, device=adapter) m.shape_geo_src = self.shape_geo_src m.shape_geo_scale = torch.tensor(self.shape_geo_scale, dtype=torch.float32, device=adapter) m.shape_materials = torch.tensor(self.shape_materials, dtype=torch.float32, device=adapter) #--------------------- # springs m.spring_indices = torch.tensor(self.spring_indices, dtype=torch.int32, device=adapter) m.spring_rest_length = torch.tensor(self.spring_rest_length, dtype=torch.float32, device=adapter) m.spring_stiffness = torch.tensor(self.spring_stiffness, dtype=torch.float32, device=adapter) m.spring_damping = torch.tensor(self.spring_damping, dtype=torch.float32, device=adapter) m.spring_control = torch.tensor(self.spring_control, dtype=torch.float32, device=adapter) #--------------------- # triangles m.tri_indices = torch.tensor(self.tri_indices, dtype=torch.int32, device=adapter) m.tri_poses = torch.tensor(self.tri_poses, dtype=torch.float32, device=adapter) m.tri_activations = torch.tensor(self.tri_activations, dtype=torch.float32, device=adapter) #--------------------- # edges m.edge_indices = torch.tensor(self.edge_indices, dtype=torch.int32, device=adapter) m.edge_rest_angle = torch.tensor(self.edge_rest_angle, dtype=torch.float32, device=adapter) #--------------------- # tetrahedra m.tet_indices = torch.tensor(self.tet_indices, dtype=torch.int32, device=adapter) m.tet_poses = torch.tensor(self.tet_poses, dtype=torch.float32, device=adapter) m.tet_activations
= torch.tensor(self.tet_activations, dtype=torch.float32, device=adapter) m.tet_materials = torch.tensor(self.tet_materials, dtype=torch.float32, device=adapter) #----------------------- # muscles muscle_count = len(self.muscle_start) # close the muscle waypoint indices self.muscle_start.append(len(self.muscle_links)) m.muscle_start = torch.tensor(self.muscle_start, dtype=torch.int32, device=adapter) m.muscle_params = torch.tensor(self.muscle_params, dtype=torch.float32, device=adapter) m.muscle_links = torch.tensor(self.muscle_links, dtype=torch.int32, device=adapter) m.muscle_points = torch.tensor(np.array(self.muscle_points), dtype=torch.float32, device=adapter) m.muscle_activation = torch.tensor(self.muscle_activation, dtype=torch.float32, device=adapter) #-------------------------------------- # articulations # build 6x6 spatial inertia and COM transform body_X_cm = [] body_I_m = [] for i in range(len(self.body_inertia)): body_I_m.append(spatial_matrix_from_inertia(self.body_inertia[i], self.body_mass[i])) body_X_cm.append(transform(self.body_com[i], quat_identity())) m.body_I_m = torch.tensor(body_I_m, dtype=torch.float32, device=adapter) articulation_count = len(self.articulation_start) joint_coord_count = len(self.joint_q) joint_dof_count = len(self.joint_qd) # 'close' the start index arrays with a sentinel value self.joint_q_start.append(len(self.joint_q)) self.joint_qd_start.append(len(self.joint_qd)) self.articulation_start.append(len(self.joint_type)) # calculate total size and offsets of Jacobian and mass matrices for entire system m.J_size = 0 m.M_size = 0 m.H_size = 0 articulation_J_start = [] articulation_M_start = [] articulation_H_start = [] articulation_M_rows = [] articulation_H_rows = [] articulation_J_rows = [] articulation_J_cols = [] articulation_dof_start = [] articulation_coord_start = [] for i in range(articulation_count): first_joint = self.articulation_start[i] last_joint = self.articulation_start[i+1] first_coord = self.joint_q_start[first_joint] last_coord = self.joint_q_start[last_joint] first_dof = self.joint_qd_start[first_joint] last_dof = self.joint_qd_start[last_joint] joint_count = last_joint-first_joint dof_count = last_dof-first_dof coord_count = last_coord-first_coord articulation_J_start.append(m.J_size) articulation_M_start.append(m.M_size) articulation_H_start.append(m.H_size) articulation_dof_start.append(first_dof) articulation_coord_start.append(first_coord) # bit of data duplication here, but will leave it as such for clarity articulation_M_rows.append(joint_count*6) articulation_H_rows.append(dof_count) articulation_J_rows.append(joint_count*6) articulation_J_cols.append(dof_count) m.J_size += 6*joint_count*dof_count m.M_size += 6*joint_count*6*joint_count m.H_size += dof_count*dof_count m.articulation_joint_start = torch.tensor(self.articulation_start, dtype=torch.int32, device=adapter) # matrix offsets for batched gemm m.articulation_J_start = torch.tensor(articulation_J_start, dtype=torch.int32, device=adapter) m.articulation_M_start = torch.tensor(articulation_M_start, dtype=torch.int32, device=adapter) m.articulation_H_start = torch.tensor(articulation_H_start, dtype=torch.int32, device=adapter) m.articulation_M_rows = torch.tensor(articulation_M_rows, dtype=torch.int32, device=adapter) m.articulation_H_rows = torch.tensor(articulation_H_rows, dtype=torch.int32, device=adapter) m.articulation_J_rows = torch.tensor(articulation_J_rows, dtype=torch.int32, device=adapter) m.articulation_J_cols = torch.tensor(articulation_J_cols, 
dtype=torch.int32, device=adapter) m.articulation_dof_start = torch.tensor(articulation_dof_start, dtype=torch.int32, device=adapter) m.articulation_coord_start = torch.tensor(articulation_coord_start, dtype=torch.int32, device=adapter) # state (initial) m.joint_q = torch.tensor(self.joint_q, dtype=torch.float32, device=adapter) m.joint_qd = torch.tensor(self.joint_qd, dtype=torch.float32, device=adapter) # model m.joint_type = torch.tensor(self.joint_type, dtype=torch.int32, device=adapter) m.joint_parent = torch.tensor(self.joint_parent, dtype=torch.int32, device=adapter) m.joint_X_pj = torch.tensor(transform_flatten_list(self.joint_X_pj), dtype=torch.float32, device=adapter) m.joint_X_cm = torch.tensor(transform_flatten_list(body_X_cm), dtype=torch.float32, device=adapter) m.joint_axis = torch.tensor(self.joint_axis, dtype=torch.float32, device=adapter) m.joint_q_start = torch.tensor(self.joint_q_start, dtype=torch.int32, device=adapter) m.joint_qd_start = torch.tensor(self.joint_qd_start, dtype=torch.int32, device=adapter) # dynamics properties m.joint_armature = torch.tensor(self.joint_armature, dtype=torch.float32, device=adapter) m.joint_target = torch.tensor(self.joint_target, dtype=torch.float32, device=adapter) m.joint_target_ke = torch.tensor(self.joint_target_ke, dtype=torch.float32, device=adapter) m.joint_target_kd = torch.tensor(self.joint_target_kd, dtype=torch.float32, device=adapter) m.joint_limit_lower = torch.tensor(self.joint_limit_lower, dtype=torch.float32, device=adapter) m.joint_limit_upper = torch.tensor(self.joint_limit_upper, dtype=torch.float32, device=adapter) m.joint_limit_ke = torch.tensor(self.joint_limit_ke, dtype=torch.float32, device=adapter) m.joint_limit_kd = torch.tensor(self.joint_limit_kd, dtype=torch.float32, device=adapter) # counts m.particle_count = len(self.particle_q) m.articulation_count = articulation_count m.joint_coord_count = joint_coord_count m.joint_dof_count = joint_dof_count m.muscle_count = muscle_count m.link_count = len(self.joint_type) m.shape_count = len(self.shape_geo_type) m.tri_count = len(self.tri_poses) m.tet_count = len(self.tet_poses) m.edge_count = len(self.edge_rest_angle) m.spring_count = len(self.spring_rest_length) m.contact_count = 0 # store refs to geometry m.geo_meshes = self.geo_meshes m.geo_sdfs = self.geo_sdfs # enable ground plane m.ground = True m.enable_tri_collisions = False m.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter) # allocate space for mass / jacobian matrices m.alloc_mass_matrix() return m
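# A minimal usage sketch of the builder above (illustrative only; it assumes the
# enclosing class is dflex's ModelBuilder and that quat_identity() is in scope,
# as elsewhere in this module):
#
#   builder = ModelBuilder()
#
#   # a 16x16-cell cloth with the top row of particles pinned (mass = 0)
#   builder.add_cloth_grid(pos=(0.0, 2.0, 0.0), rot=quat_identity(), vel=(0.0, 0.0, 0.0),
#                          dim_x=16, dim_y=16, cell_x=0.1, cell_y=0.1, mass=0.1, fix_top=True)
#
#   # transfer all builder data to PyTorch tensors ready for simulation
#   model = builder.finalize(adapter='cpu')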
71,080
Python
36.809043
206
0.562085
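A quick numeric check of the inertia helpers above (a sketch, assuming the formulas as written): for a unit cube the box inertia reduces to m / 6 on each diagonal.

import numpy as np

density, w, h, d = 1000.0, 1.0, 1.0, 1.0
m = density * w * h * d                 # 1000.0
Ia = 1.0 / 12.0 * m * (h * h + d * d)   # matches compute_box_inertia's diagonal term
assert np.isclose(Ia, m / 6.0)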
RoboticExplorationLab/CGAC/dflex/dflex/adjoint.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import os import imp import ast import math import inspect import typing import weakref import numpy as np import torch import torch.utils.cpp_extension import dflex.config import copy # Todo #----- # # [ ] Unary ops (e.g.: -) # [ ] Inplace ops (e.g.: +=, -=) # [ ] Conditionals # [ ] Loops (unrolled) # [ ] Auto-gen PyTorch operator # [ ] CUDA kernel code gen + dynamic compilation # ----- operators = {} functions = {} cuda_functions = {} kernels = {} #---------------------- # built-in types class float3: def __init__(self): x = 0.0 y = 0.0 z = 0.0 class float4: def __init__(self): x = 0.0 y = 0.0 z = 0.0 w = 0.0 class quat: def __init__(self): x = 0.0 y = 0.0 z = 0.0 w = 1.0 class mat22: def __init__(self): pass class mat33: def __init__(self): pass class spatial_vector: def __init__(self): pass class spatial_matrix: def __init__(self): pass class spatial_transform: def __init__(self): pass class void: def __init__(self): pass class tensor: def __init__(self, type): self.type = type self.requires_grad = True self.__name__ = "tensor<" + type.__name__ + ">" #---------------------- # register built-in function def builtin(key): def insert(func): func.key = key func.prefix = "df::" functions[key] = func return func return insert #--------------------------------- # built-in operators +,-,*,/ @builtin("add") class AddFunc: @staticmethod def value_type(args): return args[0].type @builtin("sub") class SubFunc: @staticmethod def value_type(args): return args[0].type @builtin("mod") class ModFunc: @staticmethod def value_type(args): return args[0].type @builtin("mul") class MulFunc: @staticmethod def value_type(args): # todo: encode type operator type globally if (args[0].type == mat33 and args[1].type == float3): return float3 if (args[0].type == spatial_matrix and args[1].type == spatial_vector): return spatial_vector else: return args[0].type @builtin("div") class DivFunc: @staticmethod def value_type(args): return args[0].type #---------------------- # map operator nodes to builtin operators[ast.Add] = "add" operators[ast.Sub] = "sub" operators[ast.Mult] = "mul" operators[ast.Div] = "div" operators[ast.FloorDiv] = "div" operators[ast.Mod] = "mod" operators[ast.Gt] = ">" operators[ast.Lt] = "<" operators[ast.GtE] = ">=" operators[ast.LtE] = "<=" operators[ast.Eq] = "==" operators[ast.NotEq] = "!=" #---------------------- # built-in functions @builtin("min") class MinFunc: @staticmethod def value_type(args): return float @builtin("max") class MaxFunc: @staticmethod def value_type(args): return float @builtin("leaky_max") class LeakyMaxFunc: @staticmethod def value_type(args): return float @builtin("leaky_min") class LeakyMinFunc: @staticmethod def value_type(args): return float @builtin("clamp") class ClampFunc: @staticmethod def value_type(args): return float @builtin("step") class StepFunc: @staticmethod def value_type(args): return float @builtin("nonzero") class NonZeroFunc: @staticmethod def value_type(args): return float @builtin("sign") class SignFunc: @staticmethod def value_type(args): return float @builtin("abs") class AbsFunc: @staticmethod def value_type(args): return 
float @builtin("sin") class SinFunc: @staticmethod def value_type(args): return float @builtin("cos") class CosFunc: @staticmethod def value_type(args): return float @builtin("acos") class ACosFunc: @staticmethod def value_type(args): return float @builtin("sqrt") class SqrtFunc: @staticmethod def value_type(args): return float @builtin("dot") class DotFunc: @staticmethod def value_type(args): return float @builtin("cross") class CrossFunc: @staticmethod def value_type(args): return float3 @builtin("skew") class SkewFunc: @staticmethod def value_type(args): return mat33 @builtin("length") class LengthFunc: @staticmethod def value_type(args): return float @builtin("normalize") class NormalizeFunc: @staticmethod def value_type(args): return args[0].type @builtin("select") class SelectFunc: @staticmethod def value_type(args): return args[1].type @builtin("rotate") class RotateFunc: @staticmethod def value_type(args): return float3 @builtin("rotate_inv") class RotateInvFunc: @staticmethod def value_type(args): return float3 @builtin("determinant") class DeterminantFunc: @staticmethod def value_type(args): return float @builtin("transpose") class TransposeFunc: @staticmethod def value_type(args): return args[0].type @builtin("load") class LoadFunc: @staticmethod def value_type(args): if (type(args[0].type) != tensor): raise Exception("Load input 0 must be a tensor") if (args[1].type != int): raise Exception("Load input 1 must be an int") return args[0].type.type @builtin("store") class StoreFunc: @staticmethod def value_type(args): if (type(args[0].type) != tensor): raise Exception("Store input 0 must be a tensor") if (args[1].type != int): raise Exception("Store input 1 must be an int") if (args[2].type != args[0].type.type): raise Exception("Store input 2 must be of the same type as the tensor") return None @builtin("atomic_add") class AtomicAddFunc: @staticmethod def value_type(args): return None @builtin("atomic_sub") class AtomicSubFunc: @staticmethod def value_type(args): return None @builtin("tid") class ThreadIdFunc: @staticmethod def value_type(args): return int # type constructors @builtin("float") class FloatFunc: @staticmethod def value_type(args): return float @builtin("int") class IntFunc: @staticmethod def value_type(args): return int @builtin("float3") class Float3Func: @staticmethod def value_type(args): return float3 @builtin("quat") class QuatFunc: @staticmethod def value_type(args): return quat @builtin("quat_identity") class QuatIdentityFunc: @staticmethod def value_type(args): return quat @builtin("quat_from_axis_angle") class QuatAxisAngleFunc: @staticmethod def value_type(args): return quat @builtin("mat22") class Mat22Func: @staticmethod def value_type(args): return mat22 @builtin("mat33") class Mat33Func: @staticmethod def value_type(args): return mat33 @builtin("spatial_vector") class SpatialVectorFunc: @staticmethod def value_type(args): return spatial_vector # built-in spatial operators @builtin("spatial_transform") class TransformFunc: @staticmethod def value_type(args): return spatial_transform @builtin("spatial_transform_identity") class TransformIdentity: @staticmethod def value_type(args): return spatial_transform @builtin("inverse") class Inverse: @staticmethod def value_type(args): return quat # @builtin("spatial_transform_inverse") # class TransformInverse: # @staticmethod # def value_type(args): #
return spatial_transform @builtin("spatial_transform_get_translation") class TransformGetTranslation: @staticmethod def value_type(args): return float3 @builtin("spatial_transform_get_rotation") class TransformGetRotation: @staticmethod def value_type(args): return quat @builtin("spatial_transform_multiply") class TransformMulFunc: @staticmethod def value_type(args): return spatial_transform # @builtin("spatial_transform_inertia") # class TransformInertiaFunc: # @staticmethod # def value_type(args): # return spatial_matrix @builtin("spatial_adjoint") class SpatialAdjoint: @staticmethod def value_type(args): return spatial_matrix @builtin("spatial_dot") class SpatialDotFunc: @staticmethod def value_type(args): return float @builtin("spatial_cross") class SpatialCrossFunc: @staticmethod def value_type(args): return spatial_vector @builtin("spatial_cross_dual") class SpatialCrossDualFunc: @staticmethod def value_type(args): return spatial_vector @builtin("spatial_transform_point") class SpatialTransformPointFunc: @staticmethod def value_type(args): return float3 @builtin("spatial_transform_vector") class SpatialTransformVectorFunc: @staticmethod def value_type(args): return float3 @builtin("spatial_top") class SpatialTopFunc: @staticmethod def value_type(args): return float3 @builtin("spatial_bottom") class SpatialBottomFunc: @staticmethod def value_type(args): return float3 @builtin("spatial_jacobian") class SpatialJacobian: @staticmethod def value_type(args): return None @builtin("spatial_mass") class SpatialMass: @staticmethod def value_type(args): return None @builtin("dense_gemm") class DenseGemm: @staticmethod def value_type(args): return None @builtin("dense_gemm_batched") class DenseGemmBatched: @staticmethod def value_type(args): return None @builtin("dense_chol") class DenseChol: @staticmethod def value_type(args): return None @builtin("dense_chol_batched") class DenseCholBatched: @staticmethod def value_type(args): return None @builtin("dense_subs") class DenseSubs: @staticmethod def value_type(args): return None @builtin("dense_solve") class DenseSolve: @staticmethod def value_type(args): return None @builtin("dense_solve_batched") class DenseSolveBatched: @staticmethod def value_type(args): return None # helpers @builtin("index") class IndexFunc: @staticmethod def value_type(args): return float @builtin("print") class PrintFunc: @staticmethod def value_type(args): return None class Var: def __init__(adj, label, type, requires_grad=False, constant=None): adj.label = label adj.type = type adj.requires_grad = requires_grad adj.constant = constant def __str__(adj): return adj.label def ctype(self): if (isinstance(self.type, tensor)): if self.type.type == float3: return str("df::" + self.type.type.__name__) + "*" return str(self.type.type.__name__) + "*" elif self.type == float3: return "df::" + str(self.type.__name__) else: return str(self.type.__name__) #-------------------- # Storage class for partial AST up to a return statement.
class Stmt: def __init__(self, cond, forward, forward_replay, reverse, ret_forward, ret_line): self.cond = cond # condition, can be None self.forward = forward # all forward code outside of conditional branch *since last return* self.forward_replay = forward_replay self.reverse = reverse # all reverse code including the reverse of any code in ret_forward self.ret_forward = ret_forward # all forward commands in the return statement except the actual return statement self.ret_line = ret_line # actual return statement #------------------------------------------------------------------------ # Source code transformer, this class takes a Python function and # computes its adjoint using single-pass translation of the function's AST class Adjoint: def __init__(adj, func, device='cpu'): adj.func = func adj.device = device adj.symbols = {} # map from symbols to adjoint variables adj.variables = [] # list of local variables (in order) adj.args = [] # list of function arguments (in order) adj.cond = None # condition variable if in branch adj.return_var = None # return type for function or kernel # build AST from function object adj.source = inspect.getsource(func) adj.tree = ast.parse(adj.source) # parse argument types arg_types = typing.get_type_hints(func) # add variables and symbol map for each argument for name, t in arg_types.items(): adj.symbols[name] = Var(name, t, False) # build ordered list of args for a in adj.tree.body[0].args.args: adj.args.append(adj.symbols[a.arg]) # primal statements (allows different statements in replay) adj.body_forward = [] adj.body_forward_replay = [] adj.body_reverse = [] adj.output = [] adj.indent_count = 0 adj.label_count = 0 # recursively evaluate function body adj.eval(adj.tree.body[0]) # code generation methods def format_template(adj, template, input_vars, output_var): # output var is always the 0th index args = [output_var] + input_vars s = template.format(*args) return s # generates a comma separated list of args def format_args(adj, prefix, indices): args = "" sep = "" for i in indices: args += sep + prefix + str(i) sep = ", " return args def add_var(adj, type=None, constant=None): index = len(adj.variables) v = Var(str(index), type=type, constant=constant) adj.variables.append(v) return v def add_constant(adj, n): output = adj.add_var(type=type(n), constant=n) #adj.add_forward("var_{} = {};".format(output, n)) return output def add_load(adj, input): output = adj.add_var(input.type) adj.add_forward("var_{} = {};".format(output, input)) adj.add_reverse("adj_{} += adj_{};".format(input, output)) return output def add_operator(adj, op, inputs): # todo: just using first input as the output type, would need some # type inference here to support things like float3 = float*float3 output = adj.add_var(inputs[0].type) transformer = operators[op.__class__] for t in transformer.forward(): adj.add_forward(adj.format_template(t, inputs, output)) for t in transformer.reverse(): adj.add_reverse(adj.format_template(t, inputs, output)) return output def add_comp(adj, op_strings, left, comps): output = adj.add_var(bool) s = "var_" + str(output) + " = " + ("(" * len(comps)) + "var_" + str(left) + " " for op, comp in zip(op_strings, comps): s += op + " var_" + str(comp) + ") " s = s.rstrip() + ";" adj.add_forward(s) return output def add_bool_op(adj, op_string, exprs): output = adj.add_var(bool) command = "var_" + str(output) + " = " + (" " + op_string + " ").join(["var_" + str(expr) for expr in exprs]) + ";" adj.add_forward(command) return output def add_call(adj, func, 
inputs, prefix='df::'): # expression (zero output), e.g.: tid() if (func.value_type(inputs) == None): forward_call = prefix + "{}({});".format(func.key, adj.format_args("var_", inputs)) adj.add_forward(forward_call) if (len(inputs)): reverse_call = prefix + "{}({}, {});".format("adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs)) adj.add_reverse(reverse_call) return None # function (one output) else: output = adj.add_var(func.value_type(inputs)) forward_call = "var_{} = ".format(output) + prefix + "{}({});".format(func.key, adj.format_args("var_", inputs)) adj.add_forward(forward_call) if (len(inputs)): reverse_call = prefix + "{}({}, {}, {});".format( "adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs), adj.format_args("adj_", [output])) adj.add_reverse(reverse_call) return output def add_return(adj, var): if (var == None): adj.add_forward("return;".format(var), "goto label{};".format(adj.label_count)) else: adj.add_forward("return var_{};".format(var), "goto label{};".format(adj.label_count)) adj.add_reverse("adj_" + str(var) + " += adj_ret;") adj.add_reverse("label{}:;".format(adj.label_count)) adj.label_count += 1 # define an if statement def begin_if(adj, cond): adj.add_forward("if (var_{}) {{".format(cond)) adj.add_reverse("}") adj.indent_count += 1 def end_if(adj, cond): adj.indent_count -= 1 adj.add_forward("}") adj.add_reverse("if (var_{}) {{".format(cond)) # define a for-loop def begin_for(adj, iter, start, end): # note that dynamic for-loops must not mutate any previous state, so we don't need to re-run them in the reverse pass adj.add_forward("for (var_{0}=var_{1}; var_{0} < var_{2}; ++var_{0}) {{".format(iter, start, end), "if (false) {") adj.add_reverse("}") adj.indent_count += 1 def end_for(adj, iter, start, end): adj.indent_count -= 1 adj.add_forward("}") adj.add_reverse("for (var_{0}=var_{2}-1; var_{0} >= var_{1}; --var_{0}) {{".format(iter, start, end)) # append a statement to the forward pass def add_forward(adj, statement, statement_replay=None): prefix = "" for i in range(adj.indent_count): prefix += "\t" adj.body_forward.append(prefix + statement) # allow for different statement in reverse kernel replay if (statement_replay): adj.body_forward_replay.append(prefix + statement_replay) else: adj.body_forward_replay.append(prefix + statement) # append a statement to the reverse pass def add_reverse(adj, statement): prefix = "" for i in range(adj.indent_count): prefix += "\t" adj.body_reverse.append(prefix + statement) def eval(adj, node): try: if (isinstance(node, ast.FunctionDef)): out = None for f in node.body: out = adj.eval(f) if 'return' in adj.symbols and adj.symbols['return'] is not None: out = adj.symbols['return'] stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "") adj.output.append(stmt) else: stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "") adj.output.append(stmt) return out elif (isinstance(node, ast.If)): # if statement if len(node.orelse) != 0: raise SyntaxError("Else statements not currently supported") if len(node.body) == 0: return None # save symbol map symbols_prev = adj.symbols.copy() # eval condition cond = adj.eval(node.test) # eval body adj.begin_if(cond) for stmt in node.body: adj.eval(stmt) adj.end_if(cond) # detect symbols with conflicting definitions (assigned inside the branch) for items in symbols_prev.items(): sym = items[0] var1 = items[1] var2 = adj.symbols[sym] if var1 != var2: # 
insert a phi function that # selects var1, var2 based on cond out = adj.add_call(functions["select"], [cond, var1, var2]) adj.symbols[sym] = out return None elif (isinstance(node, ast.Compare)): # node.left, node.ops (list of ops), node.comparators (things to compare to) # e.g. (left ops[0] node.comparators[0]) ops[1] node.comparators[1] left = adj.eval(node.left) comps = [adj.eval(comp) for comp in node.comparators] op_strings = [operators[type(op)] for op in node.ops] out = adj.add_comp(op_strings, left, comps) return out elif (isinstance(node, ast.BoolOp)): # op, expr list values (e.g. and and a list of things anded together) op = node.op if isinstance(op, ast.And): func = "&&" elif isinstance(op, ast.Or): func = "||" else: raise KeyError("Op {} is not supported".format(op)) out = adj.add_bool_op(func, [adj.eval(expr) for expr in node.values]) # import pdb # pdb.set_trace() return out elif (isinstance(node, ast.Name)): # lookup symbol, if it has already been assigned to a variable then return the existing mapping if (node.id in adj.symbols): return adj.symbols[node.id] else: raise KeyError("Referencing undefined symbol: " + str(node.id)) elif (isinstance(node, ast.Num)): # lookup constant, if it has already been assigned then return existing var # currently disabled, since assigning constant in a branch means it key = (node.n, type(node.n)) if (key in adj.symbols): return adj.symbols[key] else: out = adj.add_constant(node.n) adj.symbols[key] = out return out #out = adj.add_constant(node.n) #return out elif (isinstance(node, ast.BinOp)): # evaluate binary operator arguments left = adj.eval(node.left) right = adj.eval(node.right) name = operators[type(node.op)] func = functions[name] out = adj.add_call(func, [left, right]) return out elif (isinstance(node, ast.UnaryOp)): # evaluate unary op arguments arg = adj.eval(node.operand) out = adj.add_operator(node.op, [arg]) return out elif (isinstance(node, ast.For)): if (len(node.iter.args) != 2): raise Exception("For loop ranges must be of form range(start, end) with both start and end specified and no skip specifier.") # check if loop range is compile time constant unroll = True for a in node.iter.args: if (isinstance(a, ast.Num) == False): unroll = False break if (unroll): # constant loop, unroll start = node.iter.args[0].n end = node.iter.args[1].n for i in range(start, end): var_iter = adj.add_constant(i) adj.symbols[node.target.id] = var_iter # eval body for s in node.body: adj.eval(s) else: # dynamic loop, body must be side-effect free, i.e.: not # overwrite memory locations used by previous operations start = adj.eval(node.iter.args[0]) end = adj.eval(node.iter.args[1]) # add iterator variable iter = adj.add_var(int) adj.symbols[node.target.id] = iter adj.begin_for(iter, start, end) # eval body for s in node.body: adj.eval(s) adj.end_for(iter, start, end) elif (isinstance(node, ast.Expr)): return adj.eval(node.value) elif (isinstance(node, ast.Call)): name = None # determine if call is to a builtin (attribute), or to a user-func (name) if (isinstance(node.func, ast.Attribute)): name = node.func.attr elif (isinstance(node.func, ast.Name)): name = node.func.id # check it exists if name not in functions: raise KeyError("Could not find function {}".format(name)) if adj.device == 'cuda' and name in cuda_functions: func = cuda_functions[name] else: func = functions[name] args = [] # eval all arguments for arg in node.args: var = adj.eval(arg) args.append(var) # add var with value type from the function out = adj.add_call(func, args, 
prefix=func.prefix) return out elif (isinstance(node, ast.Subscript)): target = adj.eval(node.value) indices = [] if isinstance(node.slice.value, ast.Tuple): # handles the M[i, j] case for arg in node.slice.value.elts: var = adj.eval(arg) indices.append(var) else: # simple expression var = adj.eval(node.slice.value) indices.append(var) out = adj.add_call(functions["index"], [target, *indices]) return out elif (isinstance(node, ast.Assign)): # if adj.cond is not None: # raise SyntaxError("error, cannot assign variables in a conditional branch") # evaluate rhs out = adj.eval(node.value) # update symbol map (assumes lhs is a Name node) adj.symbols[node.targets[0].id] = out return out elif (isinstance(node, ast.Return)): cond = adj.cond # None if not in branch, else branch boolean out = adj.eval(node.value) adj.symbols['return'] = out if out is not None: # set return type of function return_var = out if adj.return_var is not None and adj.return_var.ctype() != return_var.ctype(): raise TypeError("error, function returned different types") adj.return_var = return_var adj.add_return(out) return out elif node is None: return None else: print("[WARNING] ast node of type {} not supported".format(type(node))) except Exception as e: # print error / line number lines = adj.source.splitlines() print("Error: {} while transforming node {} in func: {} at line: {} col: {}: \n {}".format(e, type(node), adj.func.__name__, node.lineno, node.col_offset, lines[max(node.lineno-1, 0)])) raise #---------------- # code generation cpu_module_header = ''' #define CPU #include "adjoint.h" using namespace df; template <typename T> T cast(torch::Tensor t) {{ return (T)(t.data_ptr()); }} ''' cuda_module_header = ''' #define CUDA #include "adjoint.h" using namespace df; template <typename T> T cast(torch::Tensor t) {{ return (T)(t.data_ptr()); }} ''' cpu_function_template = ''' {return_type} {name}_cpu_func({forward_args}) {{ {forward_body} }} void adj_{name}_cpu_func({forward_args}, {reverse_args}) {{ {reverse_body} }} ''' cuda_function_template = ''' CUDA_CALLABLE {return_type} {name}_cuda_func({forward_args}) {{ {forward_body} }} CUDA_CALLABLE void adj_{name}_cuda_func({forward_args}, {reverse_args}) {{ {reverse_body} }} ''' cuda_kernel_template = ''' __global__ void {name}_cuda_kernel_forward(int dim, {forward_args}) {{ {forward_body} }} __global__ void {name}_cuda_kernel_backward(int dim, {forward_args}, {reverse_args}) {{ {reverse_body} }} ''' cpu_kernel_template = ''' void {name}_cpu_kernel_forward({forward_args}) {{ {forward_body} }} void {name}_cpu_kernel_backward({forward_args}, {reverse_args}) {{ {reverse_body} }} ''' cuda_module_template = ''' // Python entry points void {name}_cuda_forward(int dim, {forward_args}) {{ {name}_cuda_kernel_forward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params}); //check_cuda(cudaPeekAtLastError()); //check_cuda(cudaDeviceSynchronize()); }} void {name}_cuda_backward(int dim, {forward_args}, {reverse_args}) {{ {name}_cuda_kernel_backward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params}, {reverse_params}); //check_cuda(cudaPeekAtLastError()); //check_cuda(cudaDeviceSynchronize()); }} ''' cpu_module_template = ''' // Python entry points void {name}_cpu_forward(int dim, {forward_args}) {{ for (int i=0; i < dim; ++i) {{ s_threadIdx = i; {name}_cpu_kernel_forward({forward_params}); }} }} void {name}_cpu_backward(int dim, {forward_args}, {reverse_args}) {{ for (int i=0; i < dim; ++i) {{ s_threadIdx = i; {name}_cpu_kernel_backward({forward_params}, {reverse_params}); }} }} ''' 
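# Note on the templates above: for each user kernel <name>, codegen emits
# <name>_cpu_forward / <name>_cpu_backward entry points, which loop i over
# [0, dim) on the host while setting s_threadIdx, and <name>_cuda_forward /
# <name>_cuda_backward entry points, which launch (dim + 255) / 256 blocks of
# 256 threads, each thread bounds-checked against dim in the generated kernel.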
cuda_module_header_template = ''' // Python entry points void {name}_cuda_forward(int dim, {forward_args}); void {name}_cuda_backward(int dim, {forward_args}, {reverse_args}); ''' cpu_module_header_template = ''' // Python entry points void {name}_cpu_forward(int dim, {forward_args}); void {name}_cpu_backward(int dim, {forward_args}, {reverse_args}); ''' def indent(args, stops=1): sep = "\n" for i in range(stops): sep += "\t" return sep + args.replace(", ", "," + sep) def codegen_func_forward_body(adj, device='cpu', indent=4): body = [] indent_block = " " * indent for stmt in adj.output: for f in stmt.forward: body += [f + "\n"] if stmt.cond is not None: body += ["if (" + str(stmt.cond) + ") {\n"] for l in stmt.ret_forward: body += [indent_block + l + "\n"] body += [indent_block + stmt.ret_line + "\n"] body += ["}\n"] else: for l in stmt.ret_forward: body += [l + "\n"] body += [stmt.ret_line + "\n"] break # break once unconditional return is encountered return "".join([indent_block + l for l in body]) def codegen_func_forward(adj, func_type='kernel', device='cpu'): s = "" # primal vars s += " //---------\n" s += " // primal vars\n" for var in adj.variables: if var.constant == None: s += " " + var.ctype() + " var_" + str(var.label) + ";\n" else: s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n" # forward pass s += " //---------\n" s += " // forward\n" if device == 'cpu': s += codegen_func_forward_body(adj, device=device, indent=4) elif device == 'cuda': if func_type == 'kernel': s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n" s += " if (var_idx < dim) {\n" s += codegen_func_forward_body(adj, device=device, indent=8) s += " }\n" else: s += codegen_func_forward_body(adj, device=device, indent=4) return s def codegen_func_reverse_body(adj, device='cpu', indent=4): body = [] indent_block = " " * indent for stmt in adj.output: # forward pass body += ["//---------\n"] body += ["// forward\n"] for f in stmt.forward_replay: body += [f + "\n"] if stmt.cond is not None: body += ["if (" + str(stmt.cond) + ") {\n"] for l in stmt.ret_forward: body += [indent_block + l + "\n"] # reverse pass body += [indent_block + "//---------\n"] body += [indent_block + "// reverse\n"] for l in stmt.reverse: body += [indent_block + l + "\n"] body += [indent_block + "return;\n"] body += ["}\n"] else: for l in stmt.ret_forward: body += [l + "\n"] # reverse pass body += ["//---------\n"] body += ["// reverse\n"] for l in stmt.reverse: body += [l + "\n"] body += ["return;\n"] break # break once unconditional return is encountered return "".join([indent_block + l for l in body]) def codegen_func_reverse(adj, func_type='kernel', device='cpu'): s = "" # primal vars s += " //---------\n" s += " // primal vars\n" for var in adj.variables: if var.constant == None: s += " " + var.ctype() + " var_" + str(var.label) + ";\n" else: s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n" # dual vars s += " //---------\n" s += " // dual vars\n" for var in adj.variables: s += " " + var.ctype() + " adj_" + str(var.label) + " = 0;\n" if device == 'cpu': s += codegen_func_reverse_body(adj, device=device, indent=4) elif device == 'cuda': if func_type == 'kernel': s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n" s += " if (var_idx < dim) {\n" s += codegen_func_reverse_body(adj, device=device, indent=8) s += " }\n" else: s += codegen_func_reverse_body(adj, device=device, indent=4) else: raise ValueError("Device {} not supported for 
codegen".format(device)) return s def codegen_func(adj, device='cpu'): # forward header # return_type = "void" return_type = 'void' if adj.return_var is None else adj.return_var.ctype() # s = "{} {}_forward(".format(return_type, adj.func.__name__) # sep = "" # for arg in adj.args: # if (arg.label != 'return'): # s += sep + str(arg.type.__name__) + " var_" + arg.label # sep = ", " # reverse header # s = "void {}_reverse(".format(adj.func.__name__) # return s forward_args = "" reverse_args = "" # s = "" # forward args sep = "" for arg in adj.args: forward_args += sep + arg.ctype() + " var_" + arg.label sep = ", " # reverse args sep = "" for arg in adj.args: if "*" in arg.ctype(): reverse_args += sep + arg.ctype() + " adj_" + arg.label else: reverse_args += sep + arg.ctype() + " & adj_" + arg.label sep = ", " reverse_args += sep + return_type + " & adj_ret" # reverse args # add primal version of parameters # sep = "" # for var in adj.args: # if (var.label != 'return'): # s += sep + var.ctype() + " var_" + var.label # sep = ", " # # add adjoint version of parameters # for var in adj.args: # if (var.label != 'return'): # s += sep + var.ctype() + "& adj_" + var.label # sep = ", " # # add adjoint of output # if ('return' in adj.symbols and adj.symbols['return'] != None): # s += sep + str(adj.symbols['return'].type.__name__) + " adj_" + str(adj.symbols['return']) # codegen body forward_body = codegen_func_forward(adj, func_type='function', device=device) reverse_body = codegen_func_reverse(adj, func_type='function', device=device) if device == 'cpu': template = cpu_function_template elif device == 'cuda': template = cuda_function_template else: raise ValueError("Device {} is not supported".format(device)) s = template.format(name=adj.func.__name__, return_type=return_type, forward_args=indent(forward_args), reverse_args=indent(reverse_args), forward_body=forward_body, reverse_body=reverse_body) return s def codegen_kernel(adj, device='cpu'): forward_args = "" reverse_args = "" # forward args sep = "" for arg in adj.args: forward_args += sep + arg.ctype() + " var_" + arg.label sep = ", " # reverse args sep = "" for arg in adj.args: reverse_args += sep + arg.ctype() + " adj_" + arg.label sep = ", " # codegen body forward_body = codegen_func_forward(adj, func_type='kernel', device=device) reverse_body = codegen_func_reverse(adj, func_type='kernel', device=device) # import pdb # pdb.set_trace() if device == 'cpu': template = cpu_kernel_template elif device == 'cuda': template = cuda_kernel_template else: raise ValueError("Device {} is not supported".format(device)) s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args), forward_body=forward_body, reverse_body=reverse_body) return s def codegen_module(adj, device='cpu'): forward_args = "" reverse_args = "" forward_params = "" reverse_params = "" sep = "" for arg in adj.args: if (isinstance(arg.type, tensor)): forward_args += sep + "torch::Tensor var_" + arg.label forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")" else: forward_args += sep + arg.ctype() + " var_" + arg.label forward_params += sep + "var_" + arg.label sep = ", " sep = "" for arg in adj.args: if (isinstance(arg.type, tensor)): reverse_args += sep + "torch::Tensor adj_" + arg.label reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")" else: reverse_args += sep + arg.ctype() + " adj_" + arg.label reverse_params += sep + "adj_" + arg.label sep = ", " if device == 'cpu': template = 
cpu_module_template elif device == 'cuda': template = cuda_module_template else: raise ValueError("Device {} is not supported".format(device)) s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args), forward_params=indent(forward_params, 3), reverse_params=indent(reverse_params, 3)) return s def codegen_module_decl(adj, device='cpu'): forward_args = "" reverse_args = "" forward_params = "" reverse_params = "" sep = "" for arg in adj.args: if (isinstance(arg.type, tensor)): forward_args += sep + "torch::Tensor var_" + arg.label forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")" else: forward_args += sep + arg.ctype() + " var_" + arg.label forward_params += sep + "var_" + arg.label sep = ", " sep = "" for arg in adj.args: if (isinstance(arg.type, tensor)): reverse_args += sep + "torch::Tensor adj_" + arg.label reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")" else: reverse_args += sep + arg.ctype() + " adj_" + arg.label reverse_params += sep + "adj_" + arg.label sep = ", " if device == 'cpu': template = cpu_module_header_template elif device == 'cuda': template = cuda_module_header_template else: raise ValueError("Device {} is not supported".format(device)) s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args)) return s # runs vcvars and copies back the build environment, PyTorch should really be doing this def set_build_env(): if os.name == 'nt': # VS2019 (required for PyTorch headers) vcvars_path = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\Build\\vcvars64.bat" s = '"{}" && set'.format(vcvars_path) output = os.popen(s).read() for line in output.splitlines(): pair = line.split("=", 1) if (len(pair) >= 2): os.environ[pair[0]] = pair[1] else: # nothing needed for Linux or Mac pass def import_module(module_name, path): # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path file, path, description = imp.find_module(module_name, [path]) # Close the .so file after load. with file: return imp.load_module(module_name, file, path, description) def rename(name, return_type): def func(cls): cls.__name__ = name cls.key = name cls.prefix = "" cls.return_type = return_type return cls return func user_funcs = {} user_kernels = {} def func(f): user_funcs[f.__name__] = f # adj = Adjoint(f) # print(adj.codegen_forward()) # print(adj.codegen_reverse()) # set_build_env() # include_path = os.path.dirname(os.path.realpath(__file__)) # # requires PyTorch hotfix https://github.com/pytorch/pytorch/pull/33002 # test_cuda = torch.utils.cpp_extension.load_inline('test_cuda', [cpp_template], None, ["test_forward_1", "test_backward_1"], extra_include_paths=include_path, verbose=True) # help(test_cuda) def kernel(f): # stores source and compiled entry points for a kernel (will be populated after module loads) class Kernel: def __init__(self, f): self.func = f def register(self, module): # lookup entry points based on name self.forward_cpu = eval("module." + self.func.__name__ + "_cpu_forward") self.backward_cpu = eval("module." + self.func.__name__ + "_cpu_backward") if (torch.cuda.is_available()): self.forward_cuda = eval("module." + self.func.__name__ + "_cuda_forward") self.backward_cuda = eval("module." 
+ self.func.__name__ + "_cuda_backward") k = Kernel(f) # register globally user_kernels[f.__name__] = k return k def compile(): use_cuda = torch.cuda.is_available() if not use_cuda: print("[INFO] CUDA support not found. Disabling CUDA kernel compilation.") cpp_source = "" cuda_source = "" cpp_source += cpu_module_header cuda_source += cuda_module_header # kernels entry_points = [] # functions for name, func in user_funcs.items(): adj = Adjoint(func, device='cpu') cpp_source += codegen_func(adj, device='cpu') adj = Adjoint(func, device='cuda') cuda_source += codegen_func(adj, device='cuda') # import pdb # pdb.set_trace() import copy @rename(func.__name__ + "_cpu_func", adj.return_var.type) class Func: @classmethod def value_type(cls, *args): return cls.return_type functions[func.__name__] = Func @rename(func.__name__ + "_cuda_func", adj.return_var.type) class CUDAFunc: @classmethod def value_type(cls, *args): return cls.return_type cuda_functions[func.__name__] = CUDAFunc for name, kernel in user_kernels.items(): if use_cuda: # each kernel gets an entry point in the module entry_points.append(name + "_cuda_forward") entry_points.append(name + "_cuda_backward") # each kernel gets an entry point in the module entry_points.append(name + "_cpu_forward") entry_points.append(name + "_cpu_backward") if use_cuda: adj = Adjoint(kernel.func, device='cuda') cuda_source += codegen_kernel(adj, device='cuda') cuda_source += codegen_module(adj, device='cuda') cpp_source += codegen_module_decl(adj, device='cuda') adj = Adjoint(kernel.func, device='cpu') cpp_source += codegen_kernel(adj, device='cpu') cpp_source += codegen_module(adj, device='cpu') cpp_source += codegen_module_decl(adj, device='cpu') include_path = os.path.dirname(os.path.realpath(__file__)) build_path = os.path.dirname(os.path.realpath(__file__)) + "/kernels" cache_file = build_path + "/adjoint.gen" if (os.path.exists(build_path) == False): os.mkdir(build_path) # test cache if (os.path.exists(cache_file)): f = open(cache_file, 'r') cache_string = f.read() f.close() if (cache_string == cpp_source): print("Using cached kernels") module = import_module("kernels", build_path) # register kernel methods for k in user_kernels.values(): k.register(module) return module # print("ignoring rebuild, using stale kernels") # module = import_module("kernels", build_path) # return module # cache stale, rebuild print("Rebuilding kernels") set_build_env() # debug config #module = torch.utils.cpp_extension.load_inline('kernels', [cpp_source], None, entry_points, extra_cflags=["/Zi", "/Od"], extra_ldflags=["/DEBUG"], build_directory=build_path, extra_include_paths=[include_path], verbose=True) if os.name == 'nt': cpp_flags = ["/Ox", "-DNDEBUG", "/fp:fast"] ld_flags = ["-DNDEBUG"] # cpp_flags = ["/Zi", "/Od", "/DEBUG"] # ld_flags = ["/DEBUG"] else: cpp_flags = ["-Z", "-O2", "-DNDEBUG"] ld_flags = ["-DNDEBUG"] # just use minimum to ensure compatibility cuda_flags = ['-gencode=arch=compute_35,code=compute_35'] # release config if use_cuda: module = torch.utils.cpp_extension.load_inline('kernels', cpp_sources=[cpp_source], cuda_sources=[cuda_source], functions=entry_points, extra_cflags=cpp_flags, extra_ldflags=ld_flags, extra_cuda_cflags=cuda_flags, build_directory=build_path, extra_include_paths=[include_path], verbose=True, with_pytorch_error_handling=False) else: module = torch.utils.cpp_extension.load_inline('kernels', cpp_sources=[cpp_source], cuda_sources=[], functions=entry_points, extra_cflags=cpp_flags, extra_ldflags=ld_flags,
extra_cuda_cflags=cuda_flags, build_directory=build_path, extra_include_paths=[include_path], verbose=True, with_pytorch_error_handling=False) # update cache f = open(cache_file, 'w') f.write(cpp_source) f.close() # register kernel methods for k in user_kernels.values(): k.register(module) return module #--------------------------------------------- # Helper functions for launching kernels as Torch ops def check_adapter(l, a): for t in l: if torch.is_tensor(t): assert(t.device.type == a) def check_finite(l): for t in l: if torch.is_tensor(t): assert(t.is_contiguous()) if (torch.isnan(t).any() == True): print(t) assert(torch.isnan(t).any() == False) else: assert(math.isnan(t) == False) def filter_grads(grads): """helper that takes a list of gradient tensors and makes non-outputs None as required by PyTorch when returning from a custom op """ outputs = [] for g in grads: if torch.is_tensor(g) and len(g) > 0: outputs.append(g) else: outputs.append(None) return tuple(outputs) def make_empty(outputs, device): empty = [] for o in outputs: empty.append(torch.FloatTensor().to(device)) return empty def make_contiguous(grads): ret = [] for g in grads: ret.append(g.contiguous()) return ret def copy_params(params): out = [] for p in params: if torch.is_tensor(p): c = p.clone() if c.dtype == torch.float32: c.requires_grad_() out.append(c) else: out.append(p) return out def assert_device(device, inputs): """helper that asserts that all Tensors in inputs reside on the specified device (device should be cpu or cuda). Also checks that dtypes are correct. """ for arg in inputs: if isinstance(arg, torch.Tensor): if (arg.dtype == torch.float64) or (arg.dtype == torch.float16): raise TypeError("Tensor {arg} has invalid dtype {dtype}".format(arg=arg, dtype=arg.dtype)) if device == 'cpu': if arg.is_cuda: # make sure all tensors are on the right device. Can fail silently in the CUDA kernel. 
raise TypeError("Tensor {arg} is using CUDA but was expected to be on the CPU.".format(arg=arg)) elif torch.device(device).type == 'cuda': #elif device.startswith('cuda'): if not arg.is_cuda: raise TypeError("Tensor {arg} is not on a CUDA device but was expected to be using CUDA.".format(arg=arg)) else: raise ValueError("Device {} is not supported".format(device)) def to_weak_list(s): w = [] for o in s: w.append(weakref.ref(o)) return w def to_strong_list(w): s = [] for o in w: s.append(o()) return s # standalone method to launch a kernel using PyTorch graph (skip custom tape) def launch_torch(func, dim, inputs, outputs, adapter, preserve_output=False, check_grad=False, no_grad=False): num_inputs = len(inputs) num_outputs = len(outputs) # define autograd type class TorchFunc(torch.autograd.Function): @staticmethod def forward(ctx, *args): #local_inputs = args[0:num_inputs] #local_outputs = args[num_inputs:len(args)] # save for backward #ctx.inputs = list(local_inputs) ctx.inputs = args local_outputs = [] for o in outputs: local_outputs.append(torch.zeros_like(o, requires_grad=True)) ctx.outputs = local_outputs # ensure inputs match adapter assert_device(adapter, args) # launch if adapter == 'cpu': func.forward_cpu(*[dim, *args, *ctx.outputs]) elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'): func.forward_cuda(*[dim, *args, *ctx.outputs]) ret = tuple(ctx.outputs) return ret @staticmethod def backward(ctx, *grads): # ensure grads are contiguous in memory adj_outputs = make_contiguous(grads) # alloc grads adj_inputs = alloc_grads(ctx.inputs, adapter) # if we don't need outputs then make empty tensors to skip the write local_outputs = ctx.outputs # if preserve_output == True: # local_outputs = ctx.outputs # else: # local_outputs = [] # for o in range(num_outputs): # local_outputs.append(torch.FloatTensor().to(adapter)) # print("backward") # print("--------") # print (" inputs") # for i in ctx.inputs: # print(i) # print (" outputs") # for o in ctx.outputs: # print(o) # print (" adj_inputs") # for adj_i in adj_inputs: # print(adj_i) # print (" adj_outputs") # for adj_o in adj_outputs: # print(adj_o) # launch if adapter == 'cpu': func.backward_cpu(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs]) elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'): func.backward_cuda(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs]) # filter grads replaces empty tensors / constant params with None ret = list(filter_grads(adj_inputs)) for i in range(num_outputs): ret.append(None) return tuple(ret) # run params = [*inputs] torch.set_printoptions(edgeitems=3) if (check_grad == True and no_grad == False): try: torch.autograd.gradcheck(TorchFunc.apply, params, eps=1e-2, atol=1e-3, rtol=1.e-3, raise_exception=True) except Exception as e: print(str(func.func.__name__) + " failed: " + str(e)) output = TorchFunc.apply(*params) return output class Tape: def __init__(self): self.launches = [] # dictionary mapping Tensor inputs to their adjoint self.adjoints = {} def launch(self, func, dim, inputs, outputs, adapter, preserve_output=False, skip_check_grad=False): if (dim > 0): # run kernel if adapter == 'cpu': func.forward_cpu(*[dim, *inputs, *outputs]) elif torch.device(adapter).type == 'cuda': #adapter.startswith('cuda'): func.forward_cuda(*[dim, *inputs, *outputs]) if dflex.config.verify_fp: check_adapter(inputs, adapter) check_adapter(outputs, adapter) check_finite(inputs) check_finite(outputs) # record launch if dflex.config.no_grad == False: 
self.launches.append([func, dim, inputs, outputs, adapter, preserve_output]) # optionally run grad check if dflex.config.check_grad == True and skip_check_grad == False: # copy inputs and outputs to avoid disturbing the computational graph inputs_copy = copy_params(inputs) outputs_copy = copy_params(outputs) launch_torch(func, dim, inputs_copy, outputs_copy, adapter, preserve_output, check_grad=True) def replay(self): for kernel in reversed(self.launches): func = kernel[0] dim = kernel[1] inputs = kernel[2] #outputs = to_strong_list(kernel[3]) outputs = kernel[3] adapter = kernel[4] # lookup adj_inputs adj_inputs = [] adj_outputs = [] # build input adjoints for i in inputs: if i in self.adjoints: adj_inputs.append(self.adjoints[i]) else: if torch.is_tensor(i): adj_inputs.append(self.alloc_grad(i)) else: adj_inputs.append(type(i)()) # build output adjoints for o in outputs: if o in self.adjoints: adj_outputs.append(self.adjoints[o]) else: # no output adjoint means the output wasn't used in the loss function so # allocate a zero tensor (they will still be read by the kernels) adj_outputs.append(self.alloc_grad(o)) # launch reverse if adapter == 'cpu': func.backward_cpu(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs]) elif torch.device(adapter).type == 'cuda': #elif adapter.startswith('cuda'): func.backward_cuda(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs]) if dflex.config.verify_fp: check_finite(inputs) check_finite(outputs) check_finite(adj_inputs) check_finite(adj_outputs) def reset(self): self.adjoints = {} self.launches = [] def alloc_grad(self, t): if t.dtype == torch.float32 and t.requires_grad: # zero tensor self.adjoints[t] = torch.zeros_like(t) return self.adjoints[t] else: # null tensor return torch.FloatTensor().to(t.device) # helper that given a set of inputs, will generate a set of output grad buffers def alloc_grads(inputs, adapter): """helper that generates output grad buffers for a set of inputs on the specified device. Args: inputs (iterable of Tensors, other literals): list of Tensors to generate gradient buffers for. Non-tensors are ignored. adapter (str, optional): name of torch device for storage location of allocated gradient buffers. Defaults to 'cpu'. """ grads = [] for arg in inputs: if (torch.is_tensor(arg)): if (arg.requires_grad and arg.dtype == torch.float): grads.append(torch.zeros_like(arg, device=adapter)) #grads.append(lookup_grad(arg)) else: grads.append(torch.FloatTensor().to(adapter)) else: grads.append(type(arg)()) return grads def matmul(tape, m, n, k, t1, t2, A, B, C, adapter): if (adapter == 'cpu'): threads = 1 else: threads = 256 # should match the threadblock size tape.launch( func=dflex.eval_dense_gemm, dim=threads, inputs=[ m, n, k, t1, t2, A, B, ], outputs=[ C ], adapter=adapter, preserve_output=False) def matmul_batched(tape, batch_count, m, n, k, t1, t2, A_start, B_start, C_start, A, B, C, adapter): if (adapter == 'cpu'): threads = batch_count else: threads = 256*batch_count # must match the threadblock size used in adjoint.py tape.launch( func=dflex.eval_dense_gemm_batched, dim=threads, inputs=[ m, n, k, t1, t2, A_start, B_start, C_start, A, B, ], outputs=[ C ], adapter=adapter, preserve_output=False)
61,332
Python
25.68973
229
0.535283
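The compile() routine above avoids unnecessary recompilation by writing the generated C++ source to kernels/adjoint.gen and comparing it against the freshly generated source on the next run. The following is a minimal, self-contained sketch of that source-comparison cache; load_or_rebuild and rebuild_fn are illustrative names for this sketch, not part of the dflex API.

import os

def load_or_rebuild(build_path, generated_source, rebuild_fn):
    # rebuild_fn(rebuild=bool) is a hypothetical callback that either imports
    # the previously built module (rebuild=False) or recompiles it (rebuild=True).
    cache_file = os.path.join(build_path, "adjoint.gen")
    os.makedirs(build_path, exist_ok=True)

    # cache hit: the generated source is byte-identical to the cached copy
    if os.path.exists(cache_file):
        with open(cache_file) as f:
            if f.read() == generated_source:
                print("Using cached kernels")
                return rebuild_fn(rebuild=False)

    # cache miss: rebuild, then persist the source for the next run
    print("Rebuilding kernels")
    module = rebuild_fn(rebuild=True)
    with open(cache_file, "w") as f:
        f.write(generated_source)
    return module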
RoboticExplorationLab/CGAC/dflex/docs/index.rst
Welcome to dFlex's documentation!
==================================

dFlex is a differentiable multiphysics engine for PyTorch. It is written entirely in Python and supports reverse mode differentiation w.r.t. any simulation inputs.

It includes a USD-based visualization module (:class:`dflex.render`), which can generate time-sampled USD files or update an existing stage on the fly.

Prerequisites
-------------

* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)

Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd; once downloaded, follow the instructions to add them to your PYTHONPATH environment variable.

.. toctree::
   :maxdepth: 3
   :caption: Contents:

   modules/model
   modules/sim
   modules/render

Quick Start
-----------

First ensure that the package is installed in your local Python environment (use the -e option if you will be doing development):

.. code-block::

    pip install -e dflex

Then, to use the engine, import the simulation module as follows:

.. code-block::

    import dflex

To build physical models, use the helper class :class:`dflex.model.ModelBuilder`, which can create models programmatically from Python. For example, to create a chain of particles:

.. code-block::

    builder = dflex.model.ModelBuilder()

    # anchor point (zero mass)
    builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)

    # build chain
    for i in range(1, 10):
        builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
        builder.add_spring(i - 1, i, 1.e+3, 0.0, 0)

    # add ground plane
    builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)

Once you have built your model, you must convert it to a finalized PyTorch simulation data structure using :func:`dflex.model.ModelBuilder.finalize()`:

.. code-block::

    model = builder.finalize('cpu')

The model object represents static (non-time-varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.

Time Stepping
-------------

To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers a semi-implicit integrator (a fully implicit one is planned) via the :class:`dflex.sim.SemiImplicitIntegrator` class, used as follows:

.. code-block::

    sim_dt = 1.0/60.0
    sim_steps = 100

    integrator = dflex.sim.SemiImplicitIntegrator()

    for i in range(0, sim_steps):
        state = integrator.forward(model, state, sim_dt)

Rendering
---------

To visualize the scene, dFlex supports USD-based updates via the :class:`dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage and the physical model.

.. code-block::

    import dflex.render

    stage = Usd.Stage.CreateNew("test.usda")

    renderer = dflex.render.UsdRenderer(model, stage)
    renderer.draw_points = True
    renderer.draw_springs = True
    renderer.draw_shapes = True

Each frame, the renderer should be updated with the current model state and the elapsed simulation time:

.. code-block::

    renderer.update(state, sim_time)

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
3,311
reStructuredText
27.8
228
0.700393
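Putting the quick-start snippets from docs/index.rst together, a complete particle-chain simulation might look like the sketch below. It assumes dflex and the USD Python bindings are installed, and that the finalized model exposes a state() constructor for the initial simulation state, which the docs above use in the stepping loop but do not show being created.

import dflex
import dflex.render
from pxr import Usd

builder = dflex.model.ModelBuilder()
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)   # anchor (zero mass)
for i in range(1, 10):
    builder.add_particle((float(i), 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    builder.add_spring(i - 1, i, 1.e+3, 0.0, 0)
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)

model = builder.finalize('cpu')
state = model.state()                       # assumed initial-state constructor
integrator = dflex.sim.SemiImplicitIntegrator()

stage = Usd.Stage.CreateNew("chain.usda")
renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True

sim_dt, sim_time = 1.0 / 60.0, 0.0
for _ in range(100):
    state = integrator.forward(model, state, sim_dt)
    sim_time += sim_dt
    renderer.update(state, sim_time)        # record one frame per step
stage.Save()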
RoboticExplorationLab/CGAC/dflex/docs/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../dflex')) # -- Project information ----------------------------------------------------- project = 'dFlex' copyright = '2020, NVIDIA' author = 'NVIDIA' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', # 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'autodocsumm' ] # put type hints inside the description instead of the signature (easier to read) autodoc_typehints = 'description' # document class *and* __init__ methods autoclass_content = 'both' # todo_include_todos = True intersphinx_mapping = { 'python': ("https://docs.python.org/3", None), 'numpy': ('http://docs.scipy.org/doc/numpy/', None), 'PyTorch': ('http://pytorch.org/docs/master/', None), } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_theme = "sphinx_rtd_theme" # html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = []
2,515
Python
32.105263
81
0.659245
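For reference, the Sphinx configuration above can be exercised without the sphinx-build CLI by calling Sphinx's Python entry point directly. The source and output paths below are assumptions about this repository's layout (docs/ as the source directory containing conf.py).

from sphinx.cmd.build import build_main

# equivalent to: sphinx-build -b html docs docs/_build/html
exit_code = build_main(["-b", "html", "docs", "docs/_build/html"])
print("sphinx exited with", exit_code)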
RoboticExplorationLab/CGAC/dflex/docs/modules/sim.rst
dflex.sim =========== .. currentmodule:: dflex.sim .. toctree:: :maxdepth: 2 .. automodule:: dflex.sim :members: :undoc-members: :show-inheritance:
171
reStructuredText
12.230768
28
0.567251
RoboticExplorationLab/CGAC/dflex/docs/modules/model.rst
dflex.model =========== .. currentmodule:: dflex.model .. toctree:: :maxdepth: 2 model.modelbuilder model.model model.state
151
reStructuredText
10.692307
30
0.569536
RoboticExplorationLab/CGAC/dflex/docs/modules/model.model.rst
dflex.model.Model ======================== .. autoclasssumm:: dflex.model.Model .. autoclass:: dflex.model.Model :members: :undoc-members: :show-inheritance:
173
reStructuredText
14.81818
36
0.583815
RoboticExplorationLab/CGAC/dflex/docs/modules/render.rst
dflex.render ============ .. currentmodule:: dflex.render .. toctree:: :maxdepth: 2 .. automodule:: dflex.render :members: :undoc-members: :show-inheritance:
178
reStructuredText
11.785713
31
0.595506
RoboticExplorationLab/CGAC/dflex/docs/modules/model.state.rst
dflex.model.State ======================== .. autoclasssumm:: dflex.model.State .. autoclass:: dflex.model.State :members: :undoc-members: :show-inheritance:
173
reStructuredText
14.81818
36
0.583815
RoboticExplorationLab/CGAC/dflex/docs/modules/model.modelbuilder.rst
dflex.model.ModelBuilder ======================== .. autoclasssumm:: dflex.model.ModelBuilder .. autoclass:: dflex.model.ModelBuilder :members: :undoc-members: :show-inheritance:
194
reStructuredText
16.727271
43
0.628866
RoboticExplorationLab/CGAC/utils/common.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import sys # if there's overlap between args_list and commandline input, use commandline input def solve_argv_conflict(args_list): arguments_to_be_removed = [] arguments_size = [] for argv in sys.argv[1:]: if argv.startswith('-'): size_count = 1 for i, args in enumerate(args_list): if args == argv: arguments_to_be_removed.append(args) for more_args in args_list[i+1:]: if not more_args.startswith('-'): size_count += 1 else: break arguments_size.append(size_count) break for args, size in zip(arguments_to_be_removed, arguments_size): args_index = args_list.index(args) for _ in range(size): args_list.pop(args_index) def print_error(*message): print('\033[91m', 'ERROR ', *message, '\033[0m') raise RuntimeError def print_ok(*message): print('\033[92m', *message, '\033[0m') def print_warning(*message): print('\033[93m', *message, '\033[0m') def print_info(*message): print('\033[96m', *message, '\033[0m') from datetime import datetime def get_time_stamp(): now = datetime.now() year = now.strftime('%Y') month = now.strftime('%m') day = now.strftime('%d') hour = now.strftime('%H') minute = now.strftime('%M') second = now.strftime('%S') return '{}-{}-{}-{}-{}-{}'.format(month, day, year, hour, minute, second) import argparse def parse_model_args(model_args_path): fp = open(model_args_path, 'r') model_args = eval(fp.read()) model_args = argparse.Namespace(**model_args) return model_args import torch import numpy as np import random import os def seeding(seed=0, torch_deterministic=False): print("Setting seed: {}".format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if torch_deterministic: # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False return seed
2,965
Python
31.23913
91
0.629005
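The solve_argv_conflict helper above removes a flag (and its trailing values) from a list of script-level defaults whenever the same flag also appears on the command line. One plausible wiring, not taken from this repository, is sketched below: strip the conflicting defaults, then splice the remainder back into sys.argv so argparse sees both, with the user's values untouched.

import sys
from utils.common import solve_argv_conflict   # assumes repo root on PYTHONPATH

# script-level defaults; '--seed 0' is dropped if the user also passed --seed
args_list = ['--seed', '0', '--env-name', 'AntEnv']
solve_argv_conflict(args_list)

# hand the surviving defaults plus the user's own flags to argparse
sys.argv = [sys.argv[0]] + args_list + sys.argv[1:]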
RoboticExplorationLab/CGAC/utils/torch_utils.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import timeit import math import numpy as np import gc import torch import cProfile log_output = "" def log(s): print(s) global log_output log_output = log_output + s + "\n" # short hands # torch quat/vector utils def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False): return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad) @torch.jit.script def quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) quat = torch.stack([x, y, z, w], dim=-1).view(shape) return quat @torch.jit.script def normalize(x, eps: float = 1e-9): return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1) @torch.jit.script def quat_apply(a, b): shape = b.shape a = a.reshape(-1, 4) b = b.reshape(-1, 3) xyz = a[:, :3] t = xyz.cross(b, dim=-1) * 2 return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script def quat_rotate(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a + b + c @torch.jit.script def quat_rotate_inverse(q, v): shape = q.shape q_w = q[:, -1] q_vec = q[:, :3] a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * \ torch.bmm(q_vec.view(shape[0], 1, 3), v.view( shape[0], 3, 1)).squeeze(-1) * 2.0 return a - b + c @torch.jit.script def quat_axis(q, axis=0): # type: (Tensor, int) -> Tensor basis_vec = torch.zeros(q.shape[0], 3, device=q.device) basis_vec[:, axis] = 1 return quat_rotate(q, basis_vec) @torch.jit.script def quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape) @torch.jit.script def quat_unit(a): return normalize(a) @torch.jit.script def quat_from_angle_axis(angle, axis): theta = (angle / 2).unsqueeze(-1) xyz = normalize(axis) * theta.sin() w = theta.cos() return quat_unit(torch.cat([xyz, w], dim=-1)) @torch.jit.script def normalize_angle(x): return torch.atan2(torch.sin(x), torch.cos(x)) @torch.jit.script def tf_inverse(q, t): q_inv = quat_conjugate(q) return q_inv, -quat_apply(q_inv, t) @torch.jit.script def tf_apply(q, t, v): return quat_apply(q, v) + t @torch.jit.script def tf_vector(q, v): return quat_apply(q, v) @torch.jit.script def tf_combine(q1, t1, q2, t2): return quat_mul(q1, q2), quat_apply(q1, t2) + t1 @torch.jit.script def get_basis_vector(q, v): return quat_rotate(q, v) def mem_report(): '''Report the memory usage of the tensor.storage in pytorch Both on CPUs and GPUs are reported''' def _mem_report(tensors, mem_type): '''Print the selected 
tensors of type There are two major storage types in our major concern: - GPU: tensors transferred to CUDA devices - CPU: tensors remaining on the system memory (usually unimportant) Args: - tensors: the tensors of specified type - mem_type: 'CPU' or 'GPU' in current implementation ''' total_numel = 0 total_mem = 0 visited_data = [] for tensor in tensors: if tensor.is_sparse: continue # a data_ptr indicates a memory block allocated data_ptr = tensor.storage().data_ptr() if data_ptr in visited_data: continue visited_data.append(data_ptr) numel = tensor.storage().size() total_numel += numel element_size = tensor.storage().element_size() mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte total_mem += mem element_type = type(tensor).__name__ size = tuple(tensor.size()) # print('%s\t\t%s\t\t%.2f' % ( # element_type, # size, # mem) ) print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) ) gc.collect() LEN = 65 objects = gc.get_objects() #print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') ) tensors = [obj for obj in objects if torch.is_tensor(obj)] cuda_tensors = [t for t in tensors if t.is_cuda] host_tensors = [t for t in tensors if not t.is_cuda] _mem_report(cuda_tensors, 'GPU') _mem_report(host_tensors, 'CPU') print('='*LEN) def grad_norm(params): grad_norm = 0. for p in params: if p.grad is not None: grad_norm += torch.sum(p.grad ** 2) return torch.sqrt(grad_norm) def print_leaf_nodes(grad_fn, id_set): if grad_fn is None: return if hasattr(grad_fn, 'variable'): mem_id = id(grad_fn.variable) if not(mem_id in id_set): print('is leaf:', grad_fn.variable.is_leaf) print(grad_fn.variable) id_set.add(mem_id) # print(grad_fn) for i in range(len(grad_fn.next_functions)): print_leaf_nodes(grad_fn.next_functions[i][0], id_set) def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma): c1 = torch.log(p1_sigma/p0_sigma + 1e-5) c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5)) c3 = -1.0 / 2.0 kl = c1 + c2 + c3 kl = kl.sum(dim=-1) # returning mean between all steps of sum between all actions return kl.mean()
6,536
Python
27.176724
114
0.568696
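A quick numerical check of the quaternion helpers in utils/torch_utils.py: rotating a vector and then applying the inverse rotation should recover the original vector. The import assumes the repository root is on PYTHONPATH; the quaternions use the xyzw convention, as in the code above.

import torch
from utils.torch_utils import quat_from_angle_axis, quat_rotate, quat_rotate_inverse

angle = torch.tensor([0.5])                      # radians, batch of 1
axis = torch.tensor([[0.0, 0.0, 1.0]])           # rotate about z
q = quat_from_angle_axis(angle, axis)            # shape (1, 4), xyzw

v = torch.tensor([[1.0, 0.0, 0.0]])
v_rot = quat_rotate(q, v)                        # rotate forward
v_back = quat_rotate_inverse(q, v_rot)           # rotate back
assert torch.allclose(v, v_back, atol=1e-6)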
RoboticExplorationLab/CGAC/utils/average_meter.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import torch import torch.nn as nn import numpy as np class AverageMeter(nn.Module): def __init__(self, in_shape, max_size): super(AverageMeter, self).__init__() self.max_size = max_size self.current_size = 0 self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32)) def update(self, values): size = values.size()[0] if size == 0: return new_mean = torch.mean(values.float(), dim=0) size = np.clip(size, 0, self.max_size) old_size = min(self.max_size - size, self.current_size) size_sum = old_size + size self.current_size = size_sum self.mean = (self.mean * old_size + new_mean * size) / size_sum def clear(self): self.current_size = 0 self.mean.fill_(0) def __len__(self): return self.current_size def get_mean(self): return self.mean.squeeze(0).cpu().numpy()
1,368
Python
34.102563
82
0.65424
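Minimal usage of the AverageMeter above: it keeps a running mean whose effective sample count is capped at max_size, so older data is gradually down-weighted once the cap is reached.

import torch
from utils.average_meter import AverageMeter   # assumes repo root on PYTHONPATH

meter = AverageMeter(in_shape=1, max_size=100)
meter.update(torch.ones(10, 1) * 2.0)          # 10 samples with value 2.0
meter.update(torch.ones(30, 1) * 4.0)          # 30 samples with value 4.0
print(len(meter), meter.get_mean())            # 40, mean == 3.5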
RoboticExplorationLab/CGAC/utils/load_utils.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import urdfpy import math import numpy as np import os import torch import random import xml.etree.ElementTree as ET import dflex as df def set_np_formatting(): np.set_printoptions(edgeitems=30, infstr='inf', linewidth=4000, nanstr='nan', precision=2, suppress=False, threshold=10000, formatter=None) def set_seed(seed, torch_deterministic=False): if seed == -1 and torch_deterministic: seed = 42 elif seed == -1: seed = np.random.randint(0, 10000) print("Setting seed: {}".format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if torch_deterministic: # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False return seed def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu): # add geometry for collision in collisions: origin = urdfpy.matrix_to_xyz_rpy(collision.origin) pos = origin[0:3] rot = df.rpy2quat(*origin[3:6]) geo = collision.geometry if (geo.box): builder.add_shape_box( link, pos, rot, geo.box.size[0]*0.5, geo.box.size[1]*0.5, geo.box.size[2]*0.5, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu) if (geo.sphere): builder.add_shape_sphere( link, pos, rot, geo.sphere.radius, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu) if (geo.cylinder): # cylinders in URDF are aligned with z-axis, while dFlex uses x-axis r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5) builder.add_shape_capsule( link, pos, df.quat_multiply(rot, r), geo.cylinder.radius, geo.cylinder.length*0.5, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu) if (geo.mesh): for m in geo.mesh.meshes: faces = [] vertices = [] for v in m.vertices: vertices.append(np.array(v)) for f in m.faces: faces.append(int(f[0])) faces.append(int(f[1])) faces.append(int(f[2])) mesh = df.Mesh(vertices, faces) builder.add_shape_mesh( link, pos, rot, mesh, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu) def urdf_load( builder, filename, xform, floating=False, armature=0.0, shape_ke=1.e+4, shape_kd=1.e+4, shape_kf=1.e+2, shape_mu=0.25, limit_ke=100.0, limit_kd=1.0): robot = urdfpy.URDF.load(filename) # maps from link name -> link index link_index = {} builder.add_articulation() # add base if (floating): root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE) # set dofs to transform start = builder.joint_q_start[root] builder.joint_q[start + 0] = xform[0][0] builder.joint_q[start + 1] = xform[0][1] builder.joint_q[start + 2] = xform[0][2] builder.joint_q[start + 3] = xform[1][0] builder.joint_q[start + 4] = xform[1][1] builder.joint_q[start + 5] = xform[1][2] builder.joint_q[start + 6] = xform[1][3] else: root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED) urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu) 
link_index[robot.links[0].name] = root # add children for joint in robot.joints: type = None axis = (0.0, 0.0, 0.0) if (joint.joint_type == "revolute" or joint.joint_type == "continuous"): type = df.JOINT_REVOLUTE axis = joint.axis if (joint.joint_type == "prismatic"): type = df.JOINT_PRISMATIC axis = joint.axis if (joint.joint_type == "fixed"): type = df.JOINT_FIXED if (joint.joint_type == "floating"): type = df.JOINT_FREE parent = -1 if joint.parent in link_index: parent = link_index[joint.parent] origin = urdfpy.matrix_to_xyz_rpy(joint.origin) pos = origin[0:3] rot = df.rpy2quat(*origin[3:6]) lower = -1.e+3 upper = 1.e+3 damping = 0.0 # limits if (joint.limit): if (joint.limit.lower != None): lower = joint.limit.lower if (joint.limit.upper != None): upper = joint.limit.upper # damping if (joint.dynamics): if (joint.dynamics.damping): damping = joint.dynamics.damping # add link link = builder.add_link( parent=parent, X_pj=df.transform(pos, rot), axis=axis, type=type, limit_lower=lower, limit_upper=upper, limit_ke=limit_ke, limit_kd=limit_kd, damping=damping) # add collisions urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu) # add ourselves to the index link_index[joint.child] = link # build an articulated tree def build_tree( builder, angle, max_depth, width=0.05, length=0.25, density=1000.0, joint_stiffness=0.0, joint_damping=0.0, shape_ke = 1.e+4, shape_kd = 1.e+3, shape_kf = 1.e+2, shape_mu = 0.5, floating=False): def build_recursive(parent, depth): if (depth >= max_depth): return X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle)) type = df.JOINT_REVOLUTE axis = (0.0, 0.0, 1.0) if (depth == 0 and floating == True): X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity()) type = df.JOINT_FREE link = builder.add_link( parent, X_pj, axis, type, stiffness=joint_stiffness, damping=joint_damping) # capsule shape = builder.add_shape_capsule( link, pos=(length, 0.0, 0.0), radius=width, half_width=length, ke=shape_ke, kd=shape_kd, kf=shape_kf, mu=shape_mu) # recurse #build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating) build_recursive(link, depth + 1) # build_recursive(-1, 0) # Mujoco file format parser def parse_mjcf( filename, builder, density=1000.0, stiffness=0.0, damping=1.0, contact_ke=1e4, contact_kd=1e4, contact_kf=1e3, contact_mu=0.5, limit_ke=100.0, limit_kd=10.0, armature=0.01, radians=False, load_stiffness=False, load_armature=False): file = ET.parse(filename) root = file.getroot() type_map = { "ball": df.JOINT_BALL, "hinge": df.JOINT_REVOLUTE, "slide": df.JOINT_PRISMATIC, "free": df.JOINT_FREE, "fixed": df.JOINT_FIXED } def parse_float(node, key, default): if key in node.attrib: return float(node.attrib[key]) else: return default def parse_bool(node, key, default): if key in node.attrib: if node.attrib[key] == "true": return True else: return False else: return default def parse_vec(node, key, default): if key in node.attrib: return np.fromstring(node.attrib[key], sep=" ") else: return np.array(default) def parse_body(body, parent, last_joint_pos): body_name = body.attrib["name"] body_pos = np.fromstring(body.attrib["pos"], sep=" ") # last_joint_pos = np.zeros(3) #----------------- # add body for each joint, we assume the joints attached to one body have the same joint_pos for i, joint in enumerate(body.findall("joint")): joint_name = joint.attrib["name"] joint_type = type_map[joint.attrib.get("type", 
'hinge')] joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0)) joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0)) joint_limited = parse_bool(joint, "limited", True) if joint_limited: if radians: joint_range = parse_vec(joint, "range", (np.deg2rad(-170.), np.deg2rad(170.))) else: joint_range = np.deg2rad(parse_vec(joint, "range", (-170.0, 170.0))) else: joint_range = np.array([-1.e+6, 1.e+6]) if load_stiffness: joint_stiffness = parse_float(joint, 'stiffness', stiffness) else: joint_stiffness = stiffness joint_damping = parse_float(joint, 'damping', damping) if load_armature: joint_armature = parse_float(joint, "armature", armature) else: joint_armature = armature joint_axis = df.normalize(joint_axis) if (parent == -1): body_pos = np.array((0.0, 0.0, 0.0)) #----------------- # add body link = builder.add_link( parent, X_pj=df.transform(body_pos + joint_pos - last_joint_pos, df.quat_identity()), axis=joint_axis, type=joint_type, limit_lower=joint_range[0], limit_upper=joint_range[1], limit_ke=limit_ke, limit_kd=limit_kd, stiffness=joint_stiffness, damping=joint_damping, armature=joint_armature) # assume that each joint is one body in simulation parent = link body_pos = [0.0, 0.0, 0.0] last_joint_pos = joint_pos #----------------- # add shapes to the last joint in the body for geom in body.findall("geom"): geom_name = geom.attrib["name"] geom_type = geom.attrib["type"] geom_size = parse_vec(geom, "size", [1.0]) geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0)) geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0)) if (geom_type == "sphere"): builder.add_shape_sphere( link, pos=geom_pos - last_joint_pos, # position relative to the parent frame rot=geom_rot, radius=geom_size[0], density=density, ke=contact_ke, kd=contact_kd, kf=contact_kf, mu=contact_mu) elif (geom_type == "capsule"): if ("fromto" in geom.attrib): geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0)) start = geom_fromto[0:3] end = geom_fromto[3:6] # compute rotation to align dflex capsule (along x-axis), with mjcf fromto direction axis = df.normalize(end-start) angle = math.acos(np.dot(axis, (1.0, 0.0, 0.0))) axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0))) geom_pos = (start + end)*0.5 geom_rot = df.quat_from_axis_angle(axis, -angle) geom_radius = geom_size[0] geom_width = np.linalg.norm(end-start)*0.5 else: geom_radius = geom_size[0] geom_width = geom_size[1] geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0)) if ("axisangle" in geom.attrib): axis_angle = parse_vec(geom, "axisangle", (0.0, 1.0, 0.0, 0.0)) geom_rot = df.quat_from_axis_angle(axis_angle[0:3], axis_angle[3]) if ("quat" in geom.attrib): q = parse_vec(geom, "quat", df.quat_identity()) geom_rot = q geom_rot = df.quat_multiply(geom_rot, df.quat_from_axis_angle((0.0, 1.0, 0.0), -math.pi*0.5)) builder.add_shape_capsule( link, pos=geom_pos - last_joint_pos, rot=geom_rot, radius=geom_radius, half_width=geom_width, density=density, ke=contact_ke, kd=contact_kd, kf=contact_kf, mu=contact_mu) else: print("Type: " + geom_type + " unsupported") #----------------- # recurse for child in body.findall("body"): parse_body(child, link, last_joint_pos) #----------------- # start articulation builder.add_articulation() world = root.find("worldbody") for body in world.findall("body"): parse_body(body, -1, np.zeros(3)) # SNU file format parser class MuscleUnit: def __init__(self): self.name = "" self.bones = [] self.points = [] self.muscle_strength = 0.0 class Skeleton: def __init__(self, skeleton_file, muscle_file, builder, filter={}, visualize_shapes=True, 
stiffness=5.0, damping=2.0, contact_ke=5000.0, contact_kd=2000.0, contact_kf=1000.0, contact_mu=0.5, limit_ke=1000.0, limit_kd=10.0, armature = 0.05): self.armature = armature self.stiffness = stiffness self.damping = damping self.contact_ke = contact_ke self.contact_kd = contact_kd self.contact_kf = contact_kf self.limit_ke = limit_ke self.limit_kd = limit_kd self.contact_mu = contact_mu self.visualize_shapes = visualize_shapes self.parse_skeleton(skeleton_file, builder, filter) if muscle_file != None: self.parse_muscles(muscle_file, builder) def parse_skeleton(self, filename, builder, filter): file = ET.parse(filename) root = file.getroot() self.node_map = {} # map node names to link indices self.xform_map = {} # map node names to parent transforms self.mesh_map = {} # map mesh names to link indices objects self.coord_start = len(builder.joint_q) self.dof_start = len(builder.joint_qd) type_map = { "Ball": df.JOINT_BALL, "Revolute": df.JOINT_REVOLUTE, "Prismatic": df.JOINT_PRISMATIC, "Free": df.JOINT_FREE, "Fixed": df.JOINT_FIXED } builder.add_articulation() for child in root: if (child.tag == "Node"): body = child.find("Body") joint = child.find("Joint") name = child.attrib["name"] parent = child.attrib["parent"] parent_X_s = df.transform_identity() if parent in self.node_map: parent_link = self.node_map[parent] parent_X_s = self.xform_map[parent] else: parent_link = -1 body_xform = body.find("Transformation") joint_xform = joint.find("Transformation") body_mesh = body.attrib["obj"] body_size = np.fromstring(body.attrib["size"], sep=" ") body_type = body.attrib["type"] body_mass = float(body.attrib["mass"]) x=body_size[0] y=body_size[1] z=body_size[2] density = body_mass / (x*y*z) max_body_mass = 15.0 mass_scale = body_mass / max_body_mass body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3)) body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ") joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3)) joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ") joint_type = type_map[joint.attrib["type"]] joint_lower = -1.e+3 joint_upper = 1.e+3 if (joint_type == type_map["Revolute"]): if ("lower" in joint.attrib): joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")[0] if ("upper" in joint.attrib): joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")[0] # print(joint_type, joint_lower, joint_upper) if ("axis" in joint.attrib): joint_axis = np.fromstring(joint.attrib["axis"], sep=" ") else: joint_axis = np.array((0.0, 0.0, 0.0)) body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s)) joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s)) mesh_base = os.path.splitext(body_mesh)[0] mesh_file = mesh_base + ".usd" link = -1 if len(filter) == 0 or name in filter: joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s) body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s) if (parent_link == -1): joint_X_p = df.transform_identity() # add link link = builder.add_link( parent=parent_link, X_pj=joint_X_p, axis=joint_axis, type=joint_type, limit_lower=joint_lower, limit_upper=joint_upper, limit_ke=self.limit_ke * mass_scale, limit_kd=self.limit_kd * mass_scale, damping=self.damping, stiffness=self.stiffness * math.sqrt(mass_scale), armature=self.armature) # armature=self.armature * math.sqrt(mass_scale)) # add shape shape = builder.add_shape_box( body=link, pos=body_X_c[0], rot=body_X_c[1], hx=x*0.5, hy=y*0.5, hz=z*0.5, density=density, 
ke=self.contact_ke, kd=self.contact_kd, kf=self.contact_kf, mu=self.contact_mu) # add lookup in name->link map # save parent transform self.xform_map[name] = joint_X_s self.node_map[name] = link self.mesh_map[mesh_base] = link def parse_muscles(self, filename, builder): # list of MuscleUnits muscles = [] file = ET.parse(filename) root = file.getroot() self.muscle_start = len(builder.muscle_activation) for child in root: if (child.tag == "Unit"): unit_name = child.attrib["name"] unit_f0 = float(child.attrib["f0"]) unit_lm = float(child.attrib["lm"]) unit_lt = float(child.attrib["lt"]) unit_lmax = float(child.attrib["lmax"]) unit_pen = float(child.attrib["pen_angle"]) m = MuscleUnit() m.name = unit_name m.muscle_strength = unit_f0 incomplete = False for waypoint in child.iter("Waypoint"): way_bone = waypoint.attrib["body"] way_link = self.node_map[way_bone] way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32) if (way_link == -1): incomplete = True break # transform loc to joint local space joint_X_s = self.xform_map[way_bone] way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc) m.bones.append(way_link) m.points.append(way_loc) if not incomplete: muscles.append(m) builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen) self.muscles = muscles
22,759
Python
30.523546
130
0.482622
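A hedged sketch of driving the MJCF parser above. The asset path is a placeholder (no MJCF file ships with this snippet), the builder class follows the dflex quick-start docs, and importing utils.load_utils also requires urdfpy to be installed.

import dflex
from utils.load_utils import parse_mjcf   # assumes repo root on PYTHONPATH

builder = dflex.model.ModelBuilder()
parse_mjcf(
    "assets/ant.xml",                     # placeholder MJCF path
    builder,
    density=1000.0,
    stiffness=0.0,
    damping=1.0,
    radians=False)                        # joint ranges given in degrees
model = builder.finalize('cpu')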
RoboticExplorationLab/CGAC/utils/dataset.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import numpy as np

class CriticDataset:
    def __init__(self, batch_size, obs, target_values, states_grad=None, states_joint=None, early_term=None, shuffle=False, drop_last=False):
        # flatten (steps, envs, dim) rollout tensors into (steps * envs, dim)
        self.obs = obs.view(-1, obs.shape[-1])
        if states_joint is not None:
            self.states = states_joint.view(-1, states_joint.shape[-1])
        else:
            self.states = None
        if states_grad is not None:
            self.states_grad = states_grad.view(-1, states_grad.shape[-1])
        else:
            self.states_grad = None
        if early_term is not None:
            self.early_term = early_term.view(-1)
        else:
            self.early_term = None
        self.target_values = target_values.view(-1)
        self.batch_size = batch_size

        if shuffle:
            self.shuffle()

        if drop_last:
            self.length = self.obs.shape[0] // self.batch_size
        else:
            self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1

    def shuffle(self):
        index = np.random.permutation(self.obs.shape[0])
        self.obs = self.obs[index, :]
        self.target_values = self.target_values[index]
        if self.states is not None:
            self.states = self.states[index]
            self.states_grad = self.states_grad[index]
            self.early_term = self.early_term[index]

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        start_idx = index * self.batch_size
        end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
        if self.states is not None:
            return {'obs': self.obs[start_idx:end_idx, :],
                    'target_values': self.target_values[start_idx:end_idx],
                    'states': self.states[start_idx:end_idx],
                    'states_grad': self.states_grad[start_idx:end_idx],
                    'early_term': self.early_term[start_idx:end_idx]}
        else:
            return {'obs': self.obs[start_idx:end_idx, :],
                    'target_values': self.target_values[start_idx:end_idx]}
2,391
Python
43.296295
258
0.627771
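Minimal use of CriticDataset: flatten rollout tensors into shuffled mini-batches for critic regression. Note the explicit index loop: __getitem__ clamps out-of-range slices instead of raising IndexError, so direct iteration over the object would never terminate.

import torch
from utils.dataset import CriticDataset   # assumes repo root on PYTHONPATH

obs = torch.randn(32, 64, 17)              # (steps, envs, obs_dim) rollout
target_values = torch.randn(32, 64)
ds = CriticDataset(batch_size=256, obs=obs, target_values=target_values,
                   shuffle=True, drop_last=False)

for i in range(len(ds)):
    batch = ds[i]
    obs_b = batch['obs']                   # (<=256, 17)
    targets_b = batch['target_values']     # (<=256,)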
RoboticExplorationLab/CGAC/utils/time_report.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import time from utils.common import * class Timer: def __init__(self, name): self.name = name self.start_time = None self.time_total = 0. def on(self): assert self.start_time is None, "Timer {} is already turned on!".format(self.name) self.start_time = time.time() def off(self): assert self.start_time is not None, "Timer {} not started yet!".format(self.name) self.time_total += time.time() - self.start_time self.start_time = None def report(self): print_info('Time report [{}]: {:.2f} seconds'.format(self.name, self.time_total)) def clear(self): self.start_time = None self.time_total = 0. class TimeReport: def __init__(self): self.timers = {} def add_timer(self, name): assert name not in self.timers, "Timer {} already exists!".format(name) self.timers[name] = Timer(name = name) def start_timer(self, name): assert name in self.timers, "Timer {} does not exist!".format(name) self.timers[name].on() def end_timer(self, name): assert name in self.timers, "Timer {} does not exist!".format(name) self.timers[name].off() def report(self, name = None): if name is not None: assert name in self.timers, "Timer {} does not exist!".format(name) self.timers[name].report() else: print_info("------------Time Report------------") for timer_name in self.timers.keys(): self.timers[timer_name].report() print_info("-----------------------------------") def clear_timer(self, name = None): if name is not None: assert name in self.timers, "Timer {} does not exist!".format(name) self.timers[name].clear() else: for timer_name in self.timers.keys(): self.timers[timer_name].clear() def pop_timer(self, name = None): if name is not None: assert name in self.timers, "Timer {} does not exist!".format(name) self.timers[name].report() del self.timers[name] else: self.report() self.timers = {}
2,688
Python
34.853333
90
0.58631
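Basic TimeReport usage: named timers whose on/off intervals accumulate across repeated sections of a training loop.

import time
from utils.time_report import TimeReport   # assumes repo root on PYTHONPATH

tr = TimeReport()
tr.add_timer("rollout")
for _ in range(3):
    tr.start_timer("rollout")
    time.sleep(0.01)                        # stand-in for real work
    tr.end_timer("rollout")
tr.report()                                 # prints accumulated seconds per timer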
RoboticExplorationLab/CGAC/utils/running_mean_std.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from typing import Tuple
import torch

class RunningMeanStd(object):
    def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = (), device = 'cuda:0'):
        """
        Calculates the running mean and std of a data stream
        https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm

        :param epsilon: helps with arithmetic issues
        :param shape: the shape of the data stream's output
        """
        self.mean = torch.zeros(shape, dtype = torch.float32, device = device)
        self.var = torch.ones(shape, dtype = torch.float32, device = device)
        self.count = epsilon

    def to(self, device):
        rms = RunningMeanStd(device = device)
        rms.mean = self.mean.to(device).clone()
        rms.var = self.var.to(device).clone()
        rms.count = self.count
        return rms

    @torch.no_grad()
    def update(self, arr: torch.Tensor) -> None:
        batch_mean = torch.mean(arr, dim = 0)
        batch_var = torch.var(arr, dim = 0, unbiased = False)
        batch_count = arr.shape[0]
        self.update_from_moments(batch_mean, batch_var, batch_count)

    def update_from_moments(self, batch_mean: torch.Tensor, batch_var: torch.Tensor, batch_count: int) -> None:
        delta = batch_mean - self.mean
        tot_count = self.count + batch_count

        new_mean = self.mean + delta * batch_count / tot_count
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        m_2 = m_a + m_b + torch.square(delta) * self.count * batch_count / tot_count
        new_var = m_2 / tot_count

        new_count = batch_count + self.count

        self.mean = new_mean
        self.var = new_var
        self.count = new_count

    def normalize(self, arr: torch.Tensor, un_norm = False) -> torch.Tensor:
        if not un_norm:
            result = (arr - self.mean) / torch.sqrt(self.var + 1e-5)
        else:
            result = arr * torch.sqrt(self.var + 1e-5) + self.mean
        return result
2,462
Python
40.745762
111
0.638099
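A sanity check of RunningMeanStd: two chunked updates should match the statistics of the concatenated data, and un_norm should invert normalize. The device is forced to CPU for the demo since the constructor defaults to 'cuda:0'.

import torch
from utils.running_mean_std import RunningMeanStd   # assumes repo root on PYTHONPATH

rms = RunningMeanStd(shape=(3,), device='cpu')
a, b = torch.randn(1000, 3), torch.randn(500, 3) + 2.0
rms.update(a)
rms.update(b)

full = torch.cat([a, b], dim=0)
assert torch.allclose(rms.mean, full.mean(dim=0), atol=1e-3)   # epsilon adds tiny bias

normed = rms.normalize(full)
restored = rms.normalize(normed, un_norm=True)                 # exact inverse up to fp error
assert torch.allclose(restored, full, atol=1e-4)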
RoboticExplorationLab/CGAC/cgac/main.py
import sys, os import argparse import datetime import time import numpy as np import itertools project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(project_dir) parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args') parser.add_argument('--env-name', default="SNUHumanoidEnv", choices=["AntEnv", "HumanoidEnv", "SNUHumanoidEnv", "CartPoleSwingUpEnv", "CheetahEnv", "HopperEnv", "AllegroHand"]) parser.add_argument('--policy', default="Gaussian", help='Policy Type: Gaussian | Deterministic (default: Gaussian)') parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor for reward (default: 0.99)') parser.add_argument('--tau_value', type=float, default=0.005, metavar='G', help='target smoothing coefficient(τ) (default: 0.005)') parser.add_argument('--tau_policy', type=float, default=0.001) parser.add_argument('--lr', type=float, default=0.002) parser.add_argument('--final_lr', type=float, default=1e-4) parser.add_argument('--alpha', type=float, default=0.1, metavar='G', help='Temperature parameter α determines the relative importance of the entropy term against the reward (default: 0.2)') parser.add_argument('--alpha_final', type=float, default=2e-5) parser.add_argument('--no_automatic_entropy_tuning', action="store_true", help='Automaically adjust α (default: False)') parser.add_argument('--seed', type=int, default=0) parser.add_argument('--batch_size_update', type=int, default=8192*2) parser.add_argument('--num_actors', type=int, default=4096) parser.add_argument('--num_steps', type=int, default=500000001) parser.add_argument('--critic_hidden', nargs='+', type=int, default=[512, 512, 256]) parser.add_argument('--actor_hidden', nargs='+', type=int, default=[512, 256]) parser.add_argument('--critic_act', type=str, default='elu', choices=['elu', 'tanh', 'relu']) parser.add_argument('--actor_act', type=str, default='elu', choices=['elu', 'tanh', 'relu']) parser.add_argument('--num_critic_updates', type=int, default=2) parser.add_argument('--val_horizon', type=int, default=32) parser.add_argument('--num_steps_buffer', type=int, default=32) parser.add_argument('--betas', nargs='+', type=float, default=[0.9, 0.999]) parser.add_argument('--lam', type=float, default=0.95) parser.add_argument('--no_const_std', action='store_true') parser.add_argument('--grad_norm', type=float, default=20) parser.add_argument('--actor_grad_norm', type=float, default=4) parser.add_argument('--clip_actor_gn', action='store_true') parser.add_argument('--max_updates', type=int, default=20000) parser.add_argument('--lr_schedule', type=str, default='linear', choices=['linear', 'decay', 'constant']) parser.add_argument('--alpha_schedule', type=str, default='linear', choices=['linear', 'decay', 'constant']) parser.add_argument('--final_targ_ent_coeff', type=float, default=3.5) parser.add_argument('--init_targ_ent_coeff', type=float, default=0.2) parser.add_argument('--peak_expected_reward', type=float, default=7.5) parser.add_argument('--init_expected_reward', type=float, default=1.5) parser.add_argument('--critic_method', type=str, default='gae-return', choices=['gae-return', 'td-lambda', 'one-step']) parser.add_argument('--episode_length', type=int, default=1000) parser.add_argument('--no_stochastic_init', action='store_true') parser.add_argument('--policy_clip', action='store_true') parser.add_argument('--cuda', action="store_true") parser.add_argument('--target_update_interval', type=int, default=1, metavar='N', help='Value target 
update per no. of updates per step (default: 1)')
parser.add_argument('--id', type=str, default='0')
parser.add_argument('--desc', type=str, default='')
parser.add_argument('--final', action='store_true')
parser.add_argument('--start_steps', type=int, default=80000, metavar='N',
                    help='Steps sampling random actions (default: 80000)')
parser.add_argument('--replay_size', type=int, default=1000000)
parser.add_argument('--test', action="store_true")
parser.add_argument('--eval', type=bool, default=True,
                    help='Evaluates the policy every 10 episodes (default: True)')
parser.add_argument('--on_policy_update', action='store_true')
parser.add_argument('--reduction_rate_updates', type=int, default=8000)
# parser.add_argument('--num_steps_buffer', type=int, default=32)
parser.add_argument('--max_updates_alpha', type=int, default=8000)
parser.add_argument('--decay_steps', type=int, default=2000)
parser.add_argument('--lr_update_freq', type=int, default=1000)
parser.add_argument('--lr_decay_rate', type=float, default=0.75)

args = parser.parse_args()
args.automatic_entropy_tuning = not args.no_automatic_entropy_tuning
args.batch_size = args.num_actors
args.critic_lr = args.lr
args.actor_lr = args.lr
args.alpha_lr = args.lr*10
args.alpha_init = args.alpha*4
# args.alpha_final = args.alpha/4
args.max_rand_resets = args.batch_size//args.episode_length
args.horizon_shrink_steps = 12000/args.num_steps_buffer
args.const_std = not args.no_const_std
args.no_grad_train = not args.grad_train
args.stochastic_init = not args.no_stochastic_init

device_str = "cuda:0" if args.cuda else "cpu"
device = args.device = device_str

if args.env_name=='AllegroHand':
    rl_games_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../rl_games/'))
    sys.path.append(rl_games_dir)
    import isaacgym
    import isaacgymenvs
    import torch
    env = isaacgymenvs.make(
        args.seed, args.env_name, args.batch_size, device_str, device_str,
        graphics_device_id=0, headless=True)
    env.actions = torch.zeros((env.num_envs, env.num_actions), device=device, dtype=torch.float)
    args.episode_length = 600
    args.max_rand_resets = 0
else:
    import dflex as df
    import envs
    import torch
    from utils.common import *
    if args.env_name == "HumanoidEnv":
        args.MM_caching_frequency = 48
    elif args.env_name == "SNUHumanoidEnv":
        args.MM_caching_frequency = 8
    elif args.env_name == "AntEnv":
        args.MM_caching_frequency = 16
    seeding(args.seed)
    env_fn = getattr(envs, args.env_name)
    env = env_fn(num_envs = args.batch_size, \
                 device = args.device, \
                 render = False, \
                 seed = args.seed, \
                 episode_length=args.episode_length, \
                 stochastic_init = args.stochastic_init, \
                 MM_caching_frequency = args.MM_caching_frequency, \
                 no_grad=args.no_grad_train)

from cgac.cgac import CGAC
from torch.utils.tensorboard import SummaryWriter
from replay_memory import VectorizedReplayBufferIsaacSAC
from utils_cgac import *
from utils.common import *

seeding(args.seed)

memory_cgac = VectorizedReplayBufferIsaacSAC(env.obs_space.shape, env.act_space.shape, args.batch_size,
                                             args.num_steps_buffer, args.device, gamma=args.gamma, lam=args.lam,
                                             horizon=args.val_horizon, critic_method=args.critic_method)
agent = CGAC(env.obs_space.shape[0], env.act_space, args, memory_cgac, env)

if not args.test:
    save_dir = f'runs/hp/{args.env_name}/RL_final/tauval{args.tau_value}pi{args.tau_policy}_{args.actor_act}{args.actor_hidden}_\
{args.critic_act}{args.critic_hidden}_actors{args.num_actors}bszup{args.batch_size_update}_alphaautolin{int(args.final_targ_ent_coeff)}_\
{args.alpha}fin{args.alpha_final}_criticups{args.num_critic_updates}_bufs{args.num_steps_buffer}h{args.val_horizon}_seed{args.seed}_\
piclip{args.policy_clip}_{args.desc}'
    print('save_dir : ', save_dir)
    writer = SummaryWriter(save_dir)

if args.env_name=='AllegroHand':
    RL_update_func = agent.update_parameters_and_collect_buffer_RL_isaac
else:
    RL_update_func = agent.update_parameters_and_collect_buffer_RL

# Training Loop
total_numsteps = 0
updates = 0
total_updates = 0
num_episodes = 0
episode_steps = np.zeros(args.num_actors)
episode_rewards = torch.zeros(args.num_actors).to(device)
total_episode_reward_hist = []
episode_len_hist = []
agent.times = []
last = time.time()
start_time = time.time()

for i_episode in itertools.count(1):
    critic_1_grad_state_loss = None
    critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha, log_pi, min_qf_pi, next_values, \
        qf_loss_batch, actor_grad_norm, critic_grad_norm, = RL_update_func(memory_cgac, args.batch_size, updates)

    if not args.test and not args.final:
        writer.add_scalar('loss/critic_1', critic_1_loss, updates)
        writer.add_scalar('loss/critic_2', critic_2_loss, updates)
        writer.add_scalar('loss/policy', policy_loss, updates)
        writer.add_scalar('loss/entropy_loss', ent_loss, updates)
        writer.add_scalar('loss/log_pi', log_pi, updates)
        writer.add_scalar('loss/min_qf_pi', min_qf_pi, updates)
        writer.add_scalar('entropy_temperature/alpha', alpha, updates)
        writer.add_scalar('entropy_temperature/actor_grad_norm', actor_grad_norm, updates)
        writer.add_scalar('entropy_temperature/critic_grad_norm', critic_grad_norm, updates)
        writer.add_scalar('losses/val_targ_max', next_values.max().item(), updates)
        writer.add_scalar('losses/val_targ_mean', next_values.mean().item(), updates)
        writer.add_scalar('losses/val_targ_min', next_values.min().item(), updates)
        writer.add_scalar('losses/loss_qf_max', qf_loss_batch.max().item(), updates)
        writer.add_scalar('losses/loss_qf_mean', qf_loss_batch.mean().item(), updates)
        writer.add_scalar('losses/loss_qf_min', qf_loss_batch.min().item(), updates)
        writer.add_scalar('losses/loss_qf_median', qf_loss_batch.median().item(), updates)
        writer.add_scalar('entropy_temperature/num_nans', agent.num_nans, updates)
        writer.add_scalar('agent_rewards/rew1', agent.episode_rewards[0], i_episode)
        writer.add_scalar('agent_rewards/rew10', agent.episode_rewards[10], i_episode)
        writer.add_scalar('agent_rewards/rew100', agent.episode_rewards[20], i_episode)
        writer.add_scalar('agent_rewards/rew1k', agent.episode_rewards[40], i_episode)
        writer.add_scalar('agent_rewards/eplenavg', agent.env_train.progress_buf.clone().float().mean().item(), i_episode)
        writer.add_scalar('agent_done_stats/done_len', agent.len_dones, i_episode)
        writer.add_scalar('agent_done_stats/queue_len', agent.len_queue, i_episode)
        writer.add_scalar('agent_done_stats/env_thres', agent.env_thres, i_episode)
        writer.add_scalar('agent_done_stats/memory_cgac_horizon', memory_cgac.h, i_episode)
        writer.add_scalar('agent_done_stats/sum_episode_wts', (1-agent.episode_wts).sum(), i_episode)
        writer.add_scalar('agent_done_stats/agent_curr_rew', agent.reward_batch_curr, i_episode)

    updates += 1
    total_updates += args.num_critic_updates
    total_numsteps = updates*args.batch_size
    if total_numsteps > args.num_steps:
        break

    if i_episode%100 == 0:
        time_taken = time.time() - last
        last = time.time()
        total_time = time.time() - start_time
        if len(agent.total_episode_reward_hist)>0:
            num_test_avg = 100
            if not args.test:
                writer.add_scalar('reward/train', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/train_recent', agent.total_episode_reward_hist[-1], i_episode)
                writer.add_scalar('reward/ep_len', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/rew_eplen', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()/np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/train_agent', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/ep_len_agent', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/rew_eplen_agent', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()/np.array(agent.episode_len_hist[-num_test_avg:]).mean(), i_episode)
                writer.add_scalar('reward/train_time', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), total_time)
                writer.add_scalar('reward/ep_len_time', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), total_time)
                writer.add_scalar('reward/train_steps', np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean(), total_numsteps)
                writer.add_scalar('reward/ep_len_steps', np.array(agent.episode_len_hist[-num_test_avg:]).mean(), total_numsteps)
                # writer.add_scalar('variance/snr_q', agent.snr_q, total_numsteps)
                # writer.add_scalar('variance/snr_q_iter', agent.snr_q, i_episode)
                # writer.add_scalar('variance/ppo_snr', agent.ppo_snr, total_numsteps)
                # writer.add_scalar('variance/ppo_snr_iter', agent.ppo_snr, i_episode)
                # writer.add_scalar('variance/ppo_snr_adv', agent.ppo_snr_adv, total_numsteps)
                # writer.add_scalar('variance/ppo_snr_adv_iter', agent.ppo_snr_adv, i_episode)
            print(f"iters: {i_episode}, total numsteps: {total_numsteps}, num ep: {agent.num_episodes}, episode steps: {np.array(agent.episode_len_hist[-num_test_avg:]).mean()}, reward: {np.array(agent.total_episode_reward_hist[-num_test_avg:]).mean()}, progress_buf: {env.progress_buf.min()}, time: {time_taken}, lr: {agent.lr}, num_nans: {agent.num_nans}")
        else:
            print(f"iters: {i_episode}, total numsteps: {total_numsteps}, num ep: {agent.num_episodes}, progress_buf: {env.progress_buf.min()}, time: {time_taken}, lr: {agent.lr}, num_nans: {agent.num_nans}")
        print(agent.alpha)

env.close()
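# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Assuming this file is the CGAC training entry point and that flags such as
# --env_name, --seed, --cuda and --num_actors are defined in the argparse block
# above this excerpt (the script name below is hypothetical):
#
#   python train_cgac.py --env_name AntEnv --seed 0 --cuda         # dflex env
#   python train_cgac.py --env_name AllegroHand --seed 0 --cuda    # Isaac Gym env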
13,888
Python
57.851695
358
0.682316
RoboticExplorationLab/CGAC/cgac/replay_memory.py
import random
import numpy as np
import torch
import ipdb


class VectorizedReplayBufferIsaacSAC:
    def __init__(self, obs_shape, action_shape, batch_size, num_steps, device, gamma=0.99, lam=0.95, horizon=200000, critic_method='td-lambda'):
        """Create a vectorized replay buffer.

        Parameters
        ----------
        batch_size: int
            Number of parallel environments stored side by side.
        num_steps: int
            Max number of transitions stored per environment (so capacity is
            batch_size * num_steps). When the buffer overflows the old
            memories are dropped.

        See Also
        --------
        ReplayBuffer.__init__
        """
        self.device = device
        self.out_device = device
        self.batch_size = batch_size
        self.num_steps = num_steps
        self.critic_method = critic_method
        capacity = batch_size*num_steps
        self.lam = lam
        self.gamma = gamma
        self.h = min(horizon, num_steps)
        self.minhorizon = 1
        self.actions = torch.empty((num_steps, batch_size, *action_shape), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.masks = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.term_masks = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.next_values = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.next_state_log_pi = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.states = torch.empty((num_steps, batch_size, *obs_shape), dtype=torch.float32, device=self.device)
        self.episode_wts = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.val_cur = torch.empty((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.gamma_k = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.sigmar = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.Vt_new = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.lam_t = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.Vt_out = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.mask_t = torch.ones((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.sigmaG = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.G_prev = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.gamma_lam_k = torch.zeros((num_steps, batch_size, 1), dtype=torch.float32, device=self.device)
        self.sample_start_idxs = torch.zeros((batch_size,), dtype=torch.float32, device=self.device)
        self.num_episodes_passed = torch.zeros((batch_size,), dtype=torch.float32, device=self.device)

        self.capacity = capacity
        self.idx = 0
        self.full = False

    @torch.no_grad()
    def add(self, states, actions, rewards, next_q_value, masks, term_masks, next_state_log_pi, alpha, episode_wts, updates):
        num_observations = self.batch_size
        if self.idx >= self.num_steps:
            self.full = True
            self.idx = 0
        self.actions[self.idx, :] = actions
        self.states[self.idx, :] = states
        self.rewards[self.idx, :] = rewards
        self.masks[self.idx, :] = masks
        self.term_masks[self.idx, :] = term_masks
        self.next_values[self.idx, :] = next_q_value
        self.next_state_log_pi[self.idx, :] = next_state_log_pi
        self.episode_wts[self.idx, :] = episode_wts
        self.alpha = alpha
        self.compute_target_values()
        self.idx += 1

    @torch.no_grad()
    def sample(self, batch_size):
        """Sample a batch of experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        states: torch tensor
            batch of observations
        actions: torch tensor
            batch of actions executed given those observations
        rewards: torch tensor
            rewards received as a result of executing the actions
        target_val: torch tensor
            bootstrapped value targets (Vt_out) for the sampled transitions
        masks: torch tensor
            inverse of whether the episode ended at this tuple of (observation, action) or not
        episode_wts: torch tensor
            per-transition weights used to ignore transitions from environments
            that are waiting to be reset
        """
        idxs_steps = torch.randint(0, self.num_steps if self.full else self.idx, (batch_size,), device=self.device)
        idxs_bsz = torch.randint(0, self.batch_size, (batch_size,), device=self.device)
        states = self.states[idxs_steps, idxs_bsz]
        actions = self.actions[idxs_steps, idxs_bsz]
        rewards = self.rewards[idxs_steps, idxs_bsz]
        masks = self.masks[idxs_steps, idxs_bsz]
        target_val = self.Vt_out[idxs_steps, idxs_bsz]
        episode_wts = self.episode_wts[idxs_steps, idxs_bsz]
        return states, actions, rewards, target_val, masks, episode_wts

    def __len__(self):
        return self.idx

    @torch.no_grad()
    def compute_target_values(self):
        if self.critic_method == 'one-step':
            self.Vt_out[self.idx] = self.rewards[self.idx] + self.gamma * self.next_values[self.idx] * self.masks[self.idx]
        elif self.critic_method == 'td-lambda':
            if self.full:
                if self.idx>=self.h-1:
                    start_idx = self.idx - self.h + 1
                else:
                    start_idx = 0
                    end_idx = self.idx - self.h + 1
                    mask = self.masks[self.idx]
                    self.sigmar[end_idx:] = self.sigmar[end_idx:] + self.gamma_k[end_idx:]*self.rewards[self.idx].unsqueeze(0)
                    G_new = self.sigmar[end_idx:] + self.gamma*self.gamma_k[end_idx:]*(self.next_values[self.idx]*mask).unsqueeze(0)
                    self.G_prev[end_idx:] = G_new.clone().detach()
                    self.gamma_k[end_idx:] = self.gamma*self.gamma_k[end_idx:]
                    Vt_new = (1-self.lam)*self.sigmaG[end_idx:] + self.lam_t[end_idx:]*G_new
                    self.sigmaG[end_idx:] = self.sigmaG[end_idx:] + self.lam_t[end_idx:]*G_new
                    self.lam_t[end_idx:] = self.lam_t[end_idx:]*self.lam
                    self.Vt_out[end_idx:] = self.Vt_out[end_idx:]*(1-self.mask_t[end_idx:]) + Vt_new*self.mask_t[end_idx:]
                    self.mask_t[end_idx:] = self.mask_t[end_idx:]*(self.grad_masks[self.idx].unsqueeze(0))
                if self.idx>0:
                    mask = self.masks[self.idx]
                    self.sigmar[start_idx:self.idx] = self.sigmar[start_idx:self.idx] + self.gamma_k[start_idx:self.idx]*self.rewards[self.idx].unsqueeze(0)
                    G_new = self.sigmar[start_idx:self.idx] + self.gamma*self.gamma_k[start_idx:self.idx]*(self.next_values[self.idx]*mask).unsqueeze(0)
                    self.G_prev[start_idx:self.idx] = G_new.clone().detach()
                    self.gamma_k[start_idx:self.idx] = self.gamma*self.gamma_k[start_idx:self.idx]
                    Vt_new = (1-self.lam)*self.sigmaG[start_idx:self.idx] + self.lam_t[start_idx:self.idx]*G_new
                    self.sigmaG[start_idx:self.idx] = self.sigmaG[start_idx:self.idx] + self.lam_t[start_idx:self.idx]*G_new
                    self.lam_t[start_idx:self.idx] = self.lam_t[start_idx:self.idx]*self.lam
                    self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx]*(1-self.mask_t[start_idx:self.idx]) + Vt_new*self.mask_t[start_idx:self.idx]
                    self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx]*(self.grad_masks[self.idx].unsqueeze(0))
            else:
                if self.idx > 0:
                    mask = self.masks[self.idx]
                    self.sigmar[:self.idx] = self.sigmar[:self.idx] + self.gamma_k[:self.idx]*self.rewards[self.idx].unsqueeze(0)
                    G_new = self.sigmar[:self.idx] + self.gamma*self.gamma_k[:self.idx]*(self.next_values[self.idx]*mask).unsqueeze(0)
                    self.G_prev[:self.idx] = G_new.clone().detach()
                    self.gamma_k[:self.idx] = self.gamma*self.gamma_k[:self.idx]
                    Vt_new = (1-self.lam)*self.sigmaG[:self.idx] + self.lam_t[:self.idx]*G_new
                    self.sigmaG[:self.idx] = self.sigmaG[:self.idx] + self.lam_t[:self.idx]*G_new
                    self.lam_t[:self.idx] = self.lam_t[:self.idx]*self.lam
                    self.Vt_out[:self.idx] = self.Vt_out[:self.idx]*(1-self.mask_t[:self.idx]) + Vt_new*self.mask_t[:self.idx]
                    self.mask_t[:self.idx] = self.mask_t[:self.idx]*(self.grad_masks[self.idx].unsqueeze(0))

            ## Initializing for self.idx
            mask = self.masks[self.idx]
            self.sigmar[self.idx] = self.rewards[self.idx].clone()
            G_new = self.sigmar[self.idx] + self.gamma*self.next_values[self.idx]*mask
            self.G_prev[self.idx] = G_new.clone().detach()
            self.gamma_k[self.idx] = self.gamma
            self.sigmaG[self.idx] = G_new.clone().detach()
            Vt_new = G_new
            self.lam_t[self.idx] = self.lam
            self.Vt_out[self.idx] = Vt_new
            self.mask_t[self.idx] = self.grad_masks[self.idx].clone().detach()
        elif self.critic_method == 'gae-return':
            if self.full:
                # If the buffer is full, compute the start and end indices for the horizon to compute the gae-return
                # After exceeding the buffer length, the experiences wrap around to the beginning of the buffer
                if self.idx >= self.h-1:
                    start_idx = self.idx - self.h + 1
                else:
                    # Accounting for the wrap around of the buffer. We want to update end_idx:end and start_idx:self.idx
                    start_idx = 0
                    end_idx = self.idx - self.h + 1
                    mask = self.masks[self.idx]
                    delta_gamma_k = self.gamma_lam_k[end_idx:] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
                    self.Vt_out[end_idx:] = self.Vt_out[end_idx:] + delta_gamma_k * self.mask_t[end_idx:]
                    self.gamma_lam_k[end_idx:] = self.gamma_lam_k[end_idx:] * self.gamma * self.lam
                    self.mask_t[end_idx:] = self.mask_t[end_idx:] * self.term_masks[self.idx].unsqueeze(0)
                if self.idx > 0:
                    mask = self.masks[self.idx]
                    delta_gamma_k = self.gamma_lam_k[start_idx:self.idx] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
                    self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx] + delta_gamma_k * self.mask_t[start_idx:self.idx]
                    self.gamma_lam_k[start_idx:self.idx] = self.gamma_lam_k[start_idx:self.idx] * self.gamma * self.lam
                    self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx] * self.term_masks[self.idx].unsqueeze(0)
            else:
                # If the buffer is not full, only need to update start_idx:self.idx
                start_idx = max(0, self.idx - self.h + 1)
                mask = self.masks[self.idx]
                delta_gamma_k = self.gamma_lam_k[start_idx:self.idx] * (self.rewards[self.idx] - self.next_values[self.idx-1] + self.gamma * self.next_values[self.idx] * mask).unsqueeze(0)
                self.Vt_out[start_idx:self.idx] = self.Vt_out[start_idx:self.idx] + delta_gamma_k * self.mask_t[start_idx:self.idx]
                self.gamma_lam_k[start_idx:self.idx] = self.gamma_lam_k[start_idx:self.idx] * self.gamma * self.lam
                self.mask_t[start_idx:self.idx] = self.mask_t[start_idx:self.idx] * self.term_masks[self.idx].unsqueeze(0)

            # Update Vt_out, gamma_lam_k, and mask_t for the current timestep
            mask = self.masks[self.idx]
            delta_gamma_k = self.rewards[self.idx] + self.gamma * self.next_values[self.idx] * mask
            self.Vt_out[self.idx] = delta_gamma_k
            self.gamma_lam_k[self.idx] = self.gamma * self.lam
            self.mask_t[self.idx] = self.term_masks[self.idx].clone().detach()
        else:
            raise NotImplementedError
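# --- Hedged illustration (added; not part of the original file) ---
# A naive reference for the target the 'gae-return' branch maintains
# incrementally: V^target_t = V(s_t) + sum_k (gamma*lam)^k * delta_{t+k},
# where delta_j = r_j + gamma * V(s_{j+1}) - V(s_j). The sketch below assumes
# a single environment and no early terminations, purely to make the recursion
# concrete; it is defined as a module-private helper and is not used by the
# buffer itself.
def _naive_gae_targets(rewards, values, next_values, gamma=0.99, lam=0.95):
    # rewards, values, next_values: 1-D float tensors of equal length T
    T = rewards.shape[0]
    deltas = rewards + gamma * next_values - values
    targets = torch.zeros_like(rewards)
    for t in range(T):
        acc, coeff = 0.0, 1.0
        for k in range(t, T):  # truncated at the end of the stored window
            acc += coeff * deltas[k].item()
            coeff *= gamma * lam
        targets[t] = values[t] + acc
    return targets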
12,832
Python
58.688372
193
0.584165
RoboticExplorationLab/CGAC/cgac/cgac.py
import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils_cgac import *
from model import GaussianPolicy, QNetwork, DeterministicPolicy
import gc
import ipdb
from torch.nn.utils.clip_grad import clip_grad_norm_
import numpy as np
import time
from torch.distributions import Normal


class CGAC(object):
    def __init__(self, num_inputs, action_space, args, memory_cgac, env=None):
        self.gamma = args.gamma
        self.tau = args.tau_value
        self.alpha = args.alpha
        self.args = args
        if self.args.on_policy_update:
            self.env_train = env
            if args.env_name!='AllegroHand':
                self.env_train.clear_grad()
                self.env_train.reset()
            self.state_batch = self.env_train.obs_buf

        self.policy_type = args.policy
        self.target_update_interval = args.target_update_interval
        self.automatic_entropy_tuning = args.automatic_entropy_tuning
        self.lr = args.actor_lr

        self.device = torch.device("cuda" if args.cuda else "cpu")
        self.rms_obs = RunningMeanStd((num_inputs,)).to(self.device)
        self.val_running_median = 1.
        self.act_running_median = 1.
        self.state_running_median = 1.
        self.memory_cgac = memory_cgac

        self.critic = QNetwork(num_inputs, action_space.shape[0], args.critic_hidden, args).to(device=self.device)
        self.critic_optim = Adam(self.critic.parameters(), lr=args.critic_lr, betas = args.betas)
        self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.critic_hidden, args).to(self.device)
        hard_update(self.critic_target, self.critic)

        self.episode_steps = np.zeros(args.batch_size)
        self.episode_rewards = torch.zeros(args.batch_size).to(self.device)
        self.total_episode_reward_hist = []
        self.episode_len_hist = []
        self.num_episodes = 0
        self.total_numsteps = 0
        self.episode_wts = torch.ones(args.batch_size, 1).to(self.device)
        self.queue_ids = set([])
        self.num_nans = 0
        self.rewards_moving_avg = 0.0
        self.targ_ent_coeff = 1.0
        self.mean_ratio = 100
        self.ep_lens = torch.zeros_like(self.env_train.progress_buf) + 10
        self.obs_dict = {}

        if self.policy_type == "Gaussian":
            # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
            if self.automatic_entropy_tuning is True:
                self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
                self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
                self.log_alpha.data[0] = np.log(self.alpha)
                self.alpha_optim = Adam([self.log_alpha], lr=args.alpha_lr)

            self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.actor_hidden, args, action_space, const_std=args.const_std).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.actor_lr, betas = args.betas)
            self.policy_avg = GaussianPolicy(num_inputs, action_space.shape[0], args.actor_hidden, args, action_space, const_std=args.const_std).to(self.device)
            hard_update(self.policy_avg, self.policy)
        else:
            self.alpha = 0
            self.automatic_entropy_tuning = False
            self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.actor_hidden, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.actor_lr)

    def select_action(self, state, evaluate=False):
        if evaluate is False:
            action, _, _ = self.policy.sample(state)
        else:
            _, _, action = self.policy.sample(state)
        return action.detach()

    def compute_loss(self, out, targ):
        """
        Computes the loss for the critic network.
        Hinge loss for TD error with hinge at 4*median.
        """
        diff = torch.abs(out - targ)
        median = diff.median(dim=0)[0].detach().clone().unsqueeze(0)
        mask = (diff < 4*median).float()
        cost = torch.square(out - targ)*(mask) + diff*8*median*(1-mask)
        cost = torch.mean(cost)
        return cost

    def update_parameters_and_collect_buffer_RL(self, memory, batch_size, updates):
        ### Learning rate schedule for the actor and critic networks ###
        if self.args.lr_schedule == 'linear':
            final_lr = self.args.final_lr
            actor_lr = max((final_lr - self.args.actor_lr) * float((updates) / self.args.max_updates) + self.args.actor_lr, final_lr)
            for param_group in self.policy_optim.param_groups:
                param_group['lr'] = actor_lr
            self.lr = actor_lr
            critic_lr = max((final_lr - self.args.critic_lr) * float((updates) / self.args.max_updates) + self.args.critic_lr, final_lr)
            for param_group in self.critic_optim.param_groups:
                param_group['lr'] = critic_lr
        elif self.args.lr_schedule == 'step':
            final_lr = self.args.final_lr
            exp = updates//self.args.lr_update_freq
            actor_lr = max(self.args.actor_lr * (self.args.lr_decay_rate**exp), final_lr)
            for param_group in self.policy_optim.param_groups:
                param_group['lr'] = actor_lr
            self.lr = actor_lr
            critic_lr = max(self.args.critic_lr * (self.args.lr_decay_rate**exp), final_lr)
            for param_group in self.critic_optim.param_groups:
                param_group['lr'] = critic_lr
        else:
            self.lr = self.args.actor_lr

        ### Schedule for alpha or target entropy for policy ###
        if self.args.alpha_schedule == 'linear':
            if self.automatic_entropy_tuning:
                self.targ_ent_coeff = (self.args.final_targ_ent_coeff - self.args.init_targ_ent_coeff)*(self.rewards_moving_avg - self.args.init_expected_reward)/\
                    (self.args.peak_expected_reward - self.args.init_expected_reward) + self.args.init_targ_ent_coeff
            else:
                self.alpha = max((self.args.alpha_final - self.args.alpha_init) * float((updates) / self.args.max_updates_alpha) + self.args.alpha_init, self.args.alpha_final)
        elif self.args.alpha_schedule == 'decay':
            if updates % self.args.decay_steps == 0:
                self.alpha = max(self.args.alpha_init/(2**(updates/self.args.decay_steps)), self.args.alpha_final)

        ### Update Obs statistics and get obs ###
        self.rms_obs.update(self.env_train.obs_buf.detach().clone())
        obs_batch = self.rms_obs(self.env_train.obs_buf)

        ### Sample actions and execute it in the environment ###
        with torch.no_grad():
            action, _, _ = self.policy_avg.sample(obs_batch)
            action_batch = action
        force_done_ids = None  # initialized here; otherwise the step call below would hit an unbound local
        with torch.no_grad():
            next_obs_batch, reward_batch, done_batch, info = self.env_train.step(action_batch, force_done_ids)
            next_obs_batch = self.rms_obs(next_obs_batch)
        mask_batch = (1-done_batch.unsqueeze(-1)).float()
        done_env_ids = done_batch.nonzero(as_tuple = False).squeeze(-1).cpu().numpy()

        ### Update reward statistics ###
        self.episode_steps += 1
        self.total_numsteps += self.args.batch_size
        self.episode_rewards += reward_batch
        reward_batch = reward_batch.unsqueeze(-1)
        self.reward_batch_curr = (reward_batch.clone().detach()*self.episode_wts).sum()/self.episode_wts.sum()
        self.rewards_moving_avg = 0.95*self.rewards_moving_avg + 0.05*self.reward_batch_curr

        ### Handling Env Terminations ###
        if len(done_env_ids) > 0:
            self.total_episode_reward_hist += self.episode_rewards[done_env_ids].cpu().numpy().tolist()
            self.episode_len_hist += self.episode_steps[done_env_ids].tolist()
            self.episode_rewards[done_env_ids] = 0
            self.episode_steps[done_env_ids] = 0
            self.num_episodes += len(done_env_ids)
            inv_mask = torch.logical_or(
                torch.logical_or(
                    torch.isnan(info['obs_before_reset'][done_env_ids]).sum(dim=-1) > 0,
                    torch.isinf(info['obs_before_reset'][done_env_ids]).sum(dim=-1) > 0),
                (torch.abs(info['obs_before_reset'][done_env_ids]) > 1e6).sum(dim=-1) > 0
            )
            self.num_nans += inv_mask.float().sum()
            eplen_mask = self.env_train.progress_buf_mask[done_env_ids] == self.env_train.episode_length
            if eplen_mask.float().sum() >0:
                eplen_done_ids = done_env_ids[eplen_mask.cpu().numpy()]
                inv_mask = torch.logical_or(
                    torch.logical_or(
                        torch.isnan(info['obs_before_reset'][eplen_done_ids]).sum(dim=-1) > 0,
                        torch.isinf(info['obs_before_reset'][eplen_done_ids]).sum(dim=-1) > 0),
                    (torch.abs(info['obs_before_reset'][eplen_done_ids]) > 1e6).sum(dim=-1) > 0
                )
                valid_mask = torch.logical_not(inv_mask)
                val_done_ids = eplen_done_ids[valid_mask.cpu().numpy()]
                if len(val_done_ids)>0:
                    mask_batch[val_done_ids] = 1.
                    next_obs_batch[val_done_ids] = self.rms_obs(info['obs_before_reset'][val_done_ids])

        ### Compute next state Q values ###
        with torch.no_grad():
            next_state_action1, next_state_log_pi, _ = self.policy_avg.sample(next_obs_batch.detach(), 1)
            qf1_next_target1, qf2_next_target1 = self.critic_target(next_obs_batch, next_state_action1)
            min_qf_next = torch.min(qf1_next_target1, qf2_next_target1)
        term_mask = mask_batch.clone().detach()

        ### Update replay buffer ###
        self.memory_cgac.add(obs_batch, action_batch, reward_batch, min_qf_next.detach().clone(), mask_batch, term_mask,
                             next_state_log_pi.detach().clone(), self.alpha, self.episode_wts.clone().detach(), updates)

        ### Prevent sudden collapse and reset of too many envs - This can skew the distribution ###
        ### This is done by keeping a count of finished episodes and performing resets at a specific rate ###
        ### The finished episodes until they are reset are ignored for training ###
        env_thres_stop = self.env_thres = (self.args.batch_size)/(self.env_train.ep_lens[:200].float().mean()+1e-8)
        env_thres_start = max((self.args.batch_size)/(self.env_train.ep_lens.float().mean()+1e-8), 1)
        self.len_dones = len(done_env_ids)
        force_done_ids = None
        done_env_ids_resets = list(set(done_env_ids).difference(self.queue_ids))
        if len(done_env_ids_resets) > int(env_thres_stop) + 1:
            env_thres_int = int(env_thres_stop) + 1
            self.episode_wts[done_env_ids_resets[env_thres_int:]] = 0
            self.queue_ids.update(list(done_env_ids_resets[env_thres_int:]))
        if len(done_env_ids_resets) < int(env_thres_start):
            env_thres_int = int(env_thres_start)
            num_resets = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids)+self.args.max_rand_resets)
            num_rand_resets = max(min(num_resets - len(self.queue_ids), self.args.max_rand_resets),0)
            nids = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids))
            queue_ids = list(self.queue_ids)
            if num_rand_resets>0:
                rand_reset_ids = list(np.random.randint(self.args.batch_size, size=(num_rand_resets,)))
                reset_ids = rand_reset_ids + queue_ids
                self.memory_cgac.mask_t[:, rand_reset_ids] *= 0
            else:
                reset_ids = queue_ids[:nids]
            self.episode_wts[reset_ids] = 1
            force_done_ids = torch.tensor(reset_ids).long()
            self.queue_ids = set(queue_ids[nids:]).difference(reset_ids)
            self.env_train.reset(force_done_ids, eplenupdate=False)
            self.episode_rewards[force_done_ids] = 0
            self.episode_steps[force_done_ids] = 0
        self.len_queue = len(self.queue_ids)

        ### Update critic parameters if num_critic_updates > 1 ###
        for i in range(self.args.num_critic_updates-1):
            self.update_parameters_with_RL(updates, critic_update_only=True)
            updates += 1

        ### Update critic params, actor params and alpha ###
        return self.update_parameters_with_RL(updates)

    def update_parameters_with_RL(self, updates, critic_update_only=False):
        obs_batch_new, action_batch, reward_batch, next_q_value, mask_batch, episode_wts = self.memory_cgac.sample(self.args.batch_size_update)
        episode_wts = episode_wts/episode_wts.mean()
        qf1, qf2 = self.critic(obs_batch_new, action_batch)  # Two Q-functions to mitigate positive bias in the policy improvement step

        ### CRITIC UPDATE ###
        ### Computing hinge TD errors. episode_wts are used to ignore transitions ###
        qf1_loss = self.compute_loss(qf1*episode_wts, next_q_value*episode_wts)  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
        qf2_loss = self.compute_loss(qf2*episode_wts, next_q_value*episode_wts)  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
        qf_loss = qf1_loss + qf2_loss
        qf1_loss_batch = (qf1*episode_wts - next_q_value*episode_wts)**2
        qf2_loss_batch = (qf2*episode_wts - next_q_value*episode_wts)**2
        qf_loss_batch = qf1_loss_batch + qf2_loss_batch
        qf_loss_full = qf_loss

        self.critic_optim.zero_grad()
        qf_loss_full.backward()
        critic_grad_norm = clip_grad_norm_(self.critic.parameters(), self.args.grad_norm)
        self.critic_optim.step()

        ### Update actor and alpha if critic_update_only is not True ###
        if not critic_update_only:
            ### ACTOR UPDATE ###
            pi, log_pi, _, x_t = self.policy.sample(obs_batch_new.detach().clone(), with_xt=True)
            qf1_pi, qf2_pi = self.critic(obs_batch_new.detach().clone(), pi)
            min_qf_pi = torch.min(qf1_pi,qf2_pi)*episode_wts
            if self.args.clip_actor_gn:
                qpi_grad = torch.autograd.grad(min_qf_pi.mean(), pi, retain_graph=True)[0]
                ratio = (qpi_grad.abs().max(dim=0)[0]/qpi_grad.abs().median(dim=0)[0]).mean()
                self.mean_ratio = 0.95*self.mean_ratio + 0.05*ratio
                if ratio > self.mean_ratio*2:
                    qpi_grad = torch.clamp(qpi_grad, min=torch.quantile(qpi_grad, 0.5, dim=0), max=torch.quantile(qpi_grad, 0.95, dim=0)).detach().clone()
                policy_loss = (self.alpha * log_pi*episode_wts).mean() - (qpi_grad*pi).sum()  # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
            else:
                policy_loss = ((self.alpha * log_pi*episode_wts) - min_qf_pi).mean()
            # pol_grad = torch.autograd.grad(policy_loss, pi, retain_graph=True)[0]
            # self.snr_q = (pol_grad.mean(dim=0).abs()/pol_grad.std(dim=0)).mean()

            self.policy_optim.zero_grad()
            policy_loss.backward()
            actor_grad_norm = self.policy.layers[3].weight.grad.norm().item()
            self.policy_optim.step()

            ### ALPHA UPDATE ###
            if self.automatic_entropy_tuning:
                alpha_loss = -(self.log_alpha * (log_pi*episode_wts + self.targ_ent_coeff*self.target_entropy).detach()).mean()
                self.alpha_optim.zero_grad()
                alpha_loss.backward()
                self.alpha_optim.step()
                self.alpha = min(self.log_alpha.exp().detach().item(), self.alpha)
                self.log_alpha.data[0] = np.log(self.alpha)
                alpha_tlogs = self.alpha  # For TensorboardX logs
            else:
                alpha_loss = torch.tensor(0.).to(self.device)
                alpha_tlogs = torch.tensor(self.alpha)  # For TensorboardX logs

            ### Update target network and policy avg network ###
            if updates % self.target_update_interval == 0:
                soft_update(self.critic_target, self.critic, self.args.tau_value)
                soft_update(self.policy_avg, self.policy, self.args.tau_policy)

            return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs, log_pi.mean().item(), min_qf_pi.mean().item(), next_q_value.detach().cpu(), \
                qf_loss_batch.detach().cpu(), actor_grad_norm, critic_grad_norm
        return None

    def update_parameters_and_collect_buffer_RL_isaac(self, memory, batch_size, updates):
        ### Learning rate schedule for the actor and critic networks ###
        if self.args.lr_schedule == 'linear':
            final_lr = self.args.final_lr
            actor_lr = max((final_lr - self.args.actor_lr) * float((updates) / self.args.max_updates) + self.args.actor_lr, final_lr)
            for param_group in self.policy_optim.param_groups:
                param_group['lr'] = actor_lr
            self.lr = actor_lr
            critic_lr = max((final_lr - self.args.critic_lr) * float((updates) / self.args.max_updates) + self.args.critic_lr, final_lr)
            for param_group in self.critic_optim.param_groups:
                param_group['lr'] = critic_lr
        elif self.args.lr_schedule == 'step':
            final_lr = self.args.final_lr
            exp = updates//self.args.lr_update_freq
            actor_lr = max(self.args.actor_lr * (self.args.lr_decay_rate**exp), final_lr)
            for param_group in self.policy_optim.param_groups:
                param_group['lr'] = actor_lr
            self.lr = actor_lr
            critic_lr = max(self.args.critic_lr * (self.args.lr_decay_rate**exp), final_lr)
            for param_group in self.critic_optim.param_groups:
                param_group['lr'] = critic_lr
        else:
            self.lr = self.args.actor_lr

        ### Schedule for alpha or target entropy for policy ###
        if self.args.alpha_schedule == 'linear':
            if self.automatic_entropy_tuning:
                self.targ_ent_coeff = max((self.args.final_targ_ent_coeff - self.args.init_targ_ent_coeff)*(self.rewards_moving_avg - self.args.init_expected_reward)/\
                    (self.args.peak_expected_reward - self.args.init_expected_reward) + self.args.init_targ_ent_coeff, -0.4)  #, self.targ_ent_coeff) #self.args.final_targ_ent_coeff)
                # if self.rewards_moving_avg > self.args.peak_expected_reward:
                #     self.alpha = min(self.alpha, 1e-5)
            else:
                self.alpha = max((self.args.alpha_final - self.args.alpha_init) * float((updates) / self.args.max_updates_alpha) + self.args.alpha_init, self.args.alpha_final)
        elif self.args.alpha_schedule == 'decay':
            if updates % self.args.decay_steps == 0:
                self.alpha = max(self.args.alpha_init/(2**(updates/self.args.decay_steps)), self.args.alpha_final)

        ### Update Obs statistics and get obs ###
        obs_dict, dones = self._env_reset_done()
        obs_buf = obs_dict['obs']
        self.rms_obs.update(obs_buf)
        obs_batch = self.rms_obs(obs_buf)

        ### Sample actions and execute it in the environment ###
        with torch.no_grad():
            action, _, _ = self.policy_avg.sample(obs_batch)
            action_batch = action
        next_obs_batch, reward_batch, done_batch, info = self.env_train.step(action_batch)
        next_obs_batch = next_obs_batch['obs']
        next_obs_batch = self.rms_obs(next_obs_batch)
        mask_batch = (1-done_batch.unsqueeze(-1)).float()
        done_env_ids = done_batch.nonzero(as_tuple = False).squeeze(-1).cpu().numpy()
        term_mask = mask_batch.clone()

        ### Update reward statistics ###
        self.episode_steps += 1
        self.total_numsteps += self.args.batch_size
        self.episode_rewards += reward_batch.detach().clone()
        reward_batch = reward_batch.unsqueeze(-1)
        self.reward_batch_curr = (reward_batch.clone().detach()*self.episode_wts).sum()/self.episode_wts.sum()
        self.rewards_moving_avg = 0.95*self.rewards_moving_avg + 0.05*self.reward_batch_curr

        ### Handling Env Terminations ###
        if len(done_env_ids) > 0:
            self.total_episode_reward_hist += self.episode_rewards[done_env_ids].cpu().numpy().tolist()
            self.episode_len_hist += self.episode_steps[done_env_ids].tolist()
            self.episode_rewards[done_env_ids] = 0
            self.episode_steps[done_env_ids] = 0
            self.num_episodes += len(done_env_ids)
            inv_mask = torch.logical_or(
                torch.logical_or(
                    torch.isnan(next_obs_batch[done_env_ids]).sum(dim=-1) > 0,
                    torch.isinf(next_obs_batch[done_env_ids]).sum(dim=-1) > 0),
                (torch.abs(next_obs_batch[done_env_ids]) > 1e6).sum(dim=-1) > 0
            )
            self.num_nans += inv_mask.float().sum()
            self.ep_lens = torch.cat([self.env_train.progress_buf[done_env_ids].clone(), self.ep_lens], dim=0)[:200]
            eplen_mask = self.env_train.progress_buf[done_env_ids] == self.env_train.max_episode_length  # Need to check
            if eplen_mask.float().sum() >0:
                eplen_done_ids = done_env_ids[eplen_mask.cpu().numpy()]
                inv_mask = torch.logical_or(
                    torch.logical_or(
                        torch.isnan(next_obs_batch[eplen_done_ids]).sum(dim=-1) > 0,
                        torch.isinf(next_obs_batch[eplen_done_ids]).sum(dim=-1) > 0),
                    (torch.abs(next_obs_batch[eplen_done_ids]) > 1e6).sum(dim=-1) > 0
                )
                valid_mask = torch.logical_not(inv_mask)
                val_done_ids = eplen_done_ids[valid_mask.cpu().numpy()]
                if len(val_done_ids)>0:
                    mask_batch[val_done_ids] = 1.

        ### Compute next state Q values ###
        with torch.no_grad():
            next_state_action1, next_state_log_pi, _ = self.policy_avg.sample(next_obs_batch.detach(), 1)
            qf1_next_target1, qf2_next_target1 = self.critic_target(next_obs_batch, next_state_action1)
            min_qf_next = torch.min(qf1_next_target1, qf2_next_target1)

        ### Update replay buffer ###
        self.memory_cgac.add(obs_batch, action_batch, reward_batch, min_qf_next.detach().clone(), mask_batch, term_mask,
                             next_state_log_pi.detach().clone(), self.alpha, self.episode_wts.clone().detach(), updates)

        ### Prevent sudden collapse and reset of too many envs - This can skew the distribution ###
        ### This is done by keeping a count of finished episodes and performing resets at a specific rate ###
        ### The finished episodes until they are reset are ignored for training ###
        env_thres_stop = self.env_thres = (self.args.batch_size)/(self.ep_lens[:200].float().mean()+1e-8)
        env_thres_start = (self.args.batch_size)/(self.ep_lens.float().mean()+1e-8)
        self.len_dones = len(done_env_ids)
        done_env_ids_resets = list(set(done_env_ids).difference(self.queue_ids))
        force_done_ids = None
        if len(done_env_ids_resets) > int(env_thres_stop) + 1:
            env_thres_int = int(env_thres_stop) + 1
            self.episode_wts[done_env_ids_resets[env_thres_int:]] = 0
            self.queue_ids.update(list(done_env_ids_resets[env_thres_int:]))
        if len(done_env_ids_resets) < int(env_thres_start):
            env_thres_int = int(env_thres_start)
            num_resets = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids)+self.args.max_rand_resets)
            num_rand_resets = max(min(num_resets - len(self.queue_ids), self.args.max_rand_resets),0)
            nids = min(env_thres_int-len(done_env_ids_resets), len(self.queue_ids))
            queue_ids = list(self.queue_ids)
            if num_rand_resets>0:
                rand_reset_ids = list(np.random.randint(self.args.batch_size, size=(num_rand_resets,)))
                reset_ids = rand_reset_ids + queue_ids
                self.memory_cgac.mask_t[:, rand_reset_ids] *= 0
            else:
                reset_ids = queue_ids[:nids]
            if len(reset_ids) > 0:
                self.episode_wts[reset_ids] = 1
                force_done_ids = torch.tensor(reset_ids).long()
                self.queue_ids = set(queue_ids[nids:]).difference(reset_ids)
                self.env_train.reset_idx(force_done_ids, force_done_ids)
                self.episode_rewards[force_done_ids] = 0
                self.episode_steps[force_done_ids] = 0
        self.len_queue = len(self.queue_ids)

        ### Update critic parameters if num_critic_updates > 1 ###
        for i in range(self.args.num_critic_updates-1):
            self.update_parameters_with_RL_isaac(updates, critic_update_only=True)
            updates += 1

        ### Update critic params, actor params and alpha ###
        return self.update_parameters_with_RL_isaac(updates)

    def update_parameters_with_RL_isaac(self, updates, critic_update_only=False):
        obs_batch_new, action_batch, reward_batch, next_q_value, mask_batch, episode_wts = self.memory_cgac.sample(self.args.batch_size_update)
        episode_wts = episode_wts/episode_wts.mean()
        qf1, qf2 = self.critic(obs_batch_new, action_batch)  # Two Q-functions to mitigate positive bias in the policy improvement step

        ### CRITIC UPDATE ###
        ### Computing hinge TD errors. episode_wts are used to ignore transitions ###
        qf1_loss = self.compute_loss(qf1*episode_wts, next_q_value*episode_wts)
        qf2_loss = self.compute_loss(qf2*episode_wts, next_q_value*episode_wts)
        qf_loss = qf1_loss + qf2_loss
        qf1_loss_batch = (qf1*episode_wts - next_q_value*episode_wts)**2
        qf2_loss_batch = (qf2*episode_wts - next_q_value*episode_wts)**2
        qf_loss_batch = qf1_loss_batch + qf2_loss_batch
        qf_loss_full = qf_loss

        self.critic_optim.zero_grad()
        qf_loss_full.backward()
        critic_grad_norm = clip_grad_norm_(self.critic.parameters(), self.args.grad_norm)
        self.critic_optim.step()

        ### Update actor and alpha if critic_update_only is not True ###
        if not critic_update_only:
            ### ACTOR UPDATE ###
            pi, log_pi, _ = self.policy.sample(obs_batch_new.detach().clone())
            qf1_pi, qf2_pi = self.critic(obs_batch_new.detach().clone(), pi)
            min_qf_pi = torch.min(qf1_pi,qf2_pi)*episode_wts
            if self.args.clip_actor_gn:
                qpi_grad = torch.autograd.grad(min_qf_pi.mean(), pi, retain_graph=True)[0]
                ratio = (qpi_grad.abs().max(dim=0)[0]/qpi_grad.abs().median(dim=0)[0]).mean()
                self.mean_ratio = 0.95*self.mean_ratio + 0.05*ratio
                if ratio > self.mean_ratio*2:
                    qpi_grad = torch.clamp(qpi_grad, min=torch.quantile(qpi_grad, 0.5, dim=0), max=torch.quantile(qpi_grad, 0.95, dim=0)).detach().clone()
                policy_loss = (self.alpha * log_pi*episode_wts).mean() - (qpi_grad*pi).sum()  # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
            else:
                policy_loss = ((self.alpha * log_pi*episode_wts) - min_qf_pi).mean()

            self.policy_optim.zero_grad()
            policy_loss.backward()
            actor_grad_norm = self.policy.layers[3].weight.grad.norm().item()
            self.policy_optim.step()

            ### ALPHA UPDATE ###
            if self.automatic_entropy_tuning:
                alpha_loss = -(self.log_alpha * (log_pi*episode_wts + self.targ_ent_coeff*self.target_entropy).detach()).mean()
                self.alpha_optim.zero_grad()
                alpha_loss.backward()
                self.alpha_optim.step()
                self.alpha = min(self.log_alpha.exp().detach().item(), self.alpha)
                self.log_alpha.data[0] = np.log(self.alpha)
                alpha_tlogs = self.alpha  # For TensorboardX logs
            else:
                alpha_loss = torch.tensor(0.).to(self.device)
                alpha_tlogs = torch.tensor(self.alpha)  # For TensorboardX logs

            ### Update target network and policy avg network ###
            if updates % self.target_update_interval == 0:
                soft_update(self.critic_target, self.critic, self.args.tau_value)
                soft_update(self.policy_avg, self.policy, self.args.tau_policy)

            return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs, log_pi.mean().item(), min_qf_pi.mean().item(), next_q_value.detach().cpu(), \
                qf_loss_batch.detach().cpu(), actor_grad_norm, critic_grad_norm
        return None

    def _env_reset_done(self):
        """Reset the environment.
        Returns:
            Observation dictionary, indices of environments being reset
        """
        done_env_ids = self.env_train.reset_buf.nonzero(as_tuple=False).flatten()
        goal_env_ids = self.env_train.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)

        # if only goals need reset, then call set API
        if len(goal_env_ids) > 0 and len(done_env_ids) == 0:
            self.env_train.reset_target_pose(goal_env_ids, apply_reset=True)
        # if goals need reset in addition to other envs, call set API in reset()
        elif len(goal_env_ids) > 0:
            self.env_train.reset_target_pose(goal_env_ids)
        if len(done_env_ids) > 0:
            self.env_train.reset_idx(done_env_ids, goal_env_ids)
        if len(goal_env_ids) > 0 or len(done_env_ids) > 0:
            self.env_train.compute_observations()
        self.obs_dict["obs"] = torch.clamp(self.env_train.obs_buf, -self.env_train.clip_obs, self.env_train.clip_obs).to(self.env_train.rl_device)
        return self.obs_dict, done_env_ids

    # Save model parameters
    def save_checkpoint(self, env_name, suffix="", ckpt_path=None):
        if not os.path.exists('checkpoints/'):
            os.makedirs('checkpoints/')
        if ckpt_path is None:
            ckpt_path = "checkpoints/cgac_checkpoint_{}_{}".format(env_name, suffix)
        print('Saving models to {}'.format(ckpt_path))
        torch.save({'policy_state_dict': self.policy.state_dict(),
                    'critic_state_dict': self.critic.state_dict(),
                    'critic_target_state_dict': self.critic_target.state_dict(),
                    'critic_optimizer_state_dict': self.critic_optim.state_dict(),
                    'policy_optimizer_state_dict': self.policy_optim.state_dict()}, ckpt_path)

    # Load model parameters
    def load_checkpoint(self, ckpt_path, evaluate=False):
        print('Loading models from {}'.format(ckpt_path))
        if ckpt_path is not None:
            checkpoint = torch.load(ckpt_path)
            self.policy.load_state_dict(checkpoint['policy_state_dict'])
            self.critic.load_state_dict(checkpoint['critic_state_dict'])
            self.critic_target.load_state_dict(checkpoint['critic_target_state_dict'])
            self.critic_optim.load_state_dict(checkpoint['critic_optimizer_state_dict'])
            self.policy_optim.load_state_dict(checkpoint['policy_optimizer_state_dict'])

            if evaluate:
                self.policy.eval()
                self.critic.eval()
                self.critic_target.eval()
            else:
                self.policy.train()
                self.critic.train()
                self.critic_target.train()
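# --- Hedged illustration (added; not part of the original file) ---
# compute_loss above switches from a squared error to a linear penalty once
# the absolute TD error exceeds 4x the batch median, which caps the influence
# of outlier targets. A tiny standalone check of that behavior:
#
#   import torch
#   out  = torch.tensor([[0.0], [0.0], [0.0], [0.0]])
#   targ = torch.tensor([[1.0], [1.0], [1.0], [100.0]])
#   # median |error| = 1, so only the 100.0 target falls on the linear branch:
#   # cost = mean(1 + 1 + 1 + 100*8*1) = 200.75, instead of 2500.75 under pure MSE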
31,804
Python
52.543771
212
0.590272
RoboticExplorationLab/CGAC/cgac/utils_cgac.py
import math
import torch
import torch.nn as nn
import numpy as np


def create_log_gaussian(mean, log_std, t):
    """Creates a log probability for a Gaussian distribution."""
    quadratic = -((0.5 * (t - mean) / (log_std.exp())).pow(2))
    l = mean.shape
    log_z = log_std
    z = l[-1] * math.log(2 * math.pi)
    log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
    return log_p


def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable logsumexp."""
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    s, _ = torch.max(inputs, dim=dim, keepdim=True)
    outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
    if not keepdim:
        outputs = outputs.squeeze(dim)
    return outputs


def soft_update(target, source, tau):
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)


def hard_update(target, source):
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(param.data)


def rand_sample(act_space, bsz):
    return torch.rand((bsz,act_space.shape[0]))*(act_space.high-act_space.low) + act_space.low


def grad_norm(params):
    """Computes the norm of gradients for a group of parameters."""
    grad_norm = 0.
    for p in params:
        if p.grad is not None:
            grad_norm += torch.sum(p.grad ** 2)
    return torch.sqrt(grad_norm)


class dotdict(dict):
    """dot.notation access to dictionary attributes"""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


class RunningMeanStd(nn.Module):
    """Updates running mean/variance statistics from full batches of data."""
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        super(RunningMeanStd, self).__init__()
        print('RunningMeanStd: ', insize)
        self.insize = insize
        self.epsilon = epsilon
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = insize

        self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
        self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
        self.register_buffer("count", torch.ones((), dtype = torch.float64))

    def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        delta = batch_mean - mean
        tot_count = count + batch_count
        new_mean = mean + delta * batch_count / tot_count
        m_a = var * count
        m_b = batch_var * batch_count
        M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
        new_var = M2 / tot_count
        new_count = tot_count
        return new_mean, new_var, new_count

    def update(self, input):
        mean = input.mean(self.axis)  # along channel axis
        var = input.var(self.axis)
        self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(
            self.running_mean, self.running_var, self.count, mean, var, input.size()[0])

    def forward(self, input, unnorm=False):
        # change shape
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.running_mean.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
                current_var = self.running_var.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.running_mean.detach().view([1, self.insize[0], 1]).expand_as(input)
                current_var = self.running_var.detach().view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.running_mean.detach().view([1, self.insize[0]]).expand_as(input)
                current_var = self.running_var.detach().view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.running_mean.detach()
            current_var = self.running_var.detach()
        # get output
        if unnorm:
            y = torch.clamp(input, min=-5.0, max=5.0)
            y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
        else:
            if self.norm_only:
                y = input / torch.sqrt(current_var.float() + self.epsilon)
            else:
                y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
                y = torch.clamp(y, min=-5.0, max=5.0)
        return y


class RunningGradMag(nn.Module):
    """Updates a running mean of absolute magnitudes from full batches of data."""
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False, const=1):
        super(RunningGradMag, self).__init__()
        print('RunningGradMag: ', insize)
        self.insize = insize
        self.epsilon = epsilon
        self.const = const
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = insize

        self.register_buffer("running_absmean", torch.ones(in_size, dtype = torch.float64))
        self.register_buffer("count", torch.ones((), dtype = torch.float64))

    def _update_mean_var_count_from_moments(self, mean, count, batch_mean, batch_count):
        delta = batch_mean - mean
        tot_count = count + batch_count
        new_mean = mean + delta * batch_count / tot_count
        new_count = tot_count
        return new_mean, new_count

    def update(self, input):
        mean = input.abs().mean(self.axis)  # along channel axis
        self.running_absmean, self.count = self._update_mean_var_count_from_moments(
            self.running_absmean, self.count, mean, input.size()[0])

    def forward(self, input, unnorm=False):
        # change shape
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.running_absmean.detach().view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.running_absmean.detach().view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.running_absmean.detach().view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.running_absmean.detach()
        # get output
        if unnorm:
            y = input/self.const  #(current_mean.float())
        else:
            y = input*self.const  #(current_mean.float())
        return y


class RunningMeanStdObs(nn.Module):
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert isinstance(insize, dict)
        super(RunningMeanStdObs, self).__init__()
        self.running_mean_std = nn.ModuleDict({
            k : RunningMeanStd(v, epsilon, per_channel, norm_only) for k,v in insize.items()
        })

    def forward(self, input, unnorm=False):
        # index the ModuleDict per key (calling the dict itself would fail)
        res = {k : self.running_mean_std[k](v, unnorm) for k,v in input.items()}
        return res


class DummyRMS(nn.Module):
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        super(DummyRMS, self).__init__()
        print('DummyRMS: ', insize)
        self.insize = insize
        self.epsilon = epsilon

    def forward(self, input, unnorm=False):
        return input

    def update(self, input):
        return None
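# --- Hedged illustration (added; not part of the original file) ---
# _update_mean_var_count_from_moments is the parallel-moments merge (Chan et
# al.): it combines (mean, var, count) of two sample sets without revisiting
# the data. A quick numeric sanity check against torch's own statistics
# (approximate, since the buffers start at count=1 and the batch variance is
# the unbiased estimate):
#
#   import torch
#   a, b = torch.randn(100), torch.randn(50) + 1.0
#   rms = RunningMeanStd((1,))
#   rms.update(a.view(-1, 1)); rms.update(b.view(-1, 1))
#   both = torch.cat([a, b])
#   # rms.running_mean ≈ both.mean(); rms.running_var ≈ both.var(unbiased=False)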
8,104
Python
37.051643
152
0.572804
RoboticExplorationLab/CGAC/cgac/model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal

LOG_SIG_MAX = 2
LOG_SIG_MIN = -5
epsilon = 1e-6

activations_dict = {'elu': nn.ELU(), 'relu': nn.ReLU(), 'tanh': nn.Tanh()}


# Initialize Policy weights
def weights_init_(m):
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)


class QNetwork(nn.Module):
    def __init__(self, num_inputs, num_actions, hidden_dim, args):
        super(QNetwork, self).__init__()
        if isinstance(hidden_dim, int):
            hidden_dim = [hidden_dim, hidden_dim]
        layer_sizes = [num_inputs + num_actions,]+hidden_dim
        activation = activations_dict[args.critic_act]

        # Q1 architecture
        layers1 = []
        for i in range(len(layer_sizes)-1):
            layers1.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
            layers1.append(activation)
        layers1.append(nn.Identity())
        self.layers1 = nn.Sequential(*layers1)
        self.layer_out1 = nn.Linear(layer_sizes[-1], 1)

        # Q2 architecture
        layers2 = []
        for i in range(len(layer_sizes)-1):
            layers2.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
            layers2.append(activation)
        layers2.append(nn.Identity())
        self.layers2 = nn.Sequential(*layers2)
        self.layer_out2 = nn.Linear(layer_sizes[-1], 1)

        self.apply(weights_init_)

    def forward(self, state, action):
        xu = torch.cat([state, action], 1)

        x1 = self.layers1(xu)
        x1 = self.layer_out1(x1)

        x2 = self.layers2(xu)
        x2 = self.layer_out2(x2)

        return x1, x2


class GaussianPolicy(nn.Module):
    def __init__(self, num_inputs, num_actions, hidden_dim, args, action_space=None, const_std=False):
        super(GaussianPolicy, self).__init__()
        if isinstance(hidden_dim, int):
            hidden_dim = [hidden_dim, hidden_dim]
        layer_sizes = [num_inputs,]+hidden_dim
        activation = activations_dict[args.actor_act]

        layers = []
        for i in range(len(layer_sizes)-1):
            layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
            layers.append(activation)
        layers.append(nn.Identity())
        self.layers = nn.Sequential(*layers)
        self.mean_linear = nn.Linear(layer_sizes[-1], num_actions)
        self.const_std = const_std
        if const_std:
            logstd = -1.0  #'actor_logstd_init'
            self.logstd = torch.nn.Parameter(torch.ones(num_actions, dtype=torch.float32) * logstd)
        else:
            self.log_std_linear = nn.Linear(layer_sizes[-1], num_actions)

        self.apply(weights_init_)

        # action rescaling
        if action_space is None:
            self.action_scale = torch.tensor(1.)
            self.action_bias = torch.tensor(0.)
        else:
            self.action_scale = torch.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = torch.FloatTensor(
                (action_space.high + action_space.low) / 2.)

    def forward(self, state):
        x = self.layers(state)
        mean = self.mean_linear(x)
        if self.const_std:
            log_std = self.logstd
        else:
            log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
        return mean, log_std

    def log_prob(self, state, action, x_t):
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        y_t = (action - self.action_bias)/self.action_scale
        log_prob = normal.log_prob(x_t)
        log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        return log_prob

    def sample(self, state, netid=1, with_xt=False):
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        x_t = normal.rsample()  # for reparameterization trick (mean + std * N(0,1))
        y_t = torch.tanh(x_t)
        action = y_t * self.action_scale + self.action_bias
        log_prob = normal.log_prob(x_t)
        # Enforcing Action Bound
        log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        mean = torch.tanh(mean) * self.action_scale + self.action_bias
        if with_xt:
            return action, log_prob, mean, x_t
        else:
            return action, log_prob, mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(GaussianPolicy, self).to(device)


class DeterministicPolicy(nn.Module):
    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(DeterministicPolicy, self).__init__()
        if isinstance(hidden_dim, int):
            hidden_dim = [hidden_dim, hidden_dim]
        layer_sizes = [num_inputs,]+hidden_dim

        layers = []
        for i in range(len(layer_sizes)-1):
            layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
            layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)
        self.mean = nn.Linear(layer_sizes[-1], num_actions)
        self.noise = torch.Tensor(num_actions)

        self.apply(weights_init_)

        # action rescaling
        if action_space is None:
            self.action_scale = 1.
            self.action_bias = 0.
        else:
            self.action_scale = torch.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = torch.FloatTensor(
                (action_space.high + action_space.low) / 2.)

    def forward(self, state):
        x = self.layers(state)
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        return mean

    def sample(self, state):
        mean = self.forward(state)
        noise = self.noise.normal_(0., std=0.1)
        noise = noise.clamp(-0.25, 0.25)
        action = mean + noise
        return action, torch.tensor(0.), mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        self.noise = self.noise.to(device)
        return super(DeterministicPolicy, self).to(device)
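# --- Hedged illustration (added; not part of the original file) ---
# The log-prob correction in GaussianPolicy.sample/log_prob is the standard
# change-of-variables term for a tanh-squashed Gaussian: with
# a = scale * tanh(x) + bias and x ~ N(mean, std),
#   log pi(a|s) = log N(x; mean, std) - sum_i log(scale_i * (1 - tanh(x_i)^2) + eps),
# where eps guards against log(0) as |tanh(x)| -> 1. A quick sanity check
# under the assumption scale=1, bias=0:
#
#   import torch
#   from torch.distributions import Normal
#   x = torch.zeros(1, 1)  # tanh(0) = 0, so the correction is log(1 + 1e-6) ≈ 0
#   lp = Normal(0., 1.).log_prob(x) - torch.log(1 - torch.tanh(x)**2 + 1e-6)
#   # lp ≈ Normal(0., 1.).log_prob(x) at x = 0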
6,511
Python
34.9779
102
0.58071
shikimori/shikimori/CONTRIBUTING.md
* [Fork](https://help.github.com/articles/fork-a-repo) the project on GitHub.
* Make your feature addition or bug fix in a feature branch. (Include a description of your changes)
* Push your feature branch to GitHub.
* Send a [Pull Request](https://help.github.com/articles/using-pull-requests).

After opening your pull request, ensure all tests pass on Circle CI. If a test fails and you believe it is unrelated to your change, leave a comment on the pull request explaining why.
482
Markdown
67.99999
184
0.76971
shikimori/shikimori/README.md
[![RSpec CI](https://github.com/shikimori/shikimori/actions/workflows/rspec.yml/badge.svg?branch=master)](https://github.com/shikimori/shikimori/actions/workflows/rspec.yml)

## Contributing
Feel free to open tickets or send pull requests with improvements. Thanks in advance for your help!

Please follow the [contribution guidelines](https://github.com/shikimori/shikimori/blob/master/CONTRIBUTING.md).

## Requirements
OSX or Linux

PostgreSQL >= 10.0, Ruby >= 2.6, NodeJS >= 10.0, Elasticsearch 6.x (7.0 not supported), Memcached, Redis

## Issues Board (Agile Season)
https://agileseason.com/#/shared/board/098d2e36dff32f296d7815cf943ac8eb

## Requirements

### Checkout all projects
```sh
git clone git@github.com:shikimori/shikimori.git
git clone git@github.com:shikimori/neko-achievements.git
cd neko-achievements
mix local.hex --force
mix deps.get
cd ..
git clone git@github.com:shikimori/camo-server.git
cd camo-server
yarn
cd ..
git clone git@github.com:shikimori/faye-server.git
cd faye-server
yarn
cd ..
cd shikimori
```

### Install `yarn`, `tmux` and `overmind` via Homebrew (OSX)
```sh
brew install yarn tmux overmind
```
In linux you have to install them another way.

### Install dependent gems and npm packages
```sh
yarn install
bundle install
```

## PostgreSQL
### DB
```sh
psql -d postgres
```
```sql
create user shikimori_development;
create user shikimori_test;
alter user shikimori_development createdb;
alter user shikimori_test createdb;
alter user shikimori_development with superuser;
alter user shikimori_test with superuser;
```

### Create databases
Make sure `en_US.UTF-8` database collation is set
[https://gist.github.com/ffmike/877447#gistcomment-2851598](https://gist.github.com/morr/9507173acfd504837a7feb4485a5f669)

Or you manually initialize new database with command
```sh
initdb --pgdata=/usr/local/var/postgres-16 -E 'UTF-8' --lc-collate='en_US.UTF-8' --lc-ctype='en_US.UTF-8'
```

Or initdb for apple M1
```sh
initdb --pgdata=/usr/local/var/postgresql@16 -E 'UTF-8' --lc-collate='en_US.UTF-8' --lc-ctype='en_US.UTF-8'
```

Create rails databases
```sh
rails db:create
```

## Local Run
Everything you need to run is listed in [Procfile](https://github.com/shikimori/shikimori/blob/master/Procfile).
Shikimori uses [Overmind](https://github.com/DarthSim/overmind) to execute `Procfile`.

### Restore from a backup
```sh
rails db:drop && rails db:create
unzip -d db/ db/dump.sql.zip
psql -U shikimori_development -d shikimori_development -f db/dump.sql
rm db/dump.sql
RAILS_ENV=test rails db:schema:load
# migrate dump to latest schema
rails db:migrate
```

### Start rails server
```sh
rails server
```

### Start related services
```sh
overmind start
```

### Start some of related services
```sh
OVERMIND_PROCESSES=camo,faye overmind start
```

## Elasticsearch
In rails console:
```
Elasticsearch::RebuildIndexes.new.perform
```

## Elasticsearch fix on OSX
https://github.com/Homebrew/homebrew-core/issues/100260#issuecomment-1137067501
```
I've finally made it work, but I'm not sure this is the right call:

I've edited the service plist at /usr/local/Cellar/elasticsearch@6/6.8.23/homebrew.mxcl.elasticsearch@6.plist:

  <key>ProgramArguments</key>
  <array>
    <string>/usr/local/opt/elasticsearch@6/bin/elasticsearch</string>
  </array>
  <key>EnvironmentVariables</key>
  <dict>
+   <key>JAVA_HOME</key>
+   <string>'/usr/libexec/java_home -v 17'</string>
  </dict>

I had to edit the plist in the Cellar folder instead of the one in ~/Library/LaunchAgents because brew services is overwriting it at every start.
```

## Update neko rules
```sh
rails neko:update
```

## Other
### Make a backup
```sh
pg_dump -c shikimori_development > db/dump.sql
```

### Autorun rspec & rubocop
```sh
guard
```

### Record apipie docs
```sh
APIPIE_RECORD=all rspec spec/controllers/api/**
```

### Add new video hosting
```ruby
# app/services/video_extractor/player_url_extractor.rb
```

### Run locally in production mode
```sh
RAILS_ENV=production rails assets:precompile && IS_LOCAL_RUN=true RAILS_ENV=production rails server
```

### Webpack debugger
https://nodejs.org/en/docs/inspector/

Install the Chrome Extension NIM (Node Inspector Manager): https://chrome.google.com/webstore/detail/nim-node-inspector-manage/gnhhdgbaldcilmgcpfddgdbkhjohddkj
```sh
RAILS_ENV=development NODE_ENV=development NODE_PATH=node_modules node --inspect-brk node_modules/.bin/webpack-dev-server --progress --color --config config/webpack/development.js
```

### Shakapacker debugger
https://nodejs.org/en/docs/inspector/

Install the Chrome Extension NIM (Node Inspector Manager): https://chrome.google.com/webstore/detail/nim-node-inspector-manage/gnhhdgbaldcilmgcpfddgdbkhjohddkj
```sh
./bin/shakapacker-dev-server --debug-shakapacker
```

### Webpack visualizer
https://chrisbateman.github.io/webpack-visualizer/

### Dependabot
```
@dependabot ignore this dependency
```

## [Sandboxes](/doc/sandboxes.md)
4,968
Markdown
24.482051
179
0.741143
shikimori/shikimori/config/i18n.yml
---
translations:
- file: "app/packs/javascripts/i18n/translations.json"
  patterns:
  - '*.activerecord.attributes.user_rate.*'
  - '*.activerecord.attributes.collection_link.*'
  - '*.activerecord.attributes.external_link.url'
  - '*.activerecord.attributes.user_rate.statuses.anime.*'
  - '*.activerecord.attributes.user_rate.statuses.manga.*'
  - '*.frontend.*'
394
YAML
34.909088
62
0.659898
shikimori/shikimori/config/cable.yml
development:
  adapter: async

test:
  adapter: test

production:
  adapter: redis
  url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
  channel_prefix: shikimori_production
190
YAML
16.363635
67
0.684211
shikimori/shikimori/config/appsignal.yml
default: &defaults # Your push api key, it is possible to set this dynamically using ERB: # push_api_key: "<%= ENV['APPSIGNAL_PUSH_API_KEY'] %>" push_api_key: '3b66a159-85bd-43dc-acc4-b43750364cdc' # Your app's name name: 'App' # Actions that should not be monitored by AppSignal # ignore_actions: # - ApplicationController#isup # Errors that should not be recorded by AppSignal # For more information see our docs: # https://docs.appsignal.com/ruby/configuration/ignore-errors.html ignore_errors: - AbstractController::ActionNotFound - ActionController::InvalidAuthenticityToken - ActionController::ParameterMissing - ActionController::RoutingError - ActionController::UnknownFormat - ActionController::UnknownHttpMethod - ActionController::BadRequest - ActionDispatch::RemoteIp::IpSpoofAttackError - ActiveRecord::PreparedStatementCacheExpired - ActiveRecord::RecordNotFound - CanCan::AccessDenied - I18n::InvalidLocale - Unicorn::ClientShutdown - AgeRestricted - MismatchedEntries - InvalidEpisodesError - CopyrightedResource - Net::SMTPServerBusy - Net::SMTPFatalError - Interrupt - Apipie::ParamMissing - InvalidIdError - InvalidParameterError - EmptyContentError - MalParser::RecordNotFound - Errors::NotIdentifiedByImageMagickError - Sidekiq::Shutdown - Terrapin::ExitStatusError # See http://docs.appsignal.com/ruby/configuration/options.html for # all configuration options. # Configuration per environment, leave out an environment or set active # to false to not push metrics for that environment. beta: <<: *defaults active: false development: <<: *defaults active: false production: <<: *defaults active: true
1,784
YAML
27.790322
72
0.727018
shikimori/shikimori/config/database.yml
development: &defaults
  adapter: postgresql
  encoding: utf8
  database: <%= ENV['POSTGRES_DEV_DB'] %>
  username: <%= ENV['POSTGRES_DEV_USER'] %>
  password: <%= ENV['POSTGRES_DEV_PASSWORD'].presence %>
  host: <%= ENV['POSTGRES_DEV_HOST'] %>
  pool: 100
  timeout: 5000
  collation: ru_RU.UTF-8
  ctype: ru_RU.UTF-8
  template: template0

production:
  <<: *defaults

test:
  <<: *defaults
  database: <%= ENV['POSTGRES_TEST_DB'] %><%= ENV['TEST_ENV_NUMBER'] %>
  username: <%= ENV['POSTGRES_TEST_USER'] %>
  password: <%= ENV['POSTGRES_TEST_PASSWORD'].presence %>
  host: <%= ENV['POSTGRES_TEST_HOST'] %>
625
YAML
25.083332
70
0.632
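Since `database.yml` is ERB-templated over `POSTGRES_*` variables, it can be rendered outside the app to check what Rails will see. A small sketch; the env values are placeholders, and `.presence` used in the template needs ActiveSupport:

```ruby
require 'erb'
require 'yaml'
require 'active_support/core_ext/object/blank' # provides .presence used in the template

ENV['POSTGRES_DEV_DB']   ||= 'shikimori_development' # placeholder for the check
ENV['POSTGRES_DEV_USER'] ||= 'shikimori_development'

rendered = ERB.new(File.read('config/database.yml')).result
# aliases: true because the file relies on &defaults / *defaults
config = YAML.safe_load(rendered, aliases: true)
puts config.dig('development', 'database')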
shikimori/shikimori/config/chewy.yml
# config/chewy.yml # separate environment configs development: &development host: 'localhost:9200' prefix: 'shikimori_development' test: host: 'localhost:9200' prefix: 'shikimori_test' production: host: 'localhost:9200' prefix: <%=ENV['USER'] != 'morr' ? 'shikimori_production' : 'shikimori_development' %>
319
YAML
25.666665
88
0.714734
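The `prefix` keeps each environment's Elasticsearch indexes apart: Chewy prepends it to every index name, which is also how the production ternary can route one user to development indexes. A quick confirmation sketch, assuming a Rails console with the chewy gem loaded:

```ruby
# In `rails console`:
Chewy.settings[:prefix] # => "shikimori_development" in development
# An index class such as `AnimesIndex` (hypothetical name) would then live
# in the Elasticsearch index "shikimori_development_animes".
```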
shikimori/shikimori/config/sidekiq.yml
:concurrency: 5 :pidfile: tmp/pids/sidekiq.pid staging: :concurrency: 5 production: :concurrency: 80 :queues: - [high_priority, 8] - [critical, 10] - [push_notifications, 2] - [default, 5] - [episode_notifications, 5] - [cpu_intensive, 5] - [slow_parsers, 5] - [torrents_parsers, 5] - [mal_parsers, 3] - [anime365_parsers, 3] - [webm_thumbnails, 5] - [history_jobs, 5] - [scores_jobs, 4] - [low_priority, 1] - [cleanup_jobs, 1] - [mailers, 5] - [imports, 4] - [achievements, 6] - [chewy, 10] - [dangerous_actions, 8] :limits: cpu_intensive: 2 slow_parsers: 2 torrents_parsers: 1 webm_thumbnails: 1 history_jobs: 1 scores_jobs: 40 cleanup_jobs: 1 mal_parsers: 40 anime365_parsers: 3 push_notifications: 5 imports: 2 achievements: 50 episode_notifications: 1 dangerous_actions: 1
854
YAML
17.191489
30
0.633489
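Production consumes the weighted `:queues:` list, and the `:limits:` section matches the format used by the sidekiq-limit_fetch gem (an assumption; the dump does not show the Gemfile). A worker pinned to one of the declared queues might look like this sketch — the class and its body are hypothetical, only the queue name comes from the config:

```ruby
require 'sidekiq'

class EpisodeNotificationWorker
  include Sidekiq::Worker
  # Queue declared in sidekiq.yml with weight 5; the :limits: section caps it
  # at 1 concurrently fetched job (if sidekiq-limit_fetch is in use).
  sidekiq_options queue: :episode_notifications

  def perform(anime_id, episode)
    # notification delivery would live here
  end
end

EpisodeNotificationWorker.perform_async(1, 12)
```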
shikimori/shikimori/config/secrets.yml
# Be sure to restart your server when you modify this file. # Your secret key is used for verifying the integrity of signed cookies. # If you change this key, all old signed cookies will become invalid! # Make sure the secret is at least 30 characters and all random, # no regular words or you'll be exposed to dictionary attacks. # You can use `rails secret` to generate a secure secret key. # Make sure the secrets in this file are kept private # if you're sharing your code publicly. # Do not keep production secrets in the repository, # instead read values from the environment. development: &defaults secret_key_base: fd4e8a95884930c25ebfc3020c53b1f4c128912e33c677f7a5139axx0c01b2ef5de41496e446abd4733ad2a3f51404c712acca297d967d651bddfcfd1c1f55aa devise: :secret_key: 2345678fg67fydg9843uitfgr9udfg8ui3ed89fiyucdv8uifgre80tfhgjv9oaf1324346dtyusfjkdsf8sd976732yhjkkednsc78sgcjhb7wyubhjdf867234ingp :pepper: 8d33d1cb74746054xx09e1bccfc63a82fc9aa251cbe03e3d813985040a88cd37c63c35a6af657f9bb30719f243cee977ff0a431d628657e5e44046e178c3096a recaptcha: :v2: :site_key: 6Le4Q58UAAAAAPykYvE5itXM04NSOsYeQUXzowWM :secret_key: 6Le4Q58UAAAAAJ0ylh5Zx3GRIJMtfQoZSqNeVpwt :v3: :site_key: 6LePQ58UAAAAAJ7HyOCd3Y9VtF5Co8I_2kyQJW9y :secret_key: 6LePQ58UAAAAALIpZbycjL-IZZtsp6ZtNg_PFi39 oauth: :facebook: :app_id: 337441442986680 :app_secret: 6750e33a1997602a019e30cdcd79ea13 :app_permissions: "" :vkontakte: :app_id: 2722473 :app_secret: G48K2YtxMajMo67ExE7a :app_permissions: "" :twitter: :secret_key: U8CPcoMCerH9Dqct3sG1XDqBd47XJAroMSuf8Eucjl9YLM49ci :consumer_key: JEukEItluUpRTJB7Tvd9uU9Sb mailgun: :login: xxxxxxxxxxxxxxxxxx :password: xxxxxxxxxxxxxxxxxxxxxxxx s3: connection: :server: s3-eu-west-1.amazonaws.com :access_key_id: xxxxxxxxxxxxxxxxxxxx :secret_access_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx :use_ssl: true # :persistent: true bucket: d.shikimori.org max_file_size: 10485760 acl: public-read access_key_id: xxxxxxxxxxxxxxxxxxxx secret_access_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx gcm: :token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1 api: :anime_videos: :token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx camo: :host: localhost:5566 :port: 5566 :key: abc :endpoint_path: '/' faye: :host: localhost:9292 :port: 9292 :endpoint_path: '/' :token: xxxxxxxxxxxxxxxxxxxx # https://proxy6.net/user/proxy proxy: :url: <%= ENV['PROXY_URL'] %> :login: <%= ENV['PROXY_LOGIN'] %> :password: <%= ENV['PROXY_PASSWORD'] %> :vkontakte: # https://oauth.vk.com/authorize?client_id=2427019&scope=video,offline&redirect_uri=http://api.vk.com/blank.html&display=page&response_type=token :user_access_token: <%= ENV['VK_USER_ACCESS_TOKEN'] %> vimeo: :app_access_token: <%= ENV['VIMEO_APP_ACCESS_TOKEN'] %> turnstile: :site_key: <%= ENV['TURNSTILE_SITE_KEY'] %> :secret_key: <%= ENV['TURNSTILE_SECRET_KEY'] %> yandex_metrika: :oauth_token: <%= ENV['YANDEX_METRIKA_OAUTH_TOKEN'] %> test: <<: *defaults vkontakte: :user_access_token: USER_ACCESS_TOKEN_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx vimeo: :app_access_token: USER_ACCESS_TOKEN_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx production: <<: *defaults
3,454
YAML
35.755319
149
0.739722
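Values from `secrets.yml` surface in the app through `Rails.application.secrets` with deep-symbolized keys, at least in a Rails app of this vintage. A sketch of typical reads; the paths follow the file above, return values elided:

```ruby
Rails.application.secrets[:camo][:host]     # => "localhost:5566"
Rails.application.secrets[:oauth][:twitter] # => { secret_key: ..., consumer_key: ... }
Rails.application.secrets[:proxy][:url]     # resolved from ENV['PROXY_URL'] via ERB
```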
shikimori/shikimori/config/honeybadger.yml
--- # For more options, see https://docs.honeybadger.io/lib/ruby/gem-reference/configuration api_key: 'hbp_tObQMWZaPeNe3kVaGAUZVUc8BAscDg3WQff7' # The environment your app is running in. env: "<%= Rails.env %>" # The absolute path to your project folder. root: "<%= Rails.root.to_s %>" # Honeybadger won't report errors in these environments. development_environments: - test - development - cucumber exceptions: ignore: - AbstractController::ActionNotFound - ActionController::InvalidAuthenticityToken - ActionController::ParameterMissing - ActionController::RoutingError - ActionController::UnknownFormat - ActionController::UnknownHttpMethod - ActionController::BadRequest - ActionDispatch::RemoteIp::IpSpoofAttackError - ActiveRecord::PreparedStatementCacheExpired - ActiveRecord::RecordNotFound - CanCan::AccessDenied - I18n::InvalidLocale - Unicorn::ClientShutdown - AgeRestricted - RknBanned - MismatchedEntries - InvalidEpisodesError - CopyrightedResource - Net::SMTPServerBusy - Net::SMTPFatalError - Interrupt - Apipie::ParamMissing - InvalidIdError - InvalidParameterError - EmptyContentError - MalParser::RecordNotFound - Errors::NotIdentifiedByImageMagickError - Sidekiq::Shutdown - Terrapin::ExitStatusError # By default, Honeybadger won't report errors in the development_environments. # You can override this by explicitly setting report_data to true or false. # report_data: true # The current Git revision of your project. Defaults to the last commit hash. # revision: null # Enable verbose debug logging (useful for troubleshooting). debug: false
1,692
YAML
27.694915
88
0.74409
shikimori/shikimori/config/storage.yml
test: service: Disk root: <%= Rails.root.join("tmp/storage") %> local: service: Disk root: <%= Rails.root.join("storage") %> # Use rails credentials:edit to set the AWS secrets (as aws:access_key_id|secret_access_key) # amazon: # service: S3 # access_key_id: <%= Rails.application.credentials.dig(:aws, :access_key_id) %> # secret_access_key: <%= Rails.application.credentials.dig(:aws, :secret_access_key) %> # region: us-east-1 # bucket: your_own_bucket # Remember not to checkin your GCS keyfile to a repository # google: # service: GCS # project: your_project # credentials: <%= Rails.root.join("path/to/gcs.keyfile") %> # bucket: your_own_bucket # Use rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key) # microsoft: # service: AzureStorage # storage_account_name: your_account_name # storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %> # container: your_container_name # mirror: # service: Mirror # primary: local # mirrors: [ amazon, google, microsoft ]
1,093
YAML
30.257142
101
0.693504
shikimori/shikimori/config/locales/services.en.yml
en:
  list_compare_service:
    group_by_key:
      both: In both lists
      user_only: "In %{nickname}'s list only"
    user_rate_status:
      planned: planned
      dropped: dropped
  messages/create_notification:
    nickname_changed: >-
      Your friend %{old_nickname} changed nickname to %{new_nickname}.
    user_registered_message: >-
      Welcome! [url=%{faq_url}]Here[/url] you'll find the answers to most
      frequently asked questions. You can import anime and manga lists from
      [url=http://myanimelist.net]myanimelist.net[/url] in your
      [url=/%{settings_path}/edit/account]profile settings[/url]. You can
      change your nickname there as well. Before you start posting on the
      forum we recommend you get familiar with our
      [url=%{site_rules_url}]site rules[/url]. If you have any questions or
      suggestions feel free to post them on the forum - we'll try to help you.
    moderatable_banned:
      without_reason: >-
        Your [entry=%{topic_id}]%{entry_name}[/entry] was moved to offtopic
      reason: >-
        because of [quote=%{approver_nickname}]%{reason}[/quote]
    bad_email_message: >-
      Our mail delivery service couldn't deliver mail to your email %{email}.
      You have either specified a nonexistent email in your profile settings
      or marked one of our delivered mails as spam. We recommend that you
      change your email in your profile settings, or else you won't be able
      to restore your account if you forget your password.
  omniauth_service:
    new_user: New user
  bb_codes/tags/replies_tag:
    reply: 'Reply: '
    replies: 'Replies: '
  bb_codes/tags/contest_status_tag:
    started: started
    finished: finished
  bb_codes/tags/contest_round_status_tag:
    started: started
    finished: finished
  messages/generate_body:
    profile_comment: >-
      Left a comment in your <a class='b-link' href='%{profile_url}'>profile</a>.
    friend_request:
      add: Add @{f:her|m:him} to your friend list as well?
      added: Has added you to @{f:her|m:his} friend list.
    quoted_by_user: >-
      You have been <a class="b-link" href="%{comment_url}">mentioned</a> %{linked_name}
    anons: '%{linked_name} anime announced'
    ongoing: '%{linked_name} anime airing'
    episode: '%{linked_name} episode %{episode} released'
    released: '%{linked_name} anime released'
    subscription_commented: New messages %{linked_name}
    warned:
      target: >-
        You have been warned for posting a %{target_type_name} %{linked_name}.
      missing: >-
        You have been warned for posting a %{target_type_name}
        (<em>removed</em>). Reason: "%{reason}".
      other: >-
        You have been warned. Reason: "%{reason}".
    banned:
      target: >-
        You have been banned for %{duration} for posting a %{target_type_name}
        %{linked_name}.
      missing: >-
        You have been banned for %{duration} for posting a %{target_type_name}
        (<em>removed</em>). Reason: "%{reason}".
      other: >-
        You have been banned for %{duration}. Reason: "%{reason}".
    club_request: Invitation to join the club [club]%{club_id}[/club].
    version_accepted: >-
      Your [version=%{version_id}]content change[/version] for
      [%{item_type}]%{item_id}[/%{item_type}] was accepted.
    version_rejected: >-
      Your [version=%{version_id}]content change[/version] for
      [%{item_type}]%{item_id}[/%{item_type}] was rejected.
    version_rejected_with_reason: >-
      Your [version=%{version_id}]content change[/version] for
      [%{item_type}]%{item_id}[/%{item_type}] was rejected because of
      [quote=%{moderator}]%{reason}[/quote]
  messages/mention_source:
    simple_mention:
      nil: <em>deleted</em>
      topic: &default_simple_mention <a href="%{url}"%{bubble}>%{name}</a>
      profile: *default_simple_mention
      review: *default_simple_mention
      critique: *default_simple_mention
      article: *default_simple_mention
      collection: *default_simple_mention
    text_mention:
      nil: in <em>deleted</em>.
      topic: in topic <a href="%{url}"%{bubble}>%{name}</a>.
      profile: in <a href="%{url}"%{bubble}>%{name}</a>'s profile.
      review: in review for <a href="%{url}"%{bubble}>%{name}</a>.
  messages/check_spam_abuse:
    ban_text: >-
      You are banned for spam. Send a word to %{email} if you are innocent.
  users/check_hacked:
    lock_text: >-
      This account was used to spam on the site. Account access has been
      blocked. To recover access, use the password recovery page.
      %{recovery_url}
      In order to avoid such situations in the future, we recommend that you
      do not use simple passwords like "123", "qwerty", "anime", "naruto", etc.
  moderations/banhammer:
    ban_reason: '[url=%{url}]site rule #3[/url]'
4,892
YAML
36.068182
101
0.632461
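These service strings resolve through plain Rails I18n with interpolation; for example (a sketch, argument values are placeholders):

```ruby
I18n.t(
  'messages/create_notification.nickname_changed',
  old_nickname: 'old_nick', new_nickname: 'new_nick'
)
# => "Your friend old_nick changed nickname to new_nick."
```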
shikimori/shikimori/config/locales/value_objects.en.yml
en:
  titles/season_title:
    anime: &anime_season_titles
      catalog:
        season:
          winter: Winter %{year}
          spring: Spring %{year}
          summer: Summer %{year}
          fall: Fall %{year}
        year: '%{year} year'
        decade: '%{decade}0s'
        ancient: Older
      short:
        season:
          winter: Winter Season
          spring: Spring Season
          summer: Summer Season
          fall: Fall Season
        year: '%{year} year'
      full:
        season:
          winter: Winter %{year} Anime
          spring: Spring %{year} Anime
          summer: Summer %{year} Anime
          fall: Fall %{year} Anime
        year: Anime %{year}
    manga: &manga_season_titles
      <<: *anime_season_titles
      full:
        season:
          winter: Winter %{year} Manga
          spring: Spring %{year} Manga
          summer: Summer %{year} Manga
          fall: Fall %{year} Manga
        year: Manga %{year}
    ranobe:
      <<: *manga_season_titles
  titles/status_title:
    anime: &anime_status_titles
      anons: Planned
      ongoing: Airing
      released: Released
      latest: Aired recently
    manga: &manga_status_titles
      <<: *anime_status_titles
      ongoing: Publishing
      latest: Published recently
    ranobe:
      <<: *manga_status_titles
1,323
YAML
24.960784
38
0.538171
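This file leans on YAML anchors and merge keys (`&anime_season_titles`, `<<: *…`), so loading it outside Rails needs alias support enabled. A sketch:

```ruby
require 'yaml'

titles = YAML.load_file('config/locales/value_objects.en.yml', aliases: true)
titles.dig('en', 'titles/status_title', 'manga', 'ongoing') # => "Publishing"
```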
shikimori/shikimori/config/locales/decorators.ru.yml
ru: db_entry_decorator: &db_entry_decorator no_description: Нет описания ani_manga_decorator: &ani_manga_decorator <<: *db_entry_decorator time_ago_format: '%s назад' anime_decorator: <<: *ani_manga_decorator anime_video_preview_decorator: score: excellent: отлично good: хорошо okay: нормально character_decorator: <<: *db_entry_decorator job_title: character: Персонаж anime: Персонаж аниме anime_manga: Персонаж аниме и манги anime_manga_ranobe: Персонаж аниме, манги и ранобэ anime_ranobe: Персонаж аниме и ранобэ manga: Персонаж манги manga_ranobe: Персонаж манги и ранобэ ranobe: Персонаж ранобэ contest_decorator: <<: *db_entry_decorator club_decorator: <<: *db_entry_decorator manga_decorator: <<: *ani_manga_decorator person_decorator: &person_decorator <<: *db_entry_decorator job_title: producer: Режиссёр аниме mangaka: Автор манги composer: Композитор vocalist: Вокалист seyu: Сэйю anime_manga_projects_participant: Участник аниме и манга проектов # anime_manga_ranobe_projects_participant: Участник аниме, манга и ранобэ проектов anime_projects_participant: Участник аниме проектов # anime_ranobe_projects_participant: Участник аниме и ранобэ проектов manga_projects_participant: Участник манга проектов # manga_ranobe_projects_participant: Участник манга и ранобэ проектов # ranobe_projects_participant: Участник ранобэ проектов _projects_participant: '' seyu_decorator: <<: *person_decorator collection_decorator: <<: *db_entry_decorator user_decorator: &user_decorator always_online: всегда на сайте always_online_bot: всегда на сайте (бот) online: сейчас на сайте offline: 'в сети: %{time_ago}%{ago}' ago: назад registration_formats: full: '%e %B %Y г.' month_year: '%d %B %Y г.' year: '%Y г.' user_history_decorator: actions: add: Добавлено в список delete: Удалено из списка complete_with_score: '%{status_name} и оценено на <b>%{score}</b>' episodes: completed_movie: Просмотрен фильм completed_anime: Просмотрены все эпизоды completed_novel: Прочитана новелла completed_manga: Прочитана манга reset_anime: Сброшено число эпизодов reset_manga: Сброшено число томов и глав rate: cancelled: Отменена оценка changed: Изменена оценка c <b>%{prior_score}</b> на <b>%{score}</b> rated: Оценено на <b>%{score}</b> import: anime: Импортировано аниме - %{records} manga: Импортирована манга - %{records} registration: Регистрация на сайте anime_history_clear: Очистка истории аниме manga_history_clear: Очистка истории манги time_ago: '%{time_ago} назад' watched_one_episode: '%{watched} %{number}%{suffix} %{division}' watched_two_episodes: '%{watched} %{number_first}%{suffix} и %{number_second}%{suffix} %{division}' watched_three_episodes: '%{watched} %{number_first}%{suffix}, %{number_second}%{suffix} и %{number_third}%{suffix} %{division}' watched_episodes_range: '%{watched} с %{number_first}%{suffix_first} по %{number_last}%{suffix_last} %{division}' watched_first_episodes: '%{watched} %{number}%{suffix} %{division}' user_profile_decorator: <<: *user_decorator version_decorator: field_name: screenshots_upload: Загрузка кадров screenshots_delete: Удаление кадров screenshots_reposition: Порядок кадров poster_upload: Загрузка постера poster_delete: Удаление постера video_upload: Загрузка видео video_delete: Удаление видео role_add: Добавление роли role_remove: Удаление роли
3,807
YAML
36.333333
131
0.669031
shikimori/shikimori/config/locales/phrases.en.yml
en:
  actions:
    accept: Accept
    actions: Actions
    add: Add
    apply: Apply
    cancel: Cancel
    confirm: This action is irreversible. Are you sure?
    confirm_delete: Confirm deletion
    confirm_sync: Confirm sync
    confirm_simple: Are you sure?
    cancel_delete: Cancel deletion
    create: Create
    delete: Delete
    delete_all: Delete All
    edit: Edit
    ignore: Ignore
    markers:
      summary:
        add: Add summary mark
        remove: Remove summary mark
        confirm_add: Add summary mark?
        confirm_remove: Remove summary mark?
      offtopic:
        add: Add offtopic mark
        remove: Remove offtopic mark
        confirm_add: Add offtopic mark?
        confirm_remove: Remove offtopic mark?
    review:
      convert_to_comment: Convert review into comment?
    comment:
      convert_to_review: Convert comment into review?
    moderate: Moderate
    moderation:
      non_strict_moderation: This comment must be moderated only in case of law violations
      abuse: Abuse
      laws_abuse: Russian law violation
      ban: Ban
      hide_to_spoiler: Hide to spoiler
      not_offtopic: It's not offtopic
      not_summary: It's not a summary
      not_review: It's not a review
      offtopic: It's offtopic
      spoiler: It's a spoiler
      summary: It's a summary
      review: It's a review
      explain:
        abuse: Please describe (optional)
        spoiler: Please describe (optional)
    edition: Edition
    increment: Increment (+)
    preview: Preview
    quote: Quote
    reject: Reject
    reply: Reply
    rollback: Rollback (-)
    save_apply: Save & Apply
    start: Start
    stop: Stop
    take: Take
    upload: Upload
    upload_image: Upload image
    write: Write
  by:
    id: By ID
    aired_on: By release date
    date_added: By date added
    date_updated: By date updated
    kind: By type
    name: In alphabetical order
    chapters: By number of chapters
    episodes: By number of episodes
    volumes: By number of volumes
    popularity: By popularity
    ranked: By rank
    random: By random
    ranked_random: By random
    ranked_shiki: By Shikimori ranking
    score: By score
    status: By status
  answers:
    'no': 'no'
    'yes': 'yes'
  about_site: About site
  anime_industry: Anime industry
  anime_list: Anime List
  anime_title: Anime title
  back: back
  back_to_page: Back to page
  calendar: Calendar
  changes_not_saved: Changes not saved!
  changes_saved: Changes saved
  character_name: Character name
  club_name: Club name
  collection_name: Collection name
  collapse: collapse
  cosplay: Cosplay
  deleted:
    anime: Deleted anime
    character: Deleted character
    manga: Deleted manga
    critique: Deleted critique
    video: Deleted video
    anime_video: Deleted video
    user: Deleted user
  error: error
  female: female
  forum: Forum
  gallery: Gallery
  goto: Goto
  imageboard_art: Imageboard art
  in_clubs: In Clubs
  in_collections: In Collections
  in_english: In English
  in_favorites: In favorites
  in_japanese: In Japanese
  in_russian: In Russian
  information: Information
  loading: Loading...
  previous_value: Previous value
  markers:
    abuse: abuse
    new: new
    offtopic: offtopic
    spoiler: spoiler
    convert_review: review
    summary: summary
  mail: Mail
  male: male
  manga_list: Manga List
  manga_title: Manga title
  mangaka: Mangaka
  moderators_only: For moderators only
  news: News
  no_synopsis: No synopsis
  no_comments: No comments
  no_collections: No collections
  no_critiques: No critiques
  no_summaries: No summaries
  no_topics: No topics
  no_reviews: No reviews
  nothing_found: Nothing found
  nothing_here: Nothing here
  notifications: Notifications
  of: of
  for: for
  page: Page %{page}
  page_not_found: Page not found
  pagination:
    back: Back
    next: Next
  person: Person
  person_name: Person name
  producer: Director
  ranobe_title: Light Novel title
  settings: Settings
  seyu: Seiyu
  share: Share
  search:
    search: Search...
    title: Search by title...
    name: Search by name...
    text: Search by text...
  shikimori: Shikimori
  source: Source
  sponsors: Sponsors
  this:
    anime: this anime
    manga: this manga
  this_action_cant_be_undone: This action can't be undone!
  tournament_bracket: Tournament bracket
  total: Total
  username: User name
  under_development: Under development
  page_under_development: This page is not completed and is under development
  yes_i_confirm: Yes, I confirm
  date_field: Date
  navigation: Navigation
  form_errors: Errors
  'yes': 'Yes'
  'no': 'No'
  is_deleted: Deleted
  facebook: 'Meta Platforms**, as well as its social network Facebook*: ** recognized as an extremist organization, its activities are banned in Russia. * banned in Russia'
  facebook_html: 'Meta Platforms**, as well as its social network Facebook*:<br>** recognized as an extremist organization, its activities are banned in Russia.<br>* banned in Russia'
4,991
YAML
25.983784
183
0.685634
shikimori/shikimori/config/locales/helpers.en.yml
en: anime_helper: minute: min. hour: zero: hours one: hour other: hours
100
YAML
11.624999
18
0.52
shikimori/shikimori/config/locales/services.ru.yml
ru: list_compare_service: group_by_key: both: В обоих списках user_only: Только в списке %{nickname} user_rate_status: planned: в планах dropped: брошено messages/create_notification: nickname_changed: >- @{f:Твоя|m:Твой} @{f:подруга|m:друг} %{old_nickname} @{f:изменила|m:изменил} никнейм на %{new_nickname}. user_registered_message: >- Добро пожаловать. [url=%{faq_url}]Здесь[/url] находятся ответы на наиболее часто задаваемые вопросы. Импортировать список аниме и манги из [url=http://myanimelist.net]myanimelist.net[/url] можно в [url=/%{settings_path}/edit/account]настройках профиля[/url]. Там же можно изменить свой никнейм. Перед публикацией на форуме рекомендуем ознакомиться с [url=%{site_rules_url}]правилами сайта[/url]. Если возникнут вопросы или пожелания - пиши на форуме, мы постараемся тебе ответить. moderatable_banned: without_reason: >- Твоя [entry=%{topic_id}]%{entry_name}[/entry] перенесена в оффтоп reason: >- по причине [quote=%{approver_nickname}]%{reason}[/quote] bad_email_message: >- Наш почтовый сервис не смог доставить письмо на твою почту %{email}. Ты либо @{f:указала|m:указал} несуществующий почтовый ящик, либо когда-то @{f:пометила|m:пометил} одно из наших писем как спам. Рекомендуем сменить электронный адрес в настройках профиля, иначе при утрате пароля ты не сможешь восстановить пароль от аккаунта. omniauth_service: new_user: Новый пользователь bb_codes/tags/replies_tag: reply: 'Ответ: ' replies: 'Ответы: ' bb_codes/tags/contest_status_tag: started: начат finished: завершён bb_codes/tags/contest_round_status_tag: started: начат finished: завершён messages/generate_body: profile_comment: >- @{f:Написала|m:Написал} что-то в твоём <a class='b-link' href='%{profile_url}'>профиле</a>. friend_request: add: Добавить @{f:её|m:его} в твой список друзей в ответ? added: "@{f:Добавила|m:Добавил} тебя в список друзей." quoted_by_user: >- @{f:Написала|m:Написал} <a class="b-link" href="%{comment_url}">что-то</a> тебе %{linked_name} anons: Анонсировано аниме %{linked_name} ongoing: Начат показ аниме %{linked_name} episode: Вышел %{episode} эпизод аниме %{linked_name} released: Завершён показ аниме %{linked_name} subscription_commented: Новые сообщения %{linked_name} warned: target: >- Тебе вынесено предупреждение за %{target_type_name} %{linked_name}. missing: >- Тебе вынесено предупреждение за %{target_type_name} (<em>удалён</em>). Причина: "%{reason}". other: >- Тебе вынесено предупреждение. Причина: "%{reason}". banned: target: >- Ты @{f:забанена|m:забанен} на %{duration} за %{target_type_name} %{linked_name}. missing: >- Ты @{f:забанена|m:забанен} на %{duration} за %{target_type_name} (<em>удалён</em>). Причина: "%{reason}". other: >- Ты @{f:забанена|m:забанен} на %{duration}. Причина: "%{reason}". club_request: Приглашение на вступление в клуб [club]%{club_id}[/club]. version_accepted: >- Твоя [version=%{version_id}]правка[/version] для [%{item_type}]%{item_id}[/%{item_type}] принята. version_rejected: >- Твоя [version=%{version_id}]правка[/version] для [%{item_type}]%{item_id}[/%{item_type}] отклонена. 
version_rejected_with_reason: >- Твоя [version=%{version_id}]правка[/version] для [%{item_type}]%{item_id}[/%{item_type}] отклонена по причине: [quote=%{moderator}]%{reason}[/quote] messages/mention_source: simple_mention: nil: <em>удалено</em> topic: &default_simple_mention <a href="%{url}"%{bubble}>%{name}</a> profile: *default_simple_mention review: *default_simple_mention critique: *default_simple_mention article: *default_simple_mention collection: *default_simple_mention text_mention: nil: в <em>удалено</em>. topic: в топике <a href="%{url}"%{bubble}>%{name}</a>. profile: в профиле пользователя <a href="%{url}"%{bubble}>%{name}</a>. review: в отзыве к <a href="%{url}"%{bubble}>%{name}</a>. messages/check_spam_abuse: ban_text: >- Ты @{f:забанена|m:забанен} за спам. Напиши на %{email}, если ты @{f:невиновна|m:невиновен}. users/check_hacked: lock_text: >- С этого аккаунта на сайте рассылается спам. Доступ к аккаунту забанен. Для восстановления доступа воспользуйтесь страницей восстановления пароля. %{recovery_url} Чтобы в дальнейшем избежать подобных ситуаций, рекомендуем не использовать простые пароли вроде "123", "qwerty", "anime", "naruto" и т.п. moderations/banhammer: ban_reason: п.3 [url=%{url}]правил сайта[/url]
4,947
YAML
36.484848
96
0.639377
shikimori/shikimori/config/locales/verbs.en.yml
en: verbs: watched_episodes: zero: watched one: watched other: watched read_volumes: zero: read one: read other: read read_chapters: zero: read one: read other: read added_by: zero: added by one: added by other: added by wrote: zero: wrote one: wrote other: wrote
376
YAML
15.391304
21
0.526596
shikimori/shikimori/config/locales/inflections.en.yml
en: inflections: years_old: zero: yo one: yo other: yo datetime: second: zero: seconds one: second other: seconds minute: zero: minutes one: minute other: minutes hour: zero: hours one: hour other: hours day: zero: days one: day other: days week: zero: weeks one: week other: weeks month: zero: months one: month other: months year: zero: years one: year other: years ordinal: studio: one: publisher few: publishers ranobe: zero: light novels one: light novel other: light novels inflections: user_signed_in: signed_in: signed in not_signed_in: not signed in
875
YAML
15.846154
36
0.482286
shikimori/shikimori/config/locales/devise.en.yml
# Additional translations at https://github.com/plataformatec/devise/wiki/I18n en: devise: confirmations: confirmed: "Your email address has been successfully confirmed." send_instructions: "You will receive an email with instructions for how to confirm your email address in a few minutes." send_paranoid_instructions: "If your email address exists in our database, you will receive an email with instructions for how to confirm your email address in a few minutes." failure: already_authenticated: "You are already signed in." inactive: "Your account is not activated yet." invalid: "Invalid %{authentication_keys} or password." locked: "Your account is locked." last_attempt: "You have one more attempt before your account is locked." not_found_in_database: "Invalid %{authentication_keys} or password." timeout: "Your session expired. Please sign in again to continue." unauthenticated: "You need to sign in or sign up before continuing." unconfirmed: "You have to confirm your email address before continuing." mailer: confirmation_instructions: subject: "Confirmation instructions" reset_password_instructions: subject: "Reset password instructions" unlock_instructions: subject: "Unlock instructions" email_changed: subject: "Email Changed" password_change: subject: "Password Changed" omniauth_callbacks: register: "Successfully registered from %{kind} account." failure: "Could not authenticate you from %{kind} because \"%{reason}\"." success: "Successfully authenticated from %{kind} account." passwords: no_token: "You can't access this page without coming from a password reset email. If you do come from a password reset email, please make sure you used the full URL provided." send_instructions: "You will receive an email with instructions on how to reset your password in a few minutes." send_paranoid_instructions: "If your email address exists in our database, you will receive a password recovery link at your email address in a few minutes." updated: "Your password has been changed successfully. You are now signed in." updated_not_active: "Your password has been changed successfully." registrations: destroyed: "Bye! Your account has been successfully cancelled. We hope to see you again soon." signed_up: "Welcome! You have signed up successfully." signed_up_but_inactive: "You have signed up successfully. However, we could not sign you in because your account is not yet activated." signed_up_but_locked: "You have signed up successfully. However, we could not sign you in because your account is locked." signed_up_but_unconfirmed: "A message with a confirmation link has been sent to your email address. Please follow the link to activate your account." update_needs_confirmation: "You updated your account successfully, but we need to verify your new email address. Please check your email and follow the confirm link to confirm your new email address." updated: "Your account has been updated successfully." sessions: signed_in: "Signed in successfully." signed_out: "Signed out successfully." already_signed_out: "Signed out successfully." user: already_signed_out: :devise.sessions.signed_out signed_in: :devise.sessions.signed_in signed_out: :devise.sessions.signed_out unlocks: send_instructions: "You will receive an email with instructions for how to unlock your account in a few minutes." send_paranoid_instructions: "If your account exists, you will receive an email with instructions for how to unlock it in a few minutes." unlocked: "Your account has been unlocked successfully. Please sign in to continue." 
errors: messages: already_confirmed: "was already confirmed, please try signing in" confirmation_period_expired: "needs to be confirmed within %{period}, please request a new one" expired: "has expired, please request a new one" not_found: "not found" not_locked: "was not locked" not_saved: one: "1 error prohibited this %{resource} from being saved:" other: "%{count} errors prohibited this %{resource} from being saved:"
4,356
YAML
61.242856
206
0.719467
shikimori/shikimori/config/locales/achievements.en.yml
en:
  achievements:
    group:
      common: Common
      genre: By Genres
      franchise: By Franchises
      author: By Authors
    neko_name:
      action: Action
      animelist: Anime List
      comedy: Comedy
      dementia_psychological: Dementia / Psychological
      drama: Drama
      fantasy: Fantasy
      fujoshi: Fujoshi
      gar: GAR
      historical: Historical
      horror_thriller: Horror / Thriller
      josei: Josei
      kids: For Kids
      kuudere: Kuudere
      longshounen: Long Title
      mahou_shoujo: Mahou Shoujo
      mecha: Mecha
      military: Military
      moe: Moe
      music: Music
      mystery: Mystery
      oldfag: Classic
      oniichan: Forbidden love
      otaku: Art historian
      police: Police
      romance: Romance
      scifi: Sci-Fi
      seinen: Seinen
      shortie: Short film
      slice_of_life: Slice of Life
      sovietanime: '"Our" anime'
      stop_motion: Stop Motion
      space: Space
      sports: Sport
      supernatural: Supernatural
      test: Unknown achievement
      tsundere: Tsundere
      yandere: Yandere
      yuuri: Yuri
      genki: Genki
      world_masterpiece_theater: World Masterpiece Theater
    hint:
      default: '%{neko_name} level %{level}'
      animelist: '%{threshold} anime watched'
1,287
YAML
23.76923
58
0.616939
shikimori/shikimori/config/locales/roles.en.yml
en: role: 2nd Key Animation: 2nd Key Animation ADR Director: ADR Director Animation Check: Animation Check Animation Director: Animation Director Art Director: Art Director Art: Art Assistant Animation Director: Assistant Animation Director Assistant Director: Assistant Director Assistant Engineer: Assistant Engineer Assistant Producer: Assistant Producer Assistant Production Coordinat: Assistant Production Coordinat Associate Casting Director: Associate Casting Director Associate Producer: Associate Producer Background Art: Background Art Brazilian: Seiyu (BR) Casting Director: Casting Director Character Design: Character Design Chief Animation Director: Chief Animation Director Chief Producer: Chief Producer Co-Director: Co-Director Co-Producer: Co-Producer Color Design: Color Design Color Setting: Color Setting Creator: Creator Dialogue Editing: Dialogue Editing Digital Paint: Digital Paint Director of Photography: Director of Photography Director: Director Editing: Editing English: Seiyu (EN) Episode Director: Episode Director Executive Producer: Executive Producer French: Seiyu (FR) German: Seiyu (DE) Hebrew: Seiyu (IL) Hungarian: Seiyu (HU) In-Between Animation: In-Between Animation Inserted Song Performance: Inserted Song Performance Italian: Seiyu (IT) Japanese: Seiyu Key Animation: Key Animation Korean: Seiyu (KR) Layout: Layout Mandarin: Seiyu (CH) Mechanical Design: Mechanical Design Music: Music Online Editing Supervision: Online Editing Supervision Online Editor: Online Editor Original Character Design: Original Character Design Original Creator: Original Creator Planning Producer: Planning Producer Planning: Planning Portuguese (BR): Seiyu (BR) Post-Production Assistant: Post-Production Assistant Principle Drawing: Principle Drawing Producer: Producer Production Assistant: Production Assistant Production Coordination: Production Coordination Production Manager: Production Manager Publicity: Publicity Re-Recording Mixing: Re-Recording Mixing Recording Assistant: Recording Assistant Recording Engineer: Recording Engineer Recording: Recording Screenplay: Screenplay Script: Script Series Composition: Series Composition Series Production Director: Series Production Director Setting Manager: Setting Manager Setting: Setting Sound Director: Sound Director Sound Effects: Sound Effects Sound Manager: Sound Manager Sound Supervisor: Sound Supervisor Spanish: Seiyu (ES) Special Effects: Special Effects Spotting: Spotting Story & Art: Story & Art Story: Story Storyboard: Storyboard Theme Song Arrangement: Theme Song Arrangement Theme Song Composition: Theme Song Composition Theme Song Lyrics: Theme Song Lyrics Theme Song Performance: Theme Song Performance
3,045
YAML
34.011494
66
0.743842
shikimori/shikimori/config/locales/verbs.ru.yml
ru: verbs: watched_episodes: one: просмотрен few: просмотрены many: просмотрено read_volumes: one: прочитан few: прочитаны many: прочитано read_chapters: one: прочитана few: прочитаны many: прочитано added_by: one: '@{f:добавила|m:добавил}' few: добавили many: добавили wrote: one: '@{f:написала|m:написал}' few: написали many: написали
449
YAML
18.565217
36
0.57461
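Russian needs the one/few/many plural branches above, selected by I18n's `:count` option; this relies on a ru pluralization rule being loaded (e.g. from the rails-i18n gem). A sketch:

```ruby
I18n.t('verbs.watched_episodes', count: 1, locale: :ru)  # => "просмотрен"
I18n.t('verbs.watched_episodes', count: 2, locale: :ru)  # => "просмотрены"
I18n.t('verbs.watched_episodes', count: 5, locale: :ru)  # => "просмотрено"
```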
shikimori/shikimori/config/locales/value_objects.ru.yml
ru:
  titles/season_title:
    anime: &anime_season_titles
      catalog:
        season:
          winter: Зима %{year}
          spring: Весна %{year}
          summer: Лето %{year}
          fall: Осень %{year}
        year: '%{year} год'
        decade: '%{decade}0-е годы'
        ancient: Более старые
      short:
        season:
          winter: Зимний сезон
          spring: Весенний сезон
          summer: Летний сезон
          fall: Осенний сезон
        year: '%{year} год'
      full:
        season:
          winter: Зимний сезон %{year} года
          spring: Весенний сезон %{year} года
          summer: Летний сезон %{year} года
          fall: Осенний сезон %{year} года
        year: Аниме %{year} года
    manga: &manga_season_titles
      <<: *anime_season_titles
      full:
        season:
          winter: Зимний сезон %{year} года
          spring: Весенний сезон %{year} года
          summer: Летний сезон %{year} года
          fall: Осенний сезон %{year} года
        year: Манга %{year} года
    ranobe:
      <<: *manga_season_titles
  titles/status_title:
    anime: &anime_status_titles
      anons: Анонсы
      ongoing: Онгоинги
      released: Вышедшее
      latest: Недавно вышедшее
    manga: &manga_status_titles
      <<: *anime_status_titles
      ongoing: Выходящее
    ranobe:
      <<: *manga_status_titles
      ongoing: Онгоинги
1,383
YAML
26.137254
45
0.550253
shikimori/shikimori/config/locales/roles.ru.yml
ru: role: 2nd Key Animation: Второстепен. анимация ADR Director: Режиссёр перевода Animation Check: Контроль анимации Animation Director: Режиссёр анимации Art Director: Арт-директор Art: Рисовка Assistant Animation Director: Помощник режиссёра анимации Assistant Director: Помощник режиссёра Assistant Engineer: Инженер-ассистент Assistant Producer: Ассистент продюсера Assistant Production Coordinat: Координация работ Associate Casting Director: Помощник директора по кастингу Associate Producer: Помощник продюсера Background Art: Фоновая рисовка Brazilian: Сэйю (BR) Casting Director: Директор по кастингу Character Design: Дизайн персонажей Chief Animation Director: Главный аниматор Chief Producer: Главный продюсер Co-Director: Второй режиссёр Co-Producer: Второй продюсер Color Design: Дизайн цвета Color Setting: Настройка цвета Creator: Автор Dialogue Editing: Редактор диалогов Digital Paint: Компьютерная рисовка Director of Photography: Оператор-постановщик Director: Режиссёр Editing: Монтаж English: Сэйю (EN) Episode Director: Режиссёр эпизодов Executive Producer: Исполнительн. продюсер French: Сэйю (FR) German: Сэйю (DE) Hebrew: Сэйю (IL) Hungarian: Сэйю (HU) In-Between Animation: Промежуточ. анимация Inserted Song Performance: Музыкальное сопровождение Italian: Сэйю (IT) Japanese: Сэйю Key Animation: Ключевая анимация Korean: Сэйю (KR) Layout: Вёрстка Mandarin: Сэйю (CH) Mechanical Design: Дизайн макетов Music: Музыка Online Editing Supervision: Надзор за редакторами Online Editor: Редактор Original Character Design: Оригинал. дизайн персонажей Original Creator: Автор оригинала Planning Producer: Продюсер планирования Planning: Планирование Portuguese (BR): Португальский (BR) Post-Production Assistant: Пост-продакшн Principle Drawing: Принцип рисовки Producer: Продюсер Production Assistant: Ассистент по производству Production Coordination: Координация работы Production Manager: Менеджер по производству Publicity: Реклама Re-Recording Mixing: Микширование звука Recording Assistant: Помощник звукооператора Recording Engineer: Звукооператор Recording: Звукооператор Screenplay: Сценарий Script: Сценарий Series Composition: Компоновка серий Series Production Director: Директор по производству Setting Manager: Менеджер по настройке Setting: Настройка Sound Director: Звукорежиссёр Sound Effects: Звуковые эффекты Sound Manager: Звукорежиссёр Sound Supervisor: Звукорежиссёр Spanish: Сэйю (ES) Special Effects: Спецэффекты Spotting: Корректировка Story & Art: Сюжет и иллюстрации Story: Сюжет Storyboard: Раскадровка Theme Song Arrangement: Аранжировка гл. муз. темы Theme Song Composition: Композитор гл. муз. темы Theme Song Lyrics: Лирика гл. муз. темы Theme Song Performance: Исполнение гл. муз. темы
3,080
YAML
34.413793
62
0.744805
shikimori/shikimori/config/locales/decorators.en.yml
en: db_entry_decorator: &db_entry_decorator no_description: No description ani_manga_decorator: &ani_manga_decorator <<: *db_entry_decorator time_ago_format: '%s ago' anime_decorator: <<: *ani_manga_decorator anime_video_preview_decorator: score: excellent: excellent good: good okay: okay character_decorator: <<: *db_entry_decorator job_title: character: Character anime: Anime character anime_manga: Anime & Manga Character anime_manga_ranobe: Anime & Manga & Light Novel character anime_ranobe: Anime & Light Novel character manga: Manga character manga_ranobe: Manga & Light Novel character ranobe: Light Novel character contest_decorator: <<: *db_entry_decorator club_decorator: <<: *db_entry_decorator manga_decorator: <<: *ani_manga_decorator person_decorator: &person_decorator <<: *db_entry_decorator job_title: producer: Producer mangaka: Mangaka composer: Composer seyu: Seiyu vocalist: Vocalist anime_manga_projects_participant: Anime & Manga projects participant # anime_manga_ranobe_projects_participant: Anime, Manga & Light novel projects participant anime_projects_participant: Anime projects participant # anime_ranobe_projects_participant: Anime & Light novel projects participant manga_projects_participant: Manga projects participant # manga_ranobe_projects_participant: Manga & Light novel projects participant # ranobe_projects_participant: Light novel projects participant _projects_participant: '' seyu_decorator: <<: *person_decorator collection_decorator: <<: *db_entry_decorator user_decorator: &user_decorator always_online: always online always_online_bot: always online (bot) online: online offline: last online %{time_ago}%{ago} ago: ago registration_formats: full: '%B %e, %Y' month_year: '%B %Y' year: '%Y' user_history_decorator: actions: add: Added to list delete: Removed from list complete_with_score: '%{status_name} and rated <b>%{score}</b>' episodes: completed_movie: Watched movie completed_anime: Watched all episodes completed_novel: Read novel completed_manga: Read manga reset_anime: Reset episodes count reset_manga: Reset volumes and chapters count rate: cancelled: Score removed changed: Score changed from <b>%{prior_score}</b> to <b>%{score}</b> rated: Rated <b>%{score}</b> import: anime: Anime imported - %{records} manga: Manga imported - %{records} registration: Registration anime_history_clear: Anime history cleared manga_history_clear: Manga history cleared time_ago: '%{time_ago} ago' watched_one_episode: '%{watched} %{division} %{number}' watched_two_episodes: '%{watched} %{number_first} and %{number_second} %{division}' watched_three_episodes: '%{watched} %{number_first}, %{number_second} and %{number_third} %{division}' watched_episodes_range: '%{watched} %{division} %{number_first}-%{number_last}' watched_first_episodes: '%{watched} %{number} %{division}' user_profile_decorator: <<: *user_decorator version_decorator: field_name: screenshots_upload: Screenshots upload screenshots_delete: Screenshots delete screenshots_reposition: Screenshots order poster_upload: Poster upload poster_delete: Poster delete video_upload: Video upload video_delete: Video delete role_add: Role add role_remove: Role remove
3,684
YAML
35.127451
106
0.66721
shikimori/shikimori/config/locales/datetime.en.yml
en: date: formats: &date_formats full: '%d.%m.%Y %H:%M' human: '%B %e, %Y' human_short: '%b %e, %Y' human_day_month: '%B %e' human_month_year: '%B %Y' short: '%d.%m.%Y' day_month_human: '%B %e' month_year_human: '%B %Y' ongoing: '%A, %B %e, %Y' ongoing_short: '%A, %B %e, %Y' time: formats: <<: *date_formats #momentjs: '%Y-%m-%d %H:%M:%S' datetime: intervals: today: today yesterday: yesterday during_week: during this week week: one week ago two_weeks: two weeks ago three_weeks: three weeks ago month: one month ago two_months: two months ago three_months: three months ago four_months: four months ago five_months: five months ago half_year: half a year ago year: one year ago two_years: two years ago many_years: a long time ago release_dates: date: '%{date}' for_date: for %{date} in_years: in %{from_date}-%{to_date} since_date: since %{date} since_till_date: '%{from_date} to %{to_date}' till_date: till %{date} parts: second: zero: seconds one: second other: seconds minute: zero: minutes one: minute other: minutes hour: zero: hours one: hour other: hours day: zero: days one: day other: days week: zero: weeks one: week other: weeks month: zero: months one: month other: months year: zero: years one: year other: years
1,668
YAML
21.253333
51
0.506595
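The named formats plug into `I18n.l`; for instance (a sketch — the exact spacing of `%e` follows the platform's strftime):

```ruby
I18n.l(Date.new(2024, 3, 8), format: :human)  # => "March  8, 2024"
I18n.l(Date.new(2024, 3, 8), format: :short)  # => "08.03.2024"
```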
shikimori/shikimori/config/locales/achievements.ru.yml
ru: achievements: group: common: Общие genre: Жанровые franchise: Франшизы author: Авторы neko_name: action: Боевик animelist: Список аниме comedy: Комедия dementia_psychological: Безумие / Психологическое drama: Драма fantasy: Фэнтези fujoshi: Фудзёси gar: ГАР historical: Историческое horror_thriller: Хоррор / Триллер josei: Дзёсей kids: Детское kuudere: Кудере longshounen: Длиннотайтлы mahou_shoujo: Махо-сёдзё mecha: Меха military: Военное moe: Моэ music: Музыка mystery: Детектив oldfag: Классика oniichan: Запретная любовь otaku: Искусствовед police: Полиция romance: Романтика scifi: Фантастика seinen: Сейнен shortie: Короткометражки slice_of_life: Повседневность sovietanime: Наши в аниме stop_motion: Покадровая анимация space: Космос sports: Спорт supernatural: Сверхъестественное test: Неизвестное достижение tsundere: Цундере yandere: Яндере yuuri: Юри genki: Генки world_masterpiece_theater: Театр Мировых Шедевров hint: default: '%{neko_name} %{level}-го уровня' animelist: '%{threshold} просмотренных аниме'
1,316
YAML
24.326923
55
0.634498
shikimori/shikimori/config/locales/views.yml
ru: animes: page: kind: &video_kinds raw: оригинал subtitles: субтитры fandub: озвучка unknown: озвучка new: kind: <<: *video_kinds edit: kind: <<: *video_kinds
241
YAML
15.133332
27
0.481328
shikimori/shikimori/config/locales/devise.ru.yml
# Русский перевод для https://github.com/plataformatec/devise/tree/v4.3.0 # Другие переводы на http://github.com/plataformatec/devise/wiki/I18n ru: devise: confirmations: confirmed: "Твой аккаунт подтверждён." send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта." send_paranoid_instructions: "Если твоя почта есть в базе сайта, то в течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта." failure: already_authenticated: "Ты уже в системе." inactive: "Твой аккаунт ещё не подтверждён." invalid: "Неверный пароль или: %{authentication_keys}." locked: "Твой аккаунт заблокирован." last_attempt: "У тебя осталась ещё одна попытка ввести пароль до блокировки." not_found_in_database: "Неверный пароль или: %{authentication_keys}." timeout: "Твой сеанс закончился. Войди в систему снова." unauthenticated: "Тебе необходимо войти в систему или зарегистрироваться." unconfirmed: "Тебе нужно подтвердить твой аккаунт." mailer: confirmation_instructions: subject: "Инструкция по подтверждению аккаунта" reset_password_instructions: subject: "Инструкция по восстановлению пароля" unlock_instructions: subject: "Инструкция по разблокировке аккаунта" email_changed: subject: "Почта была изменена" password_change: subject: "Пароль был изменён" omniauth_callbacks: register: 'Успешная регистрация через аккаунт %{kind}.' failure: "Ты не можешь войти в систему с аккаунтом из %{kind}, так как \"%{reason}\"." success: "Вход в систему выполнен с аккаунтом из %{kind}." passwords: no_token: "Эта страница доступна только при переходе по ссылке для сброса пароля. Если вы перешли по ссылке из письма, убедитесь, что вы использовали полный URL." send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по восстановлению пароля." send_paranoid_instructions: "Если твоя почта есть в базе сайта, то в течение нескольких минут ты получишь письмо с инструкцией по восстановлению пароля." updated: "Твой пароль изменён" updated_not_active: "Пароль изменён" registrations: destroyed: "Скатертью дорога! Твой аккаунт удалён." signed_up: "Добро пожаловать! Регистрация завершена." signed_up_but_inactive: "Вы зарегистрировались. Тем не менее, вы не можете войти, потому что ваш аккаунт ещё не подтверждён." signed_up_but_locked: "Вы зарегистрировались. Тем не менее, вы не можете войти, потому что ваш аккаунт забанен." signed_up_but_unconfirmed: "В течение нескольких минут ты получишь письмо с инструкцией по подтверждению аккаунта." update_needs_confirmation: "Твой аккаунт обновлён, но необходимо подтвердить твою новую почту. Проверь свою почту и нажми на ссылку \"Подтвердить\", чтобы завершить обновление." updated: "Твой аккаунт изменён" sessions: signed_in: "Вход на сайт выполнен" signed_out: "Выход из сайта выполнен" already_signed_out: "Выход из сайта выполнен" unlocks: send_instructions: "В течение нескольких минут ты получишь письмо с инструкцией по разблокировке аккаунта." send_paranoid_instructions: "Если твой аккаунт существует, то в течение нескольких минут ты получишь письмо с инструкцией по его разблокировке." unlocked: "Ваш аккаунт разблокирован. Теперь вы авторизованы." user: already_signed_out: :devise.sessions.signed_out signed_in: :devise.sessions.signed_in signed_out: :devise.sessions.signed_out errors: messages: already_confirmed: "уже подтверждена. Пожалуйста, попробуй войти на сайт" confirmation_period_expired: "должен быть подтверждён в течении %{period}, запроси подтверждение ещё раз" expired: "устарела. 
Запроси новую" not_found: "не найден" not_locked: "не заблокирован" not_saved: one: "%{resource}: сохранение не удалось из-за %{count} ошибки" few: "%{resource}: сохранение не удалось из-за %{count} ошибок" many: "%{resource}: сохранение не удалось из-за %{count} ошибок" other: "%{resource}: сохранение не удалось из-за %{count} ошибки"
4,267
YAML
58.277777
183
0.718069
shikimori/shikimori/config/locales/datetime.ru.yml
ru: date: formats: &date_formats full: '%H:%M %d.%m.%Y' human: '%e %B %Y' human_short: '%e %b %Y' human_day_month: '%e %B' human_month_year: '%B %Y' short: '%d.%m.%Y' day_month_human: '%e %B' month_year_human: '%B %Y' ongoing: '%A, %e %B %Y' ongoing_short: '%A, %e %B' time: formats: <<: *date_formats #momentjs: '%Y-%m-%d %H:%M:%S' datetime: intervals: today: сегодня yesterday: вчера during_week: в течение недели week: неделя назад two_weeks: две недели назад three_weeks: три недели назад month: месяц назад two_months: два месяца назад three_months: три месяца назад four_months: четыре месяца назад five_months: пять месяцев назад half_year: более полугода назад year: год назад two_years: два года назад many_years: совсем давно release_dates: date: '%{date} г.' for_date: на %{date} г. in_years: в %{from_date}-%{to_date} гг. since_date: с %{date} г. since_till_date: с %{from_date} г. по %{to_date} г. till_date: до %{date} г. parts: second: one: секунда few: секунды many: секунд other: секунды minute: one: минута few: минуты many: минут other: минуты hour: one: час few: часа many: часов other: часа day: one: день few: дня many: дней other: дня week: one: неделя few: недели many: недель other: недели month: one: месяц few: месяца many: месяцев other: месяца year: one: год few: года many: лет other: года
1,814
YAML
21.134146
57
0.503308
shikimori/shikimori/config/locales/mailers.ru.yml
ru: shiki_mailer: private_message_email: subject: Личное сообщение body: |- %{nickname}, у тебя 1 новое сообщение на %{site_link} от пользователя %{from_nickname}. Прочитать можно тут: %{private_message_link} Отписаться от уведомлений можно по ссылке: %{unsubscribe_link} reset_password_instructions: subject: Инструкция по сбросу пароля body: |- Привет! Кто-то задействовал процедуру сброса пароля для твоего аккаунта на %{site_link}. Твой логин - %{nickname}. Изменить пароль можно, перейдя по ссылке: %{reset_password_link} Если тебе пришло несколько писем о восстановлении пароля, то переходить на страницу сброса пароля нужно обязательно по ссылке из самого последнего письма. Если ты не запрашивал(-а) сброс пароля, то просто проигнорируй это письмо. Твой пароль не будет изменён до тех пор, пока ты не перейдёшь по указанной выше ссылке.
976
YAML
32.689654
162
0.683402
shikimori/shikimori/config/locales/simple_form.en.yml
en:
  simple_form:
    'yes': 'Yes'
    'no': 'No'
    required:
      text: 'required'
      mark: '*'
      # You can uncomment the line below if you need to overwrite the whole required html.
      # When using html, text and mark won't be used.
      # html: '<abbr title="required">*</abbr>'
    error_notification:
      default_message: "Please review the problems below:"
    # Labels and hints examples
    # labels:
    #   defaults:
    #     password: 'Password'
    #   user:
    #     new:
    #       email: 'E-mail to sign in.'
    #     edit:
    #       email: 'E-mail.'
    # hints:
    #   defaults:
    #     username: 'User name to sign in.'
    #     password: 'No special characters, please.'
    labels:
      user:
        nickname: Login (nickname)
      anime_video:
        author_name: Author (dubbing, subtitles)
        anime_video_author_id: Author (dubbing, subtitles)
    placeholders:
      topic:
        title: Topic title
    options:
      topic:
        type:
          Topic: Topic
          Topics::NewsTopic: News topic
      user:
        sex:
          male: male
          female: female
    hints:
      user:
        nickname: Case sensitive
        password: Case sensitive
        email: Case sensitive
      user_preferences:
        apply_user_styles: >-
          Other site users can define their own styles (change the appearance
          of the site) for the pages of their profile and clubs. If you
          disable this setting, you will always see the standard site style.
        favorites_in_profile: >-
          Changing it will break your default profile layout.
          <br>You may want to change it if you have custom styles in profile.
      version:
        reason: &optional Optional
      anime: &anime_hints
        description_ru_source: *optional
        description_en_source: *optional
        episodes: It must be "0" for ongoings with an unknown total number of episodes
        more_info: >-
          Text imported from MAL is not displayed until the marker
          <code class="b-code_inline">[MAL]</code> is removed from the text
      manga:
        <<: *anime_hints
        volumes: It must be "0" for ongoings with an unknown total number of volumes
        chapters: It must be "0" for ongoings with an unknown total number of chapters
      anime_video:
        author_name: >-
          Format: Project/studio_name (Dubber_nick_1 & Dubber_nick_2)
      list_import:
        list: Import supports Shikimori JSON and MyAnimeList XML lists (15mb max)
      club:
        is_censored: Required option for clubs with "adult" images and texts
        is_private: Club content is only visible to club members and moderators
        is_non_thematic: Non-thematic clubs are not displayed on anime and manga pages
        is_shadowbanned: Hides the club from everyone except its members
      club_page:
        parent_page_id: Inside which page the page is displayed
    magic_submit:
      devise:
        sessions:
          new: &sign_in
            submit: Sign in
            disable_with: Signing in&#133;
      users:
        registrations:
          new:
            submit: Register
            disable_with: Registering&#133;
        passwords:
          new:
            submit: Send instructions
            disable_with: Sending instructions&#133;
      sessions:
        new:
          <<: *sign_in
      dashboards:
        show:
          <<: *sign_in
      club_invite: &send
        submit: Send
        disable_with: Sending&#133;
      comment: &comment
        submit: Send
        disable_with: Posting&#133;
      message:
        <<: *comment
      anime_video_report:
        index:
          <<: *send
      list_import:
        submit: Import
        disable_with: Importing&#133;
      default:
        submit: Save
        retry: Try saving once again
        disable_with: Saving&#133;
      new:
        submit: Create
        retry: Try again
      edit:
        submit: Save
      feedback:
        <<: *comment
4,093
YAML
24.116564
90
0.571463
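The `'yes'`/`'no'` keys above are quoted deliberately: unquoted, they are YAML 1.1 booleans under Ruby's Psych, which silently corrupts the lookup table. A short demonstration:

```ruby
require 'yaml'

YAML.load("yes: Yes")     # => { true => true }  (key and value both parsed as booleans)
YAML.load("'yes': 'Yes'") # => { "yes" => "Yes" }
```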
shikimori/shikimori/config/locales/mailers.en.yml
en:
  shiki_mailer:
    private_message_email:
      subject: Private message
      body: |-
        %{nickname}, you have 1 new message on %{site_link} from %{from_nickname}.
        Read the message: %{private_message_link}
        To unsubscribe from notification emails click here: %{unsubscribe_link}
    reset_password_instructions:
      subject: Reset password instructions
      body: |-
        Hi!
        We have received a request to reset your account password on %{site_link}.
        Your account login is %{nickname}.
        To reset your password click this link: %{reset_password_link}
        If you didn't make a request to reset your password just ignore this message.
        Your password will not change until you click the link above.
777
YAML
27.814814
85
0.65251
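The mailer bodies are literal blocks filled by interpolation; a sketch of resolving them directly (argument values are placeholders):

```ruby
I18n.t('shiki_mailer.private_message_email.subject')
# => "Private message"

I18n.t(
  'shiki_mailer.private_message_email.body',
  nickname: 'user', from_nickname: 'friend',
  site_link: 'example.com',
  private_message_link: 'https://example.com/message/1',
  unsubscribe_link: 'https://example.com/unsubscribe'
)
```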
shikimori/shikimori/config/locales/simple_form.ru.yml
ru:
  simple_form:
    'yes': 'Да'
    'no': 'Нет'
    required:
      text: 'Обязательное поле'
      mark: '*'
      # You can uncomment the line below if you need to overwrite the whole required html.
      # When using html, text and mark won't be used.
      # html: '<abbr title="required">*</abbr>'
    error_notification:
      default_message: "Пожалуйста, исправьте следующие ошибки:"
    # Labels and hints examples
    # labels:
    #   defaults:
    #     password: 'Password'
    #   user:
    #     new:
    #       email: 'E-mail to sign in.'
    #     edit:
    #       email: 'E-mail.'
    # hints:
    #   defaults:
    #     username: 'User name to sign in.'
    #     password: 'No special characters, please.'
    labels:
      user:
        nickname: Логин (никнейм)
      anime_video:
        author_name: Автор (озвучки, субтитров)
        anime_video_author_id: Автор (озвучки, субтитров)
    placeholders:
      topic:
        title: Название топика
    options:
      topic:
        type:
          Topic: Топик
          Topics::NewsTopic: Новостной топик
      user:
        sex:
          male: муж.
          female: жен.
    hints:
      user:
        nickname: Чувствителен к регистру
        password: Чувствителен к регистру
        email: >-
          Чувствителен к регистру<br>
          Письма на<span class="b-tag narrow">@mail.ru</span><span class="b-tag narrow">@inbox.ru</span><span class="b-tag narrow">@list.ru</span><span class="b-tag narrow">@bk.ru</span>
          могут попадать в спам, проверяй в этой папке тоже.
      user_preferences:
        apply_user_styles: >-
          Другие пользователи сайта могут задавать собственные стили (изменять
          внешний вид сайта) для страниц своего профиля и клубов.<br>Отключив
          эту настройку, вы всегда будете видеть стандартный стиль сайта.
        favorites_in_profile: >-
          Изменение этой настройки поломает стандартную вёрстку профиля.
          <br>Можно менять, если у тебя собственные стили в профиле.
      version:
        reason: &optional Не обязательно
      anime: &anime_hints
        description_ru_source: *optional
        description_en_source: *optional
        episodes: Для онгоингов с неизвестным числом эпизодов ставь "0"
        more_info: >-
          Импортированные с MAL тексты не отображаются, пока из текста не
          удалён маркер <code class="b-code_inline">[MAL]</code>
      manga:
        <<: *anime_hints
        volumes: Для онгоингов с неизвестным числом томов ставь "0"
        chapters: Для онгоингов с неизвестным числом глав ставь "0"
      anime_video:
        author_name: >-
          Формат записи: Название_проекта/студии (Ник_даббера1 & Ник_даббера2)
      list_import:
        list: Поддерживает Shikimori JSON и MyAnimeList XML списки (до 15mb)
      club:
        is_censored: Обязательная настройка для клубов со "взрослыми" картинками и текстами
        is_private: Содержимое клуба видно только участникам клуба и модераторам
        is_non_thematic: Не тематические клубы не отображаются на страницах аниме и манги
        is_shadowbanned: Скрывает клуб для всех, кроме его участников
      club_page:
        parent_page_id: Внутри какой страницы отображается страница
    magic_submit:
      devise:
        sessions:
          new: &sign_in
            submit: Войти
            disable_with: Вход&#133;
      users:
        registrations:
          new:
            submit: Зарегистрироваться
            disable_with: Регистрация&#133;
        passwords:
          new:
            submit: Отправить инструкцию
            disable_with: Отправляем инструкцию&#133;
      sessions:
        new:
          <<: *sign_in
      dashboards:
        dynamic:
          <<: *sign_in
      club_invite: &send
        submit: Отправить
        disable_with: Отправка&#133;
      comment: &comment
        submit: Написать
        disable_with: Отправка&#133;
      message:
        <<: *comment
      anime_video_report:
        index:
          <<: *send
      list_import:
        submit: Импортировать
        disable_with: Импорт&#133;
      default:
        submit: Сохранить
        retry: Попробовать ещё раз
        disable_with: Сохранение&#133;
      new:
        submit: Создать
      edit:
        submit: Сохранить
      feedback:
        <<: *comment
  helpers:
    submit:
      user: &user_buttons
        create: Сохранить
        update: Сохранить
      user_preferences:
        <<: *user_buttons
      topic:
        <<: *user_buttons
      critique:
        <<: *user_buttons
4607
YAML
27.269938
186
0.585196
shikimori/shikimori/config/locales/frontend/edit_field.ru.yml
ru:
  frontend:
    synonyms: &synonyms
      nothing_here: Нет названий
      name: Название
    licensors:
      <<: *synonyms
    coub_tags:
      <<: *synonyms
    fansubbers:
      <<: *synonyms
    fandubbers:
      <<: *synonyms
    desynced:
      nothing_here: Нет поля
      name: Название поля
    options:
      nothing_here: Нет настроек
      name: Настройка
373
YAML
17.699999
32
0.571046
shikimori/shikimori/config/locales/frontend/about.ru.yml
ru:
  frontend:
    about:
      views: Просмотры
      visits: Посещения
      unique_visitors: Уникальные посетители
      comments_per_day: Комментариев за день
      new_users_per_day: Новых пользователей за день
217
YAML
23.22222
52
0.682028
shikimori/shikimori/config/locales/frontend/search.ru.yml
ru:
  frontend:
    search:
      nothing_found: Ничего не найдено.
      mode:
        index: Текущая страница
        anime: Аниме
        manga: Манга
        ranobe: Ранобэ
        character: Персонаж
        person: Человек
229
YAML
18.166665
39
0.563319
shikimori/shikimori/config/locales/frontend/collections.en.yml
en:
  frontend:
    collections:
      kind:
        anime: Anime
        manga: Manga
        ranobe: Light Novel and Novel
        character: Characters
        person: People
      group_name: Group name
      disabled_add_group_hint: >-
        To add a new group, fill in the blank group name
      json_warning: >-
        Do not edit this if you are not sure what you are doing!
        Inserting invalid data will break the page.
      autocomplete:
        anime: Anime title
        manga: Manga title
        ranobe: Light Novel title
        character: Character name
        person: Person name
608
YAML
26.681817
64
0.601974
shikimori/shikimori/config/locales/frontend/pages.en.yml
en:
  frontend:
    pages:
      # p-animes.coffee
      p_animes:
        hentai: Hentai / Roskomnadzor
        licensed: Licensed in Russia
        no_data: No data
        watch_online: Watch Online
      # p-contests
      p_contests:
        # p-contests/_form.coffee
        candidate:
          one: '%{count} candidate'
          few: '%{count} candidates'
          many: '%{count} candidates'
          other: '%{count} candidates'
      # p-profiles
      p_profiles:
        # p-profiles/ban.coffee
        page_is_reloading: Page is reloading...
        # p-profiles/show.coffee
        hour:
          one: hour
          few: hours
          many: hours
        day:
          one: day
          few: days
          many: days
        label:
          full: >-
            %{hours} %{hourWord}
            since %{fromDate} till %{toDate}
            (%{days} %{dayWord})
          short: >-
            %{hours} %{hourWord}
            on %{date}
      # p-user_rates
      p_user_rates:
        # p-user_rates/index.coffee
        insufficient_data: Insufficient data
        error_occurred: Error occurred
        changes_saved: Changes saved
        rewatch:
          one: rewatch
          few: rewatches
          many: rewatches
        reread:
          one: re-read
          few: re-reads
          many: re-reads
      # p-recommendations-index.coffee
      p_recommendations_index:
        dont_recommend_franchise: Don't recommend this franchise any more
1465
YAML
23.847457
73
0.517406
shikimori/shikimori/config/locales/frontend/collections.ru.yml
ru:
  frontend:
    collections:
      kind:
        anime: Аниме
        manga: Манга
        ranobe: Ранобэ и новеллы
        character: Персонажи
        person: Люди
      group_name: Название группы
      disabled_add_group_hint: >-
        Для добавления следующей группы заполните пустое название группы
      json_warning: >-
        Не редактируй это, если не уверен(-а) в том, что делаешь!
        Вставка неправильных данных сломает работу страницы.
      autocomplete:
        anime: Название аниме
        manga: Название манги
        ranobe: Название ранобэ
        character: Имя персонажа
        person: Имя человека
635
YAML
27.90909
72
0.626772
shikimori/shikimori/config/locales/frontend/statistics.ru.yml
ru:
  frontend:
    statistics:
      number: Количество
      anime_with_score: <b>%{count}</b> аниме с оценкой <b>%{score}</b>
      anime_of_type: <b>%{count}</b> аниме типа <b>%{type}</b>
      anime_in_year: '%{count} %{type} за %{year} год'
      anime_with_rating_in_year: '%{count} аниме у %{rating} за %{year} год'
      share: Процент
      ratings_share: '%{percent}% у %{rating} за %{year} год'
      genres_share: '%{percent}% у %{genre} за %{year} год'
467
YAML
37.999997
76
0.569593
shikimori/shikimori/config/locales/frontend/achievements.en.yml
en:
  frontend:
    achievements:
      title:
        gained: Achievement Gained
        lost: Achievement Lost
113
YAML
15.285712
34
0.610619
shikimori/shikimori/config/locales/frontend/pages.ru.yml
ru:
  frontend:
    pages:
      # p-animes.coffee
      p_animes:
        hentai: Хентай / Роскомнадзор
        licensed: Лицензировано в РФ
        no_data: Нет данных
        watch_online: Смотреть онлайн
      # p-contests
      p_contests:
        # p-contests/_form.coffee
        candidate:
          one: '%{count} участник'
          few: '%{count} участника'
          many: '%{count} участников'
          other: '%{count} участников'
      # p-profiles
      p_profiles:
        # p-profiles/ban.coffee
        page_is_reloading: Перезагрузка страницы...
        # p-profiles/show.coffee
        hour:
          one: час
          few: часа
          many: часов
        day:
          one: день
          few: дня
          many: дней
        label:
          full: >-
            %{hours} %{hourWord}
            с %{fromDate} по %{toDate}
            (%{days} %{dayWord})
          short: >-
            %{hours} %{hourWord} %{date}
      # p-user_rates
      p_user_rates:
        # p-user_rates/index.coffee
        insufficient_data: Недостаточно данных
        error_occurred: Произошла ошибка
        changes_saved: Изменения сохранены
        rewatch:
          one: повторный просмотр
          few: повторных просмотра
          many: повторных просмотров
        reread:
          one: повторное прочтение
          few: повторных прочтения
          many: повторных прочтений
      # p-recommendations-index.coffee
      p_recommendations_index:
        dont_recommend_franchise: Больше не рекомендовать эту франшизу
1535
YAML
25.033898
70
0.540065
shikimori/shikimori/config/locales/frontend/search.en.yml
en:
  frontend:
    search:
      nothing_found: Nothing found.
      mode:
        index: This page
        anime: Anime
        manga: Manga
        ranobe: Light novel
        character: Character
        person: Person
223
YAML
17.666665
35
0.55157
shikimori/shikimori/config/locales/frontend/external_links.ru.yml
ru:
  frontend:
    external_links:
      nothing_here: Нет ссылок
      groups:
        links: Ссылки
        watch_online: Онлайн-просмотр
      warn:
        youtube: Ссылка на официальный канал, где выложены серии для онлайн-просмотра
        watch_online: Ссылка на плейлист/страницу с плеером/официальный канал с онлайн-просмотром
337
YAML
29.72727
97
0.682493
shikimori/shikimori/config/locales/frontend/about.en.yml
en:
  frontend:
    about:
      views: Views
      visits: Visits
      unique_visitors: Unique visitors
      comments_per_day: Comments per day
      new_users_per_day: New users per day
190
YAML
20.22222
42
0.636842
shikimori/shikimori/config/locales/frontend/shiki_editor.en.yml
en:
  frontend:
    shiki_editor:
      not_available: Commenting will be available one day after registering
      text_cant_be_blank: Text can't be blank
      file: file
      bold: Bold
      italic: Italic
      underline: Underlined
      strike: Strikethrough
      color: Color
      undo: Undo last change
      redo: Redo last change
      spoiler: Spoiler
      spoiler_inline: Spoiler
      code_inline: Code
      link: Link
      smiley: Smiley
      shiki_link: Shiki link
      image: Image by link
      upload: Images upload
      spoiler_block: Spoiler block
      code_block: Code block
      bullet_list: List
      headline: Headline
      blockquote: Quote
      prompt:
        image_url: Image URL
        link_url: Link URL
        spoiler_label: Spoiler label
      preview: Preview
      source: Source code
      huge_content_mode: Text is too large. The visual editor will fail and has therefore been disabled.
      huge_content_pasted: Text is too large. The visual editor will fail and therefore paste has been cancelled.
      normal_content_mode: The visual editor is available again
      unsaved_content:
        label: The editor has unsaved draft. Restore it?
        draft: Draft
        'yes': Yes
        'no': No
      colors:
        yellow: Yellow
        orange: Orange
        red: Red
        pink: Pink
        violet: Violet
        blue: Blue
        green: Green
        brown: Brown
        gray: Gray
        black: Black
      headlines:
        header_1: '# Large header'
        header_2: '## Medium header'
        header_3: '### Small header'
        headline: '#### Headline'
        midheadline: '##### Subheadline'
1677
YAML
27.931034
113
0.60167
shikimori/shikimori/config/locales/frontend/images.ru.yml
ru:
  frontend:
    images:
      delete: Удалить
      confirm: Подтвердить
      cancel: Отменить
100
YAML
13.42857
26
0.61
shikimori/shikimori/config/locales/frontend/achievements.ru.yml
ru:
  frontend:
    achievements:
      title:
        gained: Открыто достижение
        lost: Потеряно достижение
116
YAML
15.714283
34
0.62069
shikimori/shikimori/config/locales/frontend/application.ru.yml
ru:
  frontend:
    application:
      sure_to_leave_page: |-
        Здесь написан и не сохранён какой-то комментарий!
        Точно покинуть страницу?
153
YAML
20.999997
57
0.640523
shikimori/shikimori/config/locales/frontend/statistics.en.yml
en:
  frontend:
    statistics:
      number: Number
      anime_with_score: <b>%{count}</b> anime scored <b>%{score}</b>
      anime_of_type: <b>%{count}</b> anime of type <b>%{type}</b>
      anime_in_year: '%{count} %{type} in %{year}'
      anime_with_rating_in_year: '%{count} %{rating} anime in %{year}'
      share: Share
      ratings_share: '%{percent}% share of %{rating} in %{year}'
      genres_share: '%{percent}% share of %{genre} in %{year}'
457
YAML
37.166664
70
0.56674
shikimori/shikimori/config/locales/frontend/user_rates.ru.yml
ru:
  frontend:
    user_rates:
      button:
        add_to_list: Добавить в список
        remove_from_my_list: Удалить из списка
132
YAML
17.999997
46
0.606061
shikimori/shikimori/config/locales/frontend/application.en.yml
en:
  frontend:
    application:
      sure_to_leave_page: |-
        You have not saved your comment!
        Are you sure you want to leave this page?
153
YAML
20.999997
49
0.614379
shikimori/shikimori/config/locales/frontend/dynamic_elements.en.yml
en:
  frontend:
    dynamic_elements:
      check_height:
        expand: expand
      shiki_editable:
        your_request_will_be_considered: >-
          Your request will be considered. Domo arigato.
      topic:
        load_comments: Load next %{comment_count} %{of_total_comments} %{comment_word}
        type_label: Topic
        of: of
        comment:
          one: comment
          few: comments
          many: comments
        new_message_added:
          one: '%{count} new message added'
          few: '%{count} new messages added'
          many: '%{count} new messages added'
        new_comment_added:
          one: '%{count} new comment added'
          few: '%{count} new comments added'
          many: '%{count} new comments added'
      comment:
        type_label: Comment
        marked_as_offtopic: Comment has been marked as offtopic
        multiple_marked_as_offtopic: Comments have been marked as offtopic
        marked_as_summary: Comment has been marked as summary
        not_marked_as_offtopic: Removed offtopic marking
        not_marked_as_summary: Removed summary marking
      dialog:
        type_label: Dialog
      message:
        type_label: Message
      review:
        type_label: Review
      authorized_action:
        register_to_complete_action: >-
          Please <a href="/users/sign_up">register</a> to complete this action.
      day_registered_action:
        action_will_be_available: >-
          Action will be available one day after
          <a href="/users/sign_up">registration</a>.
      week_registered_action:
        action_will_be_available: >-
          Action will be available one week after
          <a href="/users/sign_up">registration</a>.
      not_implemented_yet_action: This action is temporarily not available
1816
YAML
28.786885
86
0.598568
shikimori/shikimori/config/locales/frontend/dynamic_elements.ru.yml
ru:
  frontend:
    dynamic_elements:
      check_height:
        expand: развернуть
      shiki_editable:
        your_request_will_be_considered: >-
          Твой запрос будет рассмотрен. Домо аригато.
      topic:
        load_comments: Загрузить ещё %{comment_count} %{of_total_comments} %{comment_word}
        type_label: Топик
        of: из
        comment:
          one: комментарий
          few: комментария
          many: комментариев
        new_message_added:
          one: Добавлено %{count} новое сообщение
          few: Добавлено %{count} новых сообщения
          many: Добавлено %{count} новых сообщений
        new_comment_added:
          one: Добавлен %{count} новый комментарий
          few: Добавлено %{count} новых комментария
          many: Добавлено %{count} новых комментариев
      comment:
        type_label: Комментарий
        marked_as_offtopic: Комментарий помечен оффтопиком
        multiple_marked_as_offtopic: Комментарии помечены оффтопиком
        marked_as_summary: Комментарий помечен отзывом
        not_marked_as_offtopic: Метка оффтопика снята
        not_marked_as_summary: Метка отзыва снята
      dialog:
        type_label: Диалог
      message:
        type_label: Сообщение
      review:
        type_label: Отзыв
      authorized_action:
        register_to_complete_action: >-
          Для этого действия тебе необходима
          <a href="/users/sign_up">регистрация</a> на сайте.
      day_registered_action:
        action_will_be_available: >-
          Действие станет доступно через сутки после
          <a href="/users/sign_up">регистрации</a>.
      week_registered_action:
        action_will_be_available: >-
          Действие станет доступно через неделю после
          <a href="/users/sign_up">регистрации</a>.
      not_implemented_yet_action: Это действие временно недоступно
1864
YAML
29.57377
90
0.621781