teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake_task_2.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from mooncake import Mooncake
import omni
from pxr import UsdPhysics, Gf, UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.torch.rotations as torch_rot
from omni.isaac.isaac_sensor import _isaac_sensor
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
import numpy as np
import torch
import torch.nn.functional as f
import math
def q2falling(q):
norm_vec = f.normalize(q[:, 1:], p=1, dim=1)
return 2 * torch.acos(q[:, 0]) * torch.sqrt((norm_vec[:, 0] * norm_vec[:, 0] + norm_vec[:, 1] * norm_vec[:, 1]))
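# q2falling() maps a (w, x, y, z) quaternion to an approximate tilt ("falling") angle: the L1-normalised
# axis part is weighted by sqrt(x^2 + y^2), so pure yaw (spin about z) contributes nothing while pure
# roll/pitch returns the full rotation angle 2*acos(w). A minimal sanity check, assuming a batch of
# identity (upright) quaternions:
#   q_upright = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
#   assert torch.allclose(q2falling(q_upright), torch.zeros(1))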
class MooncakeTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._ball_size = 0.12
self._ball_positions = torch.tensor([0.0, 0.0, 0.12])  # ball radius is 12 cm, so its center starts 0.12 m above the floor.
self._robot_offset = 0.1962
self._jump_offset = 0.01
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_wheel_velocity = self._task_cfg["env"]["maxWheelVelocity"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._max_episode_length = 5000
self._num_observations = 22
self._num_actions = 3
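# Observation layout used by get_observations() below (22 values per env):
#   0-2 wheel joint velocities, 3-5 IMU linear acceleration, 6-8 IMU angular velocity,
#   9-11 robot linear velocity, 12-14 ball linear velocity, 15-17 robot angular velocity,
#   18-21 robot orientation quaternion. The 3 actions are mapped to the 3 wheel joints in pre_physics_step().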
self._imu_buf = [{"lin_acc_x": 0.0, "lin_acc_y": 0.0, "lin_acc_z": 0.0, "ang_vel_x": 0.0, "ang_vel_y": 0.0,
"ang_vel_z": 0.0}] * 128 # default initial sensor buffer
self._is = _isaac_sensor.acquire_imu_sensor_interface() # Sensor reader
self.previous_fall_angle = None
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_mooncake() # must be called before "super().set_up_scene(scene)"
# self.get_ball()
super().set_up_scene(scene)
self._robots = ArticulationView(prim_paths_expr="/World/envs/*/Mooncake/mooncake", name="mooncake_view")
# Add ball for each robot
stage = omni.usd.get_context().get_stage()
for robot_path in self._robots.prim_paths:
ball_path = robot_path[:-18] + "/ball" # remove "/Mooncake/mooncake" and add "/ball" instead
cubeGeom = UsdGeom.Sphere.Define(stage, ball_path)
ballPrim = stage.GetPrimAtPath(ball_path)
size = self._ball_size
offset = Gf.Vec3f(0.0, 0.0, self._ball_size)
cubeGeom.CreateRadiusAttr(size)
cubeGeom.AddTranslateOp().Set(offset)
# Attach Rigid Body and Collision Preset
rigid_api = UsdPhysics.RigidBodyAPI.Apply(ballPrim)
mass_api = UsdPhysics.MassAPI.Apply(ballPrim)
mass_api.CreateMassAttr(4)
rigid_api.CreateRigidBodyEnabledAttr(True)
UsdPhysics.CollisionAPI.Apply(ballPrim)
phys_api = UsdPhysics.MaterialAPI.Apply(ballPrim)
phys_api.CreateStaticFrictionAttr().Set(1.0)
phys_api.CreateDynamicFrictionAttr().Set(1.0)
self._ball = RigidPrimView(prim_paths_expr="/World/envs/*/ball", name="ball_view")
scene.add(self._robots)
scene.add(self._ball)
# self.meters_per_unit = UsdGeom.GetStageMetersPerUnit(omni.usd.get_context().get_stage())
return
def get_mooncake(self): # must be called at very first line of set_up_scene()
robot_position = self._ball_positions
robot_position[2] += self._robot_offset
mooncake = Mooncake(prim_path=self.default_zero_env_path + "/Mooncake", name="Mooncake",
translation=robot_position)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Mooncake", get_prim_at_path(mooncake.prim_path),
self._sim_config.parse_actor_config("Mooncake"))
def get_ball(self):
from omni.isaac.core.objects import DynamicSphere
ball = self._my_world.scene.add(
DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball",
name="ball",
position=self._ball_positions,
radius=12, # medicine ball, diameter 24 cm.
color=np.array([1.0, 0, 0]),
mass=4,
)
)
# ball = Ball(prim_path=self.default_zero_env_path + "/Ball", name="Ball", translation=self._ball_positions)
def get_robot(self): return self._robots
def get_observations(self) -> dict:
# dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
wheel_vel_0 = dof_vel[:, self._wheel_0_dof_idx]
wheel_vel_1 = dof_vel[:, self._wheel_1_dof_idx]
wheel_vel_2 = dof_vel[:, self._wheel_2_dof_idx]
imu_accel_x = torch.tensor([imu["lin_acc_x"] for imu in self._imu_buf])
imu_accel_y = torch.tensor([imu["lin_acc_y"] for imu in self._imu_buf])
imu_accel_z = torch.tensor([imu["lin_acc_z"] for imu in self._imu_buf])
imu_gyro_x = torch.tensor([imu["ang_vel_x"] for imu in self._imu_buf])
imu_gyro_y = torch.tensor([imu["ang_vel_y"] for imu in self._imu_buf])
imu_gyro_z = torch.tensor([imu["ang_vel_z"] for imu in self._imu_buf])
self.obs_buf[:, 0] = wheel_vel_0
self.obs_buf[:, 1] = wheel_vel_1
self.obs_buf[:, 2] = wheel_vel_2
self.obs_buf[:, 3] = imu_accel_x
self.obs_buf[:, 4] = imu_accel_y
self.obs_buf[:, 5] = imu_accel_z
self.obs_buf[:, 6] = imu_gyro_x
self.obs_buf[:, 7] = imu_gyro_y
self.obs_buf[:, 8] = imu_gyro_z
robot_v = self._robots.get_linear_velocities()
ball_v = self._ball.get_linear_velocities()  # ball velocity comes from the ball view
robot_w = self._robots.get_angular_velocities()
_, robot_orientation = self._robots.get_world_poses()
self.obs_buf[:, 9] = robot_v[:, 0]
self.obs_buf[:, 10] = robot_v[:, 1]
self.obs_buf[:, 11] = robot_v[:, 2]
self.obs_buf[:, 12] = ball_v[:, 0]
self.obs_buf[:, 13] = ball_v[:, 1]
self.obs_buf[:, 14] = ball_v[:, 2]
self.obs_buf[:, 15] = robot_w[:, 0]
self.obs_buf[:, 16] = robot_w[:, 1]
self.obs_buf[:, 17] = robot_w[:, 2]
self.obs_buf[:, 18] = robot_orientation[:, 0]
self.obs_buf[:, 19] = robot_orientation[:, 1]
self.obs_buf[:, 20] = robot_orientation[:, 2]
self.obs_buf[:, 21] = robot_orientation[:, 3]
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
# print("observations: %s"%(str(observations)))
return observations
def pre_physics_step(self, actions) -> None:
# print("Action: %s"%(str(actions)))
actions = torch.nan_to_num(actions, nan=0.0) # replace NaN with zero (nan_to_num returns a new tensor, so reassign)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device) # save for later energy calculation
# actions[:, 0] = actions[:, 0] * self._max_wheel_velocity
# actions[:, 1] = actions[:, 1] * self._max_wheel_velocity
# actions[:, 2] = actions[:, 2] * 0.5 # omega_bz
## calculate wheel_velocities from wheel's kinematics
# actions = actions.to(self._device) * self._max_wheel_velocity
# wheel_velocities = torch.zeros((self._robots.count, 3), dtype=torch.float32, device=self._device)
# wheel_velocities[:, 0] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
# wheel_velocities[:, 1] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
# wheel_velocities[:, 2] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
wheel_velocities = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
wheel_velocities[:, self._wheel_0_dof_idx] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
wheel_velocities[:, self._wheel_1_dof_idx] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
wheel_velocities[:, self._wheel_2_dof_idx] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
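# The constants above encode the ball-bot's inverse wheel kinematics: each row maps the commanded
# body motion (roughly [v_x, v_y, omega_z], see the commented scaling above) to one omniwheel's
# angular velocity. An equivalent sketch in matrix form (coefficients copied from above, not re-derived):
#   J = torch.tensor([[  0.0000, -12.8558, -11.0172],
#                     [ 11.1334,   6.4279,   8.2664],
#                     [-11.1334,   6.4279,   8.2664]])
#   # wheel speeds for every env at once: actions @ J.T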
# print("wheel_velocities: %s" % (str(wheel_velocities)))
self.wheel_velocities = wheel_velocities.clone().to(self._device) # save for later energy calculation
# wheel_effort = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
# wheel_effort[:, self._wheel_0_dof_idx] = actions[:, 0] * self._max_push_effort
# wheel_effort[:, self._wheel_1_dof_idx] = actions[:, 1] * self._max_push_effort
# wheel_effort[:, self._wheel_2_dof_idx] = actions[:, 2] * self._max_push_effort
# print("wheel_effort: %s" % (str(wheel_effort)))
## Apply joint velocities
from omni.isaac.core.utils.types import ArticulationActions # batched version of ArticulationAction
stage = omni.usd.get_context().get_stage()
for env in range(self._num_envs):
axle_0 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_0_joint".format(env)),
"angular")
axle_1 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_1_joint".format(env)),
"angular")
axle_2 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_2_joint".format(env)),
"angular")
# set_drive_parameters(axle_0, "velocity", math.degrees(wheel_velocities[env, 0]), 0.05, math.radians(1e7))
# set_drive_parameters(axle_1, "velocity", math.degrees(wheel_velocities[env, 1]), 0.05, math.radians(1e7))
# set_drive_parameters(axle_2, "velocity", math.degrees(wheel_velocities[env, 2]), 0.05, math.radians(1e7))
# self._robots.apply_action(ArticulationActions(joint_efforts=wheel_effort))
self._robots.apply_action(ArticulationActions(joint_velocities=wheel_velocities))
# self._robots[env].apply_wheel_actions(ArticulationAction(joint_efforts=wheel_effort[env]))
# set_drive_parameters(axle_0, "effort", wheel_effort[env, 0], 0, math.radians(1e7))
# set_drive_parameters(axle_1, "effort", wheel_effort[env, 1], 0, math.radians(1e7))
# set_drive_parameters(axle_2, "effort", wheel_effort[env, 2], 0, math.radians(1e7))
## Read IMU & store in buffer ##
buffer = []
robots_prim_path = self._robots.prim_paths
for robot_prim_path in robots_prim_path:
reading = self._is.get_sensor_readings(
robot_prim_path + "/base_plate/sensor") # read from select sensor (by prim_path)
if reading.shape[0]:
buffer.append(reading[-1]) # keep only the latest reading
else:
buffer.append({"lin_acc_x": 0.0, "lin_acc_y": 0.0, "lin_acc_z": 0.0, "ang_vel_x": 0.0, "ang_vel_y": 0.0,
"ang_vel_z": 0.0}) # default initial sensor buffer
self._imu_buf = buffer
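# self._imu_buf now holds exactly one latest reading (or the zero default) per environment; it is
# consumed on the next get_observations() call to fill observation slots 3-8.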
def reset_idx(self, env_ids):
num_resets = len(env_ids)
## randomize DOF velocities ##
# dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, 57), device=self._device)
# self._robots.set_joint_velocities(dof_vel, indices=env_ids) # apply resets
## Reset Ball positions ##
ball_pos, ball_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
ball_pos[:, 2] = 0.12 # force ball to touch the floor perfectly
root_vel = torch.zeros((num_resets, 6), device=self._device)
# Apply Ball position
self._ball.set_world_poses(ball_pos, ball_rot, indices=env_ids)
## Random Ball velocities ##
ball_vel = torch_rand_float(-0.01, 0.01, (num_resets, 6), device=self._device)
self._ball.set_velocities(ball_vel, indices=env_ids)
## Random Robot positions & orientations ##
fall_direction = torch_rand_float(-np.pi, np.pi, (num_resets, 1), device=self._device).reshape(-1)
fall_direction_axis = torch.Tensor([0, 0, 1]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_angle = torch_rand_float(0, np.pi/8, (num_resets, 1), device=self._device).reshape(-1)
fall_angle_axis = torch.Tensor([0, 1, 0]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_direction_quat = torch_rot.quat_from_angle_axis(fall_direction, fall_direction_axis)
fall_angle_quat = torch_rot.quat_from_angle_axis(fall_angle, fall_angle_axis)
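# Note: fall_direction_quat and fall_angle_quat are currently unused because the randomised tilt of
# the spawn pose (the quat_rotate / quat_apply lines below) is commented out; robots reset upright.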
## Apply Robot position ##
robot_pos = ball_pos.clone() # use ball position as reference
robot_offset = torch.Tensor([0, 0, self._robot_offset]).repeat(num_resets).to(self._device).reshape(-1, 3) # vertical offset from ball center to robot center (self._robot_offset)
robot_pos = robot_pos + robot_offset
# robot_pos = robot_pos + torch_rot.quat_rotate(fall_angle_quat, torch_rot.quat_rotate(fall_direction_quat, robot_offset))
robot_rot = self.initial_root_rot[env_ids]
# robot_rot = torch_rot.quat_apply(fall_direction_quat, robot_rot)
# robot_rot = torch_rot.quat_apply(fall_angle_quat, robot_rot)
# root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
# root_vel = torch.zeros((num_resets, 6), device=self._device)
#
self._robots.set_world_poses(robot_pos, robot_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self): # Run only once after simulation started
# self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() # save initial position for reset
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
# self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
# self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
# self.basis_vec0 = self.heading_vec.clone()
# self.basis_vec1 = self.up_vec.clone()
self._wheel_0_dof_idx = self._robots.get_dof_index("wheel_0_joint")
self._wheel_1_dof_idx = self._robots.get_dof_index("wheel_1_joint")
self._wheel_2_dof_idx = self._robots.get_dof_index("wheel_2_joint")
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None: # calculate reward for each env
wheel_vel = self.obs_buf[:, :3]
# if 'self.previous_wheel_vel' in locals():
# wheel_accel = wheel_vel - self.previous_wheel_vel
# else: wheel_accel = torch.zeros_like(wheel_vel)
# wheel_accel_cost = torch.sum(wheel_accel**2, dim=-1)
# self.previous_wheel_vel = wheel_vel.clone()
balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
robots_omega = self._robots.get_angular_velocities()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
if self.previous_fall_angle is None: falling_velocity = torch.zeros_like(robots_position[:, 0])
else: falling_velocity = fall_angles - self.previous_fall_angle
## aligning up axis of robot and environment
# up_proj = torch.cos(fall_angles)
# up_reward = torch.zeros_like(fall_angles)
# up_reward = torch.where(up_proj > 0.93, up_reward + self.up_weight, up_reward)
# falling_penalty = fall_angles
q1 = self.initial_root_rot # world frame
q2 = robots_orientation # robot orientation
# find product of quaternions
product_quaternion = torch.sum(q1*q2,dim=-1) # <q1, q2>
quaternion_distance = 1 - (product_quaternion**2)
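# 1 - <q1, q2>^2 is a standard unit-quaternion distance: it is 0 when the robot keeps its initial
# orientation (q2 = ±q1) and grows toward 1 as the relative rotation increases, so subtracting it
# in total_reward below penalises tilting away from the spawn pose.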
# print(quaternion_distance)
## energy penalty for movement
# actions_cost = torch.sum(self.wheel_velocities ** 2, dim=-1)
# electricity_cost = torch.sum(torch.abs(self.actions * obs_buf[:, 12+num_dof:12+num_dof*2])* self.motor_effort_ratio.unsqueeze(0), dim=-1)
## rotation penality
# rotation_cost = torch.sum(torch_rot.quat_diff_rad(robots_orientation, self.initial_root_rot)** 2, dim=-1)
## reward for duration of staying alive
alive_reward = torch.ones_like(fall_angles) * self.alive_reward_scale
# progress_reward = potentials - prev_potentials
# print(robots_position - balls_position)
# print(torch.sum((robots_position - balls_position)**2, dim=-1))
total_reward = (
- quaternion_distance
# - wheel_accel_cost * 0.05
# - falling_velocity
+ alive_reward
# + up_reward
# - math.e**(-0.01*fall_angles)
# - actions_cost * self.actions_cost_scale
# - torch.sum(robots_omega**2, dim=-1) * 10
# - rotation_cost * 10
)
# adjust reward for fallen agents
total_reward = torch.where(
robots_position[:, 2] < self.termination_height, # fall by height
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
total_reward = torch.where(
fall_angles > 50 / 180 * math.pi, # fall by angle
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
total_reward = torch.where(
torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, # jump beyond jump_offset
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
self.previous_fall_angle = fall_angles
self.rew_buf[:] = total_reward
def is_done(self) -> None: # check termination for each env
balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
robot_z_position = robots_position[:, 2]
# print("Z position", robot_z_position)
resets = torch.zeros(self._num_envs, dtype=torch.long, device=self._device)
resets = torch.where(torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, 1, resets) # jump beyond jump_offset
resets = torch.where(robot_z_position < 0.25, 1, resets) # reset by falling (Z-position)
resets = torch.where(fall_angles > 50*(np.pi / 180), 1, resets) # reset by falling (angle)
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets) # reset by time
self.reset_buf[:] = resets
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/eval.py
from env2 import MoonCakeEnv
from stable_baselines3 import PPO
from stable_baselines3.ppo import CnnPolicy, MlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback
policy_path = "./mlp_policy/mooncake_policy_checkpoint_200000_steps"
my_env = MoonCakeEnv(headless=False, max_episode_length=99999999, display_every_iter=1)
model = PPO.load(policy_path)
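# Roll out the loaded policy for 20 episodes, acting deterministically (no sampling) and rendering
# every step; each episode runs until the environment reports done.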
for _ in range(20):
obs = my_env.reset()
done = False
while not done:
actions, _ = model.predict(observation=obs, deterministic=True)
obs, reward, done, info = my_env.step(actions)
my_env.render()
my_env.close()
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/rlgames_train_old.py
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
import hydra
from omegaconf import DictConfig
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
import os
class RLGTrainer():
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
# `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register('RLGPU',
lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {
'vecenv_type': 'RLGPU',
'env_creator': lambda **kwargs: env
})
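# With both registrations in place, rl-games resolves the 'vecenv_type': 'RLGPU' entry in the training
# config to the RLGPUEnv adapter, and 'env_creator' hands back the already-constructed Isaac `env`
# instead of building a new one.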
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
experiment_dir = os.path.join('runs', self.cfg.train.params.config.name)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run({
'train': not self.cfg.test,
'play': self.cfg.test,
'checkpoint': self.cfg.checkpoint,
'sigma': None
})
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
headless = cfg.headless
env = VecEnvRLGames(headless=headless)
from scripts.sim_config import SimConfig
sim_config = SimConfig(cfg_dict)
cfg = DictConfig(sim_config.config)
# from mooncake_task_2 import MooncakeTask
from mooncake_task_test import MooncakeTask
task = MooncakeTask(name="Mooncake",
sim_config=sim_config,
env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# task = initialize_task(cfg_dict, env)
print(cfg)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if __name__ == '__main__':
parse_hydra_configs()
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake_task_test.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from mooncake import Mooncake
import omni
from scripts.common import set_drive_parameters
from pxr import UsdPhysics, Gf, UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.torch.rotations as torch_rot
from omni.isaac.isaac_sensor import _isaac_sensor
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
import numpy as np
import torch
import torch.nn.functional as f
import math
def q2falling(q):
norm_vec = f.normalize(q[:, 1:], p=1, dim=1)
return 2 * torch.acos(q[:, 0]) * torch.sqrt((norm_vec[:, 0] * norm_vec[:, 0] + norm_vec[:, 1] * norm_vec[:, 1]))
class MooncakeTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._ball_size = 0.12
self._ball_positions = torch.tensor([0.0, 0.0, 0.12])  # ball radius is 12 cm, so its center starts 0.12 m above the floor.
self._robot_offset = 0.0762 # 0.1962
self._jump_offset = 0.01
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_wheel_velocity = self._task_cfg["env"]["maxWheelVelocity"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._max_episode_length = 5000
self._num_observations = 22
self._num_actions = 3
self._imu_buf = [{"lin_acc_x": 0.0, "lin_acc_y": 0.0, "lin_acc_z": 0.0, "ang_vel_x": 0.0, "ang_vel_y": 0.0,
"ang_vel_z": 0.0}] * 128 # default initial sensor buffer
self._is = _isaac_sensor.acquire_imu_sensor_interface() # Sensor reader
self.previous_fall_angle = None
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_mooncake() # must be called before "super().set_up_scene(scene)"
# self.get_ball()
super().set_up_scene(scene)
self._robots = ArticulationView(prim_paths_expr="/World/envs/*/Mooncake/mooncake", name="mooncake_view")
# Add ball for each robot
# stage = omni.usd.get_context().get_stage()
# for robot_path in self._robots.prim_paths:
# ball_path = robot_path[:-18] + "/ball" # remove "/Mooncake/mooncake" and add "/ball" instead
# cubeGeom = UsdGeom.Sphere.Define(stage, ball_path)
# ballPrim = stage.GetPrimAtPath(ball_path)
# size = self._ball_size
# offset = Gf.Vec3f(0.0, 0.0, self._ball_size)
# cubeGeom.CreateRadiusAttr(size)
# cubeGeom.AddTranslateOp().Set(offset)
# # Attach Rigid Body and Collision Preset
# rigid_api = UsdPhysics.RigidBodyAPI.Apply(ballPrim)
# mass_api = UsdPhysics.MassAPI.Apply(ballPrim)
# mass_api.CreateMassAttr(4)
# rigid_api.CreateRigidBodyEnabledAttr(True)
#
# UsdPhysics.CollisionAPI.Apply(ballPrim)
#
# phys_api = UsdPhysics.MaterialAPI.Apply(ballPrim)
# phys_api.CreateStaticFrictionAttr().Set(1.0)
# phys_api.CreateDynamicFrictionAttr().Set(1.0)
# self._ball = RigidPrimView(prim_paths_expr="/World/envs/*/ball", name="ball_view")
scene.add(self._robots)
# scene.add(self._ball)
# self.meters_per_unit = UsdGeom.GetStageMetersPerUnit(omni.usd.get_context().get_stage())
return
def get_mooncake(self): # must be called at very first line of set_up_scene()
robot_position = self._ball_positions
robot_position[2] += self._robot_offset
mooncake = Mooncake(prim_path=self.default_zero_env_path + "/Mooncake", name="Mooncake",
translation=robot_position)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Mooncake", get_prim_at_path(mooncake.prim_path),
self._sim_config.parse_actor_config("Mooncake"))
def get_ball(self):
from omni.isaac.core.objects import DynamicSphere
ball = self._my_world.scene.add(
DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball",
name="ball",
position=self._ball_positions,
radius=12, # medicine ball, diameter 24 cm.
color=np.array([1.0, 0, 0]),
mass=4,
)
)
# ball = Ball(prim_path=self.default_zero_env_path + "/Ball", name="Ball", translation=self._ball_positions)
def get_robot(self): return self._robots
def get_observations(self) -> dict:
# dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
wheel_vel_0 = dof_vel[:, self._wheel_0_dof_idx]
wheel_vel_1 = dof_vel[:, self._wheel_1_dof_idx]
wheel_vel_2 = dof_vel[:, self._wheel_2_dof_idx]
imu_accel_x = torch.tensor([imu["lin_acc_x"] for imu in self._imu_buf])
imu_accel_y = torch.tensor([imu["lin_acc_y"] for imu in self._imu_buf])
imu_accel_z = torch.tensor([imu["lin_acc_z"] for imu in self._imu_buf])
imu_gyro_x = torch.tensor([imu["ang_vel_x"] for imu in self._imu_buf])
imu_gyro_y = torch.tensor([imu["ang_vel_y"] for imu in self._imu_buf])
imu_gyro_z = torch.tensor([imu["ang_vel_z"] for imu in self._imu_buf])
self.obs_buf[:, 0] = wheel_vel_0
self.obs_buf[:, 1] = wheel_vel_1
self.obs_buf[:, 2] = wheel_vel_2
self.obs_buf[:, 3] = imu_accel_x
self.obs_buf[:, 4] = imu_accel_y
self.obs_buf[:, 5] = imu_accel_z
self.obs_buf[:, 6] = imu_gyro_x
self.obs_buf[:, 7] = imu_gyro_y
self.obs_buf[:, 8] = imu_gyro_z
robot_v = self._robots.get_linear_velocities()
ball_v = self._robots.get_linear_velocities()  # ball view is disabled in this test task, so robot velocity is reused as a placeholder
robot_w = self._robots.get_angular_velocities()
_, robot_orientation = self._robots.get_world_poses()
self.obs_buf[:, 9] = robot_v[:, 0]
self.obs_buf[:, 10] = robot_v[:, 1]
self.obs_buf[:, 11] = robot_v[:, 2]
self.obs_buf[:, 12] = ball_v[:, 0]
self.obs_buf[:, 13] = ball_v[:, 1]
self.obs_buf[:, 14] = ball_v[:, 2]
self.obs_buf[:, 15] = robot_w[:, 0]
self.obs_buf[:, 16] = robot_w[:, 1]
self.obs_buf[:, 17] = robot_w[:, 2]
self.obs_buf[:, 18] = robot_orientation[:, 0]
self.obs_buf[:, 19] = robot_orientation[:, 1]
self.obs_buf[:, 20] = robot_orientation[:, 2]
self.obs_buf[:, 21] = robot_orientation[:, 3]
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
# print("observations: %s"%(str(observations)))
return observations
def pre_physics_step(self, actions) -> None:
# print("Action: %s"%(str(actions)))
actions = torch.nan_to_num(actions, nan=0.0) # replace NaN with zero (nan_to_num returns a new tensor, so reassign)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device) # save for later energy calculation
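# Test-only override: the policy's actions are discarded and every robot is driven with the fixed
# command [0, 0, 1] (pure omega_z in the kinematic mapping below) so the wheel drives can be
# checked in isolation.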
actions[:, 0] = 0
actions[:, 1] = 0
actions[:, 2] = 1
# actions[:, 0] = actions[:, 0] * self._max_wheel_velocity
# actions[:, 1] = actions[:, 1] * self._max_wheel_velocity
# actions[:, 2] = actions[:, 2] * 0.5 # omega_bz
## calculate wheel_velocities from wheel's kinematics
# actions = actions.to(self._device) * self._max_wheel_velocity
# wheel_velocities = torch.zeros((self._robots.count, 3), dtype=torch.float32, device=self._device)
# wheel_velocities[:, 0] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
# wheel_velocities[:, 1] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
# wheel_velocities[:, 2] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
wheel_velocities = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
wheel_velocities[:, self._wheel_0_dof_idx] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
wheel_velocities[:, self._wheel_1_dof_idx] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
wheel_velocities[:, self._wheel_2_dof_idx] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
print("wheel_velocities: %s" % (str(wheel_velocities)))
self.wheel_velocities = wheel_velocities.clone().to(self._device) # save for later energy calculation
# wheel_effort = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
# wheel_effort[:, self._wheel_0_dof_idx] = actions[:, 0] * self._max_push_effort
# wheel_effort[:, self._wheel_1_dof_idx] = actions[:, 1] * self._max_push_effort
# wheel_effort[:, self._wheel_2_dof_idx] = actions[:, 2] * self._max_push_effort
# print("wheel_effort: %s" % (str(wheel_effort)))
## Apply joint velocities
from omni.isaac.core.utils.types import ArticulationActions # batched version of ArticulationAction
stage = omni.usd.get_context().get_stage()
for env in range(self._num_envs):
axle_0 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_0_joint".format(env)),
"angular")
axle_1 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_1_joint".format(env)),
"angular")
axle_2 = UsdPhysics.DriveAPI.Get(
stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_2_joint".format(env)),
"angular")
set_drive_parameters(axle_0, "velocity", math.degrees(wheel_velocities[env, 0]), 0.05, math.radians(1e7))
set_drive_parameters(axle_1, "velocity", math.degrees(wheel_velocities[env, 1]), 0.05, math.radians(1e7))
set_drive_parameters(axle_2, "velocity", math.degrees(wheel_velocities[env, 2]), 0.05, math.radians(1e7))
# self._robots.apply_action(ArticulationActions(joint_efforts=wheel_effort))
# self._robots.apply_action(ArticulationActions(joint_velocities=wheel_velocities))
# self._robots[env].apply_wheel_actions(ArticulationAction(joint_efforts=wheel_effort[env]))
# set_drive_parameters(axle_0, "effort", wheel_effort[env, 0], 0, math.radians(1e7))
# set_drive_parameters(axle_1, "effort", wheel_effort[env, 1], 0, math.radians(1e7))
# set_drive_parameters(axle_2, "effort", wheel_effort[env, 2], 0, math.radians(1e7))
## Read IMU & store in buffer ##
buffer = []
robots_prim_path = self._robots.prim_paths
for robot_prim_path in robots_prim_path:
reading = self._is.get_sensor_readings(
robot_prim_path + "/base_plate/sensor") # read from select sensor (by prim_path)
if reading.shape[0]:
buffer.append(reading[-1]) # keep only the latest reading
else:
buffer.append({"lin_acc_x": 0.0, "lin_acc_y": 0.0, "lin_acc_z": 0.0, "ang_vel_x": 0.0, "ang_vel_y": 0.0,
"ang_vel_z": 0.0}) # default initial sensor buffer
self._imu_buf = buffer
def reset_idx(self, env_ids):
num_resets = len(env_ids)
## randomize DOF velocities ##
# dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, 57), device=self._device)
# self._robots.set_joint_velocities(dof_vel, indices=env_ids) # apply resets
## Reset Ball positions ##
# ball_pos, ball_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
# ball_pos[:, 2] = 0.12 # force ball to touch the floor perfectly
# root_vel = torch.zeros((num_resets, 6), device=self._device)
# # Apply Ball position
# self._ball.set_world_poses(ball_pos, ball_rot, indices=env_ids)
#
# ## Random Ball velocities ##
# ball_vel = torch_rand_float(-0.01, 0.01, (num_resets, 6), device=self._device)
# self._ball.set_velocities(ball_vel, indices=env_ids)
## Random Robot positions & orientations ##
fall_direction = torch_rand_float(-np.pi, np.pi, (num_resets, 1), device=self._device).reshape(-1)
fall_direction_axis = torch.Tensor([0, 0, 1]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_angle = torch_rand_float(0, np.pi/8, (num_resets, 1), device=self._device).reshape(-1)
fall_angle_axis = torch.Tensor([0, 1, 0]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_direction_quat = torch_rot.quat_from_angle_axis(fall_direction, fall_direction_axis)
fall_angle_quat = torch_rot.quat_from_angle_axis(fall_angle, fall_angle_axis)
## Apply Robot position ##
# robot_pos = ball_pos.clone() # use ball position as reference
robot_offset = torch.Tensor([0, 0, self._robot_offset]).repeat(num_resets).to(self._device).reshape(-1, 3) # vertical offset from ball center to robot center (self._robot_offset)
robot_pos = self.initial_root_pos[env_ids] + robot_offset
# robot_pos = robot_pos + torch_rot.quat_rotate(fall_angle_quat, torch_rot.quat_rotate(fall_direction_quat, robot_offset))
robot_rot = self.initial_root_rot[env_ids]
# robot_rot = torch_rot.quat_apply(fall_direction_quat, robot_rot)
# robot_rot = torch_rot.quat_apply(fall_angle_quat, robot_rot)
# root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
root_vel = torch.zeros((num_resets, 6), device=self._device)
#
self._robots.set_world_poses(robot_pos, robot_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self): # Run only once after simulation started
# self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() # save initial position for reset
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
# self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
# self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
# self.basis_vec0 = self.heading_vec.clone()
# self.basis_vec1 = self.up_vec.clone()
self._wheel_0_dof_idx = self._robots.get_dof_index("wheel_0_joint")
self._wheel_1_dof_idx = self._robots.get_dof_index("wheel_1_joint")
self._wheel_2_dof_idx = self._robots.get_dof_index("wheel_2_joint")
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None: # calculate reward for each env
wheel_vel = self.obs_buf[:, :3]
# if 'self.previous_wheel_vel' in locals():
# wheel_accel = wheel_vel - self.previous_wheel_vel
# else: wheel_accel = torch.zeros_like(wheel_vel)
# wheel_accel_cost = torch.sum(wheel_accel**2, dim=-1)
# self.previous_wheel_vel = wheel_vel.clone()
# balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
robots_omega = self._robots.get_angular_velocities()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
if self.previous_fall_angle is None: falling_velocity = torch.zeros_like(robots_position[:, 0])
else: falling_velocity = fall_angles - self.previous_fall_angle
## aligning up axis of robot and environment
# up_proj = torch.cos(fall_angles)
# up_reward = torch.zeros_like(fall_angles)
# up_reward = torch.where(up_proj > 0.93, up_reward + self.up_weight, up_reward)
# falling_penalty = fall_angles
q1 = self.initial_root_rot # world frame
q2 = robots_orientation # robot orientation
# find product of quaternions
product_quaternion = torch.sum(q1*q2,dim=-1) # <q1, q2>
quaternion_distance = 1 - (product_quaternion**2)
# print(quaternion_distance)
## energy penalty for movement
# actions_cost = torch.sum(self.wheel_velocities ** 2, dim=-1)
# electricity_cost = torch.sum(torch.abs(self.actions * obs_buf[:, 12+num_dof:12+num_dof*2])* self.motor_effort_ratio.unsqueeze(0), dim=-1)
## rotation penality
# rotation_cost = torch.sum(torch_rot.quat_diff_rad(robots_orientation, self.initial_root_rot)** 2, dim=-1)
## reward for duration of staying alive
alive_reward = torch.ones_like(fall_angles) * self.alive_reward_scale
# progress_reward = potentials - prev_potentials
# print(robots_position - balls_position)
# print(torch.sum((robots_position - balls_position)**2, dim=-1))
total_reward = (
- quaternion_distance
# - wheel_accel_cost * 0.05
# - falling_velocity
+ alive_reward
# + up_reward
# - math.e**(-0.01*fall_angles)
# - actions_cost * self.actions_cost_scale
# - torch.sum(robots_omega**2, dim=-1) * 10
# - rotation_cost * 10
)
# adjust reward for fallen agents
total_reward = torch.where(
robots_position[:, 2] < self.termination_height, # fall by height
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
# total_reward = torch.where(
# fall_angles > 50 / 180 * math.pi, # fall by angle
# torch.ones_like(total_reward) * self.death_cost,
# total_reward
# )
# total_reward = torch.where(
# torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, # jump beyond jump_offset
# torch.ones_like(total_reward) * self.death_cost,
# total_reward
# )
self.previous_fall_angle = fall_angles
self.rew_buf[:] = total_reward
def is_done(self) -> None: # check termination for each env
# balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
robot_z_position = robots_position[:, 2]
# print("Z position", robot_z_position)
resets = torch.zeros(self._num_envs, dtype=torch.long, device=self._device)
# resets = torch.where(torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, 1, resets) # jump beyond jump_offset
# resets = torch.where(robot_z_position < 0.25, 1, resets) # reset by falling (Z-position)
# resets = torch.where(fall_angles > 50*(np.pi / 180), 1, resets) # reset by falling (angle)
# resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets) # reset by time
self.reset_buf[:] = resets
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake_old.py
from typing import Optional, Tuple
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.prims import get_prim_at_path, define_prim
import carb
class MoonCake(Robot):
"""[summary]
Args:
stage (Usd.Stage): [description]
prim_path (str): [description]
name (str): [description]
usd_path (str, optional): [description]
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "mooncake",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
prim = get_prim_at_path(prim_path)
if not prim.IsValid():
prim = define_prim(prim_path, "Xform")
if usd_path:
prim.GetReferences().AddReference(usd_path)
else:
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
asset_path = nucleus_server + "/Library/mooncake.usd" # load from nucleus server
prim.GetReferences().AddReference(asset_path)
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
self._wheel_dof_names = ["wheel_0_joint", "wheel_1_joint", "wheel_2_joint"]
self._wheel_dof_indices = None
return
@property
def wheel_dof_indicies(self) -> Tuple[int, int, int]:
"""[summary]
Returns:
Tuple[int, int, int]: [description]
"""
return self._wheel_dof_indices
def get_wheel_positions(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[float, float, float]: [description]
"""
joint_positions = self.get_joint_positions()
return joint_positions[self._wheel_dof_indices[0]], joint_positions[self._wheel_dof_indices[1]], \
joint_positions[self._wheel_dof_indices[2]]
def set_wheel_positions(self, positions: Tuple[float, float, float]) -> None:
"""[summary]
Args:
positions (Tuple[float, float, float]): [description]
"""
joint_positions = [None, None, None]
joint_positions[self._wheel_dof_indices[0]] = positions[0]
joint_positions[self._wheel_dof_indices[1]] = positions[1]
joint_positions[self._wheel_dof_indices[2]] = positions[2]
self.set_joint_positions(positions=np.array(joint_positions))
return
def get_wheel_velocities(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray]: [description]
"""
joint_velocities = self.get_joint_velocities()
return joint_velocities[self._wheel_dof_indices[0]], joint_velocities[self._wheel_dof_indices[1]], \
joint_velocities[self._wheel_dof_indices[2]]
def set_wheel_velocities(self, velocities: Tuple[float, float, float]) -> None:
"""[summary]
Args:
velocities (Tuple[float, float, float]): [description]
"""
joint_velocities = [None, None, None]
joint_velocities[self._wheel_dof_indices[0]] = velocities[0]
joint_velocities[self._wheel_dof_indices[1]] = velocities[1]
joint_velocities[self._wheel_dof_indices[2]] = velocities[2]
self.set_joint_velocities(velocities=np.array(joint_velocities))
return
def get_wheel_efforts(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray]: [description]
"""
joint_efforts = self.get_joint_efforts()
return joint_efforts[self._wheel_dof_indices[0]], joint_efforts[self._wheel_dof_indices[1]], joint_efforts[
self._wheel_dof_indices[2]]
def set_wheel_efforts(self, velocities: Tuple[float, float, float]) -> None:
"""[summary]
Args:
efforts (Tuple[float, float, float]): [description]
"""
joint_efforts = [None, None, None]
joint_efforts[self._wheel_dof_indices[0]] = velocities[0]
joint_efforts[self._wheel_dof_indices[1]] = velocities[1]
joint_efforts[self._wheel_dof_indices[2]] = velocities[2]
self.set_joint_efforts(efforts=np.array(joint_efforts))
return
def apply_wheel_actions(self, actions: ArticulationAction) -> None:
"""[summary]
Args:
actions (ArticulationAction): [description]
"""
actions_length = actions.get_length()
if actions_length is not None and actions_length != 3:
raise Exception("ArticulationAction passed should be equal to 3")
joint_actions = ArticulationAction()
if actions.joint_positions is not None:
joint_actions.joint_positions = np.zeros(self.num_dof)
joint_actions.joint_positions[self._wheel_dof_indices[0]] = actions.joint_positions[0]
joint_actions.joint_positions[self._wheel_dof_indices[1]] = actions.joint_positions[1]
joint_actions.joint_positions[self._wheel_dof_indices[2]] = actions.joint_positions[2]
if actions.joint_velocities is not None:
joint_actions.joint_velocities = np.zeros(self.num_dof)
joint_actions.joint_velocities[self._wheel_dof_indices[0]] = actions.joint_velocities[0]
joint_actions.joint_velocities[self._wheel_dof_indices[1]] = actions.joint_velocities[1]
joint_actions.joint_velocities[self._wheel_dof_indices[2]] = actions.joint_velocities[2]
if actions.joint_efforts is not None:
joint_actions.joint_efforts = np.zeros(self.num_dof)
joint_actions.joint_efforts[self._wheel_dof_indices[0]] = actions.joint_efforts[0]
joint_actions.joint_efforts[self._wheel_dof_indices[1]] = actions.joint_efforts[1]
joint_actions.joint_efforts[self._wheel_dof_indices[2]] = actions.joint_efforts[2]
self.apply_action(control_actions=joint_actions)
return
def initialize(self) -> None:
"""[summary]
"""
super().initialize()
print(self._dofs_infos) # print OrderedDict of all dof_name:dof_object
self._wheel_dof_indices = (
self.get_dof_index(self._wheel_dof_names[0]),
self.get_dof_index(self._wheel_dof_names[1]),
self.get_dof_index(self._wheel_dof_names[2]),
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
# print(len(self._articulation_controller._dof_controllers))
# print(self._articulation_controller._dof_controllers)
# Assign kd only for the driven articulation joints (3 wheels) and leave the others as None
kds = [None] * len(self._articulation_controller._dof_controllers)
for i in self._wheel_dof_indices: kds[i] = 1e2
self._articulation_controller.set_gains(kds=kds)
self._articulation_controller.switch_control_mode(mode="effort") # effort, velocity, position
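# After post_reset the articulation is torque-controlled: only the three wheel joints receive a
# damping gain (kd = 1e2) and commands are expected as efforts, e.g. (sketch, not from the original):
#   robot.apply_wheel_actions(ArticulationAction(joint_efforts=np.array([0.1, 0.1, 0.1])))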
return
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train_ac2.py
from env import MoonCakeEnv
import gym
from stable_baselines3 import A2C
from stable_baselines3.common.env_util import make_vec_env
my_env = MoonCakeEnv(headless=False)
# Parallel environments
# env = make_vec_env("CartPole-v1", n_envs=4)
model = A2C("MlpPolicy", my_env, verbose=1)
model.learn(total_timesteps=5000000)
model.save("a2c_cartpole")
# del model # remove to demonstrate saving and loading
#
# model = A2C.load("a2c_cartpole")
#
# obs = env.reset()
# while True:
# action, _states = model.predict(obs)
# obs, rewards, dones, info = env.step(action)
# env.render()
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train_ppo_lstm.py
import numpy as np
from env_mooncake import MoonCakeEnv
import gym
import numpy as np
import wandb
from wandb.integration.sb3 import WandbCallback
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.evaluation import evaluate_policy
config = {
"policy_type": "MlpLstmPolicy",
"total_timesteps": 3000000,
"env_name": "MoonCake-v3",
}
run = wandb.init(
project="mooncake_test",
config=config,
sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
# monitor_gym=True, # auto-upload the videos of agents playing the game
# save_code=True, # optional
)
env = MoonCakeEnv(skip_frame=1,
physics_dt=1.0 / 100.0,
rendering_dt=1.0 / 60.0,
max_episode_length=10,
display_every_iter=20,
headless=False,
observation_list=["lin_acc_x", "lin_acc_y", "lin_acc_z", "ang_vel_x", "ang_vel_y", "ang_vel_z", "robot_rotation_x", "robot_rotation_y", "robot_rotation_z"])
model = RecurrentPPO("MlpLstmPolicy", env, verbose=1, tensorboard_log=f"runs/{run.id}", device="cuda")
model.learn(
total_timesteps=config["total_timesteps"],
callback=WandbCallback(
gradient_save_freq=1000,
model_save_path=f"models/{run.id}",
verbose=2,
),
)
run.finish()
# mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, warn=False)
# print(mean_reward)
model.save("ppo_recurrent")
del model # remove to demonstrate saving and loading
model = RecurrentPPO.load("ppo_recurrent")
observations = env.reset()
# cell and hidden state of the LSTM
lstm_states = None
num_envs = 1
# Episode start signals are used to reset the lstm states
episode_starts = np.ones((num_envs,), dtype=bool)
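# RecurrentPPO carries an LSTM hidden state across steps: `episode_starts` tells predict() when an
# episode boundary was crossed so the recurrent state is reset, and lstm_states is set back to None
# after each terminal step so the next episode starts from a fresh state.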
while True:
# obs = [observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"]]
# obs = np.array(obs, dtype=np.float32)
action, lstm_states = model.predict(observations, state=lstm_states, episode_start=episode_starts, deterministic=True)
observations, rewards, dones, info = env.step(action)
episode_starts = dones
env.render()
if dones:
lstm_states = None # Clear internal states
observations = env.reset()
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake_task.py
import time
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from mooncake import Mooncake, Ball
import omni
from scripts.common import set_drive_parameters
from pxr import UsdPhysics, Gf, UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path, get_all_matching_child_prims
import omni.isaac.core.utils.torch.rotations as torch_rot
# from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate, quat_from_angle_axis, quat_rotate
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
from omni.isaac.isaac_sensor import _isaac_sensor
import numpy as np
import torch
import torch.nn.functional as f
import math
import random
def euler_to_quaternion(r):
(roll, pitch, yaw) = (r[0], r[1], r[2])
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
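# Note: this helper returns the quaternion in (x, y, z, w) order, whereas q2falling() below and the
# poses returned by Isaac Sim's get_world_poses() use (w, x, y, z); mixing the two requires reordering.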
# def q2falling(q):
# fall_angle = 2*torch.acos(q[:,0])*torch.sqrt((q[:,1]*q[:,1] + q[:,2]*q[:,2])/(q[:,1]*q[:,1]) + q[:,2]*q[:,2] + q[:,3]*q[:,3])
# return fall_angle
# def q2falling(robots_orientation):
# up_vectors = torch.zeros_like(robots_orientation)
# up_vectors[:, 3] = 1
# return torch_rot.quat_diff_rad(robots_orientation, up_vectors)
def q2falling(q):
norm_vec = f.normalize(q[:, 1:], p=1, dim=1)
return 2 * torch.acos(q[:, 0]) * torch.sqrt((norm_vec[:, 0] * norm_vec[:, 0] + norm_vec[:, 1] * norm_vec[:, 1]))
class MooncakeTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._ball_size = 0.12
self._ball_positions = torch.tensor([0.0, 0.0, 0.12])  # ball radius is 12 cm, so its center starts 0.12 m above the floor.
self._robot_offset = 0.1962
self._jump_offset = 0.01
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_wheel_velocity = self._task_cfg["env"]["maxWheelVelocity"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._max_episode_length = 5000
self._num_observations = 22
self._num_actions = 3
self._imu_buf = [{"lin_acc_x":0.0, "lin_acc_y":0.0, "lin_acc_z":0.0, "ang_vel_x":0.0, "ang_vel_y":0.0, "ang_vel_z":0.0}]*128 # default initial sensor buffer
self._is = _isaac_sensor.acquire_imu_sensor_interface() # Sensor reader
self.previous_fall_angle = None
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_mooncake() # must be called before "super().set_up_scene(scene)"
# self.get_ball()
super().set_up_scene(scene)
print(get_all_matching_child_prims("/"))
self._robots = ArticulationView(prim_paths_expr="/World/envs/*/Mooncake/mooncake", name="mooncake_view")
# Add ball for each robot
stage = omni.usd.get_context().get_stage()
for robot_path in self._robots.prim_paths:
ball_path = robot_path[:-18] + "/ball" # remove "/Mooncake/mooncake" and add "/ball" instead
cubeGeom = UsdGeom.Sphere.Define(stage, ball_path)
ballPrim = stage.GetPrimAtPath(ball_path)
size = self._ball_size
offset = Gf.Vec3f(0.0, 0.0, self._ball_size)
cubeGeom.CreateRadiusAttr(size)
cubeGeom.AddTranslateOp().Set(offset)
# Attach Rigid Body and Collision Preset
rigid_api = UsdPhysics.RigidBodyAPI.Apply(ballPrim)
mass_api = UsdPhysics.MassAPI.Apply(ballPrim)
mass_api.CreateMassAttr(4)
rigid_api.CreateRigidBodyEnabledAttr(True)
UsdPhysics.CollisionAPI.Apply(ballPrim)
phys_api = UsdPhysics.MaterialAPI.Apply(ballPrim)
phys_api.CreateStaticFrictionAttr().Set(1.0)
phys_api.CreateDynamicFrictionAttr().Set(1.0)
print(get_all_matching_child_prims("/"))
self._ball = RigidPrimView(prim_paths_expr="/World/envs/*/ball", name="ball_view")
scene.add(self._robots)
scene.add(self._ball)
# self.meters_per_unit = UsdGeom.GetStageMetersPerUnit(omni.usd.get_context().get_stage())
return
def get_mooncake(self): # must be called at very first line of set_up_scene()
robot_position = self._ball_positions
robot_position[2] += self._robot_offset
mooncake = Mooncake(prim_path=self.default_zero_env_path + "/Mooncake", name="Mooncake", translation=robot_position)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Mooncake", get_prim_at_path(mooncake.prim_path), self._sim_config.parse_actor_config("Mooncake"))
def get_ball(self):
ball = Ball(prim_path=self.default_zero_env_path + "/Ball", name="Ball", translation=self._ball_positions)
def get_robot(self):
return self._robots
def get_observations(self) -> dict:
# dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
wheel_vel_0 = dof_vel[:, self._wheel_0_dof_idx]
wheel_vel_1 = dof_vel[:, self._wheel_1_dof_idx]
wheel_vel_2 = dof_vel[:, self._wheel_2_dof_idx]
imu_accel_x = torch.tensor([imu["lin_acc_x"] for imu in self._imu_buf])
imu_accel_y = torch.tensor([imu["lin_acc_y"] for imu in self._imu_buf])
imu_accel_z = torch.tensor([imu["lin_acc_z"] for imu in self._imu_buf])
imu_gyro_x = torch.tensor([imu["ang_vel_x"] for imu in self._imu_buf])
imu_gyro_y = torch.tensor([imu["ang_vel_y"] for imu in self._imu_buf])
imu_gyro_z = torch.tensor([imu["ang_vel_z"] for imu in self._imu_buf])
self.obs_buf[:, 0] = wheel_vel_0
self.obs_buf[:, 1] = wheel_vel_1
self.obs_buf[:, 2] = wheel_vel_2
self.obs_buf[:, 3] = imu_accel_x
self.obs_buf[:, 4] = imu_accel_y
self.obs_buf[:, 5] = imu_accel_z
self.obs_buf[:, 6] = imu_gyro_x
self.obs_buf[:, 7] = imu_gyro_y
self.obs_buf[:, 8] = imu_gyro_z
robot_v = self._robots.get_linear_velocities()
ball_v = self._ball.get_linear_velocities()  # ball velocity comes from the ball view
robot_w = self._robots.get_angular_velocities()
_, robot_orientation = self._robots.get_world_poses()
self.obs_buf[:, 9] = robot_v[:, 0]
self.obs_buf[:, 10] = robot_v[:, 1]
self.obs_buf[:, 11] = robot_v[:, 2]
self.obs_buf[:, 12] = ball_v[:, 0]
self.obs_buf[:, 13] = ball_v[:, 1]
self.obs_buf[:, 14] = ball_v[:, 2]
self.obs_buf[:, 15] = robot_w[:, 0]
self.obs_buf[:, 16] = robot_w[:, 1]
self.obs_buf[:, 17] = robot_w[:, 2]
self.obs_buf[:, 18] = robot_orientation[:, 0]
self.obs_buf[:, 19] = robot_orientation[:, 1]
self.obs_buf[:, 20] = robot_orientation[:, 2]
self.obs_buf[:, 21] = robot_orientation[:, 3]
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
# print(observations)
return observations
def pre_physics_step(self, actions) -> None:
actions = torch.nan_to_num(actions, nan=0.0) # replace NaN with zero (nan_to_num returns a new tensor, so reassign)
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device) # save for later energy calculation
# actions[:, 0] = actions[:, 0] * self._max_wheel_velocity
# actions[:, 1] = actions[:, 1] * self._max_wheel_velocity
# actions[:, 2] = actions[:, 2] * 0.5 # omega_bz
## calculate wheel_velocities from wheel's kinematics
# actions = actions.to(self._device) * self._max_wheel_velocity
# wheel_velocities = torch.zeros((self._robots.count, 3), dtype=torch.float32, device=self._device)
# wheel_velocities[:, 0] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
# wheel_velocities[:, 1] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
# wheel_velocities[:, 2] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
# wheel_velocities = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
# wheel_velocities[:, self._wheel_0_dof_idx] = -12.8558 * actions[:, 1] - 11.0172 * actions[:, 2]
# wheel_velocities[:, self._wheel_1_dof_idx] = 11.1334 * actions[:, 0] + 6.4279 * actions[:, 1] + 8.2664 * actions[:, 2]
# wheel_velocities[:, self._wheel_2_dof_idx] = 6.4279 * actions[:, 1] - 11.1334 * actions[:, 0] + 8.2664 * actions[:, 2]
# self.wheel_velocities = wheel_velocities.clone().to(self._device) # save for later energy calculation
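        # Scale the normalized policy actions in [-1, 1] to joint efforts (torques) using the configured maxEffort.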
wheel_effort = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
wheel_effort[:, self._wheel_0_dof_idx] = actions[:, 0] * self._max_push_effort
wheel_effort[:, self._wheel_1_dof_idx] = actions[:, 1] * self._max_push_effort
wheel_effort[:, self._wheel_2_dof_idx] = actions[:, 2] * self._max_push_effort
        ## Apply joint efforts (batched over all envs) ##
        from omni.isaac.core.utils.types import ArticulationActions   # batched version of ArticulationAction
        self._robots.apply_action(ArticulationActions(joint_efforts=wheel_effort))
        # self._robots.apply_action(ArticulationActions(joint_velocities=wheel_velocities))
        ## Alternative: drive each wheel joint per env through the USD DriveAPI ##
        # stage = omni.usd.get_context().get_stage()
        # for env in range(self._num_envs):
        #     axle_0 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_0_joint".format(env)), "angular")
        #     axle_1 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_1_joint".format(env)), "angular")
        #     axle_2 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/World/envs/env_{}/Mooncake/mooncake/base_plate/wheel_2_joint".format(env)), "angular")
        #     set_drive_parameters(axle_0, "velocity", math.degrees(wheel_velocities[env, 0]), 0.05, math.radians(1e7))
        #     set_drive_parameters(axle_1, "velocity", math.degrees(wheel_velocities[env, 1]), 0.05, math.radians(1e7))
        #     set_drive_parameters(axle_2, "velocity", math.degrees(wheel_velocities[env, 2]), 0.05, math.radians(1e7))
        #     self._robots[env].apply_wheel_actions(ArticulationAction(joint_efforts=wheel_effort[env]))
        #     set_drive_parameters(axle_0, "effort", wheel_effort[env, 0], 0, math.radians(1e7))
        #     set_drive_parameters(axle_1, "effort", wheel_effort[env, 1], 0, math.radians(1e7))
        #     set_drive_parameters(axle_2, "effort", wheel_effort[env, 2], 0, math.radians(1e7))
## Read IMU & store in buffer ##
buffer = []
robots_prim_path = self._robots.prim_paths
for robot_prim_path in robots_prim_path:
reading = self._is.get_sensor_readings(robot_prim_path + "/base_plate/sensor") # read from select sensor (by prim_path)
if reading.shape[0]:
                buffer.append(reading[-1])  # keep only the latest reading
else: buffer.append({"lin_acc_x":0.0, "lin_acc_y":0.0, "lin_acc_z":0.0, "ang_vel_x":0.0, "ang_vel_y":0.0, "ang_vel_z":0.0}) # default initial sensor buffer
self._imu_buf = buffer
def reset_idx(self, env_ids):
num_resets = len(env_ids)
## randomize DOF velocities ##
# dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, 57), device=self._device)
# self._robots.set_joint_velocities(dof_vel, indices=env_ids) # apply resets
## Reset Ball positions ##
ball_pos, ball_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
        ball_pos[:, 2] = 0.12   # force ball to touch the floor perfectly
root_vel = torch.zeros((num_resets, 6), device=self._device)
# Apply Ball position
self._ball.set_world_poses(ball_pos, ball_rot, indices=env_ids)
## Random Ball velocities ##
ball_vel = torch_rand_float(-0.01, 0.01, (num_resets, 6), device=self._device)
self._ball.set_velocities(ball_vel, indices=env_ids)
## Random Robot positions & orientations ##
fall_direction = torch_rand_float(-np.pi, np.pi, (num_resets, 1), device=self._device).reshape(-1)
fall_direction_axis = torch.Tensor([0, 0, 1]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_angle = torch_rand_float(0, np.pi/8, (num_resets, 1), device=self._device).reshape(-1)
fall_angle_axis = torch.Tensor([0, 1, 0]).repeat(num_resets, 1).to(self._device).reshape(-1, 3)
fall_direction_quat = torch_rot.quat_from_angle_axis(fall_direction, fall_direction_axis)
fall_angle_quat = torch_rot.quat_from_angle_axis(fall_angle, fall_angle_axis)
## Apply Robot position ##
robot_pos = ball_pos.clone() # use ball position as reference
        robot_offset = torch.Tensor([0, 0, self._robot_offset]).repeat(num_resets).to(self._device).reshape(-1, 3)    # distance from ball center to robot center (self._robot_offset = 19.62 cm)
robot_pos = robot_pos + robot_offset
# robot_pos = robot_pos + torch_rot.quat_rotate(fall_angle_quat, torch_rot.quat_rotate(fall_direction_quat, robot_offset))
robot_rot = self.initial_root_rot[env_ids]
# robot_rot = torch_rot.quat_apply(fall_direction_quat, robot_rot)
# robot_rot = torch_rot.quat_apply(fall_angle_quat, robot_rot)
# root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
# root_vel = torch.zeros((num_resets, 6), device=self._device)
#
self._robots.set_world_poses(robot_pos, robot_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self): # Run only once after simulation started
# self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() # save initial position for reset
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
# self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
# self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
# self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
# self.basis_vec0 = self.heading_vec.clone()
# self.basis_vec1 = self.up_vec.clone()
self._wheel_0_dof_idx = self._robots.get_dof_index("wheel_0_joint")
self._wheel_1_dof_idx = self._robots.get_dof_index("wheel_1_joint")
self._wheel_2_dof_idx = self._robots.get_dof_index("wheel_2_joint")
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None: # calculate reward for each env
wheel_vel = self.obs_buf[:, :3]
# if 'self.previous_wheel_vel' in locals():
# wheel_accel = wheel_vel - self.previous_wheel_vel
# else: wheel_accel = torch.zeros_like(wheel_vel)
# wheel_accel_cost = torch.sum(wheel_accel**2, dim=-1)
# self.previous_wheel_vel = wheel_vel.clone()
balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
robots_omega = self._robots.get_angular_velocities()
        fall_angles = q2falling(robots_orientation)  # fall angle of all robots (batched)
if self.previous_fall_angle is None: falling_velocity = torch.zeros_like(robots_position[:, 0])
else: falling_velocity = fall_angles - self.previous_fall_angle
## aligning up axis of robot and environment
# up_proj = torch.cos(fall_angles)
# up_reward = torch.zeros_like(fall_angles)
# up_reward = torch.where(up_proj > 0.93, up_reward + self.up_weight, up_reward)
# falling_penalty = fall_angles
q1 = self.initial_root_rot # world frame
q2 = robots_orientation # robot orientation
        # quaternion dot product <q1, q2> = cos(theta/2), where theta is the rotation between the two orientations
        product_quaternion = torch.sum(q1*q2, dim=-1)   # <q1, q2>
quaternion_distance = 1 - (product_quaternion**2)
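        # quaternion_distance = 1 - <q1, q2>^2 = sin^2(theta/2): 0 while the robot keeps its initial (upright)
        # orientation and approaching 1 as it tilts away, so the reward below penalizes deviation from upright.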
# print(quaternion_distance)
## energy penalty for movement
# actions_cost = torch.sum(self.wheel_velocities ** 2, dim=-1)
# electricity_cost = torch.sum(torch.abs(self.actions * obs_buf[:, 12+num_dof:12+num_dof*2])* self.motor_effort_ratio.unsqueeze(0), dim=-1)
        ## rotation penalty
# rotation_cost = torch.sum(torch_rot.quat_diff_rad(robots_orientation, self.initial_root_rot)** 2, dim=-1)
## reward for duration of staying alive
alive_reward = torch.ones_like(fall_angles) * self.alive_reward_scale
# progress_reward = potentials - prev_potentials
# print(robots_position - balls_position)
# print(torch.sum((robots_position - balls_position)**2, dim=-1))
total_reward = (
- quaternion_distance
# - wheel_accel_cost * 0.05
# - falling_velocity
+ alive_reward
# + up_reward
# - math.e**(-0.01*fall_angles)
# - actions_cost * self.actions_cost_scale
# - torch.sum(robots_omega**2, dim=-1) * 10
# - rotation_cost * 10
)
# adjust reward for fallen agents
total_reward = torch.where(
robots_position[:, 2] < self.termination_height, # fall by height
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
total_reward = torch.where(
fall_angles > 50 / 180 * math.pi, # fall by angle
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
total_reward = torch.where(
torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, # jump beyond jump_offset
torch.ones_like(total_reward) * self.death_cost,
total_reward
)
self.previous_fall_angle = fall_angles
self.rew_buf[:] = total_reward
def is_done(self) -> None: # check termination for each env
balls_position, balls_orientation = self._ball.get_world_poses()
robots_position, robots_orientation = self._robots.get_world_poses()
        fall_angles = q2falling(robots_orientation)  # fall angle of all robots (batched)
robot_z_position = robots_position[:, 2]
# print("Z position", robot_z_position)
resets = torch.zeros(self._num_envs, dtype=torch.long, device=self._device)
resets = torch.where(torch.sum((robots_position - balls_position)**2, dim=-1) > (self._robot_offset+self._jump_offset)**2, 1, resets) # jump beyond jump_offset
resets = torch.where(robot_z_position < 0.25, 1, resets) # reset by falling (Z-position)
resets = torch.where(fall_angles > 50*(np.pi / 180), 1, resets) # reset by falling (angle)
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets) # reset by time
self.reset_buf[:] = resets
| 20,930 | Python | 50.427518 | 169 | 0.615767 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/rlgames_train.py | from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
import hydra
from omegaconf import DictConfig, OmegaConf
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
import datetime
import os
import torch
class RLGTrainer():
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
# `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register('RLGPU',
lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {
'vecenv_type': 'RLGPU',
'env_creator': lambda **kwargs: env
})
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
experiment_dir = os.path.join('runs', self.cfg.train.params.config.name)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run({
'train': not self.cfg.test,
'play': self.cfg.test,
'checkpoint': self.cfg.checkpoint,
'sigma': None
})
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
from scripts.sim_config import SimConfig
sim_config = SimConfig(cfg_dict)
cfg = DictConfig(sim_config.config)
# from mooncake_task_2 import MooncakeTask
from mooncake_task_2 import MooncakeTask
task = MooncakeTask(name="Mooncake",
sim_config=sim_config,
env=env
)
# task = initialize_task(cfg_dict, env)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
if cfg.wandb_activate:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
id=run_name,
resume="allow",
monitor_gym=True,
)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if cfg.wandb_activate:
wandb.finish()
if __name__ == '__main__':
parse_hydra_configs()
| 3,895 | Python | 31.198347 | 116 | 0.640822 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/env_mooncake.py | import random
import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.imu_sensor import _imu_sensor
state_low = [-100, -100, -10]
state_high = [100, 100, 10]
action_low = [-1]
action_high = [1]
def q2falling(q):
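    # Tilt (fall) angle from quaternion q = (w, x, y, z): the total rotation angle is 2*acos(w); the
    # sqrt((x^2+y^2)/(x^2+y^2+z^2)) factor keeps only the tilt component and discards pure yaw rotation.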
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
class MoonCakeEnv(gym.Env):
metadata = {"render.modes": ["human"]}
def __init__(
self,
skip_frame=1,
physics_dt=1.0 / 100.0,
rendering_dt=1.0 / 60.0,
max_episode_length=60,
display_every_iter=20,
seed=0,
headless=True,
observation_list=["lin_acc_y", "lin_acc_z", "ang_vel_x"],
) -> None:
from omni.isaac.kit import SimulationApp
## Specify simulation parameters ##
self._physics_dt = physics_dt
self._rendering_dt = rendering_dt
self._max_episode_length = max_episode_length / self._physics_dt # 60 second after reset
self._skip_frame = skip_frame
self._iteration_count = 0
self._display_every_iter = display_every_iter
self._update_every = 1
self._explore_every = 5
self._headless = headless
self._observation_list = observation_list
self.simulation_app = SimulationApp({"headless": self._headless, "anti_aliasing": 0})
self.previous_observations = {}
## Setup World ##
from omni.isaac.core import World
from mooncake_old import MoonCake
from omni.isaac.core.objects import DynamicSphere
self.world = World(physics_dt=self._physics_dt, rendering_dt=self._rendering_dt, stage_units_in_meters=0.01)
self.world.scene.add_default_ground_plane()
self.robot = self.world.scene.add(
MoonCake(
prim_path="/mooncake",
name="mooncake_mk0",
position=np.array([0, 0.0, 30.0]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
self.ball = self.world.scene.add(
DynamicSphere(
prim_path="/ball",
name="ball",
position=np.array([0, 0, 12]),
                radius=12,  # medicine ball diameter 24 cm.
color=np.array([1.0, 0, 0]),
mass=4,
)
)
## Setup IMU ##
self.imu_interface = _imu_sensor.acquire_imu_sensor_interface()
self.props = _imu_sensor.SensorProperties()
        self.props.position = carb.Float3(0, 0, 10)  # IMU offset above the parent body (cm.)
self.props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
self.props.sensorPeriod = 1 / 500 # 2ms
        self._sensor_handle = self.imu_interface.add_sensor_on_body("/mooncake/base_plate", self.props)  # parent link name assumed; the original "/obike/chassic" prim belongs to a different robot and does not exist in this scene
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(len(self._observation_list),), dtype=np.float32)
def step(self, action):
## EXECUTE ACTION ##
from omni.isaac.core.utils.types import ArticulationAction
# print(action)
# action = (action-0.5)+random.random()*0.01
joint_actions = ArticulationAction()
joint_actions.joint_efforts = np.zeros(self.robot.num_dof)
# joint_actions.joint_velocities = np.zeros(self.robot.num_dof)
# joint_actions.joint_positions = np.zeros(self.robot.num_dof)
# joint_actions.joint_efforts[self.robot._wheel_dof_indices[0]] = action[0] * 1000 * 2
# joint_actions.joint_efforts[self.robot._wheel_dof_indices[1]] = action[1] * 1000 * 2
# joint_actions.joint_efforts[self.robot._wheel_dof_indices[2]] = action[2] * 1000 * 2
# joint_actions.joint_velocities[self.robot._wheel_dof_indices[1]] = -0.2* 1000
# joint_actions.joint_positions[self.robot._wheel_dof_indices[2]] = 0
# self.robot.apply_action(control_actions=joint_actions)
self.robot.apply_wheel_actions(ArticulationAction(joint_efforts=[action[i] * 3000 for i in range(3)]))
self.world.step(render=(not self._headless) and (self._iteration_count%self._display_every_iter==0))
observations = self.get_observation()
reward = self.previous_observations['fall_rotation'] - observations['fall_rotation']
reward *= 100
## Check for stop event ##
exceed_time_limit = self.world.current_time_step_index >= self._max_episode_length
robot_fall = True if observations['fall_rotation'] > 50 / 180 * math.pi else False
done = exceed_time_limit or robot_fall
info = {}
        ## Scale observations to match the (-1, 1) observation space ##
        scaled_observation = []
        for name in self._observation_list:
            if "lin_acc" in name: scaled_observation.append(observations[name]/1000) # (-1000,1000)cm/s^2 -> (-1,1)
            elif "ang_vel" in name: scaled_observation.append(observations[name]/10) # (-10,10)rad/s -> (-1,1)
            else: scaled_observation.append(observations[name]) # e.g. quaternion components, already in range (-1,1)
        self.previous_observations = observations.copy()
        return scaled_observation, reward, done, info
def reset(self):
self._iteration_count += 1
self.world.reset()
self.robot.initialize()
# self.world.scene.remove("/obike")
# from obike import Obike
# self.robot = self.world.scene.add(
# Obike(
# prim_path="/obike",
# name="obike_mk0",
# position=np.array([10 * random.random(), 10 * random.random(), 1.435]),
# orientation=np.array([1.0, 0.0, 0.0, 0.0]),
# )
# )
        observations = self.get_observation()
        obs = []
        for name in self._observation_list: # apply the same scaling as in step()
            if "lin_acc" in name: obs.append(observations[name]/1000)
            elif "ang_vel" in name: obs.append(observations[name]/10)
            else: obs.append(observations[name])
        self.previous_observations['fall_rotation'] = 0
        return obs
def get_observation(self):
observations = {"robot_position_x":None, "robot_position_y":None, "robot_position_z":None, "robot_rotation_x":None, "robot_rotation_y":None, "robot_rotation_z":None, "robot_rotation_w":None, "lin_acc_x":None, "lin_acc_y":None, "lin_acc_z":None, "ang_vel_x":None, "ang_vel_y":None, "ang_vel_z":None}
[observations["robot_position_x"], observations["robot_position_y"], observations["robot_position_z"]], [observations["robot_rotation_x"], observations["robot_rotation_y"], observations["robot_rotation_z"], observations["robot_rotation_w"]] = self.robot.get_world_pose()
reading = self.imu_interface.get_sensor_readings(self._sensor_handle)
if reading.shape[0] == 0: # no valid data in buffer -> init observation wih zeros
observations["lin_acc_x"], observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"], observations["ang_vel_y"], observations["ang_vel_z"] = 0, 0, 0, 0, 0, 0
else:
observations["lin_acc_x"], observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"], observations["ang_vel_y"], observations["ang_vel_z"] = reading[-1]["lin_acc_x"], reading[-1]["lin_acc_y"], reading[-1]["lin_acc_z"], reading[-1]["ang_vel_x"], reading[-1]["ang_vel_y"], reading[-1]["ang_vel_z"]
observations["fall_rotation"] = q2falling([observations["robot_rotation_x"], observations["robot_rotation_y"], observations["robot_rotation_z"], observations["robot_rotation_w"]])
return observations
def close(self):
pass
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
np.random.seed(seed)
return [seed] | 7,919 | Python | 48.81132 | 331 | 0.59856 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train_ddpg.py | from env import MoonCakeEnv
import gym
import numpy as np
from stable_baselines3 import DDPG
from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
my_env = MoonCakeEnv(headless=False)
# env = gym.make("Pendulum-v1")
# The noise objects for DDPG
n_actions = my_env.action_space.shape[-1]
# action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.01 * np.ones(n_actions))
model = DDPG("MlpPolicy", my_env, verbose=1)
# model = DDPG("MlpPolicy", my_env, action_noise=action_noise, verbose=1)
model.learn(total_timesteps=10000, log_interval=10)
model.save("ddpg_pendulum")
# env = model.get_env()
#
# del model # remove to demonstrate saving and loading
#
# model = DDPG.load("ddpg_pendulum")
#
# obs = env.reset()
# while True:
# action, _states = model.predict(obs)
# obs, rewards, dones, info = env.step(action)
# env.render() | 889 | Python | 29.689654 | 93 | 0.727784 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train_dqn.py | from env import MoonCakeEnv
import gym
from stable_baselines3 import DQN
my_env = MoonCakeEnv(headless=False)
# env = gym.make("CartPole-v0")
model = DQN("MlpPolicy", my_env, verbose=1)
model.learn(total_timesteps=10000, log_interval=4)
model.save("dqn_cartpole")
# del model # remove to demonstrate saving and loading
#
# model = DQN.load("dqn_cartpole")
#
# obs = env.reset()
# while True:
# action, _states = model.predict(obs, deterministic=True)
# obs, reward, done, info = env.step(action)
# env.render()
# if done:
# obs = env.reset() | 566 | Python | 24.772726 | 62 | 0.685512 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake.py | from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_server_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.types import ArticulationAction
import carb
class Mooncake(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "Mooncake",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
server_path = get_server_path()
if server_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = server_path + "/Library/mooncake.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
articulation_controller=None,
)
self._wheel_dof_indices = [self.get_dof_index("wheel_0_joint"),
self.get_dof_index("wheel_1_joint"),
self.get_dof_index("wheel_2_joint")]
def apply_wheel_actions(self, actions: ArticulationAction) -> None:
"""[summary]
Args:
actions (ArticulationAction): [description]
"""
actions_length = actions.get_length()
if actions_length is not None and actions_length != 3:
raise Exception("ArticulationAction passed should be equal to 3")
joint_actions = ArticulationAction()
if actions.joint_positions is not None:
joint_actions.joint_positions = np.zeros(self.num_dof)
joint_actions.joint_positions[self._wheel_dof_indices[0]] = actions.joint_positions[0]
joint_actions.joint_positions[self._wheel_dof_indices[1]] = actions.joint_positions[1]
joint_actions.joint_positions[self._wheel_dof_indices[2]] = actions.joint_positions[2]
if actions.joint_velocities is not None:
joint_actions.joint_velocities = np.zeros(self.num_dof)
joint_actions.joint_velocities[self._wheel_dof_indices[0]] = actions.joint_velocities[0]
joint_actions.joint_velocities[self._wheel_dof_indices[1]] = actions.joint_velocities[1]
joint_actions.joint_velocities[self._wheel_dof_indices[2]] = actions.joint_velocities[2]
if actions.joint_efforts is not None:
joint_actions.joint_efforts = np.zeros(self.num_dof)
joint_actions.joint_efforts[self._wheel_dof_indices[0]] = actions.joint_efforts[0]
joint_actions.joint_efforts[self._wheel_dof_indices[1]] = actions.joint_efforts[1]
joint_actions.joint_efforts[self._wheel_dof_indices[2]] = actions.joint_efforts[2]
self.apply_action(control_actions=joint_actions)
return
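# Example usage (hypothetical scene setup; effort values are placeholders):
#   robot = world.scene.add(Mooncake(prim_path="/World/Mooncake", name="mooncake"))
#   # ...after world.reset() / robot.initialize()...
#   robot.apply_wheel_actions(ArticulationAction(joint_efforts=[10.0, -5.0, 0.0]))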
# class Ball(Robot):
# def __init__(
# self,
# prim_path: str,
# name: Optional[str] = "Ball",
# usd_path: Optional[str] = None,
# translation: Optional[np.ndarray] = None,
# orientation: Optional[np.ndarray] = None,
# ) -> None:
# self._usd_path = usd_path
# self._name = name
#
# if self._usd_path is None:
# server_path = get_server_path()
# if server_path is None:
# carb.log_error("Could not find Isaac Sim assets folder")
# self._usd_path = server_path + "/Library/ball.usd"
#
# add_reference_to_stage(self._usd_path, prim_path)
#
# super().__init__(
# prim_path=prim_path,
# name=name,
# translation=translation,
# orientation=orientation,
# articulation_controller=None,
# ) | 4,034 | Python | 40.597938 | 100 | 0.594695 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train_select_observe.py | from stable_baselines3 import PPO
from stable_baselines3.ppo import CnnPolicy, MlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback
import torch as th
from env_select_observe import MoonCakeEnv
import os
log_dir = "./mlp_policy"
# set headles to false to visualize training
b = [1, 1, 1, 1, 1]
save_dir = log_dir + "/mooncake_policy_" + str(b[0])+str(b[1])+str(b[2])+str(b[3])+str(b[4])
os.makedirs(save_dir, exist_ok=True)
with open(save_dir + '/log.txt', 'w') as f:
f.write(str(b) + '\n')
[print("####################################################################################################") for i in range(3)]
print(b)
[print("####################################################################################################") for i in range(3)]
my_env = MoonCakeEnv(headless=True, observ_selection = [b[0], b[1], b[2], b[3], b[4]]) #
# checkpoint_callback = CheckpointCallback(save_freq=10000, save_path=log_dir, name_prefix="mooncake_policy_checkpoint")
model = PPO(MlpPolicy,
my_env,
verbose=1,
n_steps=10000,
batch_size=100,
learning_rate=0.00025,
)
# model.learn(total_timesteps=500000, callback=[checkpoint_callback])
model.learn(total_timesteps=500000)
model.save(save_dir)
my_env.close()
| 1,297 | Python | 37.176469 | 129 | 0.560524 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/train.py | from env2 import MoonCakeEnv
from stable_baselines3 import PPO
from stable_baselines3.ppo import CnnPolicy, MlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback
import torch as th
log_dir = "./mlp_policy"
# set headles to false to visualize training
my_env = MoonCakeEnv(headless=False)
policy_kwargs = dict(activation_fn=th.nn.Tanh, net_arch=[16, dict(pi=[64, 32], vf=[64, 32])])
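# net_arch: one shared 16-unit layer, then separate [64, 32] heads for the policy (pi) and value function (vf).
# Note: these kwargs are only referenced by the commented-out CnnPolicy configuration below; the active MlpPolicy model does not pass them.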
total_timesteps = 5000000
checkpoint_callback = CheckpointCallback(save_freq=10000, save_path=log_dir, name_prefix="mooncake_policy_checkpoint")
# model = PPO(
# CnnPolicy,
# my_env,
# policy_kwargs=policy_kwargs,
# verbose=1,
# n_steps=10000,
# batch_size=1000,
# learning_rate=0.00025,
# gamma=0.9995,
# device="cuda",
# ent_coef=0,
# vf_coef=0.5,
# max_grad_norm=10,
# tensorboard_log=log_dir,
# )
model = PPO(MlpPolicy,
my_env,
verbose=1,
n_steps=10000,
batch_size=100,
learning_rate=0.00025
)
model.learn(total_timesteps=total_timesteps, callback=[checkpoint_callback])
model.save(log_dir + "/mooncake_policy")
my_env.close()
| 1,165 | Python | 26.116278 | 118 | 0.663519 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/mooncake_task_3.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from mooncake import Mooncake
import omni
from pxr import UsdPhysics, Gf, UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.torch.rotations as torch_rot
from omni.isaac.isaac_sensor import _isaac_sensor
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
import numpy as np
import torch
import torch.nn.functional as f
import math
class MooncakeTask(RLTask): | 601 | Python | 32.444443 | 85 | 0.828619 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/continuous_control.py | # -*- coding: utf-8 -*-
"""
A Continuous Control Implementation for TensorFlow 2.0
Author: W.J.A. van Heeswijk
Date: 11-8-2020
This code is supplemental to the following note:
'Implementing Gaussian Actor Networks for Continuous Control in TensorFlow 2.0'
https://www.researchgate.net/publication/343714359_Implementing_Gaussian_Actor_Networks_for_Continuous_Control_in_TensorFlow_20
Corresponding blog post:
https://towardsdatascience.com/a-minimal-working-example-for-continuous-policy-gradients-in-tensorflow-2-0-d3413ec38c6b
Python 3.8 and TensorFlow 2.3 were used to write the algorithm
This code has been published under the GNU GPLv3 license
"""
# Needed for training the network
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import tensorflow.keras.initializers as initializers
# Needed for animation
import matplotlib.pyplot as plt
import time
"""Plot output"""
plt.ion()
# here we are creating sub plots
figure, ax = plt.subplots(figsize=(10, 8))
# Add labels and legend
plt.xlabel('Episode')
plt.ylabel('Parameter value')
plt.grid()
plt.legend(loc='best')
def plot():
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# Append arrays
epoch_ar.append(int(i))
mu_ar.append(float(mu))
sigma_ar.append(float(sigma))
reward_ar.append(float(reward))
target_ar.append(float(mu_target))
# Plot outcomes
ax.plot(epoch_ar, mu_ar, label='mu')
ax.plot(epoch_ar, sigma_ar, label='sigma')
ax.plot(epoch_ar, reward_ar, label='reward')
ax.plot(epoch_ar, target_ar, label='target')
# plt.show()
figure.canvas.draw()
figure.canvas.flush_events()
time.sleep(0.01)
"""Construct the actor network with mu and sigma as output"""
def ConstructActorNetwork(bias_mu, bias_sigma):
inputs = layers.Input(shape=(1,)) # input dimension
hidden1 = layers.Dense(5, activation="relu", kernel_initializer=initializers.he_normal())(inputs)
hidden2 = layers.Dense(5, activation="relu", kernel_initializer=initializers.he_normal())(hidden1)
mu = layers.Dense(1, activation="linear", kernel_initializer=initializers.Zeros(), \
bias_initializer=initializers.Constant(bias_mu))(hidden2)
sigma = layers.Dense(1, activation="softplus", kernel_initializer=initializers.Zeros(), \
bias_initializer=initializers.Constant(bias_sigma))(hidden2)
actor_network = keras.Model(inputs=inputs, outputs=[mu, sigma])
return actor_network
"""Weighted Gaussian log likelihood loss function"""
def CustomLossGaussian(state, action, reward):
# Obtain mu and sigma from actor network
nn_mu, nn_sigma = actor_network(state)
# Obtain pdf of Gaussian distribution
pdf_value = tf.exp(-0.5 * ((action - nn_mu) / (nn_sigma)) ** 2) * \
1 / (nn_sigma * tf.sqrt(2 * np.pi))
# Compute log probability
log_probability = tf.math.log(pdf_value + 1e-5)
# Compute weighted loss
loss_actor = - reward * log_probability
return loss_actor
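# The loss above is the REINFORCE-style weighted negative log-likelihood:
#   L = -R * log N(a | mu, sigma), with N the Gaussian pdf produced by the actor,
# so gradient descent on L increases the probability of actions that earned high reward.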
"""Main code"""
# Initialize fixed state
state = tf.constant([[1.0]])
# Define properties reward function
mu_target = 4.0
target_range = 0.25
max_reward = 1.0
# Create actor network
bias_mu = 0.0 # bias 0.0 yields mu=0.0 with linear activation function
bias_sigma = 0.55 # bias 0.55 yields sigma=1.0 with softplus activation function
actor_network = ConstructActorNetwork(bias_mu, bias_sigma)
opt = keras.optimizers.Adam(learning_rate=0.001)
# Initialize arrays for plot
epoch_ar = []
mu_ar = []
sigma_ar = []
reward_ar = []
target_ar = []
for i in range(10000 + 1):
# Obtain mu and sigma from network
mu, sigma = actor_network(state)
# Draw action from normal distribution
action = tf.random.normal \
([1], mean=mu, stddev=sigma)
# Compute reward
reward = max_reward / max(target_range, abs(mu_target - action)) * target_range
# Update network weights
with tf.GradientTape() as tape:
# Compute Gaussian loss
loss_value = CustomLossGaussian(state, action, reward)
# Compute gradients
grads = tape.gradient(loss_value, actor_network.trainable_variables)
# Apply gradients to update network weights
opt.apply_gradients(zip(grads, actor_network.trainable_variables))
# Update console output and plot
if np.mod(i, 100) == 0:
print('\n======episode', i, '======')
print('mu', float(mu))
print('sigma', float(sigma))
print('action', float(action))
print('reward', float(reward))
print('loss', float(loss_value))
plot() | 4,644 | Python | 29.966666 | 127 | 0.68497 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/ppo_lstm.py | import numpy as np
import gym
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.evaluation import evaluate_policy
env = gym.make('CartPole-v1')
model = RecurrentPPO("MlpLstmPolicy", env, verbose=1)
model.learn(50000)
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, warn=False)
print(mean_reward)
model.save("ppo_recurrent")
del model # remove to demonstrate saving and loading
model = RecurrentPPO.load("ppo_recurrent")
obs = env.reset()
# cell and hidden state of the LSTM
lstm_states = None
num_envs = 1
# Episode start signals are used to reset the lstm states
episode_starts = np.ones((num_envs,), dtype=bool)
while True:
action, lstm_states = model.predict(obs, state=lstm_states, episode_start=episode_starts, deterministic=True)
obs, rewards, dones, info = env.step(action)
episode_starts = dones
env.render()
if dones:
lstm_states = None
        obs = env.reset()  # use the fresh observation for the next prediction
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/ppo_recurrent.py | # python -m pip install sb3-contrib
import numpy as np
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.evaluation import evaluate_policy
model = RecurrentPPO("MlpLstmPolicy", "CartPole-v1", verbose=1)
model.learn(50000)
env = model.get_env()
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, warn=False)
print(mean_reward)
model.save("ppo_recurrent")
del model # remove to demonstrate saving and loading
model = RecurrentPPO.load("ppo_recurrent")
obs = env.reset()
# cell and hidden state of the LSTM
lstm_states = None
num_envs = 1
# Episode start signals are used to reset the lstm states
episode_starts = np.ones((num_envs,), dtype=bool)
while True:
action, lstm_states = model.predict(obs, state=lstm_states, episode_start=episode_starts, deterministic=True)
obs, rewards, dones, info = env.step(action)
episode_starts = dones
env.render() | 907 | Python | 30.310344 | 113 | 0.749724 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/simple_CartPole.py | import time
import keras
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import tensorflow.keras.initializers as initializers
import numpy as np
import gym
import matplotlib.pyplot as plt
env = gym.make('CartPole-v1')
gamma = 0.99
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
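# e.g. with gamma = 0.99, rewards [0, 0, 1] discount to [0.9801, 0.99, 1.0]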
"""Weighted Gaussian log likelihood loss function"""
def CustomLossGaussian(state, action, reward):
# Obtain mu and sigma from actor network
nn_mu, nn_sigma = actor_network(state.reshape(-1, 4))
# Obtain pdf of Gaussian distribution
pdf_value = tf.exp(-0.5 * ((action - nn_mu) / (nn_sigma)) ** 2) * 1 / (nn_sigma * tf.sqrt(2 * np.pi))
# Compute log probability
log_probability = tf.math.log(pdf_value + 1e-5)
# Compute weighted loss
loss_actor = - reward * log_probability
return loss_actor
class SimpleAgent(keras.Model):
def __init__(self, s_size, a_size, h_size):
super(SimpleAgent, self).__init__()
# Input() can create a placeholder from an arbitrary tf.TypeSpec
# self.state_in = keras.Input(type_spec=tf.RaggedTensorSpec(shape=[None, s_size], dtype=tf.float32))
self.state_in = keras.Input(shape=[None, s_size], dtype=tf.float32)
self.hidden1 = layers.Dense(h_size, activation="relu")
self.hidden2 = layers.Dense(h_size, activation="relu")
self.mu = layers.Dense(a_size, activation="linear", kernel_initializer=initializers.Zeros())# , bias_initializer=initializers.Constant(bias_mu)
self.sigma = layers.Dense(a_size, activation="softplus", kernel_initializer=initializers.Zeros())# , bias_initializer=initializers.Constant(bias_sigma)
def call(self, inputs, training=False, mask=None):
x = self.state_in = inputs
x = self.hidden1(x, training=training)
x = self.hidden2(x, training=training)
return [self.mu(x, training=training), self.sigma(x, training=training)]
actor_network = SimpleAgent(s_size=4, a_size=1, h_size=8)
print(actor_network.trainable_variables)
max_angle = 0.418
opt = keras.optimizers.Adam(learning_rate=0.001)
update_frequency = 5
i = 0
total_reward = []
total_length = []
total_episodes = 10000
max_episode = 9999
gradBuffer = None
# gradBuffer = actor_network.trainable_variables
# for ix,grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
while i < total_episodes:
state = env.reset()
running_reward = 0
ep_history = []
for j in range(max_episode):
mu, sigma = actor_network(state.reshape(-1, 4)) # Obtain mu and sigma from network
action = tf.random.normal([1], mean=mu, stddev=sigma) # Draw action from normal distribution
# | Num | Action |
# |-----|------------------------|
# | 0 | Push cart to the left |
# | 1 | Push cart to the right |
cart_action = 1 if action.numpy().reshape(-1) > 0 else 0 # threshold action since cart_action is discrete
next_state, reward, d, _ = env.step(cart_action)
# if i % 100 == 0 and i!=0: env.render()
delta_angle = abs(state[2]) # manually calculate reward from falling angle
reward = 1 - (delta_angle / max_angle)
# env.render()
# print(reward, d)
# time.sleep(0.1)
ep_history.append([state, action, reward, next_state])
running_reward += reward
if d==True:
# Update the network
ep_history = np.array(ep_history)
ep_history[:, 2] = discount_rewards(ep_history[:, 2])
if gradBuffer is None: # init gradBuffer
gradBuffer = actor_network.trainable_variables
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
with tf.GradientTape() as tape:
# Compute Gaussian loss
loss_value = CustomLossGaussian(state, action, reward)
# Compute gradients
grads = tape.gradient(loss_value, actor_network.trainable_variables)
# Apply gradients to update network weights
# opt.apply_gradients(zip(grads, actor_network.trainable_variables))
for idx, grad in enumerate(grads): gradBuffer[idx] += grad
if i % update_frequency == 0 and i != 0:
# Apply gradients to update network weights
opt.apply_gradients(zip(gradBuffer, actor_network.trainable_variables))
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0 # reset buffer
total_reward.append(running_reward)
total_length.append(j)
break
state = next_state
if i % 100 == 0: print(np.mean(total_reward[-100:]))
i += 1 | 4,989 | Python | 42.771929 | 159 | 0.629786 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/test_RNN.py | import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
existing_state = lstm_layer.states
e = list(existing_state)
for item in e:
print(item.numpy().shape)
new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state) | 587 | Python | 29.947367 | 69 | 0.747871 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/test.py | import numpy as np
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
input_dim = 3
output_dim = 3
num_timesteps = 2
batch_size = 10
nodes = 10
input_layer = tf.keras.Input(shape=(num_timesteps, input_dim), batch_size=batch_size)
cell = tf.keras.layers.LSTMCell(
nodes,
kernel_initializer='glorot_uniform',
recurrent_initializer='glorot_uniform',
bias_initializer='zeros',
)
lstm = tf.keras.layers.RNN(
cell,
return_state=True,
return_sequences=True,
stateful=True,
)
lstm_out, hidden_state, cell_state = lstm(input_layer)
output = tf.keras.layers.Dense(output_dim)(lstm_out)
mdl = tf.keras.Model(
inputs=input_layer,
outputs=[hidden_state, cell_state, output]
)
# We can now test what’s going on by passing a batch through the network (look Ma, no tf.Session!):
x = np.random.rand(batch_size, num_timesteps, input_dim).astype(np.float32)
h_state, c_state, out = mdl(x)
print(np.mean(out))
# If we pass this same batch again, we get different result as the hidden state has been changed:
h_state, c_state, out = mdl(x)
print(np.mean(out))
# If we reset the hidden state, we can recover our initial output:
lstm.reset_states(states=[np.zeros((batch_size, nodes)), np.zeros((batch_size, nodes))])
h_state, c_state, out = mdl(x)
print(np.mean(out))
# This method also allows us to use other values than all zeros for the hidden state:
lstm.reset_states(states=[np.ones((batch_size, nodes)), np.ones((batch_size, nodes))])
h_state, c_state, out = mdl(x)
print(np.mean(out)) | 1,532 | Python | 29.058823 | 99 | 0.710836 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/td3.py | import tensorflow as tf
import numpy as np
import gym
from tensorflow.keras.models import load_model
# !pip3 install box2d-py
print(tf.config.list_physical_devices('GPU'))
env = gym.make("LunarLanderContinuous-v2")
state_low = env.observation_space.low
state_high = env.observation_space.high
action_low = env.action_space.low
action_high = env.action_space.high
print(state_low)
print(state_high)
print(action_low)
print(action_high)
class RBuffer():
def __init__(self, maxsize, statedim, naction):
self.cnt = 0
self.maxsize = maxsize
self.state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)
self.action_memory = np.zeros((maxsize, naction), dtype=np.float32)
self.reward_memory = np.zeros((maxsize,), dtype=np.float32)
self.next_state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)
        self.done_memory = np.zeros((maxsize,), dtype=bool)  # np.bool is deprecated/removed in recent NumPy
def storexp(self, state, next_state, action, done, reward):
index = self.cnt % self.maxsize
self.state_memory[index] = state
self.action_memory[index] = action
self.reward_memory[index] = reward
self.next_state_memory[index] = next_state
self.done_memory[index] = 1 - int(done)
self.cnt += 1
def sample(self, batch_size):
max_mem = min(self.cnt, self.maxsize)
batch = np.random.choice(max_mem, batch_size, replace=False)
states = self.state_memory[batch]
next_states = self.next_state_memory[batch]
rewards = self.reward_memory[batch]
actions = self.action_memory[batch]
dones = self.done_memory[batch]
return states, next_states, rewards, actions, dones
class Critic(tf.keras.Model):
def __init__(self):
super(Critic, self).__init__()
self.f1 = tf.keras.layers.Dense(512, activation='relu')
self.f2 = tf.keras.layers.Dense(512, activation='relu')
self.v = tf.keras.layers.Dense(1, activation=None)
def call(self, inputstate, action):
x = self.f1(tf.concat([inputstate, action], axis=1))
x = self.f2(x)
x = self.v(x)
return x
class Actor(tf.keras.Model):
def __init__(self, no_action):
super(Actor, self).__init__()
self.f1 = tf.keras.layers.Dense(512, activation='relu')
self.f2 = tf.keras.layers.Dense(512, activation='relu')
self.mu = tf.keras.layers.Dense(no_action, activation='tanh')
def call(self, state):
x = self.f1(state)
x = self.f2(x)
x = self.mu(x)
return x
class Agent():
def __init__(self, n_action=len(env.action_space.high)):
self.actor_main = Actor(n_action)
self.actor_target = Actor(n_action)
self.critic_main = Critic()
self.critic_main2 = Critic()
self.critic_target = Critic()
self.critic_target2 = Critic()
self.batch_size = 64
self.n_actions = len(env.action_space.high)
self.a_opt = tf.keras.optimizers.Adam(0.001)
# self.actor_target = tf.keras.optimizers.Adam(.001)
self.c_opt1 = tf.keras.optimizers.Adam(0.002)
self.c_opt2 = tf.keras.optimizers.Adam(0.002)
# self.critic_target = tf.keras.optimizers.Adam(.002)
self.memory = RBuffer(1_00_000, env.observation_space.shape, len(env.action_space.high))
self.trainstep = 0
# self.replace = 5
self.gamma = 0.99
self.min_action = env.action_space.low[0]
self.max_action = env.action_space.high[0]
self.actor_update_steps = 2
self.warmup = 200
def act(self, state, evaluate=False):
if self.trainstep > self.warmup:
evaluate = True
state = tf.convert_to_tensor([state], dtype=tf.float32)
actions = self.actor_main(state)
if not evaluate:
actions += tf.random.normal(shape=[self.n_actions], mean=0.0, stddev=0.1)
actions = self.max_action * (tf.clip_by_value(actions, self.min_action, self.max_action))
# print(actions)
return actions[0]
def savexp(self, state, next_state, action, done, reward):
self.memory.storexp(state, next_state, action, done, reward)
def update_target(self):
self.actor_target.set_weights(self.actor_main.get_weights())
self.critic_target.set_weights(self.critic_main.get_weights())
self.critic_target2.set_weights(self.critic_main2.get_weights())
def train(self):
if self.memory.cnt < self.batch_size:
return
states, next_states, rewards, actions, dones = self.memory.sample(self.batch_size)
states = tf.convert_to_tensor(states, dtype=tf.float32)
next_states = tf.convert_to_tensor(next_states, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.float32)
# dones = tf.convert_to_tensor(dones, dtype= tf.bool)
with tf.GradientTape() as tape1, tf.GradientTape() as tape2:
target_actions = self.actor_target(next_states)
target_actions += tf.clip_by_value(
tf.random.normal(shape=[*np.shape(target_actions)], mean=0.0, stddev=0.2), -0.5, 0.5)
target_actions = self.max_action * (tf.clip_by_value(target_actions, self.min_action, self.max_action))
target_next_state_values = tf.squeeze(self.critic_target(next_states, target_actions), 1)
target_next_state_values2 = tf.squeeze(self.critic_target2(next_states, target_actions), 1)
critic_value = tf.squeeze(self.critic_main(states, actions), 1)
critic_value2 = tf.squeeze(self.critic_main2(states, actions), 1)
next_state_target_value = tf.math.minimum(target_next_state_values, target_next_state_values2)
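            # TD3 clipped double-Q target: y = r + gamma * min(Q1', Q2')(s', a' + clipped noise) * (1 - done);
            # done_memory already stores (1 - done), so multiplying by `dones` masks out terminal transitions.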
target_values = rewards + self.gamma * next_state_target_value * dones
critic_loss1 = tf.keras.losses.MSE(target_values, critic_value)
critic_loss2 = tf.keras.losses.MSE(target_values, critic_value2)
grads1 = tape1.gradient(critic_loss1, self.critic_main.trainable_variables)
grads2 = tape2.gradient(critic_loss2, self.critic_main2.trainable_variables)
self.c_opt1.apply_gradients(zip(grads1, self.critic_main.trainable_variables))
self.c_opt2.apply_gradients(zip(grads2, self.critic_main2.trainable_variables))
self.trainstep += 1
if self.trainstep % self.actor_update_steps == 0:
with tf.GradientTape() as tape3:
new_policy_actions = self.actor_main(states)
actor_loss = -self.critic_main(states, new_policy_actions)
actor_loss = tf.math.reduce_mean(actor_loss)
grads3 = tape3.gradient(actor_loss, self.actor_main.trainable_variables)
self.a_opt.apply_gradients(zip(grads3, self.actor_main.trainable_variables))
# if self.trainstep % self.replace == 0:
self.update_target()
with tf.device('GPU:0'):
tf.random.set_seed(336699)
agent = Agent(2)
episods = 20000
ep_reward = []
total_avgr = []
target = False
for s in range(episods):
if target == True:
break
total_reward = 0
state = env.reset()
done = False
while not done:
if s%10==0: env.render()
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
agent.savexp(state, next_state, action, done, reward)
agent.train()
state = next_state
total_reward += reward
if done:
ep_reward.append(total_reward)
avg_reward = np.mean(ep_reward[-100:])
total_avgr.append(avg_reward)
print("total reward after {} steps is {} and avg reward is {}".format(s, total_reward, avg_reward))
if avg_reward == 200:
target = True
| 7,977 | Python | 37.917073 | 115 | 0.620409 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/simple_CartPole_actor_critic_manual_loop.py | import collections
import gym
import numpy as np
import statistics
import tensorflow as tf
import tqdm
from matplotlib import pyplot as plt
from tensorflow.keras import layers
from typing import Any, List, Sequence, Tuple
# Create the environment
env = gym.make("CartPole-v1")
# Set seed for experiment reproducibility
seed = 42
env.seed(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
# Small epsilon value for stabilizing division operations
eps = np.finfo(np.float32).eps.item()
class ActorCritic(tf.keras.Model):
"""Combined actor-critic network."""
def __init__(
self,
num_actions: int,
num_hidden_units: int):
"""Initialize."""
super().__init__()
self.common = layers.Dense(num_hidden_units, activation="relu")
# self.lstm = layers.LSTM(units=num_hidden_units, activation="relu", recurrent_activation="sigmoid", stateful=True)
self.actor = layers.Dense(num_actions)
self.critic = layers.Dense(1)
def call(self, inputs: tf.Tensor, states = None) -> Tuple[tf.Tensor, tf.Tensor]:
x = inputs
x = self.common(x)
# if states is None: states = self.lstm.get_initial_state(x)
# else: states = self.lstm.states
# x = self.lstm(inputs, initial_state=states)
return self.actor(x), self.critic(x)
num_actions = env.action_space.n # 2
num_hidden_units = 128
model = ActorCritic(num_actions, num_hidden_units)
# Wrap OpenAI Gym's `env.step` call as an operation in a TensorFlow function.
# This would allow it to be included in a callable TensorFlow graph.
def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns state, reward and done flag given an action."""
state, reward, done, _ = env.step(action)
return (state.astype(np.float32),
np.array(reward, np.int32),
np.array(done, np.int32))
def tf_env_step(action: tf.Tensor) -> List[tf.Tensor]:
return tf.numpy_function(env_step, [action], [tf.float32, tf.int32, tf.int32])
def run_episode(
initial_state: tf.Tensor,
model: tf.keras.Model,
max_steps: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Runs a single episode to collect training data."""
action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
rewards = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)
initial_state_shape = initial_state.shape
state = initial_state
for t in tf.range(max_steps):
# Convert state into a batched tensor (batch size = 1)
state = tf.expand_dims(state, 0)
# Run the model and to get action probabilities and critic value
action_logits_t, value = model(state)
print("ACTION_LOGITS_T")
print(action_logits_t)
print("S.D.")
print(value)
# Sample next action from the action probability distribution
action = tf.random.categorical(action_logits_t, 1)[0, 0]
print("ACTION")
print(action)
action_probs_t = tf.nn.softmax(action_logits_t)
# Store critic values
values = values.write(t, tf.squeeze(value))
# Store log probability of the action chosen
action_probs = action_probs.write(t, action_probs_t[0, action])
# Apply action to the environment to get next state and reward
state, reward, done = tf_env_step(action)
state.set_shape(initial_state_shape)
# Store reward
rewards = rewards.write(t, reward)
if tf.cast(done, tf.bool):
break
action_probs = action_probs.stack()
values = values.stack()
rewards = rewards.stack()
return action_probs, values, rewards
def get_expected_return(
rewards: tf.Tensor,
gamma: float,
standardize: bool = True) -> tf.Tensor:
"""Compute expected returns per timestep."""
n = tf.shape(rewards)[0]
returns = tf.TensorArray(dtype=tf.float32, size=n)
# Start from the end of `rewards` and accumulate reward sums
# into the `returns` array
rewards = tf.cast(rewards[::-1], dtype=tf.float32)
discounted_sum = tf.constant(0.0)
discounted_sum_shape = discounted_sum.shape
for i in tf.range(n):
reward = rewards[i]
discounted_sum = reward + gamma * discounted_sum
discounted_sum.set_shape(discounted_sum_shape)
returns = returns.write(i, discounted_sum)
returns = returns.stack()[::-1]
if standardize:
returns = ((returns - tf.math.reduce_mean(returns)) / (tf.math.reduce_std(returns) + eps))
return returns
huber_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM)
def compute_loss(
action_probs: tf.Tensor,
values: tf.Tensor,
returns: tf.Tensor) -> tf.Tensor:
"""Computes the combined actor-critic loss."""
advantage = returns - values
action_log_probs = tf.math.log(action_probs)
actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)
critic_loss = huber_loss(values, returns)
return actor_loss + critic_loss
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
@tf.function
def train_step(
initial_state: tf.Tensor,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
gamma: float,
max_steps_per_episode: int) -> tf.Tensor:
"""Runs a model training step."""
with tf.GradientTape() as tape:
# Run the model for one episode to collect training data
action_probs, values, rewards = run_episode( initial_state, model, max_steps_per_episode)
# Calculate expected returns
returns = get_expected_return(rewards, gamma)
# Convert training data to appropriate TF tensor shapes
action_probs, values, returns = [tf.expand_dims(x, 1) for x in [action_probs, values, returns]]
# Calculating loss values to update our network
loss = compute_loss(action_probs, values, returns)
# Compute the gradients from the loss
grads = tape.gradient(loss, model.trainable_variables)
# Apply the gradients to the model's parameters
optimizer.apply_gradients(zip(grads, model.trainable_variables))
episode_reward = tf.math.reduce_sum(rewards)
return episode_reward
min_episodes_criterion = 100
max_episodes = 10000
max_steps_per_episode = 1000
# Cartpole-v0 is considered solved if average reward is >= 195 over 100
# consecutive trials
reward_threshold = 195
running_reward = 0
# Discount factor for future rewards
gamma = 0.99
# Keep last episodes reward
episodes_reward: collections.deque = collections.deque(maxlen=min_episodes_criterion)
with tqdm.trange(max_episodes) as t:
for i in t:
initial_state = tf.constant(env.reset(), dtype=tf.float32)
episode_reward = int(train_step(initial_state, model, optimizer, gamma, max_steps_per_episode))
episodes_reward.append(episode_reward)
running_reward = statistics.mean(episodes_reward)
t.set_description(f'Episode {i}')
t.set_postfix(
episode_reward=episode_reward, running_reward=running_reward)
# Show average episode reward every 10 episodes
if i % 10 == 0:
pass # print(f'Episode {i}: average reward: {avg_reward}')
if running_reward > reward_threshold and i >= min_episodes_criterion:
break
print(f'\nSolved at episode {i}: average reward: {running_reward:.2f}!')
| 7,501 | Python | 32.19469 | 123 | 0.665245 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/simple_RNN_state_reuse.py | import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
batch_size = 16
batch_count = 10
patience = 5 # if no improvement in N steps -> stop training
timesteps = 9
def get_train_data():
x_batch_train, y_batch_train = [], []
for i in range(batch_size):
offset = random.random()
width = random.random()*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_train.append(sequence[:timesteps])
y_batch_train.append((sequence[timesteps]+1)/2)
x_batch_train = np.array(x_batch_train).reshape((batch_size, timesteps, 1))
y_batch_train = np.array(y_batch_train).reshape((batch_size, 1))
return x_batch_train, y_batch_train
def get_val_data():
x_batch_val, y_batch_val = [], []
for i in range(batch_size):
offset = i/batch_size
width = (1+i)/batch_size*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_val.append(sequence[:timesteps])
y_batch_val.append((sequence[timesteps]+1)/2)
x_batch_val = np.array(x_batch_val).reshape((batch_size, timesteps, 1))
y_batch_val = np.array(y_batch_val).reshape((batch_size, 1))
return x_batch_val, y_batch_val
def train_step(x, y, model, optimizer, loss_fn, train_acc_metric):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits) # update training matric
return loss_value
def test_step(x, y, model, loss_fn, val_acc_metric):
val_logits = model(x, training=False)
loss_value = loss_fn(y, val_logits)
val_acc_metric.update_state(y, val_logits)
return loss_value
class SimpleLSTM(keras.Model):
def __init__(self, num_input, embedding_dim, lstm_units, num_output):
        super().__init__()
# self.embedding = layers.Embedding(num_input, embedding_dim)
self.lstm1 = layers.LSTM(lstm_units, return_sequences=True, return_state=True)
self.dense = layers.Dense(num_output)
def call(self, inputs, states=None, return_state = False, training=False):
x = inputs
# x = self.embedding(x, training=training)
        if states is None: states = self.lstm1.get_initial_state(x) # list [h, c], each of shape (batch_size, lstm_units)
        print(x.shape)
        print(len(states))
        print(states[0].shape)
        # with return_sequences/return_state the LSTM returns the full sequence plus the final hidden and cell states
        x, state_h, state_c = self.lstm1(x, initial_state=states, training=training)
        x = self.dense(x[:, -1, :], training=training)  # predict from the last timestep's output
        if return_state: return x, [state_h, state_c]
        else: return x
model = SimpleLSTM(num_input=9, embedding_dim=32, lstm_units=64, num_output=3)
model.build(input_shape=(1, timesteps, 1))
model.summary()
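# A minimal state-reuse sketch (my addition, not part of the original script):
# feed the states returned by one call back into the next call so the LSTM
# state carries over between calls. Uncomment to try it.
# x_demo, _ = get_train_data()
# out, states = model(x_demo, return_state=True)                      # starts from a fresh (zero) state
# out_next, states = model(x_demo, states=states, return_state=True)  # continues from the previous state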
optimizer = tf.optimizers.Adam(learning_rate=0.0025)
loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
train_mse_metric = keras.metrics.MeanSquaredError()
val_mse_metric = keras.metrics.MeanSquaredError()
test_mse_metric = keras.metrics.MeanSquaredError()
val_loss_tracker = []
for epoch in range(1000):
print("\nStart of epoch %d" % (epoch,))
train_loss = []
val_loss = []
test_loss = []
# Iterate over the batches of the dataset
for step in range(batch_count):
x_batch_train, y_batch_train = get_train_data()
loss_value = train_step(x_batch_train,
y_batch_train,
model,
optimizer,
loss_fn,
train_mse_metric)
train_loss.append(float(loss_value))
# Run a validation loop at the end of each epoch
for step in range(batch_count):
x_batch_val, y_batch_val = get_val_data()
val_loss_value = test_step(x_batch_val, y_batch_val,
model, loss_fn,
val_mse_metric)
val_loss.append(float(val_loss_value))
val_loss_tracker.append(np.mean(val_loss))
# Display metrics at the end of each epoch
train_acc = train_mse_metric.result()
print("Training mse over epoch: %.4f" % (float(train_acc),))
val_acc = val_mse_metric.result()
print("Validation mse: %.4f" % (float(val_acc),))
test_acc = test_mse_metric.result()
# Reset metrics at the end of each epoch
train_mse_metric.reset_states()
val_mse_metric.reset_states()
if len(val_loss_tracker) > patience:
still_better = False
for i in range(patience):
if val_loss_tracker[len(val_loss_tracker) - patience + i] < min(
val_loss_tracker[:len(val_loss_tracker) - patience]): still_better = True
if still_better == False: break | 4,889 | Python | 40.794871 | 111 | 0.628554 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/ppo_lstm_wandb.py | import numpy as np
import gym
import wandb
from wandb.integration.sb3 import WandbCallback
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.evaluation import evaluate_policy
config = {
"policy_type": "MlpLstmPolicy",
"total_timesteps": 25000,
"env_name": "CartPole-v1",
}
run = wandb.init(
project="sb3",
config=config,
sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
# monitor_gym=True, # auto-upload the videos of agents playing the game
# save_code=True, # optional
)
env = gym.make('CartPole-v1')
model = RecurrentPPO("MlpLstmPolicy", env, verbose=1, tensorboard_log=f"runs/{run.id}")
model.learn(
total_timesteps=config["total_timesteps"],
callback=WandbCallback(
gradient_save_freq=100,
model_save_path=f"models/{run.id}",
verbose=2,
),
)
run.finish()
# mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, warn=False)
# print(mean_reward)
model.save("ppo_recurrent")
del model # remove to demonstrate saving and loading
model = RecurrentPPO.load("ppo_recurrent")
obs = env.reset()
# cell and hidden state of the LSTM
lstm_states = None
num_envs = 1
# Episode start signals are used to reset the lstm states
episode_starts = np.ones((num_envs,), dtype=bool)
while True:
action, lstm_states = model.predict(obs, state=lstm_states, episode_start=episode_starts, deterministic=True)
obs, rewards, dones, info = env.step(action)
episode_starts = dones
env.render()
if dones:
lstm_states = None
        obs = env.reset() | 1,577 | Python | 26.68421 | 113 | 0.700063 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/simple_RNN.py | import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
batch_size = 16
batch_count = 10
patience = 5 # if no improvement in N steps -> stop training
timesteps = 9
input_dim = 1
# Build the RNN model
def build_model():
model = keras.models.Sequential(
[
keras.layers.LSTM(32, input_shape=(timesteps, input_dim)), # (time-steps, n_features)
# keras.layers.BatchNormalization(),
keras.layers.Dense(1),
]
)
return model
def get_train_data():
x_batch_train, y_batch_train = [], []
for i in range(batch_size):
offset = random.random()
width = random.random()*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_train.append(sequence[:timesteps])
y_batch_train.append((sequence[timesteps]+1)/2)
x_batch_train = np.array(x_batch_train).reshape((batch_size, timesteps, 1))
y_batch_train = np.array(y_batch_train).reshape((batch_size, 1))
return x_batch_train, y_batch_train
def get_val_data():
x_batch_val, y_batch_val = [], []
for i in range(batch_size):
offset = i/batch_size
width = (1+i)/batch_size*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_val.append(sequence[:timesteps])
y_batch_val.append((sequence[timesteps]+1)/2)
x_batch_val = np.array(x_batch_val).reshape((batch_size, timesteps, 1))
y_batch_val = np.array(y_batch_val).reshape((batch_size, 1))
return x_batch_val, y_batch_val
def train_step(x, y, model, optimizer, loss_fn, train_acc_metric):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
    train_acc_metric.update_state(y, logits) # update training metric
return loss_value
def test_step(x, y, model, loss_fn, val_acc_metric):
val_logits = model(x, training=False)
loss_value = loss_fn(y, val_logits)
val_acc_metric.update_state(y, val_logits)
return loss_value
# mnist = keras.datasets.mnist
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_train, x_test = x_train / 255.0, x_test / 255.0
# sample, sample_label = x_train[0], y_train[0]
model = build_model()
print(model.summary())
optimizer = tf.optimizers.Adam(learning_rate=0.0025)
# Instantiate a loss function.
# loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Computes the crossentropy loss between the labels and predictions. (use one-hot) produces a category index of the most likely matching category.
# loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True) # Computes the crossentropy loss between the labels and predictions. (use one-hot) produces a one-hot array containing the probable match for each category.
loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
# Prepare the metrics.
# train_acc_metric = keras.metrics.CategoricalAccuracy()
# val_acc_metric = keras.metrics.CategoricalAccuracy()
# test_acc_metric = keras.metrics.CategoricalAccuracy()
train_acc_metric = keras.metrics.MeanSquaredError()
val_acc_metric = keras.metrics.MeanSquaredError()
test_acc_metric = keras.metrics.MeanSquaredError()
val_loss_tracker = []
for epoch in range(1000):
print("\nStart of epoch %d" % (epoch,))
train_loss = []
val_loss = []
test_loss = []
# Iterate over the batches of the dataset
for step in range(batch_count):
x_batch_train, y_batch_train = get_train_data()
loss_value = train_step(x_batch_train,
y_batch_train,
model,
optimizer,
loss_fn,
train_acc_metric)
train_loss.append(float(loss_value))
# Run a validation loop at the end of each epoch
for step in range(batch_count):
x_batch_val, y_batch_val = get_val_data()
val_loss_value = test_step(x_batch_val, y_batch_val,
model, loss_fn,
val_acc_metric)
val_loss.append(float(val_loss_value))
val_loss_tracker.append(np.mean(val_loss))
# Display metrics at the end of each epoch
train_acc = train_acc_metric.result()
print("Training mse over epoch: %.4f" % (float(train_acc),))
val_acc = val_acc_metric.result()
print("Validation mse: %.4f" % (float(val_acc),))
test_acc = test_acc_metric.result()
# Reset metrics at the end of each epoch
train_acc_metric.reset_states()
val_acc_metric.reset_states()
if len(val_loss_tracker) > patience:
still_better = False
for i in range(patience):
if val_loss_tracker[len(val_loss_tracker) - patience + i] < min(
val_loss_tracker[:len(val_loss_tracker) - patience]): still_better = True
if still_better == False: break
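# A minimal inference sketch (my addition, not part of the original script):
# after training stops, predict the next (scaled) cosine value for one
# validation batch and compare it with the target.
x_demo, y_demo = get_val_data()
y_pred = model(x_demo, training=False)
print("first target vs. prediction: %.4f vs. %.4f" % (float(y_demo[0, 0]), float(y_pred[0, 0])))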
# model.fit(
# x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=10
# ) | 5,220 | Python | 40.768 | 224 | 0.643295 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/wandb_RNN.py | import wandb
from wandb.keras import WandbCallback
wandb.login()
import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# batch_size = 16
train_batch_count = 20 # number of training batch(s) per epoch
val_batch_count = 10 # number of validation batch(s) per epoch
patience = 10 # if no improvement in N steps -> stop training
# timesteps = 9
input_dim = 1
sweep_config = {
'method': 'random', # bayes
'metric': {
'name': 'val_loss',
'goal': 'minimize'
},
'early_terminate': {
'type': 'hyperband',
'min_iter': 10
},
'parameters': {
'timesteps': {
"min": 1,
"max": 20
},
'batch_size': {
'values': [8, 16, 32, 64]
},
'learning_rate':{
"min": 0.0001,
"max": 0.1
},
'n_lstm': {
"min": 1,
"max": 64
}
}
}
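# Note (my addition): with 'method': 'random', wandb samples timesteps, batch_size,
# learning_rate and n_lstm from the ranges above for every run and exposes them to
# sweep_train() through wandb.config; the hyperband early_terminate rule may stop
# unpromising runs after min_iter logged epochs.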
# Build the RNN model
def build_model(n_lstm, timesteps):
model = keras.models.Sequential(
[
keras.layers.LSTM(n_lstm, input_shape=(timesteps, input_dim)), # (time-steps, n_features)
# keras.layers.BatchNormalization(),
keras.layers.Dense(1),
]
)
return model
def get_train_data(batch_size, timesteps):
x_batch_train, y_batch_train = [], []
for i in range(batch_size):
offset = random.random()
width = random.random()*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_train.append(sequence[:timesteps])
y_batch_train.append((sequence[timesteps]+1)/2)
x_batch_train = np.array(x_batch_train).reshape((batch_size, timesteps, 1))
y_batch_train = np.array(y_batch_train).reshape((batch_size, 1))
return x_batch_train, y_batch_train
def get_val_data(batch_size, timesteps):
x_batch_val, y_batch_val = [], []
for i in range(batch_size):
offset = i/batch_size
width = (1+i)/batch_size*3
sequence = np.cos(np.arange(offset, offset+width, width/(timesteps+1)))
x_batch_val.append(sequence[:timesteps])
y_batch_val.append((sequence[timesteps]+1)/2)
x_batch_val = np.array(x_batch_val).reshape((batch_size, timesteps, 1))
y_batch_val = np.array(y_batch_val).reshape((batch_size, 1))
return x_batch_val, y_batch_val
def train_step(x, y, model, optimizer, loss_fn, train_acc_metric):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
    train_acc_metric.update_state(y, logits) # update training metric
return loss_value
def test_step(x, y, model, loss_fn, val_acc_metric):
val_logits = model(x, training=False)
loss_value = loss_fn(y, val_logits)
val_acc_metric.update_state(y, val_logits)
return loss_value
def train(model,
optimizer,
loss_fn,
train_mse_metric,
val_mse_metric,
batch_size,
timesteps):
val_loss_tracker = []
for epoch in range(1000):
print("\nStart of epoch %d" % (epoch,))
train_loss = []
val_loss = []
# Iterate over the batches of the dataset
for step in range(train_batch_count):
x_batch_train, y_batch_train = get_train_data(batch_size=batch_size, timesteps=timesteps)
loss_value = train_step(x_batch_train,
y_batch_train,
model,
optimizer,
loss_fn,
train_mse_metric)
train_loss.append(float(loss_value))
# Run a validation loop at the end of each epoch
for step in range(val_batch_count):
x_batch_val, y_batch_val = get_val_data(batch_size=batch_size, timesteps=timesteps)
val_loss_value = test_step(x_batch_val,
y_batch_val,
model,
loss_fn,
val_mse_metric)
val_loss.append(float(val_loss_value))
        val_loss_tracker.append(np.mean(val_loss))  # track validation loss for manual early stopping
# Display metrics at the end of each epoch
train_mse = train_mse_metric.result()
print("Training mse over epoch: %.4f" % (float(train_mse),))
        val_mse = val_mse_metric.result()
print("Validation mse: %.4f" % (float(val_mse),))
# Reset metrics at the end of each epoch
train_mse_metric.reset_states()
val_mse_metric.reset_states()
# 3️⃣ log metrics using wandb.log
wandb.log({'epochs': epoch,
'loss': np.mean(train_loss),
'mse': float(train_mse),
'val_loss': np.mean(val_loss),
'val_mse': float(val_mse)})
if len(val_loss_tracker) > patience:
still_better = False
for i in range(patience):
if val_loss_tracker[len(val_loss_tracker) - patience + i] < min(val_loss_tracker[:len(val_loss_tracker) - patience]): still_better = True
if still_better == False: break
def sweep_train():
config_defaults = { # default hyperparameters
'batch_size': 8,
'learning_rate': 0.01
}
# Initialize wandb with a sample project name
wandb.init(config=config_defaults) # this gets over-written in the Sweep
wandb.config.architecture_name = "RNN"
wandb.config.dataset_name = "cosine_test"
model = build_model(n_lstm=wandb.config.n_lstm, timesteps=wandb.config.timesteps)
print(model.summary())
optimizer = tf.optimizers.Adam(learning_rate=wandb.config.learning_rate)
# Instantiate a loss function.
# loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Computes the crossentropy loss between the labels and predictions. (use one-hot) produces a category index of the most likely matching category.
# loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True) # Computes the crossentropy loss between the labels and predictions. (use one-hot) produces a one-hot array containing the probable match for each category.
loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
# Prepare the metrics.
# train_acc_metric = keras.metrics.CategoricalAccuracy()
# val_acc_metric = keras.metrics.CategoricalAccuracy()
train_mse_metric = keras.metrics.MeanSquaredError()
val_mse_metric = keras.metrics.MeanSquaredError()
train(model, optimizer, loss_fn, train_mse_metric, val_mse_metric, batch_size=wandb.config.batch_size, timesteps=wandb.config.timesteps)
sweep_id = wandb.sweep(sweep_config, project="cosine_RNN_test")
wandb.agent(sweep_id, function=sweep_train, count=20000)
| 7,089 | Python | 38.83146 | 228 | 0.598533 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/simple_CartPole_actor_critic.py | import collections
import gym
import numpy as np
import statistics
import tensorflow as tf
import tqdm
from matplotlib import pyplot as plt
from tensorflow.keras import layers
from typing import Any, List, Sequence, Tuple
# Create the environment
env = gym.make("CartPole-v1")
# Set seed for experiment reproducibility
seed = 42
env.seed(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
# Small epsilon value for stabilizing division operations
eps = np.finfo(np.float32).eps.item()
class ActorCritic(tf.keras.Model):
"""Combined actor-critic network."""
def __init__(
self,
num_actions: int,
num_hidden_units: int):
"""Initialize."""
super().__init__()
self.common = layers.Dense(num_hidden_units, activation="relu")
self.actor = layers.Dense(num_actions)
self.critic = layers.Dense(1)
def call(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
x = self.common(inputs)
return self.actor(x), self.critic(x)
num_actions = env.action_space.n # 2
num_hidden_units = 128
model = ActorCritic(num_actions, num_hidden_units)
# Wrap OpenAI Gym's `env.step` call as an operation in a TensorFlow function.
# This would allow it to be included in a callable TensorFlow graph.
def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns state, reward and done flag given an action."""
state, reward, done, _ = env.step(action)
return (state.astype(np.float32),
np.array(reward, np.int32),
np.array(done, np.int32))
def tf_env_step(action: tf.Tensor) -> List[tf.Tensor]:
return tf.numpy_function(env_step, [action], [tf.float32, tf.int32, tf.int32])
def run_episode(
initial_state: tf.Tensor,
model: tf.keras.Model,
max_steps: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Runs a single episode to collect training data."""
action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
rewards = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)
initial_state_shape = initial_state.shape
state = initial_state
for t in tf.range(max_steps):
# Convert state into a batched tensor (batch size = 1)
state = tf.expand_dims(state, 0)
# Run the model and to get action probabilities and critic value
action_logits_t, value = model(state)
# Sample next action from the action probability distribution
action = tf.random.categorical(action_logits_t, 1)[0, 0]
action_probs_t = tf.nn.softmax(action_logits_t)
# Store critic values
values = values.write(t, tf.squeeze(value))
# Store log probability of the action chosen
action_probs = action_probs.write(t, action_probs_t[0, action])
# Apply action to the environment to get next state and reward
state, reward, done = tf_env_step(action)
state.set_shape(initial_state_shape)
# Store reward
rewards = rewards.write(t, reward)
if tf.cast(done, tf.bool):
break
action_probs = action_probs.stack()
values = values.stack()
rewards = rewards.stack()
return action_probs, values, rewards
def get_expected_return(
rewards: tf.Tensor,
gamma: float,
standardize: bool = True) -> tf.Tensor:
"""Compute expected returns per timestep."""
n = tf.shape(rewards)[0]
returns = tf.TensorArray(dtype=tf.float32, size=n)
# Start from the end of `rewards` and accumulate reward sums
# into the `returns` array
rewards = tf.cast(rewards[::-1], dtype=tf.float32)
discounted_sum = tf.constant(0.0)
discounted_sum_shape = discounted_sum.shape
for i in tf.range(n):
reward = rewards[i]
discounted_sum = reward + gamma * discounted_sum
discounted_sum.set_shape(discounted_sum_shape)
returns = returns.write(i, discounted_sum)
returns = returns.stack()[::-1]
if standardize:
returns = ((returns - tf.math.reduce_mean(returns)) / (tf.math.reduce_std(returns) + eps))
return returns
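# A tiny worked check (my addition): with rewards [1, 1, 1] and gamma = 0.9 the
# un-standardized returns are [1 + 0.9*(1 + 0.9*1), 1 + 0.9*1, 1] = [2.71, 1.9, 1.0].
# print(get_expected_return(tf.constant([1, 1, 1]), gamma=0.9, standardize=False))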
huber_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM)
def compute_loss(
action_probs: tf.Tensor,
values: tf.Tensor,
returns: tf.Tensor) -> tf.Tensor:
"""Computes the combined actor-critic loss."""
advantage = returns - values
action_log_probs = tf.math.log(action_probs)
actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)
critic_loss = huber_loss(values, returns)
return actor_loss + critic_loss
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
@tf.function
def train_step(
initial_state: tf.Tensor,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
gamma: float,
max_steps_per_episode: int) -> tf.Tensor:
"""Runs a model training step."""
with tf.GradientTape() as tape:
# Run the model for one episode to collect training data
action_probs, values, rewards = run_episode( initial_state, model, max_steps_per_episode)
# Calculate expected returns
returns = get_expected_return(rewards, gamma)
# Convert training data to appropriate TF tensor shapes
action_probs, values, returns = [tf.expand_dims(x, 1) for x in [action_probs, values, returns]]
# Calculating loss values to update our network
loss = compute_loss(action_probs, values, returns)
# Compute the gradients from the loss
grads = tape.gradient(loss, model.trainable_variables)
# Apply the gradients to the model's parameters
optimizer.apply_gradients(zip(grads, model.trainable_variables))
episode_reward = tf.math.reduce_sum(rewards)
return episode_reward
min_episodes_criterion = 100
max_episodes = 10000
max_steps_per_episode = 1000
# CartPole-v0 is considered solved if the average reward is >= 195 over 100
# consecutive trials; the same threshold is reused here for CartPole-v1
reward_threshold = 195
running_reward = 0
# Discount factor for future rewards
gamma = 0.99
# Keep the rewards of the most recent episodes
episodes_reward: collections.deque = collections.deque(maxlen=min_episodes_criterion)
with tqdm.trange(max_episodes) as t:
for i in t:
initial_state = tf.constant(env.reset(), dtype=tf.float32)
episode_reward = int(train_step(
initial_state, model, optimizer, gamma, max_steps_per_episode))
episodes_reward.append(episode_reward)
running_reward = statistics.mean(episodes_reward)
t.set_description(f'Episode {i}')
t.set_postfix(
episode_reward=episode_reward, running_reward=running_reward)
# Show average episode reward every 10 episodes
if i % 10 == 0:
pass # print(f'Episode {i}: average reward: {avg_reward}')
if running_reward > reward_threshold and i >= min_episodes_criterion:
break
print(f'\nSolved at episode {i}: average reward: {running_reward:.2f}!')
| 7,043 | Python | 31.611111 | 103 | 0.669175 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/imu/mpu9250_i2c.py | import smbus
bus = smbus.SMBus(1)
# this is to be saved in the local folder under the name "mpu9250_i2c.py"
# it will be used as the I2C controller and function harbor for the project
# refer to datasheet and register map for full explanation
import smbus,time
def MPU6050_start():
# alter sample rate (stability)
samp_rate_div = 0 # sample rate = 8 kHz/(1+samp_rate_div)
bus.write_byte_data(MPU6050_ADDR, SMPLRT_DIV, samp_rate_div)
time.sleep(0.1)
# reset all sensors
bus.write_byte_data(MPU6050_ADDR,PWR_MGMT_1,0x00)
time.sleep(0.1)
# power management and crystal settings
bus.write_byte_data(MPU6050_ADDR, PWR_MGMT_1, 0x01)
time.sleep(0.1)
#Write to Configuration register
bus.write_byte_data(MPU6050_ADDR, CONFIG, 0)
time.sleep(0.1)
#Write to Gyro configuration register
    gyro_config_sel = [0b00000,0b01000,0b10000,0b11000] # byte registers
gyro_config_vals = [250.0,500.0,1000.0,2000.0] # degrees/sec
gyro_indx = 0
bus.write_byte_data(MPU6050_ADDR, GYRO_CONFIG, int(gyro_config_sel[gyro_indx]))
time.sleep(0.1)
#Write to Accel configuration register
accel_config_sel = [0b00000,0b01000,0b10000,0b11000] # byte registers
accel_config_vals = [2.0,4.0,8.0,16.0] # g (g = 9.81 m/s^2)
accel_indx = 0
bus.write_byte_data(MPU6050_ADDR, ACCEL_CONFIG, int(accel_config_sel[accel_indx]))
time.sleep(0.1)
# interrupt register (related to overflow of data [FIFO])
bus.write_byte_data(MPU6050_ADDR, INT_ENABLE, 1)
time.sleep(0.1)
return gyro_config_vals[gyro_indx],accel_config_vals[accel_indx]
def read_raw_bits(register):
# read accel and gyro values
high = bus.read_byte_data(MPU6050_ADDR, register)
low = bus.read_byte_data(MPU6050_ADDR, register+1)
    # combine high and low bytes into an unsigned value
    value = ((high << 8) | low)
    # convert to a signed (two's complement) value
    if(value > 32767):
        value -= 65536
return value
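# Worked example (my addition): if the high byte is 0xFE and the low byte is 0x0C,
# the unsigned reading is (0xFE << 8) | 0x0C = 65036; since 65036 > 32767 it wraps
# to 65036 - 65536 = -500 counts before being scaled by the configured sensitivity.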
def mpu6050_conv():
# raw acceleration bits
acc_x = read_raw_bits(ACCEL_XOUT_H)
acc_y = read_raw_bits(ACCEL_YOUT_H)
acc_z = read_raw_bits(ACCEL_ZOUT_H)
# raw temp bits
## t_val = read_raw_bits(TEMP_OUT_H) # uncomment to read temp
# raw gyroscope bits
gyro_x = read_raw_bits(GYRO_XOUT_H)
gyro_y = read_raw_bits(GYRO_YOUT_H)
gyro_z = read_raw_bits(GYRO_ZOUT_H)
#convert to acceleration in g and gyro dps
a_x = (acc_x/(2.0**15.0))*accel_sens
a_y = (acc_y/(2.0**15.0))*accel_sens
a_z = (acc_z/(2.0**15.0))*accel_sens
w_x = (gyro_x/(2.0**15.0))*gyro_sens
w_y = (gyro_y/(2.0**15.0))*gyro_sens
w_z = (gyro_z/(2.0**15.0))*gyro_sens
## temp = ((t_val)/333.87)+21.0 # uncomment and add below in return
return a_x,a_y,a_z,w_x,w_y,w_z
def AK8963_start():
bus.write_byte_data(AK8963_ADDR,AK8963_CNTL,0x00)
time.sleep(0.1)
AK8963_bit_res = 0b0001 # 0b0001 = 16-bit
AK8963_samp_rate = 0b0110 # 0b0010 = 8 Hz, 0b0110 = 100 Hz
AK8963_mode = (AK8963_bit_res <<4)+AK8963_samp_rate # bit conversion
bus.write_byte_data(AK8963_ADDR,AK8963_CNTL,AK8963_mode)
time.sleep(0.1)
def AK8963_reader(register):
# read magnetometer values
low = bus.read_byte_data(AK8963_ADDR, register-1)
high = bus.read_byte_data(AK8963_ADDR, register)
    # combine high and low bytes into an unsigned value
    value = ((high << 8) | low)
    # convert to a signed (two's complement) value
    if(value > 32767):
        value -= 65536
return value
def AK8963_conv():
# raw magnetometer bits
loop_count = 0
while 1:
mag_x = AK8963_reader(HXH)
mag_y = AK8963_reader(HYH)
mag_z = AK8963_reader(HZH)
# the next line is needed for AK8963
if bin(bus.read_byte_data(AK8963_ADDR,AK8963_ST2))=='0b10000':
break
loop_count+=1
    # convert to magnetic flux density in uT
m_x = (mag_x/(2.0**15.0))*mag_sens
m_y = (mag_y/(2.0**15.0))*mag_sens
m_z = (mag_z/(2.0**15.0))*mag_sens
return m_x,m_y,m_z
# MPU6050 Registers
MPU6050_ADDR = 0x68
PWR_MGMT_1 = 0x6B
SMPLRT_DIV = 0x19
CONFIG = 0x1A
GYRO_CONFIG = 0x1B
ACCEL_CONFIG = 0x1C
INT_ENABLE = 0x38
ACCEL_XOUT_H = 0x3B
ACCEL_YOUT_H = 0x3D
ACCEL_ZOUT_H = 0x3F
TEMP_OUT_H = 0x41
GYRO_XOUT_H = 0x43
GYRO_YOUT_H = 0x45
GYRO_ZOUT_H = 0x47
#AK8963 registers
AK8963_ADDR = 0x0C
AK8963_ST1 = 0x02
HXH = 0x04
HYH = 0x06
HZH = 0x08
AK8963_ST2 = 0x09
AK8963_CNTL = 0x0A
mag_sens = 4900.0 # magnetometer full-scale range: 4900 uT
# start I2C driver
bus = smbus.SMBus(1) # start comm with i2c bus
gyro_sens,accel_sens = MPU6050_start() # instantiate gyro/accel
# AK8963_start() # instantiate magnetometer
| 4,734 | Python | 30.151316 | 86 | 0.637093 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/examples/imu/test_sensor_visualizer.py | ###############################################
# MPU6050 9-DoF Example Printout
from mpu9250_i2c import *
time.sleep(1) # delay necessary to allow mpu9250 to settle
print('recording data')
# while 1:
# try:
# ax,ay,az,wx,wy,wz = mpu6050_conv() # read and convert mpu6050 data
# # mx,my,mz = AK8963_conv() # read and convert AK8963 magnetometer data
# except:
# continue
# print('{}'.format('-'*30))
# print('accel [g]: x = {0:2.2f}, y = {1:2.2f}, z {2:2.2f}= '.format(ax,ay,az))
# print('gyro [dps]: x = {0:2.2f}, y = {1:2.2f}, z = {2:2.2f}'.format(wx,wy,wz))
# # print('mag [uT]: x = {0:2.2f}, y = {1:2.2f}, z = {2:2.2f}'.format(mx,my,mz))
# # print('{}'.format('-'*30))
# time.sleep(0.01)
#!/usr/bin/python
"""
Update a simple plot as rapidly as possible to measure speed.
"""
import argparse
from collections import deque
from time import perf_counter
import numpy as np
import pyqtgraph as pg
import pyqtgraph.functions as fn
import pyqtgraph.parametertree as ptree
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
nsamples = 50
ax_list = [0]*nsamples
ay_list = [0]*nsamples
az_list = [0]*nsamples
gx_list = [0]*nsamples
gy_list = [0]*nsamples
gz_list = [0]*nsamples
readrate = 100
class MonkeyCurveItem(pg.PlotCurveItem):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.monkey_mode = ''
def setMethod(self, param, value):
self.monkey_mode = value
def paint(self, painter, opt, widget):
if self.monkey_mode not in ['drawPolyline']:
return super().paint(painter, opt, widget)
painter.setRenderHint(painter.RenderHint.Antialiasing, self.opts['antialias'])
painter.setPen(pg.mkPen(self.opts['pen']))
if self.monkey_mode == 'drawPolyline':
painter.drawPolyline(fn.arrayToQPolygonF(self.xData, self.yData))
app = pg.mkQApp("Plot Speed Test")
default_pen = pg.mkPen()
pw = pg.PlotWidget()
pw.setRange(QtCore.QRectF(0, -5, nsamples, 10))
splitter = QtWidgets.QSplitter()
splitter.addWidget(pw)
splitter.show()
pw.setWindowTitle('pyqtgraph example: PlotSpeedTest')
pw.setLabel('bottom', 'Index', units='B')
curve = MonkeyCurveItem(pen=default_pen, brush='b')
pw.addItem(curve)
rollingAverageSize = 1000
elapsed = deque(maxlen=rollingAverageSize)
def resetTimings(*args):
elapsed.clear()
# def makeData(*args):
# global data, connect_array, ptr
# # sigopts = params.child('sigopts')
# if sigopts['noise']:
# data += np.random.normal(size=data.shape)
# connect_array = np.ones(data.shape[-1], dtype=bool)
# ptr = 0
# pw.setRange(QtCore.QRectF(0, -10, nsamples, 20))
def onUseOpenGLChanged(param, enable):
pw.useOpenGL(enable)
def onEnableExperimentalChanged(param, enable):
pg.setConfigOption('enableExperimental', enable)
def onPenChanged(param, pen):
curve.setPen(pen)
def onFillChanged(param, enable):
curve.setFillLevel(0.0 if enable else None)
# params.child('sigopts').sigTreeStateChanged.connect(makeData)
# params.child('useOpenGL').sigValueChanged.connect(onUseOpenGLChanged)
# params.child('enableExperimental').sigValueChanged.connect(onEnableExperimentalChanged)
# params.child('pen').sigValueChanged.connect(onPenChanged)
# params.child('fill').sigValueChanged.connect(onFillChanged)
# params.child('plotMethod').sigValueChanged.connect(curve.setMethod)
# params.sigTreeStateChanged.connect(resetTimings)
# makeData()
fpsLastUpdate = perf_counter()
def update():
global curve, data, ptr, elapsed, fpsLastUpdate, readrate
global ax_list,ay_list,az_list,gx_list,gy_list,gz_list
t_start = perf_counter()
ax,ay,az,gx,gy,gz = mpu6050_conv() # read and convert mpu6050 data
ax_list.append(ax)
ay_list.append(ay)
az_list.append(az)
gx_list.append(gx)
gy_list.append(gy)
gz_list.append(gz)
ax_list = ax_list[1:]
ay_list = ay_list[1:]
az_list = az_list[1:]
gx_list = gx_list[1:]
gy_list = gy_list[1:]
gz_list = gz_list[1:]
# Measure
curve.setData(ax_list)
app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents)
t_end = perf_counter()
    time.sleep(max(0.0, (1/readrate)-(t_end-t_start))) # sleep for the rest of the desired read period
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
pg.exec()
| 4,362 | Python | 28.281879 | 89 | 0.663916 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu pipeline only - device id for running sim and task
device_id: 0
# device to run RL
rl_device: 'cuda:0'
## PhysX arguments
num_threads: 8 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# set default task and default training config based on task
defaults:
- task: Mooncake
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 1,327 | YAML | 23.592592 | 103 | 0.736247 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/cfg/task/Mooncake.yaml | # used to create the object
name: Mooncake
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:32,${...num_envs}}
envSpacing: 1.0
resetDist: 3.0
maxEffort: 100
maxWheelVelocity: 0.3
clipObservations: 5.0
clipActions: 1.0
  controlFrequencyInv: 2 # control every 2nd physics step (50 Hz at dt = 0.01 s)
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.01
energyCost: 0.05
dofVelocityScale: 0.1
angularVelocityScale: 0.25
contactForceScale: 0.01
jointsAtLimitCost: 0.25
deathCost: -1.0
terminationHeight: 0.25
alive_reward_scale: 2.0
sim:
dt: 0.01 # 1/100 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 8192
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Mooncake:
# -1 to use default values
override_usd_defaults: False
fixed_base: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
# per-shape
contact_offset: 0.02
rest_offset: 0.001 | 2,346 | YAML | 24.791209 | 71 | 0.68798 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/cfg/train/MooncakePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
initializer:
name: default
regularizer:
name: None
rnn:
name: 'lstm'
units: 64
layers: 2
before_mlp: False
concat_input: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Mooncake,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 50
save_frequency: 25
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 256
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001 | 1,681 | YAML | 21.131579 | 101 | 0.587745 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/common.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import carb.tokens
import omni
from pxr import UsdGeom, PhysxSchema, UsdPhysics
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
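# A minimal usage sketch (my addition, not part of the original helper); the joint
# path below is hypothetical:
# stage = omni.usd.get_context().get_stage()
# drive = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/World/Robot/wheel_joint"), "angular")
# set_drive_parameters(drive, "velocity", 180.0, stiffness=0.0, damping=1e5, max_force=1e4)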
| 1,653 | Python | 34.191489 | 105 | 0.69147 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/sim_config.py | from omniisaacgymenvs.utils.config_utils.default_scene_params import *
import copy
import omni.usd
class SimConfig():
def __init__(self, config: dict = None):
if config is None:
config = dict()
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
def _parse_config(self):
# general sim parameter
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def add_fixed_base(self, name, prim, cfg, value=None):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "fixed_base")
if value:
root_joint_path = f"{prim.GetPath()}_fixedBaseRootJoint"
joint = UsdPhysics.Joint.Define(stage, root_joint_path)
joint.CreateBody1Rel().SetTargets([prim.GetPath()])
self.apply_articulation_settings(name, joint.GetPrim(), cfg, force_articulation=True)
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg, force_articulation=False):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
if not is_articulation and force_articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Apply(prim)
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
# parse through all children prims
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI(prim)
collision_body = UsdPhysics.CollisionAPI(prim)
articulation = UsdPhysics.ArticulationRootAPI(prim)
if rb:
self.apply_rigid_body_settings(name, prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
if not force_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_articulation_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
children_prims = prim.GetPrim().GetChildren()
prims = prims + children_prims
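# A minimal usage sketch (my addition, not part of the original class); the config
# dict below is illustrative only, real values come from the Hydra/task YAML files:
# sim_config = SimConfig({"test": False, "device_id": 0, "task": {"sim": {"dt": 0.01}}})
# physics_params = sim_config.get_physics_params()
# actor_params = sim_config.parse_actor_config("Mooncake")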
| 16,516 | Python | 43.761517 | 122 | 0.626181 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/lqr_controller.py | import numpy as np
import scipy
from scipy import linalg
import control
import math
def k_gain_calculator(Q_state,R_state):
# defined dynamics parameters
mb = 4.0
mB = 3.3155
mw = 1.098
# mB = 4.9064
# mw = 1.415
rb = 0.12
rw = 0.05
l = 0.208
Ib = 0.01165
IB = 0.3
Iw = 0.00004566491
g = -9.81
IB_xy = 0.3
Iw_xy = 0.0002770398
###############
A_mat_3_2 = -(g*(rb**4)*(l*mB + mw*rb + mw*rw)*( (mw*(rw**3)) - Iw*rb + (l*mB*(rw**2)) + (mw*rb*(rw**2)))) / ( (IB*Ib*(rw**2)) + (IB*Iw*(rb**2)) + (Ib*Iw*(rb**2)) + (Iw*mB*(rb**4)) + (Iw*mb*(rb**4)) + (Ib*mw*(rw**4)) + (4*Iw*mw*(rb**4)) + (2*Iw*l*mB*(rb**3)) + (2*Ib*mw*rb*(rw**3)) + (4*Iw*mw*(rb**3)*rw) - ((l**2)*(mB**2)*(rb**2)*(rw**2)) + (IB*mB*(rb**2)*(rw**2)) + (IB*mb*(rb**2)*(rw**2)) + (IB*mw*(rb**2)*(rw**2)) + (Ib*mw*(rb**2)*(rw**2)) + (Iw*mw*(rb**2)*(rw**2)) + (mB*mw*(rb**2)*(rw**4)) + (2*mB*mw*(rb**3)*(rw**3)) + (mB*mw*(rb**4)*(rw**2)) + (mb*mw*(rb**2)*(rw**4)) + (2*mb*mw*(rb**3)*(rw**3)) + (mb*mw*(rb**4)*(rw**2)) - (2*l*mB*mw*(rb**2)*(rw**3)) - (2*l*mB*mw*(rb**3)*(rw**2)))
A_mat_4_2 = (g*(l*mB + mw*rb + mw*rw)*( (Ib*(rw**2)) + (Iw*(rb**2)) + (mB*(rb**2)*(rw**2)) + (mb*(rb**2)*(rw**2)) + (mw*(rb**2)*(rw**2))))/( (IB*Ib*(rw**2)) + (IB*Iw*(rb**2)) + (Ib*Iw*(rb**2)) + (Iw*mB*(rb**4)) + (Iw*mb*(rb**4)) + (Ib*mw*(rw**4)) + (4*Iw*mw*(rb**4)) + (2*Iw*l*mB*(rb**3)) + (2*Ib*mw*rb*(rw**3)) + (4*Iw*mw*(rb**3)*rw) - ((l**2)*(mB**2)*(rb**2)*(rw**2)) + (IB*mB*(rb**2)*(rw**2)) + (IB*mb*(rb**2)*(rw**2)) + (IB*mw*(rb**2)*(rw**2)) + (Ib*mw*(rb**2)*(rw**2)) + (Iw*mw*(rb**2)*(rw**2)) + (mB*mw*(rb**2)*(rw**4)) + (2*mB*mw*(rb**3)*(rw**3)) + (mB*mw*(rb**4)*(rw**2)) + (mb*mw*(rb**2)*(rw**4)) + (2*mb*mw*(rb**3)*(rw**3)) + (mb*mw*(rb**4)*(rw**2)) - (2*l*mB*mw*(rb**2)*(rw**3)) - (2*l*mB*mw*(rb**3)*(rw**2)))
B_mat_3 = ((rb**2)*rw*( (2*mw*(rb**2)) + (3*mw*rb*rw) + (l*mB*rb) + (mw*rw**2) + IB))/( (IB*Ib*(rw**2)) + (IB*Iw*(rb**2)) + (Ib*Iw*(rb**2)) + (Iw*mB*(rb**4)) + (Iw*mb*(rb**4)) + (Ib*mw*(rw**4)) + (4*Iw*mw*(rb**4)) + (2*Iw*l*mB*(rb**3)) + (2*Ib*mw*rb*(rw**3)) + (4*Iw*mw*(rb**3)*rw) - ((l**2)*(mB**2)*(rb**2)*(rw**2)) + (IB*mB*(rb**2)*(rw**2)) + (IB*mb*(rb**2)*(rw**2)) + (IB*mw*(rb**2)*(rw**2)) + (Ib*mw*(rb**2)*(rw**2)) + (Iw*mw*(rb**2)*(rw**2)) + (mB*mw*(rb**2)*(rw**4)) + (2*mB*mw*(rb**3)*(rw**3)) + (mB*mw*(rb**4)*(rw**2)) + (mb*mw*rb**2*rw**4) + (2*mb*mw*(rb**3)*(rw**3)) + (mb*mw*(rb**4)*(rw**2)) - (2*l*mB*mw*(rb**2)*(rw**3)) - (2*l*mB*mw*(rb**3)*(rw**2)))
B_mat_4 = -(rb*rw*(Ib + (mB*(rb**2)) + (mb*(rb**2)) + (2*mw*rb**2) + (l*mB*rb) + (mw*rb*rw)))/((IB*Ib*(rw**2)) + (IB*Iw*(rb**2)) + (Ib*Iw*(rb**2)) + (Iw*mB*(rb**4)) + (Iw*mb*(rb**4)) + (Ib*mw*(rw**4)) + (4*Iw*mw*(rb**4)) + (2*Iw*l*mB*(rb**3)) + 2*Ib*mw*rb*rw**3 + 4*Iw*mw*rb**3*rw - l**2*mB**2*rb**2*rw**2 + IB*mB*rb**2*rw**2 + IB*mb*rb**2*rw**2 + IB*mw*rb**2*rw**2 + Ib*mw*rb**2*rw**2 + Iw*mw*rb**2*rw**2 + mB*mw*rb**2*rw**4 + 2*mB*mw*rb**3*rw**3 + mB*mw*rb**4*rw**2 + mb*mw*rb**2*rw**4 + 2*mb*mw*rb**3*rw**3 + mb*mw*rb**4*rw**2 - 2*l*mB*mw*rb**2*rw**3 - 2*l*mB*mw*rb**3*rw**2)
B_mat_xy = (-rb/( (IB_xy*rw**2) + (mw*rw**2*(rb+rw)**2 + (Iw_xy*rb**2))))
A_sys = np.array([ [0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, A_mat_3_2, 0, 0, 0, 0, 0, 0],
[A_mat_3_2, 0, 0, 0, 0, 0, 0, 0],
[0, A_mat_4_2, 0, 0, 0, 0, 0, 0],
[A_mat_4_2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0] ])
B_sys =np.array([ [0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[B_mat_3, 0, 0],
[0, B_mat_3, 0],
[B_mat_4, 0, 0],
[0, B_mat_4, 0],
[0, 0, B_mat_xy]])
C_sys = np.eye(8)
D_sys = np.zeros((8,3))
# Q_state = np.array([5, 5, 5, 30, 30, 30, 30, 30])
# R_state = np.array([10, 10, 10])
Q = np.eye(8) * Q_state
R = np.eye(3) * R_state
K, S, E = control.lqr(A_sys, B_sys, Q, R)
# print("A_sys =", A_sys)
# print("B_sys =", B_sys)
# print("C_sys =", C_sys)
# print("D_sys =", D_sys)
# print("K =", K)
# print(2*5**2*3*2)
return K
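# A minimal usage sketch (my addition, not part of the original script); the weights
# and state values below are illustrative only:
# Q_demo = np.array([5, 5, 5, 30, 30, 30, 30, 30])
# R_demo = np.array([10, 10, 10])
# K_demo = k_gain_calculator(Q_demo, R_demo)        # 3x8 state-feedback gain
# x_ref = np.zeros(8)                               # balance upright with zero velocity
# x_fb = np.array([0.02, -0.01, 0, 0, 0, 0, 0, 0])  # small measured body tilt (rad)
# Txyz = K_demo @ (x_ref - x_fb)                    # virtual torques about x, y, z
# Twheels = Txyz2wheel(Txyz)                        # map to the three omniwheel torques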
def euler_from_quaternion(x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return [roll_x, pitch_y, yaw_z] # in radians
def lqr_controller(x_ref,x_fb,K):
u= K @ (x_ref-x_fb)
return u
def euler_from_quaternion(x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return [roll_x, pitch_y, yaw_z] # in radians
def ball_velocity(pre_vel,now_vel,dt):
v = (now_vel-pre_vel)/dt
return v
def Txyz2wheel(Txyz):
alpha = 50.0*np.pi/180.0 # wheelaxis_theta
Twheels = np.array([[-np.cos(alpha) , 0 , -np.sin(alpha)],
[np.cos(alpha)/2 , -np.sqrt(3)*np.cos(alpha)/2 , -np.sin(alpha)],
[np.cos(alpha)/2 , np.sqrt(3)*np.cos(alpha)/2, -np.sin(alpha)]]) @ Txyz # -x axis
# Twheels = np.array([[ np.cos(alpha) , 0 , -np.sin(alpha)],
# [-np.cos(alpha)/2 , np.sqrt(3)*np.cos(alpha)/2 , -np.sin(alpha)],
# [-np.cos(alpha)/2 , -np.sqrt(3)*np.cos(alpha)/2, -np.sin(alpha)]]) @ Txyz # x axis
return Twheels | 6,666 | Python | 54.099173 | 724 | 0.419442 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf
def update_reference(source_prim_path, source_reference_path, target_reference_path):
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath())
reference_list = prim_spec.referenceList
refs = reference_list.GetAddedOrExplicitItems()
if len(refs) > 0:
for ref in refs:
if ref.assetPath == source_reference_path:
prim.GetReferences().RemoveReference(ref)
prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath())
prims = prims + prim.GetChildren()
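# Illustrative call (prim and asset paths are hypothetical): repoint every reference
# under /World/robot that currently targets old_meshes.usd to new_meshes.usd.
# update_reference("/World/robot", "old_meshes.usd", "new_meshes.usd")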
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
""" Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
""" Makes all mesh/geometry prims instanceable.
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
Makes a copy of the asset USD file, which will be used for referencing.
Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
"""
if create_xforms:
create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
asset_usd_path = save_as_path
instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
omni.client.copy(asset_usd_path, instance_usd_path)
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
if prim:
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
parent_prim = prim.GetParent()
if parent_prim and not parent_prim.IsInstance():
parent_prim.GetReferences().AddReference(assetPath=instance_usd_path, primPath=str(parent_prim.GetPath()))
parent_prim.SetInstanceable(True)
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
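# Hedged usage sketch (the asset paths below are placeholders, not taken from this
# repository): convert an imported robot asset so its mesh prims reference a cloned
# *_meshes.usd file and become instanceable.
# convert_asset_instanceable(
#     asset_usd_path="omniverse://localhost/Library/Robots/my_robot/my_robot.usd",
#     source_prim_path="/my_robot",
#     save_as_path="omniverse://localhost/Library/Robots/my_robot/my_robot_instanceable.usd",
#     create_xforms=True,
# )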
| 5,639 | Python | 43.761904 | 126 | 0.675829 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/import_mooncake.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import torch
from torch import roll
import omni
import omni.kit.commands
import omni.usd
import omni.client
import asyncio
import math
import weakref
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from omni.isaac.isaac_sensor import _isaac_sensor
from omni.isaac.core.prims import RigidPrimView
from .common import set_drive_parameters
from pxr import UsdLux, Sdf, Gf, UsdPhysics, Usd, UsdGeom
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder
from omni.isaac.core.utils.prims import get_prim_at_path
EXTENSION_NAME = "Import Mooncake"
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
""" Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def velocity2omega(v_x, v_y, w_z=0, d=0.105, r=0.1):
omega_0 = (v_x-d*w_z)/r
omega_1 = -(v_x-math.sqrt(3)*v_y+2*d*w_z)/(2*r)
omega_2 = -(v_x+math.sqrt(3)*v_y+2*d*w_z)/(2*r)
return [omega_0, omega_1, omega_2]
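# Worked example (illustrative): a pure +x translation of 0.1 m/s with the default
# geometry (d = 0.105 m, r = 0.1 m) gives
#   velocity2omega(0.1, 0.0, 0.0) -> [1.0, -0.5, -0.5]  # rad/s per wheel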
class Extension(omni.ext.IExt):
def on_startup(self, ext_id: str):
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_id
self._extension_path = ext_manager.get_extension_path(ext_id)
self._menu_items = [
MenuItemDescription(
name="Import Robots",
sub_menu=[
MenuItemDescription(name="Mooncake URDF", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())
],
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._build_ui()
def _build_ui(self):
self._window = omni.ui.Window(
EXTENSION_NAME, width=0, height=0, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
with ui.VStack(spacing=5, height=0):
title = "Import a Mooncake Robot via URDF"
doc_link = "https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html"
                overview = "This example shows you how to import an NVIDIA Mooncake robot via URDF.\n\nPress the 'Open in IDE' button to view the source code."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Command Panel",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5):
dict = {
"label": "Load Robot",
"type": "button",
"text": "Load",
"tooltip": "Load a Mooncake Robot into the Scene",
"on_clicked_fn": self._on_load_robot,
}
btn_builder(**dict)
dict = {
"label": "Configure Drives",
"type": "button",
"text": "Configure",
"tooltip": "Configure Joint Drives",
"on_clicked_fn": self._on_config_robot,
}
btn_builder(**dict)
dict = {
"label": "Spin Robot",
"type": "button",
"text": "move",
"tooltip": "Spin the Robot in Place",
"on_clicked_fn": self._on_config_drives,
}
btn_builder(**dict)
def on_shutdown(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
def _menu_callback(self):
self._window.visible = not self._window.visible
def _on_load_robot(self):
load_stage = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
asyncio.ensure_future(self._load_mooncake(load_stage))
async def _load_mooncake(self, task):
done, pending = await asyncio.wait({task})
if task in done:
viewport = omni.kit.viewport_legacy.get_default_viewport_window()
viewport.set_camera_position("/OmniverseKit_Persp", -1.02, 1.26, 0.5, True)
viewport.set_camera_target("/OmniverseKit_Persp", 2.20, -2.18, -1.60, True)
stage = omni.usd.get_context().get_stage()
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(9.81)
result, plane_path = omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, 0),
color=Gf.Vec3f(0.5),
)
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = False
# import_config.distance_scale = 100
import_config.fix_base = False
import_config.set_make_instanceable(True)
# import_config.set_instanceable_usd_path("./mooncake_instanceable.usd")
import_config.set_instanceable_usd_path("omniverse://localhost/Library/Robots/mooncake/mooncake_instanceable.usd")
import_config.set_default_drive_type(2) # 0=None, 1=position, 2=velocity
import_config.make_default_prim = True
import_config.create_physics_scene = True
result, robot_path = omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=self._extension_path + "/data/urdf/robots/mooncake/urdf/mooncake.urdf",
import_config=import_config,
dest_path="omniverse://localhost/Library/Robots/mooncake/mooncake.usd"
)
# convert_asset_instanceable(asset_usd_path=, # USD file path to the current existing USD asset
# source_prim_path=, # USD prim path of root prim of the asset
# save_as_path=None, # USD file path for modified USD stage. Defaults to None, will save in same file.
# create_xforms=True)
# create_parent_xforms(
# asset_usd_path='omniverse://localhost/Library/Robots/mooncake.usd',
# source_prim_path="/mooncake",
# save_as_path='omniverse://localhost/Library/Robots/mooncake_instanceable.usd'
# )
# make sure the ground plane is under root prim and not robot
# omni.kit.commands.execute(
# "MovePrimCommand", path_from=robot_path, path_to="/mooncake", keep_world_transform=True
# )
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
def _on_config_robot(self):
stage = omni.usd.get_context().get_stage()
# Make all rollers spin freely by removing extra drive API
for wheel_index in range(3):
for plate_index in range(2):
for roller_index in range(9):
prim_path = "/mooncake/wheel_{}/roller_{}_{}_{}_joint".format(wheel_index, wheel_index, plate_index, roller_index)
prim = stage.GetPrimAtPath(prim_path)
omni.kit.commands.execute(
"UnapplyAPISchemaCommand",
api=UsdPhysics.DriveAPI,
prim=prim,
api_prefix="drive",
multiple_api_token="angular",
)
        ## Attach IMU sensor ##
self._is = _isaac_sensor.acquire_imu_sensor_interface()
self.body_path = "/mooncake/base_plate"
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateImuSensor",
path="/sensor",
parent=self.body_path,
sensor_period=1 / 500.0, # 2ms
translation=Gf.Vec3d(0, 0, 17.15), # translate to surface of /mooncake/top_plate
orientation=Gf.Quatd(1, 0, 0, 0), # (x, y, z, w)
visualize=True,
)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/mooncake.xformOp:translate'),
value=Gf.Vec3d(0.0, 0.0, 0.3162),
prev=Gf.Vec3d(0.0, 0.0, 0.0))
# Set Damping & Stiffness
# Position Control: for position controlled joints, set a high stiffness and relatively low or zero damping.
# Velocity Control: for velocity controller joints, set a high damping and zero stiffness.
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path(
# '/mooncake/base_plate/wheel_0_joint.drive:angular:physics:stiffness'),
# value=0.0,
# prev=10000000.0)
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path(
# '/mooncake/base_plate/wheel_1_joint.drive:angular:physics:stiffness'),
# value=0.0,
# prev=10000000.0)
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/mooncake/base_plate/wheel_2_joint.drive:angular:physics:stiffness'),
# value=0.0,
# prev=10000000.0)
###############
# Create Ball #
###############
# result, ball_path = omni.kit.commands.execute(
# "CreatePrimWithDefaultXform",
# prim_type="Sphere",
# attributes={'radius':0.12},
# select_new_prim=True
# )
# omni.kit.commands.execute("MovePrimCommand", path_from='/mooncake/Sphere', path_to='/mooncake/ball')
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/mooncake/ball.xformOp:translate'),
# value=Gf.Vec3d(0.0, 0.0, -0.1962),
# prev=Gf.Vec3d(0.0, 0.0, 0.0))
# omni.kit.commands.execute('SetRigidBody',
# path=Sdf.Path('/mooncake/ball'),
# approximationShape='convexHull',
# kinematic=False)
#
# omni.kit.commands.execute('AddPhysicsComponent',
# usd_prim=get_prim_at_path('/mooncake/ball'),
# component='PhysicsMassAPI')
# # omni.kit.commands.execute('ApplyAPISchema',
# # api= 'pxr.UsdPhysics.MassAPI',
# # prim=get_prim_at_path('/mooncake/ball'))
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path('/mooncake/ball.physics:mass'),
# value=4.0,
# prev=0.0)
## USE 3 IMUs ##
# result, sensor = omni.kit.commands.execute(
# "IsaacSensorCreateImuSensor",
# path="/sensor0",
# parent=self.body_path,
# sensor_period=1 / 500.0, # 2ms
# offset=Gf.Vec3d(0, 15, 17.15), # translate to upper surface of /mooncake/top_plate
# orientation=Gf.Quatd(1, 0, 0, 0), # (x, y, z, w)
# visualize=True,
# )
# result, sensor = omni.kit.commands.execute(
# "IsaacSensorCreateImuSensor",
# path="/sensor1",
# parent=self.body_path,
# sensor_period=1 / 500.0, # 2ms
# offset=Gf.Vec3d(15*math.sqrt(3)/2, -15/2, 17.15), # translate to surface of /mooncake/top_plate
# orientation=Gf.Quatd(1, 0, 0, 0), # (x, y, z, w)
# visualize=True,
# )
# result, sensor = omni.kit.commands.execute(
# "IsaacSensorCreateImuSensor",
# path="/sensor2",
# parent=self.body_path,
# sensor_period=1 / 500.0, # 2ms
# offset=Gf.Vec3d(-15*math.sqrt(3)/2, -15/2, 17.15), # translate to surface of /mooncake/top_plate
# orientation=Gf.Quatd(1, 0, 0, 0), # (x, y, z, w)
# visualize=True,
# )
def _on_config_drives(self):
# self._on_config_robot() # make sure drives are configured first
stage = omni.usd.get_context().get_stage()
# set each axis to spin at a rate of 1 rad/s
axle_0 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/mooncake/base_plate/wheel_0_joint"), "angular")
axle_1 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/mooncake/base_plate/wheel_1_joint"), "angular")
axle_2 = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/mooncake/base_plate/wheel_2_joint"), "angular")
omega = velocity2omega(0, 0.1, 0)
print(omega)
set_drive_parameters(axle_0, "velocity", math.degrees(omega[0]), 0, math.radians(1e7))
set_drive_parameters(axle_1, "velocity", math.degrees(omega[1]), 0, math.radians(1e7))
set_drive_parameters(axle_2, "velocity", math.degrees(omega[2]), 0, math.radians(1e7))
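        # Note (descriptive, based on the calls above): the rad/s values from
        # velocity2omega() are converted with math.degrees() because USD angular drive
        # targets are expressed in degrees per second; stiffness 0 with a large damping
        # is the usual setup for velocity-controlled joints, matching the comments in
        # _on_config_robot().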
| 15,614 | Python | 46.752293 | 152 | 0.546433 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/test.py | # import omni.isaac.core.utils.torch.rotations as torch_rot
import torch
import torch.nn.functional as f
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
def quat_unit(a):
return normalize(a)
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([w, x, y, z], dim=-1).view(shape)
return quat
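# Note: the product above is a reduced-multiplication form of the Hamilton product in
# (w, x, y, z) ordering. Quick sanity check (illustrative): multiplying by the identity
# quaternion returns the other operand, e.g.
#   quat_mul(torch.tensor([[1., 0., 0., 0.]]), q) == q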
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((a[:, 0:1], -a[:, 1:]), dim=-1).view(shape)
def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Get the difference in radians between two quaternions.
Args:
a: first quaternion, shape (N, 4)
b: second quaternion, shape (N, 4)
Returns:
Difference in radians, shape (N,)
"""
b_conj = quat_conjugate(b)
mul = quat_mul(a, b_conj)
# 2 * torch.acos(torch.abs(mul[:, -1]))
return 2.0 * torch.asin(torch.clamp(torch.norm(mul[:, 1:], p=2, dim=-1), max=1.0))
def q2falling(q):
# q = f.normalize(q, p=1, dim=1)
norm_vec = f.normalize(q[:, 1:], p=1, dim=1)
print(norm_vec)
return 2 * torch.acos(q[:, 0]) * torch.sqrt((norm_vec[:, 0]*norm_vec[:, 0]+norm_vec[:, 1]*norm_vec[:, 1]))
# return 2*torch.asin(torch.norm(torch.mul(robots_orientation[:, 1:], up_vectors[:, 1:])))
# return quat_diff_rad(robots_orientation, up_vectors)
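# Expected output of the checks below (approximate): quaternions that rotate purely
# about z (w=0.71, z=0.71 and w=0.64, z=0.77) give a falling angle of ~0 deg, while
# w=0.71 with y=0.71 or x=0.71 gives ~89.6 deg, i.e. q2falling measures tilt away
# from upright and ignores yaw.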
test_a = torch.zeros((1, 4))
test_a[:, 0] = 1
test_b = torch.zeros_like(test_a)
test_b[:, 0] = 0.71
test_b[:, 3] = 0.71
# print(quat_diff_rad(test_a, test_b))
print(q2falling(test_b)/3.14*180)
test_b = torch.zeros_like(test_a)
test_b[:, 0] = 0.71
test_b[:, 2] = 0.71
print(q2falling(test_b)/3.14*180)
test_b = torch.zeros_like(test_a)
test_b[:, 0] = 0.71
test_b[:, 1] = 0.71
print(q2falling(test_b)/3.14*180)
test_b = torch.zeros_like(test_a)
test_b[:, 0] = 0.64
test_b[:, 3] = 0.77
print(q2falling(test_b)/3.14*180) | 2,416 | Python | 29.987179 | 110 | 0.550083 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/import_ball.py | #launch Isaac Sim before any other imports
#default first two lines in any standalone application
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False}) # we can also run as headless.
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
import numpy as np
world = World()
world.scene.add_default_ground_plane()
ball = world.scene.add(
DynamicSphere(
prim_path="/ball",
name="ball",
position=np.array([0, 0, 0.12]),
        radius=0.12, # medicine ball diameter 24 cm.
color=np.array([1.0, 0, 0]),
mass=4,
)
)
# Resetting the world needs to be called before querying anything related to an articulation specifically.
# It's recommended to always do a reset after adding your assets, for physics handles to be propagated properly
world.reset()
while True:
# position, orientation = fancy_cube.get_world_pose()
# linear_velocity = fancy_cube.get_linear_velocity()
# # will be shown on terminal
# print("Cube position is : " + str(position))
# print("Cube's orientation is : " + str(orientation))
# print("Cube's linear velocity is : " + str(linear_velocity))
# # we have control over stepping physics and rendering in this workflow
# # things run in sync
world.step(render=True) # execute one physics step and one rendering step
simulation_app.close() # close Isaac Sim | 1,485 | Python | 40.277777 | 110 | 0.674074 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/hello_rl.py | import gym
from stable_baselines3 import A2C
env = gym.make('CartPole-v1')
model = A2C('MlpPolicy', env, verbose=1)
model.learn(total_timesteps=10000)
obs = env.reset()
for i in range(1000):
action, _state = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
env.render()
if done:
obs = env.reset() | 355 | Python | 21.249999 | 59 | 0.676056 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/log2graph.py | import matplotlib.pyplot as plt
import pandas as pd
plt.xlim([0, 500000])
plt.ylim([0, 6000])
fig=plt.figure()
d = {}
for i, filename in enumerate(['log_11111', 'log_01111', 'log_10111', 'log_11011', 'log_11101', 'log_11110']):
# path = "F:/mooncake_policy_01111/log.txt"
f = open('log/'+filename+'.txt')
lines = f.readlines()
topic = lines[0][:-1]
lines = lines[1:]
ax=fig.add_subplot(6, 1, i+1)
# ax = plt.subplot(6, 1, i+1)
# ax.set_xlim(xmin=0.0, xmax=500000)
ax.set_ylim(ymin=0.0, ymax=600)
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.yaxis.set_major_locator(plt.MaxNLocator(10))
X, Y = [], []
for line in lines:
[x, y] = line[:-1].split('\t')
X.append(x)
Y.append(y)
ax.plot(X, Y)
d[topic+'x'] = X.copy()
d[topic+'y'] = Y.copy()
plt.show()
df = pd.DataFrame(data=d)
df.to_csv('log.csv') | 885 | Python | 27.580644 | 109 | 0.579661 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/rl_luna_lander.py | import gym
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy
# Create environment
env = gym.make('LunarLander-v2')
# Instantiate the agent
model = DQN('MlpPolicy', env, verbose=1)
# Train the agent
model.learn(total_timesteps=int(2e5))
# Save the agent
model.save("dqn_lunar")
del model # delete trained model to demonstrate loading
# Load the trained agent
# NOTE: if you have loading issue, you can pass `print_system_info=True`
# to compare the system on which the model was trained vs the current one
# model = DQN.load("dqn_lunar", env=env, print_system_info=True)
model = DQN.load("dqn_lunar", env=env)
# Evaluate the agent
# NOTE: If you use wrappers with your environment that modify rewards,
# this will be reflected here. To evaluate with original rewards,
# wrap environment in a "Monitor" wrapper before other wrappers.
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
# Enjoy trained agent
obs = env.reset()
for i in range(1000):
action, _states = model.predict(obs, deterministic=True)
obs, rewards, dones, info = env.step(action)
env.render() | 1,174 | Python | 32.571428 | 85 | 0.739353 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/test/test2.py | import ray
from ray.rllib.agents.ppo import PPOTrainer
ray.init() # Skip or set to ignore if already called
config = {'gamma': 0.9,
'lr': 1e-2,
'num_workers': 4,
'train_batch_size': 1000,
'model': {
'fcnet_hiddens': [128, 128]
}}
trainer = PPOTrainer(env='CartPole-v0', config=config)
for i in range(100):
print(trainer.train()) | 396 | Python | 29.538459 | 54 | 0.575758 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/test/test3.py | # from env import MoonCakeEnv
import ray
import ray.rllib.agents.ppo as ppo
import shutil, os
CHECKPOINT_ROOT = "tmp/ppo/taxi"
shutil.rmtree(CHECKPOINT_ROOT, ignore_errors=True, onerror=None)
ray_results = os.getenv("HOME") + "/ray_results/"
shutil.rmtree(ray_results, ignore_errors=True, onerror=None)
ray.shutdown()
ray.init(ignore_reinit_error=True)
SELECT_ENV = "Taxi-v3"
config = ppo.DEFAULT_CONFIG.copy()
config["log_level"] = "WARN"
agent = ppo.PPOTrainer(config, env=SELECT_ENV)
N_ITER = 30
s = "{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} saved {}"
for n in range(N_ITER):
result = agent.train()
file_name = agent.save(CHECKPOINT_ROOT)
print(s.format(
n + 1,
result["episode_reward_min"],
result["episode_reward_mean"],
result["episode_reward_max"],
result["episode_len_mean"],
file_name
)) | 845 | Python | 23.171428 | 64 | 0.680473 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/scripts/test/test.py | # Import the RL algorithm (Trainer) we would like to use.
from ray.rllib.agents.ppo import PPOTrainer
# Configure the algorithm.
config = {
# Environment (RLlib understands openAI gym registered strings).
"env": "Taxi-v3",
    # Use 2 environment workers (aka "rollout workers") that collect samples
    # from their own environment clone(s) in parallel.
"num_workers": 2,
# Change this to "framework: torch", if you are using PyTorch.
# Also, use "framework: tf2" for tf2.x eager execution.
"framework": "tf",
# Tweak the default model provided automatically by RLlib,
# given the environment's observation- and action spaces.
"model": {
"fcnet_hiddens": [64, 64],
"fcnet_activation": "relu",
},
# Set up a separate evaluation worker set for the
# `trainer.evaluate()` call after training (see below).
"evaluation_num_workers": 1,
# Only for evaluation runs, render the env.
"evaluation_config": {
"render_env": True,
},
}
# Create our RLlib Trainer.
trainer = PPOTrainer(config=config)
# Run it for n training iterations. A training iteration includes
# parallel sample collection by the environment workers as well as
# loss calculation on the collected batch and a model update.
for _ in range(30):
print(trainer.train())
# Evaluate the trained Trainer (and render each timestep to the shell's
# output).
trainer.evaluate()
| 1,423 | Python | 33.731706 | 71 | 0.690794 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/environments/env_mooncake_vector.py | import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.imu_sensor import _imu_sensor
| 137 | Python | 12.799999 | 45 | 0.80292 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/demo_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def initialize_demo(config, env, init_sim=True):
from omniisaacgymenvs.demos.anymal_terrain import AnymalTerrainDemo
# Mappings from strings to environments
task_map = {
"AnymalTerrain": AnymalTerrainDemo,
}
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
sim_config = SimConfig(config)
cfg = sim_config.config
task = task_map[cfg["task_name"]](
name=cfg["task_name"], sim_config=sim_config, env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)
return task | 2,167 | Python | 44.166666 | 107 | 0.757268 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/task_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def initialize_task(config, env, init_sim=True):
from omniisaacgymenvs.tasks.allegro_hand import AllegroHandTask
from omniisaacgymenvs.tasks.ant import AntLocomotionTask
from omniisaacgymenvs.tasks.anymal import AnymalTask
from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask
from omniisaacgymenvs.tasks.ball_balance import BallBalanceTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.tasks.franka_cabinet import FrankaCabinetTask
from omniisaacgymenvs.tasks.humanoid import HumanoidLocomotionTask
from omniisaacgymenvs.tasks.ingenuity import IngenuityTask
from omniisaacgymenvs.tasks.quadcopter import QuadcopterTask
from omniisaacgymenvs.tasks.shadow_hand import ShadowHandTask
from omniisaacgymenvs.tasks.crazyflie import CrazyflieTask
# Mappings from strings to environments
task_map = {
"AllegroHand": AllegroHandTask,
"Ant": AntLocomotionTask,
"Anymal": AnymalTask,
"AnymalTerrain": AnymalTerrainTask,
"BallBalance": BallBalanceTask,
"Cartpole": CartpoleTask,
"FrankaCabinet": FrankaCabinetTask,
"Humanoid": HumanoidLocomotionTask,
"Ingenuity": IngenuityTask,
"Quadcopter": QuadcopterTask,
"Crazyflie": CrazyflieTask,
"ShadowHand": ShadowHandTask,
"ShadowHandOpenAI_FF": ShadowHandTask,
"ShadowHandOpenAI_LSTM": ShadowHandTask,
}
from .config_utils.sim_config import SimConfig
sim_config = SimConfig(config)
cfg = sim_config.config
task = task_map[cfg["task_name"]](
name=cfg["task_name"], sim_config=sim_config, env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)
return task | 3,370 | Python | 45.819444 | 107 | 0.755786 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/domain_randomization/randomize.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import omni
import omni.replicator.core as rep
import omni.replicator.isaac as dr
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
class Randomizer():
def __init__(self, sim_config):
self._cfg = sim_config.task_config
self._config = sim_config.config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
def apply_on_startup_domain_randomization(self, task):
if self.randomize:
torch.manual_seed(self._config["seed"])
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
for opt in randomization_params.keys():
if opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
params = randomization_params["rigid_prim_views"][view_name][attribute]
if attribute in ["scale", "mass", "density"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.rigid_prim_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True,
)
elif attribute == "mass":
self.randomize_mass_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
elif attribute == "density":
self.randomize_density_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
if opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
for attribute, params in randomization_params["articulation_views"][view_name].items():
params = randomization_params["articulation_views"][view_name][attribute]
if attribute in ["scale"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.articulated_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True
)
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("On Startup Domain randomization will not be applied.")
def set_up_domain_randomization(self, task):
if self.randomize:
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
rep.set_global_seed(self._config["seed"])
with dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
for opt in randomization_params.keys():
if opt == "observations":
self._set_up_observations_randomization(task)
elif opt == "actions":
self._set_up_actions_randomization(task)
elif opt == "simulation":
if randomization_params["simulation"] is not None:
self.distributions["simulation"] = dict()
dr.physics_view.register_simulation_context(task._env._world)
for attribute, params in randomization_params["simulation"].items():
self._set_up_simulation_randomization(attribute, params)
elif opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
self.distributions["rigid_prim_views"] = dict()
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
self.distributions["rigid_prim_views"][view_name] = dict()
dr.physics_view.register_rigid_prim_view(
rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[view_name],
)
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
if attribute not in ["scale", "density"]:
self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
elif opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
self.distributions["articulation_views"] = dict()
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
self.distributions["articulation_views"][view_name] = dict()
dr.physics_view.register_articulation_view(
articulation_view=task._env._world.scene._scene_registry.articulated_views[view_name],
)
for attribute, params in randomization_params["articulation_views"][view_name].items():
if attribute not in ["scale"]:
self._set_up_articulation_view_randomization(view_name, attribute, params)
rep.orchestrator.run()
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("Domain randomization will not be applied.")
def _set_up_observations_randomization(self, task):
task.randomize_observations = True
self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
if self._observations_dr_params is None:
raise ValueError(f"Observations randomization parameters are not provided.")
if "on_reset" in self._observations_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following observations on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_reset")] = np.array(self._observations_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._observations_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following observations on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_interval")] = np.array(self._observations_dr_params["on_interval"]["distribution_parameters"])
self._observations_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["sim_device"])
self._observations_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["sim_device"])
def _set_up_actions_randomization(self, task):
task.randomize_actions = True
self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
if self._actions_dr_params is None:
raise ValueError(f"Actions randomization parameters are not provided.")
if "on_reset" in self._actions_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following actions on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_reset")] = np.array(self._actions_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._actions_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following actions on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_interval")] = np.array(self._actions_dr_params["on_interval"]["distribution_parameters"])
self._actions_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["sim_device"])
self._actions_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["sim_device"])
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
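    # Hedged usage sketch (the call site is assumed, not shown in this file): a task
    # would typically perturb incoming actions before applying them, e.g.
    #   if randomizer.randomize:
    #       actions = randomizer.apply_actions_randomization(actions, reset_buf)
    # where reset_buf marks environments reset this step so their correlated
    # (per-episode) noise is re-sampled; apply_observations_randomization above is
    # used the same way on the observation buffer.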
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(randomize_ids), buffer.shape[1]), device=self._config["sim_device"])
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["sim_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["sim_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(reset_ids), buffer.shape[1]), device=self._config["sim_device"])
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["sim_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["sim_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _set_up_simulation_randomization(self, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
if attribute in dr.SIMULATION_CONTEXT_ATTRIBUTES:
self.distributions["simulation"][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"operation": params["on_reset"]["operation"]}
self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_simulation_context(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"operation": params["on_interval"]["operation"]}
self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_simulation_context(**kwargs)
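    # Illustrative shape of the inputs this method expects (attribute name is an
    # example of a simulation-context attribute; the numbers are made up):
    #   attribute = "gravity"
    #   params = {"on_reset": {"operation": "additive",
    #                          "distribution": "gaussian",
    #                          "distribution_parameters": [[0.0, 0.0, 0.0], [0.0, 0.0, 0.4]]}}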
def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
if attribute in dr.RIGID_PRIM_ATTRIBUTES:
self.distributions["rigid_prim_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_rigid_prim_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_rigid_prim_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _set_up_articulation_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
if attribute in dr.ARTICULATION_ATTRIBUTES:
self.distributions["articulation_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_articulation_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_articulation_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]]*dimension, [distribution_parameters[1]]*dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
dist_params = [[distribution_parameters[0]] * (dimension // 3), [distribution_parameters[1]] * (dimension // 3)]
else:
raise ValueError(f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions.")
return dist_params
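    # Accepted shapes for distribution_parameters, illustrated for dimension == 3:
    #   [0.5, 1.5]                          -> [[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]
    #   [[0.5, 0.4, 0.3], [1.5, 1.4, 1.3]]  -> used as-is
    #   a (2, 3) spec for material_properties or body_inertias -> tiled across every
    #   body/material bucket (dimension // 3 copies of each row).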
def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
if len(distribution_parameters) == 2:
self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for observations {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
elif distribution_path[0] == "actions":
if len(distribution_parameters) == 2:
self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for actions {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]})
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]})
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
return dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(size, device=device) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand(size, device=device) + np.log(distribution_parameters[0]))
        else:
            raise ValueError(f"The specified {distribution} distribution is not supported.")
        return noise
def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
scales = view.get_local_scales()
if sync_dim_noise:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3,1).T
else:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters))
noise = torch.zeros((view.count, 3), device=view._device)
for i in range(3):
noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
if operation == "additive":
scales += noise
elif operation == "scaling":
scales *= noise
elif operation == "direct":
scales = noise
else:
print(f"The specified {operation} operation type is not supported.")
view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} mass", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} density", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
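# A minimal usage sketch of the start-up randomization helpers above.
# `randomizer` is assumed to be an instance of this randomization class and
# `rigid_prim_view` an already-initialized RigidPrimView; both would normally
# be created by the task setup, not by this module.
def _example_startup_randomization(randomizer, rigid_prim_view):
    # Scale every prim by a factor drawn uniformly from [0.9, 1.1].
    randomizer.randomize_scale_on_startup(
        view=rigid_prim_view,
        distribution="uniform",
        distribution_parameters=[0.9, 1.1],
        operation="scaling",
        sync_dim_noise=True,
    )
    # Add zero-mean Gaussian noise (std 0.05 kg) to every prim's mass.
    randomizer.randomize_mass_on_startup(
        view=rigid_prim_view,
        distribution="gaussian",
        distribution_parameters=[0.0, 0.05],
        operation="additive",
    )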
| 41,504 | Python | 70.683938 | 257 | 0.602593 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/rlgames/rlgames_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
from typing import Callable
class RLGPUAlgoObserver(AlgoObserver):
"""Allows us to log stats from the env along with the algorithm running stats. """
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.ep_infos = []
self.direct_info = {}
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info"
if isinstance(infos, dict):
if 'episode' in infos:
self.ep_infos.append(infos['episode'])
if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env
self.direct_info = {}
for k, v in infos.items():
# only log scalars
if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
self.direct_info[k] = v
def after_clear_stats(self):
self.mean_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.ep_infos:
for key in self.ep_infos[0]:
infotensor = torch.tensor([], device=self.algo.device)
for ep_info in self.ep_infos:
# handle scalar and zero dimensional tensor infos
if not isinstance(ep_info[key], torch.Tensor):
ep_info[key] = torch.Tensor([ep_info[key]])
if len(ep_info[key].shape) == 0:
ep_info[key] = ep_info[key].unsqueeze(0)
infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device)))
value = torch.mean(infotensor)
self.writer.add_scalar('Episode/' + key, value, epoch_num)
self.ep_infos.clear()
for k, v in self.direct_info.items():
self.writer.add_scalar(f'{k}/frame', v, frame)
self.writer.add_scalar(f'{k}/iter', v, epoch_num)
self.writer.add_scalar(f'{k}/time', v, total_time)
if self.mean_scores.current_size > 0:
mean_scores = self.mean_scores.get_mean()
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
self.writer.add_scalar('scores/time', mean_scores, total_time)
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
def step(self, action):
return self.env.step(action)
def reset(self):
return self.env.reset()
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
if self.env.num_states > 0:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
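# A minimal sketch of how these classes are typically registered with rl_games
# so the runner can construct the environment by name. `env` is assumed to be
# an already-constructed Isaac Sim vec-env supplied by the launcher script.
def register_rlgpu_env(env):
    vecenv.register(
        'RLGPU',
        lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
    env_configurations.register('rlgpu', {
        'vecenv_type': 'RLGPU',
        'env_creator': lambda **kwargs: env,
    })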
| 5,154 | Python | 42.319327 | 121 | 0.642608 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/config_utils/sim_config.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.utils.config_utils.default_scene_params import *
import copy
import omni.usd
import numpy as np
import torch
class SimConfig():
def __init__(self, config: dict = None):
if config is None:
config = dict()
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
if self._config["headless"] == True and not self._sim_params["enable_cameras"]:
self._sim_params["use_flatcache"] = False
self._sim_params["enable_viewport"] = False
def _parse_config(self):
# general sim parameter
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def add_fixed_base(self, name, prim, cfg, value=None):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "fixed_base")
if value:
root_joint_path = f"{prim.GetPath()}_fixedBaseRootJoint"
joint = UsdPhysics.Joint.Define(stage, root_joint_path)
joint.CreateBody1Rel().SetTargets([prim.GetPath()])
self.apply_articulation_settings(name, joint.GetPrim(), cfg, force_articulation=True)
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg, force_articulation=False):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
if not is_articulation and force_articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Apply(prim)
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
# parse through all children prims
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI(prim)
collision_body = UsdPhysics.CollisionAPI(prim)
articulation = UsdPhysics.ArticulationRootAPI(prim)
if rb:
self.apply_rigid_body_settings(name, prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
if not force_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_articulation_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
children_prims = prim.GetPrim().GetChildren()
prims = prims + children_prims
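# A minimal usage sketch, assuming a hand-written config dict and an already
# open USD stage; the prim path and the "mooncake" actor entry are
# illustrative values, not ones taken from the shipped task configs.
def _example_apply_robot_settings(stage):
    cfg = {
        "test": False,
        "headless": True,
        "device_id": 0,
        "task": {"sim": {"use_gpu_pipeline": False,
                         "mooncake": {"solver_position_iteration_count": 8}}},
    }
    sim_config = SimConfig(cfg)
    robot_prim = stage.GetPrimAtPath("/World/envs/env_0/Mooncake/mooncake")
    # Pushes the merged per-actor options (USD defaults + config overrides)
    # onto the articulation and all of its child rigid bodies and colliders.
    sim_config.apply_articulation_settings(
        "mooncake", robot_prim, sim_config.parse_actor_config("mooncake"))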
| 18,299 | Python | 44.29703 | 122 | 0.639816 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/config_utils/default_scene_params.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
default_physx_params = {
### Per-scene settings
"use_gpu": False,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001
}
default_physics_material = {
"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0
}
default_sim_params = {
"gravity": [0.0, 0.0, -9.81],
"dt": 1.0 / 60.0,
"substeps": 1,
"use_gpu_pipeline": True,
"add_ground_plane": True,
"add_distant_light": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"default_physics_material": default_physics_material
}
default_actor_options = {
# -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD.
# If an attribute value is not explicitly authored in USD, add one with the value given here,
# which overrides the USD default.
"override_usd_defaults": False,
"fixed_base": -1,
"enable_self_collisions": -1,
"enable_gyroscopic_forces": -1,
"solver_position_iteration_count": -1,
"solver_velocity_iteration_count": -1,
"sleep_threshold": -1,
"stabilization_threshold": -1,
"max_depenetration_velocity": -1,
"density": -1,
"mass": -1,
"contact_offset": -1,
"rest_offset": -1
}
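# A small sketch of the -1 convention used by default_actor_options: any
# option left at -1 defers to the value authored in USD (or, when
# override_usd_defaults is set, to default_physx_params). The override dict
# passed in is assumed to come from a task's per-actor config section.
def _example_merge_actor_options(actor_overrides):
    merged = dict(default_actor_options)
    for key, value in actor_overrides.items():
        if key in merged and value != -1:
            merged[key] = value
    return merged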
| 4,758 | Python | 41.115044 | 119 | 0.683901 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/config_utils/path_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import carb
from hydra.utils import to_absolute_path
import os
def is_valid_local_file(path):
return os.path.isfile(path)
def is_valid_ov_file(path):
import omni.client
result, entry = omni.client.stat(path)
return result == omni.client.Result.OK
def download_ov_file(source_path, target_path):
import omni.client
result = omni.client.copy(source_path, target_path)
if result == omni.client.Result.OK:
return True
return False
def break_ov_path(path):
import omni.client
return omni.client.break_url(path)
def retrieve_checkpoint_path(path):
# check if it's a local path
if is_valid_local_file(path):
return to_absolute_path(path)
# check if it's an OV path
elif is_valid_ov_file(path):
ov_path = break_ov_path(path)
file_name = os.path.basename(ov_path.path)
target_path = f"checkpoints/{file_name}"
copy_to_local = download_ov_file(path, target_path)
return to_absolute_path(target_path)
else:
carb.log_error(f"Invalid checkpoint path: {path}")
return None | 2,656 | Python | 38.656716 | 80 | 0.735693 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/hydra_cfg/hydra_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
## OmegaConf & Hydra Config
# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
# Allows us to resolve default arguments which are copied in multiple places in the config. Used primarily for
# num_envs.
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg)
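# A minimal sketch of how the resolvers registered above behave; the keys and
# values below are illustrative and are not taken from the task configs.
if __name__ == "__main__":
    cfg = OmegaConf.create({
        "device": "cuda",
        "pipeline": "${if:${eq:${device},cuda},gpu,cpu}",
    })
    print(cfg.pipeline)  # expected to resolve to "gpu"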
| 2,207 | Python | 51.571427 | 110 | 0.775714 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/hydra_cfg/reformat.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omegaconf import DictConfig, OmegaConf
from typing import Dict
def omegaconf_to_dict(d: DictConfig)->Dict:
"""Converts an omegaconf DictConfig to a python Dict, respecting variable interpolation."""
ret = {}
for k, v in d.items():
if isinstance(v, DictConfig):
ret[k] = omegaconf_to_dict(v)
else:
ret[k] = v
return ret
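# A minimal usage sketch: convert a hand-built DictConfig into a plain dict
# and pretty-print it with print_dict (defined just below). The config
# contents are placeholders, not values used by the tasks.
def _example_omegaconf_to_dict():
    cfg = OmegaConf.create({"task": {"name": "Mooncake", "env": {"numEnvs": 4}}})
    as_dict = omegaconf_to_dict(cfg)
    print_dict(as_dict)
    return as_dict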
def print_dict(val, nesting: int = -4, start: bool = True):
"""Outputs a nested dictionory."""
if type(val) == dict:
if not start:
print('')
nesting += 4
for k in val:
print(nesting * ' ', end='')
print(k, end=': ')
print_dict(val[k], nesting, start=False)
else:
print(val) | 2,307 | Python | 41.74074 | 95 | 0.70958 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/terrain_utils/terrain_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema
def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None,):
"""
Generate a uniform noise terrain
Parameters
terrain (SubTerrain): the terrain
min_height (float): the minimum height of the terrain [meters]
max_height (float): the maximum height of the terrain [meters]
step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
"""
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
terrain.length * terrain.horizontal_scale / downsampled_scale)))
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.):
"""
Generate a sloped terrain
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x-xx)) / center_x
yy = (center_y - np.abs(center_y-yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.):
"""
    Generate a terrain with randomly placed rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i-width, 4))
start_j = np.random.choice(range(0, j-length, 4))
terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
        num_waves (int): number of sine waves across the terrain length
        amplitude (float): amplitude of the sine waves [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5*amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype(
terrain.height_field_raw.dtype)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
    Generate a stairs terrain
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.):
"""
Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
        step_height (float): the height of the step [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
stone_distance (float): distance between stones (i.e size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height-1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
"""
Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:
If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
                      B(x2,y2)
                     /|
                    / |
                   /  |
    (x1,y1)A---A'(x2',y1)
Parameters:
height_field_raw (np.array): input heightfield
horizontal_scale (float): horizontal scale of the heightfield [meters]
vertical_scale (float): vertical scale of the heightfield [meters]
slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
Returns:
vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
"""
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols)
x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold)
move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold)
move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold)
move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold)
move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold)
move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold)
xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale
yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32)
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols-1) + i*num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2*i*(num_cols-1)
stop = start + 2*(num_cols-1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start+1:stop:2, 0] = ind0
triangles[start+1:stop:2, 1] = ind2
triangles[start+1:stop:2, 2] = ind3
return vertices, triangles
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces))
terrain = XFormPrim(prim_path="/World/terrain",
name="terrain",
position=position,
orientation=orientation)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
# collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
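# A minimal usage sketch: build one randomly perturbed sub-terrain, convert it
# to a triangle mesh, and add it to an open USD stage. The scales and noise
# ranges below are example values, not the ones used by the tasks.
def _example_build_terrain(stage):
    horizontal_scale, vertical_scale = 0.1, 0.005
    terrain = SubTerrain(width=128, length=128,
                         vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
    terrain = random_uniform_terrain(terrain, min_height=-0.05, max_height=0.05,
                                     step=0.01, downsampled_scale=0.5)
    vertices, triangles = convert_heightfield_to_trimesh(
        terrain.height_field_raw, horizontal_scale, vertical_scale, slope_threshold=1.5)
    add_terrain_to_stage(stage, vertices, triangles, position=np.array([0.0, 0.0, 0.0]))
    return vertices, triangles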
| 17,478 | Python | 42.917085 | 147 | 0.655166 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/terrain_utils/create_terrain_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from terrain_utils import *
class TerrainCreation(BaseTask):
def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
BaseTask.__init__(self, name=name, offset=offset)
self._num_envs = num_envs
self._num_per_row = num_per_row
self._env_spacing = env_spacing
self._device = "cpu"
self._cloner = GridCloner(self._env_spacing, self._num_per_row)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
@property
def default_base_env_path(self):
return "/World/envs"
@property
def default_zero_env_path(self):
return f"{self.default_base_env_path}/env_0"
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.get_terrain()
self.get_ball()
super().set_up_scene(scene)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
print(f"cloning {self._num_envs} environments...")
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths
)
return
def get_terrain(self):
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
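        # Each terrain type below fills one num_rows-wide strip of this stacked heightfield;
        # heights are stored as int16 multiples of vertical_scale (here 5 mm per unit).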
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
def get_ball(self):
ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
name="ball",
translation=np.array([0.0, 0.0, 1.0]),
mass=0.5,
radius=0.2,)
def post_reset(self):
for i in range(self._num_envs):
ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
color = 0.5 + 0.5 * np.random.random(3)
visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
binding_api = UsdShade.MaterialBindingAPI(ball_prim)
binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
num_envs = 800
num_per_row = 80
env_spacing = 0.56*2
terrain_creation_task = TerrainCreation(name="TerrainCreation",
num_envs=num_envs,
num_per_row=num_per_row,
env_spacing=env_spacing,
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
simulation_app.close() | 7,869 | Python | 43.213483 | 166 | 0.650654 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/mooncake/utils/usd_utils/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf
def update_reference(source_prim_path, source_reference_path, target_reference_path):
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath())
reference_list = prim_spec.referenceList
refs = reference_list.GetAddedOrExplicitItems()
if len(refs) > 0:
for ref in refs:
if ref.assetPath == source_reference_path:
prim.GetReferences().RemoveReference(ref)
prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath())
prims = prims + prim.GetChildren()
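# Usage sketch (paths are hypothetical): repoint every reference to an old asset file found under
# /World/Robot to a new USD file.
# update_reference("/World/Robot", "omniverse://localhost/old/robot.usd", "omniverse://localhost/new/robot.usd")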
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
""" Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
""" Makes all mesh/geometry prims instanceable.
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
Makes a copy of the asset USD file, which will be used for referencing.
Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
"""
if create_xforms:
create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
asset_usd_path = save_as_path
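        # Note: when create_xforms is True the stage is re-opened from save_as_path below,
        # so a save_as_path is assumed to be provided in that case.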
instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
omni.client.copy(asset_usd_path, instance_usd_path)
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
if prim:
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
parent_prim = prim.GetParent()
if parent_prim and not parent_prim.IsInstance():
parent_prim.GetReferences().AddReference(assetPath=instance_usd_path, primPath=str(parent_prim.GetPath()))
parent_prim.SetInstanceable(True)
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
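# Usage sketch (asset and prim paths are hypothetical):
# convert_asset_instanceable(
#     asset_usd_path="omniverse://localhost/Library/obike.usd",
#     source_prim_path="/obike",
#     save_as_path="omniverse://localhost/Library/obike_instanceable.usd",
# )
# This writes a "<asset>_meshes.usd" copy next to the asset and turns the mesh parent prims into
# instanceable references to it.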
| 5,639 | Python | 43.761904 | 126 | 0.675829 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_obike_dqn.py | import collections
import time
import sys
import gym
import numpy as np
import statistics
import tensorflow as tf
import tqdm
import math
from matplotlib import pyplot as plt
from tensorflow.keras import layers
from typing import Any, List, Sequence, Tuple
import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
## Create environments ##
from env_obike import Env
env = Env(physics_dt=1/100, rendering_dt=1/30, headless=False)
state = env.reset()
# Small epsilon value for stabilizing division operations
eps = np.finfo(np.float32).eps.item()
class ActorCritic(tf.keras.Model):
"""Combined actor-critic network."""
def __init__(
self,
num_actions: int,
num_hidden_units: int):
"""Initialize."""
super().__init__()
self.common = layers.Dense(num_hidden_units, activation="relu")
# self.lstm = layers.LSTM(units=num_hidden_units, activation="relu", recurrent_activation="sigmoid", stateful=True)
self.actor = layers.Dense(num_actions)
self.critic = layers.Dense(1)
def call(self, inputs: tf.Tensor, states = None) -> Tuple[tf.Tensor, tf.Tensor]:
x = inputs
x = self.common(x)
# if states is None: states = self.lstm.get_initial_state(x)
# else: states = self.lstm.states
# x = self.lstm(inputs, initial_state=states)
return self.actor(x), self.critic(x)
num_actions = env.num_action_space # 1
num_hidden_units = 16
model = ActorCritic(num_actions, num_hidden_units)
# Wrap OpenAI Gym's `env.step` call as an operation in a TensorFlow function.
# This would allow it to be included in a callable TensorFlow graph.
def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns state, reward and done flag given an action."""
state, reward, done, _ = env.step(action, render=True)
return (state.astype(np.float32),
np.array(reward, np.int32),
np.array(done, np.int32))
def tf_env_step(action: tf.Tensor) -> List[tf.Tensor]:
return tf.numpy_function(env_step, [action], [tf.float32, tf.int32, tf.int32])
def run_episode(
initial_state: tf.Tensor,
model: tf.keras.Model,
max_steps: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Runs a single episode to collect training data."""
action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
rewards = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)
initial_state_shape = initial_state.shape
state = initial_state
for t in tf.range(max_steps):
# Convert state into a batched tensor (batch size = 1)
state = tf.expand_dims(state, 0)
    # Run the model to get the action mean and S.D.
action_logits_t, value = model(state)
# Sample next action from the action probability distribution
# action = tf.random.categorical(action_logits_t, 1)[0, 0]
action = tf.random.normal(shape=[1], mean=action_logits_t, stddev=value)[0][0]
action_probs_t = tf.nn.softmax(action_logits_t)
# Store critic values
values = values.write(t, tf.squeeze(value))
# Store log probability of the action chosen
# action_probs = action_probs.write(t, action_probs_t[0, action])
action_probs = action_probs.write(t, action_logits_t[0, 0])
# Apply action to the environment to get next state and reward
state, reward, done = tf_env_step(action)
state.set_shape(initial_state_shape)
# Store reward
rewards = rewards.write(t, reward)
if tf.cast(done, tf.bool):
break
action_probs = action_probs.stack()
values = values.stack()
rewards = rewards.stack()
return action_probs, values, rewards
def get_expected_return(
rewards: tf.Tensor,
gamma: float,
standardize: bool = True) -> tf.Tensor:
"""Compute expected returns per timestep."""
n = tf.shape(rewards)[0]
returns = tf.TensorArray(dtype=tf.float32, size=n)
# Start from the end of `rewards` and accumulate reward sums
# into the `returns` array
rewards = tf.cast(rewards[::-1], dtype=tf.float32)
discounted_sum = tf.constant(0.0)
discounted_sum_shape = discounted_sum.shape
for i in tf.range(n):
reward = rewards[i]
discounted_sum = reward + gamma * discounted_sum
discounted_sum.set_shape(discounted_sum_shape)
returns = returns.write(i, discounted_sum)
returns = returns.stack()[::-1]
if standardize:
returns = ((returns - tf.math.reduce_mean(returns)) / (tf.math.reduce_std(returns) + eps))
return returns
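# get_expected_return computes the discounted return G_t = r_t + gamma * G_{t+1} by scanning the
# reversed reward sequence, then (optionally) standardizes it to zero mean / unit variance.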
huber_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM)
def compute_loss(
action_probs: tf.Tensor,
values: tf.Tensor,
returns: tf.Tensor) -> tf.Tensor:
"""Computes the combined actor-critic loss."""
advantage = returns - values
action_log_probs = tf.math.log(action_probs)
actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)
critic_loss = huber_loss(values, returns)
return actor_loss + critic_loss
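# The actor term is a policy-gradient loss weighted by the advantage (returns - values);
# the critic term is a Huber regression of the predicted values onto the observed returns.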
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
# @tf.function
def train_step(
initial_state: tf.Tensor,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
gamma: float,
max_steps_per_episode: int) -> tf.Tensor:
"""Runs a model training step."""
with tf.GradientTape() as tape:
# Run the model for one episode to collect training data
action_probs, values, rewards = run_episode(initial_state, model, max_steps_per_episode)
# Calculate expected returns
returns = get_expected_return(rewards, gamma)
# Convert training data to appropriate TF tensor shapes
action_probs, values, returns = [tf.expand_dims(x, 1) for x in [action_probs, values, returns]]
# Calculating loss values to update our network
loss = compute_loss(action_probs, values, returns)
# Compute the gradients from the loss
grads = tape.gradient(loss, model.trainable_variables)
# Apply the gradients to the model's parameters
optimizer.apply_gradients(zip(grads, model.trainable_variables))
episode_reward = tf.math.reduce_sum(rewards)
return episode_reward
min_episodes_criterion = 100
max_episodes = 10000
max_steps_per_episode = 1000
## O-bike balancing is considered solved if average reward is >= 180 over 100 consecutive trials
reward_threshold = 180
running_reward = 0
# Discount factor for future rewards
gamma = 0.99
# Keep last episodes reward
episodes_reward: collections.deque = collections.deque(maxlen=min_episodes_criterion)
with tqdm.trange(max_episodes) as ep:
for i in ep:
initial_state = tf.constant(env.reset(), dtype=tf.float32)
with tf.GradientTape() as tape:
# Run the model for one episode to collect training data
action_probs = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
values = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
rewards = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)
initial_state_shape = initial_state.shape
state = initial_state
for t in tf.range(max_steps_per_episode):
# Convert state into a batched tensor (batch size = 1)
state = tf.expand_dims(state, 0)
# print("STATE")
# print(state)
            # Run the model to get the action mean and S.D.
action_logits_t, value = model(state)
# print("action_logits_t, VALUE")
# print(action_logits_t, value)
# Sample next action from the action probability distribution
# action = tf.random.categorical(action_logits_t, 1)[0, 0]
action = tf.random.normal(shape=[1], mean=action_logits_t, stddev=value)[0][0]
action_probs_t = tf.nn.softmax(action_logits_t)
# Store critic values
values = values.write(t, tf.squeeze(value))
# Store log probability of the action chosen
# action_probs = action_probs.write(t, action_probs_t[0, action])
action_probs = action_probs.write(t, action_logits_t[0, 0])
# Apply action to the environment to get next state and reward
# print("ACTION")
# print(action)
state, reward, done, _ = env.step(action)
# state, reward, done = tf_env_step(action)
# state.set_shape(initial_state_shape)
# Store reward
rewards = rewards.write(t, reward)
if tf.cast(done, tf.bool):
break
action_probs = action_probs.stack()
values = values.stack()
rewards = rewards.stack()
# action_probs, values, rewards = run_episode(initial_state, model, max_steps_per_episode)
# Calculate expected returns
returns = get_expected_return(rewards, gamma)
# Convert training data to appropriate TF tensor shapes
action_probs, values, returns = [tf.expand_dims(x, 1) for x in [action_probs, values, returns]]
# Calculating loss values to update our network
loss = compute_loss(action_probs, values, returns)
# Compute the gradients from the loss
grads = tape.gradient(loss, model.trainable_variables)
# Apply the gradients to the model's parameters
optimizer.apply_gradients(zip(grads, model.trainable_variables))
episode_reward = int(tf.math.reduce_sum(rewards))
# episode_reward = int(train_step(initial_state, model, optimizer, gamma, max_steps_per_episode))
episodes_reward.append(episode_reward)
running_reward = statistics.mean(episodes_reward)
ep.set_description(f'Episode {i}')
ep.set_postfix(episode_reward=episode_reward, running_reward=running_reward)
# Show average episode reward every 10 episodes
if i % 10 == 0:
pass # print(f'Episode {i}: average reward: {avg_reward}')
if running_reward > reward_threshold and i >= min_episodes_criterion:
break
print(f'\nSolved at episode {i}: average reward: {running_reward:.2f}!') | 10,523 | Python | 37.130435 | 123 | 0.643543 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/obike_old.py | from typing import Optional, Tuple
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.prims import get_prim_at_path, define_prim
import carb
class Obike(Robot):
"""[summary]
Args:
stage (Usd.Stage): [description]
prim_path (str): [description]
name (str): [description]
usd_path (str, optional): [description]
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "mooncake",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
prim = get_prim_at_path(prim_path)
if not prim.IsValid():
prim = define_prim(prim_path, "Xform")
if usd_path:
prim.GetReferences().AddReference(usd_path)
else:
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
asset_path = nucleus_server + "/Library/obike.usd" # load from nucleus server
prim.GetReferences().AddReference(asset_path)
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
self._wheel_dof_names = ["reaction_wheel_joint", "rear_wheel_joint", "front_wheel_arm_joint"]
self._wheel_dof_indices = None
return
@property
def wheel_dof_indicies(self) -> Tuple[int, int, int]:
"""[summary]
Returns:
int: [description]
"""
return self._wheel_dof_indices
def get_wheel_positions(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[float, float, float]: [description]
"""
joint_positions = self.get_joint_positions()
return joint_positions[self._wheel_dof_indices[0]], joint_positions[self._wheel_dof_indices[1]], \
joint_positions[self._wheel_dof_indices[2]]
def set_wheel_positions(self, positions: Tuple[float, float]) -> None:
"""[summary]
Args:
positions (Tuple[float, float, float]): [description]
"""
joint_positions = [None, None, None]
joint_positions[self._wheel_dof_indices[0]] = positions[0]
joint_positions[self._wheel_dof_indices[1]] = positions[1]
joint_positions[self._wheel_dof_indices[2]] = positions[2]
self.set_joint_positions(positions=np.array(joint_positions))
return
def get_wheel_velocities(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray]: [description]
"""
joint_velocities = self.get_joint_velocities()
return joint_velocities[self._wheel_dof_indices[0]], joint_velocities[self._wheel_dof_indices[1]], \
joint_velocities[self._wheel_dof_indices[2]]
def set_wheel_velocities(self, velocities: Tuple[float, float, float]) -> None:
"""[summary]
Args:
velocities (Tuple[float, float, float]): [description]
"""
joint_velocities = [None, None, None]
joint_velocities[self._wheel_dof_indices[0]] = velocities[0]
joint_velocities[self._wheel_dof_indices[1]] = velocities[1]
joint_velocities[self._wheel_dof_indices[2]] = velocities[2]
self.set_joint_velocities(velocities=np.array(joint_velocities))
return
def get_wheel_efforts(self) -> Tuple[float, float, float]:
"""[summary]
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray]: [description]
"""
joint_efforts = self.get_joint_efforts()
return joint_efforts[self._wheel_dof_indices[0]], joint_efforts[self._wheel_dof_indices[1]], joint_efforts[
self._wheel_dof_indices[2]]
    def set_wheel_efforts(self, efforts: Tuple[float, float, float]) -> None:
        """[summary]
        Args:
            efforts (Tuple[float, float, float]): [description]
        """
        joint_efforts = [None, None, None]
        joint_efforts[self._wheel_dof_indices[0]] = efforts[0]
        joint_efforts[self._wheel_dof_indices[1]] = efforts[1]
        joint_efforts[self._wheel_dof_indices[2]] = efforts[2]
        self.set_joint_efforts(efforts=np.array(joint_efforts))
return
def apply_wheel_actions(self, actions: ArticulationAction) -> None:
"""[summary]
Args:
actions (ArticulationAction): [description]
"""
actions_length = actions.get_length()
if actions_length is not None and actions_length != 3:
raise Exception("ArticulationAction passed should be equal to 3")
joint_actions = ArticulationAction()
if actions.joint_positions is not None:
joint_actions.joint_positions = np.zeros(self.num_dof)
joint_actions.joint_positions[self._wheel_dof_indices[0]] = actions.joint_positions[0]
joint_actions.joint_positions[self._wheel_dof_indices[1]] = actions.joint_positions[1]
joint_actions.joint_positions[self._wheel_dof_indices[2]] = actions.joint_positions[2]
if actions.joint_velocities is not None:
joint_actions.joint_velocities = np.zeros(self.num_dof)
joint_actions.joint_velocities[self._wheel_dof_indices[0]] = actions.joint_velocities[0]
joint_actions.joint_velocities[self._wheel_dof_indices[1]] = actions.joint_velocities[1]
joint_actions.joint_velocities[self._wheel_dof_indices[2]] = actions.joint_velocities[2]
if actions.joint_efforts is not None:
joint_actions.joint_efforts = np.zeros(self.num_dof)
joint_actions.joint_efforts[self._wheel_dof_indices[0]] = actions.joint_efforts[0]
joint_actions.joint_efforts[self._wheel_dof_indices[1]] = actions.joint_efforts[1]
joint_actions.joint_efforts[self._wheel_dof_indices[2]] = actions.joint_efforts[2]
self.apply_action(control_actions=joint_actions)
return
def initialize(self) -> None:
"""[summary]
"""
super().initialize()
# print(self._dofs_infos) # print Orderdict of all dof_name:dof_object
self._wheel_dof_indices = (
self.get_dof_index(self._wheel_dof_names[0]),
self.get_dof_index(self._wheel_dof_names[1]),
self.get_dof_index(self._wheel_dof_names[2]),
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
# print(len(self._articulation_controller._dof_controllers))
# print(self._articulation_controller._dof_controllers)
        # Assign kd only for the driven articulation joints (3 wheels) and leave the others as None
kds = [None] * len(self._articulation_controller._dof_controllers)
for i in self._wheel_dof_indices: kds[i] = 1e2
self._articulation_controller.set_gains(kds=kds)
self._articulation_controller.switch_control_mode(mode="effort") # effort, velocity, position
return | 7,562 | Python | 41.251396 | 116 | 0.614388 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_ppo_lstm.py | import numpy as np
from env_obike import ObikeEnv
import gym
import numpy as np
import wandb
from wandb.integration.sb3 import WandbCallback
from sb3_contrib import RecurrentPPO
from stable_baselines3.common.evaluation import evaluate_policy
config = {
"policy_type": "MlpLstmPolicy",
"total_timesteps": 300000,
"env_name": "CartPole-v1",
}
run = wandb.init(
project="obike_test",
config=config,
sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
# monitor_gym=True, # auto-upload the videos of agents playing the game
# save_code=True, # optional
)
env = ObikeEnv(skip_frame=1,
physics_dt=1.0 / 100.0,
rendering_dt=1.0 / 60.0,
max_episode_length=10,
display_every_iter=20,
headless=False,
observation_list=["lin_acc_x", "lin_acc_y", "lin_acc_z", "ang_vel_x", "ang_vel_y", "ang_vel_z", "robot_rotation_x", "robot_rotation_y", "robot_rotation_z"])
model = RecurrentPPO("MlpLstmPolicy", env, verbose=1, tensorboard_log=f"runs/{run.id}", device="cuda")
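# RecurrentPPO's MlpLstmPolicy keeps LSTM hidden state inside the policy, which is why the
# evaluation loop below passes lstm_states and episode_start flags to model.predict().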
model.learn(
total_timesteps=config["total_timesteps"],
callback=WandbCallback(
gradient_save_freq=1000,
model_save_path=f"models/{run.id}",
verbose=2,
),
)
run.finish()
# mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, warn=False)
# print(mean_reward)
model.save("ppo_recurrent")
del model # remove to demonstrate saving and loading
model = RecurrentPPO.load("ppo_recurrent")
observations = env.reset()
# cell and hidden state of the LSTM
lstm_states = None
num_envs = 1
# Episode start signals are used to reset the lstm states
episode_starts = np.ones((num_envs,), dtype=bool)
while True:
# obs = [observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"]]
# obs = np.array(obs, dtype=np.float32)
action, lstm_states = model.predict(observations, state=lstm_states, episode_start=episode_starts, deterministic=True)
observations, rewards, dones, info = env.step(action)
episode_starts = dones
env.render()
if dones:
lstm_states = None # Clear internal states
observations = env.reset() | 2,214 | Python | 31.573529 | 171 | 0.672087 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/obike_task.py | import time
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from obike import Obike
import omni
from pxr import Gf, UsdGeom
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path, get_all_matching_child_prims
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
from omni.isaac.isaac_sensor import _isaac_sensor
import numpy as np
import torch
import math
import random
def euler_to_quaternion(r):
(roll, pitch, yaw) = (r[0], r[1], r[2])
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
def q2falling(q):
    fall_angle = 2*torch.acos(q[:,0])*torch.sqrt((q[:,1]*q[:,1] + q[:,2]*q[:,2])/(q[:,1]*q[:,1] + q[:,2]*q[:,2] + q[:,3]*q[:,3]))
return fall_angle
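# q2falling maps a batch of (w, x, y, z) quaternions to the tilt angle away from vertical: the total
# rotation angle 2*acos(w) is scaled by the fraction of the rotation axis lying in the x-y plane,
# so a pure yaw rotation gives a fall angle of zero.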
class ObikeTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._robot_positions = torch.tensor([0.0, 0.0, 0.0167])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
self._imu_buf = [{"lin_acc_x":0.0, "lin_acc_y":0.0, "lin_acc_z":0.0, "ang_vel_x":0.0, "ang_vel_y":0.0, "ang_vel_z":0.0}]*128 # default initial sensor buffer
self._is = _isaac_sensor.acquire_imu_sensor_interface() # Sensor reader
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
        self.get_obike() # must be called before "super().set_up_scene(scene)"
super().set_up_scene(scene)
print(get_all_matching_child_prims("/"))
self._robots = ArticulationView(prim_paths_expr="/World/envs/*/Obike/obike", name="obike_view")
scene.add(self._robots)
self.meters_per_unit = UsdGeom.GetStageMetersPerUnit(omni.usd.get_context().get_stage())
return
def get_obike(self): # must be called at very first line of set_up_scene()
obike = Obike(prim_path=self.default_zero_env_path + "/Obike", name="Obike", translation=self._robot_positions)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Obike", get_prim_at_path(obike.prim_path), self._sim_config.parse_actor_config("Obike"))
def get_robot(self):
return self._robots
def get_observations(self) -> dict:
# dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
reaction_vel = dof_vel[:, self._reaction_wheel_dof_idx]
imu_accel_y = torch.tensor([imu["lin_acc_y"] for imu in self._imu_buf])
imu_accel_z = torch.tensor([imu["lin_acc_z"] for imu in self._imu_buf])
imu_gyro_x = torch.tensor([imu["ang_vel_x"] for imu in self._imu_buf])
self.obs_buf[:, 0] = reaction_vel
self.obs_buf[:, 1] = imu_accel_y
self.obs_buf[:, 2] = imu_accel_z
self.obs_buf[:, 3] = imu_gyro_x
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
# print(observations)
return observations
def pre_physics_step(self, actions) -> None:
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.to(self._device)
actions = 2*(actions - 0.5)
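        # actions arrive in [0, 1] from the policy and are rescaled to [-1, 1] before being
        # converted to reaction-wheel torques below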
forces = torch.zeros((self._robots.count, self._robots.num_dof), dtype=torch.float32, device=self._device)
forces[:, self._reaction_wheel_dof_idx] = self._max_push_effort * actions[:, 0]
indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
self._robots.set_joint_efforts(forces, indices=indices) # apply joints torque
## Read IMU & store in buffer ##
buffer = []
robots_prim_path = self._robots.prim_paths
for robot_prim_path in robots_prim_path:
reading = self._is.get_sensor_readings(robot_prim_path + "/chassic/sensor") # read from select sensor (by prim_path)
if reading.shape[0]:
buffer.append(reading[-1]) # get only lastest reading
else: buffer.append({"lin_acc_x":0.0, "lin_acc_y":0.0, "lin_acc_z":0.0, "ang_vel_x":0.0, "ang_vel_y":0.0, "ang_vel_z":0.0}) # default initial sensor buffer
self._imu_buf = buffer
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF velocities
dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, 1), device=self._device)
# apply resets
self._robots.set_joint_velocities(dof_vel, indices=env_ids)
root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
root_vel = torch.zeros((num_resets, 6), device=self._device)
self._robots.set_world_poses(root_pos, root_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self): # Run only once after simulation started
# self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses() # save initial position for reset
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self._reaction_wheel_dof_idx = self._robots.get_dof_index("reaction_wheel_joint")
self._rear_wheel_dof_idx = self._robots.get_dof_index("rear_wheel_joint")
self._steering_arm_dof_idx = self._robots.get_dof_index("front_wheel_arm_joint")
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None: # calculate reward for each env
reaction_vel = self.obs_buf[:, 0]
robots_position, robots_orientation = self._robots.get_world_poses()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
# cart_pos = self.obs_buf[:, 0]
# cart_vel = self.obs_buf[:, 1]
# pole_angle = self.obs_buf[:, 2]
# pole_vel = self.obs_buf[:, 3]
# reward = 1.0 - pole_angle * pole_angle - 0.01 * torch.abs(cart_vel) - 0.005 * torch.abs(pole_vel)
# reward = torch.where(torch.abs(cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
# reward = torch.where(torch.abs(pole_angle) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
reward = 1.0 - fall_angles * fall_angles - 0.01 * torch.abs(reaction_vel)
        reward = torch.where(torch.abs(fall_angles) > 25*(np.pi / 180), torch.ones_like(reward) * -2.0, reward) # fall angle must be <= 25 degrees
self.rew_buf[:] = reward
def is_done(self) -> None: # check termination for each env
robots_position, robots_orientation = self._robots.get_world_poses()
fall_angles = q2falling(robots_orientation) # find fall angle of all robot (batched)
# cart_pos = self.obs_buf[:, 0]
# pole_pos = self.obs_buf[:, 2]
# resets = torch.where(torch.abs(cart_pos) > self._reset_dist, 1, 0)
# resets = torch.where(torch.abs(pole_pos) > math.pi / 2, 1, resets)
# resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets)
resets = torch.where(torch.abs(fall_angles) > 25*(np.pi / 180), 1, 0) # reset by falling
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets) # reset by time
self.reset_buf[:] = resets
| 9,113 | Python | 45.030303 | 168 | 0.617799 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/test.py | import time
import tensorflow as tf
import random
from env_obike import ObikeEnv
env = ObikeEnv(headless=False)
state = env.reset()
print(state)
with tf.GradientTape() as tape:
while True:
state, reward, done, _ = env.step(action=0.5)
print(state, reward, done)
if done:
state = env.reset()
print(state) | 355 | Python | 24.42857 | 53 | 0.639437 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/rlgames_train.py | from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
import hydra
from omegaconf import DictConfig
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
import os
class RLGTrainer():
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register('RLGPU',
lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {
'vecenv_type': 'RLGPU',
'env_creator': lambda **kwargs: env
})
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
experiment_dir = os.path.join('runs', self.cfg.train.params.config.name)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run({
'train': not self.cfg.test,
'play': self.cfg.test,
'checkpoint': self.cfg.checkpoint,
'sigma': None
})
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
headless = cfg.headless
env = VecEnvRLGames(headless=headless)
from scripts.sim_config import SimConfig
sim_config = SimConfig(cfg_dict)
cfg = DictConfig(sim_config.config)
from obike_task import ObikeTask
task = ObikeTask(name="Obike",
sim_config=sim_config,
env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
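    # set_task registers the task with the vectorized env, applies the physics parameters taken
    # from the task config and, with init_sim=True, starts the simulation immediately.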
# task = initialize_task(cfg_dict, env)
print(cfg)
# sets seed. if seed is -1 will pick a random one
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if __name__ == '__main__':
parse_hydra_configs()
| 3,246 | Python | 31.797979 | 116 | 0.6565 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/env_obike.py | import random
import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.imu_sensor import _imu_sensor
state_low = [-100, -100, -10]
state_high = [100, 100, 10]
action_low = [-1]
action_high = [1]
def q2falling(q):
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
class ObikeEnv(gym.Env):
metadata = {"render.modes": ["human"]}
def __init__(
self,
skip_frame=1,
physics_dt=1.0 / 100.0,
rendering_dt=1.0 / 60.0,
max_episode_length=60,
display_every_iter=20,
seed=0,
headless=True,
observation_list=["lin_acc_y", "lin_acc_z", "ang_vel_x"],
) -> None:
from omni.isaac.kit import SimulationApp
## Specify simulation parameters ##
self._physics_dt = physics_dt
self._rendering_dt = rendering_dt
        self._max_episode_length = max_episode_length / self._physics_dt # convert the episode length from seconds to simulation steps
self._skip_frame = skip_frame
self._iteration_count = 0
self._display_every_iter = display_every_iter
self._update_every = 1
self._explore_every = 5
self._headless = headless
self._observation_list = observation_list
self.simulation_app = SimulationApp({"headless": self._headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from obike_old import Obike
# from omni.isaac.core.objects import DynamicSphere
self.world = World(physics_dt=self._physics_dt, rendering_dt=self._rendering_dt, stage_units_in_meters=0.01)
self.world.scene.add_default_ground_plane()
self.robot = self.world.scene.add(
Obike(
prim_path="/obike",
name="obike_mk0",
position=np.array([0, 0.0, 1.46]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
## Setup IMU ##
self.imu_interface = _imu_sensor.acquire_imu_sensor_interface()
self.props = _imu_sensor.SensorProperties()
self.props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
self.props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
self.props.sensorPeriod = 1 / 500 # 2ms
self._sensor_handle = self.imu_interface.add_sensor_on_body("/obike/chassic", self.props)
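        # The IMU is attached 10 cm above the chassis origin (stage units are centimeters) and
        # sampled every 2 ms; get_observation() polls its ring buffer and keeps the latest reading.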
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(len(self._observation_list),), dtype=np.float32)
def step(self, action):
## EXECUTE ACTION ##
from omni.isaac.core.utils.types import ArticulationAction
# print(action)
# action = (action-0.5)+random.random()*0.01
joint_actions = ArticulationAction()
joint_actions.joint_efforts = np.zeros(self.robot.num_dof)
joint_actions.joint_velocities = np.zeros(self.robot.num_dof)
joint_actions.joint_positions = np.zeros(self.robot.num_dof)
joint_actions.joint_efforts[self.robot._wheel_dof_indices[0]] = action * 1000 * 0.607
joint_actions.joint_velocities[self.robot._wheel_dof_indices[1]] = -0.2* 1000
joint_actions.joint_positions[self.robot._wheel_dof_indices[2]] = 0
self.robot.apply_action(control_actions=joint_actions)
# self.robot.apply_wheel_actions(ArticulationAction(joint_efforts=[action * 1000 * 0.607, -0.2* 1000, 0]))
self.world.step(render=(not self._headless) and (self._iteration_count%self._display_every_iter==0))
observations = self.get_observation()
reward = 1 - observations['fall_rotation']*5
## Check for stop event ##
exceed_time_limit = self.world.current_time_step_index >= self._max_episode_length
robot_fall = True if observations['fall_rotation'] > 25 / 180 * math.pi else False
done = exceed_time_limit or robot_fall
info = {}
obs = [observations[name] for name in self._observation_list]
scaled_observation = []
for name in self._observation_list:
if "lin_acc" in name: scaled_observation.append(observations[name]/1000) # (-1000,1000)cm/s^2 -> (-1,1)
if "ang_vel" in name: scaled_observation.append(observations[name]/10) # (-10,10)rad/s -> (-1,1)
if "rotation" in name: scaled_observation.append(observations[name]) # quaternion already inrange(0,1)
return obs, reward, done, info
def reset(self):
self._iteration_count += 1
self.world.reset()
self.robot.initialize()
# self.world.scene.remove("/obike")
# from obike import Obike
# self.robot = self.world.scene.add(
# Obike(
# prim_path="/obike",
# name="obike_mk0",
# position=np.array([10 * random.random(), 10 * random.random(), 1.435]),
# orientation=np.array([1.0, 0.0, 0.0, 0.0]),
# )
# )
observations = self.get_observation()
obs = [observations[name] for name in self._observation_list]
return obs
def get_observation(self):
observations = {"robot_position_x":None, "robot_position_y":None, "robot_position_z":None, "robot_rotation_x":None, "robot_rotation_y":None, "robot_rotation_z":None, "robot_rotation_w":None, "lin_acc_x":None, "lin_acc_y":None, "lin_acc_z":None, "ang_vel_x":None, "ang_vel_y":None, "ang_vel_z":None}
[observations["robot_position_x"], observations["robot_position_y"], observations["robot_position_z"]], [observations["robot_rotation_x"], observations["robot_rotation_y"], observations["robot_rotation_z"], observations["robot_rotation_w"]] = self.robot.get_world_pose()
reading = self.imu_interface.get_sensor_readings(self._sensor_handle)
if reading.shape[0] == 0: # no valid data in buffer -> init observation wih zeros
observations["lin_acc_x"], observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"], observations["ang_vel_y"], observations["ang_vel_z"] = 0, 0, 0, 0, 0, 0
else:
observations["lin_acc_x"], observations["lin_acc_y"], observations["lin_acc_z"], observations["ang_vel_x"], observations["ang_vel_y"], observations["ang_vel_z"] = reading[-1]["lin_acc_x"], reading[-1]["lin_acc_y"], reading[-1]["lin_acc_z"], reading[-1]["ang_vel_x"], reading[-1]["ang_vel_y"], reading[-1]["ang_vel_z"]
observations["fall_rotation"] = q2falling([observations["robot_rotation_x"], observations["robot_rotation_y"], observations["robot_rotation_z"], observations["robot_rotation_w"]])
return observations
def close(self):
pass
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
np.random.seed(seed)
return [seed] | 7,149 | Python | 49.352112 | 331 | 0.60498 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/td3.py | import tensorflow as tf
import numpy as np
class RBuffer():
def __init__(self, maxsize, statedim, naction):
self.cnt = 0
self.maxsize = maxsize
print(maxsize, statedim)
self.state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)
self.action_memory = np.zeros((maxsize, naction), dtype=np.float32)
self.reward_memory = np.zeros((maxsize,), dtype=np.float32)
self.next_state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)
        self.done_memory = np.zeros((maxsize,), dtype=bool)
def storexp(self, state, next_state, action, done, reward):
index = self.cnt % self.maxsize
self.state_memory[index] = state
self.action_memory[index] = action
self.reward_memory[index] = reward
self.next_state_memory[index] = next_state
self.done_memory[index] = 1 - int(done)
self.cnt += 1
def sample(self, batch_size):
max_mem = min(self.cnt, self.maxsize)
batch = np.random.choice(max_mem, batch_size, replace=False)
states = self.state_memory[batch]
next_states = self.next_state_memory[batch]
rewards = self.reward_memory[batch]
actions = self.action_memory[batch]
dones = self.done_memory[batch]
return states, next_states, rewards, actions, dones
class Critic(tf.keras.Model):
def __init__(self):
super(Critic, self).__init__()
self.f1 = tf.keras.layers.Dense(512, activation='relu')
self.f2 = tf.keras.layers.Dense(512, activation='relu')
self.v = tf.keras.layers.Dense(1, activation=None)
def call(self, inputstate, action):
x = self.f1(tf.concat([inputstate, action], axis=1))
x = self.f2(x)
x = self.v(x)
return x
class Actor(tf.keras.Model):
def __init__(self, no_action):
super(Actor, self).__init__()
self.f1 = tf.keras.layers.Dense(512, activation='relu')
self.f2 = tf.keras.layers.Dense(512, activation='relu')
self.mu = tf.keras.layers.Dense(no_action, activation='tanh')
def call(self, state):
x = self.f1(state)
x = self.f2(x)
x = self.mu(x)
return x
class Agent():
def __init__(self, n_action, n_state, action_low, action_high):
self.actor_main = Actor(n_action)
self.actor_target = Actor(n_action)
self.critic_main = Critic()
self.critic_main2 = Critic()
self.critic_target = Critic()
self.critic_target2 = Critic()
self.batch_size = 64
self.n_actions = n_action
self.a_opt = tf.keras.optimizers.Adam(0.001)
# self.actor_target = tf.keras.optimizers.Adam(.001)
self.c_opt1 = tf.keras.optimizers.Adam(0.002)
self.c_opt2 = tf.keras.optimizers.Adam(0.002)
# self.critic_target = tf.keras.optimizers.Adam(.002)
self.memory = RBuffer(maxsize=1000, statedim=(n_state,), naction=n_action)
self.trainstep = 0
# self.replace = 5
self.gamma = 0.99
self.min_action = action_low
self.max_action = action_high
self.actor_update_steps = 2
self.warmup = 200
def act(self, state, evaluate=False):
if self.trainstep > self.warmup:
evaluate = True
state = tf.convert_to_tensor([state], dtype=tf.float32)
actions = self.actor_main(state)
if not evaluate:
actions += tf.random.normal(shape=[self.n_actions], mean=0.0, stddev=0.1)
actions = self.max_action * (tf.clip_by_value(actions, self.min_action, self.max_action))
# print(actions)
return actions[0]
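    # act(): before the warm-up counter expires, zero-mean Gaussian noise (std 0.1) is added to the
    # actor output for exploration; the result is always clipped to the allowed action range.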
def savexp(self, state, next_state, action, done, reward):
self.memory.storexp(state, next_state, action, done, reward)
def update_target(self):
self.actor_target.set_weights(self.actor_main.get_weights())
self.critic_target.set_weights(self.critic_main.get_weights())
self.critic_target2.set_weights(self.critic_main2.get_weights())
def train(self):
if self.memory.cnt < self.batch_size:
return
states, next_states, rewards, actions, dones = self.memory.sample(self.batch_size)
states = tf.convert_to_tensor(states, dtype=tf.float32)
next_states = tf.convert_to_tensor(next_states, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.float32)
# dones = tf.convert_to_tensor(dones, dtype= tf.bool)
with tf.GradientTape() as tape1, tf.GradientTape() as tape2:
target_actions = self.actor_target(next_states)
target_actions += tf.clip_by_value(
tf.random.normal(shape=[*np.shape(target_actions)], mean=0.0, stddev=0.2), -0.5, 0.5)
target_actions = self.max_action * (tf.clip_by_value(target_actions, self.min_action, self.max_action))
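            # TD3 target policy smoothing: clipped zero-mean noise is added to the target policy's
            # action before the twin target critics evaluate it.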
target_next_state_values = tf.squeeze(self.critic_target(next_states, target_actions), 1)
target_next_state_values2 = tf.squeeze(self.critic_target2(next_states, target_actions), 1)
critic_value = tf.squeeze(self.critic_main(states, actions), 1)
critic_value2 = tf.squeeze(self.critic_main2(states, actions), 1)
next_state_target_value = tf.math.minimum(target_next_state_values, target_next_state_values2)
target_values = rewards + self.gamma * next_state_target_value * dones
critic_loss1 = tf.keras.losses.MSE(target_values, critic_value)
critic_loss2 = tf.keras.losses.MSE(target_values, critic_value2)
grads1 = tape1.gradient(critic_loss1, self.critic_main.trainable_variables)
grads2 = tape2.gradient(critic_loss2, self.critic_main2.trainable_variables)
self.c_opt1.apply_gradients(zip(grads1, self.critic_main.trainable_variables))
self.c_opt2.apply_gradients(zip(grads2, self.critic_main2.trainable_variables))
self.trainstep += 1
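        # Delayed policy updates: the actor network is only updated every `actor_update_steps`
        # critic updates.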
if self.trainstep % self.actor_update_steps == 0:
with tf.GradientTape() as tape3:
new_policy_actions = self.actor_main(states)
actor_loss = -self.critic_main(states, new_policy_actions)
actor_loss = tf.math.reduce_mean(actor_loss)
grads3 = tape3.gradient(actor_loss, self.actor_main.trainable_variables)
self.a_opt.apply_gradients(zip(grads3, self.actor_main.trainable_variables))
# if self.trainstep % self.replace == 0:
self.update_target() | 6,582 | Python | 41.198718 | 115 | 0.627621 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/obike.py | from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_server_path
from omni.isaac.core.utils.stage import add_reference_to_stage
import carb
class Obike(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "Obike",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
server_path = get_server_path()
if server_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = server_path + "/Library/obike.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
articulation_controller=None,
) | 1,116 | Python | 30.027777 | 72 | 0.58871 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_obike_lstm_Karn.py | import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
def discount_rewards(r, gamma=0.8):
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def q2falling(q):
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
def omni_unit2_sensor_unit(observations):
observations[0] = observations[0] / 981.0
observations[1] = observations[1] / 981.0
observations[2] = ( observations[2] * 180.0 ) / math.pi
return observations
def sensor_unit2_omni_unit(observations):
observations[0] = observations[0] * 981.0
observations[1] = observations[1] * 981.0
observations[2] = ( observations[2] * math.pi ) / 180.0
return observations
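# These two helpers convert between Omniverse units (cm/s^2 for linear acceleration, rad/s for
# angular rate) and the units of the recorded sensor-noise model (g and deg/s), and back again.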
## Specify simulation parameters ##
_physics_dt = 1/100
_rendering_dt = 1/30
_max_episode_length = 60/_physics_dt # 60 second after reset
_iteration_count = 0
_display_every_iter = 1
_update_every = 1
_headless = False
simulation_app = SimulationApp({"headless": _headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from obike_old import Obike
# from omni.isaac.core.objects import DynamicSphere
world = World(physics_dt=_physics_dt, rendering_dt=_rendering_dt, stage_units_in_meters=0.01)
world.scene.add_default_ground_plane()
robot = world.scene.add(
Obike(
prim_path="/obike",
name="obike_mk0",
position=np.array([0, 0.0, 1.435]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
## Setup IMU ##
imu_interface = _imu_sensor.acquire_imu_sensor_interface()
props = _imu_sensor.SensorProperties()
props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
props.orientation = carb.Float4(0, 0, 0, 1) # (x, y, z, w)
props.sensorPeriod = 1 / 500 # 2ms
_sensor_handle = imu_interface.add_sensor_on_body("/obike/chassic", props)
## Create LSTM Model ##
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
## Train parameter ##
n_episodes = 10000
input_dim = 3
output_dim = 1
num_timesteps = 1
batch_size = 1
lstm_nodes = 32
input_layer = tf.keras.Input(shape=(num_timesteps, input_dim), batch_size=batch_size)
lstm_cell = tf.keras.layers.LSTMCell(
lstm_nodes,
kernel_initializer='glorot_uniform',
recurrent_initializer='glorot_uniform',
bias_initializer='zeros',
)
lstm_layer = tf.keras.layers.RNN(
lstm_cell,
return_state=True,
return_sequences=True,
stateful=True,
)
lstm_out, hidden_state, cell_state = lstm_layer(input_layer)
output = tf.keras.layers.Dense(output_dim)(lstm_out)
model = tf.keras.Model(
inputs=input_layer,
outputs=[hidden_state, cell_state, output]
)
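# With stateful=True and a fixed batch_size of 1, the RNN layer carries its hidden/cell state
# across successive calls within an episode; the training loop below resets it explicitly at the
# start of every episode.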
# class SimpleLSTM(keras.Model):
# def __init__(self, lstm_units, num_output):
# super().__init__(self)
# cell = layers.LSTMCell(lstm_units,
# kernel_initializer='glorot_uniform',
# recurrent_initializer='glorot_uniform',
# bias_initializer='zeros')
# self.lstm = tf.keras.layers.RNN(cell,
# return_state = True,
# return_sequences=True,
# stateful=False)
# lstm_out, hidden_state, cell_state = self.lstm(input_layer)
#
#
# self.lstm1 = layers.LSTM(lstm_units, return_sequences=True, return_state=True)
# self.dense = layers.Dense(num_output)
#
# def get_zero_initial_state(self, inputs):
# return [tf.zeros((batch_size, lstm_nodes)), tf.zeros((batch_size, lstm_nodes))]
# def __call__(self, inputs, states = None):
# if states is None:
# self.lstm.get_initial_state = self.get_zero_initial_state
# def call(self, inputs, states=None, return_state = False, training=False):
# x = inputs
# if states is None: states = self.lstm1.get_initial_state(x) # state shape = (2, batch_size, lstm_units)
# print(x.shape)
# print(len(states))
# print(states[0].shape)
# x, sequence, states = self.lstm1(x, initial_state=states, training=training)
# x = self.dense(x, training=training)
#
# if return_state: return x, states
# else: return x
# @tf.function
# def train_step(self, inputs):
# inputs, labels = inputs
# with tf.GradientTape() as tape:
# predictions = self(inputs, training=True)
# loss = self.loss(labels, predictions)
# grads = tape.gradient(loss, model.trainable_variables)
# self.optimizer.apply_gradients(zip(grads, model.trainable_variables))
#
# return {'loss': loss}
# model = SimpleLSTM(lstm_units=8, num_output=1)
# model.build(input_shape=(None, 1, 3))
# model.summary()
optimizer = tf.optimizers.Adam(learning_rate=0.0025)
loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
# train_mse_metric = keras.metrics.MeanSquaredError()
scores = []
gradBuffer = model.trainable_variables
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
for e in range(n_episodes):
# print("\nStart of episodes %d" % (e,))
# Reset the environment
world.reset()
lstm_layer.reset_states(states=[np.zeros((batch_size, lstm_nodes)), np.zeros((batch_size, lstm_nodes))])
previous_states = None # reset LSTM's internal state
render_counter = 0
ep_memory = []
ep_score = 0
done = False
previous = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
present = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
while not done:
previous['robot_position'], previous['robot_rotation'] = robot.get_world_pose()
previous['fall_rotation'] = q2falling(previous['robot_rotation'])
reading = imu_interface.get_sensor_readings(_sensor_handle)
if reading.shape[0] == 0:# no valid data in buffer -> init observation wih zeros
observations = np.array([0, 0, 0])
else: # IMU will return [???, acc_x, acc_y, acc_z, gyr_x, gyr_y, gyr_z]
            observations = np.array([reading[-1]["lin_acc_y"],  # use only the latest data in buffer
reading[-1]["lin_acc_z"],
reading[-1]["ang_vel_x"]])
## convert omniverse unit to sensor_read unit
world_sensor = omni_unit2_sensor_unit(observations)
key_gen = np.load('/home/teera/.local/share/ov/pkg/isaac_sim-2021.2.1/exts/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/mu_cov_bike.npz')
noi_accy,noi_accz,noi_ang_vel_x = np.random.default_rng().multivariate_normal(world_sensor, key_gen["cov"], 1).T
## convert sensor_read unit to omniverse unit
noisy_sensor = np.array([noi_accy,noi_accz,noi_ang_vel_x])
sim_observations = sensor_unit2_omni_unit(noisy_sensor)
## Scale accel from (-1000, 1000) -> (0, 1) for NN inputs
## Scale gyro from (-4.36, 4.36) -> (0, 1) for NN inputs (4.36 rad = 250 deg)
# print(observations)
# time.sleep(1)
sim_observations[0] = (sim_observations[0] / 2000) + 0.5
sim_observations[1] = (sim_observations[1] / 2000) + 0.5
sim_observations[2] = (sim_observations[2] / 8.72) + 0.5
sim_observations = np.array(sim_observations, dtype=np.float32).reshape((batch_size, num_timesteps, input_dim)) # add extra dimension for batch_size=1
with tf.GradientTape() as tape:
# forward pass
h_state, c_state, logits = model(sim_observations) # required input_shape=(None, 1, 3)
# logits, previous_states = model.call(inputs=observations, states=previous_states, return_state=True, training=True)
a_dist = logits.numpy()
## Choose random action with p = action dist
# print("A_DIST")
# print(a_dist)
# a = np.random.choice(a_dist[0], p=a_dist[0])
# a = np.argmax(a_dist == a)
a = a_dist + 0.1*((np.random.rand(*a_dist.shape))-0.5) # random with uniform distribution (.shape will return tuple so unpack with *)
loss = loss_fn([a], logits)
# loss = previous['fall_rotation']
## EXECUTE ACTION ##
from omni.isaac.core.utils.types import ArticulationAction
# print("LOGITS")
# print(logits)
robot.apply_wheel_actions(ArticulationAction(joint_efforts=[a*100*0.607, 0, 0]))
world.step(render=True)
present['robot_position'], present['robot_rotation'] = robot.get_world_pose()
present['fall_rotation'] = q2falling(present['robot_rotation'])
reward = previous['fall_rotation'] - present['fall_rotation'] # calculate reward from movement toward center
## Check for stop event ##
exceed_time_limit = world.current_time_step_index >= _max_episode_length
        robot_fall = previous['fall_rotation'] > 25 / 180 * math.pi
done = exceed_time_limit or robot_fall
ep_score += reward
# if done: reward-= 10 # small trick to make training faster
grads = tape.gradient(loss, model.trainable_weights)
ep_memory.append([grads, reward])
scores.append(ep_score)
# Discount the rewards
ep_memory = np.array(ep_memory)
ep_memory[:, 1] = discount_rewards(ep_memory[:, 1])
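    # REINFORCE-style update: each stored gradient is weighted by its discounted return and
    # accumulated into gradBuffer; the buffer is applied to the model every _update_every episodes.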
for grads, reward in ep_memory:
for ix, grad in enumerate(grads):
gradBuffer[ix] += grad * reward
if e % _update_every == 0:
optimizer.apply_gradients(zip(gradBuffer, model.trainable_variables))
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
if e % 100 == 0:
print("Episode {} Score {}".format(e, np.mean(scores[-100:]))) | 10,253 | Python | 40.853061 | 159 | 0.619233 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_obike_drqn.py | import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
import numpy as np
import math
import time
import tensorflow as tf
def q2falling(q):
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
## Specify simulation parameters ##
_physics_dt = 1/1000
_rendering_dt = 1/30
_max_episode_length = 60/_physics_dt # 60 seconds after reset
_iteration_count = 0
_display_every_iter = 1
_update_every = 1
_explore_every = 5
_headless = False
simulation_app = SimulationApp({"headless": _headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from obike_old import Obike
# from omni.isaac.core.objects import DynamicSphere
world = World(physics_dt=_physics_dt, rendering_dt=_rendering_dt, stage_units_in_meters=0.01)
world.scene.add_default_ground_plane()
robot = world.scene.add(
Obike(
prim_path="/obike",
name="obike_mk0",
position=np.array([0, 0.0, 1.435]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
## Setup IMU ##
imu_interface = _imu_sensor.acquire_imu_sensor_interface()
props = _imu_sensor.SensorProperties()
props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
props.sensorPeriod = 1 / 500 # 2ms
_sensor_handle = imu_interface.add_sensor_on_body("/obike/chassic", props)
## Create LSTM Model ##
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
| 1,996 | Python | 29.257575 | 93 | 0.623747 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_obike_lstm.py | import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
def discount_rewards(r, gamma=0.8):
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
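# discount_rewards converts the per-step rewards r into discounted returns
# G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ... using a single reverse pass.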
def q2falling(q):
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
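# q2falling approximates the robot's tilt ("falling") angle from a unit quaternion [w, x, y, z]:
# the total rotation angle 2*acos(w) is weighted by the fraction of the rotation axis lying in the
# horizontal x-y plane, so an upright pose returns 0 and the value grows as the robot leans over.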
## Specify simulation parameters ##
_physics_dt = 1/1000
_rendering_dt = 1/30
_max_episode_length = 60/_physics_dt # 60 seconds after reset
_iteration_count = 0
_display_every_iter = 1
_update_every = 1
_explore_every = 5
_headless = False
simulation_app = SimulationApp({"headless": _headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from obike_old import Obike
# from omni.isaac.core.objects import DynamicSphere
world = World(physics_dt=_physics_dt, rendering_dt=_rendering_dt, stage_units_in_meters=0.01)
world.scene.add_default_ground_plane()
robot = world.scene.add(
Obike(
prim_path="/obike",
name="obike_mk0",
position=np.array([0, 0.0, 1.435]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
## Setup IMU ##
imu_interface = _imu_sensor.acquire_imu_sensor_interface()
props = _imu_sensor.SensorProperties()
props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
props.sensorPeriod = 1 / 500 # 2ms
_sensor_handle = imu_interface.add_sensor_on_body("/obike/chassic", props)
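# props.sensorPeriod = 1/500 s, so more than one IMU sample can accumulate between successive reads;
# the training loop below keeps only the most recent sample (reading[-1]).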
## Create LSTM Model ##
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
## Train parameter ##
n_episodes = 10000
input_dim = 3
output_dim = 1
num_timesteps = 1
batch_size = 1
lstm_nodes = 32
input_layer = tf.keras.Input(shape=(num_timesteps, input_dim), batch_size=batch_size)
lstm_cell = tf.keras.layers.LSTMCell(
lstm_nodes,
kernel_initializer='glorot_uniform',
recurrent_initializer='glorot_uniform',
bias_initializer='zeros',
)
lstm_layer = tf.keras.layers.RNN(
lstm_cell,
return_state=True,
return_sequences=True,
stateful=True,
)
lstm_out, hidden_state, cell_state = lstm_layer(input_layer)
hidden_layer = tf.keras.layers.Dense(32)(lstm_out)
output = tf.keras.layers.Dense(output_dim)(hidden_layer)
model = tf.keras.Model(
inputs=input_layer,
outputs=[hidden_state, cell_state, output]
)
optimizer = tf.optimizers.Adam(learning_rate=0.0025)
loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
# train_mse_metric = keras.metrics.MeanSquaredError()
scores = []
gradBuffer = model.trainable_variables
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
for e in range(n_episodes):
# print("\nStart of episodes %d" % (e,))
# Reset the environment
world.reset()
lstm_layer.reset_states()
# lstm_layer.reset_states(states=[np.zeros((batch_size, lstm_nodes)), np.zeros((batch_size, lstm_nodes))])
previous_states = None # reset LSTM's internal state
render_counter = 0
ep_memory = []
ep_score = 0
done = False
previous = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
present = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
while not done:
previous['robot_position'], previous['robot_rotation'] = robot.get_world_pose()
previous['fall_rotation'] = q2falling(previous['robot_rotation'])
reading = imu_interface.get_sensor_readings(_sensor_handle)
        if reading.shape[0] == 0:  # no valid data in buffer -> init observation with zeros
observations = np.array([0, 0, 0])
else: # IMU will return [???, acc_x, acc_y, acc_z, gyr_x, gyr_y, gyr_z]
            observations = np.array([reading[-1]["lin_acc_y"],  # use only the latest data in buffer
reading[-1]["lin_acc_z"],
reading[-1]["ang_vel_x"]])
## Scale accel from (-1000, 1000) -> (0, 1) for NN inputs
## Scale gyro from (-4.36, 4.36) -> (0, 1) for NN inputs (4.36 rad = 250 deg)
# print(observations)
# time.sleep(1)
observations[0] = (observations[0] / 2000) + 0.5
observations[1] = (observations[1] / 2000) + 0.5
observations[2] = (observations[2] / 8.72) + 0.5
observations = np.array(observations, dtype=np.float32).reshape((batch_size, num_timesteps, input_dim)) # add extra dimension for batch_size=1
# print(observations)
with tf.GradientTape() as tape:
# forward pass
h_state, c_state, logits = model(observations) # required input_shape=(None, 1, 3)
# logits, previous_states = model.call(inputs=observations, states=previous_states, return_state=True, training=True)
a_dist = logits.numpy()
a_dist = (a_dist-0.5) * 2 # map (0,1) -> (-1, 1)
## Choose random action with p = action dist
# print("A_DIST")
# print(a_dist)
# a = np.random.choice(a_dist[0], p=a_dist[0])
# a = np.argmax(a_dist == a)
if e % _explore_every == 0:
a = ((np.random.rand(*a_dist.shape))-0.5)*2
else:
a = a_dist + 0.1*((np.random.rand(*a_dist.shape))-0.5) # random with uniform distribution (.shape will return tuple so unpack with *)
loss = loss_fn([a], logits)
# loss = previous['fall_rotation']
## EXECUTE ACTION ##
from omni.isaac.core.utils.types import ArticulationAction
# print("LOGITS")
# print(logits)
# print(a)
robot.apply_wheel_actions(ArticulationAction(joint_efforts=[a*100*0.607, 0, 0]))
world.step(render=True)
present['robot_position'], present['robot_rotation'] = robot.get_world_pose()
present['fall_rotation'] = q2falling(present['robot_rotation'])
reward = previous['fall_rotation'] - present['fall_rotation'] # calculate reward from movement toward center
## Check for stop event ##
exceed_time_limit = world.current_time_step_index >= _max_episode_length
        robot_fall = previous['fall_rotation'] > 50 / 180 * math.pi
done = exceed_time_limit or robot_fall
ep_score += reward
# if done: reward-= 10 # small trick to make training faster
grads = tape.gradient(loss, model.trainable_weights)
ep_memory.append([grads, reward])
scores.append(ep_score)
# Discount the rewards
ep_memory = np.array(ep_memory)
ep_memory[:, 1] = discount_rewards(ep_memory[:, 1])
for grads, reward in ep_memory:
for ix, grad in enumerate(grads):
gradBuffer[ix] += grad * reward
if e % _update_every == 0:
optimizer.apply_gradients(zip(gradBuffer, model.trainable_variables))
for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
if e % 100 == 0:
print("Episode {} Score {}".format(e, np.mean(scores[-100:]))) | 7,386 | Python | 38.084656 | 152 | 0.623477 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/train_obike_td3.py | import tensorflow as tf
import numpy as np
import math
import time
def q2falling(q):
q[0] = 1 if q[0] > 1 else q[0]
try:
if q[1] == 0 and q[2] == 0 and q[3] == 0:
return 0
return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
except:
print(q)
return 0
import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
print(tf.config.list_physical_devices('GPU'))
state_low = [-100, -100, -10]
state_high = [100, 100, 10]
action_low = [-1]
action_high = [1]
## Specify simulation parameters ##
_physics_dt = 1/100
_rendering_dt = 1/30
_max_episode_length = 60/_physics_dt # 60 seconds after reset
_iteration_count = 0
_display_every_iter = 1
_update_every = 1
_explore_every = 5
_headless = False
simulation_app = SimulationApp({"headless": _headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from obike_old import Obike
# from omni.isaac.core.objects import DynamicSphere
world = World(physics_dt=_physics_dt, rendering_dt=_rendering_dt, stage_units_in_meters=0.01)
world.scene.add_default_ground_plane()
robot = world.scene.add(
Obike(
prim_path="/obike",
name="obike_mk0",
position=np.array([0, 0.0, 1.435]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
## Setup IMU ##
imu_interface = _imu_sensor.acquire_imu_sensor_interface()
props = _imu_sensor.SensorProperties()
props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
props.sensorPeriod = 1 / 500 # 2ms
_sensor_handle = imu_interface.add_sensor_on_body("/obike/chassic", props)
from td3 import RBuffer, Critic, Actor, Agent
with tf.device('GPU:0'):
# tf.random.set_seed(336699)
agent = Agent(n_action=1, n_state=3, action_low=action_low, action_high=action_high)
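    # Assumption about the local td3 module's API (only these methods are used below):
    # Agent.act(state) -> action, Agent.savexp(state, next_state, action, done, reward) stores a
    # transition in the replay buffer, and Agent.train() performs one TD3 update step.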
episods = 20000
ep_reward = []
total_avgr = []
target = False
for s in range(episods):
if target == True:
break
total_reward = 0
world.reset()
done = False
previous = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None, "lin_acc_y": None, "lin_acc_z": None, "ang_vel_x": None}
present = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None, "lin_acc_y": None, "lin_acc_z": None, "ang_vel_x": None}
while not done:
            if previous['robot_position'] is None: # initial state
                robot_position, robot_rotation = robot.get_world_pose()
                previous = {'robot_position': robot_position,
                            'robot_rotation': robot_rotation,
                            'fall_rotation': q2falling(robot_rotation),
                            "lin_acc_y": 0,
                            "lin_acc_z": 0,
                            "ang_vel_x": 0}
            state = np.array([previous["lin_acc_y"],
                              previous["lin_acc_z"],
                              previous["ang_vel_x"]], dtype=np.float32)
            ## Scale accel from (-1000, 1000) -> (0, 1) for NN inputs
            ## Scale gyro from (-4.36, 4.36) -> (0, 1) for NN inputs (4.36 rad = 250 deg)
            # print(observations)
            # time.sleep(1)
            # observations[0] = (state[0] / 2000) + 0.5
            # observations[1] = (state[1] / 2000) + 0.5
            # observations[2] = (state[2] / 8.72) + 0.5
            action = agent.act(state)
            # print(action)
            ## EXECUTE ACTION ##
            from omni.isaac.core.utils.types import ArticulationAction
            robot.apply_wheel_actions(ArticulationAction(joint_efforts=[action * 100 * 0.607, 0, 0]))
            world.step(render=True)
            reading = imu_interface.get_sensor_readings(_sensor_handle)
            if reading.shape[0] == 0:  # no valid data in buffer -> fall back to zero readings
                present["lin_acc_y"], present["lin_acc_z"], present["ang_vel_x"] = 0.0, 0.0, 0.0
                print("Warning! no data in IMU buffer")
            else:  # IMU will return [???, acc_x, acc_y, acc_z, gyr_x, gyr_y, gyr_z]
                present["lin_acc_y"], present["lin_acc_z"], present["ang_vel_x"] = reading[-1]["lin_acc_y"], reading[-1]["lin_acc_z"], reading[-1]["ang_vel_x"]
            next_state = np.array([present["lin_acc_y"],
                                   present["lin_acc_z"],
                                   present["ang_vel_x"]])
            present['robot_position'], present['robot_rotation'] = robot.get_world_pose()
            present['fall_rotation'] = q2falling(present['robot_rotation'])
            reward = 1
            # reward = previous['fall_rotation'] - present['fall_rotation'] # calculate reward from movement toward center
            ## Check for stop event ##
            exceed_time_limit = world.current_time_step_index >= _max_episode_length
            robot_fall = present['fall_rotation'] > 50 / 180 * math.pi
            done = exceed_time_limit or robot_fall
            agent.savexp(state, next_state, action, done, reward)
            agent.train()
            ## UPDATE STATE ##
            previous = present.copy()
            total_reward += reward
            if done:
                ep_reward.append(total_reward)
                avg_reward = np.mean(ep_reward[-100:])
                total_avgr.append(avg_reward)
                print("total reward after {} steps is {} and avg reward is {}".format(s, total_reward, avg_reward))
                if avg_reward == 200: target = True | 5,653 | Python | 40.270073 | 159 | 0.558995 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu pipeline only - device id for running sim and task
device_id: 0
# device to run RL
rl_device: 'cuda:0'
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# set default task and default training config based on task
defaults:
- task: Obike
- train: ${task}PPO
- hydra/job_logging: disabled
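# note: Hydra resolves ${task} to the selected task name, so choosing "task: Obike" above also
# selects the matching "ObikePPO" training config from the train/ folder.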
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 1,324 | YAML | 23.537037 | 103 | 0.73565 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/cfg/task/Obike.yaml | # used to create the object
name: Obike
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:64,${...num_envs}}
envSpacing: 0.3
resetDist: 3.0
maxEffort: 10
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
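  # with sim.dt = 1/120 s (below), applying actions every 2 physics steps gives the 60 Hz control rate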
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 100.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 81920
gpu_found_lost_pairs_capacity: 1024
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1024
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 67108864
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
Obike:
# -1 to use default values
override_usd_defaults: False
fixed_base: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 4
solver_velocity_iteration_count: 0
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 100.0
# per-shape
contact_offset: 0.02
rest_offset: 0.001 | 2,016 | YAML | 25.893333 | 71 | 0.681052 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/cfg/train/ObikePPO.yaml | params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
rnn:
name: 'lstm'
units: 32
layers: 2
before_mlp: False
concat_input: True
layer_norm: True
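      # rl_games RNN block: with before_mlp: False the 32-unit LSTM sits after the [32, 32] MLP trunk;
      # seq_len in the config section below sets the training sequence length for the recurrent policy.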
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Obike,${....experiment}}
full_experiment_name: ${.name}
device: ${....rl_device}
device_name: ${....rl_device}
env_name: rlgpu
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 50
save_frequency: 25
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 1024
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001 | 1,678 | YAML | 21.092105 | 101 | 0.587008 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/scripts/common.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import carb.tokens
import omni
from pxr import UsdGeom, PhysxSchema, UsdPhysics
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
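# Example usage (mirrors the import scripts later in this extension) - drive a wheel joint at constant velocity:
#   axle = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/rear_wheel_joint"), "angular")
#   set_drive_parameters(axle, "velocity", math.degrees(-10), 0, math.radians(1e7))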
| 1,653 | Python | 34.191489 | 105 | 0.69147 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/scripts/sim_config.py | from omniisaacgymenvs.utils.config_utils.default_scene_params import *
import copy
import omni.usd
class SimConfig():
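    """Parses the task/sim/physx sections of the configuration and applies the resulting PhysX and
    per-actor settings (solver iteration counts, offsets, density/mass, self-collisions, ...) to USD prims."""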
def __init__(self, config: dict = None):
if config is None:
config = dict()
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
def _parse_config(self):
# general sim parameter
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def add_fixed_base(self, name, prim, cfg, value=None):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "fixed_base")
if value:
root_joint_path = f"{prim.GetPath()}_fixedBaseRootJoint"
joint = UsdPhysics.Joint.Define(stage, root_joint_path)
joint.CreateBody1Rel().SetTargets([prim.GetPath()])
self.apply_articulation_settings(name, joint.GetPrim(), cfg, force_articulation=True)
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg, force_articulation=False):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
if not is_articulation and force_articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Apply(prim)
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
# parse through all children prims
prims = [prim]
while len(prims) > 0:
prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI(prim)
collision_body = UsdPhysics.CollisionAPI(prim)
articulation = UsdPhysics.ArticulationRootAPI(prim)
if rb:
self.apply_rigid_body_settings(name, prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
if not force_articulation:
self.add_fixed_base(name, prim, cfg, cfg["fixed_base"])
self.set_articulation_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
children_prims = prim.GetPrim().GetChildren()
prims = prims + children_prims
| 16,516 | Python | 43.761517 | 122 | 0.626181 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/obike/scripts/import_obike.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni
import omni.kit.commands
import asyncio
import math
import weakref
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from omni.isaac.isaac_sensor import _isaac_sensor
from .common import set_drive_parameters
from pxr import UsdLux, Sdf, Gf, UsdPhysics
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder
EXTENSION_NAME = "Import O-Bike"
class Extension(omni.ext.IExt):
def on_startup(self, ext_id: str):
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_id
self._extension_path = ext_manager.get_extension_path(ext_id)
self._menu_items = [
MenuItemDescription(
name="Import Robots",
sub_menu=[
MenuItemDescription(name="O-Bike URDF", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())
],
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._build_ui()
def _build_ui(self):
self._window = omni.ui.Window(
EXTENSION_NAME, width=0, height=0, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
with ui.VStack(spacing=5, height=0):
title = "Import a O-Bike Robot via URDF"
doc_link = "https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html"
overview = "This Example shows you import an NVIDIA O-Bike robot via URDF.\n\nPress the 'Open in IDE' button to view the source code."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Command Panel",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5):
dict = {
"label": "Load Robot",
"type": "button",
"text": "Load",
"tooltip": "Load a O-Bike Robot into the Scene",
"on_clicked_fn": self._on_load_robot,
}
btn_builder(**dict)
dict = {
"label": "Configure Drives",
"type": "button",
"text": "Configure",
"tooltip": "Configure Joint Drives",
"on_clicked_fn": self._on_config_robot,
}
btn_builder(**dict)
dict = {
"label": "Spin Robot",
"type": "button",
"text": "move",
"tooltip": "Spin the Robot in Place",
"on_clicked_fn": self._on_config_drives,
}
btn_builder(**dict)
def on_shutdown(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
def _menu_callback(self):
self._window.visible = not self._window.visible
def _on_load_robot(self):
load_stage = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
asyncio.ensure_future(self._load_obike(load_stage))
async def _load_obike(self, task):
done, pending = await asyncio.wait({task})
if task in done:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = False
# import_config.distance_scale = 100
            import_config.fix_base = False  # do not fix the base; the wheels rest on /groundPlane
import_config.make_default_prim = True
import_config.create_physics_scene = True
omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=self._extension_path + "/data/urdf/robots/obike/urdf/obike.urdf",
import_config=import_config,
)
viewport = omni.kit.viewport_legacy.get_default_viewport_window()
viewport.set_camera_position("/OmniverseKit_Persp", -51, 63, 25, True)
viewport.set_camera_target("/OmniverseKit_Persp", 220, -218, -160, True)
stage = omni.usd.get_context().get_stage()
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
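            # stage units are centimeters here, so a gravity magnitude of 981.0 corresponds to 9.81 m/s^2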
result, plane_path = omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, 0),
color=Gf.Vec3f(0.5),
)
# make sure the ground plane is under root prim and not robot
omni.kit.commands.execute(
"MovePrimCommand", path_from=plane_path, path_to="/groundPlane", keep_world_transform=True
)
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
def _on_config_robot(self):
stage = omni.usd.get_context().get_stage()
# make front wheel spin freely
prim_path = "/obike/chassic/front_wheel_joint"
prim = stage.GetPrimAtPath(prim_path)
omni.kit.commands.execute(
"UnapplyAPISchemaCommand",
api=UsdPhysics.DriveAPI,
prim=prim,
api_prefix="drive",
multiple_api_token="angular",
)
        ## Attach IMU sensor ##
self._is = _isaac_sensor.acquire_imu_sensor_interface()
self.body_path = "/obike/chassic"
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateImuSensor",
path="/sensor",
parent=self.body_path,
sensor_period=1 / 500.0,
offset=Gf.Vec3d(0, 0, 10),
orientation=Gf.Quatd(1, 0, 0, 0),
visualize=True,
)
def _on_config_drives(self):
# self._on_config_robot() # make sure drives are configured first
stage = omni.usd.get_context().get_stage()
# set each axis to spin at a rate of 1 rad/s
axle_steering = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/front_wheel_arm_joint"), "angular")
axle_rear_wheel = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/rear_wheel_joint"), "angular")
axle_reaction_wheel = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/reaction_wheel_joint"), "angular")
# omega = velocity2omega(0, 0.1, 0)
# print(omega)
set_drive_parameters(axle_steering, "position", math.degrees(3), 0, math.radians(1e7))
set_drive_parameters(axle_rear_wheel, "velocity", math.degrees(-10), 0, math.radians(1e7))
set_drive_parameters(axle_reaction_wheel, "velocity", math.degrees(4), 0, math.radians(1e7))
| 8,065 | Python | 42.6 | 150 | 0.566646 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/hanuman/hanuman.py | from typing import Optional
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_server_path
from omni.isaac.core.utils.stage import add_reference_to_stage
import carb
class hanuman(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "Hanuman",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
server_path = get_server_path()
if server_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = server_path + "/Library/hanumanv1.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
articulation_controller=None,
)
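# Minimal usage sketch (assumes a World has already been created, as in the training scripts above):
#   robot = world.scene.add(hanuman(prim_path="/World/hanuman", name="hanuman"))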
| 1,102 | Python | 28.026315 | 72 | 0.603448 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/hanuman/scripts/common.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import carb.tokens
import omni
from pxr import UsdGeom, PhysxSchema, UsdPhysics
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
| 1,653 | Python | 34.191489 | 105 | 0.69147 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/hanuman/scripts/import_hanuman.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni
import omni.kit.commands
import asyncio
import math
import weakref
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from omni.isaac.isaac_sensor import _isaac_sensor
from .common import set_drive_parameters
from pxr import UsdLux, Sdf, Gf, UsdPhysics
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder
EXTENSION_NAME = "Import Hanuman"
class Extension(omni.ext.IExt):
def on_startup(self, ext_id: str):
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_id
self._extension_path = ext_manager.get_extension_path(ext_id)
self._menu_items = [
MenuItemDescription(
name="Import Robots",
sub_menu=[
MenuItemDescription(name="Hanuman URDF", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())
],
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._build_ui()
def _build_ui(self):
self._window = omni.ui.Window(
EXTENSION_NAME, width=0, height=0, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
with ui.VStack(spacing=5, height=0):
title = "Import a Hanuman humanoid Robot via URDF"
doc_link = "https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html"
overview = "This Example shows you import an NVIDIA hanuman robot via URDF.\n\nPress the 'Open in IDE' button to view the source code."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Command Panel",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5):
dict = {
"label": "Load Robot",
"type": "button",
"text": "Load",
"tooltip": "Load a O-Bike Robot into the Scene",
"on_clicked_fn": self._on_load_robot,
}
btn_builder(**dict)
dict = {
"label": "Configure Drives",
"type": "button",
"text": "Configure",
"tooltip": "Configure Joint Drives",
"on_clicked_fn": self._on_config_robot,
}
btn_builder(**dict)
dict = {
"label": "Spin Robot",
"type": "button",
"text": "move",
"tooltip": "Spin the Robot in Place",
"on_clicked_fn": self._on_config_drives,
}
btn_builder(**dict)
def on_shutdown(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
def _menu_callback(self):
self._window.visible = not self._window.visible
def _on_load_robot(self):
load_stage = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
asyncio.ensure_future(self._load_obike(load_stage))
async def _load_obike(self, task):
done, pending = await asyncio.wait({task})
if task in done:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = False
# import_config.distance_scale = 100
            import_config.fix_base = False  # do not fix the base to the world
import_config.make_default_prim = True
import_config.create_physics_scene = True
omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=self._extension_path + "/data/urdf/robots/hanumanURDFv1/urdf/hanumanURDFv1.urdf",
import_config=import_config,
)
viewport = omni.kit.viewport_legacy.get_default_viewport_window()
viewport.set_camera_position("/OmniverseKit_Persp", -51, 63, 25, True)
viewport.set_camera_target("/OmniverseKit_Persp", 220, -218, -160, True)
stage = omni.usd.get_context().get_stage()
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
result, plane_path = omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, 0),
color=Gf.Vec3f(0.5),
)
# make sure the ground plane is under root prim and not robot
omni.kit.commands.execute(
"MovePrimCommand", path_from=plane_path, path_to="/groundPlane", keep_world_transform=True
)
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
def _on_config_robot(self):
        ## Attach IMU sensor ##
self._is = _isaac_sensor.acquire_imu_sensor_interface()
self.body_path = "/full_assemandframe01.SLDASM/body"
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateImuSensor",
path="/sensor",
parent=self.body_path,
sensor_period=1 / 500.0,
offset=Gf.Vec3d(0, 0, 5),
orientation=Gf.Quatd(1, 0, 0, 0),
visualize=True,
)
def _on_config_drives(self):
# self._on_config_robot() # make sure drives are configured first
stage = omni.usd.get_context().get_stage()
# set each axis to spin at a rate of 1 rad/s
axle_steering = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/front_wheel_arm_joint"), "angular")
axle_rear_wheel = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/rear_wheel_joint"), "angular")
axle_reaction_wheel = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/obike/chassic/reaction_wheel_joint"), "angular")
# omega = velocity2omega(0, 0.1, 0)
# print(omega)
set_drive_parameters(axle_steering, "position", math.degrees(3), 0, math.radians(1e7))
set_drive_parameters(axle_rear_wheel, "velocity", math.degrees(-10), 0, math.radians(1e7))
set_drive_parameters(axle_reaction_wheel, "velocity", math.degrees(4), 0, math.radians(1e7))
| 7,703 | Python | 43.275862 | 151 | 0.569648 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/taufinder/scripts/common.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import carb.tokens
import omni
from pxr import UsdGeom, PhysxSchema, UsdPhysics
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
| 1,653 | Python | 34.191489 | 105 | 0.69147 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/taufinder/scripts/train_taufinder.py | import gym
from gym import spaces
import numpy as np
import math
import time
import carb
from omni.isaac.kit import SimulationApp
from omni.isaac.imu_sensor import _imu_sensor
# def discount_rewards(r, gamma=0.8):
# discounted_r = np.zeros_like(r)
# running_add = 0
# for t in reversed(range(0, r.size)):
# running_add = running_add * gamma + r[t]
# discounted_r[t] = running_add
# return discounted_r
# def q2falling(q):
# q[0] = 1 if q[0] > 1 else q[0]
# try:
# if q[1] == 0 and q[2] == 0 and q[3] == 0:
# return 0
# return 2*math.acos(q[0])*math.sqrt((q[1]**2 + q[2]**2)/(q[1]**2 + q[2]**2 + q[3]**2))
# except:
# print(q)
# return 0
## Specify simulation parameters ##
_physics_dt = 1/1000
_rendering_dt = 1/30
_max_episode_length = 60/_physics_dt # 60 seconds after reset
_iteration_count = 0
_display_every_iter = 1
_update_every = 1
_explore_every = 5
_headless = False
simulation_app = SimulationApp({"headless": _headless, "anti_aliasing": 0})
## Setup World ##
from omni.isaac.core import World
from taufinder import Taufinder
# from omni.isaac.core.objects import DynamicSphere
world = World(physics_dt=_physics_dt, rendering_dt=_rendering_dt, stage_units_in_meters=0.01)
world.scene.add_default_ground_plane()
robot = world.scene.add(
Taufinder(
prim_path="/taufinder",
name="taufinder_mk0",
position=np.array([0, 0.0, 1.435]),
orientation=np.array([1.0, 0.0, 0.0, 0.0]),
)
)
# # Setup IMU ##
# imu_interface = _imu_sensor.acquire_imu_sensor_interface()
# props = _imu_sensor.SensorProperties()
# props.position = carb.Float3(0, 0, 10) # translate from /obike/chassic to above motor (cm.)
# props.orientation = carb.Float4(1, 0, 0, 0) # (x, y, z, w)
# props.sensorPeriod = 1 / 500 # 2ms
# _sensor_handle = imu_interface.add_sensor_on_body("/obike/chassic", props)
# ## Create LSTM Model ##
# import tensorflow as tf
# from tensorflow import keras
# from tensorflow.keras import layers
# ## Train parameter ##
# n_episodes = 10000
# input_dim = 3
# output_dim = 1
# num_timesteps = 1
# batch_size = 1
# lstm_nodes = 32
# input_layer = tf.keras.Input(shape=(num_timesteps, input_dim), batch_size=batch_size)
# lstm_cell = tf.keras.layers.LSTMCell(
# lstm_nodes,
# kernel_initializer='glorot_uniform',
# recurrent_initializer='glorot_uniform',
# bias_initializer='zeros',
# )
# lstm_layer = tf.keras.layers.RNN(
# lstm_cell,
# return_state=True,
# return_sequences=True,
# stateful=True,
# )
# lstm_out, hidden_state, cell_state = lstm_layer(input_layer)
# hidden_layer = tf.keras.layers.Dense(32)(lstm_out)
# output = tf.keras.layers.Dense(output_dim)(hidden_layer)
# model = tf.keras.Model(
# inputs=input_layer,
# outputs=[hidden_state, cell_state, output]
# )
# optimizer = tf.optimizers.Adam(learning_rate=0.0025)
# loss_fn = keras.losses.MeanSquaredError() # Instantiate a loss function.
# # train_mse_metric = keras.metrics.MeanSquaredError()
# scores = []
# gradBuffer = model.trainable_variables
# for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
# for e in range(n_episodes):
# # print("\nStart of episodes %d" % (e,))
# # Reset the environment
# world.reset()
# lstm_layer.reset_states()
# # lstm_layer.reset_states(states=[np.zeros((batch_size, lstm_nodes)), np.zeros((batch_size, lstm_nodes))])
# previous_states = None # reset LSTM's internal state
# render_counter = 0
# ep_memory = []
# ep_score = 0
# done = False
# previous = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
# present = {'robot_position': None, 'robot_rotation': None, 'fall_rotation': None}
# while not done:
# previous['robot_position'], previous['robot_rotation'] = robot.get_world_pose()
# previous['fall_rotation'] = q2falling(previous['robot_rotation'])
# reading = imu_interface.get_sensor_readings(_sensor_handle)
# if reading.shape[0] == 0:# no valid data in buffer -> init observation wih zeros
# observations = np.array([0, 0, 0])
# else: # IMU will return [???, acc_x, acc_y, acc_z, gyr_x, gyr_y, gyr_z]
# observations = np.array([reading[-1]["lin_acc_y"], # Use only lastest data in buffer
# reading[-1]["lin_acc_z"],
# reading[-1]["ang_vel_x"]])
# ## Scale accel from (-1000, 1000) -> (0, 1) for NN inputs
# ## Scale gyro from (-4.36, 4.36) -> (0, 1) for NN inputs (4.36 rad = 250 deg)
# # print(observations)
# # time.sleep(1)
# observations[0] = (observations[0] / 2000) + 0.5
# observations[1] = (observations[1] / 2000) + 0.5
# observations[2] = (observations[2] / 8.72) + 0.5
# observations = np.array(observations, dtype=np.float32).reshape((batch_size, num_timesteps, input_dim)) # add extra dimension for batch_size=1
# # print(observations)
# with tf.GradientTape() as tape:
# # forward pass
# h_state, c_state, logits = model(observations) # required input_shape=(None, 1, 3)
# # logits, previous_states = model.call(inputs=observations, states=previous_states, return_state=True, training=True)
# a_dist = logits.numpy()
# a_dist = (a_dist-0.5) * 2 # map (0,1) -> (-1, 1)
# ## Choose random action with p = action dist
# # print("A_DIST")
# # print(a_dist)
# # a = np.random.choice(a_dist[0], p=a_dist[0])
# # a = np.argmax(a_dist == a)
# if e % _explore_every == 0:
# a = ((np.random.rand(*a_dist.shape))-0.5)*2
# else:
# a = a_dist + 0.1*((np.random.rand(*a_dist.shape))-0.5) # random with uniform distribution (.shape will return tuple so unpack with *)
# loss = loss_fn([a], logits)
# # loss = previous['fall_rotation']
# ## EXECUTE ACTION ##
# from omni.isaac.core.utils.types import ArticulationAction
# # print("LOGITS")
# # print(logits)
# # print(a)
# robot.apply_wheel_actions(ArticulationAction(joint_efforts=[a*100*0.607, 0, 0]))
# world.step(render=True)
# present['robot_position'], present['robot_rotation'] = robot.get_world_pose()
# present['fall_rotation'] = q2falling(present['robot_rotation'])
# reward = previous['fall_rotation'] - present['fall_rotation'] # calculate reward from movement toward center
# ## Check for stop event ##
# exceed_time_limit = world.current_time_step_index >= _max_episode_length
# robot_fall = True if previous['fall_rotation'] > 50 / 180 * math.pi else False
# done = exceed_time_limit or robot_fall
# ep_score += reward
# # if done: reward-= 10 # small trick to make training faster
# grads = tape.gradient(loss, model.trainable_weights)
# ep_memory.append([grads, reward])
# scores.append(ep_score)
# # Discount the rewards
# ep_memory = np.array(ep_memory)
# ep_memory[:, 1] = discount_rewards(ep_memory[:, 1])
# for grads, reward in ep_memory:
# for ix, grad in enumerate(grads):
# gradBuffer[ix] += grad * reward
# if e % _update_every == 0:
# optimizer.apply_gradients(zip(gradBuffer, model.trainable_variables))
# for ix, grad in enumerate(gradBuffer): gradBuffer[ix] = grad * 0
# if e % 100 == 0:
# print("Episode {} Score {}".format(e, np.mean(scores[-100:])))
from omni.isaac.core.utils.types import ArticulationAction

while True:
    ## EXECUTE ACTION ##
    robot.apply_wheel_actions(ArticulationAction(joint_efforts=[10000.00]))
world.step() | 7,948 | Python | 39.146464 | 154 | 0.603926 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/taufinder/scripts/import_taufinder.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni
import omni.kit.commands
import asyncio
import math
import weakref
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from .common import set_drive_parameters
from pxr import UsdLux, Sdf, Gf, UsdPhysics
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder
EXTENSION_NAME = "Import taufinder"
class Extension(omni.ext.IExt):
def on_startup(self, ext_id: str):
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_id
self._extension_path = ext_manager.get_extension_path(ext_id)
self._menu_items = [
MenuItemDescription(
name="Import Robots",
sub_menu=[
MenuItemDescription(name="taufinder URDF", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())
],
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self._build_ui()
def _build_ui(self):
self._window = omni.ui.Window(
EXTENSION_NAME, width=0, height=0, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
with ui.VStack(spacing=5, height=0):
title = "Import a taufinder Robot via URDF"
doc_link = "https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_urdf.html"
overview = "This Example shows you import an NVIDIA taufinder robot via URDF.\n\nPress the 'Open in IDE' button to view the source code."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Command Panel",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5):
dict = {
"label": "Load Robot",
"type": "button",
"text": "Load",
"tooltip": "Load a taufinder Robot into the Scene",
"on_clicked_fn": self._on_load_robot,
}
btn_builder(**dict)
dict = {
"label": "Configure Drives",
"type": "button",
"text": "Configure",
"tooltip": "Configure Joint Drives",
"on_clicked_fn": self._on_config_robot,
}
btn_builder(**dict)
dict = {
"label": "Spin Robot",
"type": "button",
"text": "move",
"tooltip": "Spin the Robot in Place",
"on_clicked_fn": self._on_config_drives,
}
btn_builder(**dict)
def on_shutdown(self):
remove_menu_items(self._menu_items, "Isaac Examples")
self._window = None
def _menu_callback(self):
self._window.visible = not self._window.visible
def _on_load_robot(self):
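        # Create a fresh stage asynchronously, then load the URDF once the stage creation task completes.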
load_stage = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
asyncio.ensure_future(self._load_obike(load_stage))
async def _load_obike(self, task):
done, pending = await asyncio.wait({task})
if task in done:
status, import_config = omni.kit.commands.execute("URDFCreateImportConfig")
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = True
# import_config.distance_scale = 100
            import_config.fix_base = True # Attach wheel to /groundPlane
import_config.make_default_prim = True
import_config.create_physics_scene = True
omni.kit.commands.execute(
"URDFParseAndImportFile",
urdf_path=self._extension_path + "/data/urdf/robots/taufinder/urdf/taufinder.urdf",
import_config=import_config,
)
viewport = omni.kit.viewport.get_default_viewport_window()
viewport.set_camera_position("/OmniverseKit_Persp", -51, 63, 25, True)
viewport.set_camera_target("/OmniverseKit_Persp", 220, -218, -160, True)
stage = omni.usd.get_context().get_stage()
scene = UsdPhysics.Scene.Define(stage, Sdf.Path("/physicsScene"))
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, 0.0, -1.0))
scene.CreateGravityMagnitudeAttr().Set(981.0)
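            # 981.0 is cm/s^2 (the stage is authored in centimeters), equivalent to 9.81 m/s^2.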
result, plane_path = omni.kit.commands.execute(
"AddGroundPlaneCommand",
stage=stage,
planePath="/groundPlane",
axis="Z",
size=1500.0,
position=Gf.Vec3f(0, 0, 0),
color=Gf.Vec3f(0.5),
)
# make sure the ground plane is under root prim and not robot
omni.kit.commands.execute(
"MovePrimCommand", path_from=plane_path, path_to="/groundPlane", keep_world_transform=True
)
distantLight = UsdLux.DistantLight.Define(stage, Sdf.Path("/DistantLight"))
distantLight.CreateIntensityAttr(500)
def _on_config_robot(self):
stage = omni.usd.get_context().get_stage()
# make front wheel spin freely
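        # NOTE: this prim path is inherited from the obike example; adjust it to the taufinder stage layout if needed.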
prim_path = "/obike/chassic/front_wheel_joint"
prim = stage.GetPrimAtPath(prim_path)
omni.kit.commands.execute(
"UnapplyAPISchemaCommand",
api=UsdPhysics.DriveAPI,
prim=prim,
api_prefix="drive",
multiple_api_token="angular",
)
def _on_config_drives(self):
# self._on_config_robot() # make sure drives are configured first
stage = omni.usd.get_context().get_stage()
        # apply a constant effort to the motor axis drive
axle_motor = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath("/taufinder/base_link/motor_axis_joint"), "angular")
# omega = velocity2omega(0, 0.1, 0)
# print(omega)
        set_drive_parameters(axle_motor, "effort", 10)
| 7,119 | Python | 41.130177 | 153 | 0.559489 |
teerameth/omni.isaac.fiborobotlab/omni/isaac/fiborobotlab/taufinder/scripts/taufinder.py | from typing import Optional, Tuple
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.prims import get_prim_at_path, define_prim
import carb
class Taufinder(Robot):
"""[summary]
Args:
stage (Usd.Stage): [description]
prim_path (str): [description]
name (str): [description]
usd_path (str, optional): [description]
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "taufinder",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
prim = get_prim_at_path(prim_path)
if not prim.IsValid():
prim = define_prim(prim_path, "Xform")
if usd_path:
prim.GetReferences().AddReference(usd_path)
else:
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
asset_path = nucleus_server + "/Library/taufinder.usd" # load from nucleus server
prim.GetReferences().AddReference(asset_path)
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
        self._wheel_dof_names = ["motor_joint"]
self._wheel_dof_indices = 0
return
@property
    def wheel_dof_indices(self) -> Tuple[int]:
        """[summary]
        Returns:
            Tuple[int]: [description]
"""
return self._wheel_dof_indices
    def get_wheel_positions(self) -> float:
        """[summary]
        Returns:
            float: [description]
        """
        joint_positions = self.get_joint_positions()
        return joint_positions[self._wheel_dof_indices[0]]
    def set_wheel_positions(self, positions: Tuple[float]) -> None:
        """[summary]
        Args:
            positions (Tuple[float]): [description]
        """
        joint_positions = self.get_joint_positions()
        joint_positions[self._wheel_dof_indices[0]] = positions[0]
        self.set_joint_positions(positions=joint_positions)
        return
    def get_wheel_velocities(self) -> float:
        """[summary]
        Returns:
            float: [description]
        """
        joint_velocities = self.get_joint_velocities()
        return joint_velocities[self._wheel_dof_indices[0]]
    def set_wheel_velocities(self, velocities: Tuple[float]) -> None:
        """[summary]
        Args:
            velocities (Tuple[float]): [description]
        """
        joint_velocities = self.get_joint_velocities()
        joint_velocities[self._wheel_dof_indices[0]] = velocities[0]
        self.set_joint_velocities(velocities=joint_velocities)
        return
    def get_wheel_efforts(self) -> float:
        """[summary]
        Returns:
            float: [description]
        """
joint_efforts = self.get_joint_efforts()
return joint_efforts[self._wheel_dof_indices[0]]
    def set_wheel_efforts(self, efforts: Tuple[float]) -> None:
        """[summary]
        Args:
            efforts (Tuple[float]): [description]
        """
        joint_efforts = np.zeros(self.num_dof)
        joint_efforts[self._wheel_dof_indices[0]] = efforts[0]
        self.set_joint_efforts(efforts=joint_efforts)
        return
def apply_wheel_actions(self, actions: ArticulationAction) -> None:
"""[summary]
Args:
actions (ArticulationAction): [description]
"""
actions_length = actions.get_length()
if actions_length is not None and actions_length != 1:
raise Exception("ArticulationAction passed should be equal to 1")
joint_actions = ArticulationAction()
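        # Build a full-articulation action: the wheel DOF receives the command, all other DOFs are filled with zeros.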
if actions.joint_positions is not None:
joint_actions.joint_positions = np.zeros(self.num_dof)
joint_actions.joint_positions[self._wheel_dof_indices[0]] = actions.joint_positions[0]
if actions.joint_velocities is not None:
joint_actions.joint_velocities = np.zeros(self.num_dof)
joint_actions.joint_velocities[self._wheel_dof_indices[0]] = actions.joint_velocities[0]
if actions.joint_efforts is not None:
joint_actions.joint_efforts = np.zeros(self.num_dof)
            joint_actions.joint_efforts[self._wheel_dof_indices[0]] = actions.joint_efforts[0]
self.apply_action(control_actions=joint_actions)
return
def initialize(self) -> None:
"""[summary]
"""
super().initialize()
# print(self._dofs_infos) # print Orderdict of all dof_name:dof_object
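        # Resolve the DOF index of the wheel joint by name once the articulation has been initialized.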
self._wheel_dof_indices = (
self.get_dof_index(self._wheel_dof_names[0]),
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
# print(len(self._articulation_controller._dof_controllers))
# print(self._articulation_controller._dof_controllers)
        # Assign kd only for the driven wheel joint(s) and leave the others as None
kds = [None]*len(self._articulation_controller._dof_controllers)
for i in self._wheel_dof_indices: kds[i] = 1e2
self._articulation_controller.set_gains(kds=kds)
self._articulation_controller.switch_control_mode(mode="effort") # effort, velocity, position
return | 6,426 | Python | 37.716867 | 151 | 0.609711 |
teerameth/omni.isaac.fiborobotlab/data/urdf/robots/hanumanURDFv1/package.xml | <package format="2">
<name>hanumanURDFv1</name>
<version>1.0.0</version>
<description>
<p>URDF Description package for hanumanURDFv1</p>
<p>This package contains configuration data, 3D models and launch files
for hanumanURDFv1 robot</p>
</description>
<author>TODO</author>
<maintainer email="[email protected]" />
<license>BSD</license>
<buildtool_depend>catkin</buildtool_depend>
<depend>roslaunch</depend>
<depend>robot_state_publisher</depend>
<depend>rviz</depend>
<depend>joint_state_publisher_gui</depend>
<depend>gazebo</depend>
<export>
<architecture_independent />
</export>
</package> | 634 | XML | 29.238094 | 75 | 0.714511 |
teerameth/omni.isaac.fiborobotlab/data/urdf/robots/hanumanURDFv1/config/joint_names_hanumanURDFv1.yaml | controller_joint_names: ['', 'JR1', 'JR2', 'JR3', 'JR4', 'JR5', 'JR6', 'JL1', 'JL2', 'JL3', 'JL4', 'JL5', 'JL6', 'JAR1', 'JAR2', 'JAR3', 'JAL1', 'JAL2', 'JAL3', 'JH1', 'JH2', ]
| 177 | YAML | 87.999956 | 176 | 0.485876 |
AkaneFoundation/Omni/README.md | # Omni


[](https://t.me/AkaneDev)
## Features
- Up-to-date material 3 design
- Lightweight, no spyware or bloat
- Compass with latitude & longitude
- Gradienter (WIP)
- Barometer (WIP)
- Coin flipper (WIP)
- Ruler (WIP)
- Strength-adjustable flashlight (WIP)
## Installation
You can download the latest stable version of the app from [GitHub releases](https://github.com/AkaneFoundation/Omni/releases/latest).
## Building
To build this app, you will need the latest beta version of [Android Studio](https://developer.android.com/studio) and a fast network connection.
## License
This project is licensed under the GNU General Public License v3.0 - see the [LICENSE](https://github.com/AkaneFoundation/Omni/blob/master/LICENSE) file for details.
## Notice
- For bug reporting: [Telegram](https://t.me/AkaneDev)
| 1,217 | Markdown | 44.111109 | 165 | 0.771569 |
AkaneFoundation/Omni/app/src/main/res/values/strings.xml | <resources>
<string name="app_name">Omni</string>
<string name="north">N</string>
<string name="northeast">NE</string>
<string name="northwest">NW</string>
<string name="south">S</string>
<string name="southeast">SE</string>
<string name="southwest">SW</string>
<string name="west">W</string>
<string name="east">E</string>
<string name="direction_1" translatable="false">0</string>
<string name="direction_2" translatable="false">30</string>
<string name="direction_3" translatable="false">60</string>
<string name="direction_4" translatable="false">90</string>
<string name="direction_5" translatable="false">120</string>
<string name="direction_6" translatable="false">150</string>
<string name="direction_7" translatable="false">180</string>
<string name="direction_8" translatable="false">210</string>
<string name="direction_9" translatable="false">240</string>
<string name="direction_10" translatable="false">270</string>
<string name="direction_11" translatable="false">300</string>
<string name="direction_12" translatable="false">330</string>
<string name="degree_format" translatable="false">" %1$s°"</string>
<string name="longitude_desc">N</string>
<string name="latitude_desc">E</string>
<string name="unknown_location">Unknown location</string>
<string name="perm_dialog_title">Location</string>
<string name="decline">No</string>
<string name="accept">Yes</string>
<string name="perm_dialog_text">Omni requires your location data in order to display latitude and longitude. That may requires communications with your location provider\'s server. If you select \"no\", you still can use the basic functionalities of omni compass.</string>
<string name="position_default_text" translatable="false">__.____°</string>
<string name="settings">Settings</string>
<string name="settings_app_name">App name</string>
<string name="author" translatable="false">123Duo3, AkaneTan</string>
<string name="settings_author_name">Author</string>
<string name="version">Version</string>
<string name="settings_unknown_version">Unknown version</string>
<string name="warning_dialog_title">Warning</string>
<string name="warning_dialog_text">It looks like your device doesn\'t have an available rotation vector sensor. The compass feature of Omni will be disabled. However, you still can use the location feature.</string>
<string name="dismiss">Dismiss</string>
<string name="delimiter">,\ </string>
<string name="location_format">%1$s%2$s%3$s%4$s%5$s%6$s%7$s%8$s%9$s</string>
</resources> | 2,651 | XML | 60.674417 | 276 | 0.701622 |
AkaneFoundation/Omni/app/src/main/res/values/theme_overlays.xml | <resources>
<style name="ThemeOverlay.AppTheme.MediumContrast" parent="Theme.Material3.Light.NoActionBar">
<item name="colorPrimary">@color/md_theme_primary_mediumContrast</item>
<item name="colorOnPrimary">@color/md_theme_onPrimary_mediumContrast</item>
<item name="colorPrimaryContainer">@color/md_theme_primaryContainer_mediumContrast</item>
<item name="colorOnPrimaryContainer">@color/md_theme_onPrimaryContainer_mediumContrast</item>
<item name="colorSecondary">@color/md_theme_secondary_mediumContrast</item>
<item name="colorOnSecondary">@color/md_theme_onSecondary_mediumContrast</item>
<item name="colorSecondaryContainer">@color/md_theme_secondaryContainer_mediumContrast</item>
<item name="colorOnSecondaryContainer">@color/md_theme_onSecondaryContainer_mediumContrast</item>
<item name="colorTertiary">@color/md_theme_tertiary_mediumContrast</item>
<item name="colorOnTertiary">@color/md_theme_onTertiary_mediumContrast</item>
<item name="colorTertiaryContainer">@color/md_theme_tertiaryContainer_mediumContrast</item>
<item name="colorOnTertiaryContainer">@color/md_theme_onTertiaryContainer_mediumContrast</item>
<item name="colorError">@color/md_theme_error_mediumContrast</item>
<item name="colorOnError">@color/md_theme_onError_mediumContrast</item>
<item name="colorErrorContainer">@color/md_theme_errorContainer_mediumContrast</item>
<item name="colorOnErrorContainer">@color/md_theme_onErrorContainer_mediumContrast</item>
<item name="android:colorBackground">@color/md_theme_background_mediumContrast</item>
<item name="colorOnBackground">@color/md_theme_onBackground_mediumContrast</item>
<item name="colorSurface">@color/md_theme_surface_mediumContrast</item>
<item name="colorOnSurface">@color/md_theme_onSurface_mediumContrast</item>
<item name="colorSurfaceVariant">@color/md_theme_surfaceVariant_mediumContrast</item>
<item name="colorOnSurfaceVariant">@color/md_theme_onSurfaceVariant_mediumContrast</item>
<item name="colorOutline">@color/md_theme_outline_mediumContrast</item>
<item name="colorOutlineVariant">@color/md_theme_outlineVariant_mediumContrast</item>
<item name="colorSurfaceInverse">@color/md_theme_inverseSurface_mediumContrast</item>
<item name="colorOnSurfaceInverse">@color/md_theme_inverseOnSurface_mediumContrast</item>
<item name="colorPrimaryInverse">@color/md_theme_inversePrimary_mediumContrast</item>
<item name="colorPrimaryFixed">@color/md_theme_primaryFixed_mediumContrast</item>
<item name="colorOnPrimaryFixed">@color/md_theme_onPrimaryFixed_mediumContrast</item>
<item name="colorPrimaryFixedDim">@color/md_theme_primaryFixedDim_mediumContrast</item>
<item name="colorOnPrimaryFixedVariant">@color/md_theme_onPrimaryFixedVariant_mediumContrast</item>
<item name="colorSecondaryFixed">@color/md_theme_secondaryFixed_mediumContrast</item>
<item name="colorOnSecondaryFixed">@color/md_theme_onSecondaryFixed_mediumContrast</item>
<item name="colorSecondaryFixedDim">@color/md_theme_secondaryFixedDim_mediumContrast</item>
<item name="colorOnSecondaryFixedVariant">@color/md_theme_onSecondaryFixedVariant_mediumContrast</item>
<item name="colorTertiaryFixed">@color/md_theme_tertiaryFixed_mediumContrast</item>
<item name="colorOnTertiaryFixed">@color/md_theme_onTertiaryFixed_mediumContrast</item>
<item name="colorTertiaryFixedDim">@color/md_theme_tertiaryFixedDim_mediumContrast</item>
<item name="colorOnTertiaryFixedVariant">@color/md_theme_onTertiaryFixedVariant_mediumContrast</item>
<item name="colorSurfaceDim">@color/md_theme_surfaceDim_mediumContrast</item>
<item name="colorSurfaceBright">@color/md_theme_surfaceBright_mediumContrast</item>
<item name="colorSurfaceContainerLowest">@color/md_theme_surfaceContainerLowest_mediumContrast</item>
<item name="colorSurfaceContainerLow">@color/md_theme_surfaceContainerLow_mediumContrast</item>
<item name="colorSurfaceContainer">@color/md_theme_surfaceContainer_mediumContrast</item>
<item name="colorSurfaceContainerHigh">@color/md_theme_surfaceContainerHigh_mediumContrast</item>
<item name="colorSurfaceContainerHighest">@color/md_theme_surfaceContainerHighest_mediumContrast</item>
</style>
<style name="ThemeOverlay.AppTheme.HighContrast" parent="Theme.Material3.Light.NoActionBar">
<item name="colorPrimary">@color/md_theme_primary_highContrast</item>
<item name="colorOnPrimary">@color/md_theme_onPrimary_highContrast</item>
<item name="colorPrimaryContainer">@color/md_theme_primaryContainer_highContrast</item>
<item name="colorOnPrimaryContainer">@color/md_theme_onPrimaryContainer_highContrast</item>
<item name="colorSecondary">@color/md_theme_secondary_highContrast</item>
<item name="colorOnSecondary">@color/md_theme_onSecondary_highContrast</item>
<item name="colorSecondaryContainer">@color/md_theme_secondaryContainer_highContrast</item>
<item name="colorOnSecondaryContainer">@color/md_theme_onSecondaryContainer_highContrast</item>
<item name="colorTertiary">@color/md_theme_tertiary_highContrast</item>
<item name="colorOnTertiary">@color/md_theme_onTertiary_highContrast</item>
<item name="colorTertiaryContainer">@color/md_theme_tertiaryContainer_highContrast</item>
<item name="colorOnTertiaryContainer">@color/md_theme_onTertiaryContainer_highContrast</item>
<item name="colorError">@color/md_theme_error_highContrast</item>
<item name="colorOnError">@color/md_theme_onError_highContrast</item>
<item name="colorErrorContainer">@color/md_theme_errorContainer_highContrast</item>
<item name="colorOnErrorContainer">@color/md_theme_onErrorContainer_highContrast</item>
<item name="android:colorBackground">@color/md_theme_background_highContrast</item>
<item name="colorOnBackground">@color/md_theme_onBackground_highContrast</item>
<item name="colorSurface">@color/md_theme_surface_highContrast</item>
<item name="colorOnSurface">@color/md_theme_onSurface_highContrast</item>
<item name="colorSurfaceVariant">@color/md_theme_surfaceVariant_highContrast</item>
<item name="colorOnSurfaceVariant">@color/md_theme_onSurfaceVariant_highContrast</item>
<item name="colorOutline">@color/md_theme_outline_highContrast</item>
<item name="colorOutlineVariant">@color/md_theme_outlineVariant_highContrast</item>
<item name="colorSurfaceInverse">@color/md_theme_inverseSurface_highContrast</item>
<item name="colorOnSurfaceInverse">@color/md_theme_inverseOnSurface_highContrast</item>
<item name="colorPrimaryInverse">@color/md_theme_inversePrimary_highContrast</item>
<item name="colorPrimaryFixed">@color/md_theme_primaryFixed_highContrast</item>
<item name="colorOnPrimaryFixed">@color/md_theme_onPrimaryFixed_highContrast</item>
<item name="colorPrimaryFixedDim">@color/md_theme_primaryFixedDim_highContrast</item>
<item name="colorOnPrimaryFixedVariant">@color/md_theme_onPrimaryFixedVariant_highContrast</item>
<item name="colorSecondaryFixed">@color/md_theme_secondaryFixed_highContrast</item>
<item name="colorOnSecondaryFixed">@color/md_theme_onSecondaryFixed_highContrast</item>
<item name="colorSecondaryFixedDim">@color/md_theme_secondaryFixedDim_highContrast</item>
<item name="colorOnSecondaryFixedVariant">@color/md_theme_onSecondaryFixedVariant_highContrast</item>
<item name="colorTertiaryFixed">@color/md_theme_tertiaryFixed_highContrast</item>
<item name="colorOnTertiaryFixed">@color/md_theme_onTertiaryFixed_highContrast</item>
<item name="colorTertiaryFixedDim">@color/md_theme_tertiaryFixedDim_highContrast</item>
<item name="colorOnTertiaryFixedVariant">@color/md_theme_onTertiaryFixedVariant_highContrast</item>
<item name="colorSurfaceDim">@color/md_theme_surfaceDim_highContrast</item>
<item name="colorSurfaceBright">@color/md_theme_surfaceBright_highContrast</item>
<item name="colorSurfaceContainerLowest">@color/md_theme_surfaceContainerLowest_highContrast</item>
<item name="colorSurfaceContainerLow">@color/md_theme_surfaceContainerLow_highContrast</item>
<item name="colorSurfaceContainer">@color/md_theme_surfaceContainer_highContrast</item>
<item name="colorSurfaceContainerHigh">@color/md_theme_surfaceContainerHigh_highContrast</item>
<item name="colorSurfaceContainerHighest">@color/md_theme_surfaceContainerHighest_highContrast</item>
</style>
</resources>
| 8,875 | XML | 88.656565 | 111 | 0.754254 |
AkaneFoundation/Omni/app/src/main/res/values/themes.xml | <resources xmlns:tools="http://schemas.android.com/tools">
<!-- Base application theme. -->
<style name="Base.Theme.Omni" parent="Theme.Material3.DynamicColors.DayNight.NoActionBar">
<item name="android:forceDarkAllowed" tools:targetApi="q">false</item>
<item name="android:windowLayoutInDisplayCutoutMode" tools:targetApi="o_mr1">shortEdges</item>
<item name="fontFamily">@font/hankengrotesk</item>
<item name="android:fontFamily">@font/hankengrotesk</item>
<item name="viewInflaterClass">uk.akane.omni.logic.ui.ViewCompatInflater</item>
<!-- Work around b/331383944: PreferenceFragmentCompat permanently mutates activity theme (enables vertical scrollbars) -->
<item name="android:scrollbars">none</item>
<item name="materialAlertDialogTheme">@style/ThemeOverlay.App.MaterialAlertDialog</item>
</style>
<style name="PreV31.Theme.Omni" parent="Base.Theme.Omni">
<item name="colorPrimary">@color/md_theme_primary</item>
<item name="colorOnPrimary">@color/md_theme_onPrimary</item>
<item name="colorPrimaryContainer">@color/md_theme_primaryContainer</item>
<item name="colorOnPrimaryContainer">@color/md_theme_onPrimaryContainer</item>
<item name="colorSecondary">@color/md_theme_secondary</item>
<item name="colorOnSecondary">@color/md_theme_onSecondary</item>
<item name="colorSecondaryContainer">@color/md_theme_secondaryContainer</item>
<item name="colorOnSecondaryContainer">@color/md_theme_onSecondaryContainer</item>
<item name="colorTertiary">@color/md_theme_tertiary</item>
<item name="colorOnTertiary">@color/md_theme_onTertiary</item>
<item name="colorTertiaryContainer">@color/md_theme_tertiaryContainer</item>
<item name="colorOnTertiaryContainer">@color/md_theme_onTertiaryContainer</item>
<item name="colorError">@color/md_theme_error</item>
<item name="colorOnError">@color/md_theme_onError</item>
<item name="colorErrorContainer">@color/md_theme_errorContainer</item>
<item name="colorOnErrorContainer">@color/md_theme_onErrorContainer</item>
<item name="android:colorBackground">@color/md_theme_background</item>
<item name="colorOnBackground">@color/md_theme_onBackground</item>
<item name="colorSurface">@color/md_theme_surface</item>
<item name="colorOnSurface">@color/md_theme_onSurface</item>
<item name="colorSurfaceVariant">@color/md_theme_surfaceVariant</item>
<item name="colorOnSurfaceVariant">@color/md_theme_onSurfaceVariant</item>
<item name="colorOutline">@color/md_theme_outline</item>
<item name="colorOutlineVariant">@color/md_theme_outlineVariant</item>
<item name="colorSurfaceInverse">@color/md_theme_inverseSurface</item>
<item name="colorOnSurfaceInverse">@color/md_theme_inverseOnSurface</item>
<item name="colorPrimaryInverse">@color/md_theme_inversePrimary</item>
<item name="colorPrimaryFixed">@color/md_theme_primaryFixed</item>
<item name="colorOnPrimaryFixed">@color/md_theme_onPrimaryFixed</item>
<item name="colorPrimaryFixedDim">@color/md_theme_primaryFixedDim</item>
<item name="colorOnPrimaryFixedVariant">@color/md_theme_onPrimaryFixedVariant</item>
<item name="colorSecondaryFixed">@color/md_theme_secondaryFixed</item>
<item name="colorOnSecondaryFixed">@color/md_theme_onSecondaryFixed</item>
<item name="colorSecondaryFixedDim">@color/md_theme_secondaryFixedDim</item>
<item name="colorOnSecondaryFixedVariant">@color/md_theme_onSecondaryFixedVariant</item>
<item name="colorTertiaryFixed">@color/md_theme_tertiaryFixed</item>
<item name="colorOnTertiaryFixed">@color/md_theme_onTertiaryFixed</item>
<item name="colorTertiaryFixedDim">@color/md_theme_tertiaryFixedDim</item>
<item name="colorOnTertiaryFixedVariant">@color/md_theme_onTertiaryFixedVariant</item>
<item name="colorSurfaceDim">@color/md_theme_surfaceDim</item>
<item name="colorSurfaceBright">@color/md_theme_surfaceBright</item>
<item name="colorSurfaceContainerLowest">@color/md_theme_surfaceContainerLowest</item>
<item name="colorSurfaceContainerLow">@color/md_theme_surfaceContainerLow</item>
<item name="colorSurfaceContainer">@color/md_theme_surfaceContainer</item>
<item name="colorSurfaceContainerHigh">@color/md_theme_surfaceContainerHigh</item>
<item name="colorSurfaceContainerHighest">@color/md_theme_surfaceContainerHighest</item>
<item name="materialAlertDialogTheme">@style/ThemeOverlay.App.MaterialAlertDialog</item>
</style>
<style name="ThemeOverlay.App.MaterialAlertDialog" parent="ThemeOverlay.Material3.MaterialAlertDialog.Centered">
<item name="fontFamily">sans-serif</item>
<item name="android:fontFamily">sans-serif</item>
<item name="materialAlertDialogTitleTextStyle">@style/MaterialAlertDialog.App.Title.Text</item>
<item name="materialAlertDialogBodyTextStyle">@style/MaterialAlertDialog.App.Body.Text</item>
<item name="materialAlertDialogTitleIconStyle">@style/MaterialAlertDialog.App.Icon</item>
<item name="buttonBarPositiveButtonStyle">@style/Widget.App.Button</item>
<item name="buttonBarNegativeButtonStyle">@style/Widget.App.Button</item>
</style>
<style name="MaterialAlertDialog.App.Title.Text" parent="MaterialAlertDialog.Material3.Title.Text.CenterStacked">
<item name="android:textSize">24sp</item>
</style>
<style name="MaterialAlertDialog.App.Body.Text" parent="MaterialAlertDialog.Material3.Body.Text">
<item name="android:textSize">15sp</item>
</style>
<style name="MaterialAlertDialog.App.Icon" parent="MaterialAlertDialog.Material3.Title.Icon.CenterStacked">
<item name="android:layout_height">32dp</item>
<item name="android:layout_width">32dp</item>
<item name="android:layout_marginTop">12dp</item>
</style>
<style name="Widget.App.Button" parent="Widget.Material3.Button.TextButton.Dialog">
<item name="android:textFontWeight">600</item>
</style>
<style name="CompassTextAppearance">
<item name="android:textSize">22sp</item>
<item name="android:textColor">?colorOutline</item>
<item name="android:textFontWeight">500</item>
</style>
<style name="Base.Night.Theme.Splash" parent="Theme.SplashScreen">
<item name="postSplashScreenTheme">@style/Theme.Omni</item>
</style>
<style name="Base.Day.Theme.Splash" parent="Base.Night.Theme.Splash">
<item name="android:windowLightStatusBar">true</item>
<item name="android:windowLightNavigationBar" tools:targetApi="o_mr1">true</item>
</style>
<style name="PreV31Day.Theme.Splash" parent="Base.Day.Theme.Splash">
<item name="windowSplashScreenAnimatedIcon">@drawable/ic_launcher_foreground</item>
<item name="windowSplashScreenIconBackgroundColor">@color/ic_launcher_background</item>
</style>
<style name="PreV31Night.Theme.Splash" parent="Base.Night.Theme.Splash">
<item name="windowSplashScreenAnimatedIcon">@drawable/ic_launcher_foreground</item>
<item name="windowSplashScreenIconBackgroundColor">@color/ic_launcher_background</item>
</style>
<style name="Day.Theme.Splash" parent="PreV31Day.Theme.Splash" />
<style name="Night.Theme.Splash" parent="PreV31Night.Theme.Splash" />
<style name="Theme.Omni" parent="PreV31.Theme.Omni" />
</resources> | 7,611 | XML | 62.966386 | 131 | 0.718959 |
AkaneFoundation/Omni/app/src/main/res/values/colors.xml | <resources>
<color name="md_theme_primary">#2E6A44</color>
<color name="md_theme_onPrimary">#FFFFFF</color>
<color name="md_theme_primaryContainer">#B1F1C1</color>
<color name="md_theme_onPrimaryContainer">#00210E</color>
<color name="md_theme_secondary">#4F6353</color>
<color name="md_theme_onSecondary">#FFFFFF</color>
<color name="md_theme_secondaryContainer">#D2E8D4</color>
<color name="md_theme_onSecondaryContainer">#0D1F13</color>
<color name="md_theme_tertiary">#3A646F</color>
<color name="md_theme_onTertiary">#FFFFFF</color>
<color name="md_theme_tertiaryContainer">#BEEAF6</color>
<color name="md_theme_onTertiaryContainer">#001F26</color>
<color name="md_theme_error">#BA1A1A</color>
<color name="md_theme_onError">#FFFFFF</color>
<color name="md_theme_errorContainer">#FFDAD6</color>
<color name="md_theme_onErrorContainer">#410002</color>
<color name="md_theme_background">#F6FBF3</color>
<color name="md_theme_onBackground">#181D18</color>
<color name="md_theme_surface">#F6FBF3</color>
<color name="md_theme_onSurface">#181D18</color>
<color name="md_theme_surfaceVariant">#DDE5DB</color>
<color name="md_theme_onSurfaceVariant">#414942</color>
<color name="md_theme_outline">#717971</color>
<color name="md_theme_outlineVariant">#C1C9BF</color>
<color name="md_theme_scrim">#000000</color>
<color name="md_theme_inverseSurface">#2C322D</color>
<color name="md_theme_inverseOnSurface">#EDF2EB</color>
<color name="md_theme_inversePrimary">#96D5A6</color>
<color name="md_theme_primaryFixed">#B1F1C1</color>
<color name="md_theme_onPrimaryFixed">#00210E</color>
<color name="md_theme_primaryFixedDim">#96D5A6</color>
<color name="md_theme_onPrimaryFixedVariant">#12512E</color>
<color name="md_theme_secondaryFixed">#D2E8D4</color>
<color name="md_theme_onSecondaryFixed">#0D1F13</color>
<color name="md_theme_secondaryFixedDim">#B6CCB8</color>
<color name="md_theme_onSecondaryFixedVariant">#384B3C</color>
<color name="md_theme_tertiaryFixed">#BEEAF6</color>
<color name="md_theme_onTertiaryFixed">#001F26</color>
<color name="md_theme_tertiaryFixedDim">#A2CEDA</color>
<color name="md_theme_onTertiaryFixedVariant">#214C57</color>
<color name="md_theme_surfaceDim">#D7DBD4</color>
<color name="md_theme_surfaceBright">#F6FBF3</color>
<color name="md_theme_surfaceContainerLowest">#FFFFFF</color>
<color name="md_theme_surfaceContainerLow">#F0F5ED</color>
<color name="md_theme_surfaceContainer">#EBEFE8</color>
<color name="md_theme_surfaceContainerHigh">#E5EAE2</color>
<color name="md_theme_surfaceContainerHighest">#DFE4DC</color>
<color name="md_theme_primary_mediumContrast">#0C4D2A</color>
<color name="md_theme_onPrimary_mediumContrast">#FFFFFF</color>
<color name="md_theme_primaryContainer_mediumContrast">#458158</color>
<color name="md_theme_onPrimaryContainer_mediumContrast">#FFFFFF</color>
<color name="md_theme_secondary_mediumContrast">#344738</color>
<color name="md_theme_onSecondary_mediumContrast">#FFFFFF</color>
<color name="md_theme_secondaryContainer_mediumContrast">#657A69</color>
<color name="md_theme_onSecondaryContainer_mediumContrast">#FFFFFF</color>
<color name="md_theme_tertiary_mediumContrast">#1C4853</color>
<color name="md_theme_onTertiary_mediumContrast">#FFFFFF</color>
<color name="md_theme_tertiaryContainer_mediumContrast">#517B86</color>
<color name="md_theme_onTertiaryContainer_mediumContrast">#FFFFFF</color>
<color name="md_theme_error_mediumContrast">#8C0009</color>
<color name="md_theme_onError_mediumContrast">#FFFFFF</color>
<color name="md_theme_errorContainer_mediumContrast">#DA342E</color>
<color name="md_theme_onErrorContainer_mediumContrast">#FFFFFF</color>
<color name="md_theme_background_mediumContrast">#F6FBF3</color>
<color name="md_theme_onBackground_mediumContrast">#181D18</color>
<color name="md_theme_surface_mediumContrast">#F6FBF3</color>
<color name="md_theme_onSurface_mediumContrast">#181D18</color>
<color name="md_theme_surfaceVariant_mediumContrast">#DDE5DB</color>
<color name="md_theme_onSurfaceVariant_mediumContrast">#3D453E</color>
<color name="md_theme_outline_mediumContrast">#596159</color>
<color name="md_theme_outlineVariant_mediumContrast">#757D75</color>
<color name="md_theme_scrim_mediumContrast">#000000</color>
<color name="md_theme_inverseSurface_mediumContrast">#2C322D</color>
<color name="md_theme_inverseOnSurface_mediumContrast">#EDF2EB</color>
<color name="md_theme_inversePrimary_mediumContrast">#96D5A6</color>
<color name="md_theme_primaryFixed_mediumContrast">#458158</color>
<color name="md_theme_onPrimaryFixed_mediumContrast">#FFFFFF</color>
<color name="md_theme_primaryFixedDim_mediumContrast">#2C6741</color>
<color name="md_theme_onPrimaryFixedVariant_mediumContrast">#FFFFFF</color>
<color name="md_theme_secondaryFixed_mediumContrast">#657A69</color>
<color name="md_theme_onSecondaryFixed_mediumContrast">#FFFFFF</color>
<color name="md_theme_secondaryFixedDim_mediumContrast">#4D6151</color>
<color name="md_theme_onSecondaryFixedVariant_mediumContrast">#FFFFFF</color>
<color name="md_theme_tertiaryFixed_mediumContrast">#517B86</color>
<color name="md_theme_onTertiaryFixed_mediumContrast">#FFFFFF</color>
<color name="md_theme_tertiaryFixedDim_mediumContrast">#38626D</color>
<color name="md_theme_onTertiaryFixedVariant_mediumContrast">#FFFFFF</color>
<color name="md_theme_surfaceDim_mediumContrast">#D7DBD4</color>
<color name="md_theme_surfaceBright_mediumContrast">#F6FBF3</color>
<color name="md_theme_surfaceContainerLowest_mediumContrast">#FFFFFF</color>
<color name="md_theme_surfaceContainerLow_mediumContrast">#F0F5ED</color>
<color name="md_theme_surfaceContainer_mediumContrast">#EBEFE8</color>
<color name="md_theme_surfaceContainerHigh_mediumContrast">#E5EAE2</color>
<color name="md_theme_surfaceContainerHighest_mediumContrast">#DFE4DC</color>
<color name="md_theme_primary_highContrast">#002912</color>
<color name="md_theme_onPrimary_highContrast">#FFFFFF</color>
<color name="md_theme_primaryContainer_highContrast">#0C4D2A</color>
<color name="md_theme_onPrimaryContainer_highContrast">#FFFFFF</color>
<color name="md_theme_secondary_highContrast">#142619</color>
<color name="md_theme_onSecondary_highContrast">#FFFFFF</color>
<color name="md_theme_secondaryContainer_highContrast">#344738</color>
<color name="md_theme_onSecondaryContainer_highContrast">#FFFFFF</color>
<color name="md_theme_tertiary_highContrast">#00262E</color>
<color name="md_theme_onTertiary_highContrast">#FFFFFF</color>
<color name="md_theme_tertiaryContainer_highContrast">#1C4853</color>
<color name="md_theme_onTertiaryContainer_highContrast">#FFFFFF</color>
<color name="md_theme_error_highContrast">#4E0002</color>
<color name="md_theme_onError_highContrast">#FFFFFF</color>
<color name="md_theme_errorContainer_highContrast">#8C0009</color>
<color name="md_theme_onErrorContainer_highContrast">#FFFFFF</color>
<color name="md_theme_background_highContrast">#F6FBF3</color>
<color name="md_theme_onBackground_highContrast">#181D18</color>
<color name="md_theme_surface_highContrast">#F6FBF3</color>
<color name="md_theme_onSurface_highContrast">#000000</color>
<color name="md_theme_surfaceVariant_highContrast">#DDE5DB</color>
<color name="md_theme_onSurfaceVariant_highContrast">#1E261F</color>
<color name="md_theme_outline_highContrast">#3D453E</color>
<color name="md_theme_outlineVariant_highContrast">#3D453E</color>
<color name="md_theme_scrim_highContrast">#000000</color>
<color name="md_theme_inverseSurface_highContrast">#2C322D</color>
<color name="md_theme_inverseOnSurface_highContrast">#FFFFFF</color>
<color name="md_theme_inversePrimary_highContrast">#BBFBCA</color>
<color name="md_theme_primaryFixed_highContrast">#0C4D2A</color>
<color name="md_theme_onPrimaryFixed_highContrast">#FFFFFF</color>
<color name="md_theme_primaryFixedDim_highContrast">#003519</color>
<color name="md_theme_onPrimaryFixedVariant_highContrast">#FFFFFF</color>
<color name="md_theme_secondaryFixed_highContrast">#344738</color>
<color name="md_theme_onSecondaryFixed_highContrast">#FFFFFF</color>
<color name="md_theme_secondaryFixedDim_highContrast">#1E3123</color>
<color name="md_theme_onSecondaryFixedVariant_highContrast">#FFFFFF</color>
<color name="md_theme_tertiaryFixed_highContrast">#1C4853</color>
<color name="md_theme_onTertiaryFixed_highContrast">#FFFFFF</color>
<color name="md_theme_tertiaryFixedDim_highContrast">#00323B</color>
<color name="md_theme_onTertiaryFixedVariant_highContrast">#FFFFFF</color>
<color name="md_theme_surfaceDim_highContrast">#D7DBD4</color>
<color name="md_theme_surfaceBright_highContrast">#F6FBF3</color>
<color name="md_theme_surfaceContainerLowest_highContrast">#FFFFFF</color>
<color name="md_theme_surfaceContainerLow_highContrast">#F0F5ED</color>
<color name="md_theme_surfaceContainer_highContrast">#EBEFE8</color>
<color name="md_theme_surfaceContainerHigh_highContrast">#E5EAE2</color>
<color name="md_theme_surfaceContainerHighest_highContrast">#DFE4DC</color>
</resources>
| 9,534 | XML | 65.215277 | 81 | 0.73799 |
AkaneFoundation/Omni/app/src/main/res/values-v31/themes.xml | <resources>
<style name="Day.Theme.Splash" parent="Base.Day.Theme.Splash" />
<style name="Night.Theme.Splash" parent="Base.Night.Theme.Splash" />
<style name="Theme.Omni" parent="Base.Theme.Omni" />
</resources> | 223 | XML | 43.799991 | 72 | 0.686099 |
AkaneFoundation/Omni/app/src/main/res/values-night/theme_overlays.xml | <resources>
<style name="ThemeOverlay.AppTheme.MediumContrast" parent="Theme.Material3.Dark.NoActionBar">
<item name="colorPrimary">@color/md_theme_primary_mediumContrast</item>
<item name="colorOnPrimary">@color/md_theme_onPrimary_mediumContrast</item>
<item name="colorPrimaryContainer">@color/md_theme_primaryContainer_mediumContrast</item>
<item name="colorOnPrimaryContainer">@color/md_theme_onPrimaryContainer_mediumContrast</item>
<item name="colorSecondary">@color/md_theme_secondary_mediumContrast</item>
<item name="colorOnSecondary">@color/md_theme_onSecondary_mediumContrast</item>
<item name="colorSecondaryContainer">@color/md_theme_secondaryContainer_mediumContrast</item>
<item name="colorOnSecondaryContainer">@color/md_theme_onSecondaryContainer_mediumContrast</item>
<item name="colorTertiary">@color/md_theme_tertiary_mediumContrast</item>
<item name="colorOnTertiary">@color/md_theme_onTertiary_mediumContrast</item>
<item name="colorTertiaryContainer">@color/md_theme_tertiaryContainer_mediumContrast</item>
<item name="colorOnTertiaryContainer">@color/md_theme_onTertiaryContainer_mediumContrast</item>
<item name="colorError">@color/md_theme_error_mediumContrast</item>
<item name="colorOnError">@color/md_theme_onError_mediumContrast</item>
<item name="colorErrorContainer">@color/md_theme_errorContainer_mediumContrast</item>
<item name="colorOnErrorContainer">@color/md_theme_onErrorContainer_mediumContrast</item>
<item name="android:colorBackground">@color/md_theme_background_mediumContrast</item>
<item name="colorOnBackground">@color/md_theme_onBackground_mediumContrast</item>
<item name="colorSurface">@color/md_theme_surface_mediumContrast</item>
<item name="colorOnSurface">@color/md_theme_onSurface_mediumContrast</item>
<item name="colorSurfaceVariant">@color/md_theme_surfaceVariant_mediumContrast</item>
<item name="colorOnSurfaceVariant">@color/md_theme_onSurfaceVariant_mediumContrast</item>
<item name="colorOutline">@color/md_theme_outline_mediumContrast</item>
<item name="colorOutlineVariant">@color/md_theme_outlineVariant_mediumContrast</item>
<item name="colorSurfaceInverse">@color/md_theme_inverseSurface_mediumContrast</item>
<item name="colorOnSurfaceInverse">@color/md_theme_inverseOnSurface_mediumContrast</item>
<item name="colorPrimaryInverse">@color/md_theme_inversePrimary_mediumContrast</item>
<item name="colorPrimaryFixed">@color/md_theme_primaryFixed_mediumContrast</item>
<item name="colorOnPrimaryFixed">@color/md_theme_onPrimaryFixed_mediumContrast</item>
<item name="colorPrimaryFixedDim">@color/md_theme_primaryFixedDim_mediumContrast</item>
<item name="colorOnPrimaryFixedVariant">@color/md_theme_onPrimaryFixedVariant_mediumContrast</item>
<item name="colorSecondaryFixed">@color/md_theme_secondaryFixed_mediumContrast</item>
<item name="colorOnSecondaryFixed">@color/md_theme_onSecondaryFixed_mediumContrast</item>
<item name="colorSecondaryFixedDim">@color/md_theme_secondaryFixedDim_mediumContrast</item>
<item name="colorOnSecondaryFixedVariant">@color/md_theme_onSecondaryFixedVariant_mediumContrast</item>
<item name="colorTertiaryFixed">@color/md_theme_tertiaryFixed_mediumContrast</item>
<item name="colorOnTertiaryFixed">@color/md_theme_onTertiaryFixed_mediumContrast</item>
<item name="colorTertiaryFixedDim">@color/md_theme_tertiaryFixedDim_mediumContrast</item>
<item name="colorOnTertiaryFixedVariant">@color/md_theme_onTertiaryFixedVariant_mediumContrast</item>
<item name="colorSurfaceDim">@color/md_theme_surfaceDim_mediumContrast</item>
<item name="colorSurfaceBright">@color/md_theme_surfaceBright_mediumContrast</item>
<item name="colorSurfaceContainerLowest">@color/md_theme_surfaceContainerLowest_mediumContrast</item>
<item name="colorSurfaceContainerLow">@color/md_theme_surfaceContainerLow_mediumContrast</item>
<item name="colorSurfaceContainer">@color/md_theme_surfaceContainer_mediumContrast</item>
<item name="colorSurfaceContainerHigh">@color/md_theme_surfaceContainerHigh_mediumContrast</item>
<item name="colorSurfaceContainerHighest">@color/md_theme_surfaceContainerHighest_mediumContrast</item>
</style>
<style name="ThemeOverlay.AppTheme.HighContrast" parent="Theme.Material3.Dark.NoActionBar">
<item name="colorPrimary">@color/md_theme_primary_highContrast</item>
<item name="colorOnPrimary">@color/md_theme_onPrimary_highContrast</item>
<item name="colorPrimaryContainer">@color/md_theme_primaryContainer_highContrast</item>
<item name="colorOnPrimaryContainer">@color/md_theme_onPrimaryContainer_highContrast</item>
<item name="colorSecondary">@color/md_theme_secondary_highContrast</item>
<item name="colorOnSecondary">@color/md_theme_onSecondary_highContrast</item>
<item name="colorSecondaryContainer">@color/md_theme_secondaryContainer_highContrast</item>
<item name="colorOnSecondaryContainer">@color/md_theme_onSecondaryContainer_highContrast</item>
<item name="colorTertiary">@color/md_theme_tertiary_highContrast</item>
<item name="colorOnTertiary">@color/md_theme_onTertiary_highContrast</item>
<item name="colorTertiaryContainer">@color/md_theme_tertiaryContainer_highContrast</item>
<item name="colorOnTertiaryContainer">@color/md_theme_onTertiaryContainer_highContrast</item>
<item name="colorError">@color/md_theme_error_highContrast</item>
<item name="colorOnError">@color/md_theme_onError_highContrast</item>
<item name="colorErrorContainer">@color/md_theme_errorContainer_highContrast</item>
<item name="colorOnErrorContainer">@color/md_theme_onErrorContainer_highContrast</item>
<item name="android:colorBackground">@color/md_theme_background_highContrast</item>
<item name="colorOnBackground">@color/md_theme_onBackground_highContrast</item>
<item name="colorSurface">@color/md_theme_surface_highContrast</item>
<item name="colorOnSurface">@color/md_theme_onSurface_highContrast</item>
<item name="colorSurfaceVariant">@color/md_theme_surfaceVariant_highContrast</item>
<item name="colorOnSurfaceVariant">@color/md_theme_onSurfaceVariant_highContrast</item>
<item name="colorOutline">@color/md_theme_outline_highContrast</item>
<item name="colorOutlineVariant">@color/md_theme_outlineVariant_highContrast</item>
<item name="colorSurfaceInverse">@color/md_theme_inverseSurface_highContrast</item>
<item name="colorOnSurfaceInverse">@color/md_theme_inverseOnSurface_highContrast</item>
<item name="colorPrimaryInverse">@color/md_theme_inversePrimary_highContrast</item>
<item name="colorPrimaryFixed">@color/md_theme_primaryFixed_highContrast</item>
<item name="colorOnPrimaryFixed">@color/md_theme_onPrimaryFixed_highContrast</item>
<item name="colorPrimaryFixedDim">@color/md_theme_primaryFixedDim_highContrast</item>
<item name="colorOnPrimaryFixedVariant">@color/md_theme_onPrimaryFixedVariant_highContrast</item>
<item name="colorSecondaryFixed">@color/md_theme_secondaryFixed_highContrast</item>
<item name="colorOnSecondaryFixed">@color/md_theme_onSecondaryFixed_highContrast</item>
<item name="colorSecondaryFixedDim">@color/md_theme_secondaryFixedDim_highContrast</item>
<item name="colorOnSecondaryFixedVariant">@color/md_theme_onSecondaryFixedVariant_highContrast</item>
<item name="colorTertiaryFixed">@color/md_theme_tertiaryFixed_highContrast</item>
<item name="colorOnTertiaryFixed">@color/md_theme_onTertiaryFixed_highContrast</item>
<item name="colorTertiaryFixedDim">@color/md_theme_tertiaryFixedDim_highContrast</item>
<item name="colorOnTertiaryFixedVariant">@color/md_theme_onTertiaryFixedVariant_highContrast</item>
<item name="colorSurfaceDim">@color/md_theme_surfaceDim_highContrast</item>
<item name="colorSurfaceBright">@color/md_theme_surfaceBright_highContrast</item>
<item name="colorSurfaceContainerLowest">@color/md_theme_surfaceContainerLowest_highContrast</item>
<item name="colorSurfaceContainerLow">@color/md_theme_surfaceContainerLow_highContrast</item>
<item name="colorSurfaceContainer">@color/md_theme_surfaceContainer_highContrast</item>
<item name="colorSurfaceContainerHigh">@color/md_theme_surfaceContainerHigh_highContrast</item>
<item name="colorSurfaceContainerHighest">@color/md_theme_surfaceContainerHighest_highContrast</item>
</style>
</resources>
| 8,873 | XML | 88.636363 | 111 | 0.754198 |
AkaneFoundation/Omni/app/src/main/res/values-night/colors.xml | <resources>
<color name="md_theme_primary">#96D5A6</color>
<color name="md_theme_onPrimary">#00391C</color>
<color name="md_theme_primaryContainer">#12512E</color>
<color name="md_theme_onPrimaryContainer">#B1F1C1</color>
<color name="md_theme_secondary">#B6CCB8</color>
<color name="md_theme_onSecondary">#223527</color>
<color name="md_theme_secondaryContainer">#384B3C</color>
<color name="md_theme_onSecondaryContainer">#D2E8D4</color>
<color name="md_theme_tertiary">#A2CEDA</color>
<color name="md_theme_onTertiary">#023640</color>
<color name="md_theme_tertiaryContainer">#214C57</color>
<color name="md_theme_onTertiaryContainer">#BEEAF6</color>
<color name="md_theme_error">#FFB4AB</color>
<color name="md_theme_onError">#690005</color>
<color name="md_theme_errorContainer">#93000A</color>
<color name="md_theme_onErrorContainer">#FFDAD6</color>
<color name="md_theme_background">#101510</color>
<color name="md_theme_onBackground">#DFE4DC</color>
<color name="md_theme_surface">#101510</color>
<color name="md_theme_onSurface">#DFE4DC</color>
<color name="md_theme_surfaceVariant">#414942</color>
<color name="md_theme_onSurfaceVariant">#C1C9BF</color>
<color name="md_theme_outline">#8B938A</color>
<color name="md_theme_outlineVariant">#414942</color>
<color name="md_theme_scrim">#000000</color>
<color name="md_theme_inverseSurface">#DFE4DC</color>
<color name="md_theme_inverseOnSurface">#2C322D</color>
<color name="md_theme_inversePrimary">#2E6A44</color>
<color name="md_theme_primaryFixed">#B1F1C1</color>
<color name="md_theme_onPrimaryFixed">#00210E</color>
<color name="md_theme_primaryFixedDim">#96D5A6</color>
<color name="md_theme_onPrimaryFixedVariant">#12512E</color>
<color name="md_theme_secondaryFixed">#D2E8D4</color>
<color name="md_theme_onSecondaryFixed">#0D1F13</color>
<color name="md_theme_secondaryFixedDim">#B6CCB8</color>
<color name="md_theme_onSecondaryFixedVariant">#384B3C</color>
<color name="md_theme_tertiaryFixed">#BEEAF6</color>
<color name="md_theme_onTertiaryFixed">#001F26</color>
<color name="md_theme_tertiaryFixedDim">#A2CEDA</color>
<color name="md_theme_onTertiaryFixedVariant">#214C57</color>
<color name="md_theme_surfaceDim">#101510</color>
<color name="md_theme_surfaceBright">#353A35</color>
<color name="md_theme_surfaceContainerLowest">#0A0F0B</color>
<color name="md_theme_surfaceContainerLow">#181D18</color>
<color name="md_theme_surfaceContainer">#1C211C</color>
<color name="md_theme_surfaceContainerHigh">#262B26</color>
<color name="md_theme_surfaceContainerHighest">#313631</color>
<color name="md_theme_primary_mediumContrast">#9AD9AA</color>
<color name="md_theme_onPrimary_mediumContrast">#001B0A</color>
<color name="md_theme_primaryContainer_mediumContrast">#619E73</color>
<color name="md_theme_onPrimaryContainer_mediumContrast">#000000</color>
<color name="md_theme_secondary_mediumContrast">#BAD0BD</color>
<color name="md_theme_onSecondary_mediumContrast">#081A0E</color>
<color name="md_theme_secondaryContainer_mediumContrast">#819684</color>
<color name="md_theme_onSecondaryContainer_mediumContrast">#000000</color>
<color name="md_theme_tertiary_mediumContrast">#A6D2DE</color>
<color name="md_theme_onTertiary_mediumContrast">#00191F</color>
<color name="md_theme_tertiaryContainer_mediumContrast">#6D97A3</color>
<color name="md_theme_onTertiaryContainer_mediumContrast">#000000</color>
<color name="md_theme_error_mediumContrast">#FFBAB1</color>
<color name="md_theme_onError_mediumContrast">#370001</color>
<color name="md_theme_errorContainer_mediumContrast">#FF5449</color>
<color name="md_theme_onErrorContainer_mediumContrast">#000000</color>
<color name="md_theme_background_mediumContrast">#101510</color>
<color name="md_theme_onBackground_mediumContrast">#DFE4DC</color>
<color name="md_theme_surface_mediumContrast">#101510</color>
<color name="md_theme_onSurface_mediumContrast">#F7FCF4</color>
<color name="md_theme_surfaceVariant_mediumContrast">#414942</color>
<color name="md_theme_onSurfaceVariant_mediumContrast">#C5CDC3</color>
<color name="md_theme_outline_mediumContrast">#9DA59C</color>
<color name="md_theme_outlineVariant_mediumContrast">#7D857D</color>
<color name="md_theme_scrim_mediumContrast">#000000</color>
<color name="md_theme_inverseSurface_mediumContrast">#DFE4DC</color>
<color name="md_theme_inverseOnSurface_mediumContrast">#262B27</color>
<color name="md_theme_inversePrimary_mediumContrast">#14522F</color>
<color name="md_theme_primaryFixed_mediumContrast">#B1F1C1</color>
<color name="md_theme_onPrimaryFixed_mediumContrast">#001507</color>
<color name="md_theme_primaryFixedDim_mediumContrast">#96D5A6</color>
<color name="md_theme_onPrimaryFixedVariant_mediumContrast">#003F20</color>
<color name="md_theme_secondaryFixed_mediumContrast">#D2E8D4</color>
<color name="md_theme_onSecondaryFixed_mediumContrast">#041509</color>
<color name="md_theme_secondaryFixedDim_mediumContrast">#B6CCB8</color>
<color name="md_theme_onSecondaryFixedVariant_mediumContrast">#283A2C</color>
<color name="md_theme_tertiaryFixed_mediumContrast">#BEEAF6</color>
<color name="md_theme_onTertiaryFixed_mediumContrast">#001419</color>
<color name="md_theme_tertiaryFixedDim_mediumContrast">#A2CEDA</color>
<color name="md_theme_onTertiaryFixedVariant_mediumContrast">#0B3C46</color>
<color name="md_theme_surfaceDim_mediumContrast">#101510</color>
<color name="md_theme_surfaceBright_mediumContrast">#353A35</color>
<color name="md_theme_surfaceContainerLowest_mediumContrast">#0A0F0B</color>
<color name="md_theme_surfaceContainerLow_mediumContrast">#181D18</color>
<color name="md_theme_surfaceContainer_mediumContrast">#1C211C</color>
<color name="md_theme_surfaceContainerHigh_mediumContrast">#262B26</color>
<color name="md_theme_surfaceContainerHighest_mediumContrast">#313631</color>
<color name="md_theme_primary_highContrast">#EFFFEF</color>
<color name="md_theme_onPrimary_highContrast">#000000</color>
<color name="md_theme_primaryContainer_highContrast">#9AD9AA</color>
<color name="md_theme_onPrimaryContainer_highContrast">#000000</color>
<color name="md_theme_secondary_highContrast">#EFFFEF</color>
<color name="md_theme_onSecondary_highContrast">#000000</color>
<color name="md_theme_secondaryContainer_highContrast">#BAD0BD</color>
<color name="md_theme_onSecondaryContainer_highContrast">#000000</color>
<color name="md_theme_tertiary_highContrast">#F4FCFF</color>
<color name="md_theme_onTertiary_highContrast">#000000</color>
<color name="md_theme_tertiaryContainer_highContrast">#A6D2DE</color>
<color name="md_theme_onTertiaryContainer_highContrast">#000000</color>
<color name="md_theme_error_highContrast">#FFF9F9</color>
<color name="md_theme_onError_highContrast">#000000</color>
<color name="md_theme_errorContainer_highContrast">#FFBAB1</color>
<color name="md_theme_onErrorContainer_highContrast">#000000</color>
<color name="md_theme_background_highContrast">#101510</color>
<color name="md_theme_onBackground_highContrast">#DFE4DC</color>
<color name="md_theme_surface_highContrast">#101510</color>
<color name="md_theme_onSurface_highContrast">#FFFFFF</color>
<color name="md_theme_surfaceVariant_highContrast">#414942</color>
<color name="md_theme_onSurfaceVariant_highContrast">#F5FDF3</color>
<color name="md_theme_outline_highContrast">#C5CDC3</color>
<color name="md_theme_outlineVariant_highContrast">#C5CDC3</color>
<color name="md_theme_scrim_highContrast">#000000</color>
<color name="md_theme_inverseSurface_highContrast">#DFE4DC</color>
<color name="md_theme_inverseOnSurface_highContrast">#000000</color>
<color name="md_theme_inversePrimary_highContrast">#003218</color>
<color name="md_theme_primaryFixed_highContrast">#B6F6C5</color>
<color name="md_theme_onPrimaryFixed_highContrast">#000000</color>
<color name="md_theme_primaryFixedDim_highContrast">#9AD9AA</color>
<color name="md_theme_onPrimaryFixedVariant_highContrast">#001B0A</color>
<color name="md_theme_secondaryFixed_highContrast">#D6EDD8</color>
<color name="md_theme_onSecondaryFixed_highContrast">#000000</color>
<color name="md_theme_secondaryFixedDim_highContrast">#BAD0BD</color>
<color name="md_theme_onSecondaryFixedVariant_highContrast">#081A0E</color>
<color name="md_theme_tertiaryFixed_highContrast">#C2EEFB</color>
<color name="md_theme_onTertiaryFixed_highContrast">#000000</color>
<color name="md_theme_tertiaryFixedDim_highContrast">#A6D2DE</color>
<color name="md_theme_onTertiaryFixedVariant_highContrast">#00191F</color>
<color name="md_theme_surfaceDim_highContrast">#101510</color>
<color name="md_theme_surfaceBright_highContrast">#353A35</color>
<color name="md_theme_surfaceContainerLowest_highContrast">#0A0F0B</color>
<color name="md_theme_surfaceContainerLow_highContrast">#181D18</color>
<color name="md_theme_surfaceContainer_highContrast">#1C211C</color>
<color name="md_theme_surfaceContainerHigh_highContrast">#262B26</color>
<color name="md_theme_surfaceContainerHighest_highContrast">#313631</color>
</resources>
| 9,534 | XML | 65.215277 | 81 | 0.73799 |
AkaneFoundation/Omni/app/src/main/res/drawable/ic_star_shape.xml | <vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="396.96dp"
android:height="396.96dp"
android:viewportWidth="396.96"
android:viewportHeight="396.96">
<path
android:fillColor="#FF000000"
android:pathData="M247.09,28.82l21.68,39.64c13.79,25.21 34.52,45.95 59.74,59.74l39.64,21.68c38.43,21.02 38.43,76.2 0,97.22l-39.64,21.68c-25.21,13.79 -45.95,34.52 -59.74,59.74l-21.68,39.64c-21.02,38.43 -76.2,38.43 -97.22,0l-21.68,-39.64c-13.79,-25.21 -34.52,-45.95 -59.74,-59.74l-39.64,-21.68c-38.43,-21.02 -38.43,-76.2 0,-97.22l39.64,-21.68c25.21,-13.79 45.95,-34.52 59.74,-59.74l21.68,-39.64c21.02,-38.43 76.2,-38.43 97.22,0Z"
android:strokeWidth="0"/>
</vector>
| 718 | XML | 64.363631 | 433 | 0.664345 |
AkaneFoundation/Omni/app/src/main/res/drawable/location_indicator.xml | <vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="37dp"
android:height="32dp"
android:viewportWidth="37"
android:viewportHeight="32">
<path
android:pathData="M25.328,27.819C22.209,32.927 14.791,32.927 11.672,27.819L2.116,12.169C-1.14,6.838 2.697,0 8.943,0L28.057,0C34.303,0 38.14,6.838 34.884,12.169L25.328,27.819Z"
android:fillColor="#000000" />
</vector>
| 419 | XML | 40.999996 | 181 | 0.694511 |
AkaneFoundation/Omni/app/src/main/res/drawable/ic_close.xml | <vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="24dp"
android:height="24dp"
android:viewportWidth="960"
android:viewportHeight="960"
android:tint="?attr/colorOnSurface">
<path
android:fillColor="@android:color/white"
android:pathData="M256,760L200,704L424,480L200,256L256,200L480,424L704,200L760,256L536,480L760,704L704,760L480,536L256,760Z"/>
</vector>
| 422 | XML | 37.454542 | 132 | 0.736967 |
AkaneFoundation/Omni/app/src/main/res/drawable/ic_arrow_back.xml | <vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="24dp"
android:height="24dp"
android:viewportWidth="960"
android:viewportHeight="960"
android:tint="?attr/colorControlNormal"
android:autoMirrored="true">
<path
android:fillColor="@android:color/white"
android:pathData="M313,520L537,744L480,800L160,480L480,160L537,216L313,440L800,440L800,520L313,520Z"/>
</vector>
| 434 | XML | 35.249997 | 108 | 0.730415 |