Dataset columns: file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/actions/__init__.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .navigation_actions import * # noqa: F401, F403
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/commands/path_follower_command_generator_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for the velocity-based locomotion task."""
import math
from dataclasses import MISSING
from omni.isaac.orbit.managers import CommandTermCfg
from omni.isaac.orbit.utils.configclass import configclass
from typing_extensions import Literal
from .path_follower_command_generator import PathFollowerCommandGenerator
@configclass
class PathFollowerCommandGeneratorCfg(CommandTermCfg):
class_type: PathFollowerCommandGenerator = PathFollowerCommandGenerator
"""Name of the command generator class."""
robot_attr: str = MISSING
"""Name of the robot attribute from the environment."""
path_frame: Literal["world", "robot"] = "world"
"""Frame in which the path is defined.
- ``world``: the path is defined in the world frame. Also called ``odom``.
- ``robot``: the path is defined in the robot frame. Also called ``base``.
"""
lookAheadDistance: float = MISSING
"""The lookahead distance for the path follower."""
two_way_drive: bool = False
"""Allow robot to use reverse gear."""
switch_time_threshold: float = 1.0
"""Time threshold to switch between the forward and backward drive."""
maxSpeed: float = 0.5
"""Maximum speed of the robot."""
maxAccel: float = 2.5 / 100.0 # 2.5 / 100
"""Maximum acceleration of the robot."""
joyYaw: float = 1.0
"""TODO: add description"""
yawRateGain: float = 7.0 # 3.5
"""Gain for the yaw rate."""
stopYawRateGain: float = 7.0 # 3.5
""""""
    maxYawRate: float = 90.0 * math.pi / 360
    """Maximum yaw rate of the robot (rad/s)."""
    dirDiffThre: float = 0.7
    """Direction-difference threshold (rad) below which the robot accelerates towards ``maxSpeed``."""
    stopDisThre: float = 0.2
    """Distance threshold (m) to the lookahead point below which the robot stops."""
    slowDwnDisThre: float = 0.3
    """Distance (m) to the final waypoint below which the speed command is scaled down."""
    slowRate1: float = 0.25
    slowRate2: float = 0.5
    noRotAtGoal: bool = True
    """Suppress rotation in place once the robot is within the stop distance."""
    autonomyMode: bool = False
    """If False, a joystick yaw command (``joyYaw``) is applied whenever the speed command is zero."""
    dynamic_lookahead: bool = False
    """Select the lookahead point from the closest ``min_points_within_lookahead`` path points instead of a fixed distance."""
    min_points_within_lookahead: int = 3
    """Number of path points considered when ``dynamic_lookahead`` is enabled."""
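
A minimal usage sketch, assuming an orbit/Isaac Sim installation; it mirrors the instantiation in CommandsCfg (base_cfg.py, further below), and the two_way_drive override here is illustrative rather than a repository default:

cmd_cfg = PathFollowerCommandGeneratorCfg(
    robot_attr="robot",      # MISSING fields must be set by the user
    lookAheadDistance=1.0,
    two_way_drive=True,      # illustrative: allow reverse gear
    debug_vis=True,
)
# orbit's command manager then constructs the term as cmd_cfg.class_type(cmd_cfg, env)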
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/commands/__init__.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .path_follower_command_generator import PathFollowerCommandGenerator
from .path_follower_command_generator_cfg import PathFollowerCommandGeneratorCfg
__all__ = ["PathFollowerCommandGeneratorCfg", "PathFollowerCommandGenerator"]
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/viplanner/mdp/commands/path_follower_command_generator.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for the velocity-based locomotion task."""
from __future__ import annotations
import math
from typing import TYPE_CHECKING, Sequence
import omni.isaac.orbit.utils.math as math_utils
import torch
from omni.isaac.orbit.assets.articulation import Articulation
from omni.isaac.orbit.envs import RLTaskEnv
from omni.isaac.orbit.managers import CommandTerm
from omni.isaac.orbit.markers import VisualizationMarkers
from omni.isaac.orbit.markers.config import (
BLUE_ARROW_X_MARKER_CFG,
GREEN_ARROW_X_MARKER_CFG,
)
from omni.isaac.orbit.sim import SimulationContext
if TYPE_CHECKING:
from .path_follower_command_generator_cfg import PathFollowerCommandGeneratorCfg
class PathFollowerCommandGenerator(CommandTerm):
r"""Command generator that generates a velocity command in SE(2) from a path given by a local planner.
    The command consists of a linear velocity in the x and y directions and an angular velocity
    around the z-axis, expressed in the robot's base frame.
The path follower acts as a PD-controller that checks for the last point on the path within a lookahead distance
and uses it to compute the steering angle and the linear velocity.
"""
cfg: PathFollowerCommandGeneratorCfg
"""The configuration of the command generator."""
def __init__(self, cfg: PathFollowerCommandGeneratorCfg, env: RLTaskEnv):
"""Initialize the command generator.
Args:
cfg (PathFollowerCommandGeneratorCfg): The configuration of the command generator.
            env (RLTaskEnv): The environment.
"""
super().__init__(cfg, env)
# -- robot
self.robot: Articulation = env.scene[cfg.robot_attr]
# -- Simulation Context
self.sim: SimulationContext = SimulationContext.instance()
# -- buffers
self.vehicleSpeed: torch.Tensor = torch.zeros(self.num_envs, device=self.device)
self.switch_time: torch.Tensor = torch.zeros(self.num_envs, device=self.device)
self.vehicleYawRate: torch.Tensor = torch.zeros(self.num_envs, device=self.device)
self.navigation_forward: torch.Tensor = torch.ones(self.num_envs, device=self.device, dtype=torch.bool)
self.twist: torch.Tensor = torch.zeros((self.num_envs, 3), device=self.device)
self.goal_reached: torch.Tensor = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)
# -- debug vis
self._base_vel_goal_markers = None
self._base_vel_markers = None
def __str__(self) -> str:
"""Return a string representation of the command generator."""
msg = "PathFollowerCommandGenerator:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
msg += f"\tLookahead distance: {self.cfg.lookAheadDistance}\n"
return msg
"""
Properties
"""
@property
def command(self) -> torch.Tensor:
"""The desired base velocity command in the base frame. Shape is (num_envs, 3)."""
return self.twist
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict:
"""Reset the command generator.
This function resets the command generator. It should be called whenever the environment is reset.
Args:
env_ids (Optional[Sequence[int]], optional): The list of environment IDs to reset. Defaults to None.
"""
if env_ids is None:
env_ids = ...
self.vehicleSpeed = torch.zeros(self.num_envs, device=self.device)
self.switch_time = torch.zeros(self.num_envs, device=self.device)
self.vehicleYawRate = torch.zeros(self.num_envs, device=self.device)
self.navigation_forward = torch.ones(self.num_envs, device=self.device, dtype=torch.bool)
self.twist = torch.zeros((self.num_envs, 3), device=self.device)
self.goal_reached = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)
return {}
def compute(self, dt: float):
"""Compute the command.
        The paths are provided as a tensor of shape (num_envs, N, 3), where N is the number of poses
        on the path, given in the base frame of the robot. num_envs equals the number of robots
        spawned across all environments.
"""
# get paths
paths = self._env.action_manager._terms[0]._processed_navigation_velocity_actions.clone()
        # get the number of poses on the paths
num_envs, N, _ = paths.shape
        assert N > 0, "PathFollowerCommandGenerator: paths must have at least one pose."
# get the current simulation time
curr_time = self.sim.current_time
# define current maxSpeed for the velocities
max_speed = torch.ones(num_envs, device=self.device) * self.cfg.maxSpeed
        # transform the paths into the base/robot frame if they are given in the world frame
if self.cfg.path_frame == "world":
paths = math_utils.quat_apply(
math_utils.quat_inv(self.robot.data.root_quat_w[:, None, :].repeat(1, N, 1)),
paths - self.robot.data.root_pos_w[:, None, :],
)
# get distance that robot has to travel until last set waypoint
distance_end_point = torch.linalg.norm(paths[:, -1, :2], axis=1)
# get point that is still within the lookAheadDis, compute its distance and the z-axis rotation
dis_all_poses = torch.linalg.norm(paths[:, :, :2], axis=2)
sorted_dis, sorted_dis_idx = torch.sort(dis_all_poses, dim=1)
if self.cfg.dynamic_lookahead:
dis_max = sorted_dis[:, self.cfg.min_points_within_lookahead - 1]
dis_max_poses = paths[:, sorted_dis_idx[:, self.cfg.min_points_within_lookahead - 1], :2]
else:
sorted_dis[sorted_dis > self.cfg.lookAheadDistance] = 0.0
dis_max, dis_max_idx = sorted_dis.max(dim=1)
dis_max_poses = paths[
torch.arange(self.num_envs), sorted_dis_idx[torch.arange(self.num_envs), dis_max_idx], :2
]
direction_diff = -torch.atan2(dis_max_poses[:, 1], dis_max_poses[:, 0])
# decide whether to drive forward or backward
if self.cfg.two_way_drive:
switch_time_threshold_exceeded = curr_time - self.switch_time > self.cfg.switch_time_threshold
# get index of robots that should switch direction
switch_to_backward_idx = torch.all(
torch.vstack(
(abs(direction_diff) > math.pi / 2, switch_time_threshold_exceeded, self.navigation_forward)
),
dim=0,
)
switch_to_forward_idx = torch.all(
torch.vstack(
(abs(direction_diff) < math.pi / 2, switch_time_threshold_exceeded, ~self.navigation_forward)
),
dim=0,
)
# update buffers
self.navigation_forward[switch_to_backward_idx] = False
self.navigation_forward[switch_to_forward_idx] = True
self.switch_time[switch_to_backward_idx] = curr_time
self.switch_time[switch_to_forward_idx] = curr_time
# adapt direction difference and maxSpeed depending on driving direction
direction_diff[~self.navigation_forward] += math.pi
limit_radians = torch.all(torch.vstack((direction_diff > math.pi, ~self.navigation_forward)), dim=0)
direction_diff[limit_radians] -= 2 * math.pi
max_speed[~self.navigation_forward] *= -1
# determine yaw rate of robot
vehicleYawRate = torch.zeros(num_envs, device=self.device)
stop_yaw_rate_bool = abs(direction_diff) < 2.0 * self.cfg.maxAccel
vehicleYawRate[stop_yaw_rate_bool] = -self.cfg.stopYawRateGain * direction_diff[stop_yaw_rate_bool]
vehicleYawRate[~stop_yaw_rate_bool] = -self.cfg.yawRateGain * direction_diff[~stop_yaw_rate_bool]
# limit yaw rate of robot
vehicleYawRate[vehicleYawRate > self.cfg.maxYawRate] = self.cfg.maxYawRate
vehicleYawRate[vehicleYawRate < -self.cfg.maxYawRate] = -self.cfg.maxYawRate
# catch special cases
if not self.cfg.autonomyMode:
vehicleYawRate[max_speed == 0.0] = self.cfg.maxYawRate * self.cfg.joyYaw
if N <= 1:
vehicleYawRate *= 0
max_speed *= 0
elif self.cfg.noRotAtGoal:
vehicleYawRate[dis_max < self.cfg.stopDisThre] = 0.0
# determine joyspeed at the end of the path
slow_down_bool = distance_end_point / self.cfg.slowDwnDisThre < max_speed
max_speed[slow_down_bool] *= distance_end_point[slow_down_bool] / self.cfg.slowDwnDisThre
# update vehicle speed
drive_at_max_speed = torch.all(
torch.vstack((abs(direction_diff) < self.cfg.dirDiffThre, dis_max > self.cfg.stopDisThre)), dim=0
)
increase_speed = torch.all(torch.vstack((self.vehicleSpeed < max_speed, drive_at_max_speed)), dim=0)
decrease_speed = torch.all(torch.vstack((self.vehicleSpeed > max_speed, drive_at_max_speed)), dim=0)
self.vehicleSpeed[increase_speed] += self.cfg.maxAccel
self.vehicleSpeed[decrease_speed] -= self.cfg.maxAccel
increase_speed = torch.all(torch.vstack((self.vehicleSpeed <= 0, ~drive_at_max_speed)), dim=0)
decrease_speed = torch.all(torch.vstack((self.vehicleSpeed > 0, ~drive_at_max_speed)), dim=0)
self.vehicleSpeed[increase_speed] += self.cfg.maxAccel
self.vehicleSpeed[decrease_speed] -= self.cfg.maxAccel
# update twist command
self.twist[:, 0] = self.vehicleSpeed
self.twist[abs(self.vehicleSpeed) < self.cfg.maxAccel * dt, 0] = 0.0
self.twist[abs(self.vehicleSpeed) > self.cfg.maxSpeed, 0] = self.cfg.maxSpeed
self.twist[:, 2] = vehicleYawRate
return self.twist
"""
Implementation specific functions.
"""
def _update_command(self):
pass
def _update_metrics(self):
pass
def _resample_command(self, env_ids: Sequence[int]):
pass
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
            # create markers if necessary for the first time
if not hasattr(self, "base_vel_goal_visualizer"):
# -- goal
marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Command/velocity_goal"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_goal_visualizer = VisualizationMarkers(marker_cfg)
# -- current
marker_cfg = BLUE_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Command/velocity_current"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.base_vel_goal_visualizer.set_visibility(True)
self.base_vel_visualizer.set_visibility(True)
else:
if hasattr(self, "base_vel_goal_visualizer"):
self.base_vel_goal_visualizer.set_visibility(False)
self.base_vel_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# get marker location
# -- base state
base_pos_w = self.robot.data.root_pos_w.clone()
base_pos_w[:, 2] += 0.5
# -- resolve the scales and quaternions
vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.command[:, :2])
vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2])
# display markers
self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale)
self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale)
"""
Internal helpers.
"""
def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""Converts the XY base velocity command to arrow direction rotation."""
# obtain default scale of the marker
default_scale = self.base_vel_goal_visualizer.cfg.markers["arrow"].scale
# arrow-scale
arrow_scale = torch.tensor(default_scale, device=self.device).repeat(xy_velocity.shape[0], 1)
arrow_scale[:, 0] *= torch.linalg.norm(xy_velocity, dim=1)
# arrow-direction
heading_angle = torch.atan2(xy_velocity[:, 1], xy_velocity[:, 0])
zeros = torch.zeros_like(heading_angle)
arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle)
return arrow_scale, arrow_quat
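
To make the lookahead selection in compute() above concrete, here is a standalone torch sketch of the same logic with a toy single-environment path (assumes only torch; values are illustrative):

import torch

paths = torch.tensor([[[0.2, 0.0, 0.0], [0.6, 0.1, 0.0], [1.4, 0.3, 0.0]]])  # (num_envs, N, 3), base frame
look_ahead = 1.0
dis = torch.linalg.norm(paths[:, :, :2], dim=2)
sorted_dis, sorted_idx = torch.sort(dis, dim=1)
sorted_dis[sorted_dis > look_ahead] = 0.0                 # mask poses beyond the lookahead distance
dis_max, dis_max_idx = sorted_dis.max(dim=1)              # furthest pose still within range
env_ids = torch.arange(paths.shape[0])
target = paths[env_ids, sorted_idx[env_ids, dis_max_idx], :2]
heading_error = -torch.atan2(target[:, 1], target[:, 0])  # steering error used by the PD law
print(target, heading_error)  # tensor([[0.6000, 0.1000]]), tensor([-0.1651])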
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/config/base_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import omni.viplanner.viplanner.mdp as mdp
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RandomizationTermCfg as RandTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_ORBIT_NUCLEUS_DIR
##
# MDP settings
##
@configclass
class RewardsCfg:
pass
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
paths = mdp.NavigationActionCfg(
asset_name="robot",
low_level_decimation=4,
low_level_action=mdp.JointPositionActionCfg(
asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True
),
low_level_policy_file=os.path.join(ISAAC_ORBIT_NUCLEUS_DIR, "Policies", "ANYmal-C", "policy.pt"),
)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
projected_gravity = ObsTerm(func=mdp.projected_gravity)
velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "vel_command"})
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
actions = ObsTerm(func=mdp.low_level_actions)
height_scan = ObsTerm(
func=mdp.height_scan,
params={"sensor_cfg": SceneEntityCfg("height_scanner")},
clip=(-1.0, 1.0),
)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
@configclass
class PlannerImageCfg(ObsGroup):
depth_measurement = ObsTerm(
func=mdp.isaac_camera_data,
params={"sensor_cfg": SceneEntityCfg("depth_camera"), "data_type": "distance_to_image_plane"},
)
semantic_measurement = ObsTerm(
func=mdp.isaac_camera_data,
params={"sensor_cfg": SceneEntityCfg("semantic_camera"), "data_type": "semantic_segmentation"},
)
def __post_init__(self):
self.concatenate_terms = False
self.enable_corruption = False
@configclass
class PlannerTransformCfg(ObsGroup):
cam_position = ObsTerm(
func=mdp.cam_position,
params={"sensor_cfg": SceneEntityCfg("depth_camera")},
)
cam_orientation = ObsTerm(
func=mdp.cam_orientation,
params={"sensor_cfg": SceneEntityCfg("depth_camera")},
)
def __post_init__(self):
self.concatenate_terms = False
self.enable_corruption = False
# observation groups
policy: PolicyCfg = PolicyCfg()
planner_image: PlannerImageCfg = PlannerImageCfg()
planner_transform: PlannerTransformCfg = PlannerTransformCfg()
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
)
@configclass
class RandomizationCfg:
"""Configuration for randomization."""
reset_base = RandTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot"),
"pose_range": {"x": (0, 0), "y": (0, 0), "yaw": (0, 0)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
},
)
reset_robot_joints = RandTerm(
func=mdp.reset_joints_by_scale,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot"),
"position_range": (0.0, 0.0),
"velocity_range": (0.0, 0.0),
},
)
@configclass
class CommandsCfg:
"""Command specifications for the MDP."""
vel_command: mdp.PathFollowerCommandGeneratorCfg = mdp.PathFollowerCommandGeneratorCfg(
robot_attr="robot",
lookAheadDistance=1.0,
debug_vis=True,
)
##
# Environment configuration
##
@configclass
class ViPlannerBaseCfg(RLTaskEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# managers
terminations: TerminationsCfg = TerminationsCfg()
randomization: RandomizationCfg = RandomizationCfg()
rewards: RewardsCfg = RewardsCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 20 # 10 Hz
self.episode_length_s = 60.0
# simulation settings
self.sim.dt = 0.005
self.sim.disable_contact_processing = True
self.sim.physics_material.static_friction = 1.0
self.sim.physics_material.dynamic_friction = 1.0
self.sim.physics_material.friction_combine_mode = "max"
self.sim.physics_material.restitution_combine_mode = "max"
# update sensor update periods
# we tick all the sensors based on the smallest update period (physics update period)
        self.scene.height_scanner.update_period = 4 * self.sim.dt  # 4 * physics dt, matching the low-level decimation
self.scene.contact_forces.update_period = self.sim.dt
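
The timing constants set above interact as follows; a small arithmetic sketch in plain Python (values copied from __post_init__ and ActionsCfg):

sim_dt = 0.005                                  # physics stepped at 200 Hz
decimation = 20
control_dt = decimation * sim_dt                # 0.1 s -> 10 Hz planner/command rate
low_level_decimation = 4
scanner_period = low_level_decimation * sim_dt  # 0.02 s -> 50 Hz height-scanner updates
print(control_dt, scanner_period)               # 0.1 0.02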
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/config/carla_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass
from omni.viplanner.utils import UnRealImporterCfg
##
# Pre-defined configs
##
# isort: off
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG
from .base_cfg import ViPlannerBaseCfg
from ..viplanner import DATA_DIR
##
# Scene definition
##
@configclass
class TerrainSceneCfg(InteractiveSceneCfg):
"""Configuration for the terrain scene with a legged robot."""
# ground terrain
terrain = UnRealImporterCfg(
prim_path="/World/Carla",
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
usd_path="${USER_PATH_TO_USD}/carla.usd",
groundplane=True,
cw_config_file=os.path.join(DATA_DIR, "town01", "cw_multiply_cfg.yml"),
sem_mesh_to_class_map=os.path.join(DATA_DIR, "town01", "keyword_mapping.yml"),
people_config_file=os.path.join(DATA_DIR, "town01", "people_cfg.yml"),
vehicle_config_file=os.path.join(DATA_DIR, "town01", "vehicle_cfg.yml"),
axis_up="Z",
)
# robots
robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
robot.init_state.pos = (8.0, -0.5, 0.6)
robot.init_state.rot = (0.5253, 0.0, 0.0, 0.8509)
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.5)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/GroundPlane"],
)
contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, debug_vis=False)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(
color=(1.0, 1.0, 1.0),
intensity=1000.0,
),
)
# camera
depth_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/depth_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
spawn=sim_utils.PinholeCameraCfg(),
width=848,
height=480,
data_types=["distance_to_image_plane"],
)
semantic_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/semantic_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
spawn=sim_utils.PinholeCameraCfg(),
width=1280,
height=720,
data_types=["semantic_segmentation"],
)
##
# Environment configuration
##
@configclass
class ViPlannerCarlaCfg(ViPlannerBaseCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: TerrainSceneCfg = TerrainSceneCfg(num_envs=1, env_spacing=1.0, replicate_physics=False)
def __post_init__(self):
"""Post initialization."""
super().__post_init__()
# adapt viewer
self.viewer.eye = (105, -132, 6.5)
self.viewer.lookat = (113.5, -132, 1.0)
# change ANYmal position
self.scene.robot.init_state.pos = (118.0, -126.0, 1.0)
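
A hedged launch sketch, assuming a working Isaac Sim/orbit installation, a resolved ${USER_PATH_TO_USD}/carla.usd export, and that the SimulationApp has already been started (orbit scripts normally bootstrap it via an AppLauncher before these imports):

from omni.isaac.orbit.envs import RLTaskEnv

env_cfg = ViPlannerCarlaCfg()
env = RLTaskEnv(cfg=env_cfg)
obs, _ = env.reset()  # obs holds the "policy", "planner_image" and "planner_transform" groups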
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.utils import configclass
from omni.viplanner.utils import UnRealImporterCfg
from ..viplanner import DATA_DIR
from .base_cfg import ViPlannerBaseCfg
##
# Pre-defined configs
##
# isort: off
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG
##
# Scene definition
##
@configclass
class TerrainSceneCfg(InteractiveSceneCfg):
"""Configuration for the terrain scene with a legged robot."""
# ground terrain
terrain = UnRealImporterCfg(
prim_path="/World/Warehouse",
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
usd_path="${USER_PATH_TO_USD}/warehouse_new.usd",
groundplane=True,
sem_mesh_to_class_map=os.path.join(DATA_DIR, "warehouse", "keyword_mapping.yml"),
people_config_file=os.path.join(DATA_DIR, "warehouse", "people_cfg.yml"),
axis_up="Z",
)
# robots
robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
robot.init_state.pos = (5.0, 5.5, 0.6)
robot.init_state.rot = (0.5253, 0.0, 0.0, 0.8509)
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.5)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/Warehouse"],
)
contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, debug_vis=False)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(
color=(1.0, 1.0, 1.0),
intensity=1000.0,
),
)
# camera
depth_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/depth_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
spawn=sim_utils.PinholeCameraCfg(),
width=848,
height=480,
data_types=["distance_to_image_plane"],
)
semantic_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/semantic_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
spawn=sim_utils.PinholeCameraCfg(),
width=1280,
height=720,
data_types=["semantic_segmentation"],
)
##
# Environment configuration
##
@configclass
class ViPlannerWarehouseCfg(ViPlannerBaseCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: TerrainSceneCfg = TerrainSceneCfg(num_envs=1, env_spacing=1.0)
def __post_init__(self):
"""Post initialization."""
super().__post_init__()
# adapt viewer
self.viewer.eye = (5, 12, 5)
self.viewer.lookat = (5, 0, 0.0)
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import omni.isaac.orbit.sim as sim_utils
import omni.viplanner.viplanner.mdp as mdp
from omni.isaac.matterport.config import MatterportImporterCfg
from omni.isaac.matterport.domains import MatterportRayCasterCfg
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, patterns
from omni.isaac.orbit.utils import configclass
from omni.viplanner.utils import VIPlannerMatterportRayCasterCameraCfg
from .base_cfg import ObservationsCfg, ViPlannerBaseCfg
##
# Pre-defined configs
##
# isort: off
from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG
##
# Scene definition
##
@configclass
class TerrainSceneCfg(InteractiveSceneCfg):
"""Configuration for the terrain scene with a legged robot."""
# ground terrain
terrain = MatterportImporterCfg(
prim_path="/World/matterport",
terrain_type="matterport",
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
obj_filepath="${USER_PATH_TO_USD}/matterport.usd",
groundplane=True,
)
# robots
robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
robot.init_state.pos = (8.0, -0.5, 0.6)
robot.init_state.rot = (0.6126, 0.0327, 0.0136, -0.7896)
# sensors
height_scanner = MatterportRayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=MatterportRayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.5)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["${USER_PATH_TO_USD}/matterport.ply"],
)
contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, debug_vis=False)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(
color=(1.0, 1.0, 1.0),
intensity=1000.0,
),
)
sphere_1 = AssetBaseCfg(
prim_path="/World/sphere_1",
spawn=sim_utils.SphereLightCfg(
color=(1.0, 1.0, 1.0),
intensity=3000.0,
),
)
sphere_1.init_state.pos = (8, 1, 2.0)
sphere_2 = AssetBaseCfg(
prim_path="/World/sphere_2",
spawn=sim_utils.SphereLightCfg(
color=(1.0, 1.0, 1.0),
intensity=30000.0,
),
)
sphere_2.init_state.pos = (10.5, -5.5, 2.0)
sphere_3 = AssetBaseCfg(
prim_path="/World/sphere_3",
spawn=sim_utils.SphereLightCfg(
color=(1.0, 1.0, 1.0),
intensity=30000.0,
),
)
sphere_3.init_state.pos = (6.0, -5.5, 2.0)
sphere_4 = AssetBaseCfg(
prim_path="/World/sphere_4",
spawn=sim_utils.SphereLightCfg(
color=(1.0, 1.0, 1.0),
intensity=30000.0,
),
)
sphere_4.init_state.pos = (8.0, -12, 2.0)
# camera
depth_camera = VIPlannerMatterportRayCasterCameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=VIPlannerMatterportRayCasterCameraCfg.OffsetCfg(
pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)
), # FIXME: currently in ROS convention
pattern_cfg=patterns.PinholeCameraPatternCfg(
width=848,
height=480,
# intrinsics=(430.31607, 0.0, 428.28408, 0.0, 430.31607, 244.00695, 0.0, 0.0, 1.0), # FIXME: intrinsics not supported yet
),
debug_vis=False,
max_distance=10,
mesh_prim_paths=["${USER_PATH_TO_USD}/matterport.ply"],
data_types=["distance_to_image_plane"],
)
semantic_camera = VIPlannerMatterportRayCasterCameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=VIPlannerMatterportRayCasterCameraCfg.OffsetCfg(
pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)
), # FIXME: currently in ROS convention
pattern_cfg=patterns.PinholeCameraPatternCfg(
width=1280,
height=720,
# intrinsics=(644.15496, 0.0, 639.53125, 0.0, 643.49212, 366.30880, 0.0, 0.0, 1.0), # FIXME: intrinsics not supported yet
),
data_types=["semantic_segmentation"],
debug_vis=False,
mesh_prim_paths=["${USER_PATH_TO_USD}/matterport.ply"],
)
@configclass
class MatterportObservationsCfg(ObservationsCfg):
"""Observations for locomotion and planner with adjustments for Matterport Environments"""
@configclass
class MatterportPlannerImageCfg(ObsGroup):
depth_measurement = ObsTerm(
func=mdp.matterport_raycast_camera_data,
params={"sensor_cfg": SceneEntityCfg("depth_camera"), "data_type": "distance_to_image_plane"},
)
semantic_measurement = ObsTerm(
func=mdp.matterport_raycast_camera_data,
params={"sensor_cfg": SceneEntityCfg("semantic_camera"), "data_type": "semantic_segmentation"},
)
def __post_init__(self):
self.concatenate_terms = False
self.enable_corruption = False
planner_image: MatterportPlannerImageCfg = MatterportPlannerImageCfg()
##
# Environment configuration
##
@configclass
class ViPlannerMatterportCfg(ViPlannerBaseCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: TerrainSceneCfg = TerrainSceneCfg(num_envs=1, env_spacing=1.0)
# adjust image observations
observations: MatterportObservationsCfg = MatterportObservationsCfg()
def __post_init__(self):
"""Post initialization."""
super().__post_init__()
# adapt viewer
self.viewer.eye = (8.5, 3.0, 2.5)
self.viewer.lookat = (8.5, -4.0, 0.0)
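
A short sketch of what the subclassing above changes, assuming the same Isaac Sim/orbit setup: only the planner_image group is swapped for the Matterport raycast cameras, while the policy and planner_transform groups are inherited unchanged:

cfg = ViPlannerMatterportCfg()
print(type(cfg.observations).__name__)                # MatterportObservationsCfg
print(type(cfg.observations.planner_image).__name__)  # MatterportPlannerImageCfg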
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/config/__init__.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .carla_cfg import ViPlannerCarlaCfg
from .matterport_cfg import ViPlannerMatterportCfg
from .warehouse_cfg import ViPlannerWarehouseCfg
__all__ = [
"ViPlannerMatterportCfg",
"ViPlannerCarlaCfg",
"ViPlannerWarehouseCfg",
]
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/utils/viplanner_matterport_raycast_camera.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import yaml
from omni.isaac.matterport.domains import MatterportRayCasterCamera
from omni.isaac.orbit.sensors.ray_caster import RayCasterCameraCfg
from omni.isaac.orbit.utils.configclass import configclass
from omni.viplanner.viplanner import DATA_DIR
from viplanner.config.viplanner_sem_meta import VIPlannerSemMetaHandler
class VIPlannerMatterportRayCasterCamera(MatterportRayCasterCamera):
def __init__(self, cfg: object):
super().__init__(cfg)
def _color_mapping(self):
viplanner_sem = VIPlannerSemMetaHandler()
with open(DATA_DIR + "/mpcat40_to_vip_sem.yml") as file:
map_mpcat40_to_vip_sem = yaml.safe_load(file)
color = viplanner_sem.get_colors_for_names(list(map_mpcat40_to_vip_sem.values()))
self.color = torch.tensor(color, device=self._device, dtype=torch.uint8)
@configclass
class VIPlannerMatterportRayCasterCameraCfg(RayCasterCameraCfg):
class_type = VIPlannerMatterportRayCasterCamera
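
The _color_mapping step above reduces to building a per-class RGB lookup table from a YAML name mapping. A standalone sketch with a toy mapping and a hypothetical palette standing in for VIPlannerSemMetaHandler (the real colors come from the viplanner package):

import torch
import yaml

yaml_text = "wall: wall\nfloor: floor\nchair: furniture\n"  # stand-in for mpcat40_to_vip_sem.yml
mapping = yaml.safe_load(yaml_text)                         # mpcat40 name -> viplanner class name
palette = {"wall": (120, 120, 120), "floor": (80, 50, 50), "furniture": (204, 5, 255)}  # hypothetical colors
color = torch.tensor([palette[name] for name in mapping.values()], dtype=torch.uint8)
print(color.shape)  # torch.Size([3, 3]) -- one RGB row per mpcat40 class id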
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/utils/unreal_importer.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import carb
import numpy as np
import omni
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.orbit.sim as sim_utils
import trimesh
import yaml
from omni.isaac.core.utils.semantics import add_update_semantics, remove_all_semantics
from omni.isaac.orbit.terrains import TerrainImporter
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.warp import convert_to_warp_mesh
from pxr import Gf, Usd, UsdGeom
if TYPE_CHECKING:
from .unreal_importer_cfg import UnRealImporterCfg
class UnRealImporter(TerrainImporter):
"""
Default stairs environment for testing
"""
cfg: UnRealImporterCfg
def __init__(self, cfg: UnRealImporterCfg) -> None:
"""
:param
"""
super().__init__(cfg)
# modify mesh
if self.cfg.cw_config_file:
self._multiply_crosswalks()
if self.cfg.people_config_file:
self._insert_people()
if self.cfg.vehicle_config_file:
self._insert_vehicles()
# assign semantic labels
if self.cfg.sem_mesh_to_class_map:
self._add_semantics()
"""
Import Functions
"""
def import_usd(self, key: str, usd_path: str):
"""Import a mesh from a USD file.
USD file can contain arbitrary many meshes.
Note:
We do not apply any material properties to the mesh. The material properties should
be defined in the USD file.
Args:
key: The key to store the mesh.
usd_path: The path to the USD file.
Raises:
ValueError: If a terrain with the same key already exists.
"""
# add mesh to the dict
if key in self.meshes:
raise ValueError(f"Mesh with key {key} already exists. Existing keys: {self.meshes.keys()}.")
# add the prim path
cfg = sim_utils.UsdFileCfg(usd_path=usd_path)
if self.cfg.axis_up == "Y" or self.cfg.axis_up == "y":
cfg.func(self.cfg.prim_path + f"/{key}", cfg, orientation=(0.2759, 0.4469, 0.4469, 0.7240))
else:
cfg.func(self.cfg.prim_path + f"/{key}", cfg)
        # assign each submesh its own geometry prim --> important for raytracing to be able to identify the submesh
submeshes = self.get_mesh_prims(self.cfg.prim_path + f"/{key}")
# get material
# physics material
# material = PhysicsMaterial(
# "/World/PhysicsMaterial", static_friction=0.7, dynamic_friction=0.7, restitution=0
# )
for submesh, submesh_name in zip(submeshes[0], submeshes[1]):
# # create geometry prim
# GeometryPrim(
# prim_path=submesh.GetPath().pathString,
# name="collision",
# position=None,
# orientation=None,
# collision=True,
# ).apply_physics_material(material)
# physx_utils.setCollider(submesh, approximationShape="None")
# "None" will use the base triangle mesh if available
# cast into UsdGeomMesh
mesh_prim = UsdGeom.Mesh(submesh)
# store the mesh
            vertices = mesh_prim.GetPointsAttr().Get()
            faces = mesh_prim.GetFaceVertexIndicesAttr().Get()
            # check that both faces and vertices are valid and non-empty
            # (truthiness checks on multi-element numpy arrays raise a ValueError)
            if vertices is None or faces is None or len(vertices) == 0 or len(faces) == 0:
                carb.log_warn(f"Mesh {submesh_name} has no faces or vertices.")
                continue
            vertices = np.asarray(vertices)
            faces = np.asarray(faces).reshape(-1, 3)
self.meshes[submesh_name] = trimesh.Trimesh(vertices=vertices, faces=faces)
# create a warp mesh
device = "cuda" if "cuda" in self.device else "cpu"
self.warp_meshes[submesh_name] = convert_to_warp_mesh(vertices, faces, device=device)
# add colliders and physics material
if self.cfg.groundplane:
ground_plane_cfg = sim_utils.GroundPlaneCfg(
physics_material=self.cfg.physics_material, size=(500, 500), visible=False
)
ground_plane = ground_plane_cfg.func("/World/GroundPlane", ground_plane_cfg, translation=(0, 0, -0.1))
ground_plane.visible = False
""" Assign Semantic Labels """
def _add_semantics(self):
# remove all previous semantic labels
remove_all_semantics(prim_utils.get_prim_at_path(self.cfg.prim_path + "/terrain"), recursive=True)
# get mesh prims
mesh_prims, mesh_prims_name = self.get_mesh_prims(self.cfg.prim_path + "/terrain")
carb.log_info(f"Total of {len(mesh_prims)} meshes in the scene, start assigning semantic class ...")
# mapping from prim name to class
with open(self.cfg.sem_mesh_to_class_map) as stream:
class_keywords = yaml.safe_load(stream)
# make all the string lower case
mesh_prims_name = [mesh_prim_single.lower() for mesh_prim_single in mesh_prims_name]
keywords_class_mapping_lower = {
key: [value_single.lower() for value_single in value] for key, value in class_keywords.items()
}
# assign class to mesh in ISAAC
def recursive_semUpdate(prim, sem_class_name: str, update_submesh: bool) -> bool:
# Necessary for Park Mesh
if (
prim.GetName() == "HierarchicalInstancedStaticMesh"
): # or "FoliageInstancedStaticMeshComponent" in prim.GetName():
add_update_semantics(prim, sem_class_name)
update_submesh = True
children = prim.GetChildren()
if len(children) > 0:
for child in children:
update_submesh = recursive_semUpdate(child, sem_class_name, update_submesh)
return update_submesh
def recursive_meshInvestigator(mesh_idx, mesh_name, mesh_prim_list) -> bool:
success = False
for class_name, keywords in keywords_class_mapping_lower.items():
if any([keyword in mesh_name for keyword in keywords]):
update_submesh = recursive_semUpdate(mesh_prim_list[mesh_idx], class_name, False)
if not update_submesh:
add_update_semantics(mesh_prim_list[mesh_idx], class_name)
success = True
break
if not success:
success_child = []
mesh_prims_children, mesh_prims_name_children = self.get_mesh_prims(
mesh_prim_list[mesh_idx].GetPrimPath().pathString
)
mesh_prims_name_children = [mesh_prim_single.lower() for mesh_prim_single in mesh_prims_name_children]
for mesh_idx_child, mesh_name_child in enumerate(mesh_prims_name_children):
success_child.append(
recursive_meshInvestigator(mesh_idx_child, mesh_name_child, mesh_prims_children)
)
success = any(success_child)
return success
mesh_list = []
for mesh_idx, mesh_name in enumerate(mesh_prims_name):
success = recursive_meshInvestigator(mesh_idx=mesh_idx, mesh_name=mesh_name, mesh_prim_list=mesh_prims)
if success:
mesh_list.append(mesh_idx)
missing = [i for x, y in zip(mesh_list, mesh_list[1:]) for i in range(x + 1, y) if y - x > 1]
assert len(mesh_list) > 0, "No mesh is assigned a semantic class!"
assert len(mesh_list) == len(
mesh_prims_name
), f"Not all meshes are assigned a semantic class! Following mesh names are included yet: {[mesh_prims_name[miss_idx] for miss_idx in missing]}"
carb.log_info("Semantic mapping done.")
return
""" Modify Mesh """
def _multiply_crosswalks(self) -> None:
"""Increase number of crosswalks in the scene."""
with open(self.cfg.cw_config_file) as stream:
multipy_cfg: dict = yaml.safe_load(stream)
# get the stage
stage = omni.usd.get_context().get_stage()
# get town prim
town_prim = multipy_cfg.pop("town_prim")
# init counter
crosswalk_add_counter = 0
for key, value in multipy_cfg.items():
print(f"Execute crosswalk multiplication '{key}'")
# iterate over the number of crosswalks to be created
for copy_idx in range(value["factor"]):
success = omni.usd.duplicate_prim(
stage=stage,
prim_path=os.path.join(self.cfg.prim_path + "/terrain", town_prim, value["cw_prim"]),
path_to=os.path.join(
self.cfg.prim_path + "/terrain",
town_prim,
value["cw_prim"] + f"_cp{copy_idx}" + value.get("suffix", ""),
),
duplicate_layers=True,
)
assert success, f"Failed to duplicate crosswalk '{key}'"
# get crosswalk prim
prim = prim_utils.get_prim_at_path(
os.path.join(
self.cfg.prim_path + "/terrain",
town_prim,
value["cw_prim"] + f"_cp{copy_idx}" + value.get("suffix", ""),
)
)
xform = UsdGeom.Mesh(prim).AddTranslateOp()
xform.Set(
Gf.Vec3d(value["translation"][0], value["translation"][1], value["translation"][2]) * (copy_idx + 1)
)
# update counter
crosswalk_add_counter += 1
carb.log_info(f"Number of crosswalks added: {crosswalk_add_counter}")
print(f"Number of crosswalks added: {crosswalk_add_counter}")
return
def _insert_vehicles(self):
# load vehicle config file
with open(self.cfg.vehicle_config_file) as stream:
vehicle_cfg: dict = yaml.safe_load(stream)
# get the stage
stage = omni.usd.get_context().get_stage()
# get town prim and all its meshes
town_prim = vehicle_cfg.pop("town_prim")
mesh_prims: dict = prim_utils.get_prim_at_path(f"{self.cfg.prim_path}/terrain/{town_prim}").GetChildren()
mesh_prims_name = [mesh_prim_single.GetName() for mesh_prim_single in mesh_prims]
# car counter
car_add_counter = 0
for key, vehicle in vehicle_cfg.items():
print(f"Execute vehicle multiplication '{key}'")
            # get all meshes whose prim name contains the key string
meshs = [
mesh_prim_single for mesh_prim_single in mesh_prims_name if vehicle["prim_part"] in mesh_prim_single
]
# iterate over the number of vehicles to be created
for idx, translation in enumerate(vehicle["translation"]):
for single_mesh in meshs:
success = omni.usd.duplicate_prim(
stage=stage,
prim_path=os.path.join(self.cfg.prim_path + "/terrain", town_prim, single_mesh),
path_to=os.path.join(
self.cfg.prim_path + "/terrain", town_prim, single_mesh + key + f"_cp{idx}"
),
duplicate_layers=True,
)
assert success, f"Failed to duplicate vehicle '{key}'"
prim = prim_utils.get_prim_at_path(
os.path.join(self.cfg.prim_path + "/terrain", town_prim, single_mesh + key + f"_cp{idx}")
)
xform = UsdGeom.Mesh(prim).AddTranslateOp()
xform.Set(Gf.Vec3d(translation[0], translation[1], translation[2]))
car_add_counter += 1
carb.log_info(f"Number of vehicles added: {car_add_counter}")
print(f"Number of vehicles added: {car_add_counter}")
return
def _insert_people(self):
# load people config file
with open(self.cfg.people_config_file) as stream:
people_cfg: dict = yaml.safe_load(stream)
# if self.cfg.scale == 1.0:
# scale_people = 100
# else:
# scale_people = 1
for key, person_cfg in people_cfg.items():
carb.log_verbose(f"Insert person '{key}'")
self.insert_single_person(
person_cfg["prim_name"],
person_cfg["translation"],
scale_people=1,
usd_path=person_cfg.get("usd_path", "People/Characters/F_Business_02/F_Business_02.usd"),
)
# TODO: movement of the people
carb.log_info(f"Number of people added: {len(people_cfg)}")
print(f"Number of people added: {len(people_cfg)}")
return
@staticmethod
def insert_single_person(
prim_name: str,
translation: list,
scale_people: float = 1.0,
usd_path: str = "People/Characters/F_Business_02/F_Business_02.usd",
) -> None:
person_prim = prim_utils.create_prim(
prim_path=os.path.join("/World/People", prim_name),
translation=tuple(translation),
usd_path=os.path.join(ISAAC_NUCLEUS_DIR, usd_path),
scale=(scale_people, scale_people, scale_people),
)
if isinstance(person_prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd):
person_prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0))
else:
person_prim.GetAttribute("xformOp:orient").Set(Gf.Quatf(1.0, 0.0, 0.0, 0.0))
add_update_semantics(person_prim, "person")
# add collision body
UsdGeom.Mesh(person_prim)
return
@staticmethod
def get_mesh_prims(env_prim: str) -> tuple[list[Usd.Prim], list[str]]:
def recursive_search(start_prim: str, mesh_prims: list):
for curr_prim in prim_utils.get_prim_at_path(start_prim).GetChildren():
if curr_prim.GetTypeName() == "Xform" or curr_prim.GetTypeName() == "Mesh":
mesh_prims.append(curr_prim)
elif curr_prim.GetTypeName() == "Scope":
mesh_prims = recursive_search(start_prim=curr_prim.GetPath().pathString, mesh_prims=mesh_prims)
return mesh_prims
assert prim_utils.is_prim_path_valid(env_prim), f"Prim path '{env_prim}' is not valid"
mesh_prims = []
mesh_prims = recursive_search(env_prim, mesh_prims)
# mesh_prims: dict = prim_utils.get_prim_at_path(self.cfg.prim_path + "/" + self.cfg.usd_name.split(".")[0]).GetChildren()
mesh_prims_name = [mesh_prim_single.GetName() for mesh_prim_single in mesh_prims]
return mesh_prims, mesh_prims_name
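
The keyword matching inside _add_semantics boils down to a lower-cased substring search over mesh names. A standalone sketch with plain strings instead of USD prims (keywords abbreviated from keyword_mapping.yml):

from __future__ import annotations

keyword_to_class = {
    "sidewalk": ["road_sidewalk", "sidewalkcube", "road_grass"],
    "road": ["road_road", "road_marking", "manholecover"],
}

def classify(mesh_name: str) -> str | None:
    name = mesh_name.lower()
    for class_name, keywords in keyword_to_class.items():
        if any(keyword in name for keyword in keywords):
            return class_name
    return None

print(classify("Road_Sidewalk_Town01_12"))  # sidewalk
print(classify("ManholeCover_3"))           # road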
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/utils/__init__.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .unreal_importer import UnRealImporter
from .unreal_importer_cfg import UnRealImporterCfg
from .viplanner_matterport_raycast_camera import (
VIPlannerMatterportRayCasterCamera,
VIPlannerMatterportRayCasterCameraCfg,
)
__all__ = [
"VIPlannerMatterportRayCasterCamera",
"VIPlannerMatterportRayCasterCameraCfg",
"UnRealImporter",
"UnRealImporterCfg",
]
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/omni/viplanner/utils/unreal_importer_cfg.py

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from .unreal_importer import UnRealImporter
@configclass
class UnRealImporterCfg(TerrainImporterCfg):
class_type: type = UnRealImporter
"""The class name of the terrain importer."""
terrain_type = "usd"
"""The type of terrain to generate. Defaults to "usd".
"""
    # scale
    scale: float = 0.01  # scale the scene to meters (carla: 0.01, nomoko: 1, park: 0.01, warehouse: 1.0)
    # up axis
    axis_up: str = "Z"  # carla, nomoko: "Y"; park, warehouse: "Z"
    # multiply crosswalks
    cw_config_file: str | None = None
    # mesh to semantic class mapping --> only if set, semantic classes will be added to the scene
    sem_mesh_to_class_map: str | None = None  # e.g. os.path.join(DATA_DIR, "town01", "keyword_mapping.yml")
    # add a ground plane to the scene
    groundplane: bool = True
    # add people to the scene
    people_config_file: str | None = None
    # multiply vehicles
    vehicle_config_file: str | None = None
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/mpcat40_to_vip_sem.yml

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
void: static
wall: wall
floor: floor
chair: furniture
door: door
table: furniture
picture: wall
cabinet: furniture
cushion: furniture
window: wall
sofa: furniture
bed: furniture
curtain: wall
chest_of_drawers: furniture
plant: vegetation
sink: furniture
stairs: stairs
ceiling: ceiling
toilet: furniture
stool: furniture
towel: indoor_soft
mirror: wall
tv_monitor: wall
shower: furniture
column: wall
bathtub: furniture
counter: furniture
fireplace: furniture
lighting: static
beam: furniture
railing: wall
shelving: wall
blinds: wall
gym_equipment: furniture
seating: furniture
board_panel: wall
furniture: furniture
appliances: dynamic
clothes: indoor_soft
objects: static
misc: static
unlabeled: static
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town02/cw_multiply_cfg.yml

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Definition of which crosswalks should be repeated how often along which axis
# Adjusted for: TOWN02
# each entry has the following format:
# name:
# cw_prim: [str] prim of the crosswalk in the loaded town file
# factor: [int] number how often the crosswalk should be repeated
#   translation: [float, float, float] vector along which the crosswalk should be repeated, defines the position of the first
# repeated crosswalk, every following crosswalk will be placed at the position of the
# previous one plus the translation vector
# suffix: [str] optional, str will be added to the copied prim of the new crosswalk
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
#       in the original data format of the town file, i.e. y-up and in cm
town_prim: "Town02"
cw_2:
cw_prim: "Road_Crosswalk_Town02_8"
factor: 4
translation: [+1500, 0, 0]
cw_3:
cw_prim: "Road_Crosswalk_Town02_10"
factor: 2
translation: [-1500, 0, 0]
cw_4:
cw_prim: "Road_Crosswalk_Town02_9"
factor: 4
translation: [+1500, 0, 0]
suffix: "_neg"
cw_5:
cw_prim: "Road_Crosswalk_Town02_11"
factor: 4
translation: [1500, 0, 0]
cw_6_pos:
cw_prim: "Road_Crosswalk_Town02_12"
factor: 1
translation: [0, 0, 1500]
cw_6_neg:
cw_prim: "Road_Crosswalk_Town02_12"
factor: 2
translation: [0, 0, -1500]
cw_7_neg:
cw_prim: "Road_Crosswalk_Town02_7"
factor: 1
translation: [-1500, 0, 0]
cw_7_pos:
cw_prim: "Road_Crosswalk_Town02_7"
factor: 1
translation: [1500, 0, 0]
cw_8:
cw_prim: "Road_Crosswalk_Town02_4"
factor: 2
translation: [1500, 0, 0]
cw_9:
cw_prim: "Road_Crosswalk_Town02_3"
factor: 4
translation: [1500, 0, 0]
cw_10:
cw_prim: "Road_Crosswalk_Town02_6"
factor: 2
translation: [-1500, 0, 0]
cw_11_neg:
cw_prim: "Road_Crosswalk_Town02_1"
factor: 4
translation: [-1500, 0, 0]
cw_11_pos:
cw_prim: "Road_Crosswalk_Town02_1"
factor: 2
translation: [+1500, 0, 0]
cw_12:
cw_prim: "Road_Crosswalk_Town02_2"
factor: 4
translation: [-1500, 0, 0]
cw_13:
cw_prim: "Road_Crosswalk_Town02_13"
factor: 2
translation: [0, 0, +1500]
cw_14_pos:
cw_prim: "Road_Crosswalk_Town02_15"
factor: 2
translation: [0, 0, +1500]
cw_14_neg:
cw_prim: "Road_Crosswalk_Town02_15"
factor: 1
translation: [0, 0, -1500]
cw_15:
cw_prim: "Road_Crosswalk_Town02_16"
factor: 2
translation: [0, 0, -1500]
cw_16_neg:
cw_prim: "Road_Crosswalk_Town02_17"
factor: 2
translation: [0, 0, -1500]
cw_16_pos:
cw_prim: "Road_Crosswalk_Town02_17"
factor: 4
translation: [0, 0, +1500]
cw_17_neg:
cw_prim: "Road_Crosswalk_Town02_19"
factor: 4
translation: [0, 0, -1500]
cw_17_pos:
cw_prim: "Road_Crosswalk_Town02_19"
factor: 1
translation: [0, 0, +1500]
cw_18:
cw_prim: "Road_Crosswalk_Town02_20"
factor: 3
translation: [0, 0, +1500]
# EoF
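
For reference, the placement rule described in the header comments (each copy lands at the previous copy's position plus the translation vector) reduces to offsets of translation * (copy_idx + 1), matching _multiply_crosswalks in unreal_importer.py. A tiny Python check using the cw_2 entry above:

translation = (1500.0, 0.0, 0.0)  # cw_2: factor 4 along +x (cm, y-up town frame)
factor = 4
offsets = [tuple(c * (i + 1) for c in translation) for i in range(factor)]
print(offsets)  # [(1500.0, 0.0, 0.0), (3000.0, 0.0, 0.0), (4500.0, 0.0, 0.0), (6000.0, 0.0, 0.0)]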
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town02/vehicle_cfg.yml

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Definition of where additional vehicles should be added
# Adjusted for: TOWN02
# each entry has the following format:
# name:
# prim_part: [str] part of the prim of the vehicle that should be multiplied (every prim containing this string will be multiplied)
# translation: [[float, float, float]] list of translations of the vehicle
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
#       in the original data format of the town file, i.e. y-up and in cm
# NOTE: for Town02, take "Vh_Car_SeatLeon_54" for vehicles along the x axis
town_prim: "Town02"
vehicle_1:
prim_part: "Vh_Car_SeatLeon_54"
translation:
# horizontal road low
- [3900, 0, 600]
- [3900, 0, 3000]
- [3900, 0, 3500]
- [3900, 0, 4000]
- [3900, 0, 6000]
- [3900, 0, -1500]
- [3900, 0, -4000]
- [3900, 0, -7500]
- [3900, 0, -8000]
- [3500, 0, -10000]
- [3500, 0, -7500]
- [3500, 0, -3000]
- [3500, 0, 1000]
- [3500, 0, 5000]
# horizontal road middle
- [-10800, 0, 1000]
- [-10800, 0, 5000]
- [-10800, 0, -2500]
# horizontal road high
- [-15800, 0, 2000]
- [-15800, 0, 4700]
- [-16200, 0, 3400]
- [-16200, 0, 0]
- [-16200, 0, -3000]
- [-16200, 0, -6000]
- [-16200, 0, -9000]
# EoF
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town02/keyword_mapping.yml

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
road:
- Road_Road
- Road_Marking
- ManholeCover
- roadunique
sidewalk:
- Road_Sidewalk
- SideWalkCube
  - Road_Grass # pedestrian terrain (between buildings, squares, ...)
crosswalk:
- Road_Crosswalk
floor:
  - Pathwalk # way to the door of a building
  - PathWay # way to the door of a building
- curb
- iron_plank
- Cube
- Floor
vehicle:
- Van
- Vehicle
- Car
building:
- NewBlueprint # roofs, windows, other parts of buildings
- CityBuilding
- Suburb
- House
- MergingBuilding
- BuildingWall
- garage
- airConditioner
- Office
- Block
- Apartment
- ConstructBuilding
- snacksStand
- doghouse
- streetCounter
- fountain
- container
- pergola
- GuardShelter
- atm
- awning
- bus_stop
- NewsStand
- ironplank
- kiosk
- TownHall
wall:
- GardenWall
- Wall
- RepSpline # fences or walls to limit residential areas
- RepeatedMeshesAlongSpline # should make the spline go around the building --> not working in isaac
fence:
- urbanFence
- chain_barrier
- picketFence
- fence
pole:
- bollard
- Lamppost
- Parklight
- CityLamp
- Traffic_Light_Base
- ElectricPole
- PoleCylinder
traffic_sign:
- streetBillboard
- RoundSign
- roadsigns
traffic_light:
- TLights
- TL_BotCover
- SM_Charger
- SM_FreewayLights
bench:
- bench
vegetation:
- tree
- Stone
- Cypress
- PlantPot
- TreePot
- Maple
- Beech
- FanPalm
- Sassafras
- Pine_Bush
- Hedge
- Bush
- palm
- acer
- plant_pit
- arbusto_pine
terrain:
- dirtDebris # roughness in the terrain, street or sidewalk (traversable but more difficult)
- GrassLeaf
- Grass
- LandscapeComponent
- Ash
water_surface:
- TileLake
sky:
- terrain2
- sky
dynamic:
- Trashbag
- advertise
- creased_box
- garbage
- trashcan
- clothes_line
- barbecue
- ConstructionCone
- box
- droppingasset
- barrel
static:
- firehydrant
- Gnome
- metroMap
- Bikeparking
- StaticMesh # gate barrier
- trampoline
- wheelbarrow
- NewspaperBox
- swing
- bin
- big_plane
- plane
- slide
- instancedfoliageactor
- roadbillboard
- prophitreacting_child # vending machines
- prop_wateringcan
furniture:
- Campingtable
- swingcouch
- table
- chair
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town01/cw_multiply_cfg.yml

# Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Definition of which crosswalks should be repeated how often along which axis
# Adjusted for: TOWN01
# each entry has the following format:
# name:
# cw_prim: [str] prim of the crosswalk in the loaded town file
# factor: [int] number how often the crosswalk should be repeated
#   translation: [float, float, float] vector along which the crosswalk should be repeated, defines the position of the first
# repeated crosswalk, every following crosswalk will be placed at the position of the
# previous one plus the translation vector
# suffix: [str] optional, str will be added to the copied prim of the new crosswalk
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
#       in the original data format of the town file, i.e. y-up and in cm
town_prim: "Town01_Opt/Town01_Opt"
cw_2:
cw_prim: "Road_Crosswalk_Town01_2"
factor: 2
translation: [0, 0, -1500]
cw_3_pos:
cw_prim: "Road_Crosswalk_Town01_3"
factor: 6
translation: [1500, 0, 0]
cw_3_neg:
cw_prim: "Road_Crosswalk_Town01_3"
factor: 1
translation: [-1500, 0, 0]
suffix: "_neg"
cw_4:
cw_prim: "Road_Crosswalk_Town01_4"
factor: 1
translation: [1500, 0, 0]
cw_5:
cw_prim: "Road_Crosswalk_Town01_5"
factor: 3
translation: [1500, 0, 0]
cw_6:
cw_prim: "Road_Crosswalk_Town01_6"
factor: 3
translation: [0, 0, -1500]
cw_9:
cw_prim: "Road_Crosswalk_Town01_9"
factor: 2
translation: [0, 0, -1500]
cw_10:
cw_prim: "Road_Crosswalk_Town01_10"
factor: 1
translation: [0, 0, 1500]
cw_11:
cw_prim: "Road_Crosswalk_Town01_11"
factor: 1
translation: [0, 0, 1500]
cw_14:
cw_prim: "Road_Crosswalk_Town01_14"
factor: 1
translation: [0, 0, 1500]
cw_15:
cw_prim: "Road_Crosswalk_Town01_15"
factor: 2
translation: [0, 0, -1500]
cw_18:
cw_prim: "Road_Crosswalk_Town01_18"
factor: 5
translation: [1500, 0, 0]
cw_19:
cw_prim: "Road_Crosswalk_Town01_19"
factor: 2
translation: [1500, 0, 0]
cw_21:
cw_prim: "Road_Crosswalk_Town01_21"
factor: 3
translation: [1500, 0, 0]
cw_22:
cw_prim: "Road_Crosswalk_Town01_22"
factor: 5
translation: [1500, 0, 0]
cw_24:
cw_prim: "Road_Crosswalk_Town01_24"
factor: 3
translation: [-1500, 0, 0]
cw_26_pos:
cw_prim: "Road_Crosswalk_Town01_26"
factor: 5
translation: [1500, 0, 0]
cw_26_neg:
cw_prim: "Road_Crosswalk_Town01_26"
factor: 3
translation: [-1500, 0, 0]
suffix: "_neg"
cw_28:
cw_prim: "Road_Crosswalk_Town01_28"
factor: 4
translation: [0, 0, 1500]
cw_29:
cw_prim: "Road_Crosswalk_Town01_29"
factor: 4
translation: [0, 0, 1500]
cw_30:
cw_prim: "Road_Crosswalk_Town01_30"
factor: 4
translation: [0, 0, 1500]
cw_31:
cw_prim: "Road_Crosswalk_Town01_31"
factor: 2
translation: [0, 0, 1500]
cw_32:
cw_prim: "Road_Crosswalk_Town01_32"
factor: 6
translation: [0, 0, -1500]
cw_33_pos:
cw_prim: "Road_Crosswalk_Town01_33"
factor: 4
translation: [1500, 0, 0]
cw_33_neg:
cw_prim: "Road_Crosswalk_Town01_33"
factor: 3
translation: [-2500, 0, 0]
suffix: "_neg"
cw_34:
cw_prim: "Road_Crosswalk_Town01_34"
factor: 7
translation: [1500, 0, 0]
cw_35:
cw_prim: "Road_Crosswalk_Town01_35"
factor: 1
translation: [1500, 0, 0]
cw_36_pos:
cw_prim: "Road_Crosswalk_Town01_36"
factor: 1
translation: [0, 0, 1500]
cw_36_neg:
cw_prim: "Road_Crosswalk_Town01_36"
factor: 5
translation: [0, 0, -1500]
suffix: "_neg"
cw_40:
cw_prim: "Road_Crosswalk_Town01_40"
factor: 4
translation: [1500, 0, 0]
# EoF
| 3,790 | YAML | 20.178771 | 120 | 0.645646 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town01/vehicle_cfg.yml | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Definition of where additional vehicles should be added
# Adjusted for: TOWN01
# each entry has the following format:
# name:
# prim_part: [str] part of the prim of the vehicle that should be multiplied (every prim containing this string will be multiplied)
# translation: [[float, float, float]] list of translations of the vehicle
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
#                      in the original data format of the town file, i.e. y-up and in cm
# NOTE: for Town01, take "ChevroletImpala_High_V4" for vehicles along the x axis and "JeepWranglerRubicon_36"
# for vehicles along the y axis
town_prim: "Town01_Opt"
vehicle_1:
prim_part: "ChevroletImpala_High_V4"
translation:
- [-15300, 0, -4000]
- [-15300, 0, 0]
- [-15300, 0, 15000]
- [-15600, 0, 21000]
- [9000, 0, 20500]
- [9400, 0, 15000]
- [9400, 0, 9000]
- [9400, 0, 7000]
- [9000, 0, 6000]
- [9000, 0, 500]
- [9000, 0, -4000]
vehicle_2:
prim_part: "JeepWranglerRubicon_36"
translation:
- [0, 0, -1500]
- [3500, 0, -1500]
- [5300, 0, -1900]
- [9000, 0, -1900]
- [16500, 0, -1500]
- [22500, 0, -1900]
- [25000, 0, 3800]
- [20000, 0, 4200]
- [17000, 0, 4200]
- [12000, 0, 3800]
- [7000, 0, 3800]
- [7000, 0, 11100]
- [11000, 0, 11500]
- [16000, 0, 11100]
- [20000, 0, 11100]
- [26000, 0, 11500]
- [26000, 0, 17800]
- [23000, 0, 18200]
- [18000, 0, 18200]
- [14000, 0, 17800]
- [13500, 0, 18200]
- [10000, 0, 18200]
- [9500, 0, 17800]
- [4000, 0, 17800]
- [2000, 0, 30800]
- [-1000, 0, 31300]
- [6000, 0, 31300]
- [12000, 0, 30800]
- [15000, 0, 30800]
- [15600, 0, 30800]
- [16400, 0, 30800]
- [21000, 0, 31300]
- [25000, 0, 31300]
# EoF
| 2,053 | YAML | 26.026315 | 160 | 0.571359 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town01/area_filter_cfg.yaml | # Definition of which areas should not be explored and used to sample points
# Adjusted for: TOWN01
# each entry has the following format:
# name:
#  x_low:   [float] lower bound along the x axis
#  x_high:  [float] upper bound along the x axis
#  y_low:   [float] lower bound along the y axis
#  y_high:  [float] upper bound along the y axis
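# A hedged sketch of how these areas are typically consumed (Python-like
# pseudocode; variable names are assumed):
#   skip_point = any(a["x_low"] <= x <= a["x_high"] and a["y_low"] <= y <= a["y_high"]
#                    for a in areas.values())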
area_1:
x_low: 208.9
x_high: 317.8
y_low: 100.5
y_high: 325.5
area_2:
x_low: 190.3
x_high: 315.8
y_low: 12.7
y_high: 80.6
area_3:
x_low: 123.56
x_high: 139.37
y_low: 10
y_high: 80.0
| 601 | YAML | 20.499999 | 76 | 0.570715 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/town01/keyword_mapping.yml | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
road:
- Road_Road
- Road_Marking
- ManholeCover
- roadunique
sidewalk:
- Road_Sidewalk
- SideWalkCube
- Road_Grass # pedestrian terrain (between buildings, squares, ...)
crosswalk:
- Road_Crosswalk
floor:
- Pathwalk # way to the door of a building
- PathWay # way to the door of a building
- curb
- iron_plank
- Cube
vehicle:
- Van
- Vehicle
- Car
building:
- NewBlueprint # roofs, windows, other parts of buildings
- CityBuilding
- Suburb
- House
- MergingBuilding
- BuildingWall
- garage
- airConditioner
- Office
- Block
- Apartment
- ConstructBuilding
- snacksStand
- doghouse
- streetCounter
- fountain
- container
- pergola
- GuardShelter
- atm
- awning
- bus_stop
- NewsStand
- ironplank
- kiosk
wall:
- GardenWall
- Wall
- RepSpline # fences or walls to limit residential areas
- RepeatedMeshesAlongSpline # should make the spline go around the building --> not working in isaac
fence:
- urbanFence
- chain_barrier
- picketFence
- fence
pole:
- bollard
- Lamppost
- Parklight
- CityLamp
- Traffic_Light_Base
traffic_sign:
- streetBillboard
- RoundSign
- roadsigns
traffic_light:
- TLights
- TL_BotCover
bench:
- bench
vegetation:
- tree
- Stone
- Cypress
- PlantPot
- TreePot
- Maple
- Beech
- FanPalm
- Sassafras
- Pine_Bush
- Hedge
- Bush
- palm
- acer
terrain:
- dirtDebris # roughness in the terrain, street or sidewalk (traversable but more difficult)
- GrassLeaf
- Grass
- LandscapeComponent
- Ash
water_surface:
- TileLake
sky:
- terrain2
- sky
dynamic:
- Trashbag
- advertise
- creased_box
- garbage
- trashcan
- clothes_line
- barbecue
- ConstructionCone
- box
- droppingasset
- barrel
static:
- firehydrant
- Gnome
- metroMap
- Bikeparking
- StaticMesh # gate barrier
- trampoline
- wheelbarrow
- NewspaperBox
- swing
- bin
- big_plane
- slide
- instancedfoliageactor
- roadbillboard
- prophitreacting_child # vending machines
furniture:
- Campingtable
- swingcouch
- table
- chair
| 2,281 | YAML | 15.536232 | 103 | 0.666813 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/warehouse/people_cfg.yml | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
person_1:
prim_name: "Person_1"
translation: [4.23985, -2.42198, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_2:
prim_name: "Person_2"
translation: [2.51653, 7.80822, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_03/male_adult_construction_03.usd
person_3:
prim_name: "Person_3"
translation: [5.07179, 3.8561, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_05_new/male_adult_construction_05_new.usd
person_4:
prim_name: "Person_4"
translation: [-3.2015, 11.79695, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_01/male_adult_construction_01.usd
person_5:
prim_name: "Person_5"
translation: [-6.70566, 7.58019, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_02/male_adult_construction_02.usd
person_6:
prim_name: "Person_6"
translation: [-5.12784, 2.43409, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_05/male_adult_construction_05.usd
person_7:
prim_name: "Person_7"
translation: [-6.98476, -9.47249, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_8:
prim_name: "Person_8"
translation: [-1.63744, -3.43285, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_9:
prim_name: "Person_9"
translation: [6.15617, -8.3114, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_05/male_adult_construction_05.usd
person_10:
prim_name: "Person_10"
translation: [5.34416, -7.47814, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_05_new/male_adult_construction_05_new.usd
| 2,054 | YAML | 30.136363 | 96 | 0.706426 |
leggedrobotics/viplanner/omniverse/extension/omni.viplanner/data/warehouse/keyword_mapping.yml | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
floor:
- SM_Floor1
- SM_Floor2
- SM_Floor3
- SM_Floor4
- SM_Floor5
- SM_Floor6
- groundplane
wall:
- FuseBox
- SM_PillarA
- SM_Sign
- SM_Wall
- S_Barcode
bench:
- Bench
ceiling:
- SM_Ceiling
- PillarPartA
- SM_Beam
- SM_Bracket
static:
- LampCeiling
- SM_FloorDecal
- SM_FireExtinguisher
furniture:
- SM_Rack
- SM_SignCVer
- S_AisleSign
- SM_Palette
- SM_CardBox
- SmallKLT
- SM_PushCarta
- SM_CratePlastic
| 617 | YAML | 12.434782 | 60 | 0.645057 |
leggedrobotics/viplanner/viplanner/depth_reconstruct.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
import cv2
import numpy as np
import open3d as o3d
import scipy.spatial.transform as tf
from tqdm import tqdm
# imperative-cost-map
from viplanner.config import ReconstructionCfg, VIPlannerSemMetaHandler
class DepthReconstruction:
"""
    Reconstruct 3D Map with depth images, assumes the ground-truth camera odometry is known
    Config parameters can be set in ReconstructionCfg
    Expects the following data structure:
- env_name
- camera_extrinsic.txt (format: x y z qx qy qz qw)
- intrinsics.txt (expects ROS CameraInfo format --> P-Matrix)
- depth (either png and/ or npy)
- xxxx.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- xxxx.npy (arrays should be named with 4 digits, e.g. 0000.npy, 0001.npy, etc.)
- semantics (optional)
- xxxx.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc., RGB images)
    when both depth and semantic images are available, define sem_suffix and depth_suffix in ReconstructionCfg to differentiate between the two with the following structure:
- env_name
- camera_extrinsic{depth_suffix}.txt (format: x y z qx qy qz qw)
- camera_extrinsic{sem_suffix}.txt (format: x y z qx qy qz qw)
- intrinsics.txt (expects ROS CameraInfo format --> P-Matrix) (contains both intrinsics for depth and semantic images)
- depth (either png and/ or npy)
- xxxx{depth_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- xxxx{depth_suffix}.npy (arrays should be named with 4 digits, e.g. 0000.npy, 0001.npy, etc.)
- semantics (optional)
- xxxx{sem_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc., RGB Images)
in the case of high resolution depth images for the reconstruction, the following additional directory is expected:
- depth_high_res (either png and/ or npy)
- xxxx{depth_suffix}.png (images should be named with 4 digits, e.g. 0000.png, 0001.png, etc.)
- xxxx{depth_suffix}.npy (arrays should be named with 4 digits, e.g. 0000.npy, 0001.npy, etc.)
"""
debug = False
def __init__(self, cfg: ReconstructionCfg):
# get config
self._cfg: ReconstructionCfg = cfg
# read camera params and odom
self.K_depth: np.ndarray = None
self.K_sem: np.ndarray = None
self._read_intrinsic()
self.extrinsics_depth: np.ndarray = None
self.extrinsics_sem: np.ndarray = None
self._read_extrinsic()
# semantic classes for viplanner
self.sem_handler = VIPlannerSemMetaHandler()
# control flag if point-cloud has been loaded
self._is_constructed = False
# variables
self._pcd: o3d.geometry.PointCloud = None
print("Ready to read depth data.")
# public methods
def depth_reconstruction(self):
# identify start and end image idx for the reconstruction
N = len(self.extrinsics_depth)
if self._cfg.max_images:
self._start_idx = self._cfg.start_idx
self._end_idx = min(self._cfg.start_idx + self._cfg.max_images, N)
else:
self._start_idx = 0
self._end_idx = N
if self._cfg.point_cloud_batch_size > self._end_idx - self._start_idx:
print(
"[WARNING] batch size must be smaller or equal than number of"
" images to reconstruct, now set to max value"
f" {self._end_idx - self._start_idx}"
)
self._cfg.point_cloud_batch_size = self._end_idx - self._start_idx
print("total number of images for reconstruction:" f" {int(self._end_idx - self._start_idx)}")
# get pixel tensor for reprojection
pixels = self._computePixelTensor()
# init point-cloud
self._pcd = o3d.geometry.PointCloud() # point size (n, 3)
first_batch = True
# init lists
points_all = []
if self._cfg.semantics:
sem_map_all = []
for img_counter, img_idx in enumerate(
tqdm(
range(self._end_idx - self._start_idx),
desc="Reconstructing 3D Points",
)
):
im = self._load_depth_image(img_idx)
extrinsics = self.extrinsics_depth[img_idx + self._start_idx]
# project points in world frame
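            # each row of `pixels` is a unit-depth ray in the camera frame;
            # rotating by `rot` and scaling by the measured depth yields the
            # 3D offset of every pixel relative to the camera position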
rot = tf.Rotation.from_quat(extrinsics[3:]).as_matrix()
points = im.reshape(-1, 1) * (rot @ pixels.T).T
# filter points with 0 depth --> otherwise obstacles at camera position
non_zero_idx = np.where(points.any(axis=1))[0]
points_final = points[non_zero_idx] + extrinsics[:3]
if self._cfg.semantics and self._cfg.high_res_depth:
img_path = os.path.join(
self._cfg.get_data_path(),
"semantics",
str(self._start_idx + img_idx).zfill(4) + self._cfg.sem_suffix + ".png",
)
sem_image = cv2.imread(img_path) # load in BGR format
sem_image = cv2.cvtColor(sem_image, cv2.COLOR_BGR2RGB)
sem_points = sem_image.reshape(-1, 3)[non_zero_idx]
points_all.append(points_final)
sem_map_all.append(sem_points)
elif self._cfg.semantics:
sem_annotation, filter_idx = self._get_semantic_image(points_final, img_idx)
points_all.append(points_final[filter_idx])
sem_map_all.append(sem_annotation)
else:
points_all.append(points_final)
# update point cloud
if img_counter % self._cfg.point_cloud_batch_size == 0:
print("updating open3d geometry point cloud with" f" {self._cfg.point_cloud_batch_size} images ...")
if first_batch:
self._pcd.points = o3d.utility.Vector3dVector(np.vstack(points_all))
if self._cfg.semantics:
self._pcd.colors = o3d.utility.Vector3dVector(np.vstack(sem_map_all) / 255.0)
first_batch = False
else:
self._pcd.points.extend(np.vstack(points_all))
if self._cfg.semantics:
self._pcd.colors.extend(np.vstack(sem_map_all) / 255.0)
# reset buffer lists
del points_all
points_all = []
if self._cfg.semantics:
del sem_map_all
sem_map_all = []
# apply downsampling
print("downsampling point cloud with voxel size" f" {self._cfg.voxel_size} ...")
self._pcd = self._pcd.voxel_down_sample(self._cfg.voxel_size)
# add last batch
if len(points_all) > 0:
print("updating open3d geometry point cloud with last images ...")
self._pcd.points.extend(np.vstack(points_all))
points_all = None
if self._cfg.semantics:
self._pcd.colors.extend(np.vstack(sem_map_all) / 255.0)
sem_map_all = None
# apply downsampling
print(f"downsampling point cloud with voxel size {self._cfg.voxel_size} ...")
self._pcd = self._pcd.voxel_down_sample(self._cfg.voxel_size)
# update flag
self._is_constructed = True
print("construction completed.")
return
def show_pcd(self):
if not self._is_constructed:
print("no reconstructed cloud")
return
origin = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=np.min(np.asarray(self._pcd.points), axis=0)
)
o3d.visualization.draw_geometries([self._pcd, origin], mesh_show_wireframe=True) # visualize point cloud
return
def save_pcd(self):
        if not self._is_constructed:
            print("save points failed, no reconstructed cloud!")
            return
print("save output files to: " + os.path.join(self._cfg.data_dir, self._cfg.env))
# pre-create the folder for the mapping
os.makedirs(
os.path.join(
os.path.join(self._cfg.data_dir, self._cfg.env),
"maps",
"cloud",
),
exist_ok=True,
)
os.makedirs(
os.path.join(os.path.join(self._cfg.data_dir, self._cfg.env), "maps", "data"),
exist_ok=True,
)
os.makedirs(
os.path.join(
os.path.join(self._cfg.data_dir, self._cfg.env),
"maps",
"params",
),
exist_ok=True,
)
# save clouds
o3d.io.write_point_cloud(
os.path.join(self._cfg.data_dir, self._cfg.env, "cloud.ply"),
self._pcd,
) # save point cloud
print("saved point cloud to ply file.")
@property
def pcd(self):
return self._pcd
"""helper functions"""
def _read_extrinsic(self) -> None:
if self._cfg.semantics:
extrinsic_path = os.path.join(
self._cfg.get_data_path(),
"camera_extrinsic" + self._cfg.sem_suffix + ".txt",
)
self.extrinsics_sem = np.loadtxt(extrinsic_path, delimiter=",")
if self._cfg.high_res_depth:
assert self._cfg.semantics, (
"high res depth requires semantic images since depth should be" " recorded with semantic camera"
)
self.extrinsics_depth = self.extrinsics_sem
else:
extrinsic_path = os.path.join(
self._cfg.get_data_path(),
"camera_extrinsic" + self._cfg.depth_suffix + ".txt",
)
self.extrinsics_depth = np.loadtxt(extrinsic_path, delimiter=",")
return
def _read_intrinsic(self) -> None:
intrinsic_path = os.path.join(self._cfg.get_data_path(), "intrinsics.txt")
P = np.loadtxt(intrinsic_path, delimiter=",") # assumes ROS P matrix
self._intrinsic = list(P)
if self._cfg.semantics:
self.K_depth = P[0].reshape(3, 4)[:3, :3]
self.K_sem = P[1].reshape(3, 4)[:3, :3]
else:
self.K_depth = P.reshape(3, 4)[:3, :3]
if self._cfg.high_res_depth:
self.K_depth = self.K_sem
return
def _load_depth_image(self, idx: int) -> np.ndarray:
# get path to images
if self._cfg.high_res_depth:
dir_path = os.path.join(self._cfg.get_data_path(), "depth_high_res")
else:
dir_path = os.path.join(self._cfg.get_data_path(), "depth")
if os.path.isfile(
os.path.join(
dir_path,
str(idx + self._start_idx).zfill(4) + self._cfg.depth_suffix + ".npy",
)
):
img_array = (
np.load(
os.path.join(
dir_path,
str(idx + self._start_idx).zfill(4) + self._cfg.depth_suffix + ".npy",
)
)
/ self._cfg.depth_scale
)
else:
img_path = os.path.join(
dir_path,
str(idx + self._start_idx).zfill(4) + self._cfg.depth_suffix + ".png",
)
img_array = cv2.imread(img_path, cv2.IMREAD_ANYDEPTH) / self._cfg.depth_scale
img_array[~np.isfinite(img_array)] = 0
return img_array
def _computePixelTensor(self):
depth_img = self._load_depth_image(0)
# get image plane mesh grid
pix_u = np.arange(0, depth_img.shape[1])
pix_v = np.arange(0, depth_img.shape[0])
grid = np.meshgrid(pix_u, pix_v)
pixels = np.vstack(list(map(np.ravel, grid))).T
pixels = np.hstack([pixels, np.ones((len(pixels), 1))]) # add ones for 3D coordinates
# transform to camera frame
k_inv = np.linalg.inv(self.K_depth)
pix_cam_frame = np.matmul(k_inv, pixels.T)
# reorder to be in "robotics" axis order (x forward, y left, z up)
return pix_cam_frame[[2, 0, 1], :].T * np.array([1, -1, -1])
def _get_semantic_image(self, points, idx):
# load semantic image and pose
img_path = os.path.join(
self._cfg.get_data_path(),
"semantics",
str(self._start_idx + idx).zfill(4) + self._cfg.sem_suffix + ".png",
)
sem_image = cv2.imread(img_path) # loads in bgr order
sem_image = cv2.cvtColor(sem_image, cv2.COLOR_BGR2RGB)
pose_sem = self.extrinsics_sem[idx + self._cfg.start_idx]
# transform points to semantic camera frame
points_sem_cam_frame = (tf.Rotation.from_quat(pose_sem[3:]).as_matrix().T @ (points - pose_sem[:3]).T).T
# normalize points
points_sem_cam_frame_norm = points_sem_cam_frame / points_sem_cam_frame[:, 0][:, np.newaxis]
# reorder points be camera convention (z-forward)
points_sem_cam_frame_norm = points_sem_cam_frame_norm[:, [1, 2, 0]] * np.array([-1, -1, 1])
# transform points to pixel coordinates
pixels = (self.K_sem @ points_sem_cam_frame_norm.T).T
# filter points outside of image
filter_idx = (
(pixels[:, 0] >= 0)
& (pixels[:, 0] < sem_image.shape[1])
& (pixels[:, 1] >= 0)
& (pixels[:, 1] < sem_image.shape[0])
)
# get semantic annotation
sem_annotation = sem_image[
pixels[filter_idx, 1].astype(int),
pixels[filter_idx, 0].astype(int),
]
# remove all pixels that have no semantic annotation
non_classified_idx = np.all(sem_annotation == self.sem_handler.class_color["static"], axis=1)
sem_annotation = sem_annotation[~non_classified_idx]
filter_idx[np.where(filter_idx)[0][non_classified_idx]] = False
return sem_annotation, filter_idx
if __name__ == "__main__":
cfg = ReconstructionCfg()
# start depth reconstruction
depth_constructor = DepthReconstruction(cfg)
depth_constructor.depth_reconstruction()
depth_constructor.save_pcd()
depth_constructor.show_pcd()
# EoF
| 14,567 | Python | 38.16129 | 178 | 0.560102 |
leggedrobotics/viplanner/viplanner/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# This line will be programmatically read/written by setup.py.
# Leave it at the bottom of this file and don't touch it.
__version__ = "0.1"
| 292 | Python | 28.299997 | 61 | 0.726027 |
leggedrobotics/viplanner/viplanner/cost_builder.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# imperative-cost-map
from viplanner.config import CostMapConfig
from viplanner.cost_maps import CostMapPCD, SemCostMap, TsdfCostMap
def main(cfg: CostMapConfig, final_viz: bool = True):
assert any([cfg.semantics, cfg.geometry]), "no cost map type selected"
# create semantic cost map
if cfg.semantics:
print("============ Creating Semantic Map from cloud ===============")
sem_cost_map = SemCostMap(cfg.general, cfg.sem_cost_map, visualize=cfg.visualize)
sem_cost_map.pcd_init()
data, coord = sem_cost_map.create_costmap()
# create tsdf cost map
elif cfg.geometry:
print("============== Creating tsdf Map from cloud =================")
tsdf_cost_map = TsdfCostMap(cfg.general, cfg.tsdf_cost_map)
tsdf_cost_map.ReadPointFromFile()
data, coord = tsdf_cost_map.CreateTSDFMap()
        if cfg.visualize:
            tsdf_cost_map.VizCloud(tsdf_cost_map.obs_pcd)
else:
raise ValueError("no cost map type selected")
# set coords in costmap config
cfg.x_start, cfg.y_start = coord
# construct final cost map as pcd and save parameters
print("======== Generate and Save costmap as Point-Cloud ===========")
cost_mapper = CostMapPCD(
cfg=cfg,
tsdf_array=data[0],
viz_points=data[1],
ground_array=data[2],
load_from_file=False,
)
cost_mapper.SaveTSDFMap()
if final_viz:
cost_mapper.ShowTSDFMap(cost_map=True)
return
if __name__ == "__main__":
cfg = CostMapConfig()
main(cfg)
# EoF
| 1,716 | Python | 30.796296 | 89 | 0.630536 |
leggedrobotics/viplanner/viplanner/train.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import torch
torch.set_default_dtype(torch.float32)
# imperative-planning-learning
from viplanner.config import DataCfg, TrainCfg
from viplanner.utils.trainer import Trainer
if __name__ == "__main__":
env_list_combi = [
"2azQ1b91cZZ", # matterport mesh
"JeFG25nYj2p", # matterport mesh
"Vvot9Ly1tCj", # matterport mesh
"town01", # carla mesh
"ur6pFq6Qu1A", # matterport mesh
"B6ByNegPMKs", # matterport mesh
"8WUmhLawc2A", # matterport mesh
"town01", # carla mesh
"2n8kARJN3HM", # matterport mesh
]
carla: TrainCfg = TrainCfg(
sem=True,
cost_map_name="cost_map_sem",
env_list=env_list_combi,
test_env_id=8,
file_name="combi_more_data",
data_cfg=DataCfg(
max_goal_distance=10.0,
),
n_visualize=128,
wb_project="viplanner",
)
trainer = Trainer(carla)
trainer.train()
trainer.test()
trainer.save_config()
torch.cuda.empty_cache()
| 1,189 | Python | 25.444444 | 60 | 0.61312 |
leggedrobotics/viplanner/viplanner/cost_maps/cost_to_pcd.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from typing import Optional, Union
import numpy as np
import open3d as o3d
import pypose as pp
import torch
import yaml
# viplanner
from viplanner.config.costmap_cfg import CostMapConfig, Loader
torch.set_default_dtype(torch.float32)
class CostMapPCD:
def __init__(
self,
cfg: CostMapConfig,
tsdf_array: np.ndarray,
viz_points: np.ndarray,
ground_array: np.ndarray,
gpu_id: Optional[int] = 0,
load_from_file: Optional[bool] = False,
):
# determine device
if torch.cuda.is_available() and gpu_id is not None:
self.device = torch.device("cuda:" + str(gpu_id))
else:
self.device = torch.device("cpu")
# args
self.cfg: CostMapConfig = cfg
self.load_from_file: bool = load_from_file
self.tsdf_array: torch.Tensor = torch.tensor(tsdf_array, device=self.device)
self.viz_points: np.ndarray = viz_points
self.ground_array: torch.Tensor = torch.tensor(ground_array, device=self.device)
# init flag
self.map_init = False
# init pointclouds
self.pcd_tsdf = o3d.geometry.PointCloud()
self.pcd_viz = o3d.geometry.PointCloud()
# execute setup
self.num_x: int = 0
self.num_y: int = 0
self.setup()
return
def setup(self):
# expand of cost map
self.num_x, self.num_y = self.tsdf_array.shape
# visualization points
self.pcd_viz.points = o3d.utility.Vector3dVector(self.viz_points)
# set cost map
self.SetUpCostArray()
# update pcd instance
xv, yv = np.meshgrid(
np.linspace(0, self.num_x * self.cfg.general.resolution, self.num_x),
np.linspace(0, self.num_y * self.cfg.general.resolution, self.num_y),
indexing="ij",
)
T = np.concatenate((np.expand_dims(xv, axis=0), np.expand_dims(yv, axis=0)), axis=0)
T = np.concatenate(
(
T,
np.expand_dims(self.cost_array.cpu().detach().numpy(), axis=0),
),
axis=0,
)
if self.load_from_file:
wps = T.reshape(3, -1).T + np.array([self.cfg.x_start, self.cfg.y_start, 0.0])
self.pcd_tsdf.points = o3d.utility.Vector3dVector(wps)
else:
self.pcd_tsdf.points = o3d.utility.Vector3dVector(T.reshape(3, -1).T)
self.map_init = True
return
def ShowTSDFMap(self, cost_map=True): # not run with cuda
if not self.map_init:
print("Error: cannot show map, map has not been init yet!")
return
if cost_map:
o3d.visualization.draw_geometries([self.pcd_tsdf])
else:
o3d.visualization.draw_geometries([self.pcd_viz])
return
def Pos2Ind(self, points: Union[torch.Tensor, pp.LieTensor]):
# points [torch shapes [num_p, 3]]
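        # returns (a) grid indices normalized to [-1, 1] for all points and
        # (b) the raw grid indices of the points that lie inside the map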
start_xy = torch.tensor(
[self.cfg.x_start, self.cfg.y_start],
dtype=torch.float64,
device=points.device,
).expand(1, 1, -1)
if isinstance(points, pp.LieTensor):
H = (points.tensor()[:, :, 0:2] - start_xy) / self.cfg.general.resolution
else:
H = (points[:, :, 0:2] - start_xy) / self.cfg.general.resolution
mask = torch.logical_and(
(H > 0).all(axis=2),
(H < torch.tensor([self.num_x, self.num_y], device=points.device)[None, None, :]).all(axis=2),
)
return self.NormInds(H), H[mask, :]
def NormInds(self, H):
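        # map integer grid indices in [0, num_x) x [0, num_y) to normalized
        # coordinates in [-1, 1]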
norm_matrix = torch.tensor(
[self.num_x / 2.0, self.num_y / 2.0],
dtype=torch.float64,
device=H.device,
)
H = (H - norm_matrix) / norm_matrix
return H
def DeNormInds(self, NH):
norm_matrix = torch.tensor(
[self.num_x / 2.0, self.num_y / 2.0],
dtype=torch.float64,
device=NH.device,
)
NH = NH * norm_matrix + norm_matrix
return NH
def SaveTSDFMap(self):
if not self.map_init:
print("Error: map has not been init yet!")
return
# make directories
os.makedirs(
os.path.join(self.cfg.general.root_path, "maps", "data"),
exist_ok=True,
)
os.makedirs(
os.path.join(self.cfg.general.root_path, "maps", "cloud"),
exist_ok=True,
)
os.makedirs(
os.path.join(self.cfg.general.root_path, "maps", "params"),
exist_ok=True,
)
map_path = os.path.join(
self.cfg.general.root_path,
"maps",
"data",
self.cfg.map_name + "_map.txt",
)
ground_path = os.path.join(
self.cfg.general.root_path,
"maps",
"data",
self.cfg.map_name + "_ground.txt",
)
cloud_path = os.path.join(
self.cfg.general.root_path,
"maps",
"cloud",
self.cfg.map_name + "_cloud.txt",
)
# save data
np.savetxt(map_path, self.tsdf_array.cpu())
np.savetxt(ground_path, self.ground_array.cpu())
np.savetxt(cloud_path, self.viz_points)
# save config parameters
yaml_path = os.path.join(
self.cfg.general.root_path,
"maps",
"params",
f"config_{self.cfg.map_name}.yaml",
)
with open(yaml_path, "w+") as file:
yaml.dump(
vars(self.cfg),
file,
allow_unicode=True,
default_flow_style=False,
)
print("TSDF Map saved.")
return
def SetUpCostArray(self):
self.cost_array = self.tsdf_array
return
@classmethod
def ReadTSDFMap(cls, root_path: str, map_name: str, gpu_id: Optional[int] = None):
# read config
with open(os.path.join(root_path, "maps", "params", f"config_{map_name}.yaml")) as f:
cfg: CostMapConfig = CostMapConfig(**yaml.load(f, Loader))
# load data
tsdf_array = np.loadtxt(os.path.join(root_path, "maps", "data", map_name + "_map.txt"))
viz_points = np.loadtxt(os.path.join(root_path, "maps", "cloud", map_name + "_cloud.txt"))
ground_array = np.loadtxt(os.path.join(root_path, "maps", "data", map_name + "_ground.txt"))
return cls(
cfg=cfg,
tsdf_array=tsdf_array,
viz_points=viz_points,
ground_array=ground_array,
gpu_id=gpu_id,
load_from_file=True,
)
if __name__ == "__main__":
# parse environment directory and cost_map name
parser = argparse.ArgumentParser(prog="Show Costmap", description="Show Costmap")
parser.add_argument(
"-e",
"--env",
type=str,
help="path to the environment directory",
required=True,
)
parser.add_argument("-m", "--map", type=str, help="name of the cost_map", required=True)
args = parser.parse_args()
# show costmap
map = CostMapPCD.ReadTSDFMap(args.env, args.map)
map.ShowTSDFMap()
# EoF
| 7,414 | Python | 30.419491 | 106 | 0.545455 |
leggedrobotics/viplanner/viplanner/cost_maps/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .cost_to_pcd import CostMapPCD
from .sem_cost_map import SemCostMap
from .tsdf_cost_map import TsdfCostMap
__all__ = ["TsdfCostMap", "SemCostMap", "CostMapPCD"]
# EoF
| 323 | Python | 22.142856 | 60 | 0.733746 |
leggedrobotics/viplanner/viplanner/cost_maps/sem_cost_map.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
import multiprocessing as mp
# python
import os
from functools import partial
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import scipy
# imperative-cost-map
from viplanner.config import (
OBSTACLE_LOSS,
GeneralCostMapConfig,
SemCostMapConfig,
VIPlannerSemMetaHandler,
)
class SemCostMap:
"""
Cost Map based on semantic information
"""
def __init__(
self,
cfg_general: GeneralCostMapConfig,
cfg: SemCostMapConfig,
visualize: bool = True,
):
self._cfg_general = cfg_general
self._cfg_sem = cfg
self.visualize = visualize
# init VIPlanner Semantic Class Meta Handler
self.sem_meta = VIPlannerSemMetaHandler()
# cost map init parameters
self.pcd: o3d.geometry.PointCloud = None
self.pcd_filtered: o3d.geometry.PointCloud = None
self.height_map: np.ndarray = None
        self._num_x: int = 0
        self._num_y: int = 0
self._start_x: float = 0.0
self._start_y: float = 0.0
self._init_done: bool = False
# cost map
self.grid_cell_loss: np.ndarray = None
return
def pcd_init(self) -> None:
# load pcd and filter it
print("COST-MAP INIT START")
print("start loading and filtering point cloud from:" f" {self._cfg_general.ply_file}")
pc_path = os.path.join(self._cfg_general.root_path, self._cfg_general.ply_file)
assert os.path.exists(pc_path), f"point cloud file does not exist: {pc_path}"
self.pcd = o3d.io.read_point_cloud(pc_path)
# filter for x and y coordinates
if any(
[
self._cfg_general.x_max,
self._cfg_general.x_min,
self._cfg_general.y_max,
self._cfg_general.y_min,
]
):
pts = np.asarray(self.pcd.points)
pts_x_idx_upper = (
(pts[:, 0] < self._cfg_general.x_max)
if self._cfg_general.x_max is not None
else np.ones(pts.shape[0], dtype=bool)
)
pts_x_idx_lower = (
(pts[:, 0] > self._cfg_general.x_min)
if self._cfg_general.x_min is not None
else np.ones(pts.shape[0], dtype=bool)
)
pts_y_idx_upper = (
(pts[:, 1] < self._cfg_general.y_max)
if self._cfg_general.y_max is not None
else np.ones(pts.shape[0], dtype=bool)
)
pts_y_idx_lower = (
(pts[:, 1] > self._cfg_general.y_min)
if self._cfg_general.y_min is not None
else np.ones(pts.shape[0], dtype=bool)
)
self.pcd = self.pcd.select_by_index(
np.where(
np.vstack(
(
pts_x_idx_lower,
pts_x_idx_upper,
pts_y_idx_upper,
pts_y_idx_lower,
)
).all(axis=0)
)[0]
)
# set parameters
self._set_map_parameters(self.pcd)
# get ground height map
if self._cfg_sem.compute_height_map:
self.height_map = self._pcd_ground_height_map(self.pcd)
else:
self.height_map = np.zeros((self._num_x, self._num_y))
# filter point cloud depending on height
self.pcd_filtered = self._pcd_filter()
# update init flag
self._init_done = True
print("COST-MAP INIT DONE")
return
def create_costmap(self) -> Tuple[list, list]:
assert self._init_done, "cost map not initialized, call pcd_init() first"
print("COST-MAP CREATION START")
# get the loss for each grid cell
grid_loss = self._get_grid_loss()
# make grid loss differentiable
grid_loss = self._dense_grid_loss(grid_loss)
print("COST-MAP CREATION DONE")
return [grid_loss, self.pcd_filtered.points, self.height_map], [
float(self._start_x),
float(self._start_y),
]
"""Helper functions"""
def _pcd_ground_height_map(self, pcd: o3d.geometry.PointCloud) -> np.ndarray:
"Start building height map"
# for each grid cell, get the point with the highest z value
pts = np.asarray(pcd.points)
        pts_grid_idx_red, pts_idx = self._get_unique_grid_idx(pts)
# ground height of human constructed things (buildings, bench, etc.) should be equal to the ground height of the surrounding terrain/ street
# --> classify the selected points and change depending on the class
# get colors
color = np.asarray(pcd.colors)[pts_idx] * 255.0
# pts to class idx array
pts_ground = np.zeros(color.shape[0], dtype=bool)
# assign each point to a class
color = color.astype(int)
for class_name, class_color in self.sem_meta.class_color.items():
pts_idx_of_class = (color == class_color).all(axis=1).nonzero()[0]
pts_ground[pts_idx_of_class] = self.sem_meta.class_ground[class_name]
# filter outliers
pts_ground_idx = pts_idx[pts_ground]
if False:
pcd_ground = pcd.select_by_index(pts_ground_idx)
_, ind = pcd_ground.remove_radius_outlier(nb_points=5, radius=5 * self._cfg_general.resolution)
pts_ground_idx = pts_ground_idx[ind]
pts_ground_red = np.zeros(pts_grid_idx_red.shape[0], dtype=bool)
pts_ground_red[np.where(pts_ground)[0][ind]] = True
pts_ground = pts_ground_red
# fit kdtree to the points on the ground and assign ground height to all other points based on the nearest neighbor
pts_ground_location = pts[pts_ground_idx]
ground_kdtree = scipy.spatial.KDTree(pts_ground_location)
_, non_ground_neighbor_idx = ground_kdtree.query(pts[pts_idx[~pts_ground]], workers=-1)
# init height map and assign ground height to all points on the ground
height_pts_ground = np.zeros(pts_grid_idx_red.shape[0])
height_pts_ground[pts_ground] = pts_ground_location[:, 2]
height_pts_ground[~pts_ground] = pts_ground_location[non_ground_neighbor_idx, 2]
# fill the holes
height_map = np.full((self._num_x, self._num_y), np.nan)
height_map[pts_grid_idx_red[:, 0], pts_grid_idx_red[:, 1]] = height_pts_ground
hole_idx = np.vstack(np.where(np.isnan(height_map))).T
kdtree_grid = scipy.spatial.KDTree(pts_grid_idx_red)
distance, neighbor_idx = kdtree_grid.query(hole_idx, k=3, workers=-1)
        # inverse-distance weighting: closer ground cells contribute more
        inv_dist = 1.0 / np.maximum(distance, 1e-6)
        weights = inv_dist / np.sum(inv_dist, axis=1)[:, None]
height_map[hole_idx[:, 0], hole_idx[:, 1]] = np.sum(height_pts_ground[neighbor_idx] * weights, axis=1)
if self.visualize:
# visualize the height map
plt.imshow(height_map)
plt.colorbar()
plt.show()
print("Done building height map")
return height_map
def _pcd_filter(self) -> o3d.geometry.PointCloud:
"""remove points above the robot height, under the ground and filter for outliers"""
pts = np.asarray(self.pcd.points)
if self.height_map is not None:
pts_grid_idx = (
np.round((pts[:, :2] - np.array([self._start_x, self._start_y])) / self._cfg_general.resolution)
).astype(int)
pts[:, 2] -= self.height_map[pts_grid_idx[:, 0], pts_grid_idx[:, 1]]
pts_ceil_idx = pts[:, 2] < self._cfg_sem.robot_height * self._cfg_sem.robot_height_factor
pts_ground_idx = (
pts[:, 2] > self._cfg_sem.ground_height
if self._cfg_sem.ground_height is not None
else np.ones(pts.shape[0], dtype=bool)
)
pcd_height_filtered = self.pcd.select_by_index(
np.where(np.vstack((pts_ceil_idx, pts_ground_idx)).all(axis=0))[0]
)
# downsampling
if self._cfg_sem.downsample:
pcd_height_filtered = pcd_height_filtered.voxel_down_sample(self._cfg_general.resolution)
print("Voxel Downsampling applied")
# remove statistical outliers
pcd_filtered, _ = pcd_height_filtered.remove_statistical_outlier(
nb_neighbors=self._cfg_sem.nb_neighbors,
std_ratio=self._cfg_sem.std_ratio,
)
return pcd_filtered
def _set_map_parameters(self, pcd: o3d.geometry.PointCloud) -> None:
"""Define the size and start position of the cost map"""
pts = np.asarray(pcd.points)
assert pts.shape[0] > 0, "No points received."
# get max and minimum of cost map
max_x, max_y, _ = np.amax(pts, axis=0) + self._cfg_general.clear_dist
min_x, min_y, _ = np.amin(pts, axis=0) - self._cfg_general.clear_dist
prev_param = (
self._num_x,
self._num_y,
round(self._start_x, 3),
round(self._start_y, 3),
)
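        # grid dimensions are rounded up to multiples of 10 cells and the
        # grid is centered on the point cloud's bounding box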
self._num_x = np.ceil((max_x - min_x) / self._cfg_general.resolution / 10).astype(int) * 10
self._num_y = np.ceil((max_y - min_y) / self._cfg_general.resolution / 10).astype(int) * 10
self._start_x = (max_x + min_x) / 2.0 - self._num_x / 2.0 * self._cfg_general.resolution
self._start_y = (max_y + min_y) / 2.0 - self._num_y / 2.0 * self._cfg_general.resolution
print(f"cost map size set to: {self._num_x} x {self._num_y}")
if prev_param != (
self._num_x,
self._num_y,
round(self._start_x, 3),
round(self._start_y, 3),
):
print("Map parameters changed!")
return True
return False
def _class_mapping(self) -> np.ndarray:
# get colors
color = np.asarray(self.pcd_filtered.colors) * 255.0
# pts to class idx array
pts_class_idx = np.ones(color.shape[0], dtype=int) * -1
# assign each point to a class
color = color.astype(int)
for class_idx, class_color in enumerate(self.sem_meta.colors):
pts_idx_of_class = (color == class_color).all(axis=1).nonzero()[0]
pts_class_idx[pts_idx_of_class] = class_idx
# identify points with unknown classes --> remove from point cloud
known_idx = np.where(pts_class_idx != -1)[0]
self.pcd_filtered = self.pcd_filtered.select_by_index(known_idx)
print(f"Class of {len(known_idx)} points identified" f" ({len(known_idx) / len(color)} %).")
return pts_class_idx[known_idx]
@staticmethod
def _smoother(
pts_idx: np.ndarray,
pts_grid: np.ndarray,
pts_loss: np.ndarray,
conv_crit: float,
nb_neigh: int,
change_decimal: int,
max_iterations: int,
) -> np.ndarray:
# get grid idx for each point
print(f"Process {mp.current_process().name} started")
        lock.acquire()  # do not access the same memory twice
pts_loss_local = pts_loss[pts_idx].copy()
pts_grid_local = pts_grid[pts_idx].copy()
lock.release()
print(f"Process {mp.current_process().name} data loaded")
# fit kd-tree to available points
kd_tree = scipy.spatial.KDTree(pts_grid_local)
pt_dist, pt_neigh_idx = kd_tree.query(pts_grid_local, k=nb_neigh + 1)
pt_dist = pt_dist[:, 1:] # filter the point itself
pt_neigh_idx = pt_neigh_idx[:, 1:] # filter the point itself
# turn distance into weight
# pt_dist_weighted = pt_dist * np.linspace(1, 0.01, nb_neigh)
pt_dist_inv = 1.0 / pt_dist
pt_dist_inv[
~np.isfinite(pt_dist_inv)
] = 0.0 # set inf to 0 (inf or nan values when closest point at the same position)
pt_weights = scipy.special.softmax(pt_dist_inv, axis=1)
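        # iterative smoothing: each point's loss is repeatedly replaced by the
        # softmax inverse-distance weighted mean of its neighbors' losses,
        # until the fraction of changed points exceeds the convergence criterion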
# smooth losses
counter = 0
pts_loss_smooth = pts_loss_local.copy()
while counter < max_iterations:
counter += 1
pts_loss_smooth = np.sum(pts_loss_smooth[pt_neigh_idx] * pt_weights, axis=1)
conv_rate = (
np.sum(np.round(pts_loss_smooth, change_decimal) != np.round(pts_loss_local, change_decimal))
/ pts_loss_local.shape[0]
)
if conv_rate > conv_crit:
print(
f"Process {mp.current_process().name} converged with"
f" {np.round(conv_rate * 100, decimals=2)} % of changed"
f" points after {counter} iterations."
)
break
return pts_loss_smooth
@staticmethod
def _smoother_init(l_local: mp.Lock) -> None:
global lock
lock = l_local
return
def _get_grid_loss(self) -> np.ndarray:
"""convert points to grid"""
# get class mapping --> execute first because pcd are filtered
class_idx = self._class_mapping()
# update map parameters --> has to be done after mapping because last step where points are removed
changed = self._set_map_parameters(self.pcd_filtered)
if changed and self._cfg_sem.compute_height_map:
print("Recompute heightmap map due to changed parameters")
self.height_map = self._pcd_ground_height_map(self.pcd_filtered)
elif changed:
self.height_map = np.zeros((self._num_x, self._num_y))
# get points
pts = np.asarray(self.pcd_filtered.points)
pts_grid = (pts[:, :2] - np.array([self._start_x, self._start_y])) / self._cfg_general.resolution
# get loss for each point
pts_loss = np.zeros(class_idx.shape[0])
for sem_class in range(len(self.sem_meta.losses)):
pts_loss[class_idx == sem_class] = self.sem_meta.losses[sem_class]
# split task index
num_tasks = self._cfg_sem.nb_tasks if self._cfg_sem.nb_tasks else mp.cpu_count()
pts_task_idx = np.array_split(np.random.permutation(pts_loss.shape[0]), num_tasks)
# create pool with lock
lock_local = mp.Lock()
pool = mp.pool.Pool(processes=num_tasks, initializer=self._smoother_init, initargs=(lock_local,))
loss_array = pool.map(
partial(
self._smoother,
pts_grid=pts_grid,
pts_loss=pts_loss,
conv_crit=self._cfg_sem.conv_crit,
nb_neigh=self._cfg_sem.nb_neigh,
change_decimal=self._cfg_sem.change_decimal,
max_iterations=self._cfg_sem.max_iterations,
),
pts_task_idx,
)
pool.close()
pool.join()
# reassemble loss array
smooth_loss = np.zeros_like(pts_loss)
for process_idx in range(num_tasks):
smooth_loss[pts_task_idx[process_idx]] = loss_array[process_idx]
if False: # self.visualize:
plt.scatter(pts[:, 0], pts[:, 1], c=smooth_loss, cmap="jet")
plt.show()
return smooth_loss
def _distance_based_gradient(
self,
loss_level_idx: np.ndarray,
loss_min: float,
loss_max: float,
log_scaling: bool,
) -> np.ndarray:
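        # scale the cost of the cells in `loss_level_idx` by their Euclidean
        # distance to the nearest cell outside that level: log-scaled for
        # obstacles, otherwise mapped linearly onto [loss_min, loss_max]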
grid = np.zeros((self._num_x, self._num_y))
# distance transform
grid[loss_level_idx] = 1
grid = scipy.ndimage.distance_transform_edt(grid)
# loss scaling
if log_scaling:
grid[grid > 0.0] = np.log(grid[grid > 0.0] + math.e)
else:
grid = (grid - np.min(grid)) / (np.max(grid) - np.min(grid))
grid = grid * (loss_max - loss_min) + loss_min
return grid[loss_level_idx]
def _dense_grid_loss(self, smooth_loss: np.ndarray) -> None:
# get grid idx of all classified points
pts = np.asarray(self.pcd_filtered.points)
        pts_grid_idx_red, pts_idx = self._get_unique_grid_idx(pts)
grid_loss = np.ones((self._num_x, self._num_y)) * -10
grid_loss[pts_grid_idx_red[:, 0], pts_grid_idx_red[:, 1]] = smooth_loss[pts_idx]
# get grid idx of all (non-) classified points
non_classified_idx = np.where(grid_loss == -10)
non_classified_idx = np.vstack((non_classified_idx[0], non_classified_idx[1])).T
kdtree = scipy.spatial.KDTree(pts_grid_idx_red)
distances, idx = kdtree.query(non_classified_idx, k=1)
# only use points within the mesh, i.e. distance to nearest neighbor smaller than 10 cells
within_mesh = distances < 10
# assign each point its neighbor loss
grid_loss[
non_classified_idx[within_mesh, 0],
non_classified_idx[within_mesh, 1],
] = grid_loss[
pts_grid_idx_red[idx[within_mesh], 0],
pts_grid_idx_red[idx[within_mesh], 1],
]
# apply smoothing for filter missclassified points
grid_loss[
non_classified_idx[~within_mesh, 0],
non_classified_idx[~within_mesh, 1],
] = OBSTACLE_LOSS
grid_loss = scipy.ndimage.gaussian_filter(grid_loss, sigma=self._cfg_sem.sigma_smooth)
# get different loss levels
loss_levels = np.unique(self.sem_meta.losses)
assert round(loss_levels[0], 3) == 0.0, f"Lowest loss level should be 0.0, instead found {loss_levels[0]}."
        if round(loss_levels[-1], 3) != 1.0:
print("WARNING: Highest loss level should be 1.0, instead found" f" {loss_levels[-1]}.")
# intended traversable area is best traversed with maximum distance to any area with higher cost
# apply distance transform to nearest obstacle to enforce smallest loss when distance is max
traversable_idx = np.where(
np.round(grid_loss, decimals=self._cfg_sem.round_decimal_traversable) == loss_levels[0]
)
grid_loss[traversable_idx] = (
self._distance_based_gradient(
traversable_idx,
loss_levels[0],
abs(self._cfg_sem.negative_reward),
False,
)
* -1
)
# outside of the mesh is an obstacle and all points over obstacle threshold of grid loss are obstacles
obs_within_mesh_idx = np.where(grid_loss > self._cfg_sem.obstacle_threshold * loss_levels[-1])
obs_idx = (
np.hstack((obs_within_mesh_idx[0], non_classified_idx[~within_mesh, 0])),
np.hstack((obs_within_mesh_idx[1], non_classified_idx[~within_mesh, 1])),
)
grid_loss[obs_idx] = self._distance_based_gradient(obs_idx, None, None, True)
# repeat distance transform for intermediate loss levels
for i in range(1, len(loss_levels) - 1):
loss_level_idx = np.where(
np.round(grid_loss, decimals=self._cfg_sem.round_decimal_traversable) == loss_levels[i]
)
grid_loss[loss_level_idx] = self._distance_based_gradient(
loss_level_idx, loss_levels[i], loss_levels[i + 1], False
)
assert not (grid_loss == -10).any(), "There are still grid cells without a loss value."
# elevate grid_loss to avoid negative values due to negative reward in area with smallest loss level
if np.min(grid_loss) < 0:
grid_loss = grid_loss + np.abs(np.min(grid_loss))
# smooth loss again
loss_smooth = scipy.ndimage.gaussian_filter(grid_loss, sigma=self._cfg_general.sigma_smooth)
# plot grid classes and losses
if self.visualize:
fig, axs = plt.subplots(2, 2)
axs[0, 0].set_title("grid loss")
axs[0, 0].imshow(grid_loss, cmap="jet")
axs[0, 1].set_title("loss smooth")
axs[0, 1].imshow(loss_smooth, cmap="jet")
axs[1, 0].set_title("grid loss x-grad")
axs[1, 0].imshow(
np.log(np.abs(scipy.ndimage.sobel(grid_loss, axis=0, mode="constant")) + math.e) - 1,
cmap="jet",
)
axs[1, 1].set_title("grid loss y-grad")
axs[1, 1].imshow(
np.log(np.abs(scipy.ndimage.sobel(grid_loss, axis=1, mode="constant")) + math.e) - 1,
cmap="jet",
)
plt.show()
return loss_smooth
    def _get_unique_grid_idx(self, pts):
        """
        For each occupied grid cell, select the point with the highest z value.
        """
pts_grid_idx = (
np.round((pts[:, :2] - np.array([self._start_x, self._start_y])) / self._cfg_general.resolution)
).astype(int)
# convert pts_grid_idx to 1d array
        pts_grid_idx_1d = pts_grid_idx[:, 0] * self._num_y + pts_grid_idx[:, 1]  # x * num_y + y is collision-free since y < num_y
# get index of all points mapped to the same grid location --> take highest value to avoid local minima in e.g. cars
# following solution given at: https://stackoverflow.com/questions/30003068/how-to-get-a-list-of-all-indices-of-repeated-elements-in-a-numpy-array
# creates an array of indices, sorted by unique element
idx_sort = np.argsort(pts_grid_idx_1d)
# sorts pts_grid_idx_1d so all unique elements are together
pts_grid_idx_1d_sorted = pts_grid_idx_1d[idx_sort]
# returns the unique values, the index of the first occurrence of a value, and the count for each element
vals, idx_start, count = np.unique(pts_grid_idx_1d_sorted, return_counts=True, return_index=True)
# splits the indices into separate arrays
pts_grid_location_map = np.split(idx_sort, idx_start[1:])
# filter for points with more than one occurrence
pts_grid_location_map = np.array(pts_grid_location_map, dtype=object)
pts_grid_location_map_multiple = pts_grid_location_map[count > 1]
# get index with maximum z value for all points mapped to the same grid location
pts_grid_location_map_multiple_idx = np.array(
[
pts_grid_location_map_multiple[idx][np.argmax(pts[pts_idx, 2])]
for idx, pts_idx in enumerate(pts_grid_location_map_multiple)
]
)
# combine indices to get for every grid location the index of the point with the highest z value
grid_idx = np.zeros(len(pts_grid_location_map), dtype=int)
grid_idx[count > 1] = pts_grid_location_map_multiple_idx
grid_idx[count == 1] = pts_grid_location_map[count == 1]
pts_grid_idx_red = pts_grid_idx[grid_idx]
return pts_grid_idx_red, grid_idx
# EoF
| 22,808 | Python | 38.736934 | 154 | 0.576026 |
leggedrobotics/viplanner/viplanner/cost_maps/tsdf_cost_map.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
# python
import os
import numpy as np
import open3d as o3d
from scipy import ndimage
from scipy.ndimage import gaussian_filter
# imperative-cost-map
from viplanner.config import GeneralCostMapConfig, TsdfCostMapConfig
class TsdfCostMap:
"""
Cost Map based on geometric information
"""
def __init__(self, cfg_general: GeneralCostMapConfig, cfg_tsdf: TsdfCostMapConfig):
self._cfg_general = cfg_general
self._cfg_tsdf = cfg_tsdf
# set init flag
self.is_map_ready = False
# init point clouds
self.obs_pcd = o3d.geometry.PointCloud()
self.free_pcd = o3d.geometry.PointCloud()
return
def UpdatePCDwithPs(self, P_obs, P_free, is_downsample=False):
self.obs_pcd.points = o3d.utility.Vector3dVector(P_obs)
self.free_pcd.points = o3d.utility.Vector3dVector(P_free)
if is_downsample:
self.obs_pcd = self.obs_pcd.voxel_down_sample(self._cfg_general.resolution)
self.free_pcd = self.free_pcd.voxel_down_sample(self._cfg_general.resolution * 0.85)
self.obs_points = np.asarray(self.obs_pcd.points)
self.free_points = np.asarray(self.free_pcd.points)
print("number of obs points: %d, free points: %d" % (self.obs_points.shape[0], self.free_points.shape[0]))
def ReadPointFromFile(self):
pcd_load = o3d.io.read_point_cloud(os.path.join(self._cfg_general.root_path, self._cfg_general.ply_file))
obs_p, free_p = self.TerrainAnalysis(np.asarray(pcd_load.points))
self.UpdatePCDwithPs(obs_p, free_p, is_downsample=True)
if self._cfg_tsdf.filter_outliers:
obs_p = self.FilterCloud(self.obs_points)
free_p = self.FilterCloud(self.free_points, outlier_filter=False)
self.UpdatePCDwithPs(obs_p, free_p)
self.UpdateMapParams()
return
def TerrainAnalysis(self, input_points):
obs_points = np.zeros(input_points.shape)
        free_points = np.zeros(input_points.shape)
obs_idx = 0
free_idx = 0
# naive approach with z values
for p in input_points:
p_height = p[2] + self._cfg_tsdf.offset_z
if (p_height > self._cfg_tsdf.ground_height * 1.2) and (
p_height < self._cfg_tsdf.robot_height * self._cfg_tsdf.robot_height_factor
): # remove ground and ceiling
obs_points[obs_idx, :] = p
obs_idx = obs_idx + 1
elif p_height < self._cfg_tsdf.ground_height and p_height > -self._cfg_tsdf.ground_height:
                free_points[free_idx, :] = p
                free_idx = free_idx + 1
        return obs_points[:obs_idx, :], free_points[:free_idx, :]
def UpdateMapParams(self):
if self.obs_points.shape[0] == 0:
print("No points received.")
return
max_x, max_y, _ = np.amax(self.obs_points, axis=0) + self._cfg_general.clear_dist
min_x, min_y, _ = np.amin(self.obs_points, axis=0) - self._cfg_general.clear_dist
self.num_x = np.ceil((max_x - min_x) / self._cfg_general.resolution / 10).astype(int) * 10
self.num_y = np.ceil((max_y - min_y) / self._cfg_general.resolution / 10).astype(int) * 10
self.start_x = (max_x + min_x) / 2.0 - self.num_x / 2.0 * self._cfg_general.resolution
self.start_y = (max_y + min_y) / 2.0 - self.num_y / 2.0 * self._cfg_general.resolution
print("tsdf map initialized, with size: %d, %d" % (self.num_x, self.num_y))
self.is_map_ready = True
def CreateTSDFMap(self):
if not self.is_map_ready:
raise ValueError("create tsdf map fails, no points received.")
free_map = np.ones([self.num_x, self.num_y])
obs_map = np.zeros([self.num_x, self.num_y])
free_I = self.IndexArrayOfPs(self.free_points)
obs_I = self.IndexArrayOfPs(self.obs_points)
# create free place map
for i in obs_I:
obs_map[i[0], i[1]] = 1.0
obs_map = gaussian_filter(obs_map, sigma=self._cfg_tsdf.sigma_expand)
for i in free_I:
if i[0] < self.num_x and i[1] < self.num_y:
free_map[i[0], i[1]] = 0
free_map = gaussian_filter(free_map, sigma=self._cfg_tsdf.sigma_expand)
free_map[free_map < self._cfg_tsdf.free_space_threshold] = 0
# assign obstacles
free_map[obs_map > self._cfg_tsdf.obstacle_threshold] = 1.0
print("occupancy map generation completed.")
# Distance Transform
tsdf_array = ndimage.distance_transform_edt(free_map)
tsdf_array[tsdf_array > 0.0] = np.log(tsdf_array[tsdf_array > 0.0] + math.e)
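        # log scaling compresses large distances: any cell with distance
        # d > 0 to known-free space gets cost log(d + e) >= 1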
tsdf_array = gaussian_filter(tsdf_array, sigma=self._cfg_general.sigma_smooth)
viz_points = np.concatenate((self.obs_points, self.free_points), axis=0)
        # TODO: use a true terrain analysis module
ground_array = np.ones([self.num_x, self.num_y]) * 0.0
return [tsdf_array, viz_points, ground_array], [
float(self.start_x),
float(self.start_y),
]
def IndexArrayOfPs(self, points):
indexes = points[:, :2] - np.array([self.start_x, self.start_y])
indexes = (np.round(indexes / self._cfg_general.resolution)).astype(int)
return indexes
def FilterCloud(self, points, outlier_filter=True):
# crop points
if any(
[
self._cfg_general.x_max,
self._cfg_general.x_min,
self._cfg_general.y_max,
self._cfg_general.y_min,
]
):
points_x_idx_upper = (
(points[:, 0] < self._cfg_general.x_max)
if self._cfg_general.x_max is not None
else np.ones(points.shape[0], dtype=bool)
)
points_x_idx_lower = (
(points[:, 0] > self._cfg_general.x_min)
if self._cfg_general.x_min is not None
else np.ones(points.shape[0], dtype=bool)
)
points_y_idx_upper = (
(points[:, 1] < self._cfg_general.y_max)
if self._cfg_general.y_max is not None
else np.ones(points.shape[0], dtype=bool)
)
points_y_idx_lower = (
(points[:, 1] > self._cfg_general.y_min)
if self._cfg_general.y_min is not None
else np.ones(points.shape[0], dtype=bool)
)
points = points[
np.vstack(
(
points_x_idx_lower,
points_x_idx_upper,
points_y_idx_upper,
points_y_idx_lower,
)
).all(axis=0)
]
if outlier_filter:
# Filter outlier in points
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
cl, _ = pcd.remove_statistical_outlier(
nb_neighbors=self._cfg_tsdf.nb_neighbors,
std_ratio=self._cfg_tsdf.std_ratio,
)
points = np.asarray(cl.points)
return points
def VizCloud(self, pcd):
o3d.visualization.draw_geometries([pcd]) # visualize point cloud
# EoF
| 7,467 | Python | 38.723404 | 114 | 0.569707 |
leggedrobotics/viplanner/viplanner/plannernet/PlannerNet.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class PlannerNet(nn.Module):
def __init__(
self,
layers,
block=BasicBlock,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
) -> None:
super().__init__()
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0],
)
self.layer3 = self._make_layer(
block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1],
)
self.layer4 = self._make_layer(
block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2],
)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
return self._forward_impl(x)
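if __name__ == "__main__":
    # Minimal shape check (added for illustration, not part of the original module):
    # with the 360x640 input resolution used elsewhere in this repo, the
    # ResNet18-style encoder downsamples the image by a factor of 32 per side.
    import torch
    net = PlannerNet(layers=[2, 2, 2, 2])
    out = net(torch.zeros(1, 3, 360, 640))
    print(out.shape)  # torch.Size([1, 512, 12, 20])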
| 4,857 | Python | 27.080925 | 96 | 0.53078 |
leggedrobotics/viplanner/viplanner/plannernet/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .autoencoder import AutoEncoder, DualAutoEncoder
from .rgb_encoder import PRE_TRAIN_POSSIBLE, get_m2f_cfg
__all__ = [
"AutoEncoder",
"DualAutoEncoder",
"get_m2f_cfg",
"PRE_TRAIN_POSSIBLE",
]
# EoF
| 369 | Python | 19.555554 | 60 | 0.699187 |
leggedrobotics/viplanner/viplanner/plannernet/autoencoder.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import Optional
import torch
import torch.nn as nn
from viplanner.config import TrainCfg
# visual-imperative-planner
from .PlannerNet import PlannerNet
from .rgb_encoder import PRE_TRAIN_POSSIBLE, RGBEncoder
class AutoEncoder(nn.Module):
def __init__(self, encoder_channel=64, k=5):
super().__init__()
self.encoder = PlannerNet(layers=[2, 2, 2, 2])
self.decoder = Decoder(512, encoder_channel, k)
def forward(self, x: torch.Tensor, goal: torch.Tensor):
x = x.expand(-1, 3, -1, -1)
x = self.encoder(x)
x, c = self.decoder(x, goal)
return x, c
class DualAutoEncoder(nn.Module):
def __init__(
self,
train_cfg: TrainCfg,
m2f_cfg=None,
weight_path: Optional[str] = None,
):
super().__init__()
self.encoder_depth = PlannerNet(layers=[2, 2, 2, 2])
if train_cfg.rgb and train_cfg.pre_train_sem and PRE_TRAIN_POSSIBLE:
self.encoder_sem = RGBEncoder(m2f_cfg, weight_path, freeze=train_cfg.pre_train_freeze)
else:
self.encoder_sem = PlannerNet(layers=[2, 2, 2, 2])
if train_cfg.decoder_small:
self.decoder = DecoderS(1024, train_cfg.in_channel, train_cfg.knodes)
else:
self.decoder = Decoder(1024, train_cfg.in_channel, train_cfg.knodes)
return
def forward(self, x_depth: torch.Tensor, x_sem: torch.Tensor, goal: torch.Tensor):
# encode depth
x_depth = x_depth.expand(-1, 3, -1, -1)
x_depth = self.encoder_depth(x_depth)
# encode sem
x_sem = self.encoder_sem(x_sem)
# concat
x = torch.cat((x_depth, x_sem), dim=1) # x.size = (N, 1024, 12, 20)
# decode
x, c = self.decoder(x, goal)
return x, c
class Decoder(nn.Module):
def __init__(self, in_channels, goal_channels, k=5):
super().__init__()
self.k = k
self.relu = nn.ReLU(inplace=True)
self.fg = nn.Linear(3, goal_channels)
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(
(in_channels + goal_channels),
512,
kernel_size=5,
stride=1,
padding=1,
)
self.conv2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0)
self.fc1 = nn.Linear(256 * 128, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, k * 3)
self.frc1 = nn.Linear(1024, 128)
self.frc2 = nn.Linear(128, 1)
def forward(self, x, goal):
# compute goal encoding
goal = self.fg(goal[:, 0:3])
goal = goal[:, :, None, None].expand(-1, -1, x.shape[2], x.shape[3])
# cat x with goal in channel dim
x = torch.cat((x, goal), dim=1)
# compute x
        x = self.relu(self.conv1(x))  # size = (N, 512, 10, 18) for a 12x20 feature map
        x = self.relu(self.conv2(x))  # size = (N, 256, 8, 16)
x = torch.flatten(x, 1)
f = self.relu(self.fc1(x))
x = self.relu(self.fc2(f))
x = self.fc3(x)
x = x.reshape(-1, self.k, 3)
c = self.relu(self.frc1(f))
c = self.sigmoid(self.frc2(c))
return x, c
class DecoderS(nn.Module):
def __init__(self, in_channels, goal_channels, k=5):
super().__init__()
self.k = k
self.relu = nn.ReLU(inplace=True)
self.fg = nn.Linear(3, goal_channels)
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(
(in_channels + goal_channels),
512,
kernel_size=5,
stride=1,
padding=1,
)
self.conv2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0)
self.conv3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=0)
self.conv4 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)
        self.fc1 = nn.Linear(64 * 48, 256)  # small head: 64 * 4 * 12 = 3072 inputs (the full Decoder's fc1 alone has ~33 million parameters)
self.fc2 = nn.Linear(256, k * 3)
self.frc1 = nn.Linear(256, 1)
def forward(self, x, goal):
# compute goal encoding
goal = self.fg(goal[:, 0:3])
goal = goal[:, :, None, None].expand(-1, -1, x.shape[2], x.shape[3])
# cat x with goal in channel dim
x = torch.cat((x, goal), dim=1) # x.size = (N, 1024+16, 12, 20)
# compute x
        x = self.relu(self.conv1(x))  # size = (N, 512, 10, 18)
        x = self.relu(self.conv2(x))  # size = (N, 256, 8, 16)
        x = self.relu(self.conv3(x))  # size = (N, 128, 6, 14)
        x = self.relu(self.conv4(x))  # size = (N, 64, 4, 12)
x = torch.flatten(x, 1)
f = self.relu(self.fc1(x))
x = self.fc2(f)
x = x.reshape(-1, self.k, 3)
c = self.sigmoid(self.frc1(f))
return x, c
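if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): the depth-only AutoEncoder
    # maps a (N, 1, H, W) depth image plus a 3D goal to k=5 keypoints and a
    # scalar collision ("fear") confidence; 360x640 matches the default img_input_size.
    model = AutoEncoder()
    keypoints, fear = model(torch.zeros(2, 1, 360, 640), torch.zeros(2, 3))
    print(keypoints.shape, fear.shape)  # torch.Size([2, 5, 3]) torch.Size([2, 1])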
# EoF
| 5,063 | Python | 30.65 | 98 | 0.542366 |
leggedrobotics/viplanner/viplanner/plannernet/rgb_encoder.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import pickle
from typing import Optional
import torch
import torch.nn as nn
# detectron2 and mask2former (used to load pre-trained models from Mask2Former)
try:
from detectron2.config import get_cfg
from detectron2.modeling.backbone import build_resnet_backbone
from detectron2.projects.deeplab import add_deeplab_config
PRE_TRAIN_POSSIBLE = True
except ImportError:
PRE_TRAIN_POSSIBLE = False
print("[Warning] Pre-trained ResNet50 models cannot be used since detectron2" " not found")
try:
from viplanner.third_party.mask2former.mask2former import add_maskformer2_config
except ImportError:
PRE_TRAIN_POSSIBLE = False
print("[Warning] Pre-trained ResNet50 models cannot be used since" " mask2former not found")
def get_m2f_cfg(cfg_path: str): # -> CfgNode:
# load config from file
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file(cfg_path)
cfg.freeze()
return cfg
class RGBEncoder(nn.Module):
def __init__(self, cfg, weight_path: Optional[str] = None, freeze: bool = True) -> None:
super().__init__()
# load pre-trained resnet
input_shape = argparse.Namespace()
input_shape.channels = 3
self.backbone = build_resnet_backbone(cfg, input_shape)
# load weights
if weight_path is not None:
with open(weight_path, "rb") as file:
model_file = pickle.load(file, encoding="latin1")
model_file["model"] = {k.replace("backbone.", ""): torch.tensor(v) for k, v in model_file["model"].items()}
missing_keys, unexpected_keys = self.backbone.load_state_dict(model_file["model"], strict=False)
if len(missing_keys) != 0:
print(f"[WARNING] Missing keys: {missing_keys}")
print(f"[WARNING] Unexpected keys: {unexpected_keys}")
print(f"[INFO] Loaded pre-trained backbone from {weight_path}")
# freeze network
if freeze:
for param in self.backbone.parameters():
param.requires_grad = False
# layers to get correct output shape --> modifiable
self.conv1 = nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1)
return
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.backbone(x)["res5"] # size = (N, 2048, 12, 20) (height and width same as ResNet18)
x = self.conv1(x) # size = (N, 512, 12, 20)
return x
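if __name__ == "__main__" and PRE_TRAIN_POSSIBLE:
    # Hedged sketch (added for illustration): the config path below is a
    # placeholder matching TrainCfg.pre_train_cfg and must point to a local
    # Mask2Former checkout; weights are optional here.
    cfg = get_m2f_cfg("m2f_model/coco/panoptic/maskformer2_R50_bs16_50ep.yaml")
    encoder = RGBEncoder(cfg, weight_path=None, freeze=True)
    out = encoder(torch.zeros(1, 3, 360, 640))
    print(out.shape)  # torch.Size([1, 512, 12, 20])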
# EoF
| 2,659 | Python | 31.839506 | 119 | 0.647612 |
leggedrobotics/viplanner/viplanner/traj_cost_opt/traj_opt.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
torch.set_default_dtype(torch.float32)
class CubicSplineTorch:
# Reference: https://stackoverflow.com/questions/61616810/how-to-do-cubic-spline-interpolation-and-integration-in-pytorch
def __init__(self):
self.init_m = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float32)
def h_poly(self, t):
alpha = torch.arange(4, device=t.device, dtype=t.dtype)
tt = t[:, None, :] ** alpha[None, :, None]
A = torch.tensor(
[[1, 0, -3, 2], [0, 1, -2, 1], [0, 0, 3, -2], [0, 0, -1, 1]],
dtype=t.dtype,
device=t.device,
)
return A @ tt
def interp(self, x, y, xs):
m = (y[:, 1:, :] - y[:, :-1, :]) / torch.unsqueeze(x[:, 1:] - x[:, :-1], 2)
m = torch.cat([m[:, None, 0], (m[:, 1:] + m[:, :-1]) / 2, m[:, None, -1]], 1)
idxs = torch.searchsorted(x[0, 1:], xs[0, :])
dx = x[:, idxs + 1] - x[:, idxs]
hh = self.h_poly((xs - x[:, idxs]) / dx)
hh = torch.transpose(hh, 1, 2)
out = hh[:, :, 0:1] * y[:, idxs, :]
out = out + hh[:, :, 1:2] * m[:, idxs] * dx[:, :, None]
out = out + hh[:, :, 2:3] * y[:, idxs + 1, :]
out = out + hh[:, :, 3:4] * m[:, idxs + 1] * dx[:, :, None]
return out
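# Interpolation sketch (added for illustration): query the batched cubic
# Hermite spline at arbitrary sample locations, e.g.
#   cs = CubicSplineTorch()
#   x = torch.linspace(0, 3, 4)[None, :]    # knot locations, shape (1, 4)
#   y = torch.rand(1, 4, 3)                 # knot values, shape (1, 4, 3)
#   xs = torch.linspace(0, 3, 31)[None, :]  # query locations
#   out = cs.interp(x, y, xs)               # shape (1, 31, 3)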
class TrajOpt:
debug = False
def __init__(self):
self.cs_interp = CubicSplineTorch()
def TrajGeneratorFromPFreeRot(self, preds, step):
        # preds are R^3 keypoints; the origin is prepended as the start point
batch_size, num_p, dims = preds.shape
points_preds = torch.cat(
(
torch.zeros(
batch_size,
1,
dims,
device=preds.device,
requires_grad=preds.requires_grad,
),
preds,
),
axis=1,
)
num_p = num_p + 1
xs = torch.arange(0, num_p - 1 + step, step, device=preds.device)
xs = xs.repeat(batch_size, 1)
x = torch.arange(num_p, device=preds.device, dtype=preds.dtype)
x = x.repeat(batch_size, 1)
waypoints = self.cs_interp.interp(x, points_preds, xs)
if self.debug:
import matplotlib.pyplot as plt # for plotting
plt.scatter(
points_preds[0, :, 0].cpu().numpy(),
points_preds[0, :, 1].cpu().numpy(),
label="Samples",
color="purple",
)
plt.plot(
waypoints[0, :, 0].cpu().numpy(),
waypoints[0, :, 1].cpu().numpy(),
label="Interpolated curve",
color="blue",
)
plt.legend()
plt.show()
return waypoints # R3
def interpolate_waypoints(self, preds):
shape = list(preds.shape)
out_shape = [50, shape[2]]
waypoints = torch.nn.functional.interpolate(
preds.unsqueeze(1),
size=tuple(out_shape),
mode="bilinear",
align_corners=True,
)
waypoints = waypoints.squeeze(1)
if self.debug:
import matplotlib.pyplot as plt # for plotting
plt.scatter(
preds[0, :, 0].detach().cpu().numpy(),
preds[0, :, 1].detach().cpu().numpy(),
label="Samples",
color="purple",
)
plt.plot(
waypoints[0, :, 0].detach().cpu().numpy(),
waypoints[0, :, 1].detach().cpu().numpy(),
label="Interpolated curve",
color="blue",
)
plt.legend()
plt.show()
return waypoints
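if __name__ == "__main__":
    # Hedged smoke test (added for illustration): interpolate three predicted
    # keypoints into a dense path; the origin is prepended internally, so with
    # step=0.25 over 3 segments this yields 13 waypoints per batch element.
    opt = TrajOpt()
    preds = torch.tensor([[[1.0, 0.0, 0.0], [2.0, 1.0, 0.0], [3.0, 1.0, 0.0]]])
    waypoints = opt.TrajGeneratorFromPFreeRot(preds, step=0.25)
    print(waypoints.shape)  # torch.Size([1, 13, 3])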
| 3,829 | Python | 30.916666 | 125 | 0.464351 |
leggedrobotics/viplanner/viplanner/traj_cost_opt/traj_cost.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.set_default_dtype(torch.float32)
from viplanner.cost_maps import CostMapPCD
# visual-imperative-planning
from .traj_opt import TrajOpt
try:
import pypose as pp # only used for training
import wandb # only used for training
except (ModuleNotFoundError, ImportError):  # eval in Isaac Sim  # TODO: check if all can be installed in Isaac Sim
print("[Warning] pypose or wandb not found, only use for evaluation")
class TrajCost:
debug = False
def __init__(
self,
gpu_id: Optional[int] = 0,
log_data: bool = False,
w_obs: float = 0.25,
w_height: float = 1.0,
w_motion: float = 1.5,
w_goal: float = 2.0,
obstalce_thread: float = 0.75,
robot_width: float = 0.6,
robot_max_moving_distance: float = 0.15,
) -> None:
# init map and optimizer
self.gpu_id = gpu_id
self.cost_map: CostMapPCD = None
self.opt = TrajOpt()
self.is_map = False
self.neg_reward: torch.Tensor = None
# loss weights
self.w_obs = w_obs
self.w_height = w_height
self.w_motion = w_motion
self.w_goal = w_goal
# fear label threshold value
self.obstalce_thread = obstalce_thread
# footprint radius
self.robot_width = robot_width
self.robot_max_moving_distance = robot_max_moving_distance
# logging
self.log_data = log_data
return
@staticmethod
def TransformPoints(odom, points):
batch_size, num_p, _ = points.shape
world_ps = pp.identity_SE3(
batch_size,
num_p,
device=points.device,
requires_grad=points.requires_grad,
)
world_ps.tensor()[:, :, 0:3] = points
world_ps = pp.SE3(odom[:, None, :]) @ pp.SE3(world_ps)
return world_ps
def SetMap(self, root_path, map_name):
self.cost_map = CostMapPCD.ReadTSDFMap(root_path, map_name, self.gpu_id)
self.is_map = True
# get negative reward of cost-map
self.neg_reward = torch.zeros(7, device=self.cost_map.device)
if self.cost_map.cfg.semantics:
self.neg_reward[2] = self.cost_map.cfg.sem_cost_map.negative_reward
return
def CostofTraj(
self,
waypoints: torch.Tensor,
odom: torch.Tensor,
goal: torch.Tensor,
fear: torch.Tensor,
log_step: int,
ahead_dist: float,
dataset: str = "train",
):
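        """Training loss: weighted obstacle, height, motion and goal terms plus
        a BCE loss on the collision ("fear") prediction.
        Args:
            waypoints (torch.Tensor): trajectory waypoints in the base frame
            odom (torch.Tensor): current odometry as SE(3) [x, y, z, qx, qy, qz, qw]
            goal (torch.Tensor): goal pose relative to the odometry frame
            fear (torch.Tensor): predicted collision probability per trajectory
            log_step (int): global step used for wandb logging
            ahead_dist (float): lookahead distance used for the fear labels
            dataset (str, optional): split name used in the log keys. Defaults to "train".
        """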
batch_size, num_p, _ = waypoints.shape
assert self.is_map, "Map has to be set for cost calculation"
world_ps = self.TransformPoints(odom, waypoints).tensor()
# Obstacle loss
oloss_M = self._compute_oloss(world_ps, batch_size)
oloss = torch.mean(torch.sum(oloss_M, axis=1))
        # Terrain Height loss
norm_inds, _ = self.cost_map.Pos2Ind(world_ps)
height_grid = self.cost_map.ground_array.T.expand(batch_size, 1, -1, -1)
hloss_M = (
F.grid_sample(
height_grid,
norm_inds[:, None, :, :],
mode="bicubic",
padding_mode="border",
align_corners=False,
)
.squeeze(1)
.squeeze(1)
)
hloss_M = torch.abs(world_ps[:, :, 2] - odom[:, None, 2] - hloss_M).to(
torch.float32
) # world_ps - odom to have them on the ground to be comparable to the height map
hloss_M = torch.sum(hloss_M, axis=1)
hloss = torch.mean(hloss_M)
# Goal Cost - Control Cost
gloss_M = torch.norm(goal[:, :3] - waypoints[:, -1, :], dim=1)
# gloss = torch.mean(gloss_M)
gloss = torch.mean(torch.log(gloss_M + 1.0))
# Moving Loss - punish staying
desired_wp = self.opt.TrajGeneratorFromPFreeRot(goal[:, None, 0:3], step=1.0 / (num_p - 1))
desired_ds = torch.norm(desired_wp[:, 1:num_p, :] - desired_wp[:, 0 : num_p - 1, :], dim=2)
wp_ds = torch.norm(waypoints[:, 1:num_p, :] - waypoints[:, 0 : num_p - 1, :], dim=2)
mloss = torch.abs(desired_ds - wp_ds)
mloss = torch.sum(mloss, axis=1)
mloss = torch.mean(mloss)
# Complete Trajectory Loss
trajectory_loss = self.w_obs * oloss + self.w_height * hloss + self.w_motion * mloss + self.w_goal * gloss
# Fear labels
goal_dists = torch.cumsum(wp_ds, dim=1, dtype=wp_ds.dtype)
goal_dists = torch.vstack([goal_dists] * 3)
floss_M = torch.clone(oloss_M)
floss_M[goal_dists > ahead_dist] = 0.0
fear_labels = torch.max(floss_M, 1, keepdim=True)[0]
# fear_labels = nn.Sigmoid()(fear_labels-obstalce_thread)
fear_labels = fear_labels > self.obstalce_thread + self.neg_reward[2]
fear_labels = torch.any(fear_labels.reshape(3, batch_size).T, dim=1, keepdim=True).to(torch.float32)
# Fear loss
        collision_probability_loss = nn.BCELoss()(fear, fear_labels.float())
# log
if self.log_data:
try:
wandb.log(
{f"Height Loss {dataset}": self.w_height * hloss},
step=log_step,
)
wandb.log(
{f"Obstacle Loss {dataset}": self.w_obs * oloss},
step=log_step,
)
wandb.log(
{f"Goal Loss {dataset}": self.w_goal * gloss},
step=log_step,
)
wandb.log(
{f"Motion Loss {dataset}": self.w_motion * mloss},
step=log_step,
)
wandb.log(
{f"Trajectory Loss {dataset}": trajectory_loss},
step=log_step,
)
wandb.log(
{f"Collision Loss {dataset}": collision_probabilty_loss},
step=log_step,
)
except: # noqa: E722
print("wandb log failed")
# TODO: kinodynamics cost
        return collision_probability_loss + trajectory_loss
def obs_cost_eval(self, odom: torch.Tensor, waypoints: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute Obstacle Loss for eval_sim_static script!
Args:
odom (torch.Tensor): Current odometry
waypoints (torch.Tensor): waypoints in camera frame
Returns:
tuple: mean obstacle loss for each trajectory, max obstacle loss for each trajectory
"""
assert self.is_map, "Map has to be loaded for evaluation"
# compute obstacle loss
world_ps = self.TransformPoints(odom, waypoints).tensor()
oloss_M = self._compute_oloss(world_ps, waypoints.shape[0])
# account for negative reward
oloss_M = oloss_M - self.neg_reward[2]
oloss_M[oloss_M < 0] = 0.0
oloss_M = oloss_M.reshape(-1, waypoints.shape[0], oloss_M.shape[1])
return torch.mean(oloss_M, axis=[0, 2]), torch.amax(oloss_M, dim=[0, 2])
def cost_of_recorded_path(
self,
waypoints: torch.Tensor,
) -> torch.Tensor:
"""Cost of recorded path - for evaluation only
Args:
waypoints (torch.Tensor): Path coordinates in world frame
"""
assert self.is_map, "Map has to be loaded for evaluation"
oloss_M = self._compute_oloss(waypoints.unsqueeze(0), 1)
return torch.max(oloss_M)
def _compute_oloss(self, world_ps, batch_size):
if world_ps.shape[1] == 1: # special case when evaluating cost of a recorded path
world_ps_inflated = world_ps
else:
# include robot dimension as square
tangent = world_ps[:, 1:, 0:2] - world_ps[:, :-1, 0:2] # get tangent vector
            tangent = tangent / torch.norm(tangent, dim=2, keepdim=True)  # normalize the tangent vector
normals = tangent[:, :, [1, 0]] * torch.tensor(
[-1, 1], dtype=torch.float32, device=world_ps.device
) # get normal vector
world_ps_inflated = torch.vstack([world_ps[:, :-1, :]] * 3) # duplicate points
world_ps_inflated[:, :, 0:2] = torch.vstack(
[
# movement corners
world_ps[:, :-1, 0:2] + normals * self.robot_width / 2, # front_right
world_ps[:, :-1, 0:2], # center
world_ps[:, :-1, 0:2] - normals * self.robot_width / 2, # front_left
]
)
norm_inds, cost_idx = self.cost_map.Pos2Ind(world_ps_inflated)
# Obstacle Cost
cost_grid = self.cost_map.cost_array.T.expand(world_ps_inflated.shape[0], 1, -1, -1)
oloss_M = (
F.grid_sample(
cost_grid,
norm_inds[:, None, :, :],
mode="bicubic",
padding_mode="border",
align_corners=False,
)
.squeeze(1)
.squeeze(1)
)
oloss_M = oloss_M.to(torch.float32)
if self.debug:
# add negative reward for cost-map
world_ps_inflated = world_ps_inflated + self.neg_reward
import numpy as np
# indexes in the cost map
start_xy = torch.tensor(
[self.cost_map.cfg.x_start, self.cost_map.cfg.y_start],
dtype=torch.float64,
device=world_ps_inflated.device,
).expand(1, 1, -1)
H = (world_ps_inflated[:, :, 0:2] - start_xy) / self.cost_map.cfg.general.resolution
cost_values = self.cost_map.cost_array[
H[[0, batch_size, batch_size * 2], :, 0].reshape(-1).detach().cpu().numpy().astype(np.int64),
H[[0, batch_size, batch_size * 2], :, 1].reshape(-1).detach().cpu().numpy().astype(np.int64),
]
import matplotlib.pyplot as plt
_, (ax1, ax2, ax3) = plt.subplots(1, 3)
sc1 = ax1.scatter(
world_ps_inflated[[0, batch_size, batch_size * 2], :, 0].reshape(-1).detach().cpu().numpy(),
world_ps_inflated[[0, batch_size, batch_size * 2], :, 1].reshape(-1).detach().cpu().numpy(),
c=oloss_M[[0, batch_size, batch_size * 2]].reshape(-1).detach().cpu().numpy(),
cmap="rainbow",
vmin=0,
vmax=torch.max(cost_grid).item(),
)
ax1.set_aspect("equal", adjustable="box")
ax2.scatter(
H[[0, batch_size, batch_size * 2], :, 0].reshape(-1).detach().cpu().numpy(),
H[[0, batch_size, batch_size * 2], :, 1].reshape(-1).detach().cpu().numpy(),
c=cost_values.cpu().numpy(),
cmap="rainbow",
vmin=0,
vmax=torch.max(cost_grid).item(),
)
ax2.set_aspect("equal", adjustable="box")
cost_array = self.cost_map.cost_array.cpu().numpy()
max_cost = torch.max(self.cost_map.cost_array).item()
scale_factor = [1.4, 1.8]
for idx, run_idx in enumerate([0, batch_size, batch_size * 2]):
_, cost_idx = self.cost_map.Pos2Ind(world_ps_inflated[run_idx, :, :].unsqueeze(0))
cost_array[
cost_idx.to(torch.int32).cpu().numpy()[:, 0],
cost_idx.to(torch.int32).cpu().numpy()[:, 1],
] = (
max_cost * scale_factor[idx]
)
ax3.imshow(cost_array)
plt.figure()
plt.title("cost_map")
plt.imshow(cost_array)
import open3d as o3d
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(
world_ps_inflated[[0, batch_size, batch_size * 2], :, :3].reshape(-1, 3).detach().cpu().numpy()
)
pcd.colors = o3d.utility.Vector3dVector(
sc1.to_rgba(oloss_M[[0, batch_size, batch_size * 2]].reshape(-1).detach().cpu().numpy())[:, :3]
)
# pcd.colors = o3d.utility.Vector3dVector(sc2.to_rgba(cost_values[0].cpu().numpy())[:, :3])
o3d.visualization.draw_geometries([self.cost_map.pcd_tsdf, pcd])
return oloss_M
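if __name__ == "__main__":
    # Hedged smoke test (added for illustration; requires pypose): express
    # body-frame points in the world frame given SE(3) odometry.
    odom = pp.identity_SE3(2).tensor()  # [x, y, z, qx, qy, qz, qw] per sample
    points = torch.zeros(2, 4, 3)  # four body-frame points per pose
    world_ps = TrajCost.TransformPoints(odom, points)
    print(world_ps.tensor().shape)  # torch.Size([2, 4, 7])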
# EoF
| 12,602 | Python | 36.846847 | 115 | 0.532058 |
leggedrobotics/viplanner/viplanner/traj_cost_opt/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .traj_opt import CubicSplineTorch, TrajOpt
# for deployment in omniverse, pypose module is not available
try:
import pypose as pp
from .traj_cost import TrajCost
from .traj_viz import TrajViz
__all__ = ["TrajCost", "TrajOpt", "TrajViz", "CubicSplineTorch"]
except ModuleNotFoundError:
__all__ = ["TrajOpt", "CubicSplineTorch"]
# EoF
| 511 | Python | 23.380951 | 68 | 0.710372 |
leggedrobotics/viplanner/viplanner/traj_cost_opt/traj_viz.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import copy
from typing import Optional
import cv2
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import open3d.visualization.rendering as rendering
import pypose as pp
import scipy.spatial.transform as tf
import torch
# visual-planning-learning
from viplanner.cost_maps import CostMapPCD
from .traj_cost import TrajCost
class TrajViz:
def __init__(
self,
intrinsics: np.ndarray,
cam_resolution: tuple = (360, 640),
camera_tilt: float = 0.0,
cost_map: Optional[CostMapPCD] = None,
):
# get parameters
self._cam_resolution = cam_resolution
self._intrinsics = intrinsics
self._cost_map = cost_map
self._camera_tilt = camera_tilt
# init camera
self.set_camera()
def set_camera(self):
self.camera = o3d.camera.PinholeCameraIntrinsic(
self._cam_resolution[1], # width
self._cam_resolution[0], # height
self._intrinsics[0, 0], # fx
self._intrinsics[1, 1], # fy
self._intrinsics[0, 2], # cx (width/2)
self._intrinsics[1, 2], # cy (height/2)
)
return
def VizTrajectory(
self,
preds: torch.Tensor,
waypoints: torch.Tensor,
odom: torch.Tensor,
goal: torch.Tensor,
fear: torch.Tensor,
augment_viz: torch.Tensor,
cost_map: bool = True,
visual_height: float = 0.5,
mesh_size: float = 0.5,
fov_angle: float = 0.0,
) -> None:
"""Visualize the trajectory within the costmap
Args:
preds (torch.Tensor): predicted keypoints
waypoints (torch.Tensor): waypoints
odom (torch.Tensor): odom tensor
goal (torch.Tensor): goal tensor
fear (torch.Tensor): if trajectory is risky
augment_viz (torch.Tensor): if input has been augmented
cost_map (bool, optional): visualize costmap. Defaults to True.
visual_height (float, optional): visual height of the keypoints. Defaults to 0.5.
mesh_size (float, optional): size of the mesh. Defaults to 0.5.
fov_angle (float, optional): field of view angle. Defaults to 0.0.
"""
# transform to map frame
if not isinstance(self._cost_map, CostMapPCD):
print("Cost map is missing.")
return
batch_size = len(waypoints)
# transform to world frame
preds_ws = TrajCost.TransformPoints(odom, preds).tensor().cpu().detach().numpy()
wp_ws = TrajCost.TransformPoints(odom, waypoints).tensor().cpu().detach().numpy()
goal_ws = pp.SE3(odom) @ pp.SE3(goal)
# convert to positions
goal_ws = goal_ws.tensor()[:, 0:3].numpy()
visual_list = []
if cost_map:
visual_list.append(self._cost_map.pcd_tsdf)
else:
visual_list.append(self._cost_map.pcd_viz)
visual_height = visual_height / 5.0
        # visualize the trajectories
traj_pcd = o3d.geometry.PointCloud()
wp_ws = np.concatenate(wp_ws, axis=0)
wp_ws[:, 2] = wp_ws[:, 2] + visual_height
traj_pcd.points = o3d.utility.Vector3dVector(wp_ws[:, 0:3])
traj_pcd.paint_uniform_color([0.99, 0.1, 0.1])
visual_list.append(traj_pcd)
# start and goal marks
mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 1.5) # start points
mesh_sphere_augment = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 1.5) # start points
small_sphere = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 3.0) # successful trajectory points
small_sphere_fear = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 3.0) # unsuccessful trajectory points
mesh_box = o3d.geometry.TriangleMesh.create_box(mesh_size, mesh_size, mesh_size) # end points
# set mesh colors
mesh_box.paint_uniform_color([1.0, 0.64, 0.0])
small_sphere.paint_uniform_color([0.4, 1.0, 0.1])
small_sphere_fear.paint_uniform_color([1.0, 0.4, 0.1])
mesh_sphere_augment.paint_uniform_color([0.0, 0.0, 1.0])
# field of view visualization
fov_vis_length = 0.75 # length of the fov visualization plane in meters
fov_vis_pt_right = pp.SE3(odom) @ pp.SE3(
[
fov_vis_length * np.cos(fov_angle / 2),
fov_vis_length * np.sin(fov_angle / 2),
0,
0,
0,
0,
1,
]
)
fov_vis_pt_left = pp.SE3(odom) @ pp.SE3(
[
fov_vis_length * np.cos(fov_angle / 2),
-fov_vis_length * np.sin(fov_angle / 2),
0,
0,
0,
0,
1,
]
)
fov_vis_pt_right = fov_vis_pt_right.numpy()[:, 0:3]
fov_vis_pt_right[:, 2] += visual_height
fov_vis_pt_left = fov_vis_pt_left.numpy()[:, 0:3]
fov_vis_pt_left[:, 2] += visual_height
lines = []
points = []
for i in range(batch_size):
lines.append([2 * i, 2 * i + 1])
gp = goal_ws[i, :]
op = odom.numpy()[i, :]
op[2] = op[2] + visual_height
gp[2] = gp[2] + visual_height
points.append(gp[:3].tolist())
points.append(op[:3].tolist())
# add fov visualization
fov_mesh = o3d.geometry.TriangleMesh(
vertices=o3d.utility.Vector3dVector(np.array([op[:3], fov_vis_pt_right[i], fov_vis_pt_left[i]])),
triangles=o3d.utility.Vector3iVector(np.array([[2, 1, 0]])),
)
fov_mesh.paint_uniform_color([1.0, 0.5, 0.0])
visual_list.append(fov_mesh)
# add visualization
if augment_viz[i]:
visual_list.append(copy.deepcopy(mesh_sphere_augment).translate((op[0], op[1], op[2])))
else:
visual_list.append(copy.deepcopy(mesh_sphere).translate((op[0], op[1], op[2])))
visual_list.append(
copy.deepcopy(mesh_box).translate(
(
gp[0] - mesh_size / 2.0,
gp[1] - mesh_size / 2.0,
gp[2] - mesh_size / 2.0,
)
)
)
for j in range(preds_ws[i].shape[0]):
kp = preds_ws[i][j, :]
if fear[i, :] > 0.5:
visual_list.append(
copy.deepcopy(small_sphere_fear).translate((kp[0], kp[1], kp[2] + visual_height))
)
else:
visual_list.append(copy.deepcopy(small_sphere).translate((kp[0], kp[1], kp[2] + visual_height)))
# set line from odom to goal
colors = [[0.99, 0.99, 0.1] for i in range(len(lines))]
line_set = o3d.geometry.LineSet(
o3d.utility.Vector3dVector(points),
o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(colors)
visual_list.append(line_set)
o3d.visualization.draw_geometries(visual_list)
return
def VizImages(
self,
preds: torch.Tensor,
waypoints: torch.Tensor,
odom: torch.Tensor,
goal: torch.Tensor,
fear,
images: torch.Tensor,
visual_offset=0.35,
mesh_size=0.3,
is_shown=True,
iplanner: bool = False,
transform: bool = True,
):
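        """Render predicted keypoints and interpolated waypoints on top of the
        input images using an offscreen Open3D renderer.
        Args:
            preds (torch.Tensor): predicted keypoints
            waypoints (torch.Tensor): interpolated waypoints
            odom (torch.Tensor): odom tensor
            goal (torch.Tensor): goal tensor
            fear: if trajectory is risky
            images (torch.Tensor): input images to overlay
            visual_offset (float, optional): vertical offset of the markers. Defaults to 0.35.
            mesh_size (float, optional): size of the marker meshes. Defaults to 0.3.
            is_shown (bool, optional): show each rendering in a blocking matplotlib window. Defaults to True.
            iplanner (bool, optional): use the iPlanner color scheme. Defaults to False.
            transform (bool, optional): transform inputs from the odom frame to the world frame. Defaults to True.
        """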
batch_size = len(waypoints)
if transform:
preds_ws = TrajCost.TransformPoints(odom, preds).tensor().cpu().detach().numpy()
wp_ws = TrajCost.TransformPoints(odom, waypoints).tensor().cpu().detach().numpy()
if goal.shape[-1] != 7:
pp_goal = pp.identity_SE3(batch_size, device=goal.device)
pp_goal.tensor()[:, 0:3] = goal
goal = pp_goal.tensor()
goal_ws = pp.SE3(odom) @ pp.SE3(goal)
# convert to positions
goal_ws = goal_ws.tensor()[:, 0:3].cpu().detach().numpy()
else:
preds_ws = preds.cpu().detach().numpy()
wp_ws = waypoints.cpu().detach().numpy()
goal_ws = goal.cpu().detach().numpy()
# adjust height
goal_ws[:, 2] = goal_ws[:, 2] - visual_offset
        # set material shader
mtl = o3d.visualization.rendering.MaterialRecord()
mtl.base_color = [1.0, 1.0, 1.0, 0.3]
mtl.shader = "defaultUnlit"
# set meshes
small_sphere = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 10.0) # trajectory points
small_sphere_fear = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 10.0) # trajectory points
mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 2.0) # successful predict points
mesh_sphere_fear = o3d.geometry.TriangleMesh.create_sphere(mesh_size / 2.0) # unsuccessful predict points
mesh_box = o3d.geometry.TriangleMesh.create_box(mesh_size, mesh_size, mesh_size * 2) # end points
# set colors
if iplanner:
small_sphere.paint_uniform_color([0.0, 0.0, 1.0]) # blue
mesh_sphere.paint_uniform_color([1.0, 1.0, 0.0])
else:
            small_sphere.paint_uniform_color([0.99, 0.2, 0.1])  # red
mesh_sphere.paint_uniform_color([0.4, 1.0, 0.1])
small_sphere_fear.paint_uniform_color([1.0, 0.4, 0.2])
mesh_sphere_fear.paint_uniform_color([1.0, 0.2, 0.1])
mesh_box.paint_uniform_color([1.0, 0.64, 0.1])
# init open3D render
render = rendering.OffscreenRenderer(self.camera.width, self.camera.height)
render.scene.set_background([0.0, 0.0, 0.0, 1.0]) # RGBA
# wp_start_idx = int(waypoints.shape[1] / preds.shape[1])
wp_start_idx = 1
cv_img_list = []
if is_shown:
fig, ax = plt.subplots()
for i in range(batch_size):
# add geometries
gp = goal_ws[i, :]
# add goal marker
goal_mesh = copy.deepcopy(mesh_box).translate(
(
gp[0] - mesh_size / 2.0,
gp[1] - mesh_size / 2.0,
gp[2] - mesh_size / 2.0,
)
)
render.scene.add_geometry("goal_mesh", goal_mesh, mtl)
# add predictions
for j, kp in enumerate(preds_ws[i]):
if fear[i, :] > 0.5:
kp_mesh = copy.deepcopy(mesh_sphere_fear).translate((kp[0], kp[1], kp[2] - visual_offset))
else:
kp_mesh = copy.deepcopy(mesh_sphere).translate((kp[0], kp[1], kp[2] - visual_offset))
render.scene.add_geometry("keypose" + str(j), kp_mesh, mtl)
# add trajectory
for k, wp in enumerate(wp_ws[i]):
if k < wp_start_idx:
continue
if fear[i, :] > 0.5:
wp_mesh = copy.deepcopy(small_sphere_fear).translate((wp[0], wp[1], wp[2] - visual_offset))
else:
wp_mesh = copy.deepcopy(small_sphere).translate((wp[0], wp[1], wp[2] - visual_offset))
render.scene.add_geometry("waypoint" + str(k), wp_mesh, mtl)
# set cameras
self.CameraLookAtPose(odom[i, :], render)
# project to image
img_o3d = np.asarray(render.render_to_image())
mask = (img_o3d < 10).all(axis=2)
# Attach image
c_img = images[i, :, :].expand(3, -1, -1)
c_img = c_img.cpu().detach().numpy()
c_img = np.moveaxis(c_img, 0, 2)
c_img = (c_img * 255 / np.max(c_img)).astype("uint8")
img_o3d[mask, :] = c_img[mask, :]
img_cv2 = cv2.cvtColor(img_o3d, cv2.COLOR_RGBA2BGRA)
cv_img_list.append(img_cv2)
if is_shown:
plt.imshow(img_cv2)
plt.draw()
                plt.waitforbuttonpress(0)  # waits indefinitely for a key/mouse press
plt.close(fig)
# clear render geometry
render.scene.clear_geometry()
return cv_img_list
def CameraLookAtPose(self, odom, render):
unit_vec = pp.identity_SE3(device=odom.device)
unit_vec.tensor()[0] = 1.0
tilt_vec = [0, 0, 0]
tilt_vec.extend(list(tf.Rotation.from_euler("y", self._camera_tilt, degrees=False).as_quat()))
tilt_vec = torch.tensor(tilt_vec, device=odom.device, dtype=odom.dtype)
target_pose = pp.SE3(odom) @ pp.SE3(tilt_vec) @ unit_vec
camera_up = [0, 0, 1] # camera orientation
eye = pp.SE3(odom)
eye = eye.tensor()[0:3].cpu().detach().numpy()
target = target_pose.tensor()[0:3].cpu().detach().numpy()
render.scene.camera.look_at(target, eye, camera_up)
return
# EoF
| 13,197 | Python | 38.993939 | 118 | 0.537243 |
leggedrobotics/viplanner/viplanner/config/viplanner_sem_meta.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
OBSTACLE_LOSS = 2.0
TRAVERSABLE_INTENDED_LOSS = 0
TRAVERSABLE_UNINTENDED_LOSS = 0.5
ROAD_LOSS = 1.5
TERRAIN_LOSS = 1.0
# NOTE: only OBSTACLE_LOSS should exceed the obstacle_threshold defined in costmap_cfg.py
# original coco meta
VIPLANNER_SEM_META = [
# TRAVERSABLE SPACE ###
# traversable intended
{
"name": "sidewalk",
"loss": TRAVERSABLE_INTENDED_LOSS,
"color": [0, 255, 0],
"ground": True,
},
{
"name": "crosswalk",
"loss": TRAVERSABLE_INTENDED_LOSS,
"color": [0, 102, 0],
"ground": True,
},
{
"name": "floor",
"loss": TRAVERSABLE_INTENDED_LOSS,
"color": [0, 204, 0],
"ground": True,
},
{
"name": "stairs",
"loss": TRAVERSABLE_INTENDED_LOSS,
"color": [0, 153, 0],
"ground": True,
},
# traversable not intended
{
"name": "gravel",
"loss": TRAVERSABLE_UNINTENDED_LOSS,
"color": [204, 255, 0],
"ground": True,
},
{
"name": "sand",
"loss": TRAVERSABLE_UNINTENDED_LOSS,
"color": [153, 204, 0],
"ground": True,
},
{
"name": "snow",
"loss": TRAVERSABLE_UNINTENDED_LOSS,
"color": [204, 102, 0],
"ground": True,
},
{
"name": "indoor_soft", # human made thing, can be walked on
"color": [102, 153, 0],
"loss": TERRAIN_LOSS,
"ground": False,
},
{
"name": "terrain",
"color": [255, 255, 0],
"loss": TERRAIN_LOSS,
"ground": True,
},
{
"name": "road",
"loss": ROAD_LOSS,
"color": [255, 128, 0],
"ground": True,
},
# OBSTACLES ###
# human
{
"name": "person",
"color": [255, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "anymal",
"color": [204, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# vehicle
{
"name": "vehicle",
"color": [153, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "on_rails",
"color": [51, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "motorcycle",
"color": [102, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "bicycle",
"color": [102, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# construction
{
"name": "building",
"loss": OBSTACLE_LOSS,
"color": [127, 0, 255],
"ground": False,
},
{
"name": "wall",
"color": [102, 0, 204],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "fence",
"color": [76, 0, 153],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "bridge",
"color": [51, 0, 102],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "tunnel",
"color": [51, 0, 102],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# object
{
"name": "pole",
"color": [0, 0, 255],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "traffic_sign",
"color": [0, 0, 153],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "traffic_light",
"color": [0, 0, 204],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "bench",
"color": [0, 0, 102],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# nature
{
"name": "vegetation",
"color": [153, 0, 153],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "water_surface",
"color": [204, 0, 204],
"loss": OBSTACLE_LOSS,
"ground": True,
},
# sky
{
"name": "sky",
"color": [102, 0, 51],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "background",
"color": [102, 0, 51],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# void outdoor
{
"name": "dynamic",
"color": [32, 0, 32],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "static", # also everything unknown
"color": [0, 0, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
# indoor
{
"name": "furniture",
"color": [0, 0, 51],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "door",
"color": [153, 153, 0],
"loss": OBSTACLE_LOSS,
"ground": False,
},
{
"name": "ceiling",
"color": [25, 0, 51],
"loss": OBSTACLE_LOSS,
"ground": False,
},
]
class VIPlannerSemMetaHandler:
"""Useful functions for handling VIPlanner semantic meta data."""
def __init__(self) -> None:
# meta config
self.meta = VIPLANNER_SEM_META
# class loss dict
self.class_loss: dict = self._get_class_loss_dict()
self.class_color: dict = self._get_class_color_dict()
self.class_ground: dict = self._get_class_ground_dict()
self.class_id: dict = self._get_class_id_dict()
return
def get_colors_for_names(self, name_list: list) -> list:
"""Get list of colors for a list of names."""
colors = []
name_to_color = {nc["name"]: nc["color"] for nc in self.meta}
for name in name_list:
if name in name_to_color:
colors.append(name_to_color[name])
return colors
def _get_class_loss_dict(self) -> dict:
"""Get class loss dict."""
return {nc["name"]: nc["loss"] for nc in self.meta}
def _get_class_color_dict(self) -> dict:
"""Get class color dict."""
return {nc["name"]: nc["color"] for nc in self.meta}
def _get_class_ground_dict(self) -> dict:
"""Get class ground dict."""
return {nc["name"]: nc["ground"] for nc in self.meta}
def _get_class_id_dict(self) -> dict:
"""Get class id dict."""
return {nc["name"]: i for i, nc in enumerate(self.meta)}
@property
def colors(self) -> list:
"""Get list of colors."""
return list(self.class_color.values())
@property
def losses(self) -> list:
"""Get list of losses."""
return list(self.class_loss.values())
@property
def names(self) -> list:
"""Get list of names."""
return list(self.class_loss.keys())
@property
def ground(self) -> list:
"""Get list of ground."""
return list(self.class_ground.values())
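# Usage sketch (added for illustration):
#   meta = VIPlannerSemMetaHandler()
#   meta.class_loss["road"]                # -> 1.5 (ROAD_LOSS)
#   meta.get_colors_for_names(["person"])  # -> [[255, 0, 0]]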
"""CLASS COLOR VISUALIZATION"""
if __name__ == "__main__":
import matplotlib.pyplot as plt
# init meta handler
meta_handler = VIPlannerSemMetaHandler()
# class ordering array
cls_order = [
["sky", "background", "ceiling", "dynamic", "static"],
[
"building",
"wall",
"fence",
"vegetation",
"water_surface",
], # 'bridge',
[
"pole",
"traffic_light",
"traffic_sign",
"bench",
"furniture",
"door",
],
["gravel", "sand", "indoor_soft", "terrain", "snow", "road"],
["sidewalk", "floor", "stairs", "crosswalk"],
["person", "anymal", "vehicle", "motorcycle", "bicycle", "on_rails"],
]
    # Create the 6x6 grid of subplots
fig, axs = plt.subplots(nrows=6, ncols=6, figsize=(10, 10))
# Loop over each subplot and plot the data
for i in range(6):
for j in range(6):
ax = axs[i][j]
# Remove the axis, axis ticks, border, ...
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
# plot color
if j >= len(cls_order[i]):
continue
ax.imshow([[tuple(meta_handler.class_color[cls_order[i][j]])]])
ax.set_title(cls_order[i][j], fontsize=16)
ax.set_xlabel(meta_handler.class_color[cls_order[i][j]], fontsize=12)
# Set the overall title of the plot
fig.suptitle("VIPlanner Semantic Classes Color Scheme", fontsize=22)
# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.tight_layout()
plt.savefig("/home/{$USER}/viplanner_semantic_classes_color_scheme.png", dpi=300)
# Show the plot
plt.show()
# EoF
| 8,966 | Python | 23.839335 | 85 | 0.481151 |
leggedrobotics/viplanner/viplanner/config/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .coco_sem_meta import _COCO_MAPPING, get_class_for_id
from .costmap_cfg import (
CostMapConfig,
GeneralCostMapConfig,
ReconstructionCfg,
SemCostMapConfig,
TsdfCostMapConfig,
)
from .learning_cfg import DataCfg, TrainCfg
from .viplanner_sem_meta import OBSTACLE_LOSS, VIPlannerSemMetaHandler
__all__ = [
# configs
"ReconstructionCfg",
"SemCostMapConfig",
"TsdfCostMapConfig",
"CostMapConfig",
"GeneralCostMapConfig",
"TrainCfg",
"DataCfg",
# mapping
"VIPlannerSemMetaHandler",
"OBSTACLE_LOSS",
"get_class_for_id",
"_COCO_MAPPING",
]
# EoF
| 766 | Python | 20.914285 | 70 | 0.693211 |
leggedrobotics/viplanner/viplanner/config/learning_cfg.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
# python
from dataclasses import dataclass, field
from typing import List, Optional, Tuple, Union
import yaml
# define own loader class to include DataCfg
class Loader(yaml.SafeLoader):
pass
def construct_datacfg(loader, node):
    add_dicts = {}
    # iterate over a copy because entries are removed from node.value below
    for node_entry in list(node.value):
        if isinstance(node_entry[1], yaml.MappingNode):
            add_dicts[node_entry[0].value] = loader.construct_mapping(node_entry[1])
            node.value.remove(node_entry)
    return DataCfg(**loader.construct_mapping(node), **add_dicts)
Loader.add_constructor(
"tag:yaml.org,2002:python/object:viplanner.config.learning_cfg.DataCfg",
construct_datacfg,
)
@dataclass
class DataCfg:
"""Config for data loading"""
# real world data used --> images have to be rotated by 180 degrees
real_world_data: bool = False
# from carla dataset (exclude certain spaces)
carla: bool = False
# identification suffix of the cameras for semantic and depth images
depth_suffix = "_cam0"
sem_suffix = "_cam1"
# data processing
max_depth: float = 15.0
"maximum depth for depth image"
# odom (=start) point selection
max_goal_distance: float = 15.0
min_goal_distance: float = 0.5
"maximum and minimum distance between odom and goal"
distance_scheme: dict = field(default_factory=lambda: {1: 0.2, 3: 0.35, 5: 0.25, 7.5: 0.15, 10: 0.05})
# select goal points for the samples according to the scheme:
# {distance: percentage of goals}, distances have to be increasing
# and max distance has to be equal to max_goal_distance
obs_cost_height: float = 1.5
"all odom points with cost of more than obs_cost_height are discarded (negative cost of cost_map will be automatically added)"
fov_scale: float = 1.0
"scaling of the field of view (only goals within fov are considered)"
depth_scale: float = 1000.0
"scaling of the depth image"
# train val split
ratio: float = 0.9
"ratio between train and val dataset"
max_train_pairs: Optional[int] = None
pairs_per_image: int = 4
"maximum number of train pairs (can be used to limit training time) can be set, otherwise number of recorded images times pairs_per_image is used"
ratio_fov_samples: float = 1.0
ratio_front_samples: float = 0.0
ratio_back_samples: float = 0.0
"samples distribution -> either within the robots fov, in front of the robot but outside the fov or behind the robot"
    # edge blur (real-world RealSense depth is noisy along edges) --> will also be visible in rgb/sem images due to the warp
    noise_edges: bool = False  # not activated for CARLA yet
edge_threshold: int = 100
extend_kernel_size: Tuple[int, int] = field(default_factory=lambda: [5, 5])
# noise augmentation --> will be applied to a scaled image with range between [0, 1]
depth_salt_pepper: Optional[float] = None # Proportion of image pixels to replace with noise on range [0, 1]
depth_gaussian: Optional[float] = None # Standard deviation of the noise to add (no clipping applied)
depth_random_polygons_nb: Optional[int] = None # Number of random polygons to add
depth_random_polygon_size: int = 10 # Size of the random polygons in pixels
sem_rgb_pepper: Optional[float] = None # Proportion of pixels to randomly set to 0
    sem_rgb_black_img: Optional[float] = None  # Randomly set this proportion of images to completely black images
sem_rgb_random_polygons_nb: Optional[int] = None # Number of random polygons to add
sem_rgb_random_polygon_size: int = 20 # Size of the random polygons in pixels
@dataclass
class TrainCfg:
"""Config for multi environment training"""
# high level configurations
sem: bool = True
rgb: bool = False
"use semantic/ rgb image"
file_name: Optional[str] = None
"appendix to the model filename if needed"
seed: int = 0
"random seed"
gpu_id: int = 0
"GPU id"
file_path: str = "${USER_PATH_TO_MODEL_DATA}"
"file path to models and data directory, can be overwritten by environment variable EXPERIMENT_DIRECTORY (e.g. for cluster)"
# NOTE: since the environment variable is intended for cluster usage, some visualizations will be automatically switched off
# data and dataloader configurations
cost_map_name: str = "cost_map_sem" # "cost_map_sem"
"cost map name"
env_list: List[str] = field(
default_factory=lambda: [
"2azQ1b91cZZ",
"JeFG25nYj2p",
"Vvot9Ly1tCj",
"ur6pFq6Qu1A",
"B6ByNegPMKs",
"8WUmhLawc2A",
"E9uDoFAP3SH",
"QUCTc6BB5sX",
"YFuZgdQ5vWj",
"2n8kARJN3HM",
]
)
test_env_id: int = 9
"the test env id in the id list"
    data_cfg: Union[DataCfg, List[DataCfg]] = field(default_factory=DataCfg)
"further data configuration (can be individualized for every environment)"
multi_epoch_dataloader: bool = False
"load all samples into RAM s.t. do not have to be reloaded for each epoch"
num_workers: int = 4
"number of workers for dataloader"
load_in_ram: bool = False
"if true, all samples will be loaded into RAM s.t. do not have to be reloaded for each epoch"
# loss configurations
fear_ahead_dist: float = 2.5
"fear lookahead distance"
w_obs: float = 0.25
w_height: float = 1.0
w_motion: float = 1.5
w_goal: float = 4.0
"weights for the loss components"
obstacle_thread: float = 1.2
"obstacle threshold to decide if fear path or not (neg reward for semantic cost-maps is added automatically)"
# network configurations
img_input_size: Tuple[int, int] = field(default_factory=lambda: [360, 640])
"image size (will be cropped if larger or resized if smaller)"
in_channel: int = 16
"goal input channel numbers"
knodes: int = 5
"number of max waypoints predicted"
pre_train_sem: bool = True
pre_train_cfg: Optional[str] = "m2f_model/coco/panoptic/maskformer2_R50_bs16_50ep.yaml"
pre_train_weights: Optional[str] = "m2f_model/coco/panoptic/model_final_94dc52.pkl"
pre_train_freeze: bool = True
"loading of a pre-trained rgb encoder from mask2former (possible is ResNet 50 or 101)"
# NOTE: `pre_train_cfg` and `pre_train_weights` are assumed to be found under `file_path/models` (see above)
decoder_small: bool = False
"small decoder with less parameters"
# training configurations
resume: bool = False
"resume training"
epochs: int = 100
"number of training epochs"
batch_size: int = 64
"number of minibatch size"
hierarchical: bool = False
hierarchical_step: int = 50
hierarchical_front_step_ratio: float = 0.02
hierarchical_back_step_ratio: float = 0.01
"hierarchical training with an adjusted data structure"
# optimizer and scheduler configurations
lr: float = 2e-3
"learning rate"
factor: float = 0.5
"ReduceLROnPlateau factor"
min_lr: float = 1e-5
"minimum lr for ReduceLROnPlateau"
patience: int = 3
"patience of epochs for ReduceLROnPlateau"
optimizer: str = "sgd" # either adam or sgd
"optimizer"
momentum: float = 0.1
"momentum of the optimizer"
w_decay: float = 1e-4
"weight decay of the optimizer"
# visualization configurations
camera_tilt: float = 0.15
"camera tilt angle for visualization only"
n_visualize: int = 15
"number of trajectories that are visualized"
# logging configurations
wb_project: str = "Matterport"
wb_entity: str = "viplanner"
wb_api_key: str = "enter_your_key_here"
# functions
def get_model_save(self, epoch: Optional[int] = None):
input_domain = "DepSem" if self.sem else "Dep"
cost_name = "Geom" if self.cost_map_name == "cost_map_geom" else "Sem"
optim = "SGD" if self.optimizer == "sgd" else "Adam"
name = f"_{self.file_name}" if self.file_name is not None else ""
epoch = epoch if epoch is not None else self.epochs
hierarch = "_hierarch" if self.hierarchical else ""
return f"plannernet_env{self.env_list[0]}_ep{epoch}_input{input_domain}_cost{cost_name}_optim{optim}{hierarch}{name}"
@property
def all_model_dir(self):
return os.path.join(os.getenv("EXPERIMENT_DIRECTORY", self.file_path), "models")
@property
def curr_model_dir(self):
return os.path.join(self.all_model_dir, self.get_model_save())
@property
def data_dir(self):
return os.path.join(os.getenv("EXPERIMENT_DIRECTORY", self.file_path), "data")
@property
def log_dir(self):
return os.path.join(os.getenv("EXPERIMENT_DIRECTORY", self.file_path), "logs")
@classmethod
def from_yaml(cls, yaml_path: str):
# open yaml file and load config
with open(yaml_path) as f:
cfg_dict = yaml.load(f, Loader=Loader)
return cls(**cfg_dict["config"])
# EoF
| 9,111 | Python | 36.04065 | 150 | 0.669191 |
leggedrobotics/viplanner/viplanner/config/costmap_cfg.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from dataclasses import dataclass, field
from typing import Optional
import yaml
class Loader(yaml.SafeLoader):
pass
def construct_GeneralCostMapConfig(loader, node):
return GeneralCostMapConfig(**loader.construct_mapping(node))
Loader.add_constructor(
"tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.GeneralCostMapConfig",
construct_GeneralCostMapConfig,
)
def construct_ReconstructionCfg(loader, node):
return ReconstructionCfg(**loader.construct_mapping(node))
Loader.add_constructor(
"tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.ReconstructionCfg",
construct_ReconstructionCfg,
)
def construct_SemCostMapConfig(loader, node):
return SemCostMapConfig(**loader.construct_mapping(node))
Loader.add_constructor(
"tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.SemCostMapConfig",
construct_SemCostMapConfig,
)
def construct_TsdfCostMapConfig(loader, node):
return TsdfCostMapConfig(**loader.construct_mapping(node))
Loader.add_constructor(
"tag:yaml.org,2002:python/object:viplanner.config.costmap_cfg.TsdfCostMapConfig",
construct_TsdfCostMapConfig,
)
@dataclass
class ReconstructionCfg:
"""
Arguments for 3D reconstruction using depth maps
"""
    # directory where the environment with the depth (and semantic) images is located
    data_dir: str = "${USER_PATH_TO_DATA}"
    # output directory used by get_out_path (assumed field: defaults to the same placeholder as data_dir)
    out_dir: str = "${USER_PATH_TO_DATA}"
    # environment name
    env: str = "town01"
# image suffix
depth_suffix = "_cam0"
sem_suffix = "_cam1"
    # higher-resolution depth images available for reconstruction (meaning the depth images are also taken by the semantic camera)
high_res_depth: bool = False
# reconstruction parameters
voxel_size: float = 0.05 # [m] 0.05 for matterport 0.1 for carla
start_idx: int = 0 # start index for reconstruction
max_images: Optional[int] = 1000 # maximum number of images to reconstruct, if None, all images are used
depth_scale: float = 1000.0 # depth scale factor
# semantic reconstruction
semantics: bool = True
# speed vs. memory trade-off parameters
    point_cloud_batch_size: int = (
        200  # number of images whose 3D points are added to the point cloud at once (higher values use more memory but are faster)
    )
""" Internal functions """
def get_data_path(self) -> str:
return os.path.join(self.data_dir, self.env)
def get_out_path(self) -> str:
return os.path.join(self.out_dir, self.env)
@dataclass
class SemCostMapConfig:
"""Configuration for the semantic cost map"""
# point-cloud filter parameters
    ground_height: Optional[float] = -0.5  # None for matterport, -0.5 for carla, -1.0 for nomoko
robot_height: float = 0.70
robot_height_factor: float = 3.0
nb_neighbors: int = 100
std_ratio: float = 2.0 # keep high, otherwise ground will be removed
downsample: bool = False
# smoothing
nb_neigh: int = 15
change_decimal: int = 3
    conv_crit: float = (
        0.45  # ratio of points that have to change by at least the change_decimal-th decimal value to converge
    )
nb_tasks: Optional[int] = 10 # number of tasks for parallel processing, if None, all available cores are used
sigma_smooth: float = 2.5
max_iterations: int = 1
# obstacle threshold (multiplied with highest loss value defined for a semantic class)
obstacle_threshold: float = 0.8 # 0.5/ 0.6 for matterport, 0.8 for carla
# negative reward for space with smallest cost (introduces a gradient in area with smallest loss value, steering towards center)
# NOTE: at the end cost map is elevated by that amount to ensure that the smallest cost is 0
negative_reward: float = 0.5
    # points whose loss value, rounded to round_decimal_traversable decimals, equals 0.0 are selected; the traversable gradient is determined based on them
round_decimal_traversable: int = 2
# compute height map
compute_height_map: bool = False # false for matterport, true for carla and nomoko
@dataclass
class TsdfCostMapConfig:
"""Configuration for the tsdf cost map"""
# offset of the point cloud
offset_z: float = 0.0
# filter parameters
ground_height: float = 0.35
robot_height: float = 0.70
robot_height_factor: float = 2.0
nb_neighbors: int = 50
std_ratio: float = 0.2
filter_outliers: bool = True
# dilation parameters
sigma_expand: float = 2.0
obstacle_threshold: float = 0.01
free_space_threshold: float = 0.5
@dataclass
class GeneralCostMapConfig:
"""General Cost Map Configuration"""
# path to point cloud
root_path: str = "town01"
ply_file: str = "cloud.ply"
# resolution of the cost map
resolution: float = 0.1 # [m] (0.04 for matterport, 0.1 for carla)
# map parameters
    clear_dist: float = 1.0  # cost map expansion over the point cloud space (prevents paths from leaving the map)
# smoothing parameters
sigma_smooth: float = 3.0
# cost map expansion
    x_min: Optional[float] = -8.05
    # [m] if None, the minimum of the point cloud is used (carla town01: -8.05, matterport: None)
    y_min: Optional[float] = -8.05
    # [m] if None, the minimum of the point cloud is used (carla town01: -8.05, matterport: None)
    x_max: Optional[float] = 346.22
    # [m] if None, the maximum of the point cloud is used (carla town01: 346.22, matterport: None)
    y_max: Optional[float] = 336.65
    # [m] if None, the maximum of the point cloud is used (carla town01: 336.65, matterport: None)
@dataclass
class CostMapConfig:
"""General Cost Map Configuration"""
# cost map domains
semantics: bool = True
geometry: bool = False
# name
map_name: str = "cost_map_sem"
# general cost map configuration
    general: GeneralCostMapConfig = field(default_factory=GeneralCostMapConfig)
    # individual cost map configurations
    sem_cost_map: SemCostMapConfig = field(default_factory=SemCostMapConfig)
    tsdf_cost_map: TsdfCostMapConfig = field(default_factory=TsdfCostMapConfig)
# visualize cost map
visualize: bool = True
# FILLED BY CODE -> DO NOT CHANGE ###
    x_start: Optional[float] = None
    y_start: Optional[float] = None
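if __name__ == "__main__":
    # Hedged sketch (added for illustration): compose a semantic cost-map
    # config; root_path/ply_file are placeholders for a reconstructed scene.
    cfg = CostMapConfig(general=GeneralCostMapConfig(root_path="town01", ply_file="cloud.ply"))
    print(cfg.map_name, cfg.general.resolution)  # cost_map_sem 0.1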
# EoF
| 6,332 | Python | 30.984848 | 149 | 0.695515 |
leggedrobotics/viplanner/viplanner/config/coco_sem_meta.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Modified from https://github.com/google-research/deeplab2/blob/main/data/coco_constants.py
# File containing the meta info of all classes from the COCO dataset.
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
]
_COCO_MAPPING = {
"road": ["road"],
"sidewalk": [
"pavement-merged",
],
"floor": [
"floor-other-merged",
"floor-wood",
"platform",
"playingfield",
"rug-merged",
],
"gravel": [
"gravel",
],
"stairs": [
"stairs",
],
"sand": [
"sand",
],
"snow": [
"snow",
],
"person": ["person"],
"anymal": [
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
],
"vehicle": [
"car",
"bus",
"truck",
"boat",
],
"on_rails": [
"train",
"railroad",
],
"motorcycle": [
"motorcycle",
],
"bicycle": [
"bicycle",
],
"building": [
"building-other-merged",
"house",
"roof",
],
"wall": [
"wall-other-merged",
"curtain",
"mirror-stuff",
"wall-brick",
"wall-stone",
"wall-tile",
"wall-wood",
"window-blind",
"window-other",
],
"fence": [
"fence-merged",
],
"bridge": [
"bridge",
],
"pole": [
"fire hydrant",
"parking meter",
],
"traffic_sign": [
"stop sign",
],
"traffic_light": [
"traffic light",
],
"bench": [
"bench",
],
"vegetation": [
"potted plant",
"flower",
"tree-merged",
"mountain-merged",
"rock-merged",
],
"terrain": [
"grass-merged",
"dirt-merged",
],
"water_surface": [
"river",
"sea",
"water-other",
],
"sky": [
"sky-other-merged",
"airplane",
],
"dynamic": [
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"book",
# sports
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
# kitchen
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
# food
"banana",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"fruit",
"food-other-merged",
"apple",
# computer hardware
"mouse",
"remote",
"keyboard",
"cell phone",
"laptop",
# other
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
"net",
"paper-merged",
],
"static": [
"banner",
"cardboard",
"light",
"tent",
"unknown",
],
"furniture": [
"chair",
"couch",
"bed",
"dining table",
"toilet",
"clock",
"vase",
"blanket",
"pillow",
"shelf",
"cabinet",
"table-merged",
"counter",
"tv",
],
"door": [
"door-stuff",
],
"ceiling": ["ceiling-merged"],
"indoor_soft": [
"towel",
],
}
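# Mapping from viplanner cost classes to the COCO category names they absorb.
# Note: the helpers below match by substring, i.e. a COCO name is assigned to
# the first class whose keyword list contains a substring of that name.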
def get_class_for_id():
id_to_class = {}
for idx, id_dict in enumerate(COCO_CATEGORIES):
success = False
for class_name, keywords in _COCO_MAPPING.items():
if any(keyword in id_dict["name"] for keyword in keywords):
id_to_class[idx] = class_name
success = True
break
if not success:
print("No mapping found for {}".format(id_dict["name"]))
return id_to_class
def get_class_for_id_mmdet(class_list: list):
id_to_class = {}
for idx, coco_class_name in enumerate(class_list):
success = False
for class_name, keywords in _COCO_MAPPING.items():
if any(keyword in coco_class_name for keyword in keywords):
id_to_class[idx] = class_name
success = True
break
if not success:
            print("No mapping found for {}".format(coco_class_name))
return id_to_class
if __name__ == "__main__":
print(get_class_for_id())
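    # Illustrative check of the mmdet variant on a tiny, hand-written class
    # list (an assumption for demonstration, not an actual model's class list):
    print(get_class_for_id_mmdet(["person", "bicycle", "car"]))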
| 14,752 | Python | 38.341333 | 92 | 0.46814 |
leggedrobotics/viplanner/viplanner/utils/__init__.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
| 148 | Python | 23.833329 | 60 | 0.736486 |
leggedrobotics/viplanner/viplanner/utils/dataset.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import math
# python
import os
import random
import shutil
from pathlib import Path
from random import sample
from typing import Dict, List, Optional, Tuple
import cv2
import networkx as nx
import numpy as np
import open3d as o3d
import PIL
import pypose as pp
import scipy.spatial.transform as tf
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from scipy.spatial.kdtree import KDTree
from skimage.util import random_noise
from torch.utils.data import Dataset
from tqdm import tqdm
# imperative-planner-learning
from viplanner.config import DataCfg
from viplanner.cost_maps import CostMapPCD
# set default dtype to float32
torch.set_default_dtype(torch.float32)
class PlannerData(Dataset):
def __init__(
self,
cfg: DataCfg,
transform,
semantics: bool = False,
rgb: bool = False,
pixel_mean: Optional[np.ndarray] = None,
pixel_std: Optional[np.ndarray] = None,
) -> None:
"""_summary_
Args:
cfg (DataCfg): Dataset COnfiguration
transform (_type_): Compose torchvision transforms (resize and to tensor)
semantics (bool, optional): If semantics are used in the network input. Defaults to False.
"""
self._cfg = cfg
self.transform = transform
self.semantics = semantics
self.rgb = rgb
assert not (semantics and rgb), "Semantics and RGB cannot be used at the same time"
self.pixel_mean = pixel_mean
self.pixel_std = pixel_std
        # horizontal flip transform (mirrors the image around its vertical axis)
self.flip_transform = transforms.RandomHorizontalFlip(p=1.0)
# init buffers
self.depth_filename: List[str] = []
self.sem_rgb_filename: List[str] = []
self.depth_imgs: List[torch.Tensor] = []
self.sem_imgs: List[torch.Tensor] = []
self.odom: torch.Tensor = None
self.goal: torch.Tensor = None
self.pair_augment: np.ndarray = None
self.fov_angle: float = 0.0
self.load_ram: bool = False
return
def update_buffers(
self,
depth_filename: List[str],
sem_rgb_filename: List[str],
odom: torch.Tensor,
goal: torch.Tensor,
pair_augment: np.ndarray,
) -> None:
self.depth_filename = depth_filename
self.sem_rgb_filename = sem_rgb_filename
self.odom = odom
self.goal = goal
self.pair_augment = pair_augment
return
def set_fov(self, fov_angle):
self.fov_angle = fov_angle
return
"""Augment Images with black polygons"""
def _add_random_polygons(self, image, nb_polygons, max_size):
for i in range(nb_polygons):
num_corners = random.randint(10, 20)
polygon_points = np.random.randint(0, max_size, size=(num_corners, 2))
x_offset = np.random.randint(0, image.shape[0])
y_offset = np.random.randint(0, image.shape[1])
polygon_points[:, 0] += x_offset
polygon_points[:, 1] += y_offset
# Create a convex hull from the points
hull = cv2.convexHull(polygon_points)
# Draw the hull on the image
cv2.fillPoly(image, [hull], (0, 0, 0))
return image
"""Load images"""
def load_data_in_memory(self) -> None:
"""Load data into RAM to speed up training"""
for idx in tqdm(range(len(self.depth_filename)), desc="Load images into RAM"):
self.depth_imgs.append(self._load_depth_img(idx))
if self.semantics or self.rgb:
self.sem_imgs.append(self._load_sem_rgb_img(idx))
self.load_ram = True
return
def _load_depth_img(self, idx) -> torch.Tensor:
if self.depth_filename[idx].endswith(".png"):
depth_image = Image.open(self.depth_filename[idx])
if self._cfg.real_world_data:
depth_image = np.array(depth_image.transpose(PIL.Image.ROTATE_180))
else:
depth_image = np.array(depth_image)
else:
depth_image = np.load(self.depth_filename[idx])
depth_image[~np.isfinite(depth_image)] = 0.0
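        # scale raw depth (assumed to be stored in millimeters) to meters; values beyond max_depth are zeroed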
depth_image = (depth_image / 1000.0).astype("float32")
depth_image[depth_image > self._cfg.max_depth] = 0.0
# add noise to depth image
if self._cfg.depth_salt_pepper or self._cfg.depth_gaussian:
depth_norm = (depth_image - np.min(depth_image)) / (np.max(depth_image) - np.min(depth_image))
if self._cfg.depth_salt_pepper:
depth_norm = random_noise(
depth_norm,
mode="s&p",
amount=self._cfg.depth_salt_pepper,
clip=False,
)
if self._cfg.depth_gaussian:
depth_norm = random_noise(
depth_norm,
mode="gaussian",
mean=0,
var=self._cfg.depth_gaussian,
clip=False,
)
depth_image = depth_norm * (np.max(depth_image) - np.min(depth_image)) + np.min(depth_image)
if self._cfg.depth_random_polygons_nb and self._cfg.depth_random_polygons_nb > 0:
depth_image = self._add_random_polygons(
depth_image,
self._cfg.depth_random_polygons_nb,
self._cfg.depth_random_polygon_size,
)
# transform depth image
depth_image = self.transform(depth_image).type(torch.float32)
if self.pair_augment[idx]:
depth_image = self.flip_transform.forward(depth_image)
return depth_image
def _load_sem_rgb_img(self, idx) -> torch.Tensor:
image = Image.open(self.sem_rgb_filename[idx])
if self._cfg.real_world_data:
image = np.array(image.transpose(PIL.Image.ROTATE_180))
else:
image = np.array(image)
# normalize image
if self.pixel_mean is not None and self.pixel_std is not None:
image = (image - self.pixel_mean) / self.pixel_std
# add noise to semantic image
if self._cfg.sem_rgb_black_img:
if random.randint(0, 99) < self._cfg.sem_rgb_black_img * 100:
image = np.zeros_like(image)
if self._cfg.sem_rgb_pepper:
image = random_noise(
image,
mode="pepper",
                amount=self._cfg.sem_rgb_pepper,
clip=False,
)
if self._cfg.sem_rgb_random_polygons_nb and self._cfg.sem_rgb_random_polygons_nb > 0:
image = self._add_random_polygons(
image,
self._cfg.sem_rgb_random_polygons_nb,
self._cfg.sem_rgb_random_polygon_size,
)
# transform semantic image
image = self.transform(image).type(torch.float32)
assert image.round(decimals=1).max() <= 1.0, (
f"Image '{self.sem_rgb_filename[idx]}' is not normalized with max" f" value {image.max().item()}"
)
if self.pair_augment[idx]:
image = self.flip_transform.forward(image)
return image
"""Get image in training"""
def __len__(self):
return len(self.depth_filename)
def __getitem__(self, idx):
"""
Get batch items
Returns:
- depth_image: depth image
- sem_rgb_image: semantic image
- odom: odometry of the start pose (point and rotation)
- goal: goal point in the camera frame
- pair_augment: bool if the pair is augmented (flipped at the y-axis of the image)
"""
# get depth image
if self.load_ram:
depth_image = self.depth_imgs[idx]
if self.semantics or self.rgb:
sem_rgb_image = self.sem_imgs[idx]
else:
sem_rgb_image = 0
else:
depth_image = self._load_depth_img(idx)
if self.semantics or self.rgb:
sem_rgb_image = self._load_sem_rgb_img(idx)
else:
sem_rgb_image = 0
return (
depth_image,
sem_rgb_image,
self.odom[idx],
self.goal[idx],
self.pair_augment[idx],
)
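# Minimal usage sketch (illustrative; the DataLoader settings below are
# assumptions, not fixed by this module). Buffers are normally filled via
# `update_buffers`, typically from PlannerDataGenerator.split_samples:
#
#   dataset = PlannerData(cfg=DataCfg(), transform=transforms.Compose([transforms.ToTensor()]))
#   # ... fill buffers via PlannerDataGenerator.split_samples(test_dataset=dataset, ...) ...
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)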
class DistanceSchemeIdx:
def __init__(self, distance: float) -> None:
self.distance: float = distance
self.odom_list: List[pp.LieTensor] = []
self.goal_list: List[pp.LieTensor] = []
self.pair_within_fov: List[bool] = []
self.pair_front_of_robot: List[bool] = []
self.pair_behind_robot: List[bool] = []
self.depth_img_list: List[str] = []
self.sem_rgb_img_list: List[str] = []
# flags
self.has_data: bool = False
return
def update_buffers(
self,
odom: pp.LieTensor,
goal: pp.LieTensor,
within_fov: bool = False,
front_of_robot: bool = False,
behind_robot: bool = False,
depth_filename: str = None,
sem_rgb_filename: str = None,
) -> None:
self.odom_list.append(odom)
self.goal_list.append(goal)
self.pair_within_fov.append(within_fov)
self.pair_front_of_robot.append(front_of_robot)
self.pair_behind_robot.append(behind_robot)
self.depth_img_list.append(depth_filename)
self.sem_rgb_img_list.append(sem_rgb_filename)
self.has_data = len(self.odom_list) > 0
return
def get_data(
self,
nb_fov: int,
nb_front: int,
nb_back: int,
augment: bool = True,
    ) -> Tuple[torch.Tensor, torch.Tensor, List[str], List[str], np.ndarray]:
assert self.has_data, f"DistanceSchemeIdx for distance {self.distance} has no data"
# get all pairs that are within the fov
idx_fov = np.where(self.pair_within_fov)[0]
idx_front = np.where(self.pair_front_of_robot)[0]
idx_back = np.where(self.pair_behind_robot)[0]
idx_augment = []
# augment pairs if not enough
if len(idx_fov) == 0:
print(f"[WARNING] for distance {self.distance} no 'within_fov'" " samples")
idx_fov = np.array([], dtype=np.int64)
elif len(idx_fov) < nb_fov:
print(
f"[INFO] for distance {self.distance} not enough 'within_fov'"
f" samples ({len(idx_fov)} instead of {nb_fov})"
)
if augment:
idx_augment.append(
np.random.choice(
idx_fov,
min(len(idx_fov), nb_fov - len(idx_fov)),
replace=(nb_fov - len(idx_fov) > len(idx_fov)),
)
)
else:
idx_fov = np.random.choice(idx_fov, len(idx_fov), replace=False)
else:
idx_fov = np.random.choice(idx_fov, nb_fov, replace=False)
if len(idx_front) == 0:
print(f"[WARNING] for distance {self.distance} no 'front_of_robot'" " samples")
idx_front = np.array([], dtype=np.int64)
elif len(idx_front) < nb_front:
print(
f"[INFO] for distance {self.distance} not enough"
f" 'front_of_robot' samples ({len(idx_front)} instead of"
f" {nb_front})"
)
if augment:
idx_augment.append(
np.random.choice(
idx_front,
min(len(idx_front), nb_front - len(idx_front)),
replace=(nb_front - len(idx_front) > len(idx_front)),
)
)
else:
idx_front = np.random.choice(idx_front, len(idx_front), replace=False)
else:
idx_front = np.random.choice(idx_front, nb_front, replace=False)
if len(idx_back) == 0:
print(f"[WARNING] for distance {self.distance} no 'behind_robot'" " samples")
idx_back = np.array([], dtype=np.int64)
elif len(idx_back) < nb_back:
print(
f"[INFO] for distance {self.distance} not enough"
f" 'behind_robot' samples ({len(idx_back)} instead of"
f" {nb_back})"
)
if augment:
idx_augment.append(
np.random.choice(
idx_back,
min(len(idx_back), nb_back - len(idx_back)),
replace=(nb_back - len(idx_back) > len(idx_back)),
)
)
else:
idx_back = np.random.choice(idx_back, len(idx_back), replace=False)
else:
idx_back = np.random.choice(idx_back, nb_back, replace=False)
idx = np.hstack([idx_fov, idx_front, idx_back])
# stack buffers
odom = torch.stack(self.odom_list)
goal = torch.stack(self.goal_list)
# get pairs
if idx_augment:
idx_augment = np.hstack(idx_augment)
odom = torch.vstack([odom[idx], odom[idx_augment]])
goal = torch.vstack(
[
goal[idx],
goal[idx_augment].tensor() * torch.tensor([[1, -1, 1, 1, 1, 1, 1]]),
]
)
depth_img_list = [self.depth_img_list[j] for j in idx.tolist()] + [
self.depth_img_list[i] for i in idx_augment.tolist()
]
sem_rgb_img_list = [self.sem_rgb_img_list[j] for j in idx.tolist()] + [
self.sem_rgb_img_list[i] for i in idx_augment.tolist()
]
augment = np.hstack([np.zeros(len(idx)), np.ones(len(idx_augment))])
return odom, goal, depth_img_list, sem_rgb_img_list, augment
else:
return (
odom[idx],
goal[idx],
[self.depth_img_list[j] for j in idx.tolist()],
[self.sem_rgb_img_list[j] for j in idx.tolist()],
np.zeros(len(idx)),
)
class PlannerDataGenerator(Dataset):
debug = False
mesh_size = 0.5
def __init__(
self,
cfg: DataCfg,
root: str,
semantics: bool = False,
rgb: bool = False,
cost_map: CostMapPCD = None,
) -> None:
print(
f"[INFO] PlannerDataGenerator init with semantics={semantics},"
f" rgb={rgb} for ENV {os.path.split(root)[-1]}"
)
# super().__init__()
# set parameters
self._cfg = cfg
self.root = root
self.cost_map = cost_map
self.semantics = semantics
self.rgb = rgb
assert not (self.semantics and self.rgb), "semantics and rgb cannot be true at the same time"
# init list for final odom, goal and img mapping
self.depth_filename_list = []
self.sem_rgb_filename_list = []
self.odom_depth: torch.Tensor = None
self.goal: torch.Tensor = None
self.pair_outside: np.ndarray = None
self.pair_difficult: np.ndarray = None
self.pair_augment: np.ndarray = None
self.pair_within_fov: np.ndarray = None
self.pair_front_of_robot: np.ndarray = None
self.odom_array_sem_rgb: pp.LieTensor = None
self.odom_array_depth: pp.LieTensor = None
self.odom_used: int = 0
self.odom_no_suitable_goals: int = 0
# set parameters
self._device = "cuda:0" if torch.cuda.is_available() else "cpu"
# get odom data and filter
self.load_odom()
self.filter_obs_inflation()
# noise edges in depth image --> real world Realsense difficulties along edges
if self._cfg.noise_edges:
self.noise_edges()
# find odom-goal pairs
self.get_odom_goal_pairs()
return
"""LOAD HELPER FUNCTIONS"""
def load_odom(self) -> None:
print("[INFO] Loading odom data...", end=" ")
# load odom of every image
odom_path = os.path.join(self.root, f"camera_extrinsic{self._cfg.depth_suffix}.txt")
odom_np = np.loadtxt(odom_path, delimiter=",")
self.odom_array_depth = pp.SE3(odom_np)
if self.semantics or self.rgb:
odom_path = os.path.join(self.root, f"camera_extrinsic{self._cfg.sem_suffix}.txt")
odom_np = np.loadtxt(odom_path, delimiter=",")
self.odom_array_sem_rgb = pp.SE3(odom_np)
if self.debug:
# plot odom
small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points
small_sphere.paint_uniform_color([0.4, 1.0, 0.1])
odom_vis_list = []
for i in range(len(self.odom_array_depth)):
odom_vis_list.append(
copy.deepcopy(small_sphere).translate(
(
self.odom_array_depth[i, 0],
self.odom_array_depth[i, 1],
self.odom_array_depth[i, 2],
)
)
)
odom_vis_list.append(self.cost_map.pcd_tsdf)
o3d.visualization.draw_geometries(odom_vis_list)
print("DONE!")
return
def load_images(self, root_path, domain: str = "depth"):
img_path = os.path.join(root_path, domain)
assert os.path.isdir(img_path), f"Image directory path '{img_path}' does not exist for domain" f" {domain}"
assert len(os.listdir(img_path)) > 0, f"Image directory '{img_path}' is empty for domain {domain}"
# use the more precise npy files if available
img_filename_list = [str(s) for s in Path(img_path).rglob("*.npy")]
if len(img_filename_list) == 0:
img_filename_list = [str(s) for s in Path(img_path).rglob("*.png")]
if domain == "depth":
img_filename_list.sort(key=lambda x: int(x.split("/")[-1][: -(4 + len(self._cfg.depth_suffix))]))
else:
img_filename_list.sort(key=lambda x: int(x.split("/")[-1][: -(4 + len(self._cfg.sem_suffix))]))
return img_filename_list
"""FILTER HELPER FUNCTIONS"""
def filter_obs_inflation(self) -> None:
"""
Filter odom points within the inflation range of the obstacles in the cost map.
Filtering only performed according to the position of the depth camera, due to the close position of depth and semantic camera.
"""
print(
("[INFO] Filter odom points within the inflation range of the" " obstacles in the cost map..."),
end="",
)
norm_inds, _ = self.cost_map.Pos2Ind(self.odom_array_depth[:, None, :3])
cost_grid = self.cost_map.cost_array.T.expand(self.odom_array_depth.shape[0], 1, -1, -1)
norm_inds = norm_inds.to(cost_grid.device)
oloss_M = (
F.grid_sample(
cost_grid,
norm_inds[:, None, :, :],
mode="bicubic",
padding_mode="border",
align_corners=False,
)
.squeeze(1)
.squeeze(1)
)
oloss_M = oloss_M.to(torch.float32).to("cpu")
if self.semantics or self.rgb:
points_free_space = oloss_M < self._cfg.obs_cost_height + abs(
self.cost_map.cfg.sem_cost_map.negative_reward
)
else:
points_free_space = oloss_M < self._cfg.obs_cost_height
if self._cfg.carla:
# for CARLA filter large open spaces
# Extract the x and y coordinates from the odom poses
x_coords = self.odom_array_depth.tensor()[:, 0]
y_coords = self.odom_array_depth.tensor()[:, 1]
# Filter the point cloud based on the square coordinates
mask_area_1 = (y_coords >= 100.5) & (y_coords <= 325.5) & (x_coords >= 208.9) & (x_coords <= 317.8)
mask_area_2 = (y_coords >= 12.7) & (y_coords <= 80.6) & (x_coords >= 190.3) & (x_coords <= 315.8)
mask_area_3 = (y_coords >= 10.0) & (y_coords <= 80.0) & (x_coords >= 123.56) & (x_coords <= 139.37)
combined_mask = mask_area_1 | mask_area_2 | mask_area_3 | ~points_free_space.squeeze(1)
points_free_space = (~combined_mask).unsqueeze(1)
if self.debug:
# plot odom
odom_vis_list = []
small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points
for i in range(len(self.odom_array_depth)):
if round(oloss_M[i].item(), 3) == 0.0:
small_sphere.paint_uniform_color([0.4, 0.1, 1.0]) # violette
elif points_free_space[i]:
small_sphere.paint_uniform_color([0.4, 1.0, 0.1]) # green
else:
small_sphere.paint_uniform_color([1.0, 0.4, 0.1]) # red
if self.semantics or self.rgb:
z_height = self.odom_array_depth.tensor()[i, 2] + abs(
self.cost_map.cfg.sem_cost_map.negative_reward
)
else:
z_height = self.odom_array_depth.tensor()[i, 2]
odom_vis_list.append(
copy.deepcopy(small_sphere).translate(
(
self.odom_array_depth.tensor()[i, 0],
self.odom_array_depth.tensor()[i, 1],
z_height,
)
)
)
odom_vis_list.append(self.cost_map.pcd_tsdf)
o3d.visualization.draw_geometries(odom_vis_list)
nb_odom_point_prev = len(self.odom_array_depth)
self.odom_array_depth = self.odom_array_depth[points_free_space.squeeze()]
self.nb_odom_points = self.odom_array_depth.shape[0]
# load depth image files as name list
depth_filename_list = self.load_images(self.root, "depth")
self.depth_filename_list = [
depth_filename_list[i] for i in range(len(depth_filename_list)) if points_free_space[i]
]
if self.semantics:
self.odom_array_sem_rgb = self.odom_array_sem_rgb[points_free_space.squeeze()]
sem_rgb_filename_list = self.load_images(self.root, "semantics")
self.sem_rgb_filename_list = [
sem_rgb_filename_list[i] for i in range(len(sem_rgb_filename_list)) if points_free_space[i]
]
elif self.rgb:
self.odom_array_sem_rgb = self.odom_array_sem_rgb[points_free_space.squeeze()]
sem_rgb_filename_list = self.load_images(self.root, "rgb")
self.sem_rgb_filename_list = [
sem_rgb_filename_list[i] for i in range(len(sem_rgb_filename_list)) if points_free_space[i]
]
assert len(self.depth_filename_list) != 0, "No depth images left after filtering"
print("DONE!")
print(
"[INFO] odom points outside obs inflation :"
f" \t{self.nb_odom_points} ({round(self.nb_odom_points/nb_odom_point_prev*100, 2)} %)"
)
return
"""GENERATE SAMPLES"""
def get_odom_goal_pairs(self) -> None:
# get fov
        self.get_intrinsics_and_fov()
# construct graph
self.get_graph()
# get pairs
self.get_pairs()
# free up memory
self.odom_array_depth = self.odom_array_sem_rgb = None
return
    def compute_ratios(self) -> Tuple[float, float, float, float, float, float]:
# ratio of general samples distribution
num_within_fov = self.odom_depth[self.pair_within_fov].shape[0]
ratio_fov = num_within_fov / self.odom_depth.shape[0]
ratio_front = np.sum(self.pair_front_of_robot) / self.odom_depth.shape[0]
ratio_back = 1 - ratio_front - ratio_fov
# samples ratios within fov samples
num_easy = (
num_within_fov
- self.pair_difficult[self.pair_within_fov].sum().item()
- self.pair_outside[self.pair_within_fov].sum().item()
)
ratio_easy = num_easy / num_within_fov
ratio_hard = self.pair_difficult[self.pair_within_fov].sum().item() / num_within_fov
ratio_outside = self.pair_outside[self.pair_within_fov].sum().item() / num_within_fov
return (
ratio_fov,
ratio_front,
ratio_back,
ratio_easy,
ratio_hard,
ratio_outside,
)
    def get_intrinsics_and_fov(self) -> None:
# load intrinsics
intrinsic_path = os.path.join(self.root, "intrinsics.txt")
P = np.loadtxt(intrinsic_path, delimiter=",") # assumes ROS P matrix
self.K_depth = P[0].reshape(3, 4)[:3, :3]
self.K_sem_rgb = P[1].reshape(3, 4)[:3, :3]
        # horizontal FOV of a pinhole camera: fov = 2 * atan((W / 2) / fx), with cx ~ W / 2
        self.alpha_fov = 2 * math.atan(self.K_depth[0, 2] / self.K_depth[0, 0])
return
def get_graph(self) -> None:
num_connections = 3
num_intermediate = 3
        # get occupancy map from tsdf map
cost_array = self.cost_map.tsdf_array.cpu().numpy()
if self.semantics or self.rgb:
occupancy_map = (
cost_array > self._cfg.obs_cost_height + abs(self.cost_map.cfg.sem_cost_map.negative_reward)
).astype(np.uint8)
else:
occupancy_map = (cost_array > self._cfg.obs_cost_height).astype(np.uint8)
# construct kdtree to find nearest neighbors of points
odom_points = self.odom_array_depth.data[:, :2].data.cpu().numpy()
kdtree = KDTree(odom_points)
_, nearest_neighbors_idx = kdtree.query(odom_points, k=num_connections + 1, workers=-1)
# remove first neighbor as it is the point itself
nearest_neighbors_idx = nearest_neighbors_idx[:, 1:]
# define origin and neighbor points
origin_point = np.repeat(odom_points, repeats=num_connections, axis=0)
neighbor_points = odom_points[nearest_neighbors_idx, :].reshape(-1, 2)
# interpolate points between origin and neighbor points
x_interp = (
origin_point[:, None, 0]
+ (neighbor_points[:, 0] - origin_point[:, 0])[:, None]
* np.linspace(0, 1, num=num_intermediate + 1, endpoint=False)[1:]
)
y_interp = (
origin_point[:, None, 1]
+ (neighbor_points[:, 1] - origin_point[:, 1])[:, None]
* np.linspace(0, 1, num=num_intermediate + 1, endpoint=False)[1:]
)
inter_points = np.stack((x_interp.reshape(-1), y_interp.reshape(-1)), axis=1)
# get the indices of the interpolated points in the occupancy map
occupancy_idx = (
inter_points - np.array([self.cost_map.cfg.x_start, self.cost_map.cfg.y_start])
) / self.cost_map.cfg.general.resolution
# check occupancy for collisions at the interpolated points
collision = occupancy_map[
occupancy_idx[:, 0].astype(np.int64),
occupancy_idx[:, 1].astype(np.int64),
]
collision = np.any(collision.reshape(-1, num_intermediate), axis=1)
# get edge indices
idx_edge_start = np.repeat(np.arange(odom_points.shape[0]), repeats=num_connections, axis=0)
idx_edge_end = nearest_neighbors_idx.reshape(-1)
# filter collision edges
idx_edge_end = idx_edge_end[~collision]
idx_edge_start = idx_edge_start[~collision]
# init graph
self.graph = nx.Graph()
# add nodes with position attributes
self.graph.add_nodes_from(list(range(odom_points.shape[0])))
pos_attr = {i: {"pos": odom_points[i]} for i in range(odom_points.shape[0])}
nx.set_node_attributes(self.graph, pos_attr)
# add edges with distance attributes
self.graph.add_edges_from(list(map(tuple, np.stack((idx_edge_start, idx_edge_end), axis=1))))
distance_attr = {
(i, j): {"distance": np.linalg.norm(odom_points[i] - odom_points[j])}
for i, j in zip(idx_edge_start, idx_edge_end)
}
nx.set_edge_attributes(self.graph, distance_attr)
# DEBUG
if self.debug:
import matplotlib.pyplot as plt
nx.draw_networkx(
self.graph,
nx.get_node_attributes(self.graph, "pos"),
node_size=10,
with_labels=False,
node_color=[0.0, 1.0, 0.0],
)
plt.show()
return
def get_pairs(self):
# iterate over all odom points and find goal points
self.odom_no_suitable_goals = 0
self.odom_used = 0
# init semantic warp parameters
if self.semantics or self.rgb:
# compute pixel tensor
depth_filename = self.depth_filename_list[0]
depth_img = self._load_depth_image(depth_filename)
x_nums, y_nums = depth_img.shape
self.pix_depth_cam_frame = self.compute_pixel_tensor(x_nums, y_nums, self.K_depth)
# make dir
os.makedirs(os.path.join(self.root, "img_warp"), exist_ok=True)
# get distances between odom and goal points
odom_goal_distances = dict(
nx.all_pairs_dijkstra_path_length(
self.graph,
cutoff=self._cfg.max_goal_distance,
weight="distance",
)
)
# init dataclass for each entry in the distance scheme
self.category_scheme_pairs: Dict[float, DistanceSchemeIdx] = {
distance: DistanceSchemeIdx(distance=distance) for distance in self._cfg.distance_scheme.keys()
}
# iterate over all odom points
for odom_idx in tqdm(range(self.nb_odom_points), desc="Start-End Pairs Generation"):
odom = self.odom_array_depth[odom_idx]
# transform all odom points to current odom frame
goals = pp.Inv(odom) @ self.odom_array_depth
# categorize goals
(
within_fov,
front_of_robot,
behind_robot,
) = self.get_goal_categories(
goals
) # returns goals in odom frame
# filter odom if no suitable goals within the fov are found
if within_fov.sum() == 0:
self.odom_no_suitable_goals += 1
continue
self.odom_used += 1
if self.semantics or self.rgb:
# semantic warp
img_new_path = self._get_overlay_img(odom_idx)
else:
img_new_path = None
# get pair according to distance scheme for each category
self.reduce_pairs(
odom_idx,
goals,
within_fov,
odom_goal_distances[odom_idx],
img_new_path,
within_fov=True,
)
self.reduce_pairs(
odom_idx,
goals,
behind_robot,
odom_goal_distances[odom_idx],
img_new_path,
behind_robot=True,
)
self.reduce_pairs(
odom_idx,
goals,
front_of_robot,
odom_goal_distances[odom_idx],
img_new_path,
front_of_robot=True,
)
# DEBUG
if self.debug:
# plot odom
small_sphere = o3d.geometry.TriangleMesh.create_sphere(
self.mesh_size / 3.0
) # successful trajectory points
odom_vis_list = []
goal_odom = odom @ goals
hit_pcd = (goal_odom).cpu().numpy()[:, :3]
for idx, pts in enumerate(hit_pcd):
if within_fov[idx]:
small_sphere.paint_uniform_color([0.4, 1.0, 0.1])
elif front_of_robot[idx]:
small_sphere.paint_uniform_color([0.0, 0.5, 0.5])
else:
small_sphere.paint_uniform_color([0.0, 0.1, 1.0])
odom_vis_list.append(copy.deepcopy(small_sphere).translate((pts[0], pts[1], pts[2])))
# viz cost map
odom_vis_list.append(self.cost_map.pcd_tsdf)
# field of view visualization
fov_vis_length = 0.75 # length of the fov visualization plane in meters
fov_vis_pt_right = odom @ pp.SE3(
[
fov_vis_length * np.cos(self.alpha_fov / 2),
fov_vis_length * np.sin(self.alpha_fov / 2),
0,
0,
0,
0,
1,
]
)
fov_vis_pt_left = odom @ pp.SE3(
[
fov_vis_length * np.cos(self.alpha_fov / 2),
-fov_vis_length * np.sin(self.alpha_fov / 2),
0,
0,
0,
0,
1,
]
)
fov_vis_pt_right = fov_vis_pt_right.numpy()[:3]
fov_vis_pt_left = fov_vis_pt_left.numpy()[:3]
fov_mesh = o3d.geometry.TriangleMesh(
vertices=o3d.utility.Vector3dVector(
np.array(
[
odom.data.cpu().numpy()[:3],
fov_vis_pt_right,
fov_vis_pt_left,
]
)
),
triangles=o3d.utility.Vector3iVector(np.array([[2, 1, 0]])),
)
fov_mesh.paint_uniform_color([1.0, 0.5, 0.0])
odom_vis_list.append(fov_mesh)
# odom viz
small_sphere.paint_uniform_color([1.0, 0.0, 0.0])
odom_vis_list.append(
copy.deepcopy(small_sphere).translate(
(
odom.data[0].item(),
odom.data[1].item(),
odom.data[2].item(),
)
)
)
# plot goal
o3d.visualization.draw_geometries(odom_vis_list)
if self.debug:
small_sphere = o3d.geometry.TriangleMesh.create_sphere(self.mesh_size / 3.0) # successful trajectory points
odom_vis_list = []
for distance in self._cfg.distance_scheme.keys():
odoms = torch.vstack(self.category_scheme_pairs[distance].odom_list)
odoms = odoms.tensor().cpu().numpy()[:, :3]
for idx, odom in enumerate(odoms):
odom_vis_list.append(copy.deepcopy(small_sphere).translate((odom[0], odom[1], odom[2])))
if idx > 10:
break
# viz cost map
odom_vis_list.append(self.cost_map.pcd_tsdf)
# plot goal
o3d.visualization.draw_geometries(odom_vis_list)
return
def reduce_pairs(
self,
odom_idx: int,
goals: pp.LieTensor,
decision_tensor: torch.Tensor,
odom_distances: dict,
warp_img_path: Optional[str],
within_fov: bool = False,
behind_robot: bool = False,
front_of_robot: bool = False,
):
# remove all goals depending on the decision tensor from the odom_distances dict
keep_distance_entries = decision_tensor[list(odom_distances.keys())]
distances = np.array(list(odom_distances.values()))[keep_distance_entries.numpy()]
goal_idx = np.array(list(odom_distances.keys()))[keep_distance_entries.numpy()]
        # max distance is already enforced via the Dijkstra cutoff in odom_distances; here enforce the min distance
within_distance_idx = distances > self._cfg.min_goal_distance
goal_idx = goal_idx[within_distance_idx]
distances = distances[within_distance_idx]
# check if there are any goals left
if len(goal_idx) == 0:
return
# select the goal according to the distance_scheme
for distance in self._cfg.distance_scheme.keys():
# select nbr_samples from goals within distance
within_curr_distance_idx = distances < distance
if sum(within_curr_distance_idx) == 0:
continue
selected_idx = np.random.choice(
goal_idx[within_curr_distance_idx],
min(3, sum(within_curr_distance_idx)),
replace=False,
)
# remove the selected goals from the list for further selection
distances = distances[~within_curr_distance_idx]
goal_idx = goal_idx[~within_curr_distance_idx]
for idx in selected_idx:
self.category_scheme_pairs[distance].update_buffers(
odom=self.odom_array_depth[odom_idx],
goal=goals[idx],
within_fov=within_fov,
front_of_robot=front_of_robot,
behind_robot=behind_robot,
depth_filename=self.depth_filename_list[odom_idx],
sem_rgb_filename=warp_img_path,
)
def get_goal_categories(self, goal_odom_frame: pp.LieTensor):
"""
Decide which of the samples are within the fov, in front of the robot or behind the robot.
"""
# get if odom-goal is within fov or outside the fov but still in front of the robot
goal_angle = abs(torch.atan2(goal_odom_frame.data[:, 1], goal_odom_frame.data[:, 0]))
within_fov = goal_angle < self.alpha_fov / 2 * self._cfg.fov_scale
front_of_robot = goal_angle < torch.pi / 2
front_of_robot[within_fov] = False
behind_robot = ~front_of_robot.clone()
behind_robot[within_fov] = False
return within_fov, front_of_robot, behind_robot
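    # Example with illustrative numbers: for alpha_fov = pi/2 and fov_scale = 1.0,
    # a goal at a bearing of 30 deg falls in `within_fov`, one at 60 deg in
    # `front_of_robot`, and one at 120 deg in `behind_robot`; the three masks
    # are mutually exclusive by construction.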
"""SPLIT HELPER FUNCTIONS"""
def split_samples(
self,
test_dataset: PlannerData,
train_dataset: Optional[PlannerData] = None,
generate_split: bool = False,
ratio_fov_samples: Optional[float] = None,
ratio_front_samples: Optional[float] = None,
ratio_back_samples: Optional[float] = None,
allow_augmentation: bool = True,
) -> None:
# check if ratios are given or defaults are used
ratio_fov_samples = ratio_fov_samples if ratio_fov_samples is not None else self._cfg.ratio_fov_samples
ratio_front_samples = ratio_front_samples if ratio_front_samples is not None else self._cfg.ratio_front_samples
ratio_back_samples = ratio_back_samples if ratio_back_samples is not None else self._cfg.ratio_back_samples
assert round(ratio_fov_samples + ratio_front_samples + ratio_back_samples, 2) == 1.0, (
"Sample ratios must sum up to 1.0, currently"
f" {ratio_back_samples + ratio_front_samples + ratio_fov_samples}"
)
# max sample number
if self._cfg.max_train_pairs:
max_sample_number = min(
int(self._cfg.max_train_pairs / self._cfg.ratio),
int(self.odom_used * self._cfg.pairs_per_image),
)
else:
max_sample_number = int(self.odom_used * self._cfg.pairs_per_image)
# init buffers
odom = torch.zeros((max_sample_number, 7), dtype=torch.float32)
goal = torch.zeros((max_sample_number, 7), dtype=torch.float32)
augment_samples = np.zeros((max_sample_number), dtype=bool)
depth_filename = []
sem_rgb_filename = []
current_idx = 0
for distance, distance_percentage in self._cfg.distance_scheme.items():
if not self.category_scheme_pairs[distance].has_data:
print(f"[WARN] No samples for distance {distance} in ENV" f" {os.path.split(self.root)[-1]}")
continue
# get number of samples
buffer_data = self.category_scheme_pairs[distance].get_data(
nb_fov=int(ratio_fov_samples * distance_percentage * max_sample_number),
nb_front=int(ratio_front_samples * distance_percentage * max_sample_number),
nb_back=int(ratio_back_samples * distance_percentage * max_sample_number),
augment=allow_augmentation,
)
nb_samples = buffer_data[0].shape[0]
# add to buffers
odom[current_idx : current_idx + nb_samples] = buffer_data[0]
goal[current_idx : current_idx + nb_samples] = buffer_data[1]
depth_filename += buffer_data[2]
sem_rgb_filename += buffer_data[3]
augment_samples[current_idx : current_idx + nb_samples] = buffer_data[4]
current_idx += nb_samples
# cut off unused space
odom = odom[:current_idx]
goal = goal[:current_idx]
augment_samples = augment_samples[:current_idx]
# print data mix
print(
f"[INFO] datamix containing {odom.shape[0]} suitable odom-goal"
" pairs: \n"
"\t fov :"
f" \t{int(odom.shape[0] * ratio_fov_samples) } ({round(ratio_fov_samples*100, 2)} %) \n"
"\t front of robot :"
f" \t{int(odom.shape[0] * ratio_front_samples)} ({round(ratio_front_samples*100, 2)} %) \n"
"\t back of robot :"
f" \t{int(odom.shape[0] * ratio_back_samples) } ({round(ratio_back_samples*100, 2)} %) \n"
"from"
f" {self.odom_used} ({round(self.odom_used/self.nb_odom_points*100, 2)} %)"
" different starting points where \n"
"\t non-suitable filter:"
f" {self.odom_no_suitable_goals} ({round(self.odom_no_suitable_goals/self.nb_odom_points*100, 2)} %)"
)
# generate split
idx = np.arange(odom.shape[0])
if generate_split:
train_index = sample(idx.tolist(), int(len(idx) * self._cfg.ratio))
idx = np.delete(idx, train_index)
train_dataset.update_buffers(
depth_filename=[depth_filename[i] for i in train_index],
sem_rgb_filename=([sem_rgb_filename[i] for i in train_index] if (self.semantics or self.rgb) else None),
odom=odom[train_index],
goal=goal[train_index],
pair_augment=augment_samples[train_index],
)
train_dataset.set_fov(self.alpha_fov)
test_dataset.update_buffers(
depth_filename=[depth_filename[i] for i in idx],
sem_rgb_filename=([sem_rgb_filename[i] for i in idx] if (self.semantics or self.rgb) else None),
odom=odom[idx],
goal=goal[idx],
pair_augment=augment_samples[idx],
)
test_dataset.set_fov(self.alpha_fov)
return
""" Warp semantic on depth image helper functions"""
@staticmethod
    def compute_pixel_tensor(x_nums: int, y_nums: int, K_depth: np.ndarray) -> np.ndarray:
# get image plane mesh grid
pix_u = np.arange(0, y_nums)
pix_v = np.arange(0, x_nums)
grid = np.meshgrid(pix_u, pix_v)
pixels = np.vstack(list(map(np.ravel, grid))).T
pixels = np.hstack([pixels, np.ones((len(pixels), 1))]) # add ones for 3D coordinates
# transform to camera frame
k_inv = np.linalg.inv(K_depth)
pix_cam_frame = np.matmul(k_inv, pixels.T)
# reorder to be in "robotics" axis order (x forward, y left, z up)
return pix_cam_frame[[2, 0, 1], :].T * np.array([1, -1, -1])
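    # Shape note: for an (x_nums, y_nums) depth image the returned array is
    # (x_nums * y_nums, 3) and holds unit-depth ray directions in the robotics
    # convention (x forward, y left, z up), so multiplying by the flattened
    # depth values recovers metric 3D points in the depth-camera frame
    # (see compute_overlay below).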
def _load_depth_image(self, depth_filename):
if depth_filename.endswith(".png"):
depth_image = Image.open(depth_filename)
if self._cfg.real_world_data:
depth_image = np.array(depth_image.transpose(PIL.Image.ROTATE_180))
else:
depth_image = np.array(depth_image)
else:
depth_image = np.load(depth_filename)
depth_image[~np.isfinite(depth_image)] = 0.0
depth_image = (depth_image / self._cfg.depth_scale).astype("float32")
depth_image[depth_image > self._cfg.max_depth] = 0.0
return depth_image
@staticmethod
def compute_overlay(
pose_dep,
pose_sem,
depth_img,
sem_rgb_image,
pix_depth_cam_frame,
K_sem_rgb,
):
# get 3D points of depth image
rot = tf.Rotation.from_quat(pose_dep[3:]).as_matrix()
dep_im_reshaped = depth_img.reshape(
-1, 1
) # flip s.t. start in lower left corner of image as (0,0) -> has to fit to the pixel tensor
points = dep_im_reshaped * (rot @ pix_depth_cam_frame.T).T + pose_dep[:3]
# transform points to semantic camera frame
points_sem_cam_frame = (tf.Rotation.from_quat(pose_sem[3:]).as_matrix().T @ (points - pose_sem[:3]).T).T
# normalize points
points_sem_cam_frame_norm = points_sem_cam_frame / points_sem_cam_frame[:, 0][:, np.newaxis]
# reorder points be camera convention (z-forward)
points_sem_cam_frame_norm = points_sem_cam_frame_norm[:, [1, 2, 0]] * np.array([-1, -1, 1])
# transform points to pixel coordinates
pixels = (K_sem_rgb @ points_sem_cam_frame_norm.T).T
# filter points outside of image
filter_idx = (
(pixels[:, 0] >= 0)
& (pixels[:, 0] < sem_rgb_image.shape[1])
& (pixels[:, 1] >= 0)
& (pixels[:, 1] < sem_rgb_image.shape[0])
)
# get semantic annotation
sem_annotation = np.zeros((pixels.shape[0], 3), dtype=np.uint8)
sem_annotation[filter_idx] = sem_rgb_image[
pixels[filter_idx, 1].astype(int),
pixels[filter_idx, 0].astype(int),
]
# reshape to image
return sem_annotation.reshape(depth_img.shape[0], depth_img.shape[1], 3)
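    # compute_overlay is a depth-based reprojection: depth pixels are lifted to
    # 3D world points, transformed into the semantic camera frame, projected
    # with K_sem_rgb, and the semantic/RGB colors are sampled at the resulting
    # pixel coordinates; points that fall outside the image remain black.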
def _get_overlay_img(self, odom_idx):
# get corresponding filenames
depth_filename = self.depth_filename_list[odom_idx]
sem_rgb_filename = self.sem_rgb_filename_list[odom_idx]
# load semantic and depth image and get their poses
depth_img = self._load_depth_image(depth_filename)
sem_rgb_image = Image.open(sem_rgb_filename)
if self._cfg.real_world_data:
sem_rgb_image = np.array(sem_rgb_image.transpose(PIL.Image.ROTATE_180))
else:
sem_rgb_image = np.array(sem_rgb_image)
pose_dep = self.odom_array_depth[odom_idx].data.cpu().numpy()
pose_sem = self.odom_array_sem_rgb[odom_idx].data.cpu().numpy()
sem_rgb_image_warped = self.compute_overlay(
pose_dep,
pose_sem,
depth_img,
sem_rgb_image,
self.pix_depth_cam_frame,
self.K_sem_rgb,
)
assert sem_rgb_image_warped.dtype == np.uint8, "sem_rgb_image_warped has to be uint8"
# DEBUG
if self.debug:
import matplotlib.pyplot as plt
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
ax1.imshow(depth_img)
ax2.imshow(sem_rgb_image_warped / 255)
ax3.imshow(sem_rgb_image)
# ax3.imshow(depth_img)
# ax3.imshow(sem_rgb_image_warped / 255, alpha=0.5)
ax1.axis("off")
ax2.axis("off")
ax3.axis("off")
plt.show()
# save semantic image under the new path
sem_rgb_filename = os.path.split(sem_rgb_filename)[1]
sem_rgb_image_path = os.path.join(self.root, "img_warp", sem_rgb_filename)
sem_rgb_image_warped = cv2.cvtColor(sem_rgb_image_warped, cv2.COLOR_RGB2BGR) # convert to BGR for cv2
assert cv2.imwrite(sem_rgb_image_path, sem_rgb_image_warped)
return sem_rgb_image_path
"""Noise Edges helper functions"""
def noise_edges(self):
"""
Along the edges in the depth image, set the values to 0.
Mimics the real-world behavior where RealSense depth cameras have difficulties along edges.
"""
print("[INFO] Adding noise to edges in depth images ...", end=" ")
new_depth_filename_list = []
# create new directory
depth_noise_edge_dir = os.path.join(self.root, "depth_noise_edges")
os.makedirs(depth_noise_edge_dir, exist_ok=True)
for depth_filename in self.depth_filename_list:
depth_img = self._load_depth_image(depth_filename)
# Perform Canny edge detection
            image = ((depth_img / depth_img.max()) * 255).astype(np.uint8)  # convert to 8-bit (CV_8U) format for Canny
edges = cv2.Canny(image, self._cfg.edge_threshold, self._cfg.edge_threshold * 3)
# Dilate the edges to extend their space
kernel = np.ones(self._cfg.extend_kernel_size, np.uint8)
dilated_edges = cv2.dilate(edges, kernel, iterations=1)
# Erode the edges to refine their shape
eroded_edges = cv2.erode(dilated_edges, kernel, iterations=1)
# modify depth image
depth_img[eroded_edges == 255] = 0.0
# save depth image
depth_img = (depth_img * self._cfg.depth_scale).astype("uint16")
if depth_filename.endswith(".png"):
assert cv2.imwrite(
os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1]),
depth_img,
)
else:
np.save(
os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1]),
depth_img,
)
new_depth_filename_list.append(os.path.join(depth_noise_edge_dir, os.path.split(depth_filename)[1]))
self.depth_filename_list = new_depth_filename_list
print("Done!")
return
""" Cleanup Script for files generated by this class"""
def cleanup(self):
print(
("[INFO] Cleaning up for environment" f" {os.path.split(self.root)[1]} ..."),
end=" ",
)
# remove semantic_warp directory
if os.path.isdir(os.path.join(self.root, "img_warp")):
shutil.rmtree(os.path.join(self.root, "img_warp"))
# remove depth_noise_edges directory
if os.path.isdir(os.path.join(self.root, "depth_noise_edges")):
shutil.rmtree(os.path.join(self.root, "depth_noise_edges"))
print("Done!")
return
# EoF
| 51,127 | Python | 38.481081 | 135 | 0.542707 |
leggedrobotics/viplanner/viplanner/utils/torchutil.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import torch.fft
class EarlyStopScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(
self,
optimizer,
mode="min",
factor=0.1,
patience=10,
verbose=False,
threshold=1e-4,
threshold_mode="rel",
cooldown=0,
min_lr=0,
eps=1e-8,
):
super().__init__(
optimizer=optimizer,
mode=mode,
factor=factor,
patience=patience,
threshold=threshold,
threshold_mode=threshold_mode,
cooldown=cooldown,
min_lr=min_lr,
eps=eps,
verbose=verbose,
)
self.no_decrease = 0
def step(self, metrics, epoch=None):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
if epoch is None:
epoch = self.last_epoch = self.last_epoch + 1
self.last_epoch = epoch
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
        if self.num_bad_epochs > self.patience:
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
            # propagate the early-stop signal (True once the LR cannot shrink further)
            return self._reduce_lr(epoch)
        return False
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group["lr"])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group["lr"] = new_lr
if self.verbose:
print("Epoch {:5d}: reducing learning rate" " of group {} to {:.4e}.".format(epoch, i, new_lr))
return False
else:
return True
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
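if __name__ == "__main__":
    # Minimal sketch of the early-stopping behavior on a toy model; the loss
    # sequence below is hand-picked (an assumption) so that the learning rate
    # is reduced once and then cannot shrink further, which triggers the stop.
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = EarlyStopScheduler(optimizer, factor=0.1, patience=1, min_lr=0.05)
    for epoch, loss in enumerate([1.0, 0.9, 0.95, 0.95, 0.95, 0.95]):
        if scheduler.step(loss):
            print(f"Early stopping at epoch {epoch}")
            break
    print(f"Trainable parameters: {count_parameters(model)}")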
| 2,233 | Python | 28.394736 | 115 | 0.543215 |
leggedrobotics/viplanner/viplanner/utils/trainer.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import contextlib
# python
import os
from typing import List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torchvision.transforms as transforms
import tqdm
import wandb # logging
import yaml
# imperative-planning-learning
from viplanner.config import TrainCfg
from viplanner.plannernet import (
PRE_TRAIN_POSSIBLE,
AutoEncoder,
DualAutoEncoder,
get_m2f_cfg,
)
from viplanner.traj_cost_opt import TrajCost, TrajViz
from viplanner.utils.torchutil import EarlyStopScheduler, count_parameters
from .dataset import PlannerData, PlannerDataGenerator
torch.set_default_dtype(torch.float32)
class Trainer:
"""
VIPlanner Trainer
"""
def __init__(self, cfg: TrainCfg) -> None:
self._cfg = cfg
# set model save/load path
os.makedirs(self._cfg.curr_model_dir, exist_ok=True)
self.model_path = os.path.join(self._cfg.curr_model_dir, "model.pt")
if self._cfg.hierarchical:
self.model_dir_hierarch = os.path.join(self._cfg.curr_model_dir, "hierarchical")
os.makedirs(self.model_dir_hierarch, exist_ok=True)
self.hierach_losses = {}
# image transforms
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize((self._cfg.img_input_size), antialias=True),
]
)
# init buffers DATA
self.data_generators: List[PlannerDataGenerator] = []
self.data_traj_cost: List[TrajCost] = []
self.data_traj_viz: List[TrajViz] = []
self.fov_ratio: float = None
self.front_ratio: float = None
self.back_ratio: float = None
self.pixel_mean: np.ndarray = None
self.pixel_std: np.ndarray = None
        # init buffers MODEL
self.best_loss = float("inf")
self.test_loss = float("inf")
self.net: nn.Module = None
self.optimizer: optim.Optimizer = None
self.scheduler: EarlyStopScheduler = None
print("[INFO] Trainer initialized")
return
"""PUBLIC METHODS"""
def train(self) -> None:
print("[INFO] Start Training")
# init logging
self._init_logging()
# load model and prepare model for training
self._load_model(self._cfg.resume)
self._configure_optimizer()
# get dataloader for training
self._load_data(train=True)
if self._cfg.hierarchical:
step_counter = 0
train_loader_list, val_loader_list = self._get_dataloader(step=step_counter)
else:
train_loader_list, val_loader_list = self._get_dataloader()
try:
wandb.watch(self.net)
except: # noqa: E722
print("[WARNING] Wandb model watch failed")
for epoch in range(self._cfg.epochs):
train_loss = 0
val_loss = 0
for i in range(len(train_loader_list)):
train_loss += self._train_epoch(train_loader_list[i], epoch, env_id=i)
val_loss += self._test_epoch(val_loader_list[i], env_id=i, epoch=epoch)
train_loss /= len(train_loader_list)
val_loss /= len(train_loader_list)
try:
wandb.log(
{
"train_loss": train_loss,
"val_loss": val_loss,
"epoch": epoch,
}
)
except: # noqa: E722
print("[WARNING] Wandb logging failed")
if val_loss < self.best_loss:
print("[INFO] Save model of epoch %d" % (epoch))
torch.save((self.net.state_dict(), val_loss), self.model_path)
self.best_loss = val_loss
print("[INFO] Current val loss: %.4f" % (self.best_loss))
if self.scheduler.step(val_loss):
print("[INFO] Early Stopping!")
break
if self._cfg.hierarchical and (epoch + 1) % self._cfg.hierarchical_step == 0:
torch.save(
(self.net.state_dict(), self.best_loss),
os.path.join(
self.model_dir_hierarch,
(
f"model_ep{epoch}_fov{round(self.fov_ratio, 3)}_"
f"front{round(self.front_ratio, 3)}_"
f"back{round(self.back_ratio, 3)}.pt"
),
),
)
step_counter += 1
train_loader_list, val_loader_list = self._get_dataloader(step=step_counter)
self.hierach_losses[epoch] = self.best_loss
torch.cuda.empty_cache()
# cleanup data
for generator in self.data_generators:
generator.cleanup()
# empty buffers
self.data_generators = []
self.data_traj_cost = []
self.data_traj_viz = []
return
def test(self, step: Optional[int] = None) -> None:
print("[INFO] Start Training")
# set random seed for reproducibility
torch.manual_seed(self._cfg.seed)
# define step
if step is None and self._cfg.hierarchical:
step = int(self._cfg.epochs / self._cfg.hierarchical_step)
# load model
self._load_model(resume=True)
# get dataloader for training
self._load_data(train=False)
_, test_loader = self._get_dataloader(train=False, step=step)
self.test_loss = self._test_epoch(
test_loader[0],
env_id=0,
is_visual=not os.getenv("EXPERIMENT_DIRECTORY"),
fov_angle=self.data_generators[0].alpha_fov,
dataset="test",
)
# cleanup data
for generator in self.data_generators:
generator.cleanup()
    def save_config(self) -> None:
        """Save config and loss to file"""
        print(f"[INFO] val_loss: {self.best_loss:.2f}, test_loss: {self.test_loss:.4f}")
path, _ = os.path.splitext(self.model_path)
yaml_path = path + ".yaml"
print(f"[INFO] Save config and loss to {yaml_path} file")
loss_dict = {"val_loss": self.best_loss, "test_loss": self.test_loss}
save_dict = {"config": vars(self._cfg), "loss": loss_dict}
# dump yaml
with open(yaml_path, "w+") as file:
yaml.dump(save_dict, file, allow_unicode=True, default_flow_style=False)
# logging
with contextlib.suppress(Exception):
wandb.finish()
# plot hierarchical losses
if self._cfg.hierarchical:
plt.figure(figsize=(10, 10))
plt.plot(
list(self.hierach_losses.keys()),
list(self.hierach_losses.values()),
)
plt.xlabel("Epoch")
plt.ylabel("Validation Loss")
plt.title("Hierarchical Losses")
plt.savefig(os.path.join(self.model_dir_hierarch, "hierarchical_losses.png"))
plt.close()
return
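    # Typical lifecycle (sketch; assumes a populated TrainCfg with valid data,
    # model, and logging settings):
    #
    #   trainer = Trainer(TrainCfg())
    #   trainer.train()
    #   trainer.test()
    #   trainer.save_config()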
"""PRIVATE METHODS"""
# Helper function DATA
def _load_data(self, train: bool = True) -> None:
if not isinstance(self._cfg.data_cfg, list):
self._cfg.data_cfg = [self._cfg.data_cfg] * len(self._cfg.env_list)
        assert len(self._cfg.data_cfg) == len(self._cfg.env_list), (
            "Either a single DataCfg or a number matching the number of environments must be provided"
        )
for idx, env_name in enumerate(self._cfg.env_list):
if (train and idx == self._cfg.test_env_id) or (not train and idx != self._cfg.test_env_id):
continue
data_path = os.path.join(self._cfg.data_dir, env_name)
# get trajectory cost map
traj_cost = TrajCost(
self._cfg.gpu_id,
log_data=train,
w_obs=self._cfg.w_obs,
w_height=self._cfg.w_height,
w_goal=self._cfg.w_goal,
w_motion=self._cfg.w_motion,
obstalce_thread=self._cfg.obstacle_thread,
)
traj_cost.SetMap(
data_path,
self._cfg.cost_map_name,
)
generator = PlannerDataGenerator(
cfg=self._cfg.data_cfg[idx],
root=data_path,
semantics=self._cfg.sem,
rgb=self._cfg.rgb,
cost_map=traj_cost.cost_map, # trajectory cost class
)
traj_viz = TrajViz(
intrinsics=generator.K_depth,
cam_resolution=self._cfg.img_input_size,
camera_tilt=self._cfg.camera_tilt,
cost_map=traj_cost.cost_map,
)
self.data_generators.append(generator)
self.data_traj_cost.append(traj_cost)
self.data_traj_viz.append(traj_viz)
print(f"LOADED DATA FOR ENVIRONMENT: {env_name}")
print("[INFO] LOADED ALL DATA")
return
# Helper function TRAINING
def _init_logging(self) -> None:
# logging
os.environ["WANDB_API_KEY"] = self._cfg.wb_api_key
os.environ["WANDB_MODE"] = "online"
os.makedirs(self._cfg.log_dir, exist_ok=True)
try:
wandb.init(
project=self._cfg.wb_project,
entity=self._cfg.wb_entity,
name=self._cfg.get_model_save(),
config=self._cfg.__dict__,
dir=self._cfg.log_dir,
)
except: # noqa: E722
print("[WARNING: Wandb not available")
return
def _load_model(self, resume: bool = False) -> None:
if self._cfg.sem or self._cfg.rgb:
if self._cfg.rgb and self._cfg.pre_train_sem:
assert PRE_TRAIN_POSSIBLE, (
"Pretrained model not available since either detectron2"
" not installed or mask2former not found in thrid_party"
" folder"
)
pre_train_cfg = os.path.join(self._cfg.all_model_dir, self._cfg.pre_train_cfg)
pre_train_weights = (
os.path.join(self._cfg.all_model_dir, self._cfg.pre_train_weights)
if self._cfg.pre_train_weights
else None
)
m2f_cfg = get_m2f_cfg(pre_train_cfg)
self.pixel_mean = m2f_cfg.MODEL.PIXEL_MEAN
self.pixel_std = m2f_cfg.MODEL.PIXEL_STD
else:
m2f_cfg = None
pre_train_weights = None
self.net = DualAutoEncoder(self._cfg, m2f_cfg=m2f_cfg, weight_path=pre_train_weights)
else:
self.net = AutoEncoder(self._cfg.in_channel, self._cfg.knodes)
assert torch.cuda.is_available(), "Code requires GPU"
print(f"Available GPU list: {list(range(torch.cuda.device_count()))}")
print(f"Running on GPU: {self._cfg.gpu_id}")
self.net = self.net.cuda(self._cfg.gpu_id)
print(f"[INFO] MODEL LOADED ({count_parameters(self.net)} parameters)")
if resume:
model_state_dict, self.best_loss = torch.load(self.model_path)
self.net.load_state_dict(model_state_dict)
print(f"Resume train from {self.model_path} with loss " f"{self.best_loss}")
return
def _configure_optimizer(self) -> None:
if self._cfg.optimizer == "adam":
self.optimizer = optim.Adam(
self.net.parameters(),
lr=self._cfg.lr,
weight_decay=self._cfg.w_decay,
)
elif self._cfg.optimizer == "sgd":
self.optimizer = optim.SGD(
self.net.parameters(),
lr=self._cfg.lr,
momentum=self._cfg.momentum,
weight_decay=self._cfg.w_decay,
)
else:
raise KeyError(f"Optimizer {self._cfg.optimizer} not supported")
self.scheduler = EarlyStopScheduler(
self.optimizer,
factor=self._cfg.factor,
verbose=True,
min_lr=self._cfg.min_lr,
patience=self._cfg.patience,
)
print("[INFO] OPTIMIZER AND SCHEDULER CONFIGURED")
return
def _get_dataloader(
self,
train: bool = True,
step: Optional[int] = None,
allow_augmentation: bool = True,
    ) -> Tuple[List[Data.DataLoader], List[Data.DataLoader]]:
train_loader_list: List[Data.DataLoader] = []
val_loader_list: List[Data.DataLoader] = []
if step is not None:
self.fov_ratio = (
1.0 - (self._cfg.hierarchical_front_step_ratio + self._cfg.hierarchical_back_step_ratio) * step
)
self.front_ratio = self._cfg.hierarchical_front_step_ratio * step
self.back_ratio = self._cfg.hierarchical_back_step_ratio * step
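            # Hedged numeric example (assuming front/back hierarchical step ratios
            # of 0.05 each): step=2 gives fov_ratio=0.8, front_ratio=0.1 and
            # back_ratio=0.1, i.e. larger steps shift the sample mix from in-FOV
            # samples towards front/back samples.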
for generator in self.data_generators:
# init data classes
val_data = PlannerData(
cfg=generator._cfg,
transform=self.transform,
semantics=self._cfg.sem,
rgb=self._cfg.rgb,
pixel_mean=self.pixel_mean,
pixel_std=self.pixel_std,
)
if train:
train_data = PlannerData(
cfg=generator._cfg,
transform=self.transform,
semantics=self._cfg.sem,
rgb=self._cfg.rgb,
pixel_mean=self.pixel_mean,
pixel_std=self.pixel_std,
)
else:
train_data = None
# split data in train and validation with given sample ratios
            generator.split_samples(
                train_dataset=train_data,
                test_dataset=val_data,
                generate_split=train,
                ratio_back_samples=self.back_ratio,
                ratio_front_samples=self.front_ratio,
                ratio_fov_samples=self.fov_ratio,
                allow_augmentation=allow_augmentation,
            )
if self._cfg.load_in_ram:
if train:
train_data.load_data_in_memory()
val_data.load_data_in_memory()
if train:
train_loader = Data.DataLoader(
dataset=train_data,
batch_size=self._cfg.batch_size,
shuffle=True,
pin_memory=True,
num_workers=self._cfg.num_workers,
)
val_loader = Data.DataLoader(
dataset=val_data,
batch_size=self._cfg.batch_size,
shuffle=True,
pin_memory=True,
num_workers=self._cfg.num_workers,
)
if train:
train_loader_list.append(train_loader)
val_loader_list.append(val_loader)
return train_loader_list, val_loader_list
def _train_epoch(
self,
loader: Data.DataLoader,
epoch: int,
env_id: int,
) -> float:
train_loss, batches = 0, len(loader)
enumerater = tqdm.tqdm(enumerate(loader))
for batch_idx, inputs in enumerater:
odom = inputs[2].cuda(self._cfg.gpu_id)
goal = inputs[3].cuda(self._cfg.gpu_id)
self.optimizer.zero_grad()
if self._cfg.sem or self._cfg.rgb:
depth_image = inputs[0].cuda(self._cfg.gpu_id)
sem_rgb_image = inputs[1].cuda(self._cfg.gpu_id)
preds, fear = self.net(depth_image, sem_rgb_image, goal)
else:
image = inputs[0].cuda(self._cfg.gpu_id)
preds, fear = self.net(image, goal)
# flip y axis for augmented samples (clone necessary due to
# inplace operation that otherwise leads to error in backprop)
preds_flip = torch.clone(preds)
preds_flip[inputs[4], :, 1] = preds_flip[inputs[4], :, 1] * -1
goal_flip = torch.clone(goal)
goal_flip[inputs[4], 1] = goal_flip[inputs[4], 1] * -1
log_step = batch_idx + epoch * batches
loss, _ = self._loss(
preds_flip,
fear,
self.data_traj_cost[env_id],
odom,
goal_flip,
log_step=log_step,
)
wandb.log({"train_loss_step": loss}, step=log_step)
loss.backward()
self.optimizer.step()
train_loss += loss.item()
enumerater.set_description(
f"Epoch: {epoch} in Env: "
f"({env_id+1}/{len(self._cfg.env_list)-1}) "
f"- train loss:{round(train_loss/(batch_idx+1), 4)} on"
f" {batch_idx}/{batches}"
)
return train_loss / (batch_idx + 1)
def _test_epoch(
self,
loader,
env_id: int,
epoch: int = 0,
is_visual=False,
fov_angle: float = 90.0,
dataset: str = "val",
) -> float:
test_loss = 0
num_batches = len(loader)
preds_viz = []
wp_viz = []
image_viz = []
with torch.no_grad():
for batch_idx, inputs in enumerate(loader):
odom = inputs[2].cuda(self._cfg.gpu_id)
goal = inputs[3].cuda(self._cfg.gpu_id)
if self._cfg.sem or self._cfg.rgb:
image = inputs[0].cuda(self._cfg.gpu_id) # depth
sem_rgb_image = inputs[1].cuda(self._cfg.gpu_id) # sem
preds, fear = self.net(image, sem_rgb_image, goal)
else:
image = inputs[0].cuda(self._cfg.gpu_id)
preds, fear = self.net(image, goal)
# flip y axis for augmented samples
preds[inputs[4], :, 1] = preds[inputs[4], :, 1] * -1
goal[inputs[4], 1] = goal[inputs[4], 1] * -1
log_step = epoch * num_batches + batch_idx
loss, waypoints = self._loss(
preds,
fear,
self.data_traj_cost[env_id],
odom,
goal,
log_step=log_step,
dataset=dataset,
)
if dataset == "val":
wandb.log({f"{dataset}_loss_step": loss}, step=log_step)
test_loss += loss.item()
if is_visual and len(preds_viz) * batch_idx < self._cfg.n_visualize:
if batch_idx == 0:
odom_viz = odom.cpu()
goal_viz = goal.cpu()
fear_viz = fear.cpu()
augment_viz = inputs[4].cpu()
else:
odom_viz = torch.cat((odom_viz, odom.cpu()), dim=0)
goal_viz = torch.cat((goal_viz, goal.cpu()), dim=0)
fear_viz = torch.cat((fear_viz, fear.cpu()), dim=0)
augment_viz = torch.cat((augment_viz, inputs[4].cpu()), dim=0)
preds_viz.append(preds.cpu())
wp_viz.append(waypoints.cpu())
image_viz.append(image.cpu())
if is_visual:
preds_viz = torch.vstack(preds_viz)
wp_viz = torch.vstack(wp_viz)
image_viz = torch.vstack(image_viz)
            # limit again to the number of visualizations, since items were
            # appended in multiples of the batch size
preds_viz = preds_viz[: self._cfg.n_visualize]
wp_viz = wp_viz[: self._cfg.n_visualize]
image_viz = image_viz[: self._cfg.n_visualize]
odom_viz = odom_viz[: self._cfg.n_visualize]
goal_viz = goal_viz[: self._cfg.n_visualize]
fear_viz = fear_viz[: self._cfg.n_visualize]
augment_viz = augment_viz[: self._cfg.n_visualize]
# visual trajectory and images
self.data_traj_viz[env_id].VizTrajectory(
preds_viz,
wp_viz,
odom_viz,
goal_viz,
fear_viz,
fov_angle=fov_angle,
augment_viz=augment_viz,
)
self.data_traj_viz[env_id].VizImages(preds_viz, wp_viz, odom_viz, goal_viz, fear_viz, image_viz)
return test_loss / (batch_idx + 1)
def _loss(
self,
preds: torch.Tensor,
fear: torch.Tensor,
traj_cost: TrajCost,
odom: torch.Tensor,
goal: torch.Tensor,
log_step: int,
step: float = 0.1,
dataset: str = "train",
) -> Tuple[torch.Tensor, torch.Tensor]:
waypoints = traj_cost.opt.TrajGeneratorFromPFreeRot(preds, step=step)
loss = traj_cost.CostofTraj(
waypoints,
odom,
goal,
fear,
log_step,
ahead_dist=self._cfg.fear_ahead_dist,
dataset=dataset,
)
return loss, waypoints
# EoF
| 22,112 | Python | 34.608696 | 112 | 0.510899 |
leggedrobotics/viplanner/viplanner/utils/eval_utils.py | # Copyright (c) 2023-2024, ETH Zurich (Robotics Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from typing import List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
import yaml
# viplanner
from viplanner.config.learning_cfg import Loader as TrainCfgLoader
from viplanner.traj_cost_opt import TrajCost
class BaseEvaluator:
def __init__(
self,
distance_tolerance: float,
obs_loss_threshold: float,
cost_map_dir: Optional[str] = None,
cost_map_name: Optional[str] = None,
) -> None:
# args
self.distance_tolerance = distance_tolerance
self.obs_loss_threshold = obs_loss_threshold
self.cost_map_dir = cost_map_dir
self.cost_map_name = cost_map_name
# parameters
self._nbr_paths: int = 0
# load cost_map
self._use_cost_map: bool = False
if all([self.cost_map_dir, self.cost_map_name]):
self._load_cost_map()
return
##
# Properties
##
@property
def nbr_paths(self) -> int:
return self._nbr_paths
def set_nbr_paths(self, nbr_paths: int) -> None:
self._nbr_paths = nbr_paths
return
##
# Buffer
##
def create_buffers(self) -> None:
self.length_goal: np.ndarray = np.zeros(self._nbr_paths)
self.length_path: np.ndarray = np.zeros(self._nbr_paths)
self.path_extension: np.ndarray = np.zeros(self._nbr_paths)
self.goal_distances: np.ndarray = np.zeros(self._nbr_paths)
if self._use_cost_map:
self.loss_obstacles: np.ndarray = np.zeros(self._nbr_paths)
##
# Reset
##
def reset(self) -> None:
self.create_buffers()
self.eval_stats = {}
return
##
# Cost Map
##
def _load_cost_map(self) -> None:
self._traj_cost: TrajCost = TrajCost(gpu_id=None) # use cpu for evaluation
self._traj_cost.SetMap(self.cost_map_dir, self.cost_map_name)
self._use_cost_map = True
return
def _get_cost_map_loss(self, path: Union[torch.Tensor, np.ndarray]) -> float:
if isinstance(path, np.ndarray):
waypoints = torch.tensor(path, dtype=torch.float32)
else:
waypoints = path.to(dtype=torch.float32)
loss = self._traj_cost.cost_of_recorded_path(waypoints).numpy()
if self._traj_cost.cost_map.cfg.semantics:
loss -= self._traj_cost.cost_map.cfg.sem_cost_map.negative_reward
return loss
##
# Eval Statistics
##
def eval_statistics(self) -> None:
# Evaluate results
goal_reached = self.goal_distances < self.distance_tolerance
goal_reached_rate = sum(goal_reached) / len(goal_reached)
avg_distance_to_goal = sum(self.goal_distances) / len(self.goal_distances)
avg_distance_to_goal_reached = sum(self.goal_distances[goal_reached]) / sum(goal_reached)
print(
"All path segments been passed. Results: \nReached goal rate"
f" (thres: {self.distance_tolerance}):\t{goal_reached_rate} \nAvg"
f" goal-distance (all): \t{avg_distance_to_goal} \nAvg"
f" goal-distance (reached):\t{avg_distance_to_goal_reached}"
)
self.eval_stats = {
"goal_reached_rate": goal_reached_rate,
"avg_distance_to_goal_all": avg_distance_to_goal,
"avg_distance_to_goal_reached": avg_distance_to_goal_reached,
}
if self._use_cost_map:
within_obs_threshold = np.sum(self.loss_obstacles < self.obs_loss_threshold) / len(self.loss_obstacles)
avg_obs_loss = sum(self.loss_obstacles) / len(self.loss_obstacles)
avg_obs_loss_reached = sum(self.loss_obstacles[goal_reached]) / sum(goal_reached)
max_obs_loss = max(self.loss_obstacles)
max_obs_loss_reached = max(self.loss_obstacles[goal_reached]) if sum(goal_reached) > 0 else np.inf
print(
"Within obs threshold"
f" ({self.obs_loss_threshold}):\t{within_obs_threshold} \nObstacle"
f" loss (all): \t{avg_obs_loss} \nObstacle loss"
f" (reached): \t{avg_obs_loss_reached} \nMax obstacle loss"
f" (all): \t{max_obs_loss} \nMax obstacle loss"
f" (reached):\t{max_obs_loss_reached}"
)
self.eval_stats["avg_obs_loss_all"] = avg_obs_loss
self.eval_stats["avg_obs_loss_reached"] = avg_obs_loss_reached
self.eval_stats["max_obs_loss_all"] = max_obs_loss
self.eval_stats["max_obs_loss_reached"] = max_obs_loss_reached
return
def save_eval_results(self, model_dir: str, save_name: str) -> None:
# save eval results in model yaml
yaml_path = model_dir[:-3] + ".yaml"
if not os.path.exists(yaml_path):
return
with open(yaml_path) as file:
data: dict = yaml.load(file, Loader=TrainCfgLoader)
if "eval" not in data:
data["eval"] = {}
data["eval"][save_name] = self.eval_stats
with open(yaml_path, "w") as file:
yaml.dump(data, file)
##
# Plotting
##
def plt_single_model(self, eval_dir: str, show: bool = True) -> None:
# check if directory exists
os.makedirs(eval_dir, exist_ok=True)
# get unique goal lengths and init buffers
unique_goal_length = np.unique(np.round(self.length_goal, 1))
mean_path_extension = []
std_path_extension = []
mean_goal_distance = []
std_goal_distance = []
goal_counts = []
mean_obs_loss = []
std_obs_loss = []
for x in unique_goal_length:
# get subset of path predictions with goal length x
subset_idx = np.round(self.length_goal, 1) == x
mean_path_extension.append(np.mean(self.path_extension[subset_idx]))
std_path_extension.append(np.std(self.path_extension[subset_idx]))
mean_goal_distance.append(np.mean(self.goal_distances[subset_idx]))
std_goal_distance.append(np.std(self.goal_distances[subset_idx]))
goal_counts.append(len(self.goal_distances[subset_idx]))
if self._use_cost_map:
mean_obs_loss.append(np.mean(self.loss_obstacles[subset_idx]))
std_obs_loss.append(np.std(self.loss_obstacles[subset_idx]))
        # plot the increase in path length depending on the distance between goal and start
fig, ax = plt.subplots(figsize=(12, 10))
fig.suptitle("Path Length Increase", fontsize=20)
ax.plot(
unique_goal_length,
mean_path_extension,
color="blue",
label="Average path length",
)
ax.fill_between(
unique_goal_length,
np.array(mean_path_extension) - np.array(std_path_extension),
np.array(mean_path_extension) + np.array(std_path_extension),
color="blue",
alpha=0.2,
label="Uncertainty",
)
ax.set_xlabel("Start-Goal Distance", fontsize=16)
ax.set_ylabel("Path Length", fontsize=16)
ax.set_title(
(
"Avg increase of path length is"
f" {round(np.mean(self.path_extension), 5)*100:.2f}% for"
" successful paths with tolerance of"
f" {self.distance_tolerance}"
),
fontsize=16,
)
ax.tick_params(axis="both", which="major", labelsize=14)
ax.legend()
fig.savefig(os.path.join(eval_dir, "path_length.png"))
if show:
plt.show()
else:
plt.close()
        # plot the distance to the goal depending on the distance between goal and start
goal_success_mean = np.sum(self.goal_distances < self.distance_tolerance) / len(self.goal_distances)
# Create a figure and two axis objects, with the second one sharing the x-axis of the first
fig, ax1 = plt.subplots(figsize=(12, 10))
ax2 = ax1.twinx()
fig.subplots_adjust(hspace=0.4) # Add some vertical spacing between the two plots
# Plot the goal distance data
ax1.plot(
unique_goal_length,
mean_goal_distance,
color="blue",
label="Average goal distance length",
zorder=2,
)
ax1.fill_between(
unique_goal_length,
np.array(mean_goal_distance) - np.array(std_goal_distance),
np.array(mean_goal_distance) + np.array(std_goal_distance),
color="blue",
alpha=0.2,
label="Uncertainty",
zorder=1,
)
ax1.set_xlabel("Start-Goal Distance", fontsize=16)
ax1.set_ylabel("Goal Distance", fontsize=16)
ax1.set_title(
(
f"With a tolerance of {self.distance_tolerance} are"
f" {round(goal_success_mean, 5)*100:.2f} % of goals reached"
),
fontsize=16,
)
ax1.tick_params(axis="both", which="major", labelsize=14)
# Plot the goal counts data on the second axis
ax2.bar(
unique_goal_length,
goal_counts,
color="red",
alpha=0.5,
width=0.05,
label="Number of samples",
zorder=0,
)
ax2.set_ylabel("Sample count", fontsize=16)
ax2.tick_params(axis="both", which="major", labelsize=14)
# Combine the legends from both axes
lines, labels = ax1.get_legend_handles_labels()
bars, bar_labels = ax2.get_legend_handles_labels()
ax2.legend(lines + bars, labels + bar_labels, loc="upper center")
plt.suptitle("Goal Distance", fontsize=20)
fig.savefig(os.path.join(eval_dir, "goal_distance.png"))
if show:
plt.show()
else:
plt.close()
if self._use_cost_map:
# plot to compare the obs loss depending on the distance between goal and start
avg_obs_loss = np.mean(self.loss_obstacles)
obs_threshold_success_rate = np.sum(self.loss_obstacles < self.obs_loss_threshold) / len(
self.loss_obstacles
)
fig, ax = plt.subplots(figsize=(12, 10))
fig.suptitle("Obstacle Loss", fontsize=20)
ax.plot(
unique_goal_length,
mean_obs_loss,
color="blue",
label="Average obs loss",
)
ax.fill_between(
unique_goal_length,
np.array(mean_obs_loss) - np.array(std_obs_loss),
np.array(mean_obs_loss) + np.array(std_obs_loss),
color="blue",
alpha=0.2,
label="Uncertainty",
)
ax.set_xlabel("Start-Goal Distance", fontsize=16)
ax.set_ylabel("Obstacle Loss", fontsize=16)
ax.set_title(
(
f"Avg obstacle loss {round(avg_obs_loss, 5):.5f} with"
f" {obs_threshold_success_rate}% within obs thres"
f" {self.obs_loss_threshold}"
),
fontsize=16,
)
ax.tick_params(axis="both", which="major", labelsize=14)
ax.legend()
fig.savefig(os.path.join(eval_dir, "obs_cost.png"))
if show:
plt.show()
else:
plt.close()
return
def plt_comparison(
self,
length_goal_list: List[np.ndarray],
goal_distance_list: List[np.ndarray],
path_extension_list: List[np.ndarray],
model_dirs: List[str],
save_dir: str,
obs_loss_list: Optional[List[np.ndarray]] = None,
model_names: Optional[List[str]] = None,
) -> None:
# path increase plot
fig_path, axs_path = plt.subplots(figsize=(12, 10))
fig_path.suptitle("Path Extension", fontsize=24)
axs_path.set_xlabel("Start-Goal Distance [m]", fontsize=20)
axs_path.set_ylabel("Path Extension [%]", fontsize=20)
axs_path.tick_params(axis="both", which="major", labelsize=16)
# goal distance plot
fig_goal, axs_goal = plt.subplots(figsize=(12, 10))
fig_goal.suptitle("Goal Distance", fontsize=24)
axs_goal.set_xlabel("Start-Goal Distance [m]", fontsize=20)
axs_goal.set_ylabel("Goal Distance [m]", fontsize=20)
axs_goal.tick_params(axis="both", which="major", labelsize=16)
if self._use_cost_map:
assert obs_loss_list is not None, "If cost map is used, obs_loss_list must be provided"
# obs loss plot
fig_obs, axs_obs = plt.subplots(figsize=(12, 10))
# fig_obs.suptitle("Mean Obstacle Loss Along Path", fontsize=24)
axs_obs.set_xlabel("Start-Goal Distance [m]", fontsize=20)
axs_obs.set_ylabel("Mean Obstacle Loss", fontsize=20)
axs_obs.tick_params(axis="both", which="major", labelsize=16)
bar_width = 0.8 / len(length_goal_list)
for idx in range(len(length_goal_list)):
if model_names is None:
model_name = os.path.split(model_dirs[idx])[1]
else:
model_name = model_names[idx]
goal_success_bool = goal_distance_list[idx] < self.distance_tolerance
unique_goal_length = np.unique(np.round(length_goal_list[idx], 0))
mean_path_extension = []
std_path_extension = []
mean_goal_distance = []
std_goal_distance = []
mean_obs_loss = []
std_obs_loss = []
goal_length_obs_exists = []
            unique_goal_length_used = []
for x in unique_goal_length:
if x == 0:
continue
# get subset of path predictions with goal length x
subset_idx = np.round(length_goal_list[idx], 0) == x
mean_path_extension.append(np.mean(path_extension_list[idx][subset_idx]))
std_path_extension.append(np.std(path_extension_list[idx][subset_idx]))
mean_goal_distance.append(np.mean(goal_distance_list[idx][subset_idx]))
std_goal_distance.append(np.std(goal_distance_list[idx][subset_idx]))
if self._use_cost_map:
y_obs_subset = obs_loss_list[idx][subset_idx]
if len(y_obs_subset) != 0:
mean_obs_loss.append(np.mean(y_obs_subset))
std_obs_loss.append(np.std(y_obs_subset))
goal_length_obs_exists.append(x)
else:
print(f"Warning: No obs loss for {model_name} at goal" f" distance {x}")
                unique_goal_length_used.append(x)
            unique_goal_length = np.array(unique_goal_length_used)
goal_length_obs_exists = np.array(goal_length_obs_exists)
bar_pos = bar_width / 2 + idx * bar_width - 0.4
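            # Center the per-model bars around each goal-length tick; e.g. with two
            # models, bar_width = 0.4 and bar_pos evaluates to -0.2 and +0.2.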
            # plot the increase in path length depending on the distance between goal and start for the successful paths
avg_increase = np.mean(path_extension_list[idx])
axs_path.bar(
unique_goal_length + bar_pos,
mean_path_extension,
width=bar_width,
label=(f"{model_name} (avg {round(avg_increase, 5)*100:.2f} %))"),
alpha=0.8,
) # yerr=std_path_extension,
# axs_path.plot(goal_length_path_exists, mean_path_extension, label=f'{model_name} ({round(avg_increase, 5)*100:.2f} %))')
# axs_path.fill_between(goal_length_path_exists, np.array(mean_path_extension) - np.array(std_path_extension), np.array(mean_path_extension) + np.array(std_path_extension), alpha=0.2)
# plot with the distance to the goal depending on the length between goal and start
goal_success = np.sum(goal_success_bool) / len(goal_distance_list[idx])
axs_goal.bar(
unique_goal_length + bar_pos,
mean_goal_distance,
width=bar_width,
label=(f"{model_name} (success rate" f" {round(goal_success, 5)*100:.2f} %)"),
alpha=0.8,
) # yerr=std_goal_distance,
# axs_goal.plot(unique_goal_length, mean_goal_distance, label=f'{model_name} ({round(goal_success, 5)*100:.2f} %)')
# axs_goal.fill_between(unique_goal_length, np.array(mean_goal_distance) - np.array(std_goal_distance), np.array(mean_goal_distance) + np.array(std_goal_distance), alpha=0.2)
if self._use_cost_map:
                # plot the mean obstacle loss depending on the distance between goal and start
avg_obs_loss = np.mean(obs_loss_list[idx])
axs_obs.bar(
goal_length_obs_exists + bar_pos,
mean_obs_loss,
width=bar_width,
label=f"{model_name} (avg {round(avg_obs_loss, 5):.3f})",
alpha=0.8,
) # yerr=std_obs_loss,
# axs_obs.plot(goal_length_obs_exists, mean_obs_loss, label=f'{model_name} ({round(avg_obs_loss, 5):.5f} %)')
# axs_obs.fill_between(goal_length_obs_exists, np.array(mean_obs_loss) - np.array(std_obs_loss), np.array(mean_obs_loss) + np.array(std_obs_loss), alpha=0.2)
# plot threshold for successful path
axs_goal.axhline(
y=self.distance_tolerance,
color="red",
linestyle="--",
label="threshold",
)
axs_path.legend(fontsize=20)
axs_goal.legend(fontsize=20)
fig_path.savefig(os.path.join(save_dir, "path_length_comp.png"))
fig_goal.savefig(os.path.join(save_dir, "goal_distance_comp.png"))
if self._use_cost_map:
axs_obs.legend(fontsize=20)
fig_obs.savefig(os.path.join(save_dir, "obs_loss_comp.png"))
plt.show()
return
# EoF
| 18,324 | Python | 37.906582 | 195 | 0.560358 |
boredengineering/awesome_terrains/README.md | # Awesome Terrains Extension
!!! Please, feel free to collaborate on the project !!!<br/>
**Authors** <br/>
> Renan Monteiro Barbosa
Requirements:<br/>
> Isaac-Sim
This extension is a study template for creating terrains programmatically in Isaac-Sim.<br/>
The idea is to explore making terrains to train robots in OIGE.<br/>
# Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest a few extensions to install that will make the Python experience better.
Look for "omni.isaac.terrain_generator" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from the console with this folder added to the search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable omni.isaac.terrain_generator
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script to use is included.
Run:
```
> link_app.bat
```
If successful, you should see an `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
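For example, a link to this repository (assuming the extensions live on the `main` branch) would look like: `git://github.com/boredengineering/awesome_terrains.git?branch=main&dir=exts`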
Notice that `exts` is the repo subfolder with extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 2,377 | Markdown | 36.156249 | 258 | 0.755574 |
boredengineering/awesome_terrains/tools/scripts/link_app.py | import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 | Python | 32.117647 | 133 | 0.562189 |
boredengineering/awesome_terrains/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
boredengineering/awesome_terrains/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 | Python | 33.166666 | 108 | 0.703362 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA","Bored Engineering","Renan Monteiro Barbosa"]
# The title and description fields are primarily for displaying extension info in UI
title = "Awesome Terrain Generator"
description="An extension to generate terrains for Isaac-Sim with focus on OIGE. Only works on Isaac-Sim."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/boredengineering/awesome_terrains"
# One of categories for UI.
category = "Example"
# Keywords for the extension
keywords = ["isaac","isaac-sim","sim","oige","gym","kit", "example","terains"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import omni.isaac.terrain_generator".
[[python.module]]
name = "omni.isaac.terrain_generator"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,764 | TOML | 35.770833 | 118 | 0.748299 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/terrain_utils.py | import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema, Usd
import omni.kit.commands
def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None):
    """
    Generate a uniform noise terrain
    Parameters:
        terrain (SubTerrain): the terrain
        min_height (float): the minimum height of the terrain [meters]
        max_height (float): the maximum height of the terrain [meters]
        step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
    Returns:
        terrain (SubTerrain): updated terrain
    """
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
terrain.length * terrain.horizontal_scale / downsampled_scale)))
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.):
"""
    Generate a pyramid sloped terrain
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x-xx)) / center_x
yy = (center_y - np.abs(center_y-yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.):
"""
    Generate a terrain with randomly placed rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i-width, 4))
start_j = np.random.choice(range(0, j-length, 4))
terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
        num_waves (int): number of sine waves across the terrain length
        amplitude (float): amplitude of the sine waves [meters]
    Returns:
        terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5*amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype(
terrain.height_field_raw.dtype)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
    Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.):
"""
    Generate pyramid stairs
    Parameters:
        terrain (terrain): the terrain
        step_width (float): the width of the step [meters]
        step_height (float): the height of the step [meters]
        platform_size (float): size of the flat platform at the center of the terrain [meters]
    Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
stone_distance (float): distance between stones (i.e size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height-1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
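    # The whole field starts at the hole depth; the stones are then raised on top.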
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
"""
Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:
If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
B(x2,y2)
/|
/ |
/ |
(x1,y1)A---A'(x2',y1)
Parameters:
height_field_raw (np.array): input heightfield
horizontal_scale (float): horizontal scale of the heightfield [meters]
vertical_scale (float): vertical scale of the heightfield [meters]
slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
Returns:
vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
"""
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols)
x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold)
move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold)
move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold)
move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold)
move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold)
move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold)
xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale
yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32)
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols-1) + i*num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2*i*(num_cols-1)
stop = start + 2*(num_cols-1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start+1:stop:2, 0] = ind0
triangles[start+1:stop:2, 1] = ind2
triangles[start+1:stop:2, 2] = ind3
return vertices, triangles
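# Worked micro-example (follows directly from the loops above): a 2x2 heightfield
# yields 4 vertices on a `horizontal_scale`-spaced grid with z = hf * vertical_scale,
# and 2 triangles with vertex indices (0, 3, 1) and (0, 2, 3).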
# Cannot load DefinePrim
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces))
terrain = XFormPrim(prim_path="/World/terrain",
name="terrain",
position=position,
orientation=orientation)
    # Adding a simple Collider does not work
# omni.kit.commands.execute('SetRigidBody',
# path=Sdf.Path('/World/terrain'),
# approximationShape='convexHull',
# kinematic=False)
# omni.kit.commands.execute('CreateJointsCommand',
# stage=Usd.Stage.Open(rootLayer=Sdf.Find('anon:000001BFCF67EBB0:World0.usd'), sessionLayer=Sdf.Find('anon:000001BFCF67FC90:World0-session.usda')),
# joint_type='Fixed',
# paths=['/World/terrain'],
# join_to_parent=False)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
collision_api.CreateApproximationAttr().Set("sdf")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
# stage = omni.usd.get_context().get_stage()
# width=256
# length=256
# height_field_raw = np.zeros((width, length), dtype=np.int16)
# vertices, triangles = convert_heightfield_to_trimesh(height_field_raw, horizontal_scale=1.0, vertical_scale=1.0, slope_threshold=None)
# terrain = add_terrain_to_stage(stage, vertices, triangles)
# random_uniform_terrain(terrain, min_height, max_height, step=1)
| 17,002 | Python | 42.822165 | 155 | 0.647159 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/extension.py | import omni.ext
import omni.ui as ui
from .terrain_utils import *
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
import omni.usd
# Functions and vars are available to other extensions as usual in Python: `omni.isaac.terrain_generator.some_public_function(x)`
def some_public_function(x: int):
print("[omni.isaac.terrain_generator] some_public_function was called with x: ", x)
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class OmniIsaacTerrain_generatorExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.isaac.terrain_generator] omni isaac terrain_generator startup")
self._count = 0
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
label = ui.Label("")
def on_click():
self.get_terrain()
# self._count += 1
label.text = "Generate Terrain"
def on_reset():
self.clear_terrain()
# self._count = 0
label.text = "Clear Stage"
on_reset()
with ui.HStack():
ui.Button("Add Terrain", clicked_fn=on_click)
ui.Button("Clear Stage", clicked_fn=on_reset)
def on_shutdown(self):
print("[omni.isaac.terrain_generator] omni isaac terrain_generator shutdown")
# This deletes the terrain
def clear_terrain(self):
current_stage = get_current_stage()
current_stage.RemovePrim("/World/terrain")
# The stuff that makes terrain
def get_terrain(self):
stage = get_current_stage()
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
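        # Each terrain type below fills one num_rows x num_cols slab of this
        # heightfield, stacked along the first axis (8 slabs in total).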
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
        # Random uniform noise terrain
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
        # Make a plain slope; still need to understand how to control it. When this row is deleted, the terrain is flat
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
# Pyramid slope, probably the base for the stairs code
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
# nice square obstacles randomly generated
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
# Nice curvy terrain
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
# Adjust stair step size, how far it goes down or up.
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
        # Need to figure out how to change step heights and make a Pyramid Stair go up
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
        # Stepping stones; the hole depth still needs fixing
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1., stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
# Error
# Cannot load DefinePrim
# File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\extension.py", line 4, in <module>
# from .terrain_utils import *
# File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\terrain_utils.py", line 384, in <module>
# terrain = add_terrain_to_stage(stage, vertices, triangles)
# File "e:\bored engineer github\bored engineer\awesome_terrains\exts\omni.isaac.terrain_generator\omni\isaac\terrain_generator\terrain_utils.py", line 340, in add_terrain_to_stage
# terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
# AttributeError: 'NoneType' object has no attribute 'DefinePrim' | 5,526 | Python | 52.14423 | 182 | 0.666305 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/create_terrain_demo.py | import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from .terrain_utils import *
class TerrainCreation(BaseTask):
def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
BaseTask.__init__(self, name=name, offset=offset)
self._num_envs = num_envs
self._num_per_row = num_per_row
self._env_spacing = env_spacing
self._device = "cpu"
self._cloner = GridCloner(self._env_spacing, self._num_per_row)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
@property
def default_base_env_path(self):
return "/World/envs"
@property
def default_zero_env_path(self):
return f"{self.default_base_env_path}/env_0"
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.get_terrain()
self.get_ball()
super().set_up_scene(scene)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
print(f"cloning {self._num_envs} environments...")
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths
)
return
def get_terrain(self):
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
def get_ball(self):
ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
name="ball",
translation=np.array([0.0, 0.0, 1.0]),
mass=0.5,
radius=0.2,)
def post_reset(self):
for i in range(self._num_envs):
ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
color = 0.5 + 0.5 * np.random.random(3)
visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
binding_api = UsdShade.MaterialBindingAPI(ball_prim)
binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
num_envs = 800
num_per_row = 80
env_spacing = 0.56*2
terrain_creation_task = TerrainCreation(name="TerrainCreation",
num_envs=num_envs,
num_per_row=num_per_row,
env_spacing=env_spacing,
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
simulation_app.close() | 6,310 | Python | 41.355704 | 166 | 0.61775 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/omni/isaac/terrain_generator/tests/test_hello_world.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.isaac.terrain_generator
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = omni.isaac.terrain_generator.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,692 | Python | 35.021276 | 142 | 0.684397 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 178 | Markdown | 18.888887 | 80 | 0.702247 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/docs/README.md | # Awesome Terrain Generator Example [omni.isaac.terrain_generator]
This extension is intended to help explore how to programmatically generate terrains for OIGE (Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim).
| 237 | Markdown | 46.599991 | 167 | 0.827004 |
boredengineering/awesome_terrains/exts/omni.isaac.terrain_generator/docs/index.rst | omni.isaac.terrain_generator
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: omni.isaac.terrain_generator
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 357 | reStructuredText | 16.047618 | 45 | 0.633053 |
USDSync/MetaCloudExplorer/README.md | # USDSync.com
# Meta Cloud Explorer (MCE)
# NVIDIA Omniverse Extension, a Scene Authoring Tool
(In Beta Development phase)

The true power of the Metaverse is to gain new insights into existing problems by experiencing things in a different way. A simple change in perspective can sometimes work wonders! Meta Cloud Explorer helps Cloud Architects visualize their cloud infrastructure at scale. It's a dynamic metaverse creation tool for NVIDIA's Omniverse.
Quickly connect to your Cloud Infrastructure and visualize it in your private Omniverse!*
This extension generates digital models of your Cloud Infrastructure that can be used to gain insights to drive better infrastructure, optimized resources, reduced costs, and breakthrough customer experiences.

**Gain Insight by seeing your infrastructure at scale:**

**View resources by Location, Group, Type, Subscription**

**Optional costs data integrated to groups**

**Click any shape for more information**

**Can support multiple clouds!**

*Only works with Microsoft Azure Cloud currently.*
AWS, GCP planned on roadmap.
The whole point of Meta Cloud Explorer (MCE) is to let you experience your cloud infrastructure in a new way. The Metaverse is here; it means a lot of different things, but one of the key tenets of the Metaverse is to be able to experience new things, or to experience existing things in different or new contexts.
A Metaverse is just a 3D space you can customize and share, or even visit with other people. The ability to experience a virtual place with others opens up a world of possibility for training, knowledge sharing and helping others to understand technology.
### Cloud Infrastructure 2023 +
Azure, AWS and GCP are massive in reach; with millions of customers and probably billions of pieces of infrastructure, it's a lot for cloud architects, infrastructure and SRE engineers to manage. Cloud Architects have many tools to help manage the complexity and risk of managing cloud operations, infrastructure and deployments at scale.
Infrastructure is now managed just like code, in Source Control and connected to powerful Cloud Orchestration software like Cloud Formation, Terraform, Bicep and others, giving Cloud Architects, Engineers and Developers even more powerful tools to help scale and manage the cloud.
Existing Web based UIs suffer from "focusing on small groups of trees" which makes it hard to "see the forest" in this context.
There is no shortage of "Infrastructure diagram generation" tools that can produce 2d representations of your cloud infrastructure. Visio, Lucid Scale and other cloud based diagramming tools help architects manage and understand cloud infrastructure design and architecture. Many diagrams are still manually maintained and time-consuming.
Lucid Scale lets you generate a model from your cloud infrastructure like this:

### NVIDIA Omniverse and USD Universal Scene Description
Meta Cloud Explorer doesn't replace any of these tools; it's a new tool, a new way to see, understand and experience your cloud infrastructure at scale.
Thanks to the power of NVIDIA's Omniverse, we can now create real-time, photo realistic environments modeled directly from your cloud infrastructure!
As Meta Cloud Explorer matures, it will help you travel through time, see infrastructure differences and configurations in new ways, even create entirely new architectures. The power to visualize networks, network traffic and "the edge" can help engineers understand cloud security, endpoints and vulnerabilities.
In version 1.0 of Meta Cloud Explorer, we've only begun to bridge the gap between cloud infrastructure and the metaverse. There is so much that can be done in this new context! v1.0 provides a read-only view of your cloud infrastructure and gives you an easy to use toolbox to create a simulated world that represents your cloud.
Future versions will let you create infrastructure from template libraries, learn about new architecture techniques and simulate design changes.
USDSync.com aims to create a SaaS based "AzureverseAsAService", where we can host your "Azureverse" live and in sync with your real cloud 24x7.
No more scene composition and design, USDSync.com can host these resources and keep them in sync automatically.
### The right tool for the right job
Have you ever tried to clean up your hard drive? Windows File Explorer suffers from the same problem as the Azure, AWS and GCP UI portals. You are always in just one folder, or just one group or just one project; it's really hard just to understand the scale of the whole thing!
Want to understand "What's taking up space on your hard disk?"... Good luck... Yes, you can right click on every folder and view usage, but it's just not as easy as it should be! We just keep running out of space and adding bigger and bigger hard drives! Ever used WinDirStat?
WinDirStat is a free program that shows visually what is taking up space on your hard drive allowing you to gain insights that windows just doesn't provide. Once it has scanned your drive it visually shows you all the files on your disk, in a bin-packed graph. This graph allows you to easily see and understand what files and folders are taking up the space on your disk. Look what a simple change in context does for you?!?!

Think of MCE as the "WinDirStat" of your Azure, AWS, GCP cloud infrastructure... just another tool, but wow, the instant insights and change in context really help sometimes. I honestly don't know how you would figure out what's taking up space on your hard disk without a tool like WinDirStat.
Look at the difference between looking at your infrastructure in Azure, AWS or GCP, vs looking at it in MCE!
While the context is obviously a bit different and MCE does not replace these tools at all, you can immediately gain insight!
### Azure Portal

### AWS Portal

### GCP Portal

### I just want to see ALL MY INFRASTRUCTURE, in ONE PLACE, RIGHT NOW!
I don't always want to just look at one resource group at a time. I don't want to endlessly filter lists or search for things. I don't always want to just look at diagrams or templates or even the code that built this infrastructure, I JUST WANT TO SEE IT! In one place, like it's a place I could go, and if it's virtual, be able to rearrange it instantly!
I rest my case, may I present **Meta Cloud Explorer**!! :)
### Meta Cloud Explorer Extension in Omniverse!

This is just "a picture" of my Azureverse. It's really a digital world I can travel around inside, rearrange, re-group, change groupings, costs, layouts and more. It's an easy way to just see your cloud infrastructure, right now, visually... it's actually quite cool ;)
MCE even works in Omniverse XR! Just save your USD scene and open it in Omniverse XR, strap on your VR headset and immerse yourself inside your cloud. The future is here and it's time you have a tool that lets you quickly understand your cloud infrastructure, costs and resources distribution.
To the Metaverse and beyond! (kinda dumb I know, but it's better than Infinity, heh)
Goto the Wiki to Get Started!
https://github.com/USDSync/MetaCloudExplorer/wiki
| 9,153 | Markdown | 93.371133 | 429 | 0.799847 |
USDSync/MetaCloudExplorer/azure-pipelines.yml | # Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
trigger:
- main
pool:
vmImage: ubuntu-latest
strategy:
matrix:
Python27:
python.version: '2.7'
Python35:
python.version: '3.5'
Python36:
python.version: '3.6'
Python37:
python.version: '3.7'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
displayName: 'Use Python $(python.version)'
- script: |
python -m pip install --upgrade pip
pip install -r requirements.txt
displayName: 'Install dependencies'
- script: |
pip install pytest pytest-azurepipelines
pytest
displayName: 'pytest'
| 849 | YAML | 21.972972 | 113 | 0.696113 |
USDSync/MetaCloudExplorer/tools/scripts/link_app.py | import os
import argparse
import sys
import json
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,813 | Python | 32.5 | 133 | 0.562389 |
USDSync/MetaCloudExplorer/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 | XML | 34.333328 | 123 | 0.691943 |
USDSync/MetaCloudExplorer/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import shutil
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(
package_src_path, allowZip64=True
) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning(
"Directory %s already present, packaged installation aborted" % package_dst_path
)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
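# Usage (illustrative): python install_package.py <package_src.zip> <package_dst_dir>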
install_package(sys.argv[1], sys.argv[2])
| 1,888 | Python | 31.568965 | 103 | 0.68697 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/csv_data_manager.py | # Selection UI window for importing CSV files
import carb
from omni.kit.window.file_importer import get_file_importer
import os.path
import asyncio
from pathlib import Path
# external python lib
import csv
import itertools
from .data_store import DataStore
from .prim_utils import cleanup_prim_path
import omni.kit.notification_manager as nm
#This class is designed to import data from 3 input files
#This file acts like a data provider for the data_manager
class CSVDataManager():
def __init__(self):
self._dataStore = DataStore.instance() # Get A Singleton instance, store data here
# limit the number of rows read
self.max_elements = 5000
#specify the filesnames to load
def loadFilesManual(self, grpFile:str, resFile:str):
self.load_grp_file_manual(grpFile)
self.load_res_file_manual(resFile)
#Load all the data from CSV files and process it
def loadFiles(self):
self.load_grp_file()
self.load_res_file()
#Resource Groups File Import
#NAME,SUBSCRIPTION,LOCATION
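    # e.g. an illustrative row (not from real data):
    # rg-prod-web,Contoso Production,eastus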
def load_grp_file_manual(self, fileName):
i=1
with open(fileName, encoding='utf-8-sig', newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
name = row["NAME"]
subs = row["SUBSCRIPTION"]
location = row["LOCATION"]
grp = {name:{"name":name, "subs": subs, "location":location}}
self._dataStore._groups.update(grp)
i=i+1
if i > self.max_elements: return
self.sendNotify("MCE: Azure groups loaded: " + str(len(self._dataStore._groups)), nm.NotificationStatus.INFO)
#Groups File Import
def load_grp_file(self):
if os.path.exists(self._dataStore._rg_csv_file_path):
self.load_grp_file_manual(self._dataStore._rg_csv_file_path)
# Read CSV Resources file
# Expects fields:
# NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST
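    # e.g. an illustrative row (not from real data):
    # vm-web-01,Virtual machine,rg-prod-web,eastus,Contoso Production,120.50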
def load_res_file_manual(self, fileName):
i=1
with open(fileName, encoding='utf-8-sig') as file:
reader = csv.DictReader(file, delimiter=',')
for row in reader:
name = row["NAME"]
type = row["TYPE"]
group = row["RESOURCE GROUP"]
location = row["LOCATION"]
subscription = row["SUBSCRIPTION"]
lmcost = row["LMCOST"]
#fix spacing, control chars early
name = cleanup_prim_path(self, Name=name)
self._dataStore._resources[name] = {"name":name, "type": type, "group": group, "location":location, "subscription":subscription, "lmcost": lmcost}
i=i+1
if i > self.max_elements: return
self.sendNotify("MCE: Azure resources loaded: " + str(len(self._dataStore._resources)), nm.NotificationStatus.INFO)
#Resources File Import
def load_res_file(self):
# check that CSV exists
if os.path.exists(self._dataStore._rs_csv_file_path):
self.load_res_file_manual(self._dataStore._rs_csv_file_path)
# Handles the click of the Load button for file selection dialog
def select_file(self, fileType: str):
self.file_importer = get_file_importer()
if fileType == "rg":
self.file_importer.show_window(
title="Select a CSV File",
import_button_label="Select",
import_handler=self._on_click_rg_open,
file_extension_types=[(".csv", "CSV Files (*.csv)")],
file_filter_handler=self._on_filter_item
)
if fileType == "res":
self.file_importer.show_window(
title="Select a CSV File",
import_button_label="Select",
import_handler=self._on_click_res_open,
file_extension_types=[(".csv", "CSV Files (*.csv)")],
file_filter_handler=self._on_filter_item
)
if fileType == "bgl":
self.file_importer.show_window(
title="Select a png image file",
import_button_label="Select",
import_handler=self._on_click_bgl_open,
file_extension_types=[(".png", "PNG Files (*.png)")],
file_filter_handler=self._on_filter_item
)
if fileType == "bgm":
self.file_importer.show_window(
title="Select a png image file",
import_button_label="Select",
import_handler=self._on_click_bgm_open,
file_extension_types=[(".png", "PNG Files (*.png)")],
file_filter_handler=self._on_filter_item
)
if fileType == "bgh":
self.file_importer.show_window(
title="Select a png image file",
import_button_label="Select",
import_handler=self._on_click_bgh_open,
file_extension_types=[(".png", "PNG Files (*.png)")],
file_filter_handler=self._on_filter_item
)
# Handles the click of the open button within the file importer dialog
def _on_click_rg_open(self, filename: str, dirname: str, selections):
# File name should not be empty.
filename = filename.strip()
if not filename:
carb.log_warn(f"Filename must be provided.")
return
# create the full path to csv file
if dirname:
fullpath = f"{dirname}/{filename}"
else:
fullpath = filename
self._dataStore._rg_csv_file_path = fullpath
self._dataStore._rg_csv_field_model.set_value(str(fullpath))
# Handles the click of the open button within the file importer dialog
def _on_click_res_open(self, filename: str, dirname: str, selections):
# File name should not be empty.
filename = filename.strip()
if not filename:
carb.log_warn(f"Filename must be provided.")
return
# create the full path to csv file
if dirname:
fullpath = f"{dirname}/{filename}"
else:
fullpath = filename
self._dataStore._rs_csv_file_path = fullpath
self._dataStore._rs_csv_field_model.set_value(str(fullpath))
# Handles the click of the open button within the file importer dialog
def _on_click_bgl_open(self, filename: str, dirname: str, selections):
# File name should not be empty.
filename = filename.strip()
if not filename:
carb.log_warn(f"Filename must be provided.")
return
# create the full path to csv file
if dirname:
fullpath = f"{dirname}/{filename}"
else:
fullpath = filename
self._dataStore._bgl_file_path = fullpath
self._dataStore._bgl_field_model.set_value(str(fullpath))
self._dataStore.Save_Config_Data()
# Handles the click of the open button within the file importer dialog
def _on_click_bgm_open(self, filename: str, dirname: str, selections):
# File name should not be empty.
filename = filename.strip()
if not filename:
carb.log_warn(f"Filename must be provided.")
return
# create the full path to csv file
if dirname:
fullpath = f"{dirname}/{filename}"
else:
fullpath = filename
self._dataStore._bgm_file_path = fullpath
self._dataStore._bgm_field_model.set_value(str(fullpath))
self._dataStore.Save_Config_Data()
# Handles the click of the open button within the file importer dialog
def _on_click_bgh_open(self, filename: str, dirname: str, selections):
# File name should not be empty.
filename = filename.strip()
if not filename:
carb.log_warn(f"Filename must be provided.")
return
# create the full path to csv file
if dirname:
fullpath = f"{dirname}/{filename}"
else:
fullpath = filename
self._dataStore._bgh_file_path = fullpath
self._dataStore._bgh_field_model.set_value(str(fullpath))
self._dataStore.Save_Config_Data()
# Handles the filtering of files within the file importer dialog
def _on_filter_item(self, filename: str, filter_postfix: str, filter_ext: str) -> bool:
if not filename:
return True
# Show only .csv files
_, ext = os.path.splitext(filename)
if ext == filter_ext:
return True
else:
return False
def sendNotify(self, message:str, status:nm.NotificationStatus):
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager#
        # nm is already imported at module level; the OK button is created but not
        # attached below, since the notification auto-hides after its timeout
        ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok)
nm.post_notification(
message,
hide_after_timeout=True,
duration=3,
status=status,
button_infos=[],
)
    def clicked_ok(self):
carb.log_info("User clicked ok")
| 9,552 | Python | 35.323194 | 162 | 0.573702 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/widget_info_manipulator.py | ## Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
__all__ = ["WidgetInfoManipulator"]
from omni.ui import color as cl
from omni.ui import scene as sc
import omni.ui as ui
import carb
class _ViewportLegacyDisableSelection:
"""Disables selection in the Viewport Legacy"""
def __init__(self):
self._focused_windows = None
focused_windows = []
try:
            # For some reason is_focused may return False when a Window definitely is the focused window!
            # And there's no good solution to this when multiple Viewport-1 instances are open; so we just have to
# operate on all Viewports for a given usd_context.
import omni.kit.viewport_legacy as vp
vpi = vp.acquire_viewport_interface()
for instance in vpi.get_instance_list():
window = vpi.get_viewport_window(instance)
if not window:
continue
focused_windows.append(window)
if focused_windows:
self._focused_windows = focused_windows
for window in self._focused_windows:
# Disable the selection_rect, but enable_picking for snapping
window.disable_selection_rect(True)
except Exception:
pass
class _DragPrioritize(sc.GestureManager):
"""Refuses preventing _DragGesture."""
def can_be_prevented(self, gesture):
# Never prevent in the middle of drag
return gesture.state != sc.GestureState.CHANGED
def should_prevent(self, gesture, preventer):
if preventer.state == sc.GestureState.BEGAN or preventer.state == sc.GestureState.CHANGED:
return True
class _DragGesture(sc.DragGesture):
""""Gesture to disable rectangle selection in the viewport legacy"""
def __init__(self):
super().__init__(manager=_DragPrioritize())
def on_began(self):
# When the user drags the slider, we don't want to see the selection
# rect. In Viewport Next, it works well automatically because the
# selection rect is a manipulator with its gesture, and we add the
# slider manipulator to the same SceneView.
# In Viewport Legacy, the selection rect is not a manipulator. Thus it's
# not disabled automatically, and we need to disable it with the code.
self.__disable_selection = _ViewportLegacyDisableSelection()
def on_ended(self):
# This re-enables the selection in the Viewport Legacy
self.__disable_selection = None
class WidgetInfoManipulator(sc.Manipulator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
#self.destroy()
self._radius = 2
self._distance_to_top = 5
self._thickness = 2
self._radius_hovered = 20
def destroy(self):
self._root = None
self._path_label = None
self._name_label = None
self._grp_label = None
self._type_label = None
self._location_label = None
self._sub_label = None
self._cost_label = None
def _on_build_widgets(self):
carb.log_info("WidgetInfoManipulator - on_build_widgets")
with ui.ZStack():
ui.Rectangle(
style={
"background_color": cl(0.2),
"border_color": cl(0.7),
"border_width": 2,
"border_radius": 4,
}
)
with ui.VStack(style={"font_size": 24}):
ui.Spacer(height=4)
with ui.ZStack(style={"margin": 1}, height=30):
ui.Rectangle(
style={
"background_color": cl(0.0),
}
)
ui.Line(style={"color": cl(0.7), "border_width": 2}, alignment=ui.Alignment.BOTTOM)
ui.Label("MCE: Resource Information", height=0, alignment=ui.Alignment.LEFT)
ui.Spacer(height=4)
self._path_label = ui.Label("Path:", height=0, alignment=ui.Alignment.LEFT)
self._name_label = ui.Label("Name:", height=0, alignment=ui.Alignment.LEFT)
self._grp_label = ui.Label("RGrp:", height=0, alignment=ui.Alignment.LEFT)
self._type_label = ui.Label("Type:", height=0, alignment=ui.Alignment.LEFT)
self._location_label = ui.Label("Location:", height=0, alignment=ui.Alignment.LEFT)
self._sub_label = ui.Label("Sub:", height=0, alignment=ui.Alignment.LEFT)
self._cost_label = ui.Label("Cost:", height=0, alignment=ui.Alignment.LEFT)
self.on_model_updated(None)
# Additional gesture that prevents Viewport Legacy selection
self._widget.gestures += [_DragGesture()]
    def on_build(self):
        """Called when the model is changed; rebuilds the whole info widget"""
        carb.log_info("WidgetInfoManipulator - on_build")
self._root = sc.Transform(visible=False)
with self._root:
with sc.Transform(scale_to=sc.Space.SCREEN):
with sc.Transform(transform=sc.Matrix44.get_translation_matrix(0, 100, 0)):
# Label
with sc.Transform(look_at=sc.Transform.LookAt.CAMERA):
self._widget = sc.Widget(600, 250, update_policy=sc.Widget.UpdatePolicy.ON_MOUSE_HOVERED)
self._widget.frame.set_build_fn(self._on_build_widgets)
self._on_build_widgets()
def on_model_updated(self, _):
try:
# if we don't have selection then show nothing
if not self.model or not self.model.get_item("name"):
if hasattr(self, "_root"):
self._root.visible = False
return
else:
# Update the shapes
position = self.model.get_as_floats(self.model.get_item("position"))
self._root.transform = sc.Matrix44.get_translation_matrix(*position)
self._root.visible = True
except:
return
#how to select parent ?
# name = self.model.get_item('name')
# if name.find("Collision") != -1:
# return
# Update the shape name
if hasattr(self, "_name_label"):
name = self.model.get_item('name')
infoBlurb = name.replace("/World/RGrps/", "")
infoBlurb = infoBlurb.replace("/World/Subs/", "")
infoBlurb = infoBlurb.replace("/World/Locs/", "")
infoBlurb = infoBlurb.replace("/World/Types/", "")
try:
self._path_label.text = f"{infoBlurb}"
except:
self._path_label = ui.Label("Path:", height=20, alignment=ui.Alignment.LEFT)
try:
self._name_label.text = "Name: " + self.model.get_custom('res_name')
except:
self._name_label = ui.Label("Name:" , height=40, alignment=ui.Alignment.LEFT)
try:
self._grp_label.text = "ResGrp: " + self.model.get_custom('res_grp')
except:
self._grp_label = ui.Label("RGrp:", height=60, alignment=ui.Alignment.LEFT)
try:
self._type_label.text = "Type: " + self.model.get_custom('res_type')
except:
self._type_label = ui.Label("Type: ", height=80, alignment=ui.Alignment.LEFT)
try:
self._location_label.text = "Location: " + self.model.get_custom('res_loc')
except:
self._location_label = ui.Label("Location: ", height=100, alignment=ui.Alignment.LEFT)
try:
self._sub_label.text = "Sub: " + self.model.get_custom('res_sub')
except:
self._sub_label = ui.Label("Sub: " , height=120, alignment=ui.Alignment.LEFT)
try:
self._cost_label.text = "Cost: " + self.model.get_custom('res_cost')
except:
self._cost_label = ui.Label("Cost:", height=140, alignment=ui.Alignment.LEFT)
| 8,503 | Python | 40.082125 | 117 | 0.57662 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/button.py | from .constant import COLORS, LightColors, DarkColors, MouseKey
from .style import get_ui_style
from .rectangle import DashRectangle, OpaqueRectangle
from omni import ui
class InvisibleButton(ui.Button):
STYLE = {
"InvisibleButton": {"background_color": COLORS.TRANSPARENT},
"InvisibleButton.Label": {"color": COLORS.TRANSPARENT},
}
def __init__(self, *arg, **kwargs):
kwargs["style"] = self.STYLE
kwargs["style_type_name_override"] = "InvisibleButton"
super().__init__("##INVISIBLE", **kwargs)
class DashButton:
def __init__(
self,
height=0,
name=None,
image_source=None,
image_size=16,
image_padding=7,
dash_padding_x=2,
padding=6,
clicked_fn=None,
alignment=ui.Alignment.LEFT,
):
self._on_clicked_fn = clicked_fn
with ui.ZStack(height=height):
self._rectangle = OpaqueRectangle(height=height, name=name, style_type_name_override="Button")
with ui.HStack(spacing=0):
ui.Spacer(width=dash_padding_x + padding)
if alignment == ui.Alignment.CENTER:
ui.Spacer()
self._build_image_label(
image_source=image_source, name=name, image_size=image_size, image_padding=image_padding
)
ui.Spacer()
elif alignment == ui.Alignment.RIGHT:
ui.Spacer()
self._build_image_label(
image_source=image_source, name=name, image_size=image_size, image_padding=image_padding
)
else:
self._build_image_label(
image_source=image_source, name=name, image_size=image_size, image_padding=image_padding
)
ui.Spacer()
ui.Spacer(width=dash_padding_x)
DashRectangle(500, height, padding_x=dash_padding_x)
if clicked_fn:
self._rectangle.set_mouse_pressed_fn(lambda x, y, btn, flag: self._on_clicked())
@property
def enabled(self):
return self._label.enabled
@enabled.setter
def enabled(self, value):
self._label.enabled = value
def _build_image_label(self, image_source=None, name=None, image_size=16, image_padding=7):
if image_source:
with ui.VStack(width=image_size + 2 * image_padding):
ui.Spacer()
ui.Image(
image_source,
width=image_size,
height=image_size,
name=name,
style_type_name_override="Button.Image",
)
ui.Spacer()
self._label = ui.Label("Add", width=0, name=name, style_type_name_override="Button.Label")
def _on_clicked(self):
if self._label.enabled:
if self._on_clicked_fn is not None:
self._on_clicked_fn()
class ImageButton:
LIGHT_STYLE = {
"ImageButton": {"background_color": COLORS.TRANSPARENT, "border_width": 0, "border_radius": 2.0},
"ImageButton:hovered": {"background_color": LightColors.ButtonHovered},
"ImageButton:pressed": {"background_color": LightColors.ButtonPressed},
"ImageButton:selected": {"background_color": LightColors.ButtonSelected},
}
DARK_STYLE = {
"ImageButton": {"background_color": COLORS.TRANSPARENT, "border_width": 0, "border_radius": 2.0},
"ImageButton:hovered": {"background_color": 0xFF373737},
"ImageButton:selected": {"background_color": 0xFF1F2123},
}
UI_STYLES = {"NvidiaLight": LIGHT_STYLE, "NvidiaDark": DARK_STYLE}
def __init__(
self,
name,
width,
height,
image,
clicked_fn,
tooltip=None,
visible=True,
enabled=True,
activated=False,
tooltip_fn=None,
):
self._name = name
self._width = width
self._height = height
self._tooltip = tooltip
self._tooltip_fn = tooltip_fn
self._visible = visible
self._enabled = enabled
self._image = image
self._clicked_fn = clicked_fn
self._activated = activated
self._panel = None
self._bkground_widget = None
self._image_widget = None
self._mouse_x = 0
self._mouse_y = 0
def create(self, style=None, padding_x=2, padding_y=2):
ww = self.get_width()
hh = self.get_height()
if style is None:
style = ImageButton.UI_STYLES[get_ui_style()]
self._panel = ui.ZStack(spacing=0, width=ww, height=hh, style=style)
with self._panel:
with ui.Placer(offset_x=0, offset_y=0):
self._bkground_widget = ui.Rectangle(
name=self._name, style_type_name_override="ImageButton", width=ww, height=hh
)
self._bkground_widget.visible = self._visible and self._enabled
with ui.Placer(offset_x=padding_x, offset_y=padding_y):
self._image_widget = ui.Image(
self._image,
width=ww - padding_x * 2,
height=hh - padding_y * 2,
fill_policy=ui.FillPolicy.STRETCH,
mouse_pressed_fn=(lambda x, y, key, m: self._on_mouse_pressed(x, y, key)),
mouse_released_fn=(lambda x, y, key, m: self._on_mouse_released(x, y, key)),
opaque_for_mouse_events=True,
)
if self._bkground_widget is None or self._image_widget is None:
return
if self._tooltip:
self._image_widget.set_tooltip(self._tooltip)
if self._tooltip_fn:
self._tooltip_fn(self._image_widget, self._tooltip)
if not self._enabled:
self._bkground_widget.enabled = False
self._image_widget.enabled = False
def destroy(self):
if self._panel:
self._panel.clear()
if self._bkground_widget:
self._bkground_widget = None
if self._image_widget:
self._image_widget = None
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self.enable(value)
def get_width(self):
return self._width
def get_height(self):
return self._height
def get_widget_pos(self):
x = self._bkground_widget.screen_position_x
y = self._bkground_widget.screen_position_y
return (x, y)
def enable(self, enabled):
if self._enabled != enabled:
self._enabled = enabled
self._bkground_widget.visible = enabled and self._visible
self._image_widget.enabled = enabled
return False
def set_tooltip(self, tooltip):
self._tooltip = tooltip
if self._image_widget is not None:
self._image_widget.set_tooltip(self._tooltip)
def set_tooltip_fn(self, tooltip_fn: callable):
self._tooltip_fn = tooltip_fn
if self._image_widget is not None:
self._image_widget.set_tooltip_fn(lambda w=self._image_widget, name=self._tooltip: tooltip_fn(w, name))
def is_visible(self):
return self._visible
def set_visible(self, visible=True):
if self._visible != visible:
self._visible = visible
self._bkground_widget.visible = visible and self._enabled
self._image_widget.visible = visible
def identify(self, name):
return self._name == name
def get_name(self):
return self._name
def is_activated(self):
return self._activated
def activate(self, activated=True):
if self._activated == activated:
return False
self._activated = activated
if self._bkground_widget is not None:
self._bkground_widget.selected = activated
def set_image(self, image):
if self._image != image:
self._image = image
self._image_widget.source_url = image
return False
def _on_mouse_pressed(self, x, y, key):
if not self._enabled:
return
# For left button, we do trigger the click event on mouse_released.
# For other buttons, we trigger the click event right now since Widget will never has
# mouse_released event for any buttons other than left.
if key != MouseKey.LEFT:
self._clicked_fn(key, x, y)
else:
self._mouse_x = x
self._mouse_y = y
def _on_mouse_released(self, x, y, key):
if self._enabled:
if key == MouseKey.LEFT:
self._clicked_fn(MouseKey.LEFT, x, y)
class SimpleImageButton(ImageButton):
def __init__(self, image, size, clicked_fn=None, name=None, style=None, padding=2):
self._on_clicked_fn = clicked_fn
if name is None:
name = "default_image_btn"
super().__init__(name, size, size, image, self._on_clicked)
self.create(style=style, padding_x=padding, padding_y=padding)
@property
def clicked_fn(self):
return self._on_clicked_fn
@clicked_fn.setter
def clicked_fn(self, value):
        self._on_clicked_fn = value
def _on_clicked(self, button, x, y):
if self._on_clicked_fn:
self._on_clicked_fn()
class BoolImageButton(ImageButton):
def __init__(self, true_image, false_image, size, state=True, clicked_fn=None):
self._true_image = true_image
self._false_image = false_image
self._state = state
self._on_clicked_fn = clicked_fn
super().__init__("default_image_btn", size, size, self._get_image(), self._on_clicked)
self.create()
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self.set_state(value, notify=False)
def set_state(self, state, notify=False):
self._state = state
self.set_image(self._get_image())
if notify and self._on_clicked_fn:
self._on_clicked_fn(self._state)
def _on_clicked(self, button, x, y):
self.set_state(not self._state, notify=True)
def _get_image(self):
return self._true_image if self._state else self._false_image
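# Illustrative usage (a sketch; the image paths and callback are assumptions):
#   btn = BoolImageButton("icons/on.png", "icons/off.png", size=24,
#                         clicked_fn=lambda state: print("toggled to", state))
#   btn.set_state(False, notify=True)   # swaps the image and fires clicked_fn(False)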
| 10,437 | Python | 32.242038 | 115 | 0.566734 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_type.py |
from .group_base import GroupBase
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
from .math_utils import calcPlaneSizeForGroup
from .prim_utils import cleanup_prim_path
import locale
import asyncio
import carb
import omni.client
import omni.kit.app
import omni.ui as ui
import omni.usd
import omni.kit.commands
class TypeGrpView(GroupBase):
def __init__(self, viewPath:str, scale:float, upAxis:str, shapeUpAxis:str, symPlanes:bool, binPack:bool):
self._scale = scale
self._upAxis = upAxis
self._shapeUpAxis = shapeUpAxis
self._view_path = viewPath
self._symPlanes = symPlanes
self._binPack = binPack
super().__init__()
def calcGroupPlaneSizes(self):
self._dataStore._lcl_groups = []
self._dataStore._lcl_sizes = []
if len(self._dataStore._type_count) == 0:
self._dataManager.refresh_data()
#check it again
if len(self._dataStore._type_count) == 0:
return 0 # ---------- NO DATA
#Clone the location groups
gpz = self._dataStore._type_count.copy()
#How big should the groups be?
for grp in gpz:
size = calcPlaneSizeForGroup(
scaleFactor=self._scale,
resourceCount=self._dataStore._type_count.get(grp)
)
#mixed plane sizes
self._dataStore._lcl_sizes.append(size)
grp = cleanup_prim_path(self, grp)
self._dataStore._lcl_groups.append({ "group":grp, "size":size })
#Should the groups all be the same size ?
if self._symPlanes:
self._dataStore._lcl_sizes.sort(reverse=True)
maxPlaneSize = self._dataStore._lcl_sizes[0] #largest plane
groupCount = len(self._dataStore._lcl_sizes) #count of groups
#Reset plane sizes
self._dataStore._lcl_sizes = []
for count in range(0,groupCount):
self._dataStore._lcl_sizes.append(maxPlaneSize)
self._dataStore._lcl_groups = []
for grp in gpz:
self._dataStore._lcl_groups.append({ "group":grp, "size":maxPlaneSize })
def calulateCosts(self):
for g in self._dataStore._lcl_groups:
#Get the cost by resource group
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
try:
                self._cost = str(locale.currency(self._dataStore._type_cost[g["group"]]))
except:
self._cost = "" # blank not 0, blank means dont show it at all
    #Abstract method to load resources
def loadResources(self):
self.view_path = Sdf.Path(self.root_path.AppendPath(self._view_path))
if (len(self._dataStore._lcl_groups)) >0 :
#Cycle all the loaded groups
for grp in self._dataStore._lcl_groups:
carb.log_info(grp["group"])
#Cleanup the group name for a prim path
group_prim_path = self.view_path.AppendPath(grp["group"])
#match the group to the resource map
for key, values in self._dataStore._map_type.items():
#Is this the group?
if key == grp["group"]:
asyncio.ensure_future(self.loadGroupResources(key, group_prim_path, values))
def selectGroupPrims(self):
self.paths = []
stage = omni.usd.get_context().get_stage()
base = Sdf.Path("/World/Types")
curr_prim = stage.GetPrimAtPath(base)
for prim in Usd.PrimRange(curr_prim):
# only process shapes and meshes
tmp_path = str(prim.GetPath())
if '/CollisionMesh' not in tmp_path:
if '/CollisionPlane' not in tmp_path:
self.paths.append(tmp_path)
# for grp in self._dataStore._map_subscription.keys():
# grp_path = base.AppendPath(cleanup_prim_path(self, grp))
# self.paths.append(str(grp_path))
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=[],
new_selected_paths=self.paths,
expand_in_stage=True)
| 4,647 | Python | 31.732394 | 109 | 0.572628 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/scatter_on_planes.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["scatterOnFixedPlane"]
from typing import List, Optional
import random
from pxr import Gf
def scatterOnFixedPlane(
count: List[int],
distance: List[float],
scaleFactor:float=1.0
):
"""
Returns generator with pairs containing transform matrices and ids to
arrange multiple objects.
### Arguments:
`count: List[int]`
Number of matrices to generage per axis
`distance: List[float]`
The distance between objects per axis
"""
vectors = {}
id_cnt = 0
for i in range(count[0]):
x = (i - 0.5 * (count[0] - 1)) * distance[0]*scaleFactor
for j in range(count[1]):
y = (j - 0.5 * (count[1] - 1)) * distance[1]*scaleFactor
for k in range(count[2]):
z = (k - 0.5 * (count[2] - 1)) * distance[2]*scaleFactor
#yield([x, y, z])
vec_id = id_cnt
vec = {vec_id: Gf.Vec3f(x,y,z)}
vectors.update(vec)
id_cnt = id_cnt +1
return vectors
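# Illustrative usage (a sketch, not part of the original file):
#   scatterOnFixedPlane(count=[2, 2, 1], distance=[100.0, 100.0, 100.0])
#   returns {0: Gf.Vec3f(-50, -50, 0), 1: Gf.Vec3f(-50, 50, 0),
#            2: Gf.Vec3f(50, -50, 0),  3: Gf.Vec3f(50, 50, 0)}
#   i.e. a 2x2 grid of positions centered on the plane's origin.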
| 1,510 | Python | 25.982142 | 76 | 0.602649 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style_button.py |
__all__ = ["button_style"]
button_styles = {
"Button": {
"border_width": 0.5,
"border_radius": 0.0,
"margin": 4.0,
"padding": 4.0,
"font_size" : 20
},
"Button::subs": {
"background_color": 0x0096C8FA,
"background_gradient_color": 0xFFFAB26D,
"border_color": 0xFFFD761D,
},
"Button.Label::subs": {
"color": 0xFFFFFFFF,
"font_size" : 16
},
"Button::subs:hovered": {
"background_color": 0xFFFF6E00,
"background_gradient_color": 0xFFFFAE5A
},
"Button::subs:pressed": {
"background_color": 0xFFFAB26D,
"background_gradient_color": 0xFFFF7E09
},
"Button::clear": {
"background_color": 0xFFFF7E09,
"background_gradient_color": 0xFFFAB26D,
"border_color": 0xFFFD761D,
},
"Button.Label::clear": {
"color": 0xFFFFFFFF,
"font_size" : 16
},
"Button::rs": {
"background_color": 0xFFFF7E09,
"background_gradient_color": 0xFFFAB26D,
"border_color": 0xFFFD761D,
},
"Button::rs:hovered": {
"background_color": 0xFFFF6E00,
"background_gradient_color": 0xFFFFAE5A,
"border_color": 0xFFFD761D,
},
"Button::rs:pressed": {
"background_color": 0xFFFAB26D,
"background_gradient_color": 0xFFFF7E09,
"border_color": 0xFFFD761D,
},
"Button.Label::rs": {
"color": 0xFFFFFFFF,
"font_size" : 16
},
"Button::clr": {
"background_color": 0xEE7070,
"background_gradient_color": 0xFFFAB26D,
"border_color": 0xFFFD761D,
},
"Button.Label::clr": {
"color": 0xFFFFFFFF,
"font_size" : 16
},
"Button::clr:hovered": {
"background_color": 0xFFFF6E00,
"background_gradient_color": 0xFFFFAE5A
},
"Button::clr:pressed": {
"background_color": 0xFFFAB26D,
"background_gradient_color": 0xFFFF7E09
},
"Button::help": {
"background_color": 0x6464c8,
"background_gradient_color": 0xFFFAB26D,
"border_color": 0xFFFD761D,
},
"Button.Label::help": {
"color": 0xFFFFFFFF,
"font_size" : 16
},
"Button::help:hovered": {
"background_color": 0xFFFF6E00,
"background_gradient_color": 0xFFFFAE5A
},
"Button::help:pressed": {
"background_color": 0xFFFAB26D,
"background_gradient_color": 0xFFFF7E09
},
}
| 2,488 | Python | 25.478723 | 48 | 0.546222 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/pillow_text.py | import glob
from PIL import Image, ImageDraw, ImageFont, ImageDraw
import io
import asyncio
import os
import time
import sys
import os.path as path
from pathlib import Path
import omni.kit.pipapi
from datetime import datetime, timedelta
#Create and draw images in async context
async def draw_text_on_image_at_position_async (
input_image_path:str,
output_image_path:str,
textToDraw:str,
costToDraw:str,
x:int, y:int,
fillColor:str,
font:str,
fontSize:int):
await draw_text_on_image_at_position(
input_image_path,
output_image_path,
textToDraw,
costToDraw,
x, y, fillColor, font, fontSize
)
def is_file_older_than_x_days(file, days=1):
file_time = path.getmtime(file)
    # Convert the file age from seconds to hours and compare against 24 hours * days
return ((time.time() - file_time) / 3600 > 24*days)
#Create a new image with text
def draw_text_on_image_at_position(
input_image_path:str,
output_image_path:str,
textToDraw:str,
costToDraw:str,
x:int, y:int,
fillColor:str, font:str, fontSize:int):
makeFile = False
if not os.path.exists(input_image_path):
print("No src file: " + str(input_image_path))
return
if os.path.exists(output_image_path):
if is_file_older_than_x_days(output_image_path, 30):
makeFile = True
else:
makeFile = True
if makeFile:
print("Refreshing Image " + str(output_image_path) + " with text: " + textToDraw + " cst: " + costToDraw)
#font = ImageFont.load(str(font))
font = ImageFont.truetype(str(font), fontSize, encoding="unic")
print("Loading src file: " + str(input_image_path))
image = Image.open(input_image_path)
image = image.rotate(270, expand=1)
draw = ImageDraw.Draw(image)
textW, textH = draw.textsize(textToDraw, font) # how big is our text
costW, costH = draw.textsize(costToDraw, font) # how big is cost text
if costToDraw != "":
costToDraw = str(costToDraw) + " /month"
draw.text((x,y-75), textToDraw, font_size=fontSize,anchor="ls", font=font, fill=fillColor)
draw.text((x,y+75), costToDraw, font_size=(fontSize-50), anchor="ls", font=font, fill="red")
else:
draw.text((x, y-50), textToDraw, font_size=fontSize,anchor="ls", font=font, fill=fillColor)
image = image.rotate(-270, expand=1)
with open(output_image_path, 'wb') as out_file:
image.save(out_file, 'PNG')
#image.save(output_image_path)
# def create_image_with_text(output_image_path:str, textToDraw:str, x:int, y:int, h:int, w:int, color:str, alignment:str, fillColor:str, fontPath:str, fontSize:int):
# image = Image.new("RGB", (h, w), color)
# draw = ImageDraw.Draw(image)
# # Load font from URI
# #font1 = "https://github.com/googlefonts/Arimo/raw/main/fonts/ttf/Arimo-Regular.ttf"
# font1 = 'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Black.ttf?raw=true'
# font = load_font_from_uri(fontSize, font1)
# #font = ImageFont.truetype(fontPath, layout_engine=ImageFont.LAYOUT_BASIC, size=fontSize)
# draw.text((x, y), textToDraw, font=font, anchor="ls", fill=fillColor)
# image.save(output_image_path)
#angled text
#https://stackoverflow.com/questions/245447/how-do-i-draw-text-at-an-angle-using-pythons-pil
def draw_text_90_into(text: str, into, at):
# Measure the text area
font = ImageFont.truetype (r'C:\Windows\Fonts\Arial.ttf', 16)
wi, hi = font.getsize (text)
# Copy the relevant area from the source image
img = into.crop ((at[0], at[1], at[0] + hi, at[1] + wi))
# Rotate it backwards
img = img.rotate (270, expand = 1)
# Print into the rotated area
d = ImageDraw.Draw (img)
d.text ((0, 0), text, font = font, fill = (0, 0, 0))
# Rotate it forward again
img = img.rotate (90, expand = 1)
# Insert it back into the source image
# Note that we don't need a mask
into.paste (img, at)
if __name__ == "__main__":
#create_image_with_text("temp\\output2.jpg", "Mmmuuuurrrrrrrrrr", 10.0,525,575,575,"white", "left", "black", "temp\\airstrike.ttf", 44)
    draw_text_on_image_at_position("temp\\tron_grid_test.png", "temp\\output_test.png", "defaultresourcegroup_ea", "$299.00", 200, 1800, "yellow", r"C:\Windows\Fonts\Arial.ttf", 110)
#'https://github.com/googlefonts/roboto/blob/main/src/hinted/Roboto-Black.ttf?raw=true'
# input_image_path:str,
# output_image_path:str,
# textToDraw:str,
# costToDraw:str,
# x:int, y:int,
# fillColor:str, fontSize:int):
| 4,635 | Python | 30.753424 | 169 | 0.63754 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/math_utils.py |
# Calculate the size of the Plane to place Resource Group X's items on
# using 2D spaces here, will locate all the items on a plane the size of the group
# 1x, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, etc... what size do we need?
import math
from pxr import Gf
__all__ = ["calcPlaneSizeForGroup"]
from re import I
from typing import List
from .scatter_on_planes import scatterOnFixedPlane
# Calculate the size of the Group Plane to create
def calcPlaneSizeForGroup(scaleFactor:float, resourceCount: int):
# 1-30 squared, return the square root, this is the size of the space needed
for i in [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484,529,576,625,676,729, 784, 841,900]:
if resourceCount > 0 and resourceCount <= i:
return float(((math.sqrt(i)*100)*scaleFactor)+1)
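# Worked example (illustrative): resourceCount=10 -> the next square is 16,
# sqrt(16) = 4, so with scaleFactor=1.0 the plane edge is 4*100*1.0 + 1 = 401.0.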
#FIGURES OUT WHERE TO PUT THE PRIMS ON A VARIABLE SIZED-PLANE
def calculateGroupTransforms(self, scale:float, count:int ):
#ex 400.0 -> 800 - 400 plane is 800x800
plane_size = (calcPlaneSizeForGroup(scaleFactor=scale, resourceCount=count)*2)
plane_class = ((plane_size/100)/2)
#distance of objects depending on grid size..
dist = plane_size / plane_class
    #Use NVIDIA's scatter algorithm to position objects on varying sized planes
transforms = scatterOnFixedPlane(
count=[int(plane_class), int(plane_class), 1], # Distribute accross the plane class
distance=[dist,dist,dist],
scaleFactor=scale
)
#there should be at least one transform
if len(transforms) == 0:
vec_id = 0
vec = {vec_id: Gf.Vec3f(0,0,0)}
transforms[0] = vec
return transforms
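# Illustrative trace (assuming scale=1.0, count=10): plane_size = 401.0*2 = 802.0,
# plane_class = (802/100)/2 = 4.01, dist = 802/4.01 = 200.0, so the resources are
# laid out on a 4x4 grid of positions ~200 units apart via scatterOnFixedPlane.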
| 1,686 | Python | 32.739999 | 146 | 0.677343 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/scatter_complex.py | __all__ = ["distributePlanes"]
from typing import List, Optional
import random
from pxr import Gf
def distributePlanes(
UpAxis: 'Z',
count: List[int],
distance: List[float],
sizes: List[float],
randomization: List[float],
seed: Optional[int] = None,
scaleFactor:float=1.0
):
#print("UpAxis = " + UpAxis)
random.seed(seed)
if(UpAxis == 'Z'):
nUpPlane = count[0]*count[1]
elif(UpAxis == 'X'):
nUpPlane = count[1]*count[2]
else:#(UpAxis == 'Y'):
nUpPlane = count[2]*count[0]
for i in range(len(sizes)):
iPlane = i % nUpPlane
if(UpAxis == 'Z'):
ix = iPlane // count[1]
iy = iPlane % count[1]
iz = i // nUpPlane
elif(UpAxis == 'X'):
iy = iPlane // count[2]
iz = iPlane % count[2]
ix = i // nUpPlane
else:#(UpAxis == 'Y'):
iz = iPlane // count[0]
ix = iPlane % count[0]
iy = i // nUpPlane
x = ix*((distance[0]+sizes[i])*scaleFactor) * randomization[0]
y = iy*((distance[1]+sizes[i])*scaleFactor) * randomization[1]
z = iz*((distance[2]+sizes[i])*scaleFactor) * randomization[2]
yield(Gf.Vec3d(x,y,z))
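# Illustrative usage (a sketch, not part of the original file):
#   list(distributePlanes('Z', count=[2, 2, 1], distance=[10.0, 10.0, 10.0],
#                         sizes=[100.0]*4, randomization=[1.0, 1.0, 1.0]))
#   yields 4 Gf.Vec3d positions spaced by distance + plane size on the Z-up plane:
#   (0, 0, 0), (0, 110, 0), (110, 0, 0), (110, 110, 0).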
| 1,255 | Python | 24.632653 | 70 | 0.517928 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_location.py |
from .group_base import GroupBase
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
from .math_utils import calcPlaneSizeForGroup
from .prim_utils import cleanup_prim_path
import locale
import asyncio
import carb
import omni.client
import omni.kit.app
import omni.ui as ui
import omni.usd
import omni.kit.commands
class LocGrpView(GroupBase):
def __init__(self, viewPath:str, scale:float, upAxis:str, shapeUpAxis:str, symPlanes:bool, binPack:bool):
self._root_path = Sdf.Path(viewPath)
self._scale = scale
self._upAxis = upAxis
self._shapeUpAxis = shapeUpAxis
self._view_path = viewPath
self._symPlanes = symPlanes
self._binPack = binPack
super().__init__()
def calcGroupPlaneSizes(self):
self._dataStore._lcl_groups = []
self._dataStore._lcl_sizes = []
if len(self._dataStore._location_count) == 0:
self._dataManager.refresh_data()
#check it again
if len(self._dataStore._location_count) == 0:
return 0 # ---------- NO DATA
#Clone the location groups
gpz = self._dataStore._location_count.copy()
#How big should the groups be?
for grp in gpz:
size = calcPlaneSizeForGroup(
scaleFactor=self._scale,
resourceCount=self._dataStore._location_count.get(grp)
)
#mixed plane sizes
self._dataStore._lcl_sizes.append(size)
grp = cleanup_prim_path(self, grp)
self._dataStore._lcl_groups.append({ "group":grp, "size":size })
#Should the groups all be the same size ?
if self._symPlanes:
self._dataStore._lcl_sizes.sort(reverse=True)
maxPlaneSize = self._dataStore._lcl_sizes[0] #largest plane
groupCount = len(self._dataStore._lcl_sizes) #count of groups
#Reset plane sizes
self._dataStore._lcl_sizes = []
for count in range(0,groupCount):
self._dataStore._lcl_sizes.append(maxPlaneSize)
self._dataStore._lcl_groups = []
for grp in gpz:
self._dataStore._lcl_groups.append({ "group":grp, "size":maxPlaneSize })
def calulateCosts(self):
for g in self._dataStore._lcl_groups:
#Get the cost by resource group
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
try:
                self._cost = str(locale.currency(self._dataStore._location_cost[g["group"]]))
except:
self._cost = "" # blank not 0, blank means dont show it at all
    #Abstract method to load resources
def loadResources(self):
self.view_path = Sdf.Path(self.root_path.AppendPath(self._view_path))
if (len(self._dataStore._lcl_groups)) >0 :
#Cycle all the loaded groups
for grp in self._dataStore._lcl_groups:
carb.log_info(grp["group"])
#Cleanup the group name for a prim path
group_prim_path = self.view_path.AppendPath(grp["group"])
#match the group to the resource map
for key, values in self._dataStore._map_location.items():
#Is this the group?
if key == grp["group"]:
asyncio.ensure_future(self.loadGroupResources(key, group_prim_path, values))
def selectGroupPrims(self):
self.paths = []
base = Sdf.Path(self.root_path.AppendPath(self._view_path))
for grp in self._dataStore._map_location.keys():
grp_path = base.AppendPath(cleanup_prim_path(self, grp))
self.paths.append(str(grp_path))
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=[],
new_selected_paths=self.paths,
expand_in_stage=True)
| 3,918 | Python | 31.932773 | 109 | 0.578356 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style.py | from .constant import COLORS, LightColors, DarkColors, FontSize
from omni import ui
import carb.settings
def get_ui_style():
settings = carb.settings.get_settings()
style = settings.get_as_string("/persistent/app/window/uiStyle")
if not style:
style = "NvidiaDark"
return style
class DefaultWidgetStyle:
LIGHT = {
"Button": {
"background_color": LightColors.Button,
"border_radius": 2.0,
"stack_direction": ui.Direction.LEFT_TO_RIGHT,
},
"Button.Label": {"color": LightColors.Background},
"Button:hovered": {"background_color": LightColors.ButtonHovered},
"Button:pressed": {"background_color": LightColors.ButtonPressed},
"CollapsableFrame": {
"color": COLORS.TRANSPARENT,
"background_color": COLORS.TRANSPARENT,
"secondary_color": COLORS.TRANSPARENT,
},
"CollapsableFrame:hovered": {"secondary_color": COLORS.TRANSPARENT},
"CollapsableFrame:pressed": {"secondary_color": COLORS.TRANSPARENT},
"ComboBox": {
"color": LightColors.Text,
"background_color": LightColors.Background,
"selected_color": LightColors.BackgroundSelected,
"border_radius": 1,
"padding_width": 0,
"padding_height": 4,
"secondary_padding": 8,
},
"ComboBox:disabled": {"color": LightColors.TextDisabled},
"Field": {"background_color": LightColors.Background, "color": LightColors.Text, "border_radius": 2},
"Plot": {"background_color": LightColors.Background, "color": LightColors.TextSelected, "border_radius": 1},
"Triangle": {"background_color": LightColors.Background},
}
DARK = {
"Button": {
"background_color": DarkColors.Button,
"border_radius": 2.0,
"stack_direction": ui.Direction.LEFT_TO_RIGHT,
},
"Button.Label": {"color": DarkColors.Text},
"Button:hovered": {"background_color": DarkColors.ButtonHovered},
"Button:pressed": {"background_color": DarkColors.ButtonPressed},
"CollapsableFrame": {
"color": COLORS.TRANSPARENT,
"background_color": COLORS.TRANSPARENT,
"secondary_color": COLORS.TRANSPARENT,
},
"CollapsableFrame:hovered": {"secondary_color": COLORS.TRANSPARENT},
"CollapsableFrame:pressed": {"secondary_color": COLORS.TRANSPARENT},
"ComboBox": {
"color": DarkColors.Text,
"background_color": DarkColors.Background,
"selected_color": DarkColors.BackgroundSelected,
"border_radius": 1,
},
"ComboBox:disabled": {"color": DarkColors.TextDisabled},
"Field": {"background_color": DarkColors.Background, "color": DarkColors.Text, "border_radius": 2},
"Plot": {"background_color": DarkColors.Background, "color": DarkColors.TextSelected, "border_radius": 1},
"Triangle": {"background_color": DarkColors.Background},
}
@staticmethod
def get_style(ui_style=None):
if ui_style is None:
ui_style = get_ui_style()
if ui_style == "NvidiaDark":
return DefaultWidgetStyle.DARK
else:
return DefaultWidgetStyle.LIGHT
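# Usage sketch (hypothetical widget tree): apply the resolved style to a container.
#   with ui.VStack(style=DefaultWidgetStyle.get_style()):
#       ui.Button("OK")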
| 3,323 | Python | 39.048192 | 116 | 0.607884 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/omni_utils.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["get_selection", "duplicate_prims", "create_prims, create_shaders"]
from typing import List
import omni.usd
import asyncio
import omni.kit.commands
from pxr import Sdf, Gf, Usd, UsdGeom
from .prim_utils import create_plane
def get_selection() -> List[str]:
"""Get the list of currently selected prims"""
return omni.usd.get_context().get_selection().get_selected_prim_paths()
def duplicate_prims(transforms: List = [], prim_names: List[str] = [], target_path: str = "", mode: str = "Copy"):
"""
Returns generator with pairs containing transform matrices and ids to
arrange multiple objects.
### Arguments:
`transforms: List`
Pairs containing transform matrices and ids to apply to new objects
`prim_names: List[str]`
Prims to duplicate
`target_path: str`
The parent for the new prims
`mode: str`
"Reference": Create a reference of the given prim path
"Copy": Create a copy of the given prim path
"PointInstancer": Create a PointInstancer
"""
if mode == "PointInstancer":
omni.kit.commands.execute(
"ScatterCreatePointInstancer",
path_to=target_path,
transforms=transforms,
prim_names=prim_names,
)
return
usd_context = omni.usd.get_context()
# Call commands in a single undo group. So the user will undo everything
# with a single press of ctrl-z
with omni.kit.undo.group():
# Create a group
omni.kit.commands.execute("CreatePrim", prim_path=target_path, prim_type="Scope")
for i, matrix in enumerate(transforms):
id = matrix[1]
matrix = matrix[0]
path_from = Sdf.Path(prim_names[id])
path_to = Sdf.Path(target_path).AppendChild(f"{path_from.name}{i}")
# Create a new prim
if mode == "Copy":
omni.kit.commands.execute("CopyPrims", paths_from=[path_from.pathString], paths_to=[path_to.pathString])
elif mode == "Reference":
omni.kit.commands.execute(
"CreateReference", usd_context=usd_context, prim_path=path_from, path_to=path_to, asset_path=""
)
else:
continue
# Move
omni.kit.commands.execute("TransformPrim", path=path_to, new_transform_matrix=matrix)
def create_prims(up_axis:str, plane_size:List[float], transforms: List = [], prim_names: List[str] = [], parent_path:str = ""):
"""
Returns generator with pairs containing transform matrices and ids to arrange multiple objects.
### Arguments:
`transforms: List`
Pairs containing transform matrices and ids to apply to new objects
`prim_names: List[str]`
Prims to create
`target_paths: List[str]`
The paths for the new prims
"""
usd_context = omni.usd.get_context()
stage_ref = usd_context.get_stage()
# Call commands in a single undo group. So the user will undo everything
# with a single press of ctrl-z
#with omni.kit.undo.group():
#print("Prim count: " + str(len(prim_names)))
# Create a group
#omni.kit.commands.execute("CreatePrim", prim_path=parent_path, prim_type="Scope")
    for i, position in enumerate(transforms):
        if i >= len(prim_names):
            break
        path = Sdf.Path(parent_path).AppendPath(prim_names[i]["group"])
        print(str(i) + " adding plane: " + str(path) + " " + str(plane_size[i]) + " @ " + str(position))
        if prim_names[i]["group"] == "observation_deck":
            position[0] = position[0] + 500
            position[1] = position[1] + 500
            position[2] = position[2] + 500
        omni.kit.commands.execute('AddGroundPlaneCommand',
            stage=stage_ref,
            planePath=str(path), #'/RGrp/Test_Positioning'
            axis='Z',
            size=plane_size[i],
            position=position,
            color=Gf.Vec3f(0,0,0))
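# Usage sketch (hypothetical values): two 100-unit ground planes under /Groups.
#   create_prims(up_axis="Z", plane_size=[100.0, 100.0],
#                transforms=[Gf.Vec3d(0, 0, 0), Gf.Vec3d(250, 0, 0)],
#                prim_names=[{"group": "grp_a"}, {"group": "grp_b"}],
#                parent_path="/Groups")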
def get_selected_prims(self):
"""
Get the currently selected prims in the scene
"""
context = omni.usd.get_context()
stage = context.get_stage()
prims = [stage.GetPrimAtPath(m) for m in context.get_selection().get_selected_prim_paths()]
return prims
async def create_shaders(base_path:str, prim_name:str ):
prim_path = Sdf.Path(base_path)
prim_path = prim_path.AppendPath("CollisionMesh")
#Select the Collision Mesh
omni.kit.commands.execute('SelectPrims',
old_selected_paths=[''],
new_selected_paths=[str(prim_path)],
expand_in_stage=True)
#print("Creating Shader: " + str(prim_path))
#Create a Shader for the Mesh
omni.kit.commands.execute('CreateAndBindMdlMaterialFromLibrary',
mdl_name='OmniPBR.mdl',
mtl_name='OmniPBR',
prim_name=prim_name,
mtl_created_list=None,
bind_selected_prims=True)
await omni.kit.app.get_app().next_update_async()
| 5,525 | Python | 32.490909 | 127 | 0.618643 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/data_manager.py | # This class manages both the offline data and online data
from typing import Dict
from .Singleton import Singleton
from .csv_data_manager import CSVDataManager
from .azure_data_manager_stub import AzureDataManager
#Azure API disabled in this version, due to:
from .data_store import DataStore
from .prim_utils import cleanup_prim_path, draw_image
from .azure_resource_map import shape_usda_name
from .pillow_text import draw_text_on_image_at_position_async, draw_text_on_image_at_position
from pathlib import Path
from pxr import Sdf
from .prim_utils import get_font_size_from_length
import omni.kit.notification_manager as nm
import omni
import asyncio
import logging
import shutil
import locale
import carb
# User either connects to Azure with connection info
# OR User can import data from data files
# depending on the mode, this class should return the same data
# it is a DataManager type resource
# User clicks Connect, Or Load, Goal is the same, load data from azure or files
# and give the user some basic info to show the connection / import worked.
# now connected, user can load different sets of resources and view then in different ways.
ASYNC_ENABLED = True
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.joinpath("temp")
RES_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources")
IMPORTS_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\import")
@Singleton
class DataManager:
def __init__(self):
self._callbacks = []
logging.getLogger("asyncio").setLevel(logging.WARNING)
carb.log_info("DataManager Created.")
self._dataStore = DataStore.instance()
self._offlineDataManager = CSVDataManager()
self._onlineDataManager = AzureDataManager()
self._dataStore.Load_Config_Data()
self.refresh_data()
#shut it down...
def destroy(self):
carb.log_info("DataManager Destroyed.")
self._callbacks = []
self._offlineDataManager = None
self._onlineDataManager = None
self._dataStore = None #this seems to cause problems
#add a callback for model changed
def add_model_changed_callback(self, func):
self._callbacks.append(func)
#Invoke the callbacks that want to know when the data changes
def _model_changed(self):
for c in self._callbacks:
c()
#Load data from file
def load_csv_files(self):
self._dataStore._groups.clear()
self._dataStore._resources.clear()
self._lcl_sizes = []
self._lcl_groups = []
self._lcl_resources = []
self._dataStore._source_of_data = "OfflineData"
self._dataStore.Save_Config_Data()
        #Load data from the CSV files
self._offlineDataManager.loadFiles()
#Aggregate the info, wait for it
if len(self._dataStore._groups) >0:
asyncio.ensure_future(self.process_data())
#Load data from Azure API
def load_from_api(self):
self._dataStore._groups.clear()
self._dataStore._resources.clear()
self._lcl_sizes = []
self._lcl_groups = []
self._lcl_resources = []
self._dataStore._source_of_data = "LiveAzureAPI"
self._dataStore.Save_Config_Data()
#Load the data and process it
if self._onlineDataManager.connect():
self._onlineDataManager.load_data()
#wait for data to finish loading
if len(self._dataStore._groups) >0:
asyncio.ensure_future(self.process_data())
def wipe_data(self):
self._dataStore.wipe_data()
self._model_changed()
def refresh_data(self):
if self._dataStore:
if self._dataStore._source_of_data =="OfflineData":
self.load_csv_files()
carb.log_info("CSV Data Refreshed.")
elif self._dataStore._source_of_data == "LiveAzureAPI":
self.load_from_api()
carb.log_info("Live Data Refreshed.")
else:
carb.log_info("Load some data!")
#Load the "All resources (Shapes) set"
#This sample contains 1 resource per group
def load_sample_resources(self):
if self._dataStore:
self._dataStore.wipe_data()
self._dataStore._source_of_data = "SampleFiles"
src_filel = IMPORTS_PATH.joinpath("TestShapes_RG.csv")
src_file2 = IMPORTS_PATH.joinpath("TestShapes_all.csv")
self.load_and_process_manual(src_filel, src_file2)
#Load the "Small Company sample"
def load_small_company(self):
if self._dataStore:
self._dataStore.wipe_data()
self._dataStore._source_of_data = "SampleFiles"
src_filel = IMPORTS_PATH.joinpath("SmallCompany_RG.csv")
src_file2 = IMPORTS_PATH.joinpath("SmallCompany_all.csv")
self.load_and_process_manual(src_filel, src_file2)
#Load the "Large Company sample"
def load_large_company(self):
if self._dataStore:
self._dataStore.wipe_data()
self._dataStore._source_of_data = "SampleFiles"
src_filel = IMPORTS_PATH.joinpath("LargeCompany_RG.csv")
src_file2 = IMPORTS_PATH.joinpath("LargeCompany_all.csv")
self.load_and_process_manual(src_filel, src_file2)
#load the files async
    def load_and_process_manual(self, grpFile, rgFile):
        #load the files
        self._offlineDataManager.loadFilesManual(grpFile, rgFile)
#Aggregate the info
if len(self._dataStore._groups) >0:
asyncio.ensure_future(self.process_data())
#Aggregate subscription, resources counts to DataManager Dictionaries
async def process_data(self):
carb.log_info("Processing Data...")
        #For every resource...
for key in self._dataStore._resources:
obj = self._dataStore._resources[key]
#yield control
await asyncio.sleep(0)
### AGGREGATE COUNTS
self.AggregateCountsAsync(obj)
### AGGREGATE COSTS
self.AggregateCostsAsync(obj)
### MAP RESOURCES TO AGGREGATES
self.MapResourcesToGroupsAsync(obj)
#Pre-create images for the groups
carb.log_info("Creating images..")
await self.CreateImagesForGroups()
carb.log_info("Creating images complete..")
#let everyone know, stuff changed...
self._model_changed()
#output aggregation results to console
carb.log_info("Data processing complete..")
carb.log_info(self._dataStore._source_of_data + " data refreshed.")
carb.log_info(str(len(self._dataStore._resources)) + " Resources loaded from " + self._dataStore._source_of_data)
carb.log_info(str(len(self._dataStore._groups)) + " Groups loaded from " + self._dataStore._source_of_data)
#Create Images for all the maps
async def CreateImagesForGroups(self):
carb.log_info("Processing images async.")
#go through all the maps and create images
#this will save a ton of time later
if self._dataStore._bgl_file_path is None:
return
if self._dataStore._bgm_file_path is None:
return
if self._dataStore._bgh_file_path is None:
return
src_filel = RES_PATH.joinpath(self._dataStore._bgl_file_path)
src_filem = RES_PATH.joinpath(self._dataStore._bgm_file_path)
src_fileh = RES_PATH.joinpath(self._dataStore._bgh_file_path)
src_image = src_filel
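        # The cost band picks the background image (thresholds as coded below):
        # under $500 -> low image, $500-$1500 -> medium image, over $1500 -> high image.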
#SUBSCRIPTIONS
#We need to create images for each group
for rec in self._dataStore._map_subscription:
recText = rec #Name of subscription
#Let the Ui breathe ;)
#TODO async
#await omni.kit.app.get_app().next_update_async()
output_file = DATA_PATH.joinpath(recText + ".png")
cost_output_file = DATA_PATH.joinpath(recText + "-cost.png")
textToDraw = recText
costToDraw =""
            #We don't care here if the user wants costs or not, we are pre-making images
try:
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
rawCost = float(self._dataStore._subscription_cost[recText])
costToDraw = locale.currency(self._dataStore._subscription_cost[recText])
carb.log_info ("RawCost: " + recText + " $" + str(rawCost))
carb.log_info ("Cost: " + recText + " $" + str(costToDraw))
                if rawCost < 500:
                    src_image = src_filel
                elif rawCost < 1500:
                    src_image = src_filem
                else:
                    src_image = src_fileh
            except Exception:
                costToDraw = ""
#todo change image based on score
draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="")
draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw)
#LOCATIONS
#We need to create images for each group
for rec in self._dataStore._map_location:
recText = rec
#Let the Ui breathe ;)
#await omni.kit.app.get_app().next_update_async()
temp_file = recText + ".png"
output_file = DATA_PATH.joinpath(temp_file)
cost_output_file = DATA_PATH.joinpath(recText + "-cost.png")
textToDraw = recText
costToDraw =""
try:
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
rawCost = float(self._dataStore._location_cost[recText])
costToDraw = locale.currency(self._dataStore._location_cost[recText])
carb.log_info ("RawCost: " + recText + " $" + str(rawCost))
carb.log_info ("Cost: " + recText + " $" + str(costToDraw))
                if rawCost < 500:
                    src_image = src_filel
                elif rawCost < 1500:
                    src_image = src_filem
                else:
                    src_image = src_fileh
            except Exception:
                costToDraw = ""
draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="")
draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw)
#RESOURCE GROUPS
#We need to create images for each group
for rec in self._dataStore._map_group:
recText = rec
#Let the Ui breathe ;)
#await omni.kit.app.get_app().next_update_async()
output_file = DATA_PATH.joinpath(recText + ".png")
cost_output_file = DATA_PATH.joinpath(recText + "-cost.png")
textToDraw = recText
costToDraw =""
try:
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
                rawCost = float(self._dataStore._group_cost[recText])
                costToDraw = locale.currency(self._dataStore._group_cost[recText])
                carb.log_info("RawCost: " + recText + " $" + str(rawCost))
                carb.log_info("Cost: " + recText + " $" + str(costToDraw))
                if rawCost < 500:
                    src_image = src_filel
                elif rawCost < 1500:
                    src_image = src_filem
                else:
                    src_image = src_fileh
            except Exception:
                costToDraw = ""
draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="")
draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw)
#TYPES
#We need to create images for each group
for rec in self._dataStore._map_type:
recText = rec
#Let the Ui breathe ;)
#await omni.kit.app.get_app().next_update_async()
output_file = DATA_PATH.joinpath(recText + ".png")
cost_output_file = DATA_PATH.joinpath(recText + "-cost.png")
textToDraw = recText
costToDraw =""
try:
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
rawCost = float(self._dataStore._type_cost[recText])
costToDraw = locale.currency(self._dataStore._type_cost[recText])
carb.log_info ("RawCost: " + recText + " $" + str(rawCost))
carb.log_info ("Cost: " + recText + " $" + str(costToDraw))
                if rawCost < 500:
                    src_image = src_filel
                elif rawCost < 1500:
                    src_image = src_filem
                else:
                    src_image = src_fileh
            except Exception:
                costToDraw = ""
draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="")
draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw)
#TAGS
#We need to create images for each group
for rec in self._dataStore._map_tag:
recText = rec
#Let the Ui breathe ;)
#await omni.kit.app.get_app().next_update_async()
output_file = DATA_PATH.joinpath(recText + ".png")
cost_output_file = DATA_PATH.joinpath(recText + "-cost.png")
textToDraw = recText
costToDraw =""
try:
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
rawCost = float(self._dataStore._tag_cost[recText])
costToDraw = locale.currency(self._dataStore._tag_cost[recText])
carb.log_info ("RawCost: " + recText + " $" + str(rawCost))
carb.log_info ("Cost: " + recText + " $" + str(costToDraw))
                if rawCost < 500:
                    src_image = src_filel
                elif rawCost < 1500:
                    src_image = src_filem
                else:
                    src_image = src_fileh
            except Exception:
                costToDraw = ""
draw_image(self, output_file=output_file, src_file=src_image, textToDraw=textToDraw, costToDraw="")
draw_image(self, output_file=cost_output_file, src_file=src_image, textToDraw=textToDraw, costToDraw=costToDraw)
carb.log_info("Processing images complete..")
#Calculate the low, min, max, mean costs and score each group according to its peers
def ScoreCosts(self):
pass
#Async context
def AggregateCostsAsync(self, obj):
### AGGREGATE COSTS
#Cost per Sub
subKey = cleanup_prim_path(self, obj["subscription"])
if subKey not in self._dataStore._subscription_cost.keys():
self._dataStore._subscription_cost[subKey] = float(obj["lmcost"])
else:
self._dataStore._subscription_cost[subKey] = float(self._dataStore._subscription_cost[subKey]) + float(obj["lmcost"])
#Cost per Location
locKey = cleanup_prim_path(self, obj["location"])
if locKey not in self._dataStore._location_cost.keys():
self._dataStore._location_cost[locKey] = float(obj["lmcost"])
else:
self._dataStore._location_cost[locKey] = float(self._dataStore._location_cost[locKey]) + float(obj["lmcost"])
#Cost per Type
typeKey = cleanup_prim_path(self, obj["type"])
if typeKey not in self._dataStore._type_cost.keys():
self._dataStore._type_cost[typeKey] = float(obj["lmcost"])
else:
self._dataStore._type_cost[typeKey] = float(self._dataStore._type_cost[typeKey]) + float(obj["lmcost"])
#Cost per Group
grpKey = cleanup_prim_path(self, obj["group"])
if grpKey not in self._dataStore._group_cost.keys():
self._dataStore._group_cost[grpKey] = float(obj["lmcost"])
else:
self._dataStore._group_cost[grpKey] = float(self._dataStore._group_cost[grpKey]) + float(obj["lmcost"])
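    # A more compact, equivalent accumulation pattern (sketch only; not wired in):
    #   def _accumulate(store: dict, key: str, amount: float) -> None:
    #       store[key] = store.get(key, 0.0) + amount
    # AggregateCountsAsync below follows the same shape with integer counts.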
#Async Context
def AggregateCountsAsync(self, obj):
### AGGREGATE COUNTS
#Count per Sub
subKey = cleanup_prim_path(self, obj["subscription"])
if subKey not in self._dataStore._subscription_count.keys():
self._dataStore._subscription_count[subKey] = 1
else:
self._dataStore._subscription_count[subKey] = self._dataStore._subscription_count[subKey] + 1
#Count per Location
locKey = cleanup_prim_path(self, obj["location"])
if locKey not in self._dataStore._location_count.keys():
self._dataStore._location_count[locKey] = 1
else:
self._dataStore._location_count[locKey] = self._dataStore._location_count[locKey] + 1
#Count per Type
typeKey = cleanup_prim_path(self, obj["type"])
if typeKey not in self._dataStore._type_count.keys():
self._dataStore._type_count[typeKey] = 1
else:
self._dataStore._type_count[typeKey] = self._dataStore._type_count[typeKey] + 1
#Count per Group
grpKey = cleanup_prim_path(self, obj["group"])
if grpKey not in self._dataStore._group_count.keys():
self._dataStore._group_count[grpKey] = 1
else:
self._dataStore._group_count[grpKey] = self._dataStore._group_count[grpKey] + 1
#Given a resource, Map it to all the groups it belongs to.
def MapResourcesToGroupsAsync(self, obj):
#Get the mapped shape and figure out the prim path for the map
# Set a default
shape_to_render = "omniverse://localhost/Resources/3dIcons/scene.usd"
#NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST
try:
resName = obj["name"]
typeName = cleanup_prim_path(self, obj["type"]) #needs to be clean, used to map to shapes
group = obj["group"]
location = obj["location"]
sub = obj["subscription"]
cost =obj["lmcost"]
shape_to_render = shape_usda_name[typeName]
        except Exception:
            carb.log_info("Error getting prim values for resource: " + str(obj.get("name", "unknown")))
# SUBSCRIPTION MAP
self.map_objects(resName, typeName, group, location, sub, cost, "/Subs" ,shape_to_render, self._dataStore._map_subscription, obj, "subscription")
# GROUP MAP
self.map_objects(resName, typeName, group, location, sub, cost, "/RGrps", shape_to_render, self._dataStore._map_group, obj, "group")
# TYPE MAP
self.map_objects(resName, typeName, group, location, sub, cost, "/Types", shape_to_render, self._dataStore._map_type, obj, "type")
# LOCATION MAP
self.map_objects(resName, typeName, group, location, sub, cost, "/Locs", shape_to_render, self._dataStore._map_location, obj, "location")
#TODO TAGMAP
#self.map_objects(typeName, "/Tag", shape_to_render, self._dataStore._tag_map, obj, "tag")
#Maps objects to create to each aggregate
def map_objects(self, resName, typeName,grp, loc, sub, cost, root, shape, map, obj, field:str):
cleaned_group_name = cleanup_prim_path(self, Name=obj[field])
carb.log_info(cleaned_group_name)
map_obj = {"name": resName, "type":typeName, "shape":shape, "location":loc, "subscription":sub, "group":grp, "cost":cost }
if cleaned_group_name not in map.keys():
#new map!
map[cleaned_group_name] = [map_obj]
else:
#get the map for this group, add this item
mapObj = map[cleaned_group_name]
mapObj.append(map_obj)
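        # Equivalent one-liner (sketch): map.setdefault(cleaned_group_name, []).append(map_obj)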
#passthrough to csv manager
def select_file(self, fileType: str):
self._offlineDataManager.select_file(fileType=fileType)
def clicked_ok(self):
pass
def sendNotify(self, message:str, status:nm.NotificationStatus):
        # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html
ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok)
nm.post_notification(
message,
hide_after_timeout=True,
duration=5,
status=status,
button_infos=[ok_button]
)
#-- SINGLETON SUPPORT
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
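# Usage sketch: because of the @Singleton decorator, access goes through instance():
#   dm = DataManager.instance()
#   dm.refresh_data()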
| 21,491 | Python | 37.174067 | 159 | 0.593551 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/stage_position.py | __all__ = ["scatterWithPlaneSize"]
from typing import List, Optional
import random
from pxr import Gf
def scatterWithPlaneSize(
count: List[int],
distance: List[float],
sizes: List[float],
randomization: List[float],
id_count: int = 1,
seed: Optional[int] = None,
scaleFactor:float=1.0
):
"""
Returns generator with pairs containing transform matrices and ids to
arrange multiple objects.
### Arguments:
    `count: List[int]`
        Number of matrices to generate per axis
    `distance: List[float]`
        The distance between objects per axis
    `sizes: List[float]`
        Plane size per group; used to offset the generated positions
    `randomization: List[float]`
        Random distance per axis
    `id_count: int`
        Count of different ids
    `seed: int`
        If seed is omitted or None, the current system time is used. If seed
        is an int, it is used directly.
    """
print("Generating " + str(id_count) + " postions: " + str(count[0]) + "|" + str(count[1]) + "|" + str(count[2]))
for i in range(id_count):
        x = (i - 0.5 * (count[0] - 1)) * (distance[0]*scaleFactor) + (sizes[i]*2)
        for j in range(count[1]):
            y = (j - 0.5 * (count[1] - 1)) * (distance[1]*scaleFactor) + (sizes[i]*2)
            for k in range(count[2]):
                z = (k - 0.5 * (count[2] - 1)) * (distance[2]*scaleFactor) + (sizes[i]*2)
result = Gf.Vec3d(x,y,z)
yield (result)
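# Usage sketch (hypothetical values): positions for two groups on a 2x2x1 grid.
#   for pos in scatterWithPlaneSize(count=[2, 2, 1], distance=[250, 250, 250],
#                                   sizes=[100, 300], randomization=[1, 1, 1],
#                                   id_count=2, scaleFactor=1.0):
#       print(pos)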
def position_resource_on_target(
planeSize: float,
resourceNumber: int,
):
    # TODO: stub - placement rules per plane size are not implemented yet
if (planeSize == 100.0):
pass
if (planeSize == 200.0):
pass
if (planeSize == 300.0):
pass
if (planeSize == 400.0):
pass
if (planeSize == 500.0):
pass
if (planeSize == 600.0):
pass
| 2,305 | Python | 24.340659 | 116 | 0.509328 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/data_store.py |
__all__ = ["Save_Config_Data", "Load_Config_Data"]
import carb
from .Singleton import Singleton
import omni.ui as ui
from .combo_box_model import ComboBoxModel
from pathlib import Path
import os
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.joinpath("temp")
RES_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("data\\resources")
@Singleton
class DataStore():
def __init__(self):
print("DataStore initialized")
#Azure Resoruce Groups
#NAME,SUBSCRIPTION,LOCATION
self._groups = {}
#All the reosurces
#NAME,TYPE,RESOURCE GROUP,LOCATION,SUBSCRIPTION, LMCOST
self._resources = {}
#aggregated data (counts)
self._aad_count = {}
self._subscription_count = {}
self._location_count = {}
self._group_count = {}
self._type_count = {}
self._tag_count = {}
#aggregated data (costs)
self._aad_cost = {}
self._subscription_cost = {}
self._location_cost = {}
self._group_cost = {}
self._type_cost = {}
self._tag_cost = {}
#mapped resources (indexes)
self._map_aad = {}
self._map_subscription = {}
self._map_location = {}
self._map_group = {}
self._map_type = {}
self._map_tag = {}
#track where the data last came from (state)
self._source_of_data = ""
self._use_symmetric_planes = False
self._use_packing_algo = True
self._show_info_widgets = True
self._last_view_type = "ByGroup" # ByGroup, ByLocation, ByType, BySub, ByTag
self._scale_model = 1.0
#temporary arrays
#Calc Plane sizes based on items in group
self._lcl_sizes = [] #Plane sizes determined by resource counts
self._lcl_groups = [] #Group data for creating planes
self._lcl_resources = [] #Resources to show on stage
#Variables for files to import (UI settings)
self._rg_csv_file_path = ""
self._rg_csv_field_model = ui.SimpleStringModel()
self._rs_csv_file_path = ""
self._rs_csv_field_model = ui.SimpleStringModel()
self._bgl_file_path = ""
self._bgl_field_model = ui.SimpleStringModel()
self._bgm_file_path = ""
self._bgm_field_model = ui.SimpleStringModel()
self._bgh_file_path = ""
self._bgh_field_model = ui.SimpleStringModel()
#azure connection info
self._azure_tenant_id = ""
self._azure_tenant_id_model =ui.SimpleStringModel()
self._azure_client_id = ""
self._azure_client_id_model = ui.SimpleStringModel()
self._azure_client_secret = ""
self._azure_client_secret_model = ui.SimpleStringModel()
self._azure_subscription_id = ""
self._azure_subscription_id_model = ui.SimpleStringModel()
#composition options (UI settings)
self._symmetric_planes_model = ui.SimpleBoolModel(False)
self._packing_algo_model = ui.SimpleBoolModel(True)
self._show_info_widgets_model = ui.SimpleBoolModel(True)
self._primary_axis_model = ComboBoxModel("Z", "X", "Y") # track which Axis is up
self._shape_up_axis_model = ComboBoxModel("Z", "X", "Y") # track which Axis is up for the shape placement
self._composition_scale_model = ui.SimpleFloatModel()
self._options_count_models = [ui.SimpleIntModel(), ui.SimpleIntModel(), ui.SimpleIntModel()]
self._options_dist_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()]
self._options_random_models = [ui.SimpleFloatModel(), ui.SimpleFloatModel(), ui.SimpleFloatModel()]
self._composition_scale_model.as_float = 1.0
self._options_count_models[0].as_int = 10
self._options_count_models[1].as_int = 10
self._options_count_models[2].as_int = 1
self._options_dist_models[0].as_float = 250
self._options_dist_models[1].as_float = 250
self._options_dist_models[2].as_float = 250
self._options_random_models[0].as_float = 1.0
self._options_random_models[1].as_float = 1.0
self._options_random_models[2].as_float = 1.0
self.Load_Config_Data()
def wipe_data(self):
self._groups.clear()
self._resources.clear()
self._subscription_count = {}
self._location_count = {}
self._group_count = {}
self._type_count = {}
self._tag_count = {}
self._subscription_cost = {}
self._location_cost = {}
self._group_cost = {}
self._type_cost = {}
self._tag_cost = {}
self._map_aad = {}
self._map_subscription = {}
self._map_location = {}
self._map_group = {}
self._map_type = {}
self._map_tag = {}
self._lcl_sizes = []
self._lcl_groups = []
self._lcl_resources = []
carb.log_info("Data Cleared.")
def Save_Config_Data(self):
settings = carb.settings.get_settings()
if self._rg_csv_file_path != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/rg_csv_file_path", self._rg_csv_file_path)
if self._rs_csv_file_path != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/rs_csv_file_path", self._rs_csv_file_path)
if self._azure_tenant_id != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_tenant_id", self._azure_tenant_id)
if self._azure_client_id != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_client_id", self._azure_client_id)
if self._azure_subscription_id != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/azure_subscription_id", self._azure_subscription_id)
if self._source_of_data != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/last_data_source", self._source_of_data)
if self._bgl_file_path != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/bgl_file_path", self._bgl_file_path)
if self._bgm_file_path != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/bgm_file_path", self._bgm_file_path)
if self._bgh_file_path != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/bgh_file_path", self._bgh_file_path)
if self._last_view_type != "":
settings.set("/persistent/exts/meta.cloud.explorer.azure/last_view_type", self._last_view_type)
if self._options_count_models[0].as_int >0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/x_group_count", self._options_count_models[0].as_int)
if self._options_count_models[1].as_int >0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/y_group_count", self._options_count_models[1].as_int)
if self._options_count_models[2].as_int >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/z_group_count", self._options_count_models[2].as_int)
if self._options_dist_models[0].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/x_dist_count", self._options_dist_models[0].as_float)
if self._options_dist_models[1].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/y_dist_count", self._options_dist_models[1].as_float)
if self._options_dist_models[2].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/z_dist_count", self._options_dist_models[2].as_float)
if self._options_random_models[0].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/x_random_count", self._options_random_models[0].as_float)
if self._options_random_models[1].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/y_random_count", self._options_random_models[1].as_float)
if self._options_random_models[2].as_float >= 0:
settings.set("/persistent/exts/meta.cloud.explorer.azure/z_random_count", self._options_random_models[2].as_float)
settings.set("/persistent/exts/meta.cloud.explorer.azure/show_info_widgets", self._show_info_widgets)
#Load Saved config data
def Load_Config_Data(self):
settings = carb.settings.get_settings()
self._rg_csv_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/rg_csv_file_path")
self._rs_csv_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/rs_csv_file_path")
self._azure_tenant_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_tenant_id")
self._azure_client_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_client_id")
self._azure_subscription_id = settings.get("/persistent/exts/meta.cloud.explorer.azure/azure_subscription_id")
        # os.getenv returns None (it does not raise) when the variable is unset
        self._azure_client_secret = os.getenv('MCE_CLIENT_SECRET') or ""
self._source_of_data = settings.get("/persistent/exts/meta.cloud.explorer.azure/last_data_source")
self._bgl_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgl_file_path")
self._bgm_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgm_file_path")
self._bgh_file_path = settings.get("/persistent/exts/meta.cloud.explorer.azure/bgh_file_path")
self._last_view_type= settings.get("/persistent/exts/meta.cloud.explorer.azure/last_view_type")
self._show_info_widgets= settings.get("/persistent/exts/meta.cloud.explorer.azure/show_info_widgets")
try:
self._options_count_models[0].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_group_count")))
self._options_count_models[1].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_group_count")))
self._options_count_models[2].set_value(int(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_group_count")))
self._options_dist_models[0].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_dist_count")))
self._options_dist_models[1].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_dist_count")))
self._options_dist_models[2].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_dist_count")))
self._options_random_models[0].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/x_random_count")))
self._options_random_models[1].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/y_random_count")))
self._options_random_models[2].set_value(float(settings.get("/persistent/exts/meta.cloud.explorer.azure/z_random_count")))
        except Exception:  # fall back to defaults
            self._last_view_type = "ByGroup"
            self._composition_scale_model.set_value(1.0)
            self._options_count_models[0].set_value(10)
            self._options_count_models[1].set_value(10)
            self._options_count_models[2].set_value(1)
            self._options_dist_models[0].set_value(250)
            self._options_dist_models[1].set_value(250)
            self._options_dist_models[2].set_value(250)
            self._options_random_models[0].set_value(1.0)
            self._options_random_models[1].set_value(1.0)
            self._options_random_models[2].set_value(1.0)
#set defaults
if self._bgl_file_path is None:
self._bgl_file_path = RES_PATH.joinpath("grid_green.png")
self._bgm_file_path = RES_PATH.joinpath("grid_blue.png")
self._bgh_file_path = RES_PATH.joinpath("grid_red.png")
self.Save_Config_Data()
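    # Persistence sketch: every value above lives under the persistent carb settings
    # tree and can be read back directly, e.g.
    #   carb.settings.get_settings().get(
    #       "/persistent/exts/meta.cloud.explorer.azure/last_view_type")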
#-- SINGLETON SUPPORT
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
| 12,674 | Python | 47.563218 | 150 | 0.621272 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_aad.py |
from .group_base import GroupBase
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
import locale
class AADGrpView(GroupBase):
def __init__(self, viewPath:str, scale:float, upAxis:str, shapeUpAxis:str):
self._scale = scale
self._upAxis = upAxis
self._shapeUpAxis = shapeUpAxis
self.view_path = viewPath
super().__init__()
def calcGroupPlaneSizes(self):
pass
def calulateCosts(self):
        for g in self._dataStore._lcl_groups:
            #Get the cost for this AAD group (each entry is a {"group", "size"} dict)
            locale.setlocale(locale.LC_ALL, 'en_CA.UTF-8')
            try:
                self._cost = str(locale.currency(self._dataStore._aad_cost[g["group"]]))
            except Exception:
                self._cost = ""  # blank, not 0; blank means don't show it at all
def prepResources(self):
        pass  # Requires subclass implementation
| 879 | Python | 28.333332 | 83 | 0.588168 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/extension.py | import carb
import omni.ext
import omni.kit.ui
import omni.ui as ui
import omni.kit.pipapi
# Requires Code 2022.1.2+ - Blocked by typing_extensions incompatibility
from omni.kit.viewport.utility import get_active_viewport_window
from .views import MainView, WINDOW_NAME
from .viewport_scene import ViewportScene
from .object_info_model import ObjectInfoModel
from .widget_info_model import WidgetInfoModel
## AZURE API DISABLED IN 2022.1.3, due to PIP library problem with typing_extensions library.
#omni.kit.pipapi.install("azure-identity", module="azure-identity", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#omni.kit.pipapi.install("azure-mgmt-resource", module="azure-mgmt-resource", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#omni.kit.pipapi.install("pandas", module="pandas", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#sys.path.append("D:/python37/lib/site-packages")
#print(sys.modules.keys())
#from azure.mgmt.resource import ResourceManagementClient
#from azure.identity import AzureCliCredential
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`)
# will be instantiated when extension gets enabled and `on_startup(ext_id)` will be called.
# Later when extension gets disabled on_shutdown() is called
class MetaCloudExplorerAzure(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
MENU_PATH = f"Window/{WINDOW_NAME}"
def on_startup(self, ext_id):
carb.log_info("[meta.cloud.explorer.azure.extension] MetaCloudExplorer startup")
self._ext_id = ext_id
self._menu_path = f"Window/{WINDOW_NAME}"
self._window = None
def on_menu_click(menu, toggled):
"""Handles showing and hiding the window from the 'Windows' menu."""
if toggled:
# Get the active Viewport (which at startup is the default Viewport)
self._viewport_window = get_active_viewport_window()
# Issue an error if there is no Viewport
if not self._viewport_window:
carb.log_error(f"No Viewport Window to add {self._ext_id} scene to")
return
# Build out the scene
objModel = ObjectInfoModel()
widModel = WidgetInfoModel()
self._viewport_scene = ViewportScene(viewport_window=self._viewport_window,
ext_id=self._ext_id,widgetModel=widModel, objectModel=objModel)
self._window = MainView(WINDOW_NAME, widgetModel=widModel, objectModel=objModel)
else:
self._window.show()
# Deregister the function that shows the window from omni.ui
#ui.Workspace.set_show_window_fn(MetaCloudExplorerAzure.WINDOW_NAME, None)
self._menu = omni.kit.ui.get_editor_menu().add_item(self._menu_path, on_menu_click, True)
def on_shutdown(self):
carb.log_info("[meta.cloud.explorer.azure.extension] MetaCloudExplorer shutdown")
omni.kit.ui.get_editor_menu().remove_item(self._menu)
if hasattr(self, "_window"):
if self._window:
self._window.destroy()
self._window = None
if hasattr(self, "_viewport_scene"):
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = None
| 3,749 | Python | 43.117647 | 168 | 0.672713 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_data_manager.py | # import omni.kit.pipapi
# import carb
# import os
# import json
# import sys
# from datetime import datetime
# import omni.kit.notification_manager as nm
# omni.kit.pipapi.install("azure-identity", module="azure-identity", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
# omni.kit.pipapi.install("azure-mgmt-resource", module="azure-mgmt-resource", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
# sys.path.append("D:/python37/lib/site-packages")
# #print(sys.modules.keys())
# from .data_store import DataStore
# from .prim_utils import cleanup_prim_path
# from azure.mgmt.resource import ResourceManagementClient
# from azure.mgmt.resource.subscriptions import SubscriptionClient
# from azure.identity import ClientSecretCredential
# import asyncio
# import os
# # Manage resources and resource groups - create, update and delete a resource group,
# # deploy a solution into a resource group, export an ARM template. Create, read, update
# # and delete a resource
# class AzureDataManager():
# def __init__(self):
# self._dataStore = DataStore.instance() # Get A Singleton instance, store data here
# def get_token(self):
# # Acquire a credential object using CLI-based authentication.
# if self._dataStore._azure_tenant_id =="":
# self.sendNotify("MCE: Please enter Azure credentials to connect...", nm.NotificationStatus.WARNING)
# return
# if self._dataStore._azure_client_secret =="":
# self.sendNotify("MCE: Please enter Azure client secret to connect...", nm.NotificationStatus.WARNING)
# return False
# self.sendNotify("MCE: Connecting to Azure Tenant...", nm.NotificationStatus.INFO)
# self._token_credential = ClientSecretCredential(
# self._dataStore._azure_tenant_id,
# self._dataStore._azure_client_id,
# self._dataStore._azure_client_secret)
# # Retrieve subscription ID from environment variable.
# self._subscription_id = self._dataStore._azure_subscription_id
# return True
# #validate we can connect
# def connect(self):
# #Get a token
# valid = self.get_token()
# try:
# if (valid):
# # List subscriptions
# subscription_client = SubscriptionClient(credential=self._token_credential)
# page_result = subscription_client.subscriptions.list()
# result = [item for item in page_result]
# for item in result:
# carb.log_warn(item.subscription_id)
# carb.log_warn(item.tags)
# except:
# valid = False
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# return valid
# def clicked_ok():
# carb.log_info("User clicked ok")
# def sendNotify(self, message:str, status:nm.NotificationStatus):
# # https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager#
# import omni.kit.notification_manager as nm
# ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok)
# nm.post_notification(
# message,
# hide_after_timeout=True,
# duration=3,
# status=status,
# button_infos=[],
# )
# #Connect to API and load adata
# def load_data(self):
# self.save_connection_data()
# self.load_groups()
# self.load_resources()
# def save_connection_data(self):
# self._dataStore.Save_Config_Data()
# def load_resources(self):
# try:
# resCnt = 0
# for grp in self._dataStore._groups:
# resources = self.list_group_resources(grp)
# for res in resources:
# resCnt = resCnt +1
# name = cleanup_prim_path(self, Name=res.name)
# self._dataStore._resources[name] = {"name":name, "type": res.type, "group": grp, "location":res.location, "subscription":self._subscription_id, "lmcost": 0}
# #self._dataStore.res["name"] = {"name":res["name"], "type": type, "group": group, "location":location, "subscription":subscription, "lmcost": lmcost}
# self.sendNotify("MCE: Azure resources loaded: " + str(len(self._dataStore._resources)), nm.NotificationStatus.INFO)
# carb.log_info("Azure API resources loaded: " + str(len(self._dataStore._resources)))
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# def load_groups(self):
# try:
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# rg_groups = resource_client.resource_groups.list()
# grpCnt = 0
# for group in rg_groups:
# grp = {group.name:{"name":group.name, "subs": self._subscription_id, "location":group.location}}
# self._dataStore._groups.update(grp)
# grpCnt = grpCnt + 1
# self.sendNotify("MCE: Azure groups loaded: " + str(len(self._dataStore._groups)), nm.NotificationStatus.INFO)
# carb.log_info("Azure API groups loaded: " + str(len(self._dataStore._groups)))
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# #return a list of resource groups
# def get_resource_groups(self):
# # Obtain the management object for resources.
# try:
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# rg_groups = resource_client.resource_groups.list()
# return rg_groups
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# #for item in rg_groups:
# # print(item)
# # List Resources within the group
# def list_group_resources(self, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# carb.log_info("List all of the resources within the group")
# res = resource_client.resources.list_by_resource_group(groupName)
# return res
# #creates a resource group with groupName at location
# def create_resource_group(self, groupName:str, location:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# #
# # Managing resource groups
# #
# resource_group_params = {"location": location}
# # Create Resource group
# print("Create Resource Group: " + groupName + " @ " + location)
# self.print_item(
# resource_client.resource_groups.create_or_update(
# groupName, resource_group_params)
# )
# def print_item(self, group):
# """Print a ResourceGroup instance."""
# print("\tName: {}".format(group.name))
# print("\tId: {}".format(group.id))
# print("\tLocation: {}".format(group.location))
# print("\tTags: {}".format(group.tags))
# self.print_properties(group.properties)
# def print_properties(self, props):
# """Print a ResourceGroup properties instance."""
# if props and props.provisioning_state:
# print("\tProperties:")
# print("\t\tProvisioning State: {}".format(props.provisioning_state))
# print("\n\n")
# # Create a Key Vault in the Resource Group
# def create_key_vault(self, vaultName:str, location:str, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# print("Create a Key Vault via a Generic Resource Put")
# key_vault_params = {
# "location": location,
# "properties": {
# "sku": {"family": "A", "name": "standard"},
# "tenantId": self._dataStore._azure_tenant_id,
# "accessPolicies": [],
# "enabledForDeployment": True,
# "enabledForTemplateDeployment": True,
# "enabledForDiskEncryption": True
# },
# }
# resource_client.resources.begin_create_or_update(
# resource_group_name=groupName,
# resource_provider_namespace="Microsoft.KeyVault",
# parent_resource_path="",
# resource_type="vaults",
# # Suffix random string to make vault name unique
# resource_name=vaultName + datetime.utcnow().strftime("-%H%M%S"),
# api_version="2019-09-01",
# parameters=key_vault_params
# ).result()
# # Export the Resource group template
# def export_group_template(self, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# print("Export Resource Group Template")
# BODY = {
# 'resources': ['*']
# }
# result = json.dumps(
# resource_client.resource_groups.begin_export_template(
# groupName, BODY).result().template, indent=4
# )
# print(result + "\n\n")
# return result
# # def run_example():
# # """Resource Group management example."""
# # #
# # # Create the Resource Manager Client with an Application (service principal) token provider
# # #
# # subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None) # your Azure Subscription Id
# # credentials = DefaultAzureCredential()
# # client = ResourceManagementClient(credentials, subscription_id)
# # #
# # # Managing resource groups
# # #
# # resource_group_params = {"location": "westus"}
# # # List Resource Groups
# # print("List Resource Groups")
# # for item in client.resource_groups.list():
# # print_item(item)
# # # Create Resource group
# # print("Create Resource Group")
# # print_item(
# # client.resource_groups.create_or_update(
# # GROUP_NAME, resource_group_params)
# # )
# # # Modify the Resource group
# # print("Modify Resource Group")
# # resource_group_params.update(tags={"hello": "world"})
# # print_item(
# # client.resource_groups.update(
# # GROUP_NAME, resource_group_params)
# # )
# # # Create a Key Vault in the Resource Group
# # print("Create a Key Vault via a Generic Resource Put")
# # key_vault_params = {
# # "location": "westus",
# # "properties": {
# # "sku": {"family": "A", "name": "standard"},
# # "tenantId": os.environ["AZURE_TENANT_ID"],
# # "accessPolicies": [],
# # "enabledForDeployment": True,
# # "enabledForTemplateDeployment": True,
# # "enabledForDiskEncryption": True
# # },
# # }
# # client.resources.begin_create_or_update(
# # resource_group_name=GROUP_NAME,
# # resource_provider_namespace="Microsoft.KeyVault",
# # parent_resource_path="",
# # resource_type="vaults",
# # # Suffix random string to make vault name unique
# # resource_name="azureSampleVault" + datetime.utcnow().strftime("-%H%M%S"),
# # api_version="2019-09-01",
# # parameters=key_vault_params
# # ).result()
# # # List Resources within the group
# # print("List all of the resources within the group")
# # for item in client.resources.list_by_resource_group(GROUP_NAME):
# # print_item(item)
# # # Export the Resource group template
# # print("Export Resource Group Template")
# # BODY = {
# # 'resources': ['*']
# # }
# # print(
# # json.dumps(
# # client.resource_groups.begin_export_template(
# # GROUP_NAME, BODY).result().template, indent=4
# # )
# # )
# # print("\n\n")
# # # Delete Resource group and everything in it
# # print("Delete Resource Group")
# # delete_async_operation = client.resource_groups.begin_delete(GROUP_NAME)
# # delete_async_operation.wait()
# # print("\nDeleted: {}".format(GROUP_NAME))
| 13,676 | Python | 38.758721 | 178 | 0.568368 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/viewport_scene.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ViewportScene"]
from omni.ui import scene as sc
import omni.ui as ui
from .object_info_manipulator import ObjectInfoManipulator
from .widget_info_manipulator import WidgetInfoManipulator
class ViewportScene():
"""The Object Info Manipulator, placed into a Viewport"""
def __init__(self, viewport_window: ui.Window, ext_id: str, widgetModel, objectModel) -> None:
self._scene_view = None
self._viewport_window = viewport_window
self._widgetModel = widgetModel
self._objectModel = objectModel
# Create a unique frame for our SceneView
with self._viewport_window.get_frame(ext_id):
# Create a default SceneView (it has a default camera-model)
self._scene_view = sc.SceneView()
# Add the manipulator into the SceneView's scene
with self._scene_view.scene:
WidgetInfoManipulator(model=self._widgetModel)
ObjectInfoManipulator(model=self._objectModel)
# Register the SceneView with the Viewport to get projection and view updates
self._viewport_window.viewport_api.add_scene_view(self._scene_view)
def __del__(self):
self.destroy()
def destroy(self):
if self._scene_view:
# Empty the SceneView of any elements it may have
self._scene_view.scene.clear()
# Be a good citizen, and un-register the SceneView from Viewport updates
if self._viewport_window:
self._viewport_window.viewport_api.remove_scene_view(self._scene_view)
# Remove our references to these objects
self._viewport_window = None
self._scene_view = None
| 2,132 | Python | 40.01923 | 98 | 0.682458 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/__init__.py | from .extension import *
| 25 | Python | 11.999994 | 24 | 0.76 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/combo_box_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ComboBoxModel"]
from typing import Optional
import omni.ui as ui
class ListItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
def __repr__(self):
return f'"{self.name_model.as_string}"'
@property
def as_string(self):
"""Return the string of the name model"""
return self.name_model.as_string
class ComboBoxModel(ui.AbstractItemModel):
"""
Represents the model for lists. It's very easy to initialize it
with any string list:
string_list = ["Hello", "World"]
model = ComboBoxModel(*string_list)
ui.ComboBox(model)
"""
def __init__(self, *args, default=0):
super().__init__()
self._children = [ListItem(t) for t in args]
self._default = ui.SimpleIntModel()
self._default.as_int = default
# Update the combo box when default is changed
self._default.add_value_changed_fn(lambda _: self._item_changed(None))
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
# Since we are doing a flat list, we return the children of root only.
# If it's not root we return.
return []
return self._children
def get_item_value_model_count(self, item):
"""The number of columns"""
return 1
def get_item_value_model(self, item: Optional[ListItem], column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel.
"""
if item is None:
return self._default
return item.name_model
def get_current_item(self) -> ListItem:
"""Returns the currently selected item in ComboBox"""
return self._children[self._default.as_int]
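# Usage sketch: pick the third entry as the default and read its text back.
#   model = ComboBoxModel("X", "Y", "Z", default=2)
#   model.get_current_item().as_string  # -> "Z"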
| 2,391 | Python | 30.893333 | 82 | 0.637808 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/rectangle.py | from omni import ui
from .constant import COLORS
# A rectangle that will block mouse events to deeper
class OpaqueRectangle(ui.Rectangle):
def __init__(self, **kwargs):
kwargs["opaque_for_mouse_events"] = True
kwargs["mouse_pressed_fn"] = lambda *_: self._dummy()
super().__init__(**kwargs)
def _dummy(self):
pass
class ShortSeparator:
def __init__(self, height):
self._panel = ui.HStack(width=2, height=height)
with self._panel:
with ui.VStack(width=2, height=height, style={"Line": {"color": COLORS.LIGHRT_GARY, "border_width": 1}}):
ui.Spacer(height=2)
ui.Line(width=1, alignment=ui.Alignment.LEFT)
ui.Line(width=1, alignment=ui.Alignment.LEFT)
ui.Spacer(height=2)
class DashRectangle:
def __init__(self, width, height, padding_x=2, padding_y=2, w_step=10, h_step=10):
w_num = int((width - 2 * padding_x - 2) / w_step)
h_num = int((height - 2 * padding_y) / h_step)
with ui.ZStack():
with ui.VStack(style={"Line": {"color": COLORS.LIGHRT_GARY, "border_width": 1}}):
ui.Spacer(height=padding_y)
self._build_horizontal_line(w_num, padding_x)
ui.Spacer(height=height - 2 * padding_y - 2)
self._build_horizontal_line(w_num, padding_x)
ui.Spacer(height=padding_y)
with ui.HStack(height=height):
ui.Spacer(width=padding_x)
self._build_vertical_line(height, h_step, h_num, padding_y)
ui.Spacer()
self._build_vertical_line(height, h_step, h_num, padding_y)
ui.Spacer(width=padding_x - 2)
def _build_horizontal_line(self, w_num, padding_x):
with ui.HStack():
ui.Spacer(width=padding_x)
for _ in range(w_num):
ui.Line(width=6, height=1)
ui.Spacer()
ui.Line(height=1)
ui.Spacer(width=padding_x)
def _build_vertical_line(self, height, h_step, h_num, padding_y):
with ui.VStack(width=2, height=height):
ui.Spacer(height=padding_y)
for _ in range(h_num):
ShortSeparator(h_step)
| 2,269 | Python | 36.833333 | 117 | 0.555751 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/packer.py | """
MIT License
Copyright (c) 2016 Michael Shihrer ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
EXAMPLE USAGE: https://repl.it/NfZq/1
blocks = []
blocks.append(Block((21,10)))
blocks.append(Block((5,10)))
blocks.append(Block((5,10)))
blocks.append(Block((7,13)))
blocks.append(Block((2,4)))
pack = Packer()
pack.fit(blocks)
for block in blocks:
if block.fit:
print("size: {} loc: {}".format(block.size, block.fit.location))
else:
print("not fit: {}".format(block.size))
"""
"""
For a more fleshed out example, see: https://github.com/shihrer/BinPacker/tree/Develop
This has a number of optimizations like removing recursion so it can run on much,
much large inputs without hitting any stack limitations. Basically an order of
magnitude faster on very large inputs. Also includes a simple visualizer for the results
using pygame.
"""
class Packer:
"""
Defines a packer object to be used on a list of blocks.
"""
def __init__(self):
self.root = None
def fit(self, blocks):
"""
Initiates the packing.
        blocks: A list of block objects with a 'size' property representing (w,h) as a tuple.
"""
self.root = Node((0, 0), blocks[0].size)
for block in blocks:
some_node = self.find_node(self.root, block.size)
if some_node is not None:
block.fit = self.split_node(some_node, block.size)
else:
block.fit = self.grow_node(block.size)
return None
def find_node(self, some_node, size):
if some_node.used:
return self.find_node(some_node.right, size) or self.find_node(some_node.down, size)
elif (size[0] <= some_node.size[0]) and (size[1] <= some_node.size[1]):
return some_node
else:
return None
def split_node(self, some_node, size):
some_node.used = True
some_node.down = Node((some_node.location[0], some_node.location[1] + size[1]),
(some_node.size[0], some_node.size[1] - size[1]))
some_node.right = Node((some_node.location[0] + size[0], some_node.location[1]),
(some_node.size[0] - size[0], size[1]))
return some_node
def grow_node(self, size):
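        # Growth heuristic (descriptive note): only grow in a direction the new
        # block can actually span, and prefer whichever direction keeps the root
        # container closest to square, so the packing stays compact.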
can_go_down = size[0] <= self.root.size[0]
can_go_right = size[1] <= self.root.size[1]
should_go_down = can_go_down and (self.root.size[0] >= (self.root.size[1] + size[1]))
should_go_right = can_go_right and (self.root.size[1] >= (self.root.size[0] + size[0]))
if should_go_right:
return self.grow_right(size)
elif should_go_down:
return self.grow_down(size)
elif can_go_right:
return self.grow_right(size)
elif can_go_down:
return self.grow_down(size)
else:
return None
def grow_right(self, size):
new_root = Node((0, 0), (self.root.size[0] + size[0], self.root.size[1]))
new_root.used = True
new_root.down = self.root
new_root.right = Node((self.root.size[0], 0), (size[0], self.root.size[1]))
self.root = new_root
some_node = self.find_node(self.root, size)
if some_node is not None:
return self.split_node(some_node, size)
else:
return None
def grow_down(self, size):
new_root = Node((0, 0), (self.root.size[0], self.root.size[1] + size[1]))
new_root.used = True
new_root.down = Node((0, self.root.size[1]), (self.root.size[0], size[1]))
new_root.right = self.root
self.root = new_root
some_node = self.find_node(self.root, size)
if some_node is not None:
return self.split_node(some_node, size)
else:
return None
class Block:
"""
Defines an object Block with two properties.
size: tuple representing the blocks size (w,h)
fit: Stores a Node object for output.
"""
def __init__(self, size):
self.size = size
self.fit = None
class Node:
"""
    Defines an object Node for use in the packer function. Represents the space in which a block is placed.
used: Boolean to determine if a node has been used.
down: A node located beneath the current node.
right: A node located to the right of the current node.
size: A tuple (w,h) representing the size of the node.
location: A tuple representing the (x,y) coordinate of the top left of the node.
"""
def __init__(self, location, size):
self.used = False
self.down = None
self.right = None
self.size = size
self.location = location | 5,737 | Python | 34.419753 | 104 | 0.625937 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/style_meta.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["meta_window_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
# Pre-defined constants. It's possible to change them runtime.
cl.meta_window_hovered = cl("#2b2e2e")
cl.meta_window_text = cl("#9e9e9e")
fl.meta_window_attr_hspacing = 10
fl.meta_window_attr_spacing = 1
fl.meta_window_group_spacing = 2
# Pre-defined constants. It's possible to change them runtime.
fl_attr_hspacing = 10
fl_attr_spacing = 1
fl_group_spacing = 5
cl_attribute_dark = cl("#202324")
cl_attribute_red = cl("#ac6060")
cl_attribute_green = cl("#60ab7c")
cl_attribute_blue = cl("#35889e")
cl_line = cl("#404040")
cl_text_blue = cl("#5eb3ff")
cl_text_gray = cl("#707070")
cl_text = cl("#a1a1a1")
cl_text_hovered = cl("#ffffff")
cl_field_text = cl("#5f5f5f")
cl_widget_background = cl("#1f2123")
cl_attribute_default = cl("#505050")
cl_attribute_changed = cl("#55a5e2")
cl_slider = cl("#383b3e")
cl_combobox_background = cl("#252525")
cl_main_background = cl("#2a2b2c")
cls_temperature_gradient = [cl("#fe0a00"), cl("#f4f467"), cl("#a8b9ea"), cl("#2c4fac"), cl("#274483"), cl("#1f334e")]
cls_color_gradient = [cl("#fa0405"), cl("#95668C"), cl("#4b53B4"), cl("#33C287"), cl("#9fE521"), cl("#ff0200")]
cls_tint_gradient = [cl("#1D1D92"), cl("#7E7EC9"), cl("#FFFFFF")]
cls_grey_gradient = [cl("#020202"), cl("#525252"), cl("#FFFFFF")]
cls_button_gradient = [cl("#232323"), cl("#656565")]
# The main style dict
meta_window_style = {
"Label::attribute_name": {
"color": cl.meta_window_text,
"margin_height": fl.meta_window_attr_spacing,
"margin_width": fl.meta_window_attr_hspacing,
},
"CollapsableFrame::group": {"margin_height": fl.meta_window_group_spacing},
"CollapsableFrame::group:hovered": {"secondary_color": cl.meta_window_hovered},
# for Gradient Image
"ImageWithProvider::gradient_slider":{"border_radius": 4, "corner_flag": ui.CornerFlag.ALL},
"ImageWithProvider::button_background_gradient": {"border_radius": 3, "corner_flag": ui.CornerFlag.ALL},
}
#Functions from NVIDIA
def hex_to_color(hex: int) -> tuple:
    # unpack a packed color int (alpha, blue, green, red from high byte to low) into its byte values
red = hex & 255
green = (hex >> 8) & 255
blue = (hex >> 16) & 255
alpha = (hex >> 24) & 255
rgba_values = [red, green, blue, alpha]
return rgba_values
def _interpolate_color(hex_min: int, hex_max: int, intep):
max_color = hex_to_color(hex_max)
min_color = hex_to_color(hex_min)
color = [int((max - min) * intep) + min for max, min in zip(max_color, min_color)]
return (color[3] << 8 * 3) + (color[2] << 8 * 2) + (color[1] << 8 * 1) + color[0]
def get_gradient_color(value, max, colors):
step_size = len(colors) - 1
step = 1.0/float(step_size)
percentage = value / float(max)
    idx = int(percentage / step)
if idx == step_size:
color = colors[-1]
else:
color = _interpolate_color(colors[idx], colors[idx+1], percentage)
return color
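# Illustrative usage (a sketch, not part of the original file):
#   packed = get_gradient_color(50, 100, cls_grey_gradient)
#   # value 50 of 100 interpolates between the middle and last stops and returns
#   # a packed color int in the same byte order hex_to_color() expects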
def generate_byte_data(colors):
data = []
for color in colors:
data += hex_to_color(color)
_byte_provider = ui.ByteImageProvider()
_byte_provider.set_bytes_data(data, [len(colors), 1])
return _byte_provider
def build_gradient_image(colors, height, style_name):
byte_provider = generate_byte_data(colors)
ui.ImageWithProvider(byte_provider,fill_policy=omni.ui.IwpFillPolicy.IWP_STRETCH, height=height, name=style_name)
return byte_provider | 3,926 | Python | 34.378378 | 117 | 0.670148 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_sub.py |
from .group_base import GroupBase
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
from .math_utils import calcPlaneSizeForGroup
from .prim_utils import cleanup_prim_path
import locale
import asyncio
import carb
import omni.client
import omni.kit.app
import omni.ui as ui
import omni.usd
import omni.kit.commands
#--- SUBSCRIPTION BASED GROUPS
class SubGrpView(GroupBase):
def __init__(self, viewPath:str, scale:float, upAxis:str, shapeUpAxis:str, symPlanes:bool, binPack:bool):
self._scale = scale
self._upAxis = upAxis
self._shapeUpAxis = shapeUpAxis
self._view_path = viewPath
self._symPlanes = symPlanes
self._binPack = binPack
super().__init__()
def calcGroupPlaneSizes(self):
self._dataStore._lcl_groups = []
self._dataStore._lcl_sizes = []
if len(self._dataStore._subscription_count) == 0:
self._dataManager.refresh_data()
#check it again
if len(self._dataStore._subscription_count) == 0:
return 0
#Clone the sub groups
gpz = self._dataStore._subscription_count.copy()
for grp in gpz:
size = calcPlaneSizeForGroup(
scaleFactor=self._scale,
resourceCount=self._dataStore._subscription_count.get(grp)
)
#mixed plane sizes
self._dataStore._lcl_sizes.append(size)
grp = cleanup_prim_path(self, grp)
self._dataStore._lcl_groups.append({ "group":grp, "size":size })
#Should the groups all be the same size ?
if self._symPlanes:
self._dataStore._lcl_sizes.sort(reverse=True)
maxPlaneSize = self._dataStore._lcl_sizes[0] #largest plane
groupCount = len(self._dataStore._lcl_sizes) #count of groups
#Reset plane sizes
self._dataStore._lcl_sizes = []
for count in range(0,groupCount):
self._dataStore._lcl_sizes.append(maxPlaneSize)
self._dataStore._lcl_groups = []
for grp in gpz:
self._dataStore._lcl_groups.append({ "group":grp, "size":maxPlaneSize })
def calulateCosts(self):
for g in self._dataStore._lcl_groups:
#Get the cost by resource group
            locale.setlocale(locale.LC_ALL, 'en_CA.UTF-8')
            try:
                # g is a {"group": ..., "size": ...} dict, so look the cost up by group name
                self._cost = str(locale.currency(self._dataStore._subscription_cost[g["group"]]))
            except:
                self._cost = "" # blank, not 0: blank means don't show it at all
#Abstact to load resources
def loadResources(self):
self.view_path = Sdf.Path(self.root_path.AppendPath(self._view_path))
if (len(self._dataStore._lcl_groups)) >0 :
#Cycle all the loaded groups
for grp in self._dataStore._lcl_groups:
carb.log_info(grp["group"])
#Cleanup the group name for a prim path
group_prim_path = self.view_path.AppendPath(grp["group"])
#match the group to the resource map
for key, values in self._dataStore._map_subscription.items():
#Is this the group?
if key == grp["group"]:
asyncio.ensure_future(self.loadGroupResources(key, group_prim_path, values))
def selectGroupPrims(self):
self.paths = []
stage = omni.usd.get_context().get_stage()
base = Sdf.Path("/World/Subs")
curr_prim = stage.GetPrimAtPath(base)
for prim in Usd.PrimRange(curr_prim):
# only process shapes and meshes
tmp_path = str(prim.GetPath())
if '/CollisionMesh' not in tmp_path:
if '/CollisionPlane' not in tmp_path:
self.paths.append(tmp_path)
# for grp in self._dataStore._map_subscription.keys():
# grp_path = base.AppendPath(cleanup_prim_path(self, grp))
# self.paths.append(str(grp_path))
omni.kit.commands.execute('SelectPrimsCommand',
old_selected_paths=[],
new_selected_paths=self.paths,
expand_in_stage=True)
| 4,244 | Python | 32.96 | 109 | 0.57705 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/group_base.py | from abc import ABC, abstractmethod
import omni.client
import omni.kit.app
import omni.ui as ui
import omni.usd
import omni.kit.commands
from pathlib import Path
import shutil
import os
import asyncio
import locale
import carb
from .prim_utils import create_plane
from .prim_utils import get_font_size_from_length
from .prim_utils import draw_image
from .prim_utils import cleanup_prim_path, create_and_place_prim, get_parent_child_prim_path
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
from .data_manager import DataManager
from .data_store import DataStore
from .math_utils import calculateGroupTransforms
from .scatter_complex import distributePlanes
from .omni_utils import create_prims, create_shaders
from os.path import exists
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.joinpath("temp")
#Defines an Abstract class of an Aggregate set of resource views
#Children access specific data sets for the base
class GroupBase(ABC):
def __init__(self):
self._dataManager = DataManager.instance() # Get A Singleton instance
self._dataStore = DataStore.instance() # Get A Singleton instance
#root prim paths
self.root_path = Sdf.Path('/World')
# limit the number of rows read
self.max_elements = 5000
self.base_prim_size = 50
#limits
self.x_threshold = 50000
self.y_threshold = 50000
self.z_threshold = 50000
self.x_extent = 0
self.y_extent = 0
self.z_extent = 0
#Create the stage...
def initializeStage(self, stage_unit_per_meter:float):
self._stage = omni.usd.get_context().get_stage()
root_prim = self._stage.GetPrimAtPath(self.root_path)
# set the up axis
UsdGeom.SetStageUpAxis(self._stage, UsdGeom.Tokens.z)
# set the unit of the world
UsdGeom.SetStageMetersPerUnit(self._stage, stage_unit_per_meter)
self._stage.SetDefaultPrim(root_prim)
    #Depending on the Active View, "groups" will contain different aggregates.
#This function creates the GroundPlane objects on the stage for each group
async def CreateGroups(self, transforms):
#b = sorted(groups)
#carb.log_info("Sorted keys",b)
if (len(self._dataStore._lcl_groups)) >0 :
#Create new prims and then transform them
path = str(Sdf.Path(self.root_path).AppendPath(self._view_path))
create_prims(
transforms=transforms,
prim_names=self._dataStore._lcl_groups,
parent_path=path,
up_axis="Z",
plane_size=self._dataStore._lcl_sizes
)
#DEBUG
i=0
for grp in self._dataStore._lcl_groups:
prim_path = Sdf.Path(self.root_path).AppendPath(str(self._view_path))
prim_path = Sdf.Path(prim_path).AppendPath(grp["group"])
#Selects prim, creates associated OMNIPBR shaders
carb.log_info("Create shader " + grp["group"] + " of " + str(len(self._dataStore._lcl_groups)))
await create_shaders(base_path=prim_path, prim_name=grp["group"])
await omni.kit.app.get_app().next_update_async()
#Set the shader images for the groups
await self.AddShaderImages()
await omni.kit.app.get_app().next_update_async()
#Assign Images to the group Shaders
async def AddShaderImages(self):
        #Images have been pre-made, just assign them
for g in self._dataStore._lcl_groups:
clean = cleanup_prim_path(self, g["group"])
#Dont show cost
output_file = DATA_PATH.joinpath(clean + ".png")
file_exists = exists(output_file)
if not file_exists:
draw_image(self, output_file=output_file, src_file=self._dataStore._bgl_file_path , textToDraw=g, costToDraw="")
#Get Stage
stage = omni.usd.get_context().get_stage()
#Find the /Looks root
curr_prim = stage.GetPrimAtPath("/")
looks_path = ""
for prim in Usd.PrimRange(curr_prim):
if prim.GetPath() == "/Looks":
looks_path = "/Looks"
break
elif prim.GetPath() == "/World/Looks":
looks_path = "/World/Looks"
break
#carb.log_info("Looks root is: " +looks_path)
#Get the Shader and set the image property
if (looks_path == ""):
looks_path = "/Looks"
shader_path = Sdf.Path(looks_path)
shader_path = Sdf.Path(shader_path.AppendPath(clean))
shader_path = Sdf.Path(shader_path.AppendPath("Shader"))
#select the shader
selection = omni.usd.get_context().get_selection()
selection.set_selected_prim_paths([str(shader_path)], False)
#Get the Shader
shader_prim = stage.GetPrimAtPath(str(shader_path))
carb.log_info("Shader Attributes:-----" + str(shader_path))
#carb.log_info(shader_prim.GetAttributes())
carb.log_info("Set shader image " + str(output_file))
try:
shader_prim.CreateAttribute("inputs:diffuse_texture", Sdf.ValueTypeNames.Asset)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'),
value=str(output_file), prev=str(output_file))
await omni.kit.app.get_app().next_update_async()
except:
#Do it again!
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'),
value=str(output_file),prev=str(output_file))
#Change the Group Shaders textures to /from cost images
async def showHideCosts(self):
#Get Stage
stage = omni.usd.get_context().get_stage()
#Find the /Looks root
curr_prim = stage.GetPrimAtPath("/")
looks_path = ""
for prim in Usd.PrimRange(curr_prim):
if prim.GetPath() == "/Looks":
looks_path = "/Looks"
break
elif prim.GetPath() == "/World/Looks":
looks_path = "/World/Looks"
break
#carb.log_info("Looks root is: " +looks_path)
#Get the Shader and set the image property
if (looks_path == ""):
looks_path = "/Looks"
#Flip the shader images on all group shader prims
for g in self._dataStore._lcl_groups:
clean = cleanup_prim_path(self, g["group"])
cost_file = DATA_PATH.joinpath(clean + "-cost.png")
file_exists = exists(cost_file)
if not file_exists:
draw_image(self, output_file=cost_file, src_file=self._dataStore._bg_file_path , textToDraw=g, costToDraw=self._cost)
output_file = DATA_PATH.joinpath(clean + ".png")
file_exists = exists(output_file)
if not file_exists:
draw_image(self, output_file=output_file, src_file=self._dataStore._bg_file_path , textToDraw=g, costToDraw="")
#Get the Shaders
shader_path = Sdf.Path(looks_path)
shader_path = Sdf.Path(shader_path.AppendPath(clean))
shader_path = Sdf.Path(shader_path.AppendPath("Shader"))
#select the shader
selection = omni.usd.get_context().get_selection()
selection.set_selected_prim_paths([str(shader_path)], False)
#Get the Shader
shader_prim = stage.GetPrimAtPath(str(shader_path))
# carb.log_info("Shader Attributes:-----" + str(shader_path))
# carb.log_info(shader_prim.GetAttributes())
try:
currentVal = shader_prim.GetAttribute("inputs:diffuse_texture").Get()
if "-cost.png" not in str(currentVal):
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'),
value=str(cost_file), prev=str(output_file))
else:
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path(shader_path).AppendPath('.inputs:diffuse_texture'),
value=str(output_file), prev=str(cost_file))
except:
pass
#Load the resources from map
async def loadGroupResources(self,group_name, group_prim_path, values):
i=0 # prim count tracker
resCount = len(values)
        #Get the transform coordinates for a plane of this size with n resources
transforms = calculateGroupTransforms(self=self, scale=self._scale, count=resCount)
for res in values:
carb.log_info("Placing prim " + res["type"] + " " + str(i) + " of " + str(resCount))
resName = res["name"]
resShape = res["shape"]
resType = res["type"]
resGrp = res["group"]
resLoc = res["location"]
resSub = res["subscription"]
cost = res["cost"]
prim_vector = transforms[i]
carb.log_info("Creating prim path:" + str(group_prim_path) + " " + str(resName))
new_prim_path = get_parent_child_prim_path(self, group_prim_path, resName)
carb.log_info("New prim path:" + str(new_prim_path))
await create_and_place_prim(self,
prim_type= resType,
prim_name=resName,
grp_name=resGrp,
sub_name=resSub,
loc_name=resLoc,
cost=cost,
new_prim_path=str(new_prim_path),
shapeToRender=resShape,
scale=(self._scale*self.base_prim_size),
position=prim_vector
)
omni.kit.commands.execute('ChangeMetadata',
object_paths=[str(new_prim_path)],
key='kind',
value='component')
i=i+1 #increment resource id
# Create Group Planes for the aggregates
@abstractmethod
def calcGroupPlaneSizes(self):
pass # Requires subclass implm
# Calc Costs for the aggregates
@abstractmethod
def calulateCosts(self):
pass # Requires subclass implm
# Load the resources for this view's groups
@abstractmethod
def loadResources(self):
pass # Requires subclass implm
    #Select the active group's prims
@abstractmethod
def selectGroupPrims(self):
pass # Requires subclass implm | 11,167 | Python | 35.736842 | 142 | 0.563535 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/Singleton.py | class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Also, the decorated class cannot be
inherited from. Other than that, there are no restrictions that apply
to the decorated class.
To get the singleton instance, use the `instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
"""
def __init__(self, decorated):
self._decorated = decorated
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated) | 1,279 | Python | 33.594594 | 76 | 0.647381 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_data_manager_stub.py | # AZURE DATA MANAGER STUB
# MOSTLY COMMENTED OUT, API DISABLED
# CANT LOAD azure-identity library in Code 2022.1.2 +
# https://forums.developer.nvidia.com/t/pip-library-wont-load-in-2021-1-2/222719
import omni.kit.pipapi
import carb
import os
import json
import sys
from datetime import datetime
import omni.kit.notification_manager as nm
#omni.kit.pipapi.install("azure-identity", module="azure-identity", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#omni.kit.pipapi.install("azure-mgmt-resource", module="azure-mgmt-resource", ignore_import_check=True, ignore_cache=True, surpress_output=False,use_online_index=True )
#sys.path.append("D:/python37/lib/site-packages")
#print(sys.modules.keys())
from .data_store import DataStore
from .prim_utils import cleanup_prim_path
#from azure.mgmt.resource import ResourceManagementClient
#from azure.mgmt.resource.subscriptions import SubscriptionClient
#from azure.identity import ClientSecretCredential
import asyncio
import os
# Manage resources and resource groups - create, update and delete a resource group,
# deploy a solution into a resource group, export an ARM template. Create, read, update
# and delete a resource
class AzureDataManager():
def __init__(self):
pass
#self._dataStore = DataStore.instance() # Get A Singleton instance, store data here
def get_token(self):
# Acquire a credential object using CLI-based authentication.
# if self._dataStore._azure_tenant_id =="":
# self.sendNotify("MCE: Please enter Azure credentials to connect...", nm.NotificationStatus.WARNING)
# return
# if self._dataStore._azure_client_secret =="":
# self.sendNotify("MCE: Please enter Azure client secret to connect...", nm.NotificationStatus.WARNING)
# return False
# self.sendNotify("MCE: Connecting to Azure Tenant...", nm.NotificationStatus.INFO)
# self._token_credential = ClientSecretCredential(
# self._dataStore._azure_tenant_id,
# self._dataStore._azure_client_id,
# self._dataStore._azure_client_secret)
# # Retrieve subscription ID from environment variable.
# self._subscription_id = self._dataStore._azure_subscription_id
return False
#validate we can connect
def connect(self):
self.sendNotify("MCE: Azure API DISABLED in 2022.1.3, due to library incompatibility, use data files!", nm.NotificationStatus.WARNING)
#Get a token
# valid = self.get_token()
# try:
# if (valid):
# # List subscriptions
# subscription_client = SubscriptionClient(credential=self._token_credential)
# page_result = subscription_client.subscriptions.list()
# result = [item for item in page_result]
# for item in result:
# carb.log_warn(item.subscription_id)
# carb.log_warn(item.tags)
# except:
# valid = False
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
return False
def clicked_ok(self):
carb.log_info("User clicked ok")
def sendNotify(self, message:str, status:nm.NotificationStatus):
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager#
import omni.kit.notification_manager as nm
ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok)
nm.post_notification(
message,
hide_after_timeout=False,
duration=0,
status=status,
button_infos=[ok_button],
)
#Connect to API and load adata
def load_data(self):
pass
#self.save_connection_data()
#self.load_groups()
#self.load_resources()
#def save_connection_data(self):
# self._dataStore.Save_Config_Data()
# def load_resources(self):
# try:
# resCnt = 0
# for grp in self._dataStore._groups:
# resources = self.list_group_resources(grp)
# for res in resources:
# resCnt = resCnt +1
# name = cleanup_prim_path(self, Name=res.name)
# self._dataStore._resources[name] = {"name":name, "type": res.type, "group": grp, "location":res.location, "subscription":self._subscription_id, "lmcost": 0}
# #self._dataStore.res["name"] = {"name":res["name"], "type": type, "group": group, "location":location, "subscription":subscription, "lmcost": lmcost}
# self.sendNotify("MCE: Azure resources loaded: " + str(len(self._dataStore._resources)), nm.NotificationStatus.INFO)
# carb.log_info("Azure API resources loaded: " + str(len(self._dataStore._resources)))
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# def load_groups(self):
# try:
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# rg_groups = resource_client.resource_groups.list()
# grpCnt = 0
# for group in rg_groups:
# grp = {group.name:{"name":group.name, "subs": self._subscription_id, "location":group.location}}
# self._dataStore._groups.update(grp)
# grpCnt = grpCnt + 1
# self.sendNotify("MCE: Azure groups loaded: " + str(len(self._dataStore._groups)), nm.NotificationStatus.INFO)
# carb.log_info("Azure API groups loaded: " + str(len(self._dataStore._groups)))
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
#return a list of resource groups
# def get_resource_groups(self):
# # Obtain the management object for resources.
# try:
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# rg_groups = resource_client.resource_groups.list()
# return rg_groups
# except:
# error = sys.exc_info()[0]
# carb.log_error("Oops! " + str(error) + " occurred.")
# self.sendNotify("MCE: Error:" + str(error), nm.NotificationStatus.WARNING)
# #for item in rg_groups:
# # print(item)
# # List Resources within the group
# def list_group_resources(self, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# carb.log_info("List all of the resources within the group")
# res = resource_client.resources.list_by_resource_group(groupName)
# return res
# #creates a resource group with groupName at location
# def create_resource_group(self, groupName:str, location:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# #
# # Managing resource groups
# #
# resource_group_params = {"location": location}
# # Create Resource group
# print("Create Resource Group: " + groupName + " @ " + location)
# self.print_item(
# resource_client.resource_groups.create_or_update(
# groupName, resource_group_params)
# )
# def print_item(self, group):
# """Print a ResourceGroup instance."""
# print("\tName: {}".format(group.name))
# print("\tId: {}".format(group.id))
# print("\tLocation: {}".format(group.location))
# print("\tTags: {}".format(group.tags))
# self.print_properties(group.properties)
# def print_properties(self, props):
# """Print a ResourceGroup properties instance."""
# if props and props.provisioning_state:
# print("\tProperties:")
# print("\t\tProvisioning State: {}".format(props.provisioning_state))
# print("\n\n")
# # Create a Key Vault in the Resource Group
# def create_key_vault(self, vaultName:str, location:str, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# print("Create a Key Vault via a Generic Resource Put")
# key_vault_params = {
# "location": location,
# "properties": {
# "sku": {"family": "A", "name": "standard"},
# "tenantId": self._dataStore._azure_tenant_id,
# "accessPolicies": [],
# "enabledForDeployment": True,
# "enabledForTemplateDeployment": True,
# "enabledForDiskEncryption": True
# },
# }
# resource_client.resources.begin_create_or_update(
# resource_group_name=groupName,
# resource_provider_namespace="Microsoft.KeyVault",
# parent_resource_path="",
# resource_type="vaults",
# # Suffix random string to make vault name unique
# resource_name=vaultName + datetime.utcnow().strftime("-%H%M%S"),
# api_version="2019-09-01",
# parameters=key_vault_params
# ).result()
# # Export the Resource group template
# def export_group_template(self, groupName:str):
# # Obtain the management object for resources.
# resource_client = ResourceManagementClient(self._token_credential, self._subscription_id)
# print("Export Resource Group Template")
# BODY = {
# 'resources': ['*']
# }
# result = json.dumps(
# resource_client.resource_groups.begin_export_template(
# groupName, BODY).result().template, indent=4
# )
# print(result + "\n\n")
# return result
# def run_example():
# """Resource Group management example."""
# #
# # Create the Resource Manager Client with an Application (service principal) token provider
# #
# subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None) # your Azure Subscription Id
# credentials = DefaultAzureCredential()
# client = ResourceManagementClient(credentials, subscription_id)
# #
# # Managing resource groups
# #
# resource_group_params = {"location": "westus"}
# # List Resource Groups
# print("List Resource Groups")
# for item in client.resource_groups.list():
# print_item(item)
# # Create Resource group
# print("Create Resource Group")
# print_item(
# client.resource_groups.create_or_update(
# GROUP_NAME, resource_group_params)
# )
# # Modify the Resource group
# print("Modify Resource Group")
# resource_group_params.update(tags={"hello": "world"})
# print_item(
# client.resource_groups.update(
# GROUP_NAME, resource_group_params)
# )
# # Create a Key Vault in the Resource Group
# print("Create a Key Vault via a Generic Resource Put")
# key_vault_params = {
# "location": "westus",
# "properties": {
# "sku": {"family": "A", "name": "standard"},
# "tenantId": os.environ["AZURE_TENANT_ID"],
# "accessPolicies": [],
# "enabledForDeployment": True,
# "enabledForTemplateDeployment": True,
# "enabledForDiskEncryption": True
# },
# }
# client.resources.begin_create_or_update(
# resource_group_name=GROUP_NAME,
# resource_provider_namespace="Microsoft.KeyVault",
# parent_resource_path="",
# resource_type="vaults",
# # Suffix random string to make vault name unique
# resource_name="azureSampleVault" + datetime.utcnow().strftime("-%H%M%S"),
# api_version="2019-09-01",
# parameters=key_vault_params
# ).result()
# # List Resources within the group
# print("List all of the resources within the group")
# for item in client.resources.list_by_resource_group(GROUP_NAME):
# print_item(item)
# # Export the Resource group template
# print("Export Resource Group Template")
# BODY = {
# 'resources': ['*']
# }
# print(
# json.dumps(
# client.resource_groups.begin_export_template(
# GROUP_NAME, BODY).result().template, indent=4
# )
# )
# print("\n\n")
# # Delete Resource group and everything in it
# print("Delete Resource Group")
# delete_async_operation = client.resource_groups.begin_delete(GROUP_NAME)
# delete_async_operation.wait()
# print("\nDeleted: {}".format(GROUP_NAME))
| 13,810 | Python | 38.235795 | 178 | 0.583128 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/unit_tests.py | import unittest
from .math_utils import calcPlaneSizeForGroup
class TestRGPlaneSizeCalc(unittest.TestCase):
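    # The expected values below are consistent with calcPlaneSizeForGroup(n)
    # behaving like ceil(sqrt(n)), an observation from these cases rather than a guarantee.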
    # unittest silently keeps only the last method of a given name, so each
    # case needs a unique name for all of them to actually run
    def test_count_1(self):
        planeSize = calcPlaneSizeForGroup(1)
        self.assertEqual(planeSize, 1)
    def test_count_2(self):
        planeSize = calcPlaneSizeForGroup(2)
        self.assertEqual(planeSize, 2)
    def test_count_3(self):
        planeSize = calcPlaneSizeForGroup(3)
        self.assertEqual(planeSize, 2)
    def test_count_4(self):
        planeSize = calcPlaneSizeForGroup(4)
        self.assertEqual(planeSize, 2)
    def test_count_5(self):
        planeSize = calcPlaneSizeForGroup(5)
        self.assertEqual(planeSize, 3)
    def test_count_6(self):
        planeSize = calcPlaneSizeForGroup(6)
        self.assertEqual(planeSize, 3)
if __name__ == '__main__':
unittest.main() | 823 | Python | 24.749999 | 45 | 0.665857 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/azure_resource_map.py | shape_usda_name = {
"AAD":"omniverse://localhost/MCE/3dIcons/AzureAAD_1.1.usd",
"Resource_Group":"omniverse://localhost/MCE/3dIcons/Resource_Groups_3.0.usd",
"Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2_8.usd",
"App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd",
"Subscription":"omniverse://localhost/MCE/3dIcons/Subscriptions_1.3.usd",
"API_Connection":"omniverse://localhost/MCE/3dIcons/API_Connection.usd",
"API_Management_service":"omniverse://localhost/MCE/3dIcons/API_management_services_fix.usd",
"App_Configuration":"omniverse://localhost/MCE/3dIcons/App-Configuration.usd",
"App_Service_plan":"omniverse://localhost/MCE/3dIcons/app_service_plan_fix.usd",
"App_Service":"omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd",
"Application_Insights":"omniverse://localhost/MCE/3dIcons/Application_Insights_4.0.usd",
"Application_gateway":"omniverse://localhost/MCE/3dIcons/Application_Gateway.usd",
"Automation_Account":"omniverse://localhost/MCE/3dIcons/automation_accounts_fix.usd",
"Availability_test":"omniverse://localhost/MCE/3dIcons/Availability_Test.usd",
"Azure_Bot":"omniverse://localhost/MCE/3dIcons/Web_App_Bot.usd",
"Azure_Cosmos_DB_API_for_MongoDB_account":"omniverse://localhost/MCE/3dIcons/Azure_Cosmos_DB_API_MongoDB.usd",
"Azure_Cosmos_DB_account":"omniverse://localhost/MCE/3dIcons/Azure_Cosmos_DB.usd",
"Azure_Data_Explorer_Cluster":"omniverse://localhost/MCE/3dIcons/azure_data_explorer_clusters_fix.usd",
"Azure_DevOps_organization":"omniverse://localhost/MCE/3dIcons/Azure_Dev_Ops.usd",
"Azure_Machine_Learning":"omniverse://localhost/MCE/3dIcons/Azure_Machine_Learning.usd",
"Azure_Workbook":"omniverse://localhost/MCE/3dIcons/azure_workbook_fix.usd",
"Bastion":"omniverse://localhost/MCE/3dIcons/Bastion.usd",
"Cognitive_Service":"omniverse://localhost/MCE/3dIcons/Cognitive_Services.usd",
"Container_registry":"omniverse://localhost/MCE/3dIcons/container_registries.usd",
"Data_Lake_Analytics":"omniverse://localhost/MCE/3dIcons/Data_Lake_Analytics_1.2.usd",
"Data_Lake_Storage_Gen1":"omniverse://localhost/MCE/3dIcons/data_lake_storage_gen1_fix.usd",
"Data_factory__V2_":"omniverse://localhost/MCE/3dIcons/data_factory_fix.usd",
"Disk":"omniverse://localhost/MCE/3dIcons/Disk_1.0.usd",
"DNS_zone":"omniverse://localhost/MCE/3dIcons/DNS_Zone.usd",
"DNS_Zone":"omniverse://localhost/MCE/3dIcons/DNS_Zone.usd",
"Event_Grid_System_Topic":"omniverse://localhost/MCE/3dIcons/event_grid_topics_fix.usd",
"Event_Hubs_Namespace":"omniverse://localhost/MCE/3dIcons/events_hub_fix.usd",
"Firewall_Policy":"omniverse://localhost/MCE/3dIcons/Firewall_Policy.usd",
"Firewall":"omniverse://localhost/MCE/3dIcons/Firewall.usd",
"Function_App":"omniverse://localhost/MCE/3dIcons/function_apps_fix.usd",
"Image":"omniverse://localhost/MCE/3dIcons/image_fix.usd",
"Key_vault":"omniverse://localhost/MCE/3dIcons/Key_Vaults_2.0.usd",
"Kubernetes_service":"omniverse://localhost/MCE/3dIcons/kubernetess_services_fix.usd",
"Language":"omniverse://localhost/MCE/3dIcons/Language_Understanding.usd",
"Language_understanding":"omniverse://localhost/MCE/3dIcons/Language_Understanding.usd",
"Load_balancer":"omniverse://localhost/MCE/3dIcons/load_balancer_fix.usd",
"Log_Analytics_query_pack":"omniverse://localhost/MCE/3dIcons/Log_Analytics_Query_Pack.usd",
"Log_Analytics_workspace":"omniverse://localhost/MCE/3dIcons/Log_Analytics_Workspace.usd",
"Logic_App__Standard_":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Std.usd",
"Logic_app":"omniverse://localhost/MCE/3dIcons/Logic_apps_fix.usd",
"Logic_apps_custom_connector":"omniverse://localhost/MCE/3dIcons/Logic_Apps_Custom_Connector.usd",
"Managed_Identity":"omniverse://localhost/MCE/3dIcons/Managed_Identity.usd",
"Managed_application":"omniverse://localhost/MCE/3dIcons/Managed_Identity.usd",
"Network_Interface":"omniverse://localhost/MCE/3dIcons/network_interface_fix.usd",
"Microsoft_Network_networkInterfaces":"omniverse://localhost/MCE/3dIcons/network_interface_fix.usd",
"Network_Watcher":"omniverse://localhost/MCE/3dIcons/network_watcher_fix.usd",
"Network_security_group":"omniverse://localhost/MCE/3dIcons/network_security_group_fix.usd",
"Microsoft_Network_networkSecurityGroups" : "omniverse://localhost/MCE/3dIcons/network_security_group_fix.usd",
"Power_BI_Embedded":"omniverse://localhost/MCE/3dIcons/Power_BI_Embedded_2.0.usd",
"Private_DNS_zone":"omniverse://localhost/MCE/3dIcons/Private_DNS_Zone_2.0.usd",
"Private_endpoint":"omniverse://localhost/MCE/3dIcons/Private_End_Point.usd",
"Public_IP_address":"omniverse://localhost/MCE/3dIcons/public_ip_adresses_fix.usd",
"Recovery_Services_vault":"omniverse://localhost/MCE/3dIcons/Recovery_Services_Vault.usd",
"Restore_Point_Collection":"omniverse://localhost/MCE/3dIcons/Restore_Point_Collection_2.0.usd",
"Runbook":"omniverse://localhost/MCE/3dIcons/Runbook.usd",
"SQL_database":"omniverse://localhost/MCE/3dIcons/SQLDatabase.usd",
"SQL_elastic_pool":"omniverse://localhost/MCE/3dIcons/SQL_Elastic_Pools.usd",
"SQL_server":"omniverse://localhost/MCE/3dIcons/SQLServer.usd",
"SQL_virtual_machine":"omniverse://localhost/MCE/3dIcons/SQL_Virtual_Machine_1.1.usd",
"Search_service":"omniverse://localhost/MCE/3dIcons/Search_Services_1.0.usd",
"Service_Bus_Namespace":"omniverse://localhost/MCE/3dIcons/service_bus_fix.usd",
"Service_Fabric_cluster":"omniverse://localhost/MCE/3dIcons/service-fabric-clusters_fix.usd",
"Shared_dashboard":"omniverse://localhost/MCE/3dIcons/Shared_Dashboard.usd",
"Snapshot":"omniverse://localhost/MCE/3dIcons/Snapshot.usd",
"Solution":"omniverse://localhost/MCE/3dIcons/solution.usd",
"Storage_account":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd",
"Traffic_Manager_profile":"omniverse://localhost/MCE/3dIcons/Traffic_Manager_Profiles_1.0.usd",
"Virtual_machine_scale_set":"omniverse://localhost/MCE/3dIcons/Virtual_Machines_Scale_Sets_2.0.usd",
"Virtual_machine":"omniverse://localhost/MCE/3dIcons/Virtual_Machine_1.1.usd",
"Virtual_network":"omniverse://localhost/MCE/3dIcons/Virtual_Network.usd",
"Web_App_Bot":"omniverse://localhost/MCE/3dIcons/Web_App_Bot.usd",
"Coat_Rack":"omniverse://localhost/MCE/3dIcons/Coat_Rack_Bowler_Hat.usdz",
"Observation_Chair":"omniverse://localhost/MCE/3dIcons/Green-Ball-Chair.usd",
"Leather_Jacket":"omniverse://localhost/MCE/3dIcons/Leather_Jacket.usdz",
"Rug_V4": "omniverse://localhost/MCE/3dIcons/RugV4.usd",
"Neon_All_Resources": "omniverse://localhost/MCE/NeonSigns/all_resources/all_resources.usd",
"Neon_By_Group": "omniverse://localhost/MCE/NeonSigns/resources_by_group/resources_by_group.usd",
"Neon_By_Subscription": "omniverse://localhost/MCE/NeonSigns/resources_by_cost/resources_by_cost.usd",
"Neon_By_Location": "omniverse://localhost/MCE/NeonSigns/resources_by_location/resources_by_location.usd",
"Neon_Azure_Cloud": "omniverse://localhost/MCE/NeonSigns/azure cloud/azure cloud.usd",
"Microsoft_Web_certificates" :"omniverse://localhost/MCE/3dIcons/App_Service_Certificates.usd",
"Microsoft_ClassicStorage_storageAccounts":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd",
"Storage_account__classic_":"omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd",
"microsoft_alertsmanagement_smartDetectorAlertRules": "omniverse://localhost/MCE/3dIcons/Alerts.usd",
"Microsoft_KeyVault_vaults":"omniverse://localhost/MCE/3dIcons/Key_Vaults_2.0.usd",
"Microsoft_Storage_storageAccounts" : "omniverse://localhost/MCE/3dIcons/StorageAccounts_2.8.usd",
"Microsoft_Network_dnszones" : "omniverse://localhost/MCE/3dIcons/DNS_Zone.usd",
"Microsoft_Web_sites": "omniverse://localhost/MCE/3dIcons/AppServices_1.2.usd",
"Microsoft_Web_serverFarms" : "omniverse://localhost/MCE/3dIcons/app_service_plan_fix.usd",
"Microsoft_Network_networkWatchers" : "omniverse://localhost/MCE/3dIcons/network_watcher_fix.usd",
"Microsoft_OperationalInsights_workspaces" : "omniverse://localhost/MCE/3dIcons/Log_Analytics_Workspace.usd",
"Microsoft_OperationsManagement_solutions" : "omniverse://localhost/MCE/3dIcons/solution.usd",
"microsoft_insights_autoscalesettings" : "omniverse://localhost/MCE/3dIcons/scene.usd",
"microsoft_visualstudio_account" :"omniverse://localhost/MCE/3dIcons/azure_devops_fix.usd",
"Microsoft_Migrate_moveCollections" : "omniverse://localhost/MCE/3dIcons/scene.usd",
"microsoft_insights_actiongroups" : "omniverse://localhost/MCE/3dIcons/scene.usd",
"Microsoft_Insights_components":"omniverse://localhost/MCE/3dIcons/Application_Insights_4.0.usd",
"Microsoft_Portal_dashboards" : "omniverse://localhost/MCE/3dIcons/Shared_Dashboard.usd",
"Microsoft_ContainerRegistry_registries": "omniverse://localhost/MCE/3dIcons/container_registries_fix.usd",
"Microsoft_RecoveryServices_vaults":"omniverse://localhost/MCE/3dIcons/recovery_service_vault_fix.usd",
"Microsoft_DevTestLab_schedules": "omniverse://localhost/MCE/3dIcons/scene.usd",
"User_Red":"omniverse://localhost/MCE/3dIcons/User_Red.usd",
"User_Blue":"omniverse://localhost/MCE/3dIcons/User_Blue.usd",
"User_Orange":"omniverse://localhost/MCE/3dIcons/User_Orange.usd",
"User_Green":"omniverse://localhost/MCE/3dIcons/User_Green.usd",
}
| 9,535 | Python | 81.206896 | 115 | 0.745254 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/object_info_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ObjectInfoModel"]
from pxr import Tf
from pxr import Usd
from pxr import UsdGeom
from omni.ui import scene as sc
import omni.usd
from .prim_utils import only_select_parent_prims
# The distance to raise above the top of the object's bounding box
TOP_OFFSET = 5
class ObjectInfoModel(sc.AbstractManipulatorModel):
"""
The model tracks the position and info of the selected object.
"""
class PositionItem(sc.AbstractManipulatorItem):
"""
The Model Item represents the position. It doesn't contain anything
because we take the position directly from USD when requesting.
"""
def __init__(self):
super().__init__()
self.value = [0, 0, 0]
def __init__(self):
super().__init__()
# Current selected prim and material
self._current_paths = []
self.positions = []
self._stage_listener = None
self.populate()
# Track selection changes
self.events = self._get_context().get_stage_event_stream()
self.stage_event_delegate = self.events.create_subscription_to_pop(
self.on_stage_event, name="Object Info Selection Update"
)
def on_stage_event(self, event):
"""Called by stage_event_stream. We only care about selection changes."""
# NEW: if statement to only check when selection changed
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
self.populate()
def destroy(self):
self.events = None
self.stage_event_delegate.unsubscribe()
def populate(self):
self._current_paths = []
self.positions = []
usd_context = self._get_context()
stage = usd_context.get_stage()
#Get selected prims
usd_context = omni.usd.get_context()
self._stage: Usd.Stage = usd_context.get_stage()
self._selection = usd_context.get_selection()
self._paths = self._selection.get_selected_prim_paths()
#Selectively choose the paths
self._paths = only_select_parent_prims(prim_paths=self._paths)
#if len(self._current_paths) > 1: #ONLY SHOW ON MULTISELECT!
for path in self._paths:
prim = stage.GetPrimAtPath(path)
if not prim.IsValid():
return
for child in prim.GetChildren():
if child.IsA(UsdGeom.Imageable):
if str(path).find("Collision") == -1:
if str(path).find("Baked") == -1:
if str(path).find("/materials") == -1:
self._current_paths.append(child.GetPath())
self.positions.append(ObjectInfoModel.PositionItem())
# Position is changed because new selected object has a different position
self._item_changed(self.positions[-1])
#elif len(self._current_paths == 0):
# pass
def _get_context(self):
# Get the UsdContext we are attached to
return omni.usd.get_context()
def _notice_changed(self, notice: Usd.Notice, stage: Usd.Stage) -> None:
"""Called by Tf.Notice. Used when the current selected object changes in some way."""
for p in notice.GetChangedInfoOnlyPaths():
for i, watched_path in enumerate(self._current_paths):
if str(watched_path) in str(p.GetPrimPath()):
self._item_changed(self.positions[i])
def get_name(self, index):
stage = self._get_context().get_stage()
prim = stage.GetPrimAtPath(self._current_paths[index])
return prim.GetCustomDataByKey('res_name')
def get_num_prims(self):
return len(self._current_paths)
def get_position(self, index):
"""Returns position of currently selected object"""
stage = self._get_context().get_stage()
if not stage or not self._current_paths[index]:
return [0, 0, 0]
# Get position directly from USD
prim = stage.GetPrimAtPath(self._current_paths[index])
box_cache = UsdGeom.BBoxCache(Usd.TimeCode.Default(), includedPurposes=[UsdGeom.Tokens.default_])
bound = box_cache.ComputeWorldBound(prim)
range = bound.ComputeAlignedBox()
bboxMin = range.GetMin()
bboxMax = range.GetMax()
# Find the top center of the bounding box and add a small offset upward.
position = [(bboxMin[0] + bboxMax[0]) * 0.5, bboxMax[1] + TOP_OFFSET, (bboxMin[2] + bboxMax[2]) * 0.5]
return position
| 5,078 | Python | 35.021276 | 110 | 0.611461 |
USDSync/MetaCloudExplorer/exts/meta.cloud.explorer.azure/meta/cloud/explorer/azure/stage_manager.py | # This class is intended to manage the stage
# Using the requested view and the loaded dataset, we need to translate data into Prims
# There are 2 variables here. The prims you want to see, and the way you want to see them
# the data gives resource counts by subscription, group, or location.
# it also contains resource group data and data about all the raw resources
# we need to put the resource prims in groups on the stage - floating islands?
# I want to try and build a floating island for each resource group, i.e. a plane that prims can rest on.
# The islands will be 2d planes in 3d space, big enough to accommodate the resources in said group.
# the more resources, the bigger the island.
# we can then position the islands in novel ways for exploration
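#
# Rendering flow (a descriptive sketch of the code below):
#   StageManager.ShowStage(viewType)
#     -> SetActiveView()                   # pick the group-view subclass for this view type
#     -> initializeStage() / calcGroupPlaneSizes() / calulateCosts()
#     -> getTransforms()                   # bin-packer or scatter placement for the group planes
#     -> CreateGroups() + loadResources()  # async prim creation per group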
# Model related
# Python built-in
from textwrap import fill
import time
from cgitb import text
import os.path
from unicodedata import name
import carb
import locale
from pathlib import Path
# external python lib
import csv
import itertools
# USD imports
from pxr import Gf, UsdGeom, UsdLux, Usd, Sdf
# omniverse
import omni.client
import omni.kit.app
import omni.ui as ui
import omni.usd
import omni.kit.commands
import shutil
import os
import asyncio
import omni.kit.notification_manager as nm
from .prim_utils import create_plane
from .prim_utils import cleanup_prim_path
from .prim_utils import get_font_size_from_length
from .packer import Node, Block, Packer
from omni.kit.window.file_importer import get_file_importer
from omni.ui import color as cl
#import utilities
from .azure_resource_map import shape_usda_name
from .data_manager import DataManager
from .data_store import DataStore
from .scatter_complex import distributePlanes
#Import View Models
from .group_aad import AADGrpView
from .group_group import ResGrpView
from .group_sub import SubGrpView
from .group_location import LocGrpView
from .group_type import TypeGrpView
from .group_tag import TagGrpView
CURRENT_PATH = Path(__file__).parent
DATA_PATH = CURRENT_PATH.joinpath("temp")
# The Stage Manager is responsible for drawing the stage based on the ViewType
# It will start from scratch and create the Ground plane and groups on the plane
# It will render the resources in each group on individual planes
class StageManager():
def __init__(self):
self._dataManager = DataManager.instance() # Get A Singleton instance
self._dataStore = DataStore.instance() # Get A Singleton instance
#self._dataManager.add_model_changed_callback(self.model_changed)
self.stage_unit_per_meter = 1
#Get Composition Options from UI
try:
self._scale = self._dataStore._scale_model
except:
self._scale=1.0
try:
self._use_packing_algo = self._dataStore._use_packing_algo
except:
self._use_packing_algo = False
try:
self._use_symmetric_planes = self._dataStore._use_symmetric_planes
except:
self._use_symmetric_planes = False
try:
self._last_view_type = self._dataStore._last_view_type
except:
self._last_view_type = "ByGroup"
if self._last_view_type is None:
self._last_view_type = "ByGroup"
self._upAxis="Z"
self._shapeUpAxis="Z"
self.ActiveView = self.SetActiveView(self._last_view_type)
def SetActiveView(self, viewType:str):
#Set a subclass to handle the View Creation
if viewType == "ByGroup":
view = ResGrpView(viewPath="RGrps", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis,
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo)
if viewType == "ByLocation":
view = LocGrpView(viewPath="Locs", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis,
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo)
if viewType == "ByType":
view = TypeGrpView(viewPath="Types", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis,
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo)
if viewType == "BySub":
view = SubGrpView(viewPath="Subs", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis,
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo)
if viewType == "ByTag":
view = TagGrpView(viewPath="Tags", scale=self._scale, upAxis=self._upAxis, shapeUpAxis=self._shapeUpAxis,
symPlanes=self._dataStore._symmetric_planes_model.as_bool, binPack=self._use_packing_algo)
return view
# def model_changed():
# pass
#Invoked from UI - Show the Stages based on the View.
def ShowStage(self, viewType:str):
#Reset view data
self._dataStore._lcl_sizes = []
self._dataStore._lcl_groups = []
self._dataStore._lcl_resources = []
self.ActiveView = self.SetActiveView(viewType)
#populate the stage
self.ActiveView.initializeStage(self.stage_unit_per_meter) #Base Method
self.ActiveView.calcGroupPlaneSizes() #Abstract Method
self.ActiveView.calulateCosts() #Abstract Method
        transforms = self.getTransforms() #Coordinates for the group planes
#sort the groups to add largest first
self._dataStore._lcl_groups.sort(key=lambda element: element['size'], reverse=True)
self._dataStore._lcl_sizes.sort(reverse=True)
asyncio.ensure_future(self.AddLightsToStage())
#Create the groups in an async loop
grpCnt = len(self._dataStore._lcl_groups)
if (grpCnt) >0 :
asyncio.ensure_future(self.ActiveView.CreateGroups(transforms=transforms))
self.ActiveView.loadResources() #Abstract Method
self.sendNotify("Stage loading complete: " + str(grpCnt) + " groups loaded.", nm.NotificationStatus.INFO)
#Load the resources by group
def LoadResources(self, viewType:str):
self.ActiveView = self.SetActiveView(viewType)
self.ActiveView.initializeStage(self.stage_unit_per_meter) #Base Method
self.ActiveView.calcGroupPlaneSizes() #Abstract Method
self.ActiveView.calulateCosts() #Abstract Method
#View is already set, show resources for specific or all paths
if self.ActiveView is None:
self.ActiveView = self.SetActiveView(self._last_view_type)
self.ActiveView.loadResources() #Abstract Method
#Gets the x,y,z coordinates to place the grouping planes
def getTransforms(self):
if (self._dataStore._use_packing_algo):
#Use Packer Algorithm to determine positioning
transforms = []
blocks = []
if len(self._dataStore._lcl_sizes) >0:
sorted_sizes = sorted(self._dataStore._lcl_sizes, reverse=True)
for size in sorted_sizes:
sz = (size*2) #double the size end to end
blocks.append(Block((sz,sz)))
pack = Packer()
pack.fit(blocks)
for block in blocks:
if block.fit:
fitX = block.fit.location[0]
fitY = block.fit.location[1]
fitZ = 0
transforms.append(Gf.Vec3f(fitX, fitY ,fitZ))
#print("size: {} loc: {},{}".format(str(block.size[0]), str(block.fit.location[0]), str(block.fit.location[1])))
else:
print("not fit: {}".format(block.size[0]))
return transforms
else:
#Use the scatter distribution method
maxDims = (self._dataStore._options_count_models[0].as_float * self._dataStore._options_count_models[1].as_float * self._dataStore._options_count_models[2].as_float)
grpCnt = len(self._dataStore._lcl_groups)
if grpCnt > maxDims:
self.sendNotify("Not enough dimensions for ..." + str(grpCnt) + "res groups, Max Dims: " + str(maxDims), nm.NotificationStatus.WARNING)
return
if grpCnt >0:
#Use Customized Scatter algorithm get coordinates for varying sized planes
transforms = distributePlanes(
UpAxis=self._upAxis,
count=[m.as_int for m in self._dataStore._options_count_models],
distance=[m.as_float for m in self._dataStore._options_dist_models],
sizes=self._dataStore._lcl_sizes,
randomization=[m.as_float for m in self._dataStore._options_random_models],
seed=0,
scaleFactor=self._dataStore._composition_scale_model.as_float)
return transforms
async def AddLightsToStage(self):
stage = omni.usd.get_context().get_stage()
try:
if stage.GetPrimAtPath('/Environment/sky'):
omni.kit.commands.execute('DeletePrimsCommand',
paths=['/Environment/sky'])
except:
pass #ignore this
await omni.kit.app.get_app().next_update_async()
omni.kit.commands.execute('CreateDynamicSkyCommand',
sky_url='http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Skies/Dynamic/NightSky.usd',
sky_path='/Environment/sky')
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/Environment/sky.xformOp:rotateZYX'),
value=Gf.Vec3f(90.0, 0.0, 0.0),
prev=Gf.Vec3f(0.0, 0.0, 0.0))
def Select_Planes(self):
if self.ActiveView is None:
self.ActiveView = self.SetActiveView(self._last_view_type)
self.ActiveView.selectGroupPrims()
else:
self.ActiveView.selectGroupPrims()
def get_size(self, element):
return element['size']
def ShowCosts(self):
if self.ActiveView is None:
self.ActiveView = self.SetActiveView(self._last_view_type)
asyncio.ensure_future(self.ActiveView.showHideCosts())
# Set Color
# next_shape.GetDisplayColorAttr().Set(
# category_colors[int(cluster) % self.max_num_clusters])
def clicked_ok(self):
pass
def sendNotify(self, message:str, status:nm.NotificationStatus):
# https://docs.omniverse.nvidia.com/py/kit/source/extensions/omni.kit.notification_manager/docs/index.html?highlight=omni%20kit%20notification_manager#
import omni.kit.notification_manager as nm
ok_button = nm.NotificationButtonInfo("OK", on_complete=self.clicked_ok)
nm.post_notification(
message,
hide_after_timeout=True,
duration=5,
status=status,
button_infos=[]
)
#log the vectors
def log_transforms(self, vectors):
for v in vectors:
            logdata = str(v[0]) + "," + str(v[1]) + "," + str(v[2])
print(logdata)
| 11,398 | Python | 36.49671 | 177 | 0.631953 |