Dataset columns:
  file_path: string, lengths 22-162
  content: string, lengths 19-501k
  size: int64, 19-501k
  lang: string, 1 distinct value (Python)
  avg_line_length: float64, 6.33-100
  max_line_length: int64, 18-935
  alphanum_fraction: float64, 0.34-0.93
eliabntt/GRADE-RR/additional_scripts/colorize.py
""" Use this code to colorize the generated data. The code is thought to colorize all the data, create videos, and fix the vertical fov issue. Please check the arguments to understand how to use it. Please set the corresponding data_enabled to False if you do not want to colorize some kind of data (eg. depth_enabled) """ import math import argparse import colorsys import confuse import copy import cv2 import ipdb import numpy as np import os import pickle as pkl import random from PIL import Image, ImageDraw def project_pinhole(points, view_proj_matrix): """ Project 3D points to 2D camera view using a pinhole camera model. Args: points (numpy.ndarray): Array of points in world frame of shape (num_points, 3). viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor. Returns: (numpy.ndarray): Image-space points of shape (num_points, 3) """ homo = np.pad(points, ((0, 0), (0, 1)), constant_values=1.0) tf_points = np.dot(homo, view_proj_matrix) tf_points = tf_points / (tf_points[..., -1:]) tf_points[..., :2] = 0.5 * (tf_points[..., :2] + 1) return tf_points[..., :3] def random_colours(N, enable_random=True, num_channels=3): """ Generate random colors. Generate visually distinct colours by linearly spacing the hue channel in HSV space and then convert to RGB space. """ start = 0 if enable_random: random.seed(10) start = random.random() hues = [(start + i / N) % 1.0 for i in range(N)] colours = [list(colorsys.hsv_to_rgb(h, 0.9, 1.0)) for i, h in enumerate(hues)] if num_channels == 4: for color in colours: color.append(1.0) if enable_random: random.shuffle(colours) return colours def colorize_bboxes(bboxes_2d_data, rgb, num_channels=4): """ Colorizes 2D bounding box data for visualization. Args: bboxes_2d_data (numpy.ndarray): 2D bounding box data from the sensor. rgb (numpy.ndarray): RGB data from the sensor to embed bounding box. num_channels (int): Specify number of channels i.e. 3 or 4. """ obj_name_list = [] rgb_img = Image.fromarray(rgb).convert("RGBA") rgb_img2 = Image.fromarray(rgb) overlay = Image.new("RGBA", rgb_img.size, (0, 0, 0, 0)) rgb_img_draw = ImageDraw.Draw(overlay) rgb_img_draw2 = ImageDraw.Draw(rgb_img2) for bbox_2d in bboxes_2d_data: obj_name_list.append(bbox_2d[1]) obj_name_list_np = np.unique(np.array(obj_name_list)) color_list = random_colours(len(obj_name_list_np.tolist()), True, num_channels) for bbox_2d in bboxes_2d_data: index = np.where(obj_name_list_np == bbox_2d[1])[0][0] bbox_color = color_list[index] outline = (int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2])) if num_channels == 4: outline = ( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(255 * bbox_color[3]), ) fill = ( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(0.25 * 255), ) rgb_img_draw.rectangle([(bbox_2d[6], bbox_2d[7]), (bbox_2d[8], bbox_2d[9])], fill=fill, outline=outline, width=3) rgb_img_draw2.rectangle([(bbox_2d[6], bbox_2d[7]), (bbox_2d[8], bbox_2d[9])], outline=outline, width=3) bboxes_2d_rgb = Image.alpha_composite(rgb_img, overlay) bboxes_2d_rgb = np.array(bboxes_2d_rgb) bboxes_2d_rgb2 = np.array(rgb_img2) bboxes_2d_rgb3 = np.array(Image.alpha_composite(rgb_img2.convert("RGBA"), overlay)) return bboxes_2d_rgb3 # , bboxes_2d_rgb2 #only boxes def colorize_depth(depth_image): """ It takes a depth image, normalizes it, and then maps it to a color image :param depth_image: The depth image to be colorized :return: The colorized depth image. 
""" height, width = depth_image.shape[:2] colorized_image = np.zeros((height, width, 4)) depth_image *= 100 depth_image = np.reciprocal(depth_image) depth_image[depth_image == 0.0] = 1e-5 depth_image = np.clip(depth_image, 0, 255) depth_image -= np.min(depth_image) if np.max(depth_image) > 0: depth_image /= np.max(depth_image) + 1e-8 colorized_image[:, :, 0] = depth_image colorized_image[:, :, 1] = depth_image colorized_image[:, :, 2] = depth_image colorized_image[:, :, 3] = 1 colorized_image = (colorized_image * 255).astype(np.uint8) return colorized_image def colorize_semantic_from_instance(instance_image, instance_mappings, sem = False): """ It takes the instance image and the instance mappings and returns a colorized image :param instance_image: the instance image from the instance segmentation :param instance_mappings: a list of dictionaries, each of which has the following keys: """ if len(instance_mappings) == 0: segmentation_image = np.zeros_like(instance_image) segmentation_ids = np.unique(segmentation_image) num_colours = len(segmentation_ids) # This is to avoid generating lots of colours for semantic classes not in frame lut = np.array([segmentation_ids, list(range(num_colours))]) re_instanced = lut[1, np.searchsorted(lut[0, :], segmentation_image)] colours = np.array([[0.0] * 4] + random_colours(num_colours)) else: semantic_instances = {} changed = np.zeros(instance_image.shape) for im in instance_mappings[::-1]: semantic_instances.setdefault(im["semanticId"], []).extend(im["instanceIds"]) changed[instance_image == im["uniqueId"]] = max(im["instanceIds"]) instance_image = changed.astype(np.uint32) max_semantic_instance_id = np.max([max(il) for _, il in semantic_instances.items()]) max_instance_id = instance_image.max() lut = np.zeros(max(max_semantic_instance_id, max_instance_id) + 1, dtype=np.uint32) if sem: for i, (_, il) in enumerate(semantic_instances.items()): lut[np.array(il)] = i + 1 # +1 to differentiate from background re_instanced = np.take(lut, instance_image) colours = np.array([[0.0] * 3] + random_colours(len(semantic_instances))) else: colours = np.array([[0.0] * 3] + random_colours(len(lut))) re_instanced = instance_image rgb = np.zeros((re_instanced.shape[0], re_instanced.shape[1], 3)) for i in range(len(colours)): rgb[re_instanced == i] = colours[i] rgb = rgb * 255 return rgb.astype(np.uint8) def colorize_bboxes_3d(bboxes_3d_corners, rgb): """ > It takes a list of 3D bounding boxes and a RGB image, and returns the RGB image with the 3D bounding boxes drawn on it :param bboxes_3d_corners: in the local camera frame :param rgb: the image :return: the image with the bounding boxes drawn on it. 
""" height, width = rgb.shape[:2] # FILTER BOXES mask_uv = ~np.any(np.all(bboxes_3d_corners < 0, axis=1), axis=1) & ~np.any( np.all(bboxes_3d_corners > 1, axis=1), axis=1 ) mask_z = np.all(np.all(bboxes_3d_corners[..., 2:] >= 0, axis=1), axis=1) & np.all( np.all(bboxes_3d_corners[..., 2:] <= 1, axis=1), axis=1 ) bboxes_3d_corners = bboxes_3d_corners[mask_uv & mask_z] bboxes_3d_corners = bboxes_3d_corners[..., :2].reshape(-1, 8, 2) * np.array([[width, height]]) face_idx_list = [[0, 1, 3, 2], [4, 5, 7, 6], [2, 3, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7, 5]] colours = random_colours(len(face_idx_list)) master_overlay_img = Image.new("RGBA", (width, height), (0, 0, 0, 0)) for face_idxs, colour in zip(face_idx_list, colours): overlay = Image.new("RGBA", (width, height)) draw = ImageDraw.Draw(overlay) colour = [int(c * 255) for c in colour] for p in bboxes_3d_corners: draw.polygon([tuple(xy) for xy in p[face_idxs]], fill=tuple([*colour[:3], 120])) draw.line([tuple(xy) for xy in p[face_idxs]], width=3, fill=tuple(colour)) master_overlay_img = Image.alpha_composite(master_overlay_img, overlay) rgb_img = Image.fromarray(rgb).convert("RGBA") rgb_img = Image.alpha_composite(rgb_img, master_overlay_img) return np.asarray(rgb_img) def colorize_normals(normals): """ It takes a 3-channel array of normals, and returns a 4-channel array of normals with the background pixels set to transparent :param normals: a numpy array of shape (H, W, 3) containing the surface normals :return: the normals of the image. """ background_mask = np.sum(normals, axis=-1) == 0.0 # normalize from [-1, 1] to [0, 255] normals = (normals + 1.0) / 2 * 255 # Set background alpha to 0. normals = np.pad(normals, ((0, 0), (0, 0), (0, 1)), constant_values=255) normals[background_mask, 3] = 0. return normals.astype(np.uint8) def colorize_motion_vector(data): """Convert motion vector into colored image. The conversion is done by mapping 3D direction vector to HLS space, then converted to RGB. Args: data (numpy.array): data returned by the annotator of shape (H, W, 4). Return: (np.array): Data converted to uint8 RGBA image. """ r, theta, phi = _cartesian_to_spherical(data[:, :, :3]) phi += np.pi theta_degree = theta * 180 / np.pi phi_degree = phi * 180 / np.pi h = phi_degree / 360 l = theta_degree / 180 r = cv2.normalize(r, None, 0, 1, cv2.NORM_MINMAX) pixels = np.dstack((h * 180, l * 255, r * 255)).astype(np.uint8) rgb = cv2.cvtColor(pixels, cv2.COLOR_HLS2RGB) return rgb def _cartesian_to_spherical(xyz): """ It takes a 3D Cartesian coordinate and returns the corresponding spherical coordinates :param xyz: the 3D coordinates of the points in the image """ h, w = xyz.shape[0], xyz.shape[1] xyz = xyz.reshape(-1, 3) xy = xyz[:, 0] ** 2 + xyz[:, 1] ** 2 r = np.sqrt(xy + xyz[:, 2] ** 2) theta = np.arctan2(np.sqrt(xy), xyz[:, 2]) # for elevation angle defined from Z-axis down phi = np.arctan2(xyz[:, 1], xyz[:, 0]) # for elevation angle defined from XY-plane up return r.reshape(h, w), theta.reshape(h, w), phi.reshape(h, w) def boolean_string(s): """ It takes a string and returns a boolean :param s: the string to convert :return: The boolean value of the string. 
""" if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' parser = argparse.ArgumentParser(description="Colorize data") parser.add_argument("--viewport_folder", type=str) parser.add_argument("--img_id", type=str, default="-1") parser.add_argument("--save_imgs", type=boolean_string, default=True) parser.add_argument("--save_video", type=boolean_string, default=False) parser.add_argument("--always_update_map", type=boolean_string, default=False) parser.add_argument("--semantics", type=boolean_string, default=False) parser.add_argument("--convert_depth", type=boolean_string, default=True) # used to better visualize inverse depth parser.add_argument("--corrected_bbox_folder", type=str, default="") parser.add_argument("--vertical_aperture", type=float, default=2.32) parser.add_argument("--change_aperture", type=boolean_string, default=False) parser.add_argument("--output_dir", type=str) args, unknown = parser.parse_known_args() config = confuse.Configuration("ColorizeData", __name__) config.set_args(args) minid = 1 maxid = 1801 isdigit = False try: int(config["img_id"].get()) isdigit = True except: isdigit = False if isdigit: img_id = int(config["img_id"].get()) if img_id <= -1: print("Processing all images") else: minid = img_id maxid = img_id + 1 ids = [i for i in range(minid, maxid)] else: ids = [config["img_id"].get()] vertical_aperture = config["vertical_aperture"].get() change_aperture = config["change_aperture"].get() viewport = config["viewport_folder"].get() subfolders = os.listdir(config["viewport_folder"].get()) depth_enabled = "depth" in subfolders depthLinear_enabled = "depthLinear" in subfolders normals_enabled = "normals" in subfolders bbox2d_enabled = "bbox_2d_tight" in subfolders bbox3d_enabled = "bbox_3d" in subfolders # todo these need to be fixed instance_enabled = "instance" in subfolders sem_enabled = "instance" in subfolders and config["semantics"].get() motion_enabled = "motion-vector" in subfolders always_update_map = config["always_update_map"].get() save_video = config["save_video"].get() save_img = config["save_imgs"].get() if save_video or save_img: outdir = config["output_dir"].get() if not os.path.exists(config["output_dir"].get()): os.makedirs(outdir) if config["corrected_bbox_folder"].get() != "": corrected_bbox_folder = config["corrected_bbox_folder"].get() else: corrected_bbox_folder = None old_instance_map = None vrgb, vdepth, vdepthLinear, vnormals, vbbox2d, vbbox3d, vinstance, vmotion, vsem = [], [], [], [], [], [], [], [], [] for i in ids: rgb = cv2.imread(os.path.join(viewport, "rgb", f"{i}.png")) if save_img: cv2.imwrite(os.path.join(outdir, f"rgb_{i}.png"), rgb) if save_video: vrgb.append(os.path.join(outdir, f"rgb_{i}.png")) if depthLinear_enabled: depth = np.load(os.path.join(viewport, "depthLinear", f"{i}.npy")) depth = colorize_depth(depth) if save_img: cv2.imwrite(os.path.join(outdir, f"depthLinear_{i}.png"), depth) if save_video: vdepthLinear.append(os.path.join(outdir, f"depthLinear_{i}.png")) if depth_enabled: depth = np.load(os.path.join(viewport, "depth", f"{i}.npy")) if config["convert_depth"].get(): depth = 1/depth depth = colorize_depth(depth) if save_img: cv2.imwrite(os.path.join(outdir, f"depth_{i}.png"), depth) if save_video: vdepth.append(os.path.join(outdir, f"depth_{i}.png")) if normals_enabled: normals = np.load(os.path.join(viewport, "normals", f"{i}.npy")) normals = colorize_normals(normals) if save_img: cv2.imwrite(os.path.join(outdir, f"normals_{i}.png"), normals) if 
save_video: vnormals.append(os.path.join(outdir, f"normals_{i}.png")) if bbox2d_enabled: bbox2d = np.load(os.path.join(viewport, "bbox_2d_tight", f"{i}.npy"), allow_pickle=True) rgb_data = copy.deepcopy(rgb) bbox2d = colorize_bboxes(bbox2d, rgb_data) if save_img: cv2.imwrite(os.path.join(outdir, f"bbox2d_{i}.png"), bbox2d) if save_video: vbbox2d.append(os.path.join(outdir, f"bbox2d_{i}.png")) if bbox3d_enabled: bbox3d = np.load(os.path.join(viewport, "bbox_3d", f"{i}.npy"), allow_pickle=True) viewport_mat = np.load(os.path.join(viewport, "camera", f"{i}.npy"), allow_pickle=True) view_mat = viewport_mat.item()["view_projection_matrix"] pose_mat = viewport_mat.item()["pose"] if change_aperture: viewproj_mat = np.dot(pose_mat, view_mat) vertical_aperture = vertical_aperture vfov = 2 * math.atan(vertical_aperture / (2 * viewport_mat.item()["focal_length"])) viewproj_mat[1,1] = 1 / math.tan(vfov / 2) viewproj_mat = np.dot(np.linalg.inv(pose_mat), viewproj_mat) corners = project_pinhole(bbox3d["corners"].reshape(-1, 3), viewproj_mat) corners = corners.reshape(-1, 8, 3) rgb_data = copy.deepcopy(rgb) e = [] for idx,bb in enumerate(bbox3d): if bb['semanticLabel'] in ['zebra','human','google','shapenet']: e.append(corners[idx]) if corrected_bbox_folder is not None: corrected_bbox = np.load(os.path.join(corrected_bbox_folder, f"{i}.npy"), allow_pickle=True) corrected_bbox = corrected_bbox.item() for idx, bb in enumerate(bbox3d): if bb[1] in corrected_bbox['bbox3d']: print(f"Correcting bbox3d for {bb[1]}") # if corrected_bbox['bbox3d'] is dictionary if isinstance(corrected_bbox['bbox3d'][bb[1]], dict): bbox3d[idx]["corners"] = corrected_bbox['bbox3d'][bb[1]]["oriented"] / 0.01 else: bbox3d[idx]["corners"] = corrected_bbox['bbox3d'][bb[1]] / 0.01 bbox3d = colorize_bboxes_3d(np.array(e), rgb_data) if save_img: cv2.imwrite(os.path.join(outdir, f"bbox3d_{i}.png"), bbox3d) if save_video: vbbox3d.append(os.path.join(outdir, f"bbox3d_{i}.png")) if instance_enabled: instance = np.load(os.path.join(viewport, "instance", f"{i}.npy"), allow_pickle=True) if old_instance_map is None or always_update_map: old_instance_map = copy.deepcopy(instance[1]) instance[1] = copy.deepcopy(old_instance_map) instance_img = colorize_semantic_from_instance(instance[0], instance[1]) if save_img: cv2.imwrite(os.path.join(outdir, f"instance_{i}.png"), instance_img) if save_video: vinstance.append(os.path.join(outdir, f"instance_{i}.png")) if sem_enabled: sem = colorize_semantic_from_instance(instance[0], instance[1], sem=True) if save_img: cv2.imwrite(os.path.join(outdir, f"sem_{i}.png"), sem) if save_video: vsem.append(os.path.join(outdir, f"sem_{i}.png")) if motion_enabled: motion = np.load(os.path.join(viewport, "motion-vector", f"{i}.npy"), allow_pickle=True) motion = colorize_motion_vector(motion) if save_img: cv2.imwrite(os.path.join(outdir, f"motion_{i}.png"), motion) if save_video: vmotion.append(os.path.join(outdir, f"motion_{i}.png")) if save_video: height, width, layers = rgb.shape for v in zip([vrgb, vdepth, vdepthLinear, vnormals, vbbox2d, vbbox3d, vinstance, vmotion, vsem], ["rgb", "depth", "depthLinear", "normals", "bbox2d", "bbox3d", "instance", "motion", "sem"]): if len(v[0]) > 0: video = cv2.VideoWriter(os.path.join(outdir, f"{v[1]}.mp4"), cv2.VideoWriter_fourcc(*"mp4v"), 30, (width, height)) for img_path in v[0]: img = cv2.imread(img_path) if img.shape[2] < 3: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) video.write(img[:, :, :3]) video.release() os.system("ffmpeg -i " + os.path.join(outdir, f"{v[1]}.mp4") + " 
-vcodec libx264 -y " + os.path.join(outdir, f"{v[1]}_conv.mp4"))
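The colorize_depth routine above visualizes depth by working on its reciprocal: clip, shift to zero, and normalize by the maximum before scaling to 8 bit. A minimal standalone sketch of the same normalization, assuming a single depth .npy produced by the simulator (the input path is an assumption):

```python
import cv2
import numpy as np

def depth_to_gray(depth, to_inverse=True):
    """Map a metric depth image to an 8-bit grayscale visualization,
    mirroring the clip/shift/normalize steps of colorize_depth above."""
    d = depth.astype(np.float64).copy()
    if to_inverse:                      # visualize inverse depth, as the script does
        d = 1.0 / np.maximum(d, 1e-6)
    d = np.clip(d, 0, 255)
    d -= d.min()
    if d.max() > 0:
        d /= d.max() + 1e-8
    return (d * 255).astype(np.uint8)

depth = np.load("Viewport0/depth/1.npy")    # assumed location of one depth frame
cv2.imwrite("depth_vis.png", depth_to_gray(depth))
```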
17,948
Python
37.027542
127
0.651159
eliabntt/GRADE-RR/additional_scripts/pixel_to_world.py
""" This code serve as an example to project the points from the pixel coordinates to the world coordinates. You need the camera pose and projection matrix, as well as clearly the pixel depth. Those are available in the viewport folder, for example: Viewport0/camera Viewport0/depth (or depthLinear) You will load the camera viewport_mat from the camera folder. This dictionary will have the view projection matrix and the global camera pose They use a near/far clipping plane model, and not a focal length model. At the end of the file you can also check how to use the focal length model, but you need to know the focal length of the camera """ viewport_mat = np.load(os.path.join(viewport, 'camera',f'{i}.npy'), allow_pickle=True) # in Isaac view_projection is np.dot(view_matrix, proj_matrix) # view_matrix is local to world, i.e. the inverse of the pose matrix # the proj_matrix use the near far clipping plane model # a = -1.0 / np.tan(np.radians(fov / 2)) # b = -a * aspect_ratio # c = z_far / (z_far - z_near) # d = z_near * z_far / (z_far - z_near) # Construct the camera projection matrix # projection_matrix = np.array([ # [a, 0.0, 0.0, 0.0], # [0.0, b, 0.0, 0.0], # [0.0, 0.0, c, 1.0], # [0.0, 0.0, d, 0.0] # ]) view_mat = viewport_mat.item()["view_projection_matrix"] pose_mat = viewport_mat.item()["pose"] inv_VP = np.linalg.inv(view_mat) pixel_x = .... pixel_y = .... pixel_d = .... width = viewport_mat['resolution']['width'] width = viewport_mat['resolution']['height'] F = viewport_mat['clipping_range'][1] N = viewport_mat['clipping_range'][0] W = -pixel_d ndc_x = (2 * pixel_x) / width - 1 ndc_y = 1 - (2 * pixel_y) / height Z = ( (W*F/(F-N)) + N*F/(F-N) )/(W) xyz = np.array([ndc_x, ndc_y, Z, 1]) * W xyz = np.dot(xyz, inv_VP) # alternatively consider that a = -fx, b = fy, cx = widht / 2, cy = height /2 # and that the pose_mat has the translation in the last ROW (in unit coordinates, so mind the scale) tmp = np.dot(pose_mat, view_mat) fx = -tmp[0,0] fy = tmp[1,1] cx = width / 2 cy = height / 2 x = (px - cx) * d / fx y = (py - cy) * d / fy pt = [x,y,z,1] xyz = np.dot(cpose.T, pt)[:3]
2,131
Python
33.387096
128
0.656969
eliabntt/GRADE-RR/additional_scripts/check_folders.py
""" Use this to check if all the files/folders are there """ import os import ipdb mainpath = "/ps/project/irotate/" folders = ["DE_lot_obs_cam0"] tocheck = ["bbox_2d_loose","bbox_2d_tight","bbox_3d","camera","depthLinear","instance","poses","rgb"] for mainfolder in folders: for folder in os.listdir(os.path.join(mainpath, mainfolder)): for subfolder in [os.path.join(mainpath, mainfolder, folder, "Viewport0"), os.path.join(mainpath, mainfolder, folder, "Viewport0_occluded")]: print(subfolder) data = os.listdir(subfolder) if len(data) > len(tocheck): print("More than expected folders") print(subfolder) ipdb.set_trace() if len(data) < len(tocheck): print("Less than expected folders") print(subfolder) ipdb.set_trace() for f in data: if f not in tocheck: continue if len(os.listdir(os.path.join(subfolder, f))) != 1801: print("Not enough files in folder") print(os.path.join(subfolder, f)) ipdb.set_trace()
991
Python
29.060605
143
0.672048
eliabntt/GRADE-RR/additional_scripts/process_paths/change_paths.py
import argparse
import confuse
import os

def change_path(c_line, prefix, my_cc_path, match_str, normpath, remove_prefix=True):
    if remove_prefix:
        offset = len(match_str)
    else:
        offset = -1
    path = os.path.join(my_cc_path + c_line[c_line.find(match_str) + offset:])
    if normpath:
        path = os.path.normpath(path[:path.rfind("@")].replace('\\', "/")) + path[path.rfind("@"):]
    new_path = c_line[:c_line.find("@") + 1] + prefix + path
    return new_path

parser = argparse.ArgumentParser(description="USD reference changer")
parser.add_argument("--config_file", type=str, default="parser_config.yaml")
parser.add_argument("--input", type=str)
parser.add_argument("--output_name", type=str, default="")
parser.add_argument("--output_dir", type=str, default="")
args, unknown = parser.parse_known_args()

config = confuse.Configuration("USDRefChanger", __name__)
config.set_file(args.config_file)
config.set_args(args)

filename = config["input"].get()
output_loc = config["output_dir"].get()
if output_loc == "":
    output_loc = os.path.dirname(config["input"].get())
out_name = config["output_name"].get()
if out_name == "":
    out_name = os.path.basename(config["input"].get())[:-4] + "_proc.usda"
else:
    if out_name[-4:] != "usda":
        out_name += ".usda"
out_file_path = os.path.join(output_loc, out_name)

prefix_cc = config["prefix_cc"].get()
my_cc_path = config["cc_path"].get()
prefix_3dfront = config["prefix_front3d"].get()
my_front_path = config["front3d_path"].get()
prefix_cloth3d = config["prefix_cloth3d"].get()
my_cloth_path = config["cloth3d_path"].get()
prefix_surreal = config["prefix_surreal"].get()
my_surr_path = config["surreal_path"].get()
normpath = config["normpath"].get()

with open(out_file_path, "w") as o_file, open(filename, "r") as i_file:
    lines = i_file.readlines()
    for line in lines:
        c_line = line
        if (".png" in line or ".jpg" in line or ".jpeg" in line or ".tga" in line
                or ".tif" in line or ".bmp" in line) and "cc_textures" not in line:
            # remove 3D-FUTURE-model
            if "3D-FUTURE-model" in line:
                # import ipdb; ipdb.set_trace()
                c_line = line.replace("3D-FUTURE-model/", "")
            if "cc_textures" not in line:  # and "../../" in line:
                # import ipdb; ipdb.set_trace()
                # add 3D-FUTURE-model after ../../
                l_index = c_line.find("../../")
                c_line = c_line[:l_index + 6] + "3D-FUTURE-model/" + c_line[l_index + 6:]
        if "opacity_constant" in line or "reflection_roughness_constant" in line or "metallic_constant" in line:
            tmp = c_line.split(" ")
            tmp[-1] = tmp[-1].replace("\n", "")
            if "int" in tmp:
                tmp[tmp.index("int")] = "float"
            if float(tmp[-1]) == 0:
                tmp[-1] = str(0.00001)
            try:
                tmp[-1] = str(format(float(tmp[-1])))
            except:
                import ipdb; ipdb.set_trace()
            c_line = " ".join(tmp) + "\n"
        elif "cc_textures" in line:
            c_line = change_path(c_line, prefix_cc, my_cc_path, "cc_textures", normpath, remove_prefix=False)
        elif "3DFRONT" in line or "3D-FUTURE" in line:
            if "future" in line.lower():
                c_line = change_path(c_line, prefix_3dfront, my_front_path, "3D-FUTURE-model", normpath)
            else:
                import ipdb; ipdb.set_trace()
                c_line = change_path(c_line, prefix_3dfront, my_front_path, "3DFRONT", normpath)
        elif "cloth3d" in line:
            c_line = change_path(c_line, prefix_cloth3d, my_cloth_path, "cloth_3d", normpath)
        elif "surreal" in line:
            c_line = change_path(c_line, prefix_surreal, my_surr_path, "surreal", normpath)
        o_file.write(c_line)
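A usage sketch for the change_path helper defined above; the USDA reference line and the target directory are made-up examples:

```python
line = 'asset inputs:diffuse_texture = @../../cc_textures/Bricks/Bricks_col.png@\n'
new_line = change_path(line, prefix="", my_cc_path="/data/assets/",
                       match_str="cc_textures", normpath=True, remove_prefix=False)
# roughly: 'asset inputs:diffuse_texture = @/data/assets/cc_textures/Bricks/Bricks_col.png@'
print(new_line)
```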
3,457
Python
34.285714
141
0.647961
eliabntt/GRADE-RR/simulator/people_and_objects.py
import argparse import time import os import numpy as np # base_env_path and other settings are in the config file out_dir = "" # set this to a temporary empty dir from omni.isaac.kit import SimulationApp def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' parser = argparse.ArgumentParser(description="Your second IsaacSim run") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("world_and_robot", __name__) config.set_file(args.config_file) config.set_args(args) CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") omni.usd.get_context().open_stage(config["base_env_path"].get(), None) kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() meters_per_unit = config["meters_per_unit"].get() simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=meters_per_unit, backend='torch') simulation_context.initialize_physics() physx_interface = omni.physx.acquire_physx_interface() physx_interface.start_simulation() print("Adding ROS clock, you can check with rostopic echo /clock") _clock_graph = add_clock() simulation_context.play() for _ in range(10): simulation_context.step() og.Controller.evaluate_sync(_clock_graph) simulation_context.stop() import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.objects_utils import * from utils.human_utils import * simulation_environment_setup(need_ros = True) if base_world_path != "": from utils.environment_utils import * print("Loading environment...") environment = environment(config, meters_per_unit=meters_per_unit) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) print("Visualization...") for _ in range(1000): simulation_context.render() simulation_context.step(render=False) print("Environment loading done...") add_colliders(env_prim_path) print("Colliders added..") simulation_context.play() x, y, z = 0, 0, 0 if out_dir != "": environment.generate_map(out_dir, origin=[x,y,z]) print("Map generated..") simulation_context.stop() ros_transform_components = [] camera_list = [] viewport_list = [] camera_pose, camera_pose_pub = [], [] imus,imu_pubs = [], [] lidars = [] odoms, odom_pubs = [], [] from omni.isaac.sensor import _sensor _is = _sensor.acquire_imu_sensor_interface() old_h_ape, old_v_ape = [], [] _dc = dynamic_control_interface() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path) x, y, z, yaw 
= np.random.randint(-100,100,4) set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0, 0, np.deg2rad(yaw)], upper_zlim = z * 2, lower_zlim = -z * 2 ) print("Adding ROS components") add_ros_components(robot_base_prim_path, n, ros_transform_components, camera_list, viewport_list, camera_pose, camera_pose_pub, imu_pubs, imus, odoms, odom_pubs, lidars, [], config, old_h_ape, old_v_ape, _is, simulation_context, _clock, irotate=False) kit.update() timeline = setup_timeline(config) # setup the timeline before adding anything animated print("Loading people") n = 0 human_base_prim_path = config["human_base_prim_path"].get() while n < config["num_humans"].get(): folder = rng.choice(human_folders) random_name = rng.choice(os.listdir(os.path.join(human_export_folder, folder))) asset_path = os.path.join(human_export_folder, folder, random_name, random_name + ".usd") print("Loading human {} from {}".format(random_name, folder)) tmp_pkl = pkl.load(open(os.path.join(human_export_folder, folder, random_name, random_name + ".pkl"), 'rb')) used_ob_stl_paths.append(os.path.join(human_export_folder, folder, random_name, random_name + ".stl")) load_human(human_base_prim_path, n, asset_path) stl_path = os.path.join(human_export_folder, folder, random_name, random_name + ".stl") x = np.random.randint(environment.env_limits_shifted[0], environment.env_limits_shifted[3]) y = np.random.randint(environment.env_limits_shifted[1], environment.env_limits_shifted[4]) z = 0 yaw = np.random.randint(0,360) # position the mesh set_translate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit]) set_scale(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), 1 / meters_per_unit) set_rotate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [0, 0, np.deg2rad(yaw)]) n += 1 print("Load objects") google_ob_used, shapenet_ob_used = load_objects(config, environment, np.random.default_rng(), [], 1/meters_per_unit) if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) print("Note that the rendering is now blocking until finished") for i in range(100): print(f"Iteration {i}/100", end="\r") sleeping(simulation_context, viewport_list, raytracing=config["rtx_mode"].get()) # deselect all objects omni.usd.get_context().get_selection().clear_selected_prim_paths() omni.usd.get_context().get_selection().set_selected_prim_paths([], False) timeline.set_current_time(0) timeline.set_auto_update(False) # this no longer works as expected. # Theoretically, once this is set and the timeline plays, rendering will not advance the timeline # this is no longer the case. Thus, keep track of the ctime (as we do within sleeping function) # the simulation context can be kept stopped, but that will prevent physics and time to advance. 
# https://forums.developer.nvidia.com/t/the-timeline-set-auto-update-false-no-longer-works/253504/10 simulation_context.play() for i in range(2000): simulation_context.step(render=False) og.Controller.evaluate_sync(_clock) time.sleep(0.2) simulation_context.render() # publish IMU print("Publishing IMU...") pub_imu(_is, imu_pubs, robot_imu_frames, meters_per_unit) if i % ratio_joints == 0: for js in joint_states: og.Controller.set(og.Controller.attribute(f"{js}/OnImpulseEvent.state:enableImpulse"), True) if i % ratio_tf: for tf in tf_trees: og.Controller.set(og.Controller.attribute(f"{tf}/OnImpulseEvent.state:enableImpulse"), True) if simulation_step % ratio_odom == 0: c_pose, _ = pub_odom(odoms, odom_pubs, _dc, meters_per_unit) pub_cam_pose(camera_pose, camera_pose_pub, _dc, meters_per_unit) if simulation_step % ratio_camera == 0: # The RTX LiDAR is still a fuzzy component. The "normal" LiDAR is more stable, but won't see non-colliding objects for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(1) ctime = timeline.get_current_time() simulation_context.render() timeline.set_current_time(ctime) for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(0) pub_and_write_images(simulation_context, viewport_list, ros_camera_list, raytracing) # clearly not writing anything here timeline.forward_one_frame() # advancing the timeline simulation_context.stop() try: kit.close() except: pass
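The main loop above publishes each ROS output every N physics steps (ratio_joints, ratio_tf, ratio_odom, ratio_camera). A minimal sketch of that rate-division pattern, with all rates assumed:

```python
physics_hz = 240                                      # assumed physics rate
rates = {"imu": 240, "joints": 120, "tf": 60, "odom": 30, "camera": 30}
ratios = {name: physics_hz // hz for name, hz in rates.items()}

for step in range(2 * physics_hz):                    # two simulated seconds
    # simulation_context.step(render=False) would go here
    for name, ratio in ratios.items():
        if step % ratio == 0:
            pass  # publish `name` (pub_imu, pub_odom, pub_and_write_images, ...)
```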
8,219
Python
37.411215
185
0.709089
eliabntt/GRADE-RR/simulator/smpl_and_bbox.py
import argparse import carb import confuse import ipdb import math import numpy as np import os import roslaunch import rospy import scipy.spatial.transform as tf import sys import time import traceback import trimesh import yaml from omni.isaac.kit import SimulationApp from time import sleep from omni.syntheticdata import sensors, helpers as sensors, generic_helper_lib def get_obj_pose(time): """Get pose of all objects with a semantic label. """ stage = omni.usd.get_context().get_stage() mappings = generic_helper_lib.get_instance_mappings() pose = [] for m in mappings: prim_path = m[1] prim = stage.GetPrimAtPath(prim_path) prim_tf = omni.usd.get_world_transform_matrix(prim, time) pose.append((str(prim_path), m[2], str(m[3]), np.array(prim_tf))) return pose def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' """ Exported information will have the shape of [[prim_asset_path, bbox] [prim_asset_path,skel] [prim_asset_path, init_tf, init_rot]] prim_asset_path is string of the asset in the simulation. It will be processed in order so expect groups of human,cloth --- possibly reversed All is output in WORLD frame. Please check the notes regarding projection in camera frame. bbox will be of shape (ef, 8, 3) if only one bbox is saved or (ef, 2, 8, 3) if both are saved ef will be either the last animated frame (given the simulated environment) or the last frame of the animations + 1 if you need to access the bbox of the mesh after that just use [-1] skel is the smpl skeleton info use the flags below to export only the skeleton, only the garments or only the body or any combination init_rot is the same of the info file init_tf is equal, except that here we account for the small vertical translation that is added to meshes very close to the ground -- this was a bug during the data generation which actually has very little influence (< 0.1 cm in vertical displacement) -- the design choice was to save the placement value and then have always a way to recover the eventual vertical displacement which is anyway based on a rule (check human_utils.py:move_humans_to_ground) everything is in meters NOTE: We start writing images from timeline.frame = 1 (1/fps) since the "forward_timeline" call has been placed _before_ the publishing """ try: parser = argparse.ArgumentParser(description="Get Bounding Boxes") parser.add_argument("--experiment_folder", type=str, help="The experiment folder with the USD file and the info file") parser.add_argument("--body", type=boolean_string, default=True, help="When true process the bodies") parser.add_argument("--garments", type=boolean_string, default=True, help="When true process the garments") parser.add_argument("--base_path", type=str, default="my_human_", help="Human prim base path") parser.add_argument("--headless", type=boolean_string, default=False, help="Whether run this headless or not") parser.add_argument("--write", type=boolean_string, default=True, help="Whether to write results") parser.add_argument("--both", type=boolean_string, default=False, help="Whether to write both vertex types -- preference in code is both - fast - slow") parser.add_argument("--fast", type=boolean_string, default=True, help="Whether to write only the axis-aligned box or the oriented one") parser.add_argument("--only_exp", type=boolean_string, default=True, help="Whether to export only the experiment (considering the reverse strategy) or the whole sequences") parser.add_argument("--get_skel", 
type=boolean_string, default=True, help="Whether to include the skeleton info") parser.add_argument("--skel_root", type=str, default="avg_root", help="This is a recognizable last part of the root of the skeleton prim, in our case _avg_root " + "It will process ONLY the path of which the last part is this root") parser.add_argument("--correct_poses", type=boolean_string, default=False) args, unknown = parser.parse_known_args() config = confuse.Configuration("BoundingBoxes", __name__) config.set_args(args) exp_info = np.load(os.path.join(config["experiment_folder"].get(), "experiment_info.npy"), allow_pickle=True) exp_info = exp_info.item() CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.objects_utils import * from utils.environment_utils import * from utils.human_utils import * simulation_environment_setup() local_file_prefix = "my-computer://" omni.usd.get_context().open_stage(local_file_prefix + config["experiment_folder"].get() + "/loaded_stage.usd", None) kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") simulation_context = SimulationContext(physics_dt=1.0 / exp_info["config"]["physics_hz"].get(), rendering_dt=1.0 / exp_info["config"]["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.start_simulation() meters_per_unit = UsdGeom.GetStageMetersPerUnit(stage) set_raytracing_settings(exp_info["config"]["physics_hz"].get()) timeline = setup_timeline(exp_info["config"]) base_path = config["base_path"].get() fast, both, slow = False, False, False if config["both"].get(): both = True elif config["fast"].get(): fast = True else: slow = True get_skel = config["get_skel"] only_exp = config["only_exp"].get() humans_info = exp_info["humans"] write = config["write"].get() if write: results = [] stime = time.time() helper_list_global = [] helper_list_skel = [] skel_root = config["skel_root"].get() smpl_info_path = "" for prim in stage.Traverse(): prim_path = str(prim.GetPath()).lower() if base_path in prim_path: if (get_skel and skel_root in prim_path and prim_path[:prim_path.find(skel_root)] not in helper_list_skel) or \ (str(prim.GetTypeName()).lower() == "mesh" and "points" in prim.GetPropertyNames()): print(f"Processing {prim}") parent = prim.GetParent() refs = omni.usd.get_composed_references_from_prim(parent) while len(refs) == 0: parent = parent.GetParent() refs = omni.usd.get_composed_references_from_prim(parent) human_global_path = str(omni.usd.get_composed_references_from_prim(parent)[0][0].assetPath) human_global_path = human_global_path[len(local_file_prefix):] index = humans_info['folders'].index(human_global_path[:-3] + "stl") init_tf = np.array(parent.GetAttribute("xformOp:translate").Get()) init_rot = parent.GetAttribute("xformOp:orient").Get() init_rot = np.array([init_rot.GetImaginary()[0], init_rot.GetImaginary()[1], init_rot.GetImaginary()[2], init_rot.GetReal()]) init_rot_mat = tf.Rotation.from_quat(init_rot).as_matrix() if write and str(parent.GetPath()) not in helper_list_global: results.append([str(parent.GetPath()), init_tf, init_rot]) helper_list_global.append(str(parent.GetPath())) if human_global_path[:-3] + "pkl" 
!= smpl_info_path: smpl_info_path = human_global_path[:-3] + "pkl" smpl_anim_info = pkl.load(open(smpl_info_path, 'rb')) smpl_info = smpl_anim_info["info"] r = smpl_info["zrot"] rot_mat = tf.Rotation.from_euler('z', r).as_matrix() ef = int(math.ceil(smpl_anim_info["ef"] * exp_info["config"]["fps"].get() / 24)) if only_exp: ef = min(ef, int(math.ceil( exp_info["config"]["experiment_length"].get() / exp_info['reversing_timeline_ratio']))) if (get_skel and skel_root in prim_path): helper_list_skel.append(prim_path[:prim_path.find(skel_root)]) skeleton, joint_token = AnimationSchema.SkelJoint(prim).GetJoint() skel_cache = UsdSkel.Cache() skel_query = skel_cache.GetSkelQuery(UsdSkel.Skeleton(skeleton.GetPrim())) xfCache = UsdGeom.XformCache() skeleton_info = np.empty((ef, 3), dtype=object) for i in range(0, ef): xfCache.SetTime(i) transforms = skel_query.ComputeJointWorldTransforms(xfCache) translates, rotations, scales = UsdSkel.DecomposeTransforms(transforms) skeleton_info[i] = [np.array(translates) * meters_per_unit, np.array(rotations), np.array(scales) * meters_per_unit] if write: results.append([str(prim.GetPath()), np.array(skeleton_info)]) else: points = UsdGeom.PointBased(prim) if both: bounds = np.zeros((ef, 2, 8, 3)) else: bounds = np.zeros((ef, 8, 3)) for i in range(0, ef): points_in_mesh = points.ComputePointsAtTime(i, Usd.TimeCode(i)) points_in_mesh = np.array(points_in_mesh) # bound = points.ComputeWorldBound(i, "default") # for j in range(8): # print(bound.ComputeAlignedRange().GetCorner(j)) points_in_mesh = ((points_in_mesh @ rot_mat.T @ init_rot_mat.T) + init_tf * meters_per_unit) # normals = prim.GetAttribute("normals").Get(i) # normals = np.array(normals) mymesh = trimesh.PointCloud(points_in_mesh) if fast: temp_bounds = mymesh.bounding_box.vertices elif slow: temp_bounds = mymesh.bounding_box_oriented.vertices elif both: temp_bounds = [mymesh.bounding_box.vertices, mymesh.bounding_box_oriented.vertices] bounds[i] = temp_bounds if write: results.append([str(prim.GetPath()), bounds]) results = np.array(results, dtype=object) print(f"etime {time.time() - stime}") if write: np.save(os.path.join(config["experiment_folder"].get(), "bboxes.npy"), results) except: extype, value, tb = sys.exc_info() traceback.print_exc() import ipdb ipdb.set_trace() finally: simulation_context.stop() try: kit.close() except: pass
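The --fast/--both flags above select between the axis-aligned and the oriented bounding box of the skinned points, both computed through trimesh. A self-contained sketch on random points:

```python
import numpy as np
import trimesh

points = np.random.default_rng(0).normal(size=(500, 3))
cloud = trimesh.PointCloud(points)

aabb_corners = cloud.bounding_box.vertices            # (8, 3) axis-aligned corners ("fast")
obb_corners = cloud.bounding_box_oriented.vertices    # (8, 3) oriented corners ("slow")
print(aabb_corners.shape, obb_corners.shape)
```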
10,213
Python
39.693227
202
0.684911
eliabntt/GRADE-RR/simulator/replay_experiment.py
import argparse import carb import confuse import cv2 import ipdb import math import numpy as np import os import rosbag import roslaunch import rospy import scipy.spatial.transform as tf import sys import time import traceback import trimesh import yaml from omni.isaac.kit import SimulationApp from time import sleep def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' """ Suppose you want a stereo camera And to have optical flow And LiDAR (not fully supported yet) of the experiments. This is a way in which you can re-process your info and get the results. Suggestion: teleport is much more precise (sub mm difference). Working with velocities is fisy This code is a bit hard-coded as it is a demonstration code. """ try: parser = argparse.ArgumentParser(description="Get Bounding Boxes") parser.add_argument("--experiment_folder", type=str, help="The experiment folder with the USD file and the info file") parser.add_argument("--headless", type=boolean_string, default=False, help="Whether run this headless or not") parser.add_argument("--write", type=boolean_string, default=False, help="Whether to write new cameras results") parser.add_argument("--write_flow", type=boolean_string, default=False, help="Whether to write optical flow") parser.add_argument("--write_normals", type=boolean_string, default=False, help="Whether to write normals") parser.add_argument("--use_teleport", type=boolean_string, default=False, help="Whether to use teleport or force joint vel, both have adv and disadv") parser.add_argument("--use_reindex", type=boolean_string, default=False, help="Whether to use reindexed bags") parser.add_argument("--bag_basename", type=str, default="7659a6c9-9fc7-4be5-bc93-5b202ff2a22b") parser.add_argument("--out_folder_npy", type=str, default='additional_data') parser.add_argument("--bag_subpath", type=str, default="") args, unknown = parser.parse_known_args() config = confuse.Configuration("NewSensor", __name__) config.set_args(args) exp_info = np.load(os.path.join(config["experiment_folder"].get(), "experiment_info.npy"), allow_pickle=True) exp_info = exp_info.item() poses_path = os.path.join(config["experiment_folder"].get(), "Viewport0", "camera") write_flow = config["write_flow"].get() write_normals = config["write_normals"].get() write = config["write"].get() CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.objects_utils import * from utils.environment_utils import * from utils.human_utils import * simulation_environment_setup() rospy.init_node("new_sensor_publisher", anonymous=True, disable_signals=True, log_level=rospy.ERROR) local_file_prefix = "my-computer://" omni.usd.get_context().open_stage(local_file_prefix + config["experiment_folder"].get() + "/loaded_stage.usd", None) kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() simulation_context = SimulationContext(physics_dt=1.0 / exp_info["config"]["physics_hz"].get(), rendering_dt=1.0 / exp_info["config"]["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.initialize_physics() meters_per_unit = 
UsdGeom.GetStageMetersPerUnit(stage) set_raytracing_settings(exp_info["config"]["physics_hz"].get()) timeline = setup_timeline(exp_info["config"]) reversing_timeline_ratio = exp_info['reversing_timeline_ratio'] experiment_length = exp_info['config']['experiment_length'].get() ratio_camera = exp_info['config']['ratio_camera'].get() cnt_reversal = 1 simulation_context.stop() ### here we add the new camera to the robot. It will be located 5 cm to the right w.r.t. the original one old_h_ape = [] old_v_ape = [] viewport_window_list = [] ros_camera_list = [] # omni.kit.commands.execute('CopyPrim', # path_from='/my_robot_0/camera_link/Camera', # path_to='/my_robot_0/camera_link/Camera_stereo', # exclusive_select=False) # set_translate(stage.GetPrimAtPath('/my_robot_0/camera_link/Camera_stereo'), [1, 0, 0]) # component, viewport = add_camera_and_viewport("/my_robot_0/camera_link", # exp_info["config"]["robot_sensor_size"].get(), old_h_ape, old_v_ape, # simulation_context, 0, 0, camera_path="Camera_stereo") # cam_outputs = control_camera(viewport, simulation_context) # ros_camera_list.append([0, component, cam_outputs]) # viewport_window_list.append(viewport) # omni.kit.commands.execute('CopyPrim', # path_from='/my_robot_0/camera_link/Camera_npy', # path_to='/my_robot_0/camera_link/Camera_npy_stereo', # exclusive_select=False) # # set_translate(stage.GetPrimAtPath('/my_robot_0/camera_link/Camera_npy_stereo'), [1, 0, 0]) # viewport_npy, _ = create_viewport("/my_robot_0/camera_link/Camera_npy_stereo", config["headless"].get(), # 0, exp_info["config"]["npy_sensor_size"].get(), old_h_ape, old_v_ape, simulation_context) # viewport_window_list.append(viewport_npy) viewport_npy, _ = create_viewport("/my_robot_0/camera_link/Camera_npy", config["headless"].get(), 0, exp_info["config"]["npy_sensor_size"].get(), old_h_ape, old_v_ape, simulation_context) viewport_window_list.append(viewport_npy) is_rtx = exp_info["config"]["rtx_mode"].get() if is_rtx: set_raytracing_settings(exp_info["config"]["physics_hz"].get()) else: set_pathtracing_settings(exp_info["config"]["physics_hz"].get()) simulation_context.play() for _ in range(5): simulation_context.render() old_v_ape = [2.32] * len(old_v_ape) # todo this is harcoded for index, cam in enumerate(viewport_window_list): simulation_context.step(render=False) simulation_context.render() camera = stage.GetPrimAtPath(cam.get_active_camera()) camera.GetAttribute("horizontalAperture").Set(old_h_ape[index]) camera.GetAttribute("verticalAperture").Set(old_v_ape[index]) simulation_context.stop() _clock_graph = add_clock() # add ROS clock og.Controller.evaluate_sync(_clock_graph) # add a new sensor lidars = [] # sensor = add_lidar(f"/my_robot_0/yaw_link", [0, 0, -.1], [0, 0, 0], is_3d=True, is_2d=False) # lidars.append(sensor) kit.update() cnt_tf = -1 use_teleport = config["use_teleport"].get() use_reindex = config["use_reindex"].get() id_bag = 0 bag_path = os.path.join(config["experiment_folder"].get(), config['bag_subpath'].get(), f"{config['bag_basename'].get()}_{id_bag}.bag") joint_order = ['x_joint', 'y_joint', 'z_joint', 'roll_joint', 'pitch_joint', 'yaw_joint'] joint_position = [] joint_velocity = [] joint_time = [] robot_pose = [] started = use_reindex while os.path.exists(bag_path): bag = rosbag.Bag(bag_path) for topic, msg, t in bag.read_messages( topics=["/my_robot_0/joint_states", "/my_robot_0/odom", "/starting_experiment"]): if not started: if topic == "/starting_experiment": started = True continue else: continue if 'joint' in topic: 
joint_position.append(msg.position) joint_velocity.append(msg.velocity) joint_time.append(msg.header.stamp) else: robot_pose.append([msg.pose.pose.position, msg.pose.pose.orientation]) id_bag += 1 bag_path = os.path.join(config["experiment_folder"].get(), config['bag_subpath'].get(), f"{config['bag_basename'].get()}_{id_bag}.bag") if len(joint_position) == 0: print("No bag found") sys.exit(-1) ratio_tf = exp_info['config']['ratio_tf'].get() init_x, init_y, init_z, init_roll, init_pitch, init_yaw = get_robot_joint_init_loc('/my_robot_0') init_pos = np.array([init_x, init_y, init_z]) init_rot = np.array([init_roll, init_pitch, init_yaw]) change_collision_at_path(False,paths=['/my_robot_0/camera_link/Cube.physics:collisionEnabled','/my_robot_0/yaw_link/visuals.physics:collisionEnabled']) kit.update() set_drone_joints_init_loc('/my_robot_0', [0, 0, 0], [0,0,0], 300, lower_zlim=0) # todo use actual limit from simulation kit.update() simulation_context.play() for _ in range(5): simulation_context.step(render=False) simulation_context.render() timeline.set_auto_update(False) timeline.set_current_time(min(- 1 / (exp_info['config']["physics_hz"].get() / ratio_camera), -abs(exp_info['config']["bootstrap_exploration"].get()))) simulation_step = int(timeline.get_current_time() * exp_info['config']["physics_hz"].get()) - 1 out_dir_npy = os.path.join(config['experiment_folder'].get(), config['out_folder_npy'].get()) if write_flow: _tmp = extension_custom.MyRecorder() _tmp.on_startup() _settings = _tmp.get_default_settings() _settings["rgb"]["enabled"] = False _settings["motion-vector"]["enabled"] = write_flow _settings["motion-vector"]["colorize"] = False _settings["motion-vector"]["npy"] = True my_recorder_flow = recorder_setup(_settings, out_dir_npy, True, 0) my_recorder_flow._enable_record = False if write_normals: _tmp = extension_custom.MyRecorder() _tmp.on_startup() _settings = _tmp.get_default_settings() _settings["rgb"]["enabled"] = True _settings["normals"]["enabled"] = write_normals _settings["motion-vector"]["colorize"] = False _settings["motion-vector"]["npy"] = True my_recorder_normals = recorder_setup(_settings, out_dir_npy, True, 0) my_recorder_normals._enable_record = False if write: _tmp = exp_info['config']['_recorder_settings'].get() _tmp["depth"]["enabled"] = False _tmp["depthLinear"]["enabled"] = False _tmp["semantic"]["enabled"] = False _tmp["normals"]["enabled"] = False _tmp["bbox_2d_loose"]["enabled"] = False _tmp["bbox_2d_tight"]["enabled"] = False _tmp["bbox_3d"]["enabled"] = False my_recorder = recorder_setup(_tmp, out_dir_npy, True, 0) my_recorder._enable_record = False # how to hide dynamic content dynamicprims = [] for prim in stage.Traverse(): if 'my_human' in str(prim.GetPath()).lower(): dynamicprims.append(prim) for prim in stage.GetPrimAtPath("/World").GetChildren()[6:]: dynamicprims.append(prim) toggle_dynamic_objects(dynamicprims, False) forward = True while kit.is_running(): simulation_step += 1 if simulation_step == 0: _dc = dynamic_control_interface() handle = _dc.get_rigid_body('/my_robot_0/yaw_link') if not use_teleport: art = _dc.get_articulation('/my_robot_0') joints = [] _dc.wake_up_articulation(art) for joint in joint_order: joints.append(_dc.find_articulation_dof(art, joint)) change_collision_at_path(True,paths=['/my_robot_0/camera_link/Cube.physics:collisionEnabled','/my_robot_0/yaw_link/visuals.physics:collisionEnabled']) og.Controller.evaluate_sync(_clock_graph) # since the first image generated is at time=1/30, we add 7/240 prev_time = 
timeline.get_current_time() + 7 / 240 * (simulation_step == 0) timeline.set_current_time(prev_time) simulation_step += 8 sleeping(simulation_context, viewport_window_list, is_rtx) try: if write: my_recorder._update() my_recorder._enable_record = True if write_flow: my_recorder_flow._update() my_recorder_flow._enable_record = True if write_normals: my_recorder_normals._update() my_recorder_normals._enable_record = True except: sleeping(simulation_context, viewport_window_list, is_rtx) if write: my_recorder._update() my_recorder._enable_record = True if write_flow: my_recorder_flow._update() my_recorder_flow._enable_record = True if write_normals: my_recorder_normals._update() my_recorder_normals._enable_record = True simulation_context.render() simulation_context.render() timeline.set_current_time(prev_time) if simulation_step < 0: simulation_context.step(render=False) if (simulation_step % ratio_camera == 0): timeline.forward_one_frame() continue if use_teleport: if simulation_step % ratio_tf == 0: cnt_tf += 1 teleport("/my_robot_0", np.array(joint_position[cnt_tf][:3]) / meters_per_unit + init_pos , tf.Rotation.from_euler('XYZ', joint_position[cnt_tf][3:] + init_rot).as_quat()) if (simulation_step % (ratio_tf * 2) == 0): # odm is published half the rate of the tf myp = _dc.get_rigid_body_pose(handle) print( f"pose diff {np.array(_dc.get_rigid_body_pose(handle).p) / 100 - np.array([robot_pose[int(cnt_tf / 2)][0].x, robot_pose[int(cnt_tf / 2)][0].y, robot_pose[int(cnt_tf / 2)][0].z])}") else: vel = np.array(joint_velocity[ cnt_tf]) # or average position between the two, or use the IMU to interpolate also which has 240 hz pos = (np.array(joint_position[cnt_tf][:3]) + vel[:3] * 1 / 240) / meters_per_unit + init_pos ori = (np.array(joint_position[cnt_tf][3:]) + vel[3:] * 1 / 240) + init_rot teleport("/my_robot_0", pos, tf.Rotation.from_euler('XYZ', ori).as_quat()) else: _dc.wake_up_articulation(art) if simulation_step % ratio_tf == 0: cnt_tf += 1 vel = np.array(joint_velocity[cnt_tf]) next_vel = vel if cnt_tf < len(joint_position) - 1: next_vel = np.array(joint_velocity[cnt_tf + 1]) if cnt_tf == 0: pos = np.append(np.array(joint_position[cnt_tf][:3]) / meters_per_unit + init_pos - vel[:3] * 1 / 240, joint_position[cnt_tf][3:] + init_rot - vel[3:] * 1 / 240) for idx, joint in enumerate(joints): _dc.set_dof_position(joint, pos[idx] * (-1 if idx == 1 else 1)) cvel = (vel + next_vel) / 2 cvel[:3] = cvel[:3] / meters_per_unit _dc.set_articulation_dof_velocity_targets(art, list(cvel)) for idx, joint in enumerate(joints): _dc.set_dof_velocity(joint, cvel[idx] * (-1 if idx == 1 else 1)) if (simulation_step % (ratio_tf * 2) == 0): myp = _dc.get_rigid_body_pose(handle) print( f"pose diff {np.array(_dc.get_rigid_body_pose(handle).p) / 100 - np.array([robot_pose[int(cnt_tf / 2)][0].x, robot_pose[int(cnt_tf / 2)][0].y, robot_pose[int(cnt_tf / 2)][0].z])}") if simulation_step % 8 == 0: # tmp = np.load( # f'/ps/project/irotate/GRADE_DATA/DE/7659a6c9-9fc7-4be5-bc93-5b202ff2a22b/Viewport0/camera/{int(simulation_step/8)}.npy', # allow_pickle=True).item() prim_tf = omni.usd.get_world_transform_matrix(stage.GetPrimAtPath('/my_robot_0/camera_link/Camera')) # in v2022 this is the only viable option to control time since timeline.set_auto_update=False is not working timeline.set_current_time(prev_time + 1 / 240 * (1 if forward else -1)) prev_time = timeline.get_current_time() simulation_context.step(render=False) simulation_context.render() print("Clocking...") # NOTE THAT THIS MIGHT GET CONFUSING -- 
reindexing/retiming is needed for sure. Tests need to be careful! og.Controller.evaluate_sync(_clock_graph) if simulation_step == 0: og.Controller.evaluate_sync(_clock_graph) time.sleep(0.2) if simulation_step % ratio_camera == 0: if (simulation_step + ratio_camera) / ratio_camera < (experiment_length / reversing_timeline_ratio) * ( cnt_reversal): forward = True else: if (simulation_step + ratio_camera) / ratio_camera >= ((experiment_length - 1) / reversing_timeline_ratio) * ( cnt_reversal + 1) or \ (timeline.get_current_time() - 1 / timeline.get_time_codes_per_seconds()) < 0: cnt_reversal += 2 forward = True else: forward = False if write_flow: if my_recorder_flow._enable_record: simulation_context.render() my_recorder_flow._counter += 1 time.sleep(1.5) # this seems necessary my_recorder_flow._update() # you have two ways to proceed here. the sleeping performs just the rendering and then you manually toggle the recorder below # otherwise use pub_and_write_images which automatically calls it if necessary. In the latter case, remember to increase the counter sleeping(simulation_context, viewport_window_list, is_rtx) # if write: # if my_recorder._enable_record: # my_recorder._counter += 1 # pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, is_rtx, my_recorder) if write: if my_recorder._enable_record: my_recorder._counter += 1 my_recorder._update() if write_normals: if my_recorder_normals._enable_record: my_recorder_normals._counter += 1 my_recorder_normals._update() # new sensor here -- imagine 30 fps -- in that case I need to publish # if you need sensors in the middle you need to interpolate # using IMU and TF readings # you can access those from the rosbags # note you might need to work with the timeline times if the rate that you want is different # if simulation_step % ratio_camera == 0: # for lidar in lidars: # og.Controller.attribute(lidar + ".inputs:step").set(1) # ctime = timeline.get_current_time() # simulation_context.render() # # point_cloud = og.Controller().node("/Render/PostProcess/SDGPipeline/RenderProduct_Replicator_RtxSensorCpuIsaacComputeRTXLidarPointCloud").get_attribute("outputs:pointCloudData").get() # # laser_scan = og.Controller().node("/Render/PostProcess/SDGPipeline/RenderProduct_Replicator_RtxSensorCpuIsaacComputeRTXLidarFlatScan").get_attribute("outputs:linearDepthData").get() # timeline.set_current_time(ctime) # for lidar in lidars: # og.Controller.attribute(lidar+".inputs:step").set(0) if simulation_step % ratio_camera == 0 and simulation_step / ratio_camera == experiment_length: print("End of experiment!!!") simulation_context.pause() break except: extype, value, tb = sys.exc_info() traceback.print_exc() import ipdb ipdb.set_trace() finally: simulation_context.stop() try: kit.close() except: pass
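The replay loop notes that sensors requested at intermediate timestamps must be interpolated from the IMU/TF readings in the bags. A minimal sketch of linear interpolation between two recorded joint-state samples (timestamps and values are assumed):

```python
import numpy as np

def interpolate_joint_state(t_query, t0, q0, t1, q1):
    """Linearly interpolate a joint-position vector between two bag samples."""
    if t1 == t0:
        return np.asarray(q0, dtype=float)
    alpha = (t_query - t0) / (t1 - t0)
    return (1.0 - alpha) * np.asarray(q0, dtype=float) + alpha * np.asarray(q1, dtype=float)

# joint states arrive at 240 Hz; query a pose halfway between two samples
q = interpolate_joint_state(1 / 480, 0.0, [0.0, 0.0, 1.0], 1 / 240, [0.01, 0.0, 1.0])
```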
18,401
Python
40.26009
190
0.668062
eliabntt/GRADE-RR/simulator/zebra_datagen.py
import argparse import carb import confuse import ipdb import numpy as np import os import sys import time import traceback import yaml from omni.isaac.kit import SimulationApp from time import sleep def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' def compute_points(skel_root_path, prim, ef, stage): usdSkelRoot = UsdSkel.Root.Get(stage, skel_root_path) UsdSkel.BakeSkinning(usdSkelRoot, Gf.Interval(0, ef)) prim = UsdGeom.PointBased(prim) xformCache = UsdGeom.XformCache() final_points = np.zeros((ef, len(prim.GetPointsAttr().Get()), 3)) for prim in Usd.PrimRange(usdSkelRoot.GetPrim()): if prim.GetTypeName() != "Mesh": continue localToWorld = xformCache.GetLocalToWorldTransform(prim) for time in range(ef): points = UsdGeom.Mesh(prim).GetPointsAttr().Get(time) for index in range(len(points)): points[index] = localToWorld.Transform(points[index]) points = np.array(points) final_points[time] = points return final_points def randomize_floor_position(floor_data, floor_translation, scale, meters_per_unit, env_name, rng): floor_points = np.zeros((len(floor_data), 3)) if env_name == "Windmills": yaw = np.deg2rad(-155) rot = np.array([[np.cos(yaw), -np.sin(yaw), 0], [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]]) floor_translation = np.matmul(floor_translation, rot) if env_name == "L_Terrain": meters_per_unit = 1 for i in range(len(floor_data)): floor_points[i, 0] = floor_data[i][0] * scale[0] * meters_per_unit + floor_translation[0] * meters_per_unit floor_points[i, 1] = floor_data[i][1] * scale[1] * meters_per_unit + floor_translation[1] * meters_per_unit floor_points[i, 2] = floor_data[i][2] * scale[2] * meters_per_unit + floor_translation[2] * meters_per_unit if env_name == "L_Terrain": meters_per_unit = 0.01 max_floor_x = max(floor_points[:, 0]) min_floor_x = min(floor_points[:, 0]) max_floor_y = max(floor_points[:, 1]) min_floor_y = min(floor_points[:, 1]) if env_name == "Windmills": min_floor_x = -112 max_floor_x = 161 min_floor_y = -209 max_floor_y = 63 rows = np.where((floor_points[:, 0] > min_floor_x) & (floor_points[:, 0] < max_floor_x) & (floor_points[:, 1] > min_floor_y) & (floor_points[:, 1] < max_floor_y))[0] floor_points = floor_points[rows] rows = [] while (len(rows) == 0): size_x = rng.integers(40, 120) size_y = rng.integers(40, 120) # get all floor_points within a size x size square randomly centered min_x = rng.uniform(min(floor_points[:, 0]), max(floor_points[:, 0])) max_x = min_x + min(size_x, max(floor_points[:, 0]) - min(floor_points[:, 0])) while max_x > max(floor_points[:, 0]): min_x = rng.uniform(min(floor_points[:, 0]), max(floor_points[:, 0])) max_x = min_x + min(size_x, max(floor_points[:, 0]) - min(floor_points[:, 0])) min_y = rng.uniform(min(floor_points[:, 1]), max(floor_points[:, 1])) max_y = min_y + min(size_y, max(floor_points[:, 1]) - min(floor_points[:, 1])) while max_y > max(floor_points[:, 1]): min_y = rng.uniform(min(floor_points[:, 1]), max(floor_points[:, 1])) max_y = min_y + min(size_y, max(floor_points[:, 1]) - min(floor_points[:, 1])) # FIXME this is just an approximation which MAY NOT WORK ALWAYS! 
rows = np.where((min_x <= floor_points[:,0]) & (floor_points[:,0] <= max_x) & (floor_points[:,1]<=max_y) & (floor_points[:,1]>= min_y))[0] floor_points = floor_points[rows] shape = (len(np.unique(floor_points[:, 0])), -1, 3) floor_points = floor_points.reshape(shape) if (floor_points[0, 1, 0] - floor_points[0, 0, 0]) > 1: zoom_factor = int(floor_points[0, 1, 0] - floor_points[0, 0, 0]) import scipy.ndimage.interpolation as interpolation floor_points = interpolation.zoom(floor_points, (zoom_factor, zoom_factor, 1)) return floor_points, max_floor_x, min_floor_x, max_floor_y, min_floor_y try: parser = argparse.ArgumentParser(description="Dynamic Worlds Simulator") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--record", type=boolean_string, default=False, help="Writing data to the disk") parser.add_argument("--debug_vis", type=boolean_string, default=False, help="When true continuosly loop the rendering") parser.add_argument("--neverending", type=boolean_string, default=False, help="Never stop the main loop") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("DynamicWorlds", __name__) config.set_file(args.config_file) config.set_args(args) can_start = True CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") # Cannot move before SimApp is launched import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.environment_utils import * from pxr import UsdGeom, UsdLux, Gf, Vt, UsdPhysics, PhysxSchema, Usd, UsdShade, Sdf, UsdSkel simulation_environment_setup(need_ros=False) all_env_names = ["Bliss", "Forest", "Grasslands", "Iceland", "L_Terrain", "Meadow", "Moorlands", "Nature_1", 'Nature_2', "Savana", "Windmills", "Woodland"] ground_area_name = ["Landscape_1", "Landscape_1", "Landscape_1", "Landscape_0", "Terrain_5", "Landscape_0", "Landscape_2", "Ground", "Ground", "Landscape_1", "Landscape_0", "Landscape_1"] need_sky = [True] * len(all_env_names) env_id = all_env_names.index(config["fix_env"].get()) rng = np.random.default_rng() rng_state = np.random.get_state() local_file_prefix = "" # setup environment variables environment = environment(config, rng, local_file_prefix) out_dir = os.path.join(config['out_folder'].get(), environment.env_name) out_dir_npy = os.path.join(config['out_folder_npy'].get(), environment.env_name) if not os.path.exists(out_dir): os.makedirs(out_dir) omni.usd.get_context().open_stage(local_file_prefix + config["base_env_path"].get(), None) # Wait two frames so that stage starts loading kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") omni.kit.commands.execute("DeletePrimsCommand", paths=["/World/GroundPlane"]) # do this AFTER loading the world simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), 
rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.initialize_physics() simulation_context.play() simulation_context.stop() kit.update() meters_per_unit = 0.01 # use rtx while setting up! set_raytracing_settings(config["physics_hz"].get()) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get(), "World") if all_env_names[env_id] == "L_Terrain": set_scale(stage.GetPrimAtPath(f"/World/home"), 100) while is_stage_loading(): kit.update() floor_data = stage.GetPrimAtPath(f"/World/home/{ground_area_name[env_id]}/{ground_area_name[env_id]}").GetProperty( 'points').Get() floor_translation = np.array(stage.GetPrimAtPath(f"/World/home/{ground_area_name[env_id]}").GetProperty( 'xformOp:translate').Get()) scale = np.array(stage.GetPrimAtPath(f"/World/home/{ground_area_name[env_id]}").GetProperty("xformOp:scale").Get()) # i need to consider that z has a bounding box and that the position is on the top corner for _ in range(50): simulation_context.render() floor_points, max_floor_x, min_floor_x, max_floor_y, min_floor_y = randomize_floor_position(floor_data, floor_translation, scale, meters_per_unit, all_env_names[env_id], rng) add_semantics(stage.GetPrimAtPath("/World/home"), "world") # set timeline of the experiment timeline = setup_timeline(config) viewport_window_list = [] dynamic_prims = [] first = True simulation_context.stop() simulation_context.play() for _ in range(10): simulation_context.step() _dc = dynamic_control_interface() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) old_h_ap = [] old_v_ap = [] simulation_context.stop() for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path, local_file_prefix) change_prim_collision(False, robot_base_prim_path + str(n)) set_drone_joints_init_loc(robot_base_prim_path + str(n), [0, 0, 0], [0, 0, 0], 10e15) kit.update() for n in range(config["num_robots"].get()): add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ap, old_v_ap, config,simulation_context, tot_num_ros_cam=0) kit.update() for _ in range(5): simulation_context.render() for index, cam in enumerate(viewport_window_list): camera = stage.GetPrimAtPath(cam.get_active_camera()) camera.GetAttribute("horizontalAperture").Set(old_h_ap[index]) camera.GetAttribute("verticalAperture").Set(old_v_ap[index]) print("Loading robot complete") print("Loading zebras..") zebra_anims_loc = config["zebra_anims_loc"].get() # get a list of .usd file in the folder import glob zebra_files = glob.glob(f"{zebra_anims_loc}/*.usd") from utils.zebra_utils import * from omni.kit.window.sequencer.scripts import sequencer_drop_controller _, sequence = omni.kit.commands.execute("SequencerCreateSequenceCommand") sequence_path = sequence.GetPrim().GetPath() kit.update() zebra_anim_names = ["Attack", "Attack01", "Attack02", "Eating", "Gallop", "Hit_Back", "Hit_Front", "Hit_Left", "Hit_Right", "Idle", "Idle2", "Idle3", "Idle4", "Jump", "Tarsus", "Trot", "Walkback"] zebra_seq_lengths = [27, 54, 32, 133, 12, 15, 17, 20, 15, 48, 72, 119, 201, 43, 29, 24, 27] zebra_mesh_paths = [ "Attack/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0_001", "Attack01/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0_001", 
"Attack02/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0_001", "Eating/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Gallop/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Hit_Back/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Hit_Front/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Hit_Left/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Hit_Right/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Idle/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Idle2/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Idle3/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Idle4/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Jump/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Tarsus/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Trot/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0", "Walkback/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6/Object_45/Zebra_SHP2_0_Zebra_Mat_0"] zebra_info = {} for i, v in enumerate(zebra_anim_names): zebra_info[v] = {"path": zebra_mesh_paths[i], "length": zebra_seq_lengths[i], "mesh_path": zebra_mesh_paths[i]} for zebra_file in zebra_files: if not os.path.exists(zebra_file[:-4] + "_points.npy"): zebra_name = zebra_file.split("/")[-1].split(".")[0] zebra_index = zebra_anim_names.index(zebra_name) zebra_path = load_zebra("/zebra_", zebra_index, zebra_file) kit.update() kit.update() zebra_name = zebra_file.split("/")[-1].split(".")[0] zebra_index = zebra_anim_names.index(zebra_name) prim = stage.GetPrimAtPath(zebra_path + zebra_mesh_paths[zebra_index][len(zebra_name):]) skel_root_path = zebra_path + "/Zebra_motions/African_Animal___Zebra/_Object_Pivot_Node_/Object_6" points = compute_points(skel_root_path, prim, zebra_seq_lengths[zebra_index], stage) * meters_per_unit np.save(zebra_file[:-4] + "_points.npy", points) zebra_info[zebra_name]["points"] = points omni.kit.commands.execute("DeletePrimsCommand", paths=[zebra_path]) else: zebra_name = zebra_file.split("/")[-1].split(".")[0] zebra_index = zebra_anim_names.index(zebra_name) zebra_info[zebra_name]["points"] = np.load(zebra_file[:-4] + "_points.npy") max_anim_length = max(zebra_seq_lengths) # IT IS OF CRUCIAL IMPORTANCE THAT AFTER THIS POINT THE RENDER GETS DONE WITH THE SLEEPING CALL! 
OTHERWISE PATH TRACING SPP WILL GET RUINED if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) omni.usd.get_context().get_selection().set_selected_prim_paths([], False) for _ in range(5): simulation_context.step(render=False) sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) timeline.set_current_time(0) simulation_step = 0 # this is NOT the frame, this is the "step" (related to physics_hz) my_recorder = recorder_setup(config['_recorder_settings'].get(), out_dir_npy, config['record'].get(), 0) timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded timeline.set_auto_update(False) # two times, this will ensure that totalSpp is reached sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) my_recorder._enable_record = False exp_len = config["anim_exp_len"].get() my_recorder._enable_record = False sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) if config["rtx_mode"].get(): my_recorder._update() hidden_position = [min_floor_x / meters_per_unit, min_floor_y / meters_per_unit, -10e5] all_zebras = preload_all_zebras(config, rng, zebra_files, zebra_info, simulation_context, sequencer_drop_controller, max_anim_length, hidden_position) substep = 3 simulation_context.play() import ipdb; ipdb.set_trace() while kit.is_running(): if simulation_step > 0: for zebra in all_zebras: set_translate(stage.GetPrimAtPath(zebra), list(hidden_position)) floor_points, max_floor_x, min_floor_x, max_floor_y, min_floor_y = randomize_floor_position(floor_data, floor_translation, scale, meters_per_unit, all_env_names[env_id], rng) frame_info = place_zebras(all_zebras, rng, floor_points, meters_per_unit, hidden_position, config, max_anim_length, zebra_info) for c_substep in range(substep): average_zebra_x = 0 average_zebra_y = 0 average_zebra_z = 0 max_zebra_x = -1e10 max_zebra_y = -1e10 min_zebra_x = 1e10 min_zebra_y = 1e10 counter = 0 for prim in frame_info: if "zebra" in prim: average_zebra_x += frame_info[prim]["position"][0] average_zebra_y += frame_info[prim]["position"][1] average_zebra_z += frame_info[prim]["position"][2] max_zebra_x = max(max_zebra_x, frame_info[prim]["position"][0]) max_zebra_y = max(max_zebra_y, frame_info[prim]["position"][1]) min_zebra_x = min(min_zebra_x, frame_info[prim]["position"][0]) min_zebra_y = min(min_zebra_y, frame_info[prim]["position"][1]) counter += 1 average_zebra_x /= counter average_zebra_y /= counter average_zebra_z /= counter delta_x = max_zebra_x - min_zebra_x delta_y = max_zebra_y - min_zebra_y used_x = [] used_y = [] used_z = [] for n in range(config["num_robots"].get()): safe = False while not safe: # -100 + 100 random_x = rng.uniform(average_zebra_x - delta_x/2 - 5, average_zebra_x + delta_x/2 + 5) # keep random_x within max_floor_x min_floor_x random_x = max(random_x, min_floor_x) random_x = min(random_x, max_floor_x) random_y = rng.uniform(average_zebra_y - delta_y/2 -5, average_zebra_y + delta_y/2 + 5) # keep random_y within max_floor_y min_floor_y random_y = max(random_y, min_floor_y) random_y = min(random_y, max_floor_y) random_z = rng.uniform(average_zebra_z + 5, average_zebra_z + 20) if len(used_x) > 0: for i in range(len(used_x)): safe = True if np.sqrt((used_x[i] - random_x) ** 2 + (used_y[i] - random_y) ** 2 + (used_z[i] - random_z) ** 2) < .5: safe = False break else: safe = True if safe: 
used_x.append(random_x) used_y.append(random_y) used_z.append(random_z) # get angle between robot and average_zebra angle = np.arctan2(average_zebra_y - random_y, average_zebra_x - random_x) # randomize yaw +- 30 degrees yaw = rng.uniform(-np.pi / 6, np.pi / 6) + angle # randomize yaw +- 15 degrees yaw = rng.uniform(-np.pi / 12, np.pi / 12) + angle # get pitch + 15 degrees (camera already pitched) # with a weight based on the average zebra location pitch = - np.arctan2(average_zebra_z - random_z, np.sqrt( (average_zebra_x - random_x) ** 2 + (average_zebra_y - random_y) ** 2)) # roll minimal -10, 10 degrees roll = rng.uniform(-np.pi / 18, np.pi / 18) rot = Rotation.from_euler('xyz', [roll, pitch, yaw]) teleport(robot_base_prim_path + str(n), [random_x / meters_per_unit, random_y / meters_per_unit, random_z / meters_per_unit], rot.as_quat()) frame_info[f"{robot_base_prim_path}{n}"] = {"position": [random_x, random_y, random_z], "rotation": [roll, pitch, yaw]} simulation_context.step(render=False) simulation_context.step(render=False) for _ in range(3): simulation_context.step(render=False) simulation_context.render() sleep(0.5) # two frames with the same animation point # todo fix the time import ipdb; ipdb.set_trace() timeline.set_current_time(max_anim_length / timeline.get_time_codes_per_seconds()) if need_sky[env_id]: # with probability 0.9 during day hours stage.GetPrimAtPath("/World/Looks/SkyMaterial/Shader").GetAttribute("inputs:SunPositionFromTOD").Set(True) if rng.uniform() < 0.9: stage.GetPrimAtPath("/World/Looks/SkyMaterial/Shader").GetAttribute("inputs:TimeOfDay").Set( rng.uniform(5, 20)) else: if rng.uniform() < 0.5: stage.GetPrimAtPath("/World/Looks/SkyMaterial/Shader").GetAttribute("inputs:TimeOfDay").Set( rng.uniform(0, 5)) else: stage.GetPrimAtPath("/World/Looks/SkyMaterial/Shader").GetAttribute("inputs:TimeOfDay").Set( rng.uniform(20, 24)) print("Publishing cameras...") my_recorder._enable_record = True frame_info["step"] = simulation_step frame_info["substep"] = c_substep pub_try_cnt = 0 success_pub = False while not success_pub and pub_try_cnt < 3: try: pub_and_write_images(simulation_context, viewport_window_list, [], config["rtx_mode"].get(), my_recorder) success_pub = True except: print("Error publishing camera") pub_try_cnt += 1 import ipdb; ipdb.set_trace() # simulation_context.stop() # simulation_context.play() sleep(0.5) simulation_context.render() simulation_context.render() if not success_pub: frame_info["error"] = True else: frame_info["error"] = False np.save(out_dir_npy + f"/frame_{simulation_step}_{c_substep}.npy", frame_info) simulation_context.stop() # clips = [f"/World/Sequence{k}{k}_Clip" for k in frame_info.keys() if k.startswith("/zebra")] # remove targets from clips # for clip in clips: # relationship = stage.GetPrimAtPath(clip).GetProperty("animation") # relationship.RemoveTarget(relationship.GetTargets()[0]) # relationship = stage.GetPrimAtPath(clip).GetProperty("assetPrim") # asset = relationship.GetTargets()[0] # relationship.RemoveTarget(asset) # omni.kit.commands.execute("DeletePrimsCommand", # paths=clips) # omni.kit.commands.execute("DeletePrimsCommand", # paths= # [f"/World/Sequence{k}" for k in frame_info.keys() if k.startswith("/zebra")]) # omni.kit.commands.execute("DeletePrimsCommand", paths=[k for k in frame_info.keys() if k.startswith("/zebra")]) timeline.set_current_time(0) my_recorder._counter += 1 simulation_step += 1 if simulation_step >= exp_len: break except: extype, value, tb = sys.exc_info() traceback.print_exc() 
ipdb.post_mortem(tb) finally: simulation_context.stop() try: kit.close() except: pass
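# ---------------------------------------------------------------------------
# A standalone sketch of the viewpoint sampling performed in the loop above:
# the drone is spawned around the zebra herd and oriented toward the herd
# centroid (yaw from arctan2, downward pitch from the height difference, small
# random roll). Everything here (herd coordinates, margins, height range) is
# made up for the example; only the geometry mirrors the script.
import numpy as np

def sample_viewpoint(zebra_xyz, rng, margin=5.0, z_range=(5.0, 20.0)):
    """Return a camera position and (roll, pitch, yaw) looking at the herd centroid."""
    zebra_xyz = np.asarray(zebra_xyz, dtype=float)
    centroid = zebra_xyz.mean(axis=0)
    half_x = (zebra_xyz[:, 0].max() - zebra_xyz[:, 0].min()) / 2.0
    half_y = (zebra_xyz[:, 1].max() - zebra_xyz[:, 1].min()) / 2.0
    cam = np.array([
        rng.uniform(centroid[0] - half_x - margin, centroid[0] + half_x + margin),
        rng.uniform(centroid[1] - half_y - margin, centroid[1] + half_y + margin),
        centroid[2] + rng.uniform(*z_range),
    ])
    yaw = np.arctan2(centroid[1] - cam[1], centroid[0] - cam[0]) + rng.uniform(-np.pi / 12, np.pi / 12)
    pitch = -np.arctan2(centroid[2] - cam[2], np.linalg.norm(centroid[:2] - cam[:2]))
    roll = rng.uniform(-np.pi / 18, np.pi / 18)
    return cam, (roll, pitch, yaw)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    herd = rng.uniform(0.0, 10.0, size=(8, 3))
    print(sample_viewpoint(herd, rng))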
22,410
Python
40.88972
167
0.65328
eliabntt/GRADE-RR/simulator/FUEL_indoor_simulation.py
import argparse import carb import confuse import ipdb import numpy as np import os import roslaunch import rospy import sys import time import traceback import yaml from omni.isaac.kit import SimulationApp from time import sleep def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' try: parser = argparse.ArgumentParser(description="Dynamic Worlds Simulator") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--record", type=boolean_string, default=True, help="Writing data to the disk") parser.add_argument("--debug_vis", type=boolean_string, default=False, help="When true continuosly loop the rendering") parser.add_argument("--neverending", type=boolean_string, default=False, help="Never stop the main loop") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("DynamicWorlds", __name__) config.set_file(args.config_file) config.set_args(args) os.environ["SHAPENET_LOCAL_DIR"] = config["shapenet_local_dir"].get() experiment_length = config["experiment_length"].get() can_start = True CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") # Cannot move before SimApp is launched import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.objects_utils import * from utils.environment_utils import * from utils.human_utils import * def monitor_movement(msg, args): global second_start global last_check_time global c_pose global old_pose global rng global env_prim_path wait_time = rospy.Duration(1) index, environment = args[0], args[1] if second_start and rospy.Time.now() > last_check_time + wait_time: last_check_time = rospy.Time.now() diff_x = abs(old_pose[index][0] - c_pose[index][0]) ** 2 diff_y = abs(old_pose[index][1] - c_pose[index][1]) ** 2 diff_z = abs(old_pose[index][2] - c_pose[index][2]) ** 2 dist = (diff_x + diff_y + diff_z) ** 0.5 if (dist) < 0.1: my_pose = PoseStamped() if (rng.uniform() > .9): x, y, z, yaw = position_object(environment, type=0) x = x[0] y = y[0] z = z[0] yaw = yaw[0] + rng.uniform(0, 2 * np.pi) else: yaw = get_robot_yaw(c_pose[index][0], c_pose[index][1], c_pose[index][2], environment.env_mesh, environment.shifts) x = c_pose[index][0] + 0.2 * np.cos(yaw) y = c_pose[index][1] + 0.2 * np.sin(yaw) z = c_pose[index][2] yaw += rng.uniform(0, 2 * np.pi) my_pose.pose.position.x = x my_pose.pose.position.y = y my_pose.pose.position.z = z rot = np.array(yaw) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot) ).GetQuat() my_pose.pose.orientation.x = quat.imaginary[0] my_pose.pose.orientation.y = quat.imaginary[1] my_pose.pose.orientation.z = quat.imaginary[2] my_pose.pose.orientation.w = quat.real print( f"Publishing random goal since robot {index} stuck [{x},{y},{z}, {yaw} ({yaw * 180 / 3.14})].") my_pose.header.frame_id = "world" 
my_pose.header.stamp = rospy.Time.now() movement_monitor_pubs[index].publish(my_pose) if (dist) < 0.05: set_colliders(env_prim_path, True) else: old_pose[index] = c_pose[index] set_colliders(env_prim_path, True) def autostart_exploration(msg, index): global first_start global second_start global can_start global can_change_second_start global last_pub_time if (msg.data == "PUB_FIRST_360"): can_change_second_start = True wait_time = rospy.Duration(0, 500000000) if second_start else rospy.Duration(1) if (msg.data == "WAIT_TRIGGER" or ( msg.data == "PUB_360" and not second_start) and rospy.Time.now() > last_pub_time + wait_time): if can_start: if not first_start: first_start = True elif can_change_second_start: second_start = True print("Exploration will start at the end of this movement") default_pose = PoseStamped() default_pose.header.frame_id = "world" default_pose.header.stamp = rospy.Time.now() start_explorer_pubs[index].publish(default_pose) last_pub_time = rospy.Time.now() def publish_random_goal(msg, args): global last_pub_time global first_start global second_start global can_start global can_change_second_start index, environment = args[0], args[1] if (msg.data == "PUB_FIRST_360"): can_change_second_start = True if (msg.data == "WAIT_TRIGGER" or ( msg.data == "PUB_360" and not second_start) and rospy.Time.now() > last_pub_time + rospy.Duration(0, 500000000)): if can_start: if not first_start: first_start = True elif can_change_second_start: second_start = True my_pose = PoseStamped() x, y, z, yaw = position_object(environment, type=0) my_pose.pose.position.x = x[0] my_pose.pose.position.y = y[0] my_pose.pose.position.z = z[0] rot = np.array(yaw[0]) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot) ).GetQuat() my_pose.pose.orientation.x = quat.imaginary[0] my_pose.pose.orientation.y = quat.imaginary[1] my_pose.pose.orientation.z = quat.imaginary[2] my_pose.pose.orientation.w = quat.real print(f"Publishing random goal [{x[0]},{y[0]},{z[0]}, {yaw[0]} ({yaw[0] * 180 / 3.14})] for robot {index}") my_pose.header.frame_id = "fixing_manual" my_pose.header.stamp = rospy.Time.now() send_waypoint_pubs[index].publish(my_pose) last_pub_time = rospy.Time.now() simulation_environment_setup() # set timeline of the experiment timeline = setup_timeline(config) rospy.init_node("my_isaac_ros_app", anonymous=True, disable_signals=True, log_level=rospy.ERROR) starting_pub = rospy.Publisher('starting_experiment', String) rng = np.random.default_rng() rng_state = np.random.get_state() local_file_prefix = "" # if something is broken try my-computer:// # setup environment variables meters_per_unit = config["meters_per_unit"].get() environment = environment(config, rng, local_file_prefix, meters_per_unit) uuid = roslaunch.rlutil.get_or_generate_uuid(None, False) out_dir = os.path.join(config['out_folder'].get(), environment.env_name) out_dir_npy = os.path.join(config['out_folder_npy'].get(), environment.env_name) if not os.path.exists(out_dir): os.makedirs(out_dir) os.environ["ROS_LOG_DIR"] = out_dir roslaunch.configure_logging(uuid) launch_files = ros_launchers_setup(roslaunch, environment.env_limits_shifted, config) parent = roslaunch.parent.ROSLaunchParent(uuid, launch_files, force_log=True) omni.usd.get_context().open_stage(local_file_prefix + config["base_env_path"].get(), None) # Wait two frames so that stage starts loading kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() 
print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") # do this AFTER loading the world simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=meters_per_unit, backend='torch') simulation_context.initialize_physics() physx_interface = omni.physx.acquire_physx_interface() physx_interface.start_simulation() _clock_graph = add_clock() # add ROS clock simulation_context.play() for _ in range(10): simulation_context.step() og.Controller.evaluate_sync(_clock_graph) last_pub_time = rospy.Time.now() simulation_context.stop() # fixme IDK why this is necessary sometimes try: parent.start() except: print("Failed to start roslaunch, retry") try: parent.start() except: print("Failed to start roslaunch, exit") exit(1) print("ros node launched") kit.update() # use rtx while setting up! set_raytracing_settings(config["physics_hz"].get()) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1] - 0.2, meters_per_unit, is_rtx=config["rtx_mode"].get()) randomize_roughness(config["_random_roughness"].get(), rng, env_prim_path) ros_camera_list = [] ros_transform_components = [] # list of tf and joint components, one (of each) for each robot viewport_window_list = [] dynamic_prims = [] imus_handle_list = [] robot_odom_frames = [] robot_imu_frames = [] camera_pose_frames = [] imu_pubs = [] odom_pubs = [] cam_pose_pubs = [] first = True simulation_context.play() for _ in range(100): og.Controller.evaluate_sync(_clock_graph) simulation_context.step() last_pub_time = rospy.Time.now() simulation_context.stop() print("Generating map...") if add_colliders(env_prim_path): simulation_context.play() x, y, z, yaw = position_object(environment, type=3) environment.generate_map(out_dir, origin=[x[0], y[0], 0]) for _ in range(10): simulation_context.step() timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded else: simulation_context.play() for _ in range(10): simulation_context.step() print("Error generating collisions", file=sys.stderr) simulation_context.play() _dc = dynamic_control_interface() print("Loading robots..") from omni.isaac.sensor import _sensor _is = _sensor.acquire_imu_sensor_interface() robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) c_pose = [] old_pose = [] old_h_ap = [] old_v_ap = [] lidars = [] simulation_context.stop() for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path, local_file_prefix) x, y, z, yaw = get_valid_robot_location(environment, first) set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0,0,yaw], (environment.env_limits[5]) / meters_per_unit, 0.3/meters_per_unit, irotate=config["is_iRotate"].get()) c_pose.append([x, y, z]) old_pose.append([x, y, z]) # todo make a comment about this and the number of cameras add_ros_components(robot_base_prim_path, n, ros_transform_components, ros_camera_list, viewport_window_list, camera_pose_frames, cam_pose_pubs, imu_pubs, robot_imu_frames, robot_odom_frames, odom_pubs, lidars, dynamic_prims, config, old_h_ap, old_v_ap, _is, simulation_context, _clock_graph) kit.update() first = False for n in range(config["num_robots"].get()): 
add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ap, old_v_ap, config, simulation_context, config["num_robots"].get() * 1) for _ in range(50): simulation_context.render() print("Loading robot complete") print("WARNING: CAMERA APERTURE MANUAL SET NO LONGER WORKS, NEEDS TO BE FIXED BY NVIDIA!!!!") time.sleep(5) # # legacy code # for index, cam in enumerate(viewport_window_list): # camera = stage.GetPrimAtPath(cam.get_active_camera()) # camera.GetAttribute("horizontalAperture").Set(old_h_ap[index]) # camera.GetAttribute("verticalAperture").Set(old_v_ap[index]) print("Starting FSM - setting up topics...") start_explorer_pubs = [] send_waypoint_pubs = [] movement_monitor_pubs = [] for index, _ in enumerate(robot_odom_frames): print("Waiting for fsm to start for robot {}".format(index)) my_topic = f"{robot_base_prim_path}{index}/exploration_node/fsm_exploration/state" if config["autonomous"].get(): rospy.Subscriber(my_topic, String, callback=autostart_exploration, callback_args=index) start_explorer_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/traj_start_trigger", PoseStamped, queue_size=10)) else: rospy.Subscriber(my_topic, String, callback=publish_random_goal, callback_args=(index, environment)) send_waypoint_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/exploration_node/manual_goal", PoseStamped, queue_size=10)) rospy.Subscriber(my_topic, String, callback=monitor_movement, callback_args=(index, environment)) movement_monitor_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/command/pose", PoseStamped, queue_size=10)) print("fsm management for robot {} setted up".format(index)) print("FSM setted up") print("Loading humans..") my_humans = [] my_humans_heights = [] human_export_folder = config["human_path"].get() human_folders = os.listdir(human_export_folder) tot_area = 0 areas = [] initial_dynamics = len(dynamic_prims) used_ob_stl_paths = [] ## todo cycle to complete area, need to update the service probably n = 0 human_anim_len = [] added_prims = [] human_base_prim_path = config["human_base_prim_path"].get() while n < rng.integers(7, 1 + max(7, config["num_humans"].get())): anim_len = 0 # the animation needs to be shorter than config["max_anim_len"].get() and longer than 0/min_len while anim_len < max(config["min_human_anim_len"].get(), 0) or anim_len > config["max_human_anim_len"].get(): folder = rng.choice(human_folders) while "old_textures" in folder: folder = rng.choice(human_folders) random_name = rng.choice(os.listdir(os.path.join(human_export_folder, folder))) asset_path = local_file_prefix + os.path.join(human_export_folder, folder, random_name, random_name + ".usd") tmp_pkl = pkl.load(open(os.path.join(human_export_folder, folder, random_name, random_name + ".pkl"), 'rb')) anim_len = tmp_pkl['ef'] print("Loading human {} from {}".format(random_name, folder)) used_ob_stl_paths.append(os.path.join(human_export_folder, folder, random_name, random_name + ".stl")) human_anim_len.append(tmp_pkl['ef']) if "verts" in tmp_pkl.keys(): my_humans_heights.append(tmp_pkl['verts'][:, :, 2]) else: my_humans_heights.append(None) my_humans.append(random_name) load_human(human_base_prim_path, n, asset_path, dynamic_prims, added_prims) stl_path = os.path.join(human_export_folder, folder, random_name, random_name + ".stl") this_mesh = mesh.Mesh.from_file(stl_path) areas.append((this_mesh.x.max() - this_mesh.x.min()) * (this_mesh.y.max() - this_mesh.y.min())) tot_area += areas[-1] n += 1 x, y, z, yaw = position_object(environment, type=1, 
objects=my_humans, ob_stl_paths=used_ob_stl_paths, max_collisions=int(config["allow_collision"].get())) to_be_removed = [] human_prim_list = [] body_origins = [] for n, human in enumerate(my_humans): if z[n] < 0: to_be_removed.append(n) tot_area -= areas[n] else: set_translate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [x[n] / meters_per_unit, y[n] / meters_per_unit, z[n] / meters_per_unit]) set_scale(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), 1 / meters_per_unit) set_rotate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [0, 0, yaw[n]]) human_prim_list.append(f"{human_base_prim_path}{n}") body_origins.append([x[n], y[n], z[n], yaw[n]]) if len(to_be_removed) > 0: print("Removing humans that are out of the environment") to_be_removed.reverse() cumsum = np.cumsum(added_prims) for n in to_be_removed: my_humans.pop(n) used_ob_stl_paths.pop(n) my_humans_heights.pop(n) for _ in range(added_prims[n]): if n > 0: dynamic_prims.pop(cumsum[n - 1] + initial_dynamics) else: dynamic_prims.pop(initial_dynamics) human_anim_len.pop(n) omni.kit.commands.execute("DeletePrimsCommand", paths=[f"{human_base_prim_path}{n}" for n in to_be_removed]) print("Loading human complete") google_ob_used, shapenet_ob_used = load_objects(config, environment, rng, dynamic_prims, 1/meters_per_unit) # IT IS OF CRUCIAL IMPORTANCE THAT AFTER THIS POINT THE RENDER GETS DONE WITH THE SLEEPING CALL! OTHERWISE PATH TRACING SPP WILL GET RUINED if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) omni.usd.get_context().get_selection().clear_selected_prim_paths() omni.usd.get_context().get_selection().set_selected_prim_paths([], False) for _ in range(5): simulation_context.step(render=False) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) timeline.set_current_time(0) simulation_step = 0 # this is NOT the frame, this is the "step" (related to physics_hz) my_recorder = recorder_setup(config['_recorder_settings'].get(), out_dir_npy, config['record'].get(), skip_cameras=1) simulation_context.stop() timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded timeline.set_auto_update(False) for _ in range(5): kit.update() simulation_context.play() timeline.set_auto_update(False) first_start = False second_start = False can_change_second_start = False # two times, this will ensure that totalSpp is reached sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) last_pub_time = rospy.Time.now() last_check_time = rospy.Time.now() if config['debug_vis'].get(): cnt = 0 while 1: cnt += 1 if cnt % 10000 == 0: import ipdb ipdb.set_trace() print("Debug vis") sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) reversing_timeline_ratio = compute_timeline_ratio(human_anim_len, config["reverse_strategy"].get(), experiment_length) print(f"The reversing ratio is {reversing_timeline_ratio}.\n" f"This implies that that every {experiment_length / reversing_timeline_ratio} frames we reverse the animations") cnt_reversal = 1 # example # exp length: 600, ratio: 4 # forward 0-150, 151-300 backward, 300-450 forward, 450-600 backward (so 4 slots) # exp length: 1200, ratio: 4 # forward 0-300, 301-600 backward, 601-900 forward, 901-1200 backward (so 4 slots) ratio_camera = config["ratio_camera"].get() ratio_odom = config["ratio_odom"].get() 
ratio_tf = config["ratio_tf"].get() starting_to_pub = False my_recorder._enable_record = False status = True while kit.is_running(): # NOTE EVERYTHING THAT NEEDS TO BE RENDERED NEEDS TO BE MOVED AFTER THE TIMELINE UPDATE CONSISTENTLY if can_start: last_check_time = rospy.Time.now() if second_start: if config['record'].get(): sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) my_recorder._update() sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) starting_to_pub = True timeline.set_current_time(min(- 1 / (config["physics_hz"].get() / ratio_camera), -abs(config["bootstrap_exploration"].get()))) simulation_step = int(timeline.get_current_time() * config["physics_hz"].get()) - 1 # reset_physics(timeline, simulation_context) print("Bootstrap started") can_start = False simulation_step += 1 if starting_to_pub and simulation_step == 0: timeline.set_current_time(0) # reset_physics(timeline, simulation_context) move_humans_to_ground(my_humans_heights, human_prim_list, simulation_step / ratio_camera, meters_per_unit, config["max_distance_human_ground"].get()) print("Starting recording NOW!") msg = String("starting") starting_pub.publish(msg) starting_to_pub = False time.sleep(0.5) if config['record'].get(): my_recorder._enable_record = True last_check_time = rospy.Time.now() if (config["_random_light"].get()["during_experiment"]): if (simulation_step % config["_random_light"].get()["n-frames"] == 0): # fixme todo smooth change, idea get max-min and time window randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1], environment.meters_per_unit, is_rtx=config["rtx_mode"].get()) # step the physics simulation_context.step(render=False) # get the current time in ROS print("Clocking...") og.Controller.evaluate_sync(_clock_graph) time.sleep(0.1) ctime = timeline.get_current_time() simulation_context.render() timeline.set_current_time(ctime) # publish IMU print("Publishing IMU...") pub_imu(_is, imu_pubs, robot_imu_frames, meters_per_unit) # publish joint status (ca 120 Hz) if simulation_step % ratio_tf == 0: print("Publishing joint/tf status...") for component in ros_transform_components: og.Controller.set(og.Controller.attribute(f"{component}/OnImpulseEvent.state:enableImpulse"), True) # publish odometry (60 hz) if simulation_step % ratio_odom == 0: print("Publishing odometry...") c_pose, _ = pub_odom(robot_odom_frames, odom_pubs, _dc, meters_per_unit) pub_cam_pose(camera_pose_frames, cam_pose_pubs, _dc, meters_per_unit) # we consider ratio_camera to forward the animation. 
# If you want it different ratio_animation < ratio_camera to avoid # two frames with the same animation point if second_start: if simulation_step % ratio_camera == 0: if my_recorder._enable_record: # update the image counter externally so that we can use it in the recorder and all images have the same index my_recorder._counter += 1 if simulation_step / ratio_camera < (experiment_length / reversing_timeline_ratio) * ( cnt_reversal): timeline.forward_one_frame() else: if simulation_step / ratio_camera >= ((experiment_length - 1) / reversing_timeline_ratio) * ( cnt_reversal + 1) or \ (timeline.get_current_time() - 1 / timeline.get_time_codes_per_seconds()) < 0: cnt_reversal += 2 timeline.forward_one_frame() else: timeline.rewind_one_frame() if simulation_step % ratio_camera == 0: for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(1) ctime = timeline.get_current_time() simulation_context.render() timeline.set_current_time(ctime) for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(0) # publish camera (30 hz) if simulation_step % ratio_camera == 0: ctime = timeline.get_current_time() print("Publishing cameras...") # FIRST ONE WRITTEN IS AT 1/30 on the timeline pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, config["rtx_mode"].get(), my_recorder, second_start) timeline.set_current_time(ctime) if simulation_step % ratio_camera == 0 and simulation_step / ratio_camera == experiment_length \ and not config["neverending"].get(): print("End of experiment!!!") simulation_context.pause() if my_recorder.data_writer is not None: my_recorder.data_writer.stop_threads() timeline.set_current_time(0) context.save_as_stage(os.path.join(out_dir, "loaded_stage.usd")) experiment_info = {} experiment_info["config"] = config experiment_info["reversing_timeline_ratio"] = reversing_timeline_ratio experiment_info["humans"] = {} experiment_info["humans"]["ids"] = my_humans experiment_info["humans"]["folders"] = used_ob_stl_paths experiment_info["humans"]["origins"] = body_origins # x y z yaw experiment_info["google_obs"] = google_ob_used experiment_info["shapenet_obs"] = shapenet_ob_used experiment_info["environment"] = {} experiment_info["environment"]["id"] = environment.env_name experiment_info["environment"]["folder"] = environment.env_path experiment_info["environment"]["shifts"] = environment.shifts experiment_info["rng_state"] = rng_state np.save(os.path.join(out_dir, "experiment_info.npy"), experiment_info) break except: extype, value, tb = sys.exc_info() traceback.print_exc() ipdb.post_mortem(tb) finally: for pub in odom_pubs: pub.unregister() for pub in imu_pubs: pub.unregister() for pub in cam_pose_pubs: pub.unregister() for pub in start_explorer_pubs: pub.unregister() for pub in send_waypoint_pubs: pub.unregister() parent.shutdown() rospy.signal_shutdown("my_simulation complete") simulation_context.stop() try: kit.close() except: pass
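# ---------------------------------------------------------------------------
# A tiny sketch of the forward/backward bookkeeping used above: the experiment
# is split into `reversing_timeline_ratio` equal slots and the human animations
# are played forward in even slots and rewound in odd ones (e.g. length 600,
# ratio 4: frames 0-150 forward, 151-300 backward, 301-450 forward, 451-600
# backward). This helper only reproduces the slot logic; the extra guard that
# keeps the real loop from rewinding below time 0 is deliberately left out.
def timeline_direction(frame_idx, experiment_length, reversing_timeline_ratio):
    """Return +1 to advance the timeline one frame, -1 to rewind it."""
    slot_length = experiment_length / reversing_timeline_ratio
    slot = int(frame_idx // slot_length)
    return 1 if slot % 2 == 0 else -1

if __name__ == "__main__":
    print([timeline_direction(f, 600, 4) for f in (0, 149, 151, 299, 301, 599)])
    # -> [1, 1, -1, -1, 1, -1]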
25,227
Python
37.457317
156
0.674793
eliabntt/GRADE-RR/simulator/robot_with_ros.py
import argparse import time import os import numpy as np # base_env_path and other settings are in the config file out_dir = "" # set this to a temporary empty dir from omni.isaac.kit import SimulationApp def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' parser = argparse.ArgumentParser(description="Your second IsaacSim run") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("world_and_robot", __name__) config.set_file(args.config_file) config.set_args(args) CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") omni.usd.get_context().open_stage(config["base_env_path"].get(), None) kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() meters_per_unit = config["meters_per_unit"].get() simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=meters_per_unit, backend='torch') simulation_context.initialize_physics() physx_interface = omni.physx.acquire_physx_interface() physx_interface.start_simulation() print("Adding ROS clock, you can check with rostopic echo /clock") _clock_graph = add_clock() # add ROS clock simulation_context.play() for _ in range(10): simulation_context.step() # remember that this step also the physics og.Controller.evaluate_sync(_clock_graph) simulation_context.stop() import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * simulation_environment_setup(need_ros = True) if base_world_path != "": from utils.environment_utils import * print("Loading environment...") environment = environment(config, meters_per_unit=meters_per_unit) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) print("Visualization...") for _ in range(1000): simulation_context.render() simulation_context.step(render=False) print("Environment loading done...") add_colliders(env_prim_path) print("Colliders added..") simulation_context.play() x, y, z = 0, 0, 0 if out_dir != "": environment.generate_map(out_dir, origin=[x,y,z]) print("Map generated..") simulation_context.stop() # prepare some containers joint_states = [] tf_trees = [] camera_list = [] viewport_list = [] camera_pose, camera_pose_pub = [], [] imus,imu_pubs = [], [] lidars = [] odoms, odom_pubs = [], [] # get the interface to add imu sensors from omni.isaac.sensor import _sensor _is = _sensor.acquire_imu_sensor_interface() # these are kept because the aperture is resetted based on the h aperture by IsaacSim. # In v2021 this could have been reverted. In v2022 not. 
old_h_ape, old_v_ape = [], [] # get the interface to access dynamics of the assets _dc = dynamic_control_interface() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path) x, y, z, yaw = np.random.randint(-100,100,4) set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0, 0, np.deg2rad(yaw)], upper_zlim = z * 2, lower_zlim = -z * 2 ) print("Adding ROS components") joint_states.append(add_joint_state(f"{robot_base_prim_path}{n}")) tf_trees.append(add_pose_tree(f"{robot_base_prim_path}{n}")) # create the viewport, the camera component component, viewport = add_camera_and_viewport(f"{robot_base_prim_path}{n}/camera_link", config["robot_sensor_size"].get(), old_h_ape, old_v_ape, simulation_context, 0, n, cam_per_robot=1) # cam index is useful if you want multiple cameras cam_outputs = control_camera(viewport, simulation_context) camera_list.append([n + 0, component, cam_outputs]) viewport_list.append(viewport) omni.kit.app.get_app().update() camera_pose.append(f"{robot_base_prim_path}{n}/camera_link") camera_pose_pub.append(rospy.Publisher(f"{robot_base_prim_path}{n}/camera/pose", PoseStamped, queue_size=10)) setup_imu_sensor(_is, config, f"{robot_base_prim_path}{n}/imu_link") imu_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/imu_body", Imu, queue_size=10)) imus.append(f"{robot_base_prim_path}{n}/imu_link") odoms.append(f"{robot_base_prim_path}{n}/yaw_link") odom_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/odom", Odometry, queue_size=10)) sensor = add_lidar(f"{robot_base_prim_path}{n}/yaw_link", [0, 0, -.1], [0, 0, 0], is_3d=True, is_2d=True) lidars.append(sensor) # alternatively # add_ros_components(robot_base_prim_path, n, ros_transform_components, camera_list, viewport_list, # camera_pose, camera_pose_pub, imu_pubs, imus, # odoms, odom_pubs, lidars, # [], config, old_h_ape, old_v_ape, _is, simulation_context, _clock_graph, irotate=False) print("Loading robots done") # set some settings for the rendering if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) print("Note that the rendering is now blocking until finished") for i in range(100): print(f"Iteration {i}/100", end="\r") sleeping(simulation_context, viewport_list, raytracing=config["rtx_mode"].get()) # deselect all objects omni.usd.get_context().get_selection().clear_selected_prim_paths() omni.usd.get_context().get_selection().set_selected_prim_paths([], False) # the publishing ratios and the timeline handle are not defined earlier in this script; they are assumed to come from the config file and from the setup_timeline helper, as in the other GRADE-RR scripts ratio_joints = config["ratio_joints"].get() ratio_tf = config["ratio_tf"].get() ratio_odom = config["ratio_odom"].get() ratio_camera = config["ratio_camera"].get() timeline = setup_timeline(config) simulation_context.play() for i in range(2000): simulation_context.step(render=False) og.Controller.evaluate_sync(_clock_graph) time.sleep(0.2) simulation_context.render() # publish IMU print("Publishing IMU...") pub_imu(_is, imu_pubs, imus, meters_per_unit) if i % ratio_joints == 0: for js in joint_states: og.Controller.set(og.Controller.attribute(f"{js}/OnImpulseEvent.state:enableImpulse"), True) if i % ratio_tf == 0: for tf in tf_trees: og.Controller.set(og.Controller.attribute(f"{tf}/OnImpulseEvent.state:enableImpulse"), True) if i % ratio_odom == 0: c_pose, _ = pub_odom(odoms, odom_pubs, _dc, meters_per_unit) pub_cam_pose(camera_pose, camera_pose_pub, _dc, meters_per_unit) if i % ratio_camera == 0: # The RTX LiDAR is still a fuzzy component. The "normal" LiDAR is more stable, but won't see non-colliding objects for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(1) ctime = timeline.get_current_time() simulation_context.render() timeline.set_current_time(ctime) for lidar in lidars: og.Controller.attribute(lidar+".inputs:step").set(0) pub_and_write_images(simulation_context, viewport_list, camera_list, config["rtx_mode"].get()) # clearly not writing anything here simulation_context.stop() try: kit.close() except: pass
7,858
Python
36.966183
185
0.693688
eliabntt/GRADE-RR/simulator/world_and_robot.py
import argparse import confuse import ipdb import numpy as np import os # base_env_path and other settings are in the config file out_dir = "" # set this to a temporary empty dir from omni.isaac.kit import SimulationApp def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' parser = argparse.ArgumentParser(description="Your second IsaacSim run") parser.add_argument("--headless", type=boolean_string, default=True, help="Whether to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") # new options parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("world_and_robot", __name__) # load the config file specified config.set_file(args.config_file) config.set_args(args) CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") omni.usd.get_context().open_stage(config["base_env_path"].get(), None) kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() meters_per_unit = config["meters_per_unit"].get() simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=meters_per_unit, backend='torch') simulation_context.initialize_physics() physx_interface = omni.physx.acquire_physx_interface() physx_interface.start_simulation() for _ in range(100): simulation_context.render() simulation_context.step(render=False) import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * simulation_environment_setup(need_ros = False) # enable some extensions, check if ros is running automatically if base_world_path != "": from utils.environment_utils import * print("Loading environment...") environment = environment(config, meters_per_unit=meters_per_unit) # setup the class env_prim_path = environment.load_and_center(config["env_prim_path"].get()) # actually load the env process_semantics(config["env_prim_path"].get()) # add semantic information based either on the label you provide, or by looking into the fields of the objects. This applies semantics to all children print("Visualization...") for _ in range(1000): simulation_context.render() simulation_context.step(render=False) print("Environment loading done...") print("Add colliders to the environment, if the environment is big this could take ages..") add_colliders(env_prim_path) # add colliders to the environment print("Colliders added..") print("For the next step please check out the code and set x, y, z manually to test them out..") print() ipdb.set_trace() simulation_context.play() x, y, z = 0, 0, 0 if out_dir == "": print("Change out_dir") environment.generate_map(out_dir, origin=[x,y,z]) print("Map generated..") simulation_context.stop() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path) x, y, z, yaw = np.random.randint(-100,100,4) set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0, 0, np.deg2rad(yaw)], upper_zlim = z * 2, lower_zlim = -z * 2 ) print("Loading robots done") simulation_context.play() for _ in range(2000): simulation_context.render() simulation_context.step(render=False) simulation_context.stop() try: kit.close() except: pass
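# ---------------------------------------------------------------------------
# A minimal round-trip of the meters_per_unit conversion used when the robot is
# spawned above: poses are reasoned about in meters, while the stage of these
# environments typically works in centimeters (meters_per_unit = 0.01 in the
# other GRADE-RR scripts), hence the division before calling the placement
# helper. The numbers below are made up for the example.
import numpy as np

def meters_to_stage_units(pos_m, meters_per_unit=0.01):
    return np.asarray(pos_m, dtype=float) / meters_per_unit

def stage_units_to_meters(pos_u, meters_per_unit=0.01):
    return np.asarray(pos_u, dtype=float) * meters_per_unit

if __name__ == "__main__":
    pose_m = [1.5, -2.0, 0.8]
    pose_u = meters_to_stage_units(pose_m)          # -> [150., -200., 80.]
    print(pose_u, stage_units_to_meters(pose_u))    # round-trips back to meters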
4,102
Python
35.633928
186
0.714529
eliabntt/GRADE-RR/simulator/irotate_simulation.py
import argparse import carb import confuse import ipdb import numpy as np import os import roslaunch import rospy import sys import time import traceback import yaml from omni.isaac.kit import SimulationApp from time import sleep def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' try: parser = argparse.ArgumentParser(description="Dynamic Worlds Simulator") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--record", type=boolean_string, default=True, help="Writing data to the disk") parser.add_argument("--debug_vis", type=boolean_string, default=False, help="When true continuosly loop the rendering") parser.add_argument("--neverending", type=boolean_string, default=False, help="Never stop the main loop") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("DynamicWorlds", __name__) config.set_file(args.config_file) config.set_args(args) experiment_length = config["experiment_length"].get() can_start = True CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") # Cannot move before SimApp is launched import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.environment_utils import * simulation_environment_setup() rospy.init_node("my_isaac_ros_app", anonymous=True, disable_signals=True, log_level=rospy.ERROR) starting_pub = rospy.Publisher('starting_experiment', String) rng = np.random.default_rng() rng_state = np.random.get_state() local_file_prefix = "my-computer://" # setup environment variables environment = environment(config, rng, local_file_prefix) uuid = roslaunch.rlutil.get_or_generate_uuid(None, False) out_dir = os.path.join(config['out_folder'].get(), environment.env_name) out_dir_npy = os.path.join(config['out_folder_npy'].get(), environment.env_name) if not os.path.exists(out_dir): os.makedirs(out_dir) os.environ["ROS_LOG_DIR"] = out_dir roslaunch.configure_logging(uuid) launch_files = ros_launchers_setup(roslaunch, environment.env_limits_shifted, config) parent = roslaunch.parent.ROSLaunchParent(uuid, launch_files, force_log=True) omni.usd.get_context().open_stage(local_file_prefix + config["base_env_path"].get(), None) # Wait two frames so that stage starts loading kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") # do this AFTER loading the world simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.start_simulation() add_clock() # add ROS clock simulation_context.play() for _ in range(100): omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") simulation_context.step() last_pub_time = rospy.Time.now() simulation_context.stop() 
# fixme IDK why this is necessary sometimes try: parent.start() except: print("Failed to start roslaunch, retry") try: parent.start() except: print("Failed to start roslaunch, exit") exit(1) print("ros node launched") kit.update() meters_per_unit = UsdGeom.GetStageMetersPerUnit(stage) # use rtx while setting up! set_raytracing_settings(config["physics_hz"].get()) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1] - 0.2, environment.meters_per_unit, is_rtx=config["rtx_mode"].get()) randomize_roughness(config["_random_roughness"].get(), rng, env_prim_path) # set timeline of the experiment timeline = setup_timeline(config) ros_camera_list = [] ros_transform_components = [] # list of tf and joint components, one (of each) for each robot viewport_window_list = [] dynamic_prims = [] imus_handle_list = [] robot_odom_frames = [] robot_imu_frames = [] camera_pose_frames = [] imu_pubs = [] odom_pubs = [] cam_pose_pubs = [] camera_odom_pubs = [] camera_odom_frames = [] lidar_components = [] first = True imu_sensor, imu_props = setup_imu_sensor(config) simulation_context.play() for _ in range(100): omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") simulation_context.step() last_pub_time = rospy.Time.now() simulation_context.stop() print("Generating map...") if add_colliders(env_prim_path): simulation_context.play() x, y, z, yaw = position_object(environment, type=3) environment.generate_map(out_dir, origin=[x[0], y[0], 0]) for _ in range(10): simulation_context.step() timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded else: simulation_context.play() for _ in range(10): simulation_context.step() print("Error generating collisions", file=sys.stderr) simulation_context.play() _dc = dynamic_control_interface() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) c_pose = [] old_pose = [] old_h_ap = [] old_v_ap = [] for n in range(config["num_robots"].get()): simulation_context.stop() import_robot(robot_base_prim_path, n, usd_robot_path, local_file_prefix) x, y, z, yaw = 0, 0, 0, 0 simulation_context.stop() set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0, 0, yaw], (environment.env_limits[5]) / meters_per_unit, irotate=config["is_iRotate"].get()) c_pose.append([x, y, z]) old_pose.append([x, y, z]) kit.update() simulation_context.play() kit.update() add_ros_components(robot_base_prim_path, n, ros_transform_components, ros_camera_list, viewport_window_list, camera_pose_frames, cam_pose_pubs, imus_handle_list, imu_pubs, robot_imu_frames, robot_odom_frames, odom_pubs, dynamic_prims, config, imu_sensor, imu_props, old_h_ap, old_v_ap, config["is_iRotate"].get()) add_irotate_ros_components(camera_odom_frames, camera_odom_pubs, lidar_components, robot_base_prim_path, n) kit.update() first = False for n in range(config["num_robots"].get()): add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ap, old_v_ap, config, config["num_robots"].get() * 1) kit.update() for _ in range(50): simulation_context.render() print("Loading robot complete") for index, cam in enumerate(viewport_window_list): camera = stage.GetPrimAtPath(cam.get_active_camera()) camera.GetAttribute("horizontalAperture").Set(old_h_ap[index]) 
camera.GetAttribute("verticalAperture").Set(old_v_ap[index]) # setup manual ticks for all components (just to be sure) # IMU not necessary as it is NOT a ROS component itself for component in ros_camera_list: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) for component in ros_transform_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) # IT IS OF CRUCIAL IMPORTANCE THAT AFTER THIS POINT THE RENDER GETS DONE WITH THE SLEEPING CALL! OTHERWISE PATH TRACING SPP WILL GET RUINED if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) omni.usd.get_context().get_selection().set_selected_prim_paths([], False) simulation_context.stop() simulation_context.play() for _ in range(5): simulation_context.step(render=False) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) timeline.set_current_time(0) simulation_step = 0 # this is NOT the frame, this is the "step" (related to physics_hz) my_recorder = recorder_setup(config['_recorder_settings'].get(), out_dir_npy, config['record'].get()) timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded timeline.set_auto_update(False) omni.kit.commands.execute("RosBridgeUseSimTime", use_sim_time=True) omni.kit.commands.execute("RosBridgeUsePhysicsStepSimTime", use_physics_step_sim_time=True) # two times, this will ensure that totalSpp is reached sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) last_pub_time = rospy.Time.now() last_check_time = rospy.Time.now() if config['debug_vis'].get(): cnt = 0 while 1: cnt += 1 if cnt % 10000 == 0: import ipdb ipdb.set_trace() print("DEBUGGING VIS") sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) for i, cam in enumerate(ros_camera_list): omni.kit.commands.execute("RosBridgeTickComponent", path=str(cam.GetPath())) reversing_timeline_ratio = 1 print( f"The reversing ratio is {reversing_timeline_ratio}.\n" f"This implies that that every {experiment_length / reversing_timeline_ratio} frames we reverse the animations") cnt_reversal = 1 ratio_camera = config["ratio_camera"].get() ratio_odom = config["ratio_odom"].get() ratio_tf = config["ratio_tf"].get() starting_to_pub = False my_recorder._enable_record = False second_start = False while kit.is_running(): if can_start: last_check_time = rospy.Time.now() if config['record'].get(): sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) my_recorder._update() sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) starting_to_pub = True timeline.set_current_time(min(- 1 / (config["physics_hz"].get() / ratio_camera), -abs(config["bootstrap_exploration"].get()))) simulation_step = int(timeline.get_current_time() * config["physics_hz"].get()) - 1 print("Bootstrap started") can_start = False second_start = True simulation_step += 1 if starting_to_pub and simulation_step == 0: print("Starting recording NOW!") msg = String("starting") starting_pub.publish(msg) starting_to_pub = False time.sleep(0.5) if config['record'].get(): my_recorder._enable_record = True last_check_time = rospy.Time.now() if (config["_random_light"].get()["during_experiment"]): if (simulation_step % config["_random_light"].get()["n-frames"] == 0): # fixme todo smooth 
change, idea get max-min and time window randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1], environment.meters_per_unit, is_rtx=config["rtx_mode"].get()) # step the physics simulation_context.step(render=False) # get the current time in ROS print("Clocking...") omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") time.sleep(0.2) # publish IMU print("Publishing IMU...") pub_imu(imus_handle_list, imu_sensor, imu_pubs, robot_imu_frames, meters_per_unit) # publish joint status (ca 120 Hz) if simulation_step % ratio_tf == 0: print("Publishing joint/tf status...") for component in ros_transform_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) # publish odometry (60 hz) if simulation_step % ratio_odom == 0: print("Publishing odometry...") pub_cam_pose(camera_pose_frames, cam_pose_pubs, _dc, meters_per_unit) c_pose, _ = pub_odom(camera_odom_frames, camera_odom_pubs, _dc, meters_per_unit, robot_odom_frames) c_pose, _ = pub_odom(robot_odom_frames, odom_pubs, _dc, meters_per_unit) for component in lidar_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) # we consider ratio_camera to forward the animation. # If you want it different ratio_animation < ratio_camera to avoid # two frames with the same animation point if simulation_step % ratio_camera == 0: if my_recorder._enable_record: # update the image counter externally so that we can use it in the recorder and all images have the same index my_recorder._counter += 1 if simulation_step / ratio_camera < (experiment_length / reversing_timeline_ratio) * ( cnt_reversal): timeline.forward_one_frame() else: if simulation_step / ratio_camera >= ((experiment_length - 1) / reversing_timeline_ratio) * ( cnt_reversal + 1) or \ (timeline.get_current_time() - 1 / timeline.get_time_codes_per_seconds()) < 0: cnt_reversal += 2 timeline.forward_one_frame() else: timeline.rewind_one_frame() # publish camera (30 hz) if simulation_step % ratio_camera == 0: print("Publishing cameras...") # getting skel pose for each joint # get_skeleton_info(meters_per_unit, body_origins, body_list) # FIRST ONE WRITTEN IS AT 1/30 on the timeline pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, config["rtx_mode"].get(), my_recorder, second_start) if simulation_step % ratio_camera == 0 and simulation_step / ratio_camera == experiment_length \ and not config["neverending"].get(): print("End of experiment!!!") simulation_context.pause() if my_recorder.data_writer is not None: my_recorder.data_writer.stop_threads() timeline.set_current_time(0) context.save_as_stage(os.path.join(out_dir, "loaded_stage.usd")) experiment_info = {} experiment_info["config"] = config experiment_info["reversing_timeline_ratio"] = reversing_timeline_ratio experiment_info["environment"] = {} experiment_info["environment"]["id"] = environment.env_name experiment_info["environment"]["folder"] = environment.env_path experiment_info["environment"]["shifts"] = environment.shifts experiment_info["rng_state"] = rng_state np.save(os.path.join(out_dir, "experiment_info.npy"), experiment_info) break except: extype, value, tb = sys.exc_info() traceback.print_exc() # ipdb.post_mortem(tb) finally: for pub in odom_pubs: pub.unregister() for pub in imu_pubs: pub.unregister() for pub in cam_pose_pubs: pub.unregister() parent.shutdown() rospy.signal_shutdown("my_simulation complete") simulation_context.stop() try: kit.close() except: pass
15,217
Python
36.761787
143
0.693829
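The entry point above starts its main loop on a negative timeline time so that a bootstrap phase elapses before frame 0 is recorded, and it derives the integer step counter from that time. Below is a minimal, self-contained sketch of that bookkeeping; physics_hz, ratio_camera and bootstrap_exploration stand in for the config values, and all Isaac Sim / ROS calls are deliberately omitted.

# Minimal sketch of the bootstrap countdown used by the main loop (assumed values).
physics_hz = 240.0
ratio_camera = 8             # camera published every 8 physics steps (~30 Hz)
bootstrap_exploration = 1.0  # seconds of warm-up before recording starts

# Start the timeline in the past: at least one camera period, or the full bootstrap time.
start_time = min(-1.0 / (physics_hz / ratio_camera), -abs(bootstrap_exploration))
simulation_step = int(start_time * physics_hz) - 1

recording_started = False
while simulation_step < 0 or not recording_started:
    simulation_step += 1
    if simulation_step == 0:
        # This is where the real loop publishes the "starting" message
        # and enables the recorder.
        recording_started = True
print(f"bootstrap covered {-int(start_time * physics_hz)} physics steps before step 0")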
eliabntt/GRADE-RR/simulator/multi_robot_sim.py
import argparse import carb import confuse import ipdb import numpy as np import os import roslaunch import rospy import sys import time import traceback import yaml from omni.isaac.kit import SimulationApp from time import sleep def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' try: parser = argparse.ArgumentParser(description="Dynamic Worlds Simulator") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--record", type=boolean_string, default=True, help="Writing data to the disk") parser.add_argument("--debug_vis", type=boolean_string, default=False, help="When true continuosly loop the rendering") parser.add_argument("--neverending", type=boolean_string, default=False, help="Never stop the main loop") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("DynamicWorlds", __name__) config.set_file(args.config_file) config.set_args(args) os.environ["SHAPENET_LOCAL_DIR"] = config["shapenet_local_dir"].get() experiment_length = config["experiment_length"].get() can_start = True CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") # Cannot move before SimApp is launched import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.objects_utils import * from utils.environment_utils import * from utils.human_utils import * def monitor_movement(msg, args): global second_start global last_check_time global c_pose global old_pose global rng global env_prim_path wait_time = rospy.Duration(1) index, environment = args[0], args[1] if second_start and rospy.Time.now() > last_check_time + wait_time: last_check_time = rospy.Time.now() diff_x = abs(old_pose[index][0] - c_pose[index][0]) ** 2 diff_y = abs(old_pose[index][1] - c_pose[index][1]) ** 2 diff_z = abs(old_pose[index][2] - c_pose[index][2]) ** 2 dist = (diff_x + diff_y + diff_z) ** 0.5 if (dist) < 0.1: my_pose = PoseStamped() if (rng.uniform() > .9): x, y, z, yaw = position_object(environment, type=0) x = x[0] y = y[0] z = z[0] yaw = yaw[0] + rng.uniform(0, 2 * np.pi) else: yaw = get_robot_yaw(c_pose[index][0], c_pose[index][1], c_pose[index][2], environment.env_mesh, environment.shifts) x = c_pose[index][0] + 0.2 * np.cos(yaw) y = c_pose[index][1] + 0.2 * np.sin(yaw) z = c_pose[index][2] yaw += rng.uniform(0, 2 * np.pi) my_pose.pose.position.x = x my_pose.pose.position.y = y my_pose.pose.position.z = z rot = np.array(yaw) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot) ).GetQuat() my_pose.pose.orientation.x = quat.imaginary[0] my_pose.pose.orientation.y = quat.imaginary[1] my_pose.pose.orientation.z = quat.imaginary[2] my_pose.pose.orientation.w = quat.real print( f"Publishing random goal since robot {index} stuck [{x},{y},{z}, {yaw} ({yaw * 180 / 3.14})].") my_pose.header.frame_id = "world" 
my_pose.header.stamp = rospy.Time.now() movement_monitor_pubs[index].publish(my_pose) if (dist) < 0.05: set_colliders(env_prim_path, True) else: old_pose[index] = c_pose[index] set_colliders(env_prim_path, True) def autostart_exploration(msg, index): global first_start global second_start global can_start global can_change_second_start global last_pub_time if (msg.data == "PUB_FIRST_360"): can_change_second_start = True wait_time = rospy.Duration(0, 500000000) if second_start else rospy.Duration(1) if (msg.data == "WAIT_TRIGGER" or ( msg.data == "PUB_360" and not second_start) and rospy.Time.now() > last_pub_time + wait_time): if can_start: if not first_start: first_start = True elif can_change_second_start: second_start = True print("Exploration will start at the end of this movement") default_pose = PoseStamped() default_pose.header.frame_id = "world" default_pose.header.stamp = rospy.Time.now() start_explorer_pubs[index].publish(default_pose) last_pub_time = rospy.Time.now() def publish_random_goal(msg, args): global last_pub_time global first_start global second_start global can_start global can_change_second_start index, environment = args[0], args[1] if (msg.data == "PUB_FIRST_360"): can_change_second_start = True if (msg.data == "WAIT_TRIGGER" or ( msg.data == "PUB_360" and not second_start) and rospy.Time.now() > last_pub_time + rospy.Duration(0, 500000000)): if can_start: if not first_start: first_start = True elif can_change_second_start: second_start = True my_pose = PoseStamped() x, y, z, yaw = position_object(environment, type=0) my_pose.pose.position.x = x[0] my_pose.pose.position.y = y[0] my_pose.pose.position.z = z[0] rot = np.array(yaw[0]) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot) ).GetQuat() my_pose.pose.orientation.x = quat.imaginary[0] my_pose.pose.orientation.y = quat.imaginary[1] my_pose.pose.orientation.z = quat.imaginary[2] my_pose.pose.orientation.w = quat.real print(f"Publishing random goal [{x[0]},{y[0]},{z[0]}, {yaw[0]} ({yaw[0] * 180 / 3.14})] for robot {index}") my_pose.header.frame_id = "fixing_manual" my_pose.header.stamp = rospy.Time.now() send_waypoint_pubs[index].publish(my_pose) last_pub_time = rospy.Time.now() simulation_environment_setup() rospy.init_node("my_isaac_ros_app", anonymous=True, disable_signals=True, log_level=rospy.ERROR) starting_pub = rospy.Publisher('starting_experiment', String) rng = np.random.default_rng() rng_state = np.random.get_state() local_file_prefix = "my-computer://" # setup environment variables environment = environment(config, rng, local_file_prefix) uuid = roslaunch.rlutil.get_or_generate_uuid(None, False) out_dir = os.path.join(config['out_folder'].get(), environment.env_name) out_dir_npy = os.path.join(config['out_folder_npy'].get(), environment.env_name) if not os.path.exists(out_dir): os.makedirs(out_dir) os.environ["ROS_LOG_DIR"] = out_dir roslaunch.configure_logging(uuid) launch_files = ros_launchers_setup(roslaunch, environment.env_limits_shifted, config) parent = roslaunch.parent.ROSLaunchParent(uuid, launch_files, force_log=True) omni.usd.get_context().open_stage(local_file_prefix + config["base_env_path"].get(), None) # Wait two frames so that stage starts loading kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") # do this AFTER loading the world simulation_context = 
SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.start_simulation() add_clock() # add ROS clock simulation_context.play() for _ in range(100): omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") simulation_context.step() last_pub_time = rospy.Time.now() simulation_context.stop() # fixme IDK why this is necessary sometimes try: parent.start() except: print("Failed to start roslaunch, retry") try: parent.start() except: print("Failed to start roslaunch, exit") exit(1) print("ros node launched") kit.update() meters_per_unit = UsdGeom.GetStageMetersPerUnit(stage) # use rtx while setting up! set_raytracing_settings(config["physics_hz"].get()) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1] - 0.2, environment.meters_per_unit, is_rtx=config["rtx_mode"].get()) randomize_roughness(config["_random_roughness"].get(), rng, env_prim_path) # set timeline of the experiment timeline = setup_timeline(config) ros_camera_list = [] ros_transform_components = [] # list of tf and joint components, one (of each) for each robot viewport_window_list = [] dynamic_prims = [] imus_handle_list = [] robot_odom_frames = [] robot_imu_frames = [] camera_pose_frames = [] imu_pubs = [] odom_pubs = [] cam_pose_pubs = [] irotate_cam_odom_pubs = [] irotate_cam_odom_frames = [] irotate_differential_odom_frames = [] lidar_components = [] first = True imu_sensor, imu_props = setup_imu_sensor(config) simulation_context.play() for _ in range(100): omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") simulation_context.step() last_pub_time = rospy.Time.now() simulation_context.stop() print("Generating map...") if add_colliders(env_prim_path): simulation_context.play() x, y, z, yaw = position_object(environment, type=3) environment.generate_map(out_dir, origin=[x[0], y[0], 0]) for _ in range(10): simulation_context.step() timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded else: simulation_context.play() for _ in range(10): simulation_context.step() print("Error generating collisions", file=sys.stderr) simulation_context.play() _dc = dynamic_control_interface() print("Loading robots..") robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = [str(i) for i in config["usd_robot_path"].get()] c_pose = [] old_pose = [] old_h_ap = [] old_v_ap = [] is_irotate = np.array(config["is_iRotate"].get()) for n in range(config["num_robots"].get()): simulation_context.stop() import_robot(robot_base_prim_path, n, usd_robot_path[n], local_file_prefix) if is_irotate[n]: x, y, z, yaw = 0, 0, 0, 0 else: x, y, z, yaw = get_valid_robot_location(environment, first) simulation_context.stop() set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [0, 0, yaw], (environment.env_limits[5]) / meters_per_unit, 0.3/meters_per_unit, is_irotate[n]) c_pose.append([x, y, z]) old_pose.append([x, y, z]) kit.update() simulation_context.play() kit.update() add_ros_components(robot_base_prim_path, n, ros_transform_components, ros_camera_list, viewport_window_list, camera_pose_frames, cam_pose_pubs, imus_handle_list, imu_pubs, robot_imu_frames, robot_odom_frames, odom_pubs, dynamic_prims, config, imu_sensor, imu_props, old_h_ap, old_v_ap, 
is_irotate[n]) if is_irotate[n]: add_irotate_ros_components(irotate_cam_odom_frames, irotate_cam_odom_pubs, lidar_components, robot_base_prim_path, n) irotate_differential_odom_frames.append(robot_odom_frames[-1]) kit.update() first = False for n in range(config["num_robots"].get()): add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ap, old_v_ap, config, config["num_robots"].get()*1) for _ in range(50): simulation_context.render() print("Loading robot complete") for index, cam in enumerate(viewport_window_list): camera = stage.GetPrimAtPath(cam.get_active_camera()) camera.GetAttribute("horizontalAperture").Set(old_h_ap[index]) camera.GetAttribute("verticalAperture").Set(old_v_ap[index]) # IMU not necessary as it is NOT a ROS component itself for component in ros_camera_list: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) for component in ros_transform_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) print("Starting FSM - setting up topics...") start_explorer_pubs = [] send_waypoint_pubs = [] movement_monitor_pubs = [] for index, _ in enumerate(robot_odom_frames): print("Waiting for fsm to start for robot {}".format(index)) my_topic = f"{robot_base_prim_path}{index}/exploration_node/fsm_exploration/state" if config["autonomous"].get(): rospy.Subscriber(my_topic, String, callback=autostart_exploration, callback_args=index) start_explorer_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/traj_start_trigger", PoseStamped, queue_size=10)) else: rospy.Subscriber(my_topic, String, callback=publish_random_goal, callback_args=(index, environment)) send_waypoint_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/exploration_node/manual_goal", PoseStamped, queue_size=10)) rospy.Subscriber(my_topic, String, callback=monitor_movement, callback_args=(index, environment)) movement_monitor_pubs.append( rospy.Publisher(f"{robot_base_prim_path}{index}/command/pose", PoseStamped, queue_size=10)) print("fsm management for robot {} setted up".format(index)) print("FSM setted up") print("Loading humans..") my_humans = [] my_humans_heights = [] human_export_folder = config["human_path"].get() human_folders = os.listdir(human_export_folder) tot_area = 0 areas = [] initial_dynamics = len(dynamic_prims) used_ob_stl_paths = [] ## todo cycle to complete area, need to update the service probably n = 0 human_anim_len = [] added_prims = [] human_base_prim_path = config["human_base_prim_path"].get() n_humans_loading = rng.integers(7, 1 + max(7, config["num_humans"].get())) while n < n_humans_loading: anim_len = 0 # the animation needs to be shorter than config["max_anim_len"].get() and longer than 0/min_len while anim_len < max(config["min_human_anim_len"].get(), 0) or anim_len > config["max_human_anim_len"].get(): folder = rng.choice(human_folders) random_name = rng.choice(os.listdir(os.path.join(human_export_folder, folder))) asset_path = local_file_prefix + os.path.join(human_export_folder, folder, random_name, random_name + ".usd") tmp_pkl = pkl.load(open(os.path.join(human_export_folder, folder, random_name, random_name + ".pkl"), 'rb')) anim_len = tmp_pkl['ef'] print("Loading human {} from {}".format(random_name, folder)) used_ob_stl_paths.append(os.path.join(human_export_folder, folder, random_name, random_name + ".stl")) human_anim_len.append(tmp_pkl['ef']) if "verts" in tmp_pkl.keys(): my_humans_heights.append(tmp_pkl['verts'][:, :, 2]) else: my_humans_heights.append(None) 
my_humans.append(random_name) load_human(human_base_prim_path, n, asset_path, dynamic_prims, added_prims) stl_path = os.path.join(human_export_folder, folder, random_name, random_name + ".stl") this_mesh = mesh.Mesh.from_file(stl_path) areas.append((this_mesh.x.max() - this_mesh.x.min()) * (this_mesh.y.max() - this_mesh.y.min())) tot_area += areas[-1] # if not config["use_area"].get(): n += 1 # if env_area / area_polygon * 100 > config["area_percentage"].get(): # break x, y, z, yaw = position_object(environment, type=1, objects=my_humans, ob_stl_paths=used_ob_stl_paths, max_collisions=int(config["allow_collision"].get())) to_be_removed = [] human_prim_list = [] body_origins = [] for n, human in enumerate(my_humans): if z[n] < 0: to_be_removed.append(n) tot_area -= areas[n] else: set_translate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [x[n] / meters_per_unit, y[n] / meters_per_unit, z[n] / meters_per_unit]) set_rotate(stage.GetPrimAtPath(f"{human_base_prim_path}{n}"), [0, 0, yaw[n]]) human_prim_list.append(f"{human_base_prim_path}{n}") body_origins.append([x[n], y[n], z[n], yaw[n]]) if len(to_be_removed) > 0: print("Removing humans that are out of the environment") to_be_removed.reverse() cumsum = np.cumsum(added_prims) for n in to_be_removed: my_humans.pop(n) used_ob_stl_paths.pop(n) my_humans_heights.pop(n) for _ in range(added_prims[n]): if n > 0: dynamic_prims.pop(cumsum[n - 1] + initial_dynamics) else: dynamic_prims.pop(initial_dynamics) human_anim_len.pop(n) omni.kit.commands.execute("DeletePrimsCommand", paths=[f"{human_base_prim_path}{n}" for n in to_be_removed]) print("Loading human complete") google_ob_used, shapenet_ob_used = load_objects(config, environment, rng, dynamic_prims) # IT IS OF CRUCIAL IMPORTANCE THAT AFTER THIS POINT THE RENDER GETS DONE WITH THE SLEEPING CALL! 
OTHERWISE PATH TRACING SPP WILL GET RUINED if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) omni.usd.get_context().get_selection().set_selected_prim_paths([], False) simulation_context.stop() simulation_context.play() for _ in range(5): simulation_context.step(render=False) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) timeline.set_current_time(0) simulation_step = 0 # this is NOT the frame, this is the "step" (related to physics_hz) my_recorder = recorder_setup(config['_recorder_settings'].get(), out_dir_npy, config['record'].get(), config["num_robots"].get() * 1) timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded timeline.set_auto_update(False) first_start = False second_start = False can_change_second_start = False omni.kit.commands.execute("RosBridgeUseSimTime", use_sim_time=True) omni.kit.commands.execute("RosBridgeUsePhysicsStepSimTime", use_physics_step_sim_time=True) # two times, this will ensure that totalSpp is reached sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) last_pub_time = rospy.Time.now() last_check_time = rospy.Time.now() if config['debug_vis'].get(): cnt = 0 while 1: cnt += 1 if cnt % 10000 == 0: import ipdb ipdb.set_trace() print("DEBUGGING VIS") sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) for i, cam in enumerate(ros_camera_list): omni.kit.commands.execute("RosBridgeTickComponent", path=str(cam.GetPath())) reversing_timeline_ratio = compute_timeline_ratio(human_anim_len, config["reverse_strategy"].get(), experiment_length) print( f"The reversing ratio is {reversing_timeline_ratio}.\n" f"This implies that that every {experiment_length / reversing_timeline_ratio} frames we reverse the animations") cnt_reversal = 1 ratio_camera = config["ratio_camera"].get() ratio_odom = config["ratio_odom"].get() ratio_tf = config["ratio_tf"].get() starting_to_pub = False my_recorder._enable_record = False while kit.is_running(): if can_start: last_check_time = rospy.Time.now() if second_start: if config['record'].get(): sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) my_recorder._update() sleeping(simulation_context, viewport_window_list, raytracing=config["rtx_mode"].get()) starting_to_pub = True timeline.set_current_time(min(- 1 / (config["physics_hz"].get() / ratio_camera), -abs(config["bootstrap_exploration"].get()))) simulation_step = int(timeline.get_current_time() * config["physics_hz"].get()) - 1 print("Bootstrap started") can_start = False simulation_step += 1 if starting_to_pub and simulation_step == 0: move_humans_to_ground(my_humans_heights, human_prim_list, simulation_step / ratio_camera, meters_per_unit, config["max_distance_human_ground"].get()) print("Starting recording NOW!") msg = String("starting") starting_pub.publish(msg) starting_to_pub = False time.sleep(0.5) if config['record'].get(): my_recorder._enable_record = True last_check_time = rospy.Time.now() if (config["_random_light"].get()["during_experiment"]): if (simulation_step % config["_random_light"].get()["n-frames"] == 0): # fixme todo smooth change, idea get max-min and time window randomize_and_fix_lights(config["_random_light"].get(), rng, env_prim_path, environment.env_limits[-1], environment.meters_per_unit, 
is_rtx=config["rtx_mode"].get()) # step the physics simulation_context.step(render=False) # get the current time in ROS print("Clocking...") omni.kit.commands.execute("RosBridgeTickComponent", path="/ROS_Clock") time.sleep(0.2) # publish IMU print("Publishing IMU...") pub_imu(imus_handle_list, imu_sensor, imu_pubs, robot_imu_frames, meters_per_unit) # publish joint status (ca 120 Hz) if simulation_step % ratio_tf == 0: print("Publishing joint/tf status...") for component in ros_transform_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) # publish odometry (60 hz) if simulation_step % ratio_odom == 0: print("Publishing odometry...") c_pose, _ = pub_odom(robot_odom_frames, odom_pubs, _dc, meters_per_unit) pub_cam_pose(camera_pose_frames, cam_pose_pubs, _dc, meters_per_unit) _, _ = pub_odom(irotate_cam_odom_frames, irotate_cam_odom_pubs, _dc, meters_per_unit, irotate_differential_odom_frames) for component in lidar_components: omni.kit.commands.execute("RosBridgeTickComponent", path=str(component.GetPath())) # we consider ratio_camera to forward the animation. # If you want it different ratio_animation < ratio_camera to avoid # two frames with the same animation point if second_start: if simulation_step % ratio_camera == 0: if my_recorder._enable_record: # update the image counter externally so that we can use it in the recorder and all images have the same index my_recorder._counter += 1 if simulation_step / ratio_camera < (experiment_length / reversing_timeline_ratio) * ( cnt_reversal): timeline.forward_one_frame() else: if simulation_step / ratio_camera >= ((experiment_length - 1) / reversing_timeline_ratio) * ( cnt_reversal + 1) or \ (timeline.get_current_time() - 1 / timeline.get_time_codes_per_seconds()) < 0: cnt_reversal += 2 timeline.forward_one_frame() else: timeline.rewind_one_frame() # publish camera (30 hz) if simulation_step % ratio_camera == 0: print("Publishing cameras...") # getting skel pose for each joint # get_skeleton_info(meters_per_unit, body_origins, body_list) # FIRST ONE WRITTEN IS AT 1/30 on the timeline pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, config["rtx_mode"].get(), my_recorder, second_start) if simulation_step % ratio_camera == 0 and simulation_step / ratio_camera == experiment_length \ and not config["neverending"].get(): print("End of experiment!!!") simulation_context.pause() if my_recorder.data_writer is not None: my_recorder.data_writer.stop_threads() timeline.set_current_time(0) context.save_as_stage(os.path.join(out_dir, "loaded_stage.usd")) experiment_info = {} experiment_info["config"] = config experiment_info["reversing_timeline_ratio"] = reversing_timeline_ratio experiment_info["humans"] = {} experiment_info["humans"]["ids"] = my_humans experiment_info["humans"]["folders"] = used_ob_stl_paths experiment_info["humans"]["origins"] = body_origins # x y z yaw experiment_info["google_obs"] = google_ob_used experiment_info["shapenet_obs"] = shapenet_ob_used experiment_info["environment"] = {} experiment_info["environment"]["id"] = environment.env_name experiment_info["environment"]["folder"] = environment.env_path experiment_info["environment"]["shifts"] = environment.shifts experiment_info["rng_state"] = rng_state np.save(os.path.join(out_dir, "experiment_info.npy"), experiment_info) break except: extype, value, tb = sys.exc_info() traceback.print_exc() # ipdb.post_mortem(tb) finally: for pub in odom_pubs: pub.unregister() for pub in imu_pubs: pub.unregister() for pub in 
cam_pose_pubs: pub.unregister() for pub in start_explorer_pubs: pub.unregister() for pub in send_waypoint_pubs: pub.unregister() parent.shutdown() rospy.signal_shutdown("my_simulation complete") simulation_context.stop() try: kit.close() except: pass
26,681
Python
39.550152
144
0.639331
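multi_robot_sim.py watches each robot through monitor_movement and republishes a goal whenever the robot has barely moved since the last one-second check. The simplified, self-contained sketch below mirrors only that test: is_stuck reproduces the 0.1 m displacement threshold, while recovery_goal is a hypothetical helper condensing the "nudge 0.2 m along the current yaw and randomize the heading" branch (the occasional fully random re-placement and the collider toggling are left out).

import numpy as np

def is_stuck(old_pose, new_pose, threshold=0.1):
    # Displacement since the last check; under `threshold` meters counts as stuck.
    diff = np.asarray(new_pose, dtype=float) - np.asarray(old_pose, dtype=float)
    return float(np.linalg.norm(diff)) < threshold

def recovery_goal(c_pose, yaw, rng, step=0.2):
    # Hypothetical helper: move the goal `step` meters along the current yaw
    # and add a random heading, as the stuck handler does most of the time.
    x = c_pose[0] + step * np.cos(yaw)
    y = c_pose[1] + step * np.sin(yaw)
    return x, y, c_pose[2], yaw + rng.uniform(0, 2 * np.pi)

rng = np.random.default_rng(0)
old_pose, new_pose = [1.0, 2.0, 1.5], [1.02, 2.03, 1.5]
if is_stuck(old_pose, new_pose):
    print(recovery_goal(new_pose, yaw=0.5, rng=rng))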
eliabntt/GRADE-RR/simulator/first_run.py
import argparse
import os

import confuse

base_environment_path = ""  # please edit this e.g. GRADE-RR/usds/env_base.usd

# necessary import
from omni.isaac.kit import SimulationApp

# simply use this to correctly parse booleans
def boolean_string(s):
    if s.lower() not in {'false', 'true'}:
        raise ValueError('Not a valid boolean string')
    return s.lower() == 'true'

parser = argparse.ArgumentParser(description="Your first IsaacSim run")
parser.add_argument("--headless", type=boolean_string, default=True, help="Whether to run it in headless mode or not")
parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False")
args, unknown = parser.parse_known_args()
config = confuse.Configuration("first_run", __name__)
config.set_args(args)

# create a kit object which is your Simulation App
CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()}
kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit")

# !!! you can ONLY load Isaac modules AFTER this point !!!
import omni
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils.stage import is_stage_loading

# after here you can do everything that you desire
# first step is usually opening a basic stage, perhaps with some assets already in as the sky
omni.usd.get_context().open_stage(base_environment_path, None)
# Wait two frames so that stage starts loading
kit.update()
kit.update()
print("Loading stage...")
while is_stage_loading():
    kit.update()
print("Loading Complete")

context = omni.usd.get_context()
stage = context.get_stage()  # used to access the elements of the simulation

simulation_context = SimulationContext(physics_dt=1.0 / 60, rendering_dt=1.0 / 60, stage_units_in_meters=0.01, backend='torch')
simulation_context.initialize_physics()
physx_interface = omni.physx.acquire_physx_interface()
physx_interface.start_simulation()

for _ in range(100):
    simulation_context.render()
    simulation_context.step(render=False)

try:
    kit.close()
except:
    pass
1,978
Python
33.719298
127
0.73913
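first_run.py and the other entry points all define the same boolean_string helper for argparse. The short, standalone demonstration below uses the same --headless flag and shows why the helper exists: with plain type=bool, argparse would convert the non-empty string "False" to True.

import argparse

def boolean_string(s):
    if s.lower() not in {'false', 'true'}:
        raise ValueError('Not a valid boolean string')
    return s.lower() == 'true'

parser = argparse.ArgumentParser()
parser.add_argument("--headless", type=boolean_string, default=True)
print(parser.parse_args(["--headless", "False"]).headless)  # -> False
print(parser.parse_args([]).headless)                       # -> True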
eliabntt/GRADE-RR/simulator/savana_simulation.py
import carb import rospy from omni.isaac.kit import SimulationApp import argparse import os import time import numpy as np import roslaunch from time import sleep import yaml import confuse import ipdb, traceback, sys def boolean_string(s): if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' try: parser = argparse.ArgumentParser(description="Dynamic Worlds Simulator") parser.add_argument("--config_file", type=str, default="config.yaml") parser.add_argument("--headless", type=boolean_string, default=True, help="Wheter to run it in headless mode or not") parser.add_argument("--rtx_mode", type=boolean_string, default=False, help="Use rtx when True, use path tracing when False") parser.add_argument("--record", type=boolean_string, default=True, help="Writing data to the disk") parser.add_argument("--debug_vis", type=boolean_string, default=False, help="When true continuosly loop the rendering") parser.add_argument("--neverending", type=boolean_string, default=False, help="Never stop the main loop") parser.add_argument("--fix_env", type=str, default="", help="leave it empty to have a random env, fix it to use a fixed one. Useful for loop processing") args, unknown = parser.parse_known_args() config = confuse.Configuration("DynamicWorlds", __name__) config.set_file(args.config_file) config.set_args(args) can_start = True CONFIG = {"display_options": 3286, "width": 1280, "height": 720, "headless": config["headless"].get()} kit = SimulationApp(launch_config=CONFIG, experience=f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit") # Cannot move before SimApp is launched import utils.misc_utils from utils.misc_utils import * from utils.robot_utils import * from utils.simulation_utils import * from utils.environment_utils import * simulation_environment_setup() # set timeline of the experiment timeline = setup_timeline(config) rospy.init_node("my_isaac_ros_app", anonymous=True, disable_signals=True, log_level=rospy.ERROR) starting_pub = rospy.Publisher('starting_experiment', String) rng = np.random.default_rng() rng_state = np.random.get_state() local_file_prefix = "" # setup environment variables meters_per_unit = config["meters_per_unit"].get() environment = environment(config, rng, local_file_prefix, meters_per_unit) out_dir = os.path.join(config['out_folder'].get(), environment.env_name) out_dir_npy = os.path.join(config['out_folder_npy'].get(), environment.env_name) if not os.path.exists(out_dir): os.makedirs(out_dir) os.environ["ROS_LOG_DIR"] = out_dir omni.usd.get_context().open_stage(local_file_prefix + config["base_env_path"].get(), None) # Wait two frames so that stage starts loading kit.update() kit.update() print("Loading stage...") while is_stage_loading(): kit.update() print("Loading Complete") context = omni.usd.get_context() stage = context.get_stage() set_stage_up_axis("Z") if config["clean_base_env"].get(): omni.kit.commands.execute("DeletePrimsCommand", paths=["/World/GroundPlane"]) # do this AFTER loading the world simulation_context = SimulationContext(physics_dt=1.0 / config["physics_hz"].get(), rendering_dt=1.0 / config["render_hz"].get(), stage_units_in_meters=0.01) simulation_context.initialize_physics() physx_interface = omni.physx.acquire_physx_interface() physx_interface.start_simulation() _clock_graph = add_clock() # add ROS clock simulation_context.play() for _ in range(10): simulation_context.step() og.Controller.evaluate_sync(_clock_graph) last_pub_time = rospy.Time.now() simulation_context.stop() 
kit.update() # use rtx while setting up! set_raytracing_settings(config["physics_hz"].get()) env_prim_path = environment.load_and_center(config["env_prim_path"].get()) process_semantics(config["env_prim_path"].get()) ros_camera_list = [] ros_transform_components = [] # list of tf and joint components, one (of each) for each robot viewport_window_list = [] dynamic_prims = [] imus_handle_list = [] robot_odom_frames = [] robot_imu_frames = [] camera_pose_frames = [] imu_pubs = [] odom_pubs = [] cam_pose_pubs = [] simulation_context.play() for _ in range(100): og.Controller.evaluate_sync(_clock_graph) simulation_context.step() print("Loading robots..") from omni.isaac.sensor import _sensor _is = _sensor.acquire_imu_sensor_interface() _dc = dynamic_control_interface() robot_base_prim_path = config["robot_base_prim_path"].get() usd_robot_path = str(config["usd_robot_path"].get()) old_h_ap = [] old_v_ap = [] robot_init_loc = [] robot_init_ang = [] simulation_context.stop() for n in range(config["num_robots"].get()): import_robot(robot_base_prim_path, n, usd_robot_path, local_file_prefix) if config["init_loc"].get()["use"]: # assuming we go here x = config["init_loc"].get()["x"][n] y = config["init_loc"].get()["y"][n] z = config["init_loc"].get()["z"][n] yaw = np.deg2rad(config["init_loc"].get()["yaw"][n]) roll = np.deg2rad(config["init_loc"].get()["roll"][n]) pitch = np.deg2rad(config["init_loc"].get()["pitch"][n]) robot_init_loc.append([x,y,z]) robot_init_ang.append([roll, pitch, yaw]) set_drone_joints_init_loc(f"{robot_base_prim_path}{n}", [x / meters_per_unit, y / meters_per_unit, z / meters_per_unit], [roll, pitch, yaw], (environment.env_limits[5]) / meters_per_unit) add_ros_components(robot_base_prim_path, n, ros_transform_components, ros_camera_list, viewport_window_list, camera_pose_frames, cam_pose_pubs, imu_pubs, robot_imu_frames, robot_odom_frames, odom_pubs, None, #lidars = None dynamic_prims, config, old_h_ap, old_v_ap, _is, simulation_context, _clock_graph) kit.update() if config["use_robot_traj"].get(): add_robot_traj(f"{robot_base_prim_path}{n}",config,meters_per_unit,timeline.get_time_codes_per_seconds()) for n in range(config["num_robots"].get()): add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ap, old_v_ap, config, simulation_context, config["num_robots"].get()) for _ in range(50): simulation_context.render() print("Loading robot complete") print("WARNING: CAMERA APERTURE MANUAL SET NO LONGER WORKS, NEEDS TO BE FIXED BY NVIDIA!!!!") time.sleep(5) for index, cam in enumerate(viewport_window_list): camera = stage.GetPrimAtPath(cam.get_active_camera()) camera.GetAttribute("horizontalAperture").Set(old_h_ap[index]) camera.GetAttribute("verticalAperture").Set(old_v_ap[index]) # IT IS OF CRUCIAL IMPORTANCE THAT AFTER THIS POINT THE RENDER GETS DONE WITH THE SLEEPING CALL! 
OTHERWISE PATH TRACING SPP WILL GET RUINED if (config["rtx_mode"].get()): set_raytracing_settings(config["physics_hz"].get()) else: set_pathtracing_settings(config["physics_hz"].get()) omni.usd.get_context().get_selection().set_selected_prim_paths([], False) simulation_context.stop() simulation_context.play() for _ in range(5): simulation_context.step(render=False) sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) timeline.set_current_time(0) simulation_step = 0 # this is NOT the frame, this is the "step" (related to physics_hz) my_recorder = recorder_setup(config['_recorder_settings'].get(), out_dir_npy, config['record'].get()) timeline.set_current_time(0) # set to 0 to be sure that the first frame is recorded timeline.set_auto_update(False) # two times, this will ensure that totalSpp is reached sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) if config['debug_vis'].get(): cnt = 0 while 1: cnt += 1 if cnt % 10000 == 0: import ipdb ipdb.set_trace() print("DEBUGGING VIS") simulation_context.step(render=False) simulation_context.step(render=True) sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) for i, cam in enumerate(ros_camera_list): omni.kit.commands.execute("RosBridgeTickComponent", path=str(cam.GetPath())) ratio_camera = config["ratio_camera"].get() ratio_odom = config["ratio_odom"].get() ratio_tf = config["ratio_tf"].get() starting_to_pub = False my_recorder._enable_record = False forward = True goal_list = [] exp_len = config["anim_exp_len"].get() if not config["use_robot_traj"].get() and config["use_joint_traj"].get(): for elem in config["robot_traj"].get(): goal_list.append([elem["pose"]["x"], elem["pose"]["y"], elem["pose"]["z"], elem["pose"]["roll"], elem["pose"]["pitch"], elem["pose"]["yaw"]]) while kit.is_running(): if can_start: if config['record'].get(): # reload_references("/World/home") sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) my_recorder._update() sleeping(simulation_context, viewport_window_list, config["rtx_mode"].get()) starting_to_pub = True timeline.set_current_time(min(- 1 / (config["physics_hz"].get() / ratio_camera), -abs(config["bootstrap_exploration"].get()))) simulation_step = int(timeline.get_current_time() * config["physics_hz"].get()) - 1 print("Bootstrap started") can_start = False simulation_step += 1 if starting_to_pub and simulation_step == 0: print("Starting recording NOW!") msg = String("starting") starting_pub.publish(msg) time.sleep(0.5) starting_to_pub = False if config['record'].get(): my_recorder._enable_record = True # step the physics simulation_context.step(render=False) # get the current time in ROS print("Clocking...") og.Controller.evaluate_sync(_clock_graph) ctime = timeline.get_current_time() simulation_context.render() timeline.set_current_time(ctime) # publish IMU print("Publishing IMU...") pub_imu(_is, imu_pubs, robot_imu_frames, meters_per_unit) # publish joint status (ca 120 Hz) if simulation_step % ratio_tf == 0: print("Publishing joint/tf status...") for component in ros_transform_components: og.Controller.set(og.Controller.attribute(f"{component}/OnImpulseEvent.state:enableImpulse"), True) # publish odometry (60 hz) if simulation_step % ratio_odom == 0: print("Publishing odometry...") c_pose, c_angle = pub_odom(robot_odom_frames, odom_pubs, _dc, meters_per_unit) pub_cam_pose(camera_pose_frames, cam_pose_pubs, _dc, meters_per_unit) if 
config["use_joint_traj"].get(): if len(goal_list)>0 and simulation_step >= 0: # this needs to be expanded to multiple robots goal_list = check_pose_and_goals(robot_init_loc[0], robot_init_ang[0], c_pose[0], c_angle[0], "/my_robot_0", goal_list, meters_per_unit, simulation_step == 0) if len(goal_list)==0: break # we consider ratio_camera to forward the animation. # If you want it different ratio_animation < ratio_camera to avoid # two frames with the same animation point if simulation_step % ratio_camera == 0: if my_recorder._enable_record: # update the image counter externally so that we can use it in the recorder and all images have the same index my_recorder._counter += 1 if (simulation_step > 0 and (simulation_step / ratio_camera + 1) % exp_len == 0): forward = not forward if (timeline.get_current_time() - 1 / timeline.get_time_codes_per_seconds()<0): forward = True if forward: timeline.forward_one_frame() else: timeline.rewind_one_frame() # publish camera (30 hz) if simulation_step % ratio_camera == 0: ctime = timeline.get_current_time() print("Publishing cameras...") pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, config["rtx_mode"].get(), my_recorder) timeline.set_current_time(ctime) except: extype, value, tb = sys.exc_info() traceback.print_exc() # ipdb.post_mortem(tb) finally: for pub in odom_pubs: pub.unregister() for pub in imu_pubs: pub.unregister() for pub in cam_pose_pubs: pub.unregister() parent.shutdown() rospy.signal_shutdown("my_simulation complete") simulation_context.stop() try: kit.close() except: pass
13,742
Python
39.780415
174
0.613375
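savana_simulation.py plays the scene animation in a ping-pong fashion: every anim_exp_len camera frames the direction flips, and playback is forced forward whenever rewinding would push the timeline below zero. The sketch below illustrates only that toggle, with frame_dt standing in for 1 / timeline.get_time_codes_per_seconds() and the off-by-one details of the real loop simplified away.

# Ping-pong playback sketch (assumed constants, no Isaac Sim timeline calls).
exp_len = 6             # stands in for config["anim_exp_len"]
frame_dt = 1.0 / 30.0   # stands in for one timeline frame
current_time = 0.0
forward = True

for camera_frame in range(1, 25):
    if camera_frame % exp_len == 0:
        forward = not forward   # flip direction every exp_len camera frames
    if current_time - frame_dt < 0:
        forward = True          # never rewind past the start of the timeline
    current_time += frame_dt if forward else -frame_dt

print(f"final timeline time: {current_time:.3f} s")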
eliabntt/GRADE-RR/simulator/utils/robot_utils.py
import utils.misc_utils from omni.isaac.core.utils.prims import set_targets from scipy.spatial.transform import Rotation from utils.misc_utils import * from omni.isaac.core.utils.render_product import create_hydra_texture def create_odom_message(_dc, robot_body_ptr, handle, meters_per_unit): """ Create odometry message for the robot_body_ptr. Converts the readings from the IsaacSim unit to the mps when necessary Gets the current rostime header frame us "WORLD" and the child frame is from the "handle" """ lin_vel = _dc.get_rigid_body_local_linear_velocity(robot_body_ptr) ang_vel = _dc.get_rigid_body_angular_velocity(robot_body_ptr) pose = _dc.get_rigid_body_pose(robot_body_ptr) odom_msg = Odometry() odom_msg.header.frame_id = "world" odom_msg.header.stamp = rospy.Time.now() odom_msg.child_frame_id = handle[1:] if handle.startswith("/") else handle odom_msg.pose.pose.position.x = pose.p.x * meters_per_unit odom_msg.pose.pose.position.y = pose.p.y * meters_per_unit odom_msg.pose.pose.position.z = pose.p.z * meters_per_unit odom_msg.pose.pose.orientation.x = pose.r.x odom_msg.pose.pose.orientation.y = pose.r.y odom_msg.pose.pose.orientation.z = pose.r.z odom_msg.pose.pose.orientation.w = pose.r.w odom_msg.twist.twist.linear.x = lin_vel.x * meters_per_unit odom_msg.twist.twist.linear.y = lin_vel.y * meters_per_unit odom_msg.twist.twist.linear.z = lin_vel.z * meters_per_unit odom_msg.twist.twist.angular.x = ang_vel.x odom_msg.twist.twist.angular.y = ang_vel.y odom_msg.twist.twist.angular.z = ang_vel.z p_cov = np.array([0.0] * 36).reshape(6, 6) p_cov[0:2, 0:2] = 0.00 p_cov[5, 5] = 0.00 odom_msg.pose.covariance = tuple(p_cov.ravel().tolist()) odom_msg.twist.covariance = tuple(p_cov.ravel().tolist()) return odom_msg def create_diff_odom_message(_dc, robot_body_ptr, handle, meters_per_unit, base_body_ptr, base_handle): """ Create odometry message for the robot_body_ptr. 
Converts the readings from the IsaacSim unit to the mps when necessary Gets the current rostime header frame us "WORLD" and the child frame is from the "handle" """ lin_vel = _dc.get_rigid_body_local_linear_velocity(robot_body_ptr) ang_vel = _dc.get_rigid_body_angular_velocity(robot_body_ptr) pose = _dc.get_rigid_body_pose(robot_body_ptr) base_lin_vel = _dc.get_rigid_body_local_linear_velocity(base_body_ptr) base_ang_vel = _dc.get_rigid_body_angular_velocity(base_body_ptr) base_pose = _dc.get_rigid_body_pose(base_body_ptr) odom_msg = Odometry() odom_msg.header.frame_id = base_handle odom_msg.header.stamp = rospy.Time.now() odom_msg.child_frame_id = handle[1:] if handle.startswith("/") else handle odom_msg.pose.pose.position.x = (pose.p.x - base_pose.p.x) * meters_per_unit odom_msg.pose.pose.position.y = (pose.p.y - base_pose.p.y) * meters_per_unit odom_msg.pose.pose.position.z = (pose.p.z - base_pose.p.z) * meters_per_unit q1 = Quaternion(base_pose.r.w, base_pose.r.x, base_pose.r.y, base_pose.r.z) q2 = Quaternion(pose.r.w, pose.r.x, pose.r.y, pose.r.z) q = q1.conjugate * q2 odom_msg.pose.pose.orientation.x = q.x odom_msg.pose.pose.orientation.y = q.y odom_msg.pose.pose.orientation.z = q.z odom_msg.pose.pose.orientation.w = q.w odom_msg.twist.twist.linear.x = (lin_vel.x - base_lin_vel.x) * meters_per_unit odom_msg.twist.twist.linear.y = (lin_vel.y - base_lin_vel.y) * meters_per_unit odom_msg.twist.twist.linear.z = (lin_vel.z - base_lin_vel.z) * meters_per_unit odom_msg.twist.twist.angular.x = (ang_vel.x - base_ang_vel.x) odom_msg.twist.twist.angular.y = (ang_vel.y - base_ang_vel.y) odom_msg.twist.twist.angular.z = (ang_vel.z - base_ang_vel.z) p_cov = np.array([0.0] * 36).reshape(6, 6) p_cov[0:2, 0:2] = 0.00 p_cov[5, 5] = 0.00 odom_msg.pose.covariance = tuple(p_cov.ravel().tolist()) odom_msg.twist.covariance = tuple(p_cov.ravel().tolist()) return odom_msg def create_camera_pose_message(_dc, camera_body_ptr, handle, meters_per_unit): """ Similar to the odom, but it's just for a pose message, in this case for the camera """ pose = _dc.get_rigid_body_pose(camera_body_ptr) camera_pose = PoseStamped() camera_pose.header.frame_id = "world" camera_pose.header.stamp = rospy.Time.now() camera_pose.pose.position.x = pose.p.x * meters_per_unit camera_pose.pose.position.y = pose.p.y * meters_per_unit camera_pose.pose.position.z = pose.p.z * meters_per_unit camera_pose.pose.orientation.x = pose.r.x camera_pose.pose.orientation.y = pose.r.y camera_pose.pose.orientation.z = pose.r.z camera_pose.pose.orientation.w = pose.r.w return camera_pose def add_pose_tree(path: str, irotate: bool=False): """ Add the tf publisher to the desired path. This path should be the robot itself. Each robot has a pose tree. 
""" if path.startswith("/"): path = path[1:] og.Controller.edit( {"graph_path": f"/{path}/TFActionGraph", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"), ("PublishTF", "omni.isaac.ros_bridge.ROS1PublishTransformTree"), ], og.Controller.Keys.CONNECT: [ ("OnImpulseEvent.outputs:execOut", "PublishTF.inputs:execIn"), ("ReadSimTime.outputs:simulationTime", "PublishTF.inputs:timeStamp"), ], og.Controller.Keys.SET_VALUES: [ ("PublishTF.inputs:nodeNamespace", f"/{path}"), ] }, ) # fixme if irotate: omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/my_robot_0/ROS_PoseTree.poseTreePubTopic'), value='/tf2', prev='/tf') set_target_prims(primPath=f"/{path}/TFActionGraph/PublishTF", inputName="inputs:targetPrims", targetPrimPaths=[f"/{path}"]) return f"/{path}/TFActionGraph" def add_camera_and_viewport(path: str, resolution: list, old_h_ape, old_v_ape, sc, index=0, robot_index=0, cam_per_robot=1, camera_path="Camera"): """ The function create first the ROSBridge Camera and then the corresponding viewport. index is the number of the camera for the given robot. headless is a boolean that indicates if the simulation is headless or not (i.e. create a visual viewport or not). robot_index correspond to the n-th robot in the scene. """ resolution = tuple(resolution) camera_path = path + f"/{camera_path}" index = robot_index * cam_per_robot + index stage = omni.usd.get_context().get_stage() camera = stage.GetPrimAtPath(camera_path) old_h_ape.append(camera.GetAttribute("horizontalAperture").Get()) old_v_ape.append(camera.GetAttribute("verticalAperture").Get()) viewport_name = "Viewport" + (f" {index + 1}" if str(index + 1) != "0" and str(index + 1) != "1" else "") sc.step() keys = og.Controller.Keys (camera_graph, _, _, _) = og.Controller.edit( { "graph_path": f"{path}/ROSCamera_{index}_Graph", "evaluator_name": "push", "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND, }, { keys.CREATE_NODES: [ ("OnTick", "omni.graph.action.OnTick"), ("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"), ("setViewportResolution", "omni.isaac.core_nodes.IsaacSetViewportResolution"), ("getRenderProduct", "omni.isaac.core_nodes.IsaacGetViewportRenderProduct"), ("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"), ("cameraHelperRgb", "omni.isaac.ros_bridge.ROS1CameraHelper"), ("cameraHelperInfo", "omni.isaac.ros_bridge.ROS1CameraHelper"), ("cameraHelperDepth", "omni.isaac.ros_bridge.ROS1CameraHelper"), ], keys.CONNECT: [ ("OnTick.outputs:tick", "createViewport.inputs:execIn"), ("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"), ("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"), ("createViewport.outputs:execOut", "setViewportResolution.inputs:execIn"), ("createViewport.outputs:viewport", "setViewportResolution.inputs:viewport"), ("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"), ("getRenderProduct.outputs:renderProductPath", "setCamera.inputs:renderProductPath"), ("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"), ("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"), ("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"), ("getRenderProduct.outputs:renderProductPath", "cameraHelperRgb.inputs:renderProductPath"), ("getRenderProduct.outputs:renderProductPath", "cameraHelperInfo.inputs:renderProductPath"), 
("getRenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"), ], og.Controller.Keys.SET_VALUES: [ ("createViewport.inputs:viewportId", index), ("setViewportResolution.inputs:height", int(resolution[1])), ("setViewportResolution.inputs:width", int(resolution[0])), ("cameraHelperRgb.inputs:frameId", path[1:]), ("cameraHelperRgb.inputs:topicName", path + f"/{index}/rgb/image_raw"), ("cameraHelperRgb.inputs:type", "rgb"), ("cameraHelperDepth.inputs:frameId", path[1:]), ("cameraHelperDepth.inputs:topicName", path + f"/{index}/depth/image_raw"), ("cameraHelperDepth.inputs:type", "depth"), ("cameraHelperInfo.inputs:frameId", path[1:]), ("cameraHelperInfo.inputs:topicName", path + f"/{index}/camera_info"), ("cameraHelperInfo.inputs:type", "camera_info"), ], }, ) set_targets( prim=omni.usd.get_context().get_stage().GetPrimAtPath(f"{path}/ROSCamera_{index}_Graph/setCamera"), attribute="inputs:cameraPrim", target_prim_paths=[camera_path], ) og.Controller.evaluate_sync(camera_graph) for _ in range(5): sc.step() omni.kit.app.get_app().update() viewport_handle = [x for x in omni.kit.viewport.window.get_viewport_window_instances()][-1].viewport_api viewport_handle.set_texture_resolution((resolution[0], resolution[1])) for _ in range(5): sc.step() omni.kit.app.get_app().update() return camera_graph.get_path_to_graph(), viewport_handle def add_joint_state(path: str): if path.startswith("/"): path = path[1:] og.Controller.edit( {"graph_path": f"/{path}/JointActionGraph", "evaluator_name": "execution"}, { og.Controller.Keys.CREATE_NODES: [ ("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"), ("PublishJointState", "omni.isaac.ros_bridge.ROS1PublishJointState"), ("SubscribeJointState", "omni.isaac.ros_bridge.ROS1SubscribeJointState"), ("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"), ], og.Controller.Keys.CONNECT: [ ("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"), ("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"), ("OnImpulseEvent.outputs:execOut", "ArticulationController.inputs:execIn"), ("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"), ("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"), ("SubscribeJointState.outputs:positionCommand", "ArticulationController.inputs:positionCommand"), ("SubscribeJointState.outputs:velocityCommand", "ArticulationController.inputs:velocityCommand"), ("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"), ], og.Controller.Keys.SET_VALUES: [ # Providing path to Articulation Controller node # Providing the robot path is equivalent to setting the targetPrim in Articulation Controller node ("ArticulationController.inputs:usePath", True), ("ArticulationController.inputs:robotPath", "/" + path), # Assigning topic names to clock publishers ("PublishJointState.inputs:topicName", "/" + path + "/joint_states"), ("SubscribeJointState.inputs:topicName", "/" + path + "/joint_commands"), ], }, ) # set_target_prims(primPath=f"/{path}/JointActionGraph/SubscribeJointState", targetPrimPaths=[f"/{path}"]) set_target_prims(primPath=f"/{path}/JointActionGraph/PublishJointState", targetPrimPaths=[f"/{path}"]) return f"/{path}/JointActionGraph" def add_clock(): (_clock_graph, _, _, _) = og.Controller.edit( {"graph_path": "/ClockActionGraph", "evaluator_name": "push", "pipeline_stage": 
og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND, }, { og.Controller.Keys.CREATE_NODES: [ ("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"), ("OnTick", "omni.graph.action.OnTick"), ("PublishManualClock", "omni.isaac.ros_bridge.ROS1PublishClock"), ], og.Controller.Keys.CONNECT: [ # Connecting execution of OnImpulseEvent node to PublishManualClock so it will only publish when an impulse event is triggered ("OnTick.outputs:tick", "PublishManualClock.inputs:execIn"), # Connecting simulationTime data of ReadSimTime to the clock publisher nodes ("ReadSimTime.outputs:simulationTime", "PublishManualClock.inputs:timeStamp"), ], og.Controller.Keys.SET_VALUES: [ # Assigning topic names to clock publishers ("PublishManualClock.inputs:topicName", "/clock"), ], }, ) return _clock_graph def get_robot_yaw(x, y, z, env_mesh, shifts): """ Checks the best robot yaw angle for the given position. Cast rays from the robot position to the environment mesh and returns the angle It considers 36 rays. For each ray we compute the distance to the nearest point on the mesh. If the distance is infinite, it gets interpolated. We compute a rolling window sum (with a window size of 4 rays) of the distances. Return the best yaw angle in RADIANS. """ checking_steps = 36 angles = [[np.cos(np.pi * 2.0 / checking_steps * c_step), np.sin(np.pi * 2.0 / checking_steps * c_step), 0] for c_step in range(checking_steps)] positions = [[x + shifts[0], y + shifts[1], z + shifts[2]] for _ in range(checking_steps)] checking_rays = trimesh.proximity.longest_ray(env_mesh, positions, angles) checking_rays[checking_rays < 0] = 0 nans, x = inf_helper(checking_rays) checking_rays[nans] = np.interp(x(nans), x(~nans), checking_rays[~nans]) checking_rays[checking_rays > 8] = 8 rolling_rays = int(40 / (360 / checking_steps)) checking_rays = np.append(checking_rays, checking_rays[:rolling_rays - 1]) checking_rays = np.convolve(checking_rays, np.ones(rolling_rays, dtype=int), 'valid') / rolling_rays return (np.argmax(checking_rays) + rolling_rays / 2) * 2 * np.pi / checking_steps def get_vp_list(): from omni.kit.viewport.window import get_viewport_window_instances return [x for x in get_viewport_window_instances()] def create_viewport(camera_path, is_headless, index, resolution, old_h_ape, old_v_ape, sc): """ The function create the viewport for the given camera. Creates an handle, a viewport and the window position/size if the system is not headless. 
""" stage = omni.usd.get_context().get_stage() camera = stage.GetPrimAtPath(camera_path) old_h_ape.append(camera.GetAttribute("horizontalAperture").Get()) old_v_ape.append(camera.GetAttribute("verticalAperture").Get()) index += 1 # omniverse starts from 1 viewport_name = "Viewport" + (f" {index}" if str(index) != "0" and str(index) != "1" else "") viewport = omni.kit.viewport.utility.get_active_viewport_window(window_name=viewport_name) viewport_handle = omni.kit.viewport.utility.get_viewport_from_window_name(viewport_name) if not viewport_handle: viewport = omni.kit.viewport.utility.create_viewport_window(name=viewport_name) viewport_handle = omni.kit.viewport.utility.get_viewport_from_window_name(viewport.name) if not is_headless: viewport.setPosition(1000, 400) viewport.height, viewport.width = 300, 300 viewport_handle.set_active_camera(camera_path) for _ in range(10): sc.step() viewport_handle.set_texture_resolution((resolution[0], resolution[1])) sc.step() return viewport_handle, viewport.name def ros_launchers_setup(roslaunch, env_limits_shifted, config): """ Setup the ros launchers for the simulation. We need an exploration manager for every robot, and a collision checking service to place the objects. """ roslaunch_files = [] roslaunch_args = [] launch_files = [] print("launching ros nodes...") if not config["only_placement"].get(): for i in range(config["num_robots"].get()): # TODO hack to be compatible with the old version if type(config["is_iRotate"].get()) == list: is_irotate = config["is_iRotate"].get()[i] else: is_irotate = config["is_iRotate"].get() if not is_irotate: cli_args1 = ["exploration_manager", "my_exploration.launch", # cli_args1 = ["/home/ebonetto/catkin_ws/src/FUEL/fuel_planner/exploration_manager/launch/my_exploration.launch", "box_min_x:={:.4f}".format(env_limits_shifted[0] - 0.2), "box_min_y:={:.4f}".format(env_limits_shifted[1] - 0.2), "box_min_z:={:.4f}".format(env_limits_shifted[2]), "box_max_x:={:.4f}".format(env_limits_shifted[3] + 0.2), "box_max_y:={:.4f}".format(env_limits_shifted[4] + 0.2), "box_max_z:={:.4f}".format(min(3, env_limits_shifted[5] - 0.1)), f"mav_name:={config['robot_base_prim_path'].get()}{i}"] roslaunch_files.append(roslaunch.rlutil.resolve_launch_arguments(cli_args1)[0]) roslaunch_args.append(cli_args1[2:]) launch_files.append((roslaunch_files[-1], roslaunch_args[-1])) else: cli_args1 = ["custom_joint_controller_ros_irotate", "publish_joint_commands_node.launch", "position_limit_x:={:.4f}".format(env_limits_shifted[3] + 0.2), "position_limit_y:={:.4f}".format(env_limits_shifted[4] + 0.2), "position_limit_z:={:.4f}".format(3), "robot_id:=1", "frame_id:='base'"] roslaunch_files.append(roslaunch.rlutil.resolve_launch_arguments(cli_args1)[0]) roslaunch_args.append(cli_args1[2:]) launch_files.append((roslaunch_files[-1], roslaunch_args[-1])) # TODO hack because we pre-cache the robot mesh if type(config["robot_mesh_path"].get()) == list: mesh_path = config["robot_mesh_path"].get()[0] else: mesh_path = config["robot_mesh_path"].get() cli_args2 = ["collision_check", "collision_check.launch", "robot_mesh_path:={}".format(mesh_path)] roslaunch_file2 = roslaunch.rlutil.resolve_launch_arguments(cli_args2)[0] roslaunch_args2 = cli_args2[2:] launch_files.append((roslaunch_file2, roslaunch_args2)) return launch_files def create_imu_message(frame, last_reading, meters_per_unit): """ Create the IMU message from the last reading. 
""" imu_msg = Imu() imu_msg.header.frame_id = frame[1:] if frame.startswith("/") else frame imu_msg.header.stamp = rospy.Time.now() imu_msg.angular_velocity.x = last_reading.ang_vel_x imu_msg.angular_velocity.y = last_reading.ang_vel_y imu_msg.angular_velocity.z = last_reading.ang_vel_z imu_msg.linear_acceleration.x = last_reading.lin_acc_x * meters_per_unit * meters_per_unit imu_msg.linear_acceleration.y = last_reading.lin_acc_y * meters_per_unit * meters_per_unit imu_msg.linear_acceleration.z = last_reading.lin_acc_z * meters_per_unit * meters_per_unit imu_msg.angular_velocity_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 0] imu_msg.linear_acceleration_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 0] return imu_msg def setup_imu_sensor(_is, config, imu_sensor_path): """ Setup the IMU sensor config. Keep in mind that this is relative to the parent body, so any transform the parent has is already reflected. """ add_imu_sensor, sensor = omni.kit.commands.execute( "IsaacSensorCreateImuSensor", path="/imu_sensor", parent=imu_sensor_path, sensor_period=1 / config["physics_hz"].get(), orientation=Gf.Quatd(1, 0, 0, 0), visualize=False, ) if not add_imu_sensor: raise Exception("Failed to add IMU sensor") return sensor def pub_imu(_is, imu_pubs, robot_imu_frames, meters_per_unit): """ Simple message publisher """ for index, handle in enumerate(robot_imu_frames): last_reading = _is.get_sensor_sim_reading(handle + "/imu_sensor") imu_pubs[index].publish(create_imu_message(handle, last_reading, meters_per_unit)) def pub_cam_pose(camera_pose_frames, cam_pose_pubs, _dc, meters_per_unit): """ Simple message publisher """ for index, handle in enumerate(camera_pose_frames): camera_body_ptr = _dc.get_rigid_body(handle) cam_pose_pubs[index].publish(create_camera_pose_message(_dc, camera_body_ptr, handle, meters_per_unit)) def pub_odom(robot_odom_frames, odom_pubs, _dc, meters_per_unit, diff_odom_frames=[]): """ Simple message publisher """ odoms = [] angles = [] if len(diff_odom_frames) == 0: for index, handle in enumerate(robot_odom_frames): robot_body_ptr = _dc.get_rigid_body(handle) odom = create_odom_message(_dc, robot_body_ptr, handle, meters_per_unit) odoms.append([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z]) angles.append(Rotation.from_quat( [odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w]).as_euler("XYZ")) odom_pubs[index].publish(odom) else: for index, handle in enumerate(robot_odom_frames): robot_body_ptr = _dc.get_rigid_body(handle) diff_body_ptr = _dc.get_rigid_body(diff_odom_frames[index]) diff_handle = diff_odom_frames[index][1:] if diff_odom_frames[index].startswith("/") else diff_odom_frames[ index] odom = create_diff_odom_message(_dc, robot_body_ptr, handle, meters_per_unit, diff_body_ptr, diff_handle) odoms.append([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z]) angles.append(Rotation.from_quat( [odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w]).as_euler("XYZ")) odom_pubs[index].publish(odom) return odoms, angles def import_robot(robot_base_prim_path, n, usd_robot_path, local_file_prefix=''): """ Add the robot to the stage. Add semantics. 
""" stage = omni.usd.get_context().get_stage() res, _ = omni.kit.commands.execute("CreateReferenceCommand", usd_context=omni.usd.get_context(), path_to=f"{robot_base_prim_path}{n}", asset_path=local_file_prefix + usd_robot_path, instanceable=False) if res: clear_properties(f"{robot_base_prim_path}{n}") add_semantics(stage.GetPrimAtPath(f"{robot_base_prim_path}{n}"), "robot") else: raise Exception("Failed to import robot") def get_valid_robot_location(environment, first): """ Query the service to place the robot in a free space AND compute an initial good yaw. """ x, y, z, _ = position_object(environment, type=0, reset=first) # robot is nearly circular so I do not have to worry about collisionsif environment.env_mesh != None: if environment.env_mesh != None: yaw = get_robot_yaw(x[0], y[0], z[0], environment.env_mesh, environment.shifts) print(f"Initial yaw: {yaw}") return x[0], y[0], z[0], yaw def control_camera(viewport, sc): sc.step() if viewport is not None: import omni.syntheticdata._syntheticdata as sd stage = omni.usd.get_context().get_stage() # Required for editing the SDGPipeline graph which exists in the Session Layer with Usd.EditContext(stage, stage.GetSessionLayer()): # Get name of rendervar for RGB sensor type rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name) # Get path to IsaacSimulationGate node in RGB pipeline rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path( rv_rgb + "IsaacSimulationGate", viewport.get_render_product_path() ) # Get name of rendervar for DistanceToImagePlane sensor type rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar( sd.SensorType.DistanceToImagePlane.name) # Get path to IsaacSimulationGate node in Depth pipeline depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path( rv_depth + "IsaacSimulationGate", viewport.get_render_product_path() ) # Get path to IsaacSimulationGate node in CameraInfo pipeline camera_info_gate_path = omni.syntheticdata.SyntheticData._get_node_path( "PostProcessDispatch" + "IsaacSimulationGate", viewport.get_render_product_path() ) return rgb_camera_gate_path, depth_camera_gate_path, camera_info_gate_path def add_ros_components(robot_base_prim_path, n, ros_transform_components, ros_camera_list, viewport_window_list, camera_pose_frames, cam_pose_pubs, imu_pubs, robot_imu_frames, robot_odom_frames, odom_pubs, lidars, dynamic_prims, config, old_h_ape, old_v_ape, _is, simulation_context, _clock, irotate=False): """ Add the ROS components to the robot. This is done because we need different topics for each robot. Components added: - joint_states (publisher and subscriber) - tf broadcaster - camera - camera pose - imu - odom When necessary we create also the corresponding publisher (whenever the RosBridge component is not available). 
Publishers created: - imu - odom - camera pose """ ros_transform_components.append(add_joint_state(f"{robot_base_prim_path}{n}")) ros_transform_components.append(add_pose_tree(f"{robot_base_prim_path}{n}", irotate)) # create camera component, viewport = add_camera_and_viewport(f"{robot_base_prim_path}{n}/camera_link", config["robot_sensor_size"].get(), old_h_ape, old_v_ape, simulation_context, 0, n, cam_per_robot=1) # cam index is useful if you want multiple cameras cam_outputs = control_camera(viewport, simulation_context) ros_camera_list.append([n + 0, component, cam_outputs]) viewport_window_list.append(viewport) # component, viewport = add_camera_and_viewport(f"{robot_base_prim_path}{n}/camera_link", # config["robot_sensor_size"].get(), # old_h_ape, old_v_ape, simulation_context, # 1, n, cam_per_robot=2) # cam index is useful if you want multiple cameras # cam_outputs = control_camera(viewport, simulation_context) # ros_camera_list.append([n + 1, component, cam_outputs]) # viewport_window_list.append(viewport) omni.kit.app.get_app().update() # append camera pose frame (we need only one) and pubs camera_pose_frames.append(f"{robot_base_prim_path}{n}/camera_link") cam_pose_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/camera/pose", PoseStamped, queue_size=10)) for _ in range(10): og.Controller.set(og.Controller.attribute(f"{ros_transform_components[-1]}/OnImpulseEvent.state:enableImpulse"), True) og.Controller.set(og.Controller.attribute(f"{ros_transform_components[-2]}/OnImpulseEvent.state:enableImpulse"), True) og.Controller.evaluate_sync(_clock) simulation_context.step() # attach IMU sensor to the robot if irotate: setup_imu_sensor(_is, config, f"{robot_base_prim_path}{n}/imu_link") imu_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/imu_cam", Imu, queue_size=10)) robot_imu_frames.append(f"{robot_base_prim_path}{n}/imu_link") setup_imu_sensor(_is, config, f"{robot_base_prim_path}{n}/base_link") imu_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/imu_body", Imu, queue_size=10)) robot_imu_frames.append(f"{robot_base_prim_path}{n}/base_link") robot_odom_frames.append(f"{robot_base_prim_path}{n}/base_link") else: setup_imu_sensor(_is, config, f"{robot_base_prim_path}{n}/imu_link") imu_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/imu_body", Imu, queue_size=10)) robot_imu_frames.append(f"{robot_base_prim_path}{n}/imu_link") setup_imu_sensor(_is, config, f"{robot_base_prim_path}{n}/camera_link") imu_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/imu_camera", Imu, queue_size=10)) robot_imu_frames.append(f"{robot_base_prim_path}{n}/camera_link") robot_odom_frames.append(f"{robot_base_prim_path}{n}/yaw_link") odom_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/odom", Odometry, queue_size=10)) stage = omni.usd.get_context().get_stage() dynamic_prims.append(stage.GetPrimAtPath(f"{robot_base_prim_path}{n}")) if lidars: stage = omni.usd.get_context().get_stage() dynamic_prims.append(stage.GetPrimAtPath(f"{robot_base_prim_path}{n}")) sensor = add_lidar(f"{robot_base_prim_path}{n}/yaw_link", [0, 0, -.1], [0, 0, 0], is_3d=True, is_2d=True) lidars.append(sensor) def get_robot_joint_init_loc(name): """ It gets the initial location of the robot's joints :param name: The name of the robot :return: The initial location of the robot's joints. 
""" stage = omni.usd.get_context().get_stage() x = UsdPhysics.Joint.Get(stage, name + '/base_link/x_joint').GetLocalPos0Attr().Get()[0] y = UsdPhysics.Joint.Get(stage, name + '/x_link/y_joint').GetLocalPos0Attr().Get()[1] z = UsdPhysics.Joint.Get(stage, name + '/y_link/z_joint').GetLocalPos0Attr().Get()[2] roll = UsdPhysics.RevoluteJoint.Get(stage, name + '/z_link/roll_joint').GetLocalRot0Attr().Get() roll = Rotation.from_quat([roll.imaginary[0], roll.imaginary[1], roll.imaginary[2], roll.real]).as_euler('XYZ')[0] pitch = UsdPhysics.RevoluteJoint.Get(stage, name + '/roll_link/pitch_joint').GetLocalRot0Attr().Get() pitch = Rotation.from_quat([pitch.imaginary[0], pitch.imaginary[1], pitch.imaginary[2], pitch.real]).as_euler('XYZ')[ 1] yaw = UsdPhysics.RevoluteJoint.Get(stage, name + '/pitch_link/yaw_joint').GetLocalRot0Attr().Get() yaw = Rotation.from_quat([yaw.imaginary[0], yaw.imaginary[1], yaw.imaginary[2], yaw.real]).as_euler('XYZ')[2] return x, y, z, roll, pitch, yaw def set_drone_joints_init_loc(name: str, pos: [], orientation: [], upper_zlim: float=100, lower_zlim: float=0, irotate=False): """ Move the drone to the specified location by acting on the JOINTS. PLEASE NOTE: the intial joint position published by joint_states will be 0,0,0 strangely. #IsaacBug The joints should be named as follows: - base_link/x_joint - x_link/y_joint - y_link/z_joint - z_link/roll_joint - roll_link/pitch_joint - pitch_link/yaw_joint name: the name of the robot (e.g. "my_robot_0", the prim path) pos: the position of the robot (x,y,z) orientation: the orientation of the robot (roll,pitch,yaw), in rad upper_zlim: the z limit of the robot (z) irotate: if True, the joints considered are the iRotate ones """ x, y, z = pos upper_zlim = max(upper_zlim, z) roll, pitch, yaw = orientation stage = omni.usd.get_context().get_stage() if irotate: UsdPhysics.Joint.Get(stage, name + '/x_link/x_joint').GetLocalPos0Attr().Set(Gf.Vec3f(x, 0, 0)) UsdPhysics.Joint.Get(stage, name + '/y_link/y_joint').GetLocalPos0Attr().Set(Gf.Vec3f(0, y, 0)) yaw = np.rad2deg(yaw) quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), yaw) ) UsdPhysics.RevoluteJoint.Get(stage, name + '/yaw_link/yaw_joint').GetLocalRot1Attr().Set(Gf.Quatf(quat.GetQuat())) else: UsdPhysics.Joint.Get(stage, name + '/base_link/x_joint').GetLocalPos0Attr().Set(Gf.Vec3f(x, 0, 0)) UsdPhysics.Joint.Get(stage, name + '/x_link/y_joint').GetLocalPos0Attr().Set(Gf.Vec3f(0, y, 0)) UsdPhysics.Joint.Get(stage, name + '/y_link/z_joint').GetLocalPos0Attr().Set(Gf.Vec3f(0, 0, z)) stage.GetPrimAtPath(name + '/y_link/z_joint').GetAttribute('physics:lowerLimit').Set(-z + lower_zlim) stage.GetPrimAtPath(name + '/y_link/z_joint').GetAttribute('physics:upperLimit').Set(upper_zlim - z) roll = np.rad2deg(roll) quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), roll) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), 0) ) UsdPhysics.RevoluteJoint.Get(stage, name + '/z_link/roll_joint').GetLocalRot0Attr().Set(Gf.Quatf(quat.GetQuat())) pitch = np.rad2deg(pitch) quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), pitch) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), 90) ) UsdPhysics.RevoluteJoint.Get(stage, name + '/roll_link/pitch_joint').GetLocalRot0Attr().Set( Gf.Quatf(quat.GetQuat())) yaw = np.rad2deg(yaw) quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), 0) * Gf.Rotation(Gf.Vec3d.YAxis(), 0) * Gf.Rotation(Gf.Vec3d.ZAxis(), yaw) ) UsdPhysics.RevoluteJoint.Get(stage, name + 
'/pitch_link/yaw_joint').GetLocalRot0Attr().Set(Gf.Quatf(quat.GetQuat())) def add_robot_traj(path: str, config, meters_per_unit, time_codes_per_second): """ It adds a translation and rotation animation to the given path, using the given configuration, meters per unit, and time codes per second :param path: The path to the USD stage :type path: str :param config: The configuration file that contains the robot trajectory :param meters_per_unit: The scale of the scene :param time_codes_per_second: This is the number of time codes per second. This is the same as the frame rate of the animation """ clear_properties(path) for entry in config["robot_traj"].get(): add_translate_anim(path, Gf.Vec3d(entry["pose"]["x"] / meters_per_unit, entry["pose"]["y"] / meters_per_unit, entry["pose"]["z"] / meters_per_unit), entry["time"] * time_codes_per_second) add_rotation_anim(path, Gf.Vec3d(entry["pose"]["roll"], entry["pose"]["pitch"], entry["pose"]["yaw"]), entry["time"] * time_codes_per_second, use_double=True) def diff_angle(alpha, beta): dist = (alpha - beta + np.pi + 2 * np.pi) % (2 * np.pi) - np.pi return dist # assume position control def check_pose_and_goals(init_loc, init_angle, c_pose, c_angle, path, goal_list, meters_per_unit, first): """ It sets the target position of the joints to the next goal in the list :param init_loc: the initial location of the robot :param init_angle: the initial orientation of the robot :param c_pose: current pose of the robot :param c_angle: current angle of the robot :param path: the path to the robot in the simulation :param goal_list: a list of goals, each goal is a list of 6 elements: x, y, z, roll, pitch, yaw :param meters_per_unit: This is the scale of the robot :param first: whether this is the first time the function is called :return: The goal list is being returned. 
""" dist_roll = abs(diff_angle(np.deg2rad(goal_list[0][3]), diff_angle(c_angle[0], init_angle[0]))) dist_pitch = abs(diff_angle(np.deg2rad(goal_list[0][4]), diff_angle(c_angle[1], init_angle[1]))) dist_yaw = abs(diff_angle(np.deg2rad(goal_list[0][5]), diff_angle(c_angle[2], init_angle[2]))) sum_dist = dist_roll + dist_pitch + dist_yaw if not first and \ (np.linalg.norm(np.array([goal_list[0][0], goal_list[0][1], goal_list[0][2]]) - np.array(c_pose) + np.array( init_loc[0:3])) > 0.8 \ or sum_dist > 0.6): return goal_list if not first: goal_list.pop(0) if len(goal_list) == 0: return [] omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/base_link/x_joint.drive:linear:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/base_link/x_joint.drive:linear:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/base_link/x_joint.drive:linear:physics:maxForce'), value=500.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/base_link/x_joint.physxJoint:maxJointVelocity'), value=200.0, # cm/s prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/base_link/x_joint.drive:linear:physics:targetPosition'), value=(goal_list[0][0]) / meters_per_unit, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/x_link/y_joint.drive:linear:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/x_link/y_joint.drive:linear:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/x_link/y_joint.drive:linear:physics:maxForce'), value=500.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/x_link/y_joint.physxJoint:maxJointVelocity'), value=200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/x_link/y_joint.drive:linear:physics:targetPosition'), value=(goal_list[0][1]) / meters_per_unit, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/y_link/z_joint.drive:linear:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/y_link/z_joint.drive:linear:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/y_link/z_joint.drive:linear:physics:maxForce'), value=500.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/y_link/z_joint.physxJoint:maxJointVelocity'), value=200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/y_link/z_joint.drive:linear:physics:targetPosition'), value=(goal_list[0][2]) / meters_per_unit, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/z_link/roll_joint.drive:angular:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/z_link/roll_joint.drive:angular:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/z_link/roll_joint.drive:angular:physics:maxForce'), value=300.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/z_link/roll_joint.physxJoint:maxJointVelocity'), value=0.2, prev=0.0) omni.kit.commands.execute('ChangeProperty', 
prop_path=Sdf.Path(f'{path}/z_link/roll_joint.drive:angular:physics:targetPosition'), value=(goal_list[0][3]), prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/roll_link/pitch_joint.drive:angular:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/roll_link/pitch_joint.drive:angular:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/roll_link/pitch_joint.drive:angular:physics:maxForce'), value=300.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/roll_link/pitch_joint.physxJoint:maxJointVelocity'), value=0.2, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/roll_link/pitch_joint.drive:angular:physics:targetPosition'), value=(goal_list[0][4]), prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/pitch_link/yaw_joint.drive:angular:physics:stiffness'), value=1200.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/pitch_link/yaw_joint.drive:angular:physics:damping'), value=1000.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/pitch_link/yaw_joint.drive:angular:physics:maxForce'), value=300.0, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/pitch_link/yaw_joint.physxJoint:maxJointVelocity'), value=1.3, prev=0.0) omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{path}/pitch_link/yaw_joint.drive:angular:physics:targetPosition'), value=(goal_list[0][5]), prev=0.0) return goal_list def add_irotate_ros_components(camera_odom_frames, camera_odom_pubs, lidar_components, robot_base_prim_path, n): """ Add the irotate-specific ros-components to the robot. 
""" camera_odom_frames.append(f"{robot_base_prim_path}{n}/cameraholder_link") camera_odom_pubs.append(rospy.Publisher(f"{robot_base_prim_path}{n}/camera_odom", Odometry, queue_size=10)) lidar_components.append(add_lidar(f"{robot_base_prim_path}{n}/lasersensor_link"), is_2d = True, is_3d=False) def add_lidar(path, translation=[0, 0, 0], orientation=[0, 0, 0], is_2d=True, is_3d=False, degrees=True): # drive sim applies 0.5,-0.5,-0.5,w(-0.5), we have to apply the reverse base_or = tf.Rotation.from_quat([0.5, -0.5, -0.5, -0.5]) orientation = tf.Rotation.from_euler('XYZ', orientation, degrees=degrees) orientation = (base_or * orientation).as_quat() success, sensor = omni.kit.commands.execute( "IsaacSensorCreateRtxLidar", path="/RTX_Lidar", parent=path, config="Example_Rotary", translation=(translation[0], translation[1], translation[2]), orientation=Gf.Quatd(orientation[3], orientation[0], orientation[1], orientation[2]), # Gf.Quatd is w,i,j,k ) omni.kit.app.get_app().update() omni.kit.app.get_app().update() omni.kit.app.get_app().update() render_product_path = rep.create.render_product(sensor.GetPath().pathString, resolution=(1, 1)) # _, render_product_path = create_hydra_texture([1, 1], sensor.GetPath().pathString) omni.kit.app.get_app().update() omni.kit.app.get_app().update() # add the lidar to the graph # config is isaac_sim-2022.2.1/exts/omni.sensors.nv.lidar/data/Example_Rotary.json if is_3d: writer = rep.writers.get("RtxLidar" + "ROS1PublishPointCloud") writer.initialize(topicName=f"{path}/lidar/point_cloud", frameId=path[1:]) writer.attach([render_product_path]) if is_2d: writer = rep.writers.get("RtxLidar" + "ROS1PublishLaserScan") writer.initialize(topicName=f"{path}/lidar/laser_scan", frameId=path[1:], rotationRate=100, horizontalFov=360, depthRange=[0.1,10000], horizontalResolution=0.1) writer.attach([render_product_path]) # todo for lidar one can change directly /Render/PostProcess/SDGPipeline/RenderProduct_Isaac_RtxSensorCpuIsaacComputeRTXLidarFlatScan # but NOT for the 3d lidar # todo theoretically I can avoid returning anything making just sure that I render at each loop return omni.syntheticdata.SyntheticData._get_node_path( "PostProcessDispatch" + "IsaacSimulationGate", render_product_path ) def add_npy_viewport(viewport_window_list, robot_base_prim_path, n, old_h_ape, old_v_ape, config, sc, tot_num_ros_cam=1): viewport_npy, _ = create_viewport(f"{robot_base_prim_path}{n}/camera_link/Camera_npy", config["headless"].get(), tot_num_ros_cam + 1 * n, config["npy_sensor_size"].get(), old_h_ape, old_v_ape, sc) viewport_window_list.append(viewport_npy) def change_joint_limit(joint: str, limit): omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(f'{joint}'), value=(limit), prev=0.0)
45,348
Python
45.086382
135
0.661066
eliabntt/GRADE-RR/simulator/utils/environment_utils.py
""" Use this class to load the environment and the relative information. The init function should be used to load the environment. It will get the environment from a given folder and create the necessary support variables. """ from omni.isaac.occupancy_map import _occupancy_map from omni.isaac.occupancy_map.scripts.utils import update_location, compute_coordinates, generate_image import utils.misc_utils from utils.misc_utils import * class environment: def __init__(self, config, rng = np.random.default_rng(), local_file_prefix = '', meters_per_unit=0.01): self.get_environment(config, rng, local_file_prefix) self.meters_per_unit = meters_per_unit def set_meters_per_unit(self, meters_per_unit): self.meters_per_unit = meters_per_unit def get_environment(self, config, rng: np.random.default_rng, local_file_prefix: str): """ If the name is not specified the environment will be taken at random using the rng. Based on the config one can decide if 1. loading the stl of the environment 2. loading the environment limits with the npy file [note that this is preferable, otherwise default values will be used] 3. Using the limits the system will compute the necessary translations to center the environment in 0,0,0 config: the configuration processed by the main algorithm rng: global rng local_file_prefix: necessary to access the local storage from isaacsim """ self.env_usd_export_folder = config["env_path"].get() if config["fix_env"].get() != "": self.env_name = config["fix_env"].get() else: self.env_name = rng.choice([f for f in os.listdir(self.env_usd_export_folder) if not f.startswith('.')]) self.env_path = local_file_prefix + os.path.join(self.env_usd_export_folder, self.env_name, self.env_name + ".usd") if config["use_stl"].get(): self.env_stl_path = os.path.join(self.env_usd_export_folder, self.env_name, self.env_name + ".stl") self.env_mesh = trimesh.load(os.path.join(self.env_usd_export_folder, self.env_name, self.env_name + ".stl")) else: self.env_stl_path = None self.env_mesh = None if config["use_npy"].get(): self.env_info = np.load(os.path.join(self.env_usd_export_folder, self.env_name, self.env_name + ".npy"), allow_pickle=True) self.env_info = self.env_info.tolist() else: self.env_info = [0, 0, 0, 0, 0, 0, np.array([[-1000, -1000], [-1000, 1000], [1000, 1000], [1000, -1000]])] self.env_limits = self.env_info[0:6] self.shifts = [(self.env_limits[0] + self.env_limits[3]) / 2, (self.env_limits[1] + self.env_limits[4]) / 2, self.env_limits[2]] self.env_limits_shifted = [self.env_limits[i] - self.shifts[i % 3] for i, _ in enumerate(self.env_limits)] self.area_polygon = get_area(self.env_info[6]) self.env_polygon = [Point(i[0], i[1], 0) for i in self.env_info[-1]] def generate_map(self, out_path: str, zlim=[0, 1], cell_size = 0.05, origin=[0, 0, 0]): """ WARNING: HACK! ALL UNKNWON ARE WHITE! Generates a map for the environment and save it to the out_path location in the disk. First it searches for a non colliding location. Then it creates a map of the environment. We ovverride the unknown color to be "white" (i.e. free) as the system map unknown unreachable areas. out_path: the folder where to save the map z_limit: height to consider for projection cell_size: size of a single cell in the map (cm) origin: computed origin. 
Must be a free cell """ bound = int( max(abs(self.env_limits_shifted[0]) + abs(self.env_limits_shifted[3]), abs(self.env_limits_shifted[1]) + abs(self.env_limits_shifted[4])) / self.meters_per_unit * 1.5) _om = _occupancy_map.acquire_occupancy_map_interface() lower_bound = [-bound, -bound, zlim[0]/ self.meters_per_unit] lower_bound = np.array(lower_bound) - np.array(origin) / self.meters_per_unit upper_bound = [bound, bound, zlim[1]/ self.meters_per_unit *.8] upper_bound = np.array(upper_bound) - np.array(origin) / self.meters_per_unit center = np.array(origin) / self.meters_per_unit center[2] += 0.1 / self.meters_per_unit # 10 cm above the floor update_location(_om, center, lower_bound, upper_bound) _om.set_cell_size(cell_size/self.meters_per_unit) _om.generate() image_buffer = generate_image(_om, [0, 0, 0, 255], [255, 255, 255, 255], [255, 255, 255, 255]) dims = _om.get_dimensions() _im = Image.frombytes("RGBA", (dims.x, dims.y), bytes(image_buffer)) image_width = _im.width image_height = _im.height size = [0, 0, 0] size[0] = image_width * cell_size size[1] = image_height * cell_size scale_to_meters = 1.0 / self.meters_per_unit default_image_name = os.path.join(out_path, "map.png") top_left, top_right, bottom_left, bottom_right, image_coords = compute_coordinates(_om, cell_size) ros_yaml_file_text = "image: " + default_image_name ros_yaml_file_text += f"\nresolution: {float(cell_size / scale_to_meters)}" ros_yaml_file_text += ( f"\norigin: [{float(bottom_left[0] / scale_to_meters)}, {float(bottom_left[1] / scale_to_meters)}, 0.0000]" ) ros_yaml_file_text += "\nnegate: 0" ros_yaml_file_text += f"\noccupied_thresh: {0.65}" ros_yaml_file_text += "\nfree_thresh: 0.196" _im.save(default_image_name) with open(default_image_name[:-3] + "yaml", 'w') as f: f.write(ros_yaml_file_text) center = lower_bound center[2] = -100000000.0 update_location(_om, center, [0, 0, 0], [0, 0, 0]) _om.generate() # disable_extension('omni.isaac.occupancy_map') def load_and_center(self, prim_path: str = "/World/home", correct_paths_req: bool = False, push_in_floor: bool = False): """ Load the environment from the usd path env_path Center it wrt the world coordinate frames The environment is loaded at prim_path prim_path: path that the environment should have in the prim tree correct_paths_req: if True, corrects the paths of the assets in the environment push_in_floor: if True, pushes the environment in the floor a bit. Useful for thin meshes that sometimes are not correctly visualized (flickering) """ stage = omni.usd.get_context().get_stage() print("loading environment {}".format(self.env_name)) # from omni.isaac.core.utils.nucleus import find_nucleus_server # result, nucleus_server = find_nucleus_server() res, _ = omni.kit.commands.execute('CreateReferenceCommand', usd_context=omni.usd.get_context(), path_to=prim_path, asset_path=self.env_path, # asset_path= nucleus_server + "/Isaac/Environments/Simple_Warehouse/warehouse.usd", instanceable=True) if res: clear_properties(prim_path) if correct_paths_req: print("Correcting paths... 
--- note that you might want to change utils/misc_utils.py:correct_paths") try: correct_paths(prim_path) except: print("Failed to correct paths for {}".format(prim_path)) time.sleep(10) else: print("Not correcting paths --- check that all textures are visible and the reflection maps are correct") # center the home in the middle of the environment set_translate(stage.GetPrimAtPath(prim_path), list(- np.array(self.shifts) / self.meters_per_unit)) for child in stage.GetPrimAtPath(prim_path).GetAllChildren(): if "xform" == child.GetTypeName().lower(): clear_properties(str(child.GetPath())) if push_in_floor and "floor" not in str(child.GetPath()).lower(): myold = child.GetProperty('xformOp:translate').Get() myold = [myold[0], myold[1], myold[2] - 0.04] set_translate(child, list(np.array(myold))) return prim_path else: raise Exception("Failed to load environment {}".format(self.env_name))
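# --- Minimal usage sketch, not part of the original class. It assumes a parsed confuse
# `config`, a numpy Generator `rng` and an already-initialized Isaac Sim stage, and shows
# the intended call order: construct the helper (which picks and indexes an environment
# from the configured folder), load and center its USD, then optionally export a
# ROS-style occupancy map. `map_out_folder` is an illustrative placeholder.
def _example_environment_usage(config, rng, map_out_folder="/tmp"):
    env = environment(config, rng, local_file_prefix="", meters_per_unit=0.01)
    prim_path = env.load_and_center("/World/home", correct_paths_req=False)
    # zlim is the height band projected into the 2D map; origin must be a free cell.
    env.generate_map(map_out_folder, zlim=[0, 1], cell_size=0.05, origin=[0, 0, 0])
    return env, prim_path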
7,793
Python
45.118343
148
0.674451
eliabntt/GRADE-RR/simulator/utils/zebra_utils.py
import utils.misc_utils from omni.kit.sequencer.usd import SequenceSchema, usd_sequencer from utils.misc_utils import * def load_zebra(zebra_base_prim_path, n, asset_path): stage = omni.usd.get_context().get_stage() res, _ = omni.kit.commands.execute("CreateReferenceCommand", usd_context=omni.usd.get_context(), path_to=f"{zebra_base_prim_path}{n}", asset_path=asset_path, instanceable=False) clear_properties(f"{zebra_base_prim_path}{n}") return f"{zebra_base_prim_path}{n}" def place_zebras(frame_info, rng, floor_points, meters_per_unit, hidden_position, config, max_anim_len, zebra_info): stage = omni.usd.get_context().get_stage() # create bool array as big as floor_points occupied = np.zeros((floor_points.shape[0]-2, floor_points.shape[1]-2), dtype=bool) deleted_zebras = [] out_frame_info = {} min_number_zebras = config["min_number_zebras"].get() max_number_zebras = config["max_number_zebras"].get() selected_zebras = rng.choice(list(frame_info.keys()), size=int(rng.uniform(min_number_zebras, max_number_zebras)), replace=False) for zebra in selected_zebras: out_frame_info[zebra] = frame_info[zebra].copy() out_frame_info[zebra] = randomize_frame(out_frame_info[zebra], rng, max_anim_len, zebra_info) # process the box and extract xmin xmax ymin ymax box = np.array(out_frame_info[zebra]["box"]) xmin = np.min(box[:, 0]) xmax = np.max(box[:, 0]) ymin = np.min(box[:, 1]) ymax = np.max(box[:, 1]) # box is the 2D box box = np.array([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]) # random yaw rotation of the box yaw = rng.uniform(0, 2 * np.pi) # create a rotation matrix rot = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]]) # rotate the box box = np.matmul(box, rot) positioned = False newbox = [] # get intermediate int points for i in range(4): p1 = np.round(box[i]).astype(int) p2 = np.round(box[(i + 1) % 4]).astype(int) # compute all int numbers between p1 and p2 dx = p2[0] - p1[0] dy = p2[1] - p1[1] if dx == 0: x = p1[0] y = np.arange(min(p1[1], p2[1]), max(p1[1], p2[1]) + 1 if max(p1[1], p2[1]) >= 0 else -1) for j in range(len(y)): newbox.append([x, y[j]]) elif dy == 0: x = np.arange(min(p1[0], p2[0]), max(p1[0], p2[0]) + 1 if max(p1[0], p2[0]) >= 0 else -1) y = p1[1] for j in range(len(x)): newbox.append([x[j], y]) elif dx == 0 and dy == 0: newbox.append([p1[0], p1[1]]) else: x = np.arange(min(p1[0], p2[0]), max(p1[0], p2[0]) + 1 if max(p1[0], p2[0]) >= 0 else -1) y = p1[1] + (x - p1[0]) * dy / dx for j in range(len(x)): newbox.append([x[j], y[j]]) newbox = np.unique(np.array(newbox).astype(int), axis=0).astype(int) for _ in range(100): # get a random location in occupied -- this will be my center center = np.array([rng.integers(0, occupied.shape[1]), rng.integers(0, occupied.shape[0])]) # check if all the cells covered by the box in occupied are free -- not only the boundaries collision = False for x_coor, y_coor in newbox: try: if occupied[center[0] - y_coor, center[1] + x_coor]: collision = True break except IndexError: collision = True break if collision: break if not collision: tmp_floor_points = [] newcenter = np.array([center[0] + 1, center[1] + 1]) # if there is no collision, set the cells covered by the box to occupied for x_coor, y_coor in newbox: occupied[center[0] - y_coor, center[1] + x_coor] = True # get the corresponding floor point given the center and x_coor and col # NOTE THAT Y IS OPPOSITE SIGN tmp_floor_points.append(floor_points[newcenter[0] - y_coor, newcenter[1] + x_coor]) # set the position of the zebra to the center loc = 
np.mean(tmp_floor_points, axis=0) / meters_per_unit loc = np.array(floor_points[newcenter[0], newcenter[1]]) / meters_per_unit set_translate(stage.GetPrimAtPath(zebra), list(loc)) # set the rotation of the zebra to the roll, pitch, yaw # lower_point = np.min(tmp_floor_points, axis=0) # upper_point = np.max(tmp_floor_points, axis=0) # vector = np.array(upper_point) - np.array(lower_point) # compute roll pitch and yaw of vector # roll, pitch, yaw = Rotation.from_rotvec(vector).as_euler("XYZ") # transform = Rotation.from_matrix( # trimesh.PointCloud(tmp_floor_points).bounding_box_oriented.transform[:3, :3]).as_euler("XYZ") out_frame_info[zebra]["position"] = loc * meters_per_unit out_frame_info[zebra]["rotation"] = [0, 0, yaw] out_frame_info[zebra]["center"] = newcenter out_frame_info[zebra]["box"] = box set_rotate(stage.GetPrimAtPath(zebra), [0, 0, yaw]) # todo refine this to account for terrain positioned = True break if not positioned: print("Could not position zebra", zebra) # delete the zebra deleted_zebras.append(zebra) set_translate(stage.GetPrimAtPath(zebra), list(hidden_position)) for zebra in deleted_zebras: del out_frame_info[zebra] return out_frame_info def randomize_frame(zebra, rng, max_anim_len, zebra_info): stage = omni.usd.get_context().get_stage() zebra_path = zebra["path"] scale = rng.integers(40, 100) set_scale(stage.GetPrimAtPath(zebra_path), scale) zebra_name = zebra["name"] prim = stage.GetPrimAtPath(f"/World/Sequence{zebra_path}{zebra_path}_Clip") anim_len = zebra_info[zebra_name]["length"] timeslot = max_anim_len - rng.integers(0, anim_len) prim.GetAttribute("startTime").Set(Sdf.TimeCode(timeslot * 1.0)) prim.GetAttribute("endTime").Set( Sdf.TimeCode(float(max(timeslot + zebra_info[zebra_name]["length"], max_anim_len)))) points_in_mesh = zebra_info[zebra_name]["points"][max_anim_len - timeslot] * scale / 100 zebra = {"name": zebra_name, "time": timeslot, "used_frame": max_anim_len - timeslot + 1, "scale": scale, "box": trimesh.PointCloud(points_in_mesh).bounding_box.vertices, "path": zebra_path} return zebra def preload_all_zebras(config, rng, zebra_files, zebra_info, simulation_context, sequencer_drop_controller, max_anim_len, hidden_position): stage = omni.usd.get_context().get_stage() # load a random number of zebras between min_number_zebra and max_number_zebra num_zebras = config["max_number_zebras"].get() frame_info = {} for n in range(num_zebras): # load a random zebra zebra_file = rng.choice(zebra_files) # load the zebra zebra_path = load_zebra("/zebra_", n, zebra_file) scale = rng.integers(40, 100) set_scale(stage.GetPrimAtPath(zebra_path), scale) zebra_name = zebra_file.split("/")[-1].split(".")[0] add_semantics(stage.GetPrimAtPath(zebra_path), "zebra") timeslot = max_anim_len - rng.integers(0, zebra_info[zebra_name]["length"]) sequencer_drop_controller.sequencer_drop(stage.GetPrimAtPath("/World/Sequence"), zebra_path, float(timeslot)) prim = stage.GetPrimAtPath(f"/World/Sequence{zebra_path}{zebra_path}_Clip") prim.GetAttribute("startTime").Set(Sdf.TimeCode(timeslot * 1.0)) prim.GetAttribute("endTime").Set( Sdf.TimeCode(float(max(timeslot + zebra_info[zebra_name]["length"], max_anim_len)))) points_in_mesh = zebra_info[zebra_name]["points"][max_anim_len - timeslot] * scale / 100 frame_info[zebra_path] = {"name": zebra_name, "time": timeslot, "used_frame": max_anim_len - timeslot + 1, "scale": scale, "box": trimesh.PointCloud(points_in_mesh).bounding_box.vertices, "path": zebra_path} simulation_context.step(render=False) simulation_context.render() 
set_translate(stage.GetPrimAtPath(zebra_path), hidden_position) return frame_info
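# --- Minimal usage sketch, not part of the original module. It assumes the caller owns
# the objects built by the main zebra simulation script: a confuse `config`, a numpy
# `rng`, the list of zebra USD files, the pre-computed `zebra_info` and `floor_points`
# arrays, an active SimulationContext, the sequencer drop controller, the longest
# animation length and the off-screen `hidden_position`. Zebras are loaded once (and
# parked at `hidden_position`); for each generated frame a random subset is re-placed
# on the terrain without overlaps.
def _example_zebra_frame(config, rng, zebra_files, zebra_info, simulation_context,
                         sequencer_drop_controller, max_anim_len, hidden_position,
                         floor_points, meters_per_unit):
    frame_info = preload_all_zebras(config, rng, zebra_files, zebra_info,
                                    simulation_context, sequencer_drop_controller,
                                    max_anim_len, hidden_position)
    # The returned dict describes the placed animals (pose, scale, animation slot).
    placed = place_zebras(frame_info, rng, floor_points, meters_per_unit,
                          hidden_position, config, max_anim_len, zebra_info)
    return placed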
7,881
Python
40.052083
121
0.642431
eliabntt/GRADE-RR/simulator/utils/misc_utils.py
import asyncio import carb import ipdb import json import ntpath import numpy as np import os import pickle as pkl from PIL import Image from pyquaternion import Quaternion import scipy.spatial.transform as tf from stl import mesh import time import trimesh from typing import Dict, Optional, Union # ros import rospy, rosgraph from geometry_msgs.msg import PoseStamped, Point from nav_msgs.msg import Odometry from sensor_msgs.msg import Imu from std_msgs.msg import String # omni import omni.isaac.shapenet as shapenet import omni.kit from omni.isaac import RangeSensorSchema from omni.isaac.core import SimulationContext, PhysicsContext import omni.replicator.core as rep from omni.isaac.core.prims import XFormPrim from omni.isaac.core.utils.carb import set_carb_setting from omni.isaac.core.utils.extensions import enable_extension, disable_extension from omni.isaac.core.utils.stage import is_stage_loading, set_stage_up_axis from omni.isaac.dynamic_control import _dynamic_control import omni.isaac.IsaacSensorSchema as IsaacSensorSchema from omni.isaac.synthetic_recorder import extension_custom from omni.physxcommands import SetStaticColliderCommand, RemoveStaticColliderCommand from pxr import UsdGeom, Gf, Usd, UsdSkel, AnimationSchema, Semantics, UsdPhysics, Sdf, UsdShade from pxr.Usd import Prim # 2022 edits import omni.graph.core as og from omni.isaac.core_nodes.scripts.utils import set_target_prims def add_semantics(prim: Prim, semantic_label: str): """ Adds semantic labels to the prim. prim: the prim to add the semantic label to semantic_label: the semantic label to add """ if not prim.HasAPI(Semantics.SemanticsAPI): sem = Semantics.SemanticsAPI.Apply(prim, "Semantics") sem.CreateSemanticTypeAttr() sem.CreateSemanticDataAttr() else: sem = Semantics.SemanticsAPI.Get(prim, "Semantics") sem.GetSemanticTypeAttr().Set("class") sem.GetSemanticDataAttr().Set(str(semantic_label)) def correct_paths(parent_name: str): """ Helper function to correct the paths of the world's materials (as they come from Windows). parent_name: the prim path of the father. """ stage = omni.usd.get_context().get_stage() for prim in stage.Traverse(): shader_path = prim.GetPath() if parent_name.lower() in str(shader_path).lower(): if prim.GetTypeName().lower() == "mesh": prim.GetProperty('doubleSided').Set(False) if prim.GetTypeName().lower() == "shader": try: change_shader_path(shader_path) except: print(f"Error changing shader of in {shader_path}") time.sleep(5) def change_shader_path(shader_path: str): """ Changes the shader path of the material. material_path: the prim path to the material collection (e.g. 
"/World/my_robot_0/materials, /World/home/materials") """ stage = omni.usd.get_context().get_stage() shader = stage.GetPrimAtPath(shader_path) if 'inputs:diffuse_texture' in shader.GetPropertyNames(): old_path = str(shader.GetAttribute('inputs:diffuse_texture').Get().resolvedPath) new_path = old_path.replace("@", "") # print(f"Changing path {old_path}") if "something" in old_path or "P:" in old_path: new_path = old_path.replace(ntpath.sep, os.sep).replace('P:/', '').replace("@", "") elif "somethingelse" in old_path.lower(): splitted = old_path.split(ntpath.sep) tmp_path = "" for i in splitted: tmp_path += i + ntpath.sep if "something" in i: break tmp_path = tmp_path.replace(ntpath.sep, os.sep) new_path = old_path.replace(ntpath.sep, os.sep).replace(tmp_path, '').replace( "@", "") shader.GetAttribute('inputs:diffuse_texture').Set(new_path) if 'inputs:reflectionroughness_texture' in shader.GetPropertyNames(): old_path = str(shader.GetAttribute('inputs:reflectionroughness_texture').Get().resolvedPath) new_path = old_path.replace("@", "") # print(f"Changing path {old_path}") if "something" in old_path or "P:" in old_path: new_path = old_path.replace(ntpath.sep, os.sep).replace('P:/', '').replace("@", "") elif "somethingelse" in old_path.lower(): splitted = old_path.split(ntpath.sep) tmp_path = "" for i in splitted: tmp_path += i + ntpath.sep if "something" in i: break tmp_path = tmp_path.replace(ntpath.sep, os.sep) new_path = old_path.replace(ntpath.sep, os.sep).replace(tmp_path, '').replace( "@", "") shader.GetAttribute('inputs:reflectionroughness_texture').Set(new_path) def set_colliders(path_main_asset: str, value: bool): """ It takes a path to a main asset, and a boolean value, and sets the physics:collisionEnabled attribute to the boolean value for all children of the main asset. This effectively enable or disable collisions. :param path_main_asset: The path to the main asset in the USD file :type path_main_asset: str :param value: bool :type value: bool """ stage = omni.usd.get_context().get_stage() for j in stage.GetPrimAtPath(path_main_asset).GetAllChildren(): for i in j.GetAllChildren(): if "physics:collisionEnabled" in i.GetPropertyNames(): if i.GetProperty("physics:collisionEnabled").Get() == value: continue i.GetProperty("physics:collisionEnabled").Set(value) def add_colliders(path_main_asset: str): """ Adds the colliders to the main asset. This allows the object to have collisions or not (if supported). Return True if the colliders were added, False otherwise. path_main_asset: the path of the prim asset whose childs need to be processed """ stage = omni.usd.get_context().get_stage() fres = True for prim in stage.Traverse(): prim_path = prim.GetPath() if path_main_asset.lower() in str(prim_path).lower(): if prim.GetTypeName().lower() == "mesh" or prim.GetTypeName().lower() == "xform": res, _ = SetStaticColliderCommand.execute(str(prim.GetPath())) fres = res and fres return fres def process_semantics(parent_name: str, name_to_label: str = None): """ Processes the semantics of the world. In case the name_to_label is specified (not coming from Front3D), it will be set to the name_to_label param. parent_name: the prim path of the father. 
label: the eventual label to give to the set of assets """ for prim in omni.usd.get_context().get_stage().Traverse(): primpath = prim.GetPath() if parent_name.lower() in str(primpath).lower(): if prim.GetTypeName().lower() == "mesh" or prim.GetTypeName().lower() == "xform": if name_to_label == None: # tmp = prim.GetAttribute('userProperties:category_id') tmp = prim.GetAttribute('userProperties:semantic') if tmp.Get() != None: add_semantics(prim, str(tmp.Get())) else: add_semantics(prim, name_to_label) def randomize_and_fix_lights(config: dict, rng: np.random.default_rng, parent_name: str, z_lim, meters_per_unit, is_rtx: bool = False): """ Randomize the lights within an environment config: the configuration dict with the parameters and enabled/disabled config for intensity/color rng: global rng parent_name: parent whose childs need to be considered to change the lights """ stage = omni.usd.get_context().get_stage() if not (config["intensity"] or config["color"]): return min_int = config.get("intensity_interval", 0.0)[0] max_int = config.get("intensity_interval", 1.0)[1] for prim in stage.Traverse(): path = prim.GetPath() if parent_name.lower() in str(path).lower(): if "light" in prim.GetTypeName().lower(): if "environment" in str(path).lower(): continue if config["intensity"]: prim.GetAttribute('intensity').Set(rng.uniform(low=min_int, high=max_int)) if config["color"]: col = rng.random(size=3) prim.GetAttribute('color').Set(Gf.Vec3f(col[0], col[1], col[2])) if not is_rtx: prim.GetAttribute('diffuse').Set(4) prim.GetAttribute('specular').Set(4) # FIXME no actual check I'm not moving other stuff. but this should work based on the "existance" of segmentation info and that lights on its own does not have a translation attribute z_lamp = omni.usd.get_world_transform_matrix(prim)[3, 2] * meters_per_unit if z_lamp > z_lim - 0.08: diff = z_lamp - z_lim - 0.08 while not prim.HasAttribute('xformOp:translate'): prim = prim.GetParent() # while (not "semantic:Semantics:params:semanticData" in parent.GetPropertyNames()): # parent = parent.GetParent() p_lamp = prim.GetAttribute('xformOp:translate').Get() p_lamp[2] -= diff prim.GetAttribute('xformOp:translate').Set(p_lamp) # move the light if it is too high def randomize_roughness(config: dict, rng: np.random.default_rng, parent_name: str): """ Randomize the roughness (reflectivity) of assets within an environment config: the configuration dict with the parameters and enabled/disabled config for intensity/color rng: global rng parent_name: parent whose childs need to be considered to change the lights """ stage = omni.usd.get_context().get_stage() if not (config["enabled"]): return min_int = config.get("intensity_interval", 0.0)[0] max_int = config.get("intensity_interval", 1.0)[1] for prim in stage.Traverse(): path = prim.GetPath() if parent_name.lower() in str(path).lower(): if prim.GetTypeName().lower() == "material" or prim.GetTypeName().lower() == "shader": if "inputs:RoughnessMin" in prim.GetPropertyNames(): val = rng.uniform(low=min_int, high=max_int) prim.GetAttribute('inputs:RoughnessMin').Set(val) prim.GetAttribute('inputs:RoughnessMax').Set(val) def get_area(polygon): """ Computes the area of a polygon. 
""" x = polygon[:, 0] y = polygon[:, 1] return .5 * np.absolute(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def change_prim_collision(enable, prim_path): for j in omni.usd.get_context().get_stage().Traverse(): if str(j.GetPath()).startswith(prim_path): if 'physics:collisionEnabled' in j.GetPropertyNames(): omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(str(j.GetPath())+'.physics:collisionEnabled'), value=enable, prev=None) def change_collision_at_path(enable, paths=['/my_robot_0/camera_link/Cube.physics:collisionEnabled','/my_robot_0/yaw_link/visuals.physics:collisionEnabled']): """ It enables or disables collisions for the paths :param enable: True or False """ for path in paths: omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path(path), value=enable, prev=None) def add_translate_anim(prim_path: str, pos: Gf.Vec3d, time: float = 0.0): """ Add a goal location at a given timecode. The object will EVENTUALLY move there with a smooth movement. prim_path: the path of the asset to be moved pos: the final position time: the time in FRAME """ omni.kit.commands.execute('ChangePropertyCommand', prop_path=prim_path + '.xformOp:translate', value=pos, prev=Gf.Vec3d(0, 0, 0), type_to_create_if_not_exist=UsdGeom.XformOp.TypeTranslate, timecode=Usd.TimeCode(time)) def add_rotation_anim(prim_path: str, rot: list, time: float = 0.0, use_double=False): """ Add a goal rotation at a given timecode. The object will EVENTUALLY move there with a smooth movement. EXPECT ROT IN RAD! prim_path: the path of the asset to be moved rot: the final position time: the time in FRAME """ rot = np.array(rot) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), rot[0]) * Gf.Rotation(Gf.Vec3d.YAxis(), rot[1]) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot[2]) ) omni.kit.commands.execute('ChangePropertyCommand', prop_path=prim_path + ".xformOp:orient", value=Gf.Quatf(quat.GetQuat()) if not use_double else Gf.Quatd(quat.GetQuat()), prev=Gf.Quatf(0, 0, 0, 1) if not use_double else Gf.Quatd(0, 0, 0, 1), type_to_create_if_not_exist=UsdGeom.XformOp.TypeOrient, timecode=Usd.TimeCode(time)) def inf_helper(y: np.array): """Helper to handle indices and logical indices of NaNs. Input: - y, 1d numpy array with possible NaNs Output: - nans, logical indices of NaNs - index, a function, with signature indices= index(logical_indices), to convert logical indices of NaNs to 'equivalent' indices """ return np.isinf(y), lambda z: z.nonzero()[0] def position_object(environment, type: int, objects: list = [], ob_stl_paths: list = [], reset: bool = False, max_collisions: int = 200): """ type = 0 -> camera z_lim = [0.8 - 1.8] using camera stl type = 1 -> humans z_lim = [0 - 0] using human stl type = 2 -> shapenet z_lim = [0 - 1.8] using camera stl type = 3 -> origin z_lim = [0 - 0] using camera stl note: when min == max we apply a small offset to the max to address shifts in the z-axis to allow small collisions. However, the result will be still published at the wanted height. 
envionment: the environment object type: see above objects: the list of objects to be placed ob_stl_paths: the corresponding stls reset: if the collision checker need to be resetted forcefully """ # thih import will work if you compile our https://github.com/eliabntt/moveit_based_collision_checker_and_placement/tree/main # and you add the source catkin command to isaac_X_X/setup_python_env.sh from collision_check.srv import * if environment.env_stl_path == None: print( "No stl is being loaded for the environment, please pre-fix all objects locations or implement your own strategy") environment.env_stl_path = "" print("Wait for service") rospy.wait_for_service("/fake/collision_checker/check") print("Service loaded") try: check_collision = rospy.ServiceProxy("/fake/collision_checker/check", collision_check_srv) req = collision_check_srvRequest() req.env_stl_path = environment.env_stl_path req.env_polygon = environment.env_polygon req.reset = reset if type == 1: for ob in objects: req.ob_names.append(ob) req.ob_stl_paths = ob_stl_paths req.is_cam = True if type != 1 else False min_z = (0.8 + environment.env_limits[2]) if type == 0 else environment.env_limits[2] max_z = environment.env_limits[2] if (type == 1 or type == 3) else min(1.8 + environment.env_limits[2], environment.env_limits[5] - 0.5) if type == 4: min_z = environment.env_limits[2] max_z = environment.env_limits[2] has_forced_z = -1 if min_z == max_z: max_z += 0.5 has_forced_z = min_z req.min_limits = [environment.env_limits[0] + 0.5, environment.env_limits[1] + 0.5, min_z] req.max_limits = [environment.env_limits[3] - 0.5, environment.env_limits[4] - 0.5, max_z] req.limit_collision = 0 if type != 1 else max_collisions req.forced_z = has_forced_z res = check_collision.call(req) if has_forced_z != -1: res.z = [min(has_forced_z, z) for z in res.z] return np.array(res.x) - environment.shifts[0], np.array(res.y) - environment.shifts[1], np.array(res.z) - \ environment.shifts[2], res.yaw except rospy.ServiceException as e: print("Service call failed: %s" % e) return [-1] * len(objects), [-1] * len(objects), [-1] * len(objects), [0] * len(objects) def set_scale(prim: Prim, scale: float = 1.0): """ Set the scale of a Prim prim: the prim scale: the scale """ prop_names = prim.GetPropertyNames() if "xformOp:scale" not in prop_names: xformable = UsdGeom.Xformable(prim) xform_op_scale = xformable.AddXformOp(UsdGeom.XformOp.TypeScale, UsdGeom.XformOp.PrecisionDouble, "") else: xform_op_scale = UsdGeom.XformOp(prim.GetAttribute("xformOp:scale")) xform_op_scale.Set(Gf.Vec3d([scale, scale, scale])) def clear_properties(path: str): """ The function clears all the POSE properties of the given prim. This is to ensure a consistent way of setting those properties for different objects. 
This should be called with ALL loaded objects so that we have consistent xformOp:trans/Orient """ current_position, current_orientation = XFormPrim(path).get_world_pose() def set_translate(prim: Prim, new_loc: list): """ prim: must be prim type, the prim to be moved new_loc: list [x-y-z] for the single prim """ properties = prim.GetPropertyNames() if "xformOp:translate" in properties: translate_attr = prim.GetAttribute("xformOp:translate") translate_attr.Set(Gf.Vec3d(new_loc)) elif "xformOp:transform" in properties: transform_attr = prim.GetAttribute("xformOp:transform") matrix = prim.GetAttribute("xformOp:transform").Get() matrix.SetTranslateOnly(Gf.Vec3d(new_loc)) transform_attr.Set(matrix) else: xform = UsdGeom.Xformable(prim) xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTranslate, UsdGeom.XformOp.PrecisionDouble, "") xform_op.Set(Gf.Vec3d(new_loc)) def set_rotate(prim: XFormPrim, rot: list): """ expects rot in rad prim: The prim to be rotated rot: roll-pitch-yaw in RAD """ properties = prim.GetPropertyNames() rot = np.array(rot) * 180 / np.pi quat = ( Gf.Rotation(Gf.Vec3d.XAxis(), rot[0]) * Gf.Rotation(Gf.Vec3d.YAxis(), rot[1]) * Gf.Rotation(Gf.Vec3d.ZAxis(), rot[2]) ) if "xformOp:orient" in properties: rotation = prim.GetAttribute("xformOp:orient") rotation.Set(Gf.Quatd(quat.GetQuat())) else: xform = UsdGeom.Xformable(prim) xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeOrient, UsdGeom.XformOp.PrecisionDouble, "") xform_op.Set(Gf.Quatd(quat.GetQuat())) def dynamic_control_interface(): """ This is placed here as the extension is not loaded in the main script. """ return _dynamic_control.acquire_dynamic_control_interface() def reload_references(path): """ It reloads all the references and payloads of a given prim :param path: The path to the prim you want to reload references for """ stage = omni.usd.get_context().get_stage() prim_list = [] for j in stage.GetPrimAtPath(path).GetAllChildren(): prim_list.append(j) layers = set() for prim in prim_list: for (ref, intro_layer) in omni.usd.get_composed_references_from_prim(prim): layer = Sdf.Find(intro_layer.ComputeAbsolutePath(ref.assetPath)) if ref.assetPath else None if layer: layers.add(layer) for (ref, intro_layer) in omni.usd.get_composed_payloads_from_prim(prim): layer = Sdf.Find(intro_layer.ComputeAbsolutePath(ref.assetPath)) if ref.assetPath else None if layer: layers.add(layer) for l in layers: l.Reload(force=True) def teleport(path, loc, rot): """ It teleports the object at the given path to the given location and rotation :param path: The path to the object you want to teleport :param loc: (x, y, z) :param rot: (x, y, z, w) """ omni.kit.commands.execute( "IsaacSimTeleportPrim", prim_path=path, translation=(loc[0], loc[1], loc[2]), rotation=(rot[0], rot[1], rot[2], rot[3]), ) def toggle_dynamic_objects(dynamic_prims: list, status: bool): """ It toggles the visibility of the dynamic objects in the scene :param dynamic_prims: a list of prims that you want to toggle :type dynamic_prims: list """ # print("Toggling environment...") for _ in range(3): for prim in dynamic_prims: imageable = UsdGeom.Imageable(prim) if status: imageable.MakeVisible() else: imageable.MakeInvisible() imageable = [] def reset_physics(timeline, simulation_context): timeline.stop() simulation_context.reset() timeline.play()
20,439
Python
39
191
0.658056
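A minimal usage sketch for the pose helpers above (clear_properties, set_translate, set_rotate, set_scale). It assumes an Isaac Sim session with a stage already loaded and that this module is importable as utils.misc_utils; the prim path is a placeholder.

import numpy as np
import omni.usd
from utils.misc_utils import clear_properties, set_translate, set_rotate, set_scale

stage = omni.usd.get_context().get_stage()
prim_path = "/World/my_asset"  # placeholder, adapt to your stage
prim = stage.GetPrimAtPath(prim_path)

clear_properties(prim_path)               # ensure a consistent xformOp layout first
set_translate(prim, [1.0, 0.5, 0.0])      # position in stage units
set_rotate(prim, [0.0, 0.0, np.pi / 2])   # roll-pitch-yaw in radians
set_scale(prim, 0.01)                     # e.g. a cm-based asset placed in a m-based stage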
eliabntt/GRADE-RR/simulator/utils/simulation_utils.py
import time import utils.misc_utils from utils.misc_utils import * GRAPH_PATH = "/Render/PostProcess/SDGPipeline" def set_common_stage_properties(rate): """ Note: some properties as of now can only be set with the general environment USD file. """ _desired_render_settings: Dict[str, Union[bool, int]] = { "/app/asyncRendering": False, "/app/renderer/skipWhileMinimized": False, "/app/renderer/sleepMsOnFocus": 0, "/app/renderer/sleepMsOutOfFocus": 0, "/app/runLoops/main/rateLimitEnabled": True, "/app/runLoops/main/rateLimitFrequency": rate, "/persistent/simulation/minFrameRate": rate, "/app/runLoops/main/rateLimitUseBusyLoop": True, "/app/runLoops/rendering_0/rateLimitEnabled": True, "/app/viewport/showSettingMenu": True, "/app/viewport/showCameraMenu": True, "/app/viewport/showRendererMenu": True, "/app/viewport/showHideMenu": True, "/app/viewport/showLayerMenu": True, "/app/viewport/grid/showOrigin": False, "/app/viewport/grid/enabled": False, ## this does not work "/persistent/app/viewport/grid/lineWidth": 0, "/rtx/multiThreading/enabled": True, "/app/asyncRenderingLowLatency": False, # "/persistent/app/captureFrame/viewport": True, } for setting_key, desired_value in _desired_render_settings.items(): set_carb_setting(carb.settings.get_settings(), setting_key, desired_value) def simulation_environment_setup(need_ros = True): """ Enable the necessary extensions that will be used within the simulation """ enable_extension("omni.isaac.ros_bridge") enable_extension("omni.isaac.physics_inspector") enable_extension("omni.isaac.physics_utilities") enable_extension("omni.anim.skelJoint") enable_extension("omni.kit.window.sequencer") enable_extension("omni.isaac.dynamic_control") enable_extension("omni.isaac.shapenet") enable_extension("semantics.schema.editor") enable_extension("omni.hydra.iray") enable_extension("omni.iray.settings.core") enable_extension('omni.isaac.occupancy_map') enable_extension('omni.isaac.shapenet') enable_extension('omni.isaac.range_sensor') disable_extension('omni.isaac.sun_study') enable_extension('omni.isaac.core_nodes') enable_extension('omni.isaac.sensor') # Necessary ONLY if using NUCLEUS # Locate /Isaac folder on nucleus server to load sample from omni.isaac.core.utils.nucleus import get_assets_root_path nucleus_server = get_assets_root_path() if nucleus_server is None: carb.log_error("Could not find nucleus server with /Isaac folder, exiting") exit() if need_ros: if not rosgraph.is_master_online(): carb.log_error("Please run roscore before executing this script") exit() def set_raytracing_settings(physics_hz): set_common_stage_properties(physics_hz) settings = carb.settings.get_settings() settings.set("/app/hydraEngine/waitIdle", True) settings.set_string("/rtx/rendermode", "RayTracing") settings.set_int('/rtx/post/aa/op', 2) def set_pathtracing_settings(physics_hz): set_common_stage_properties(physics_hz) settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int('/rtx/post/aa/op', 1) # settings.set_int('/rtx/multiThreading/enabled', True) # settings.set_bool('/rtx/multiThreading/enabled', True) settings.set_int('/rtx/post/histogram/filterType', 1) settings.set_int('/rtx/post/histogram/tau', 100) settings.set_float('/rtx/post/histogram/minEV', 2) settings.set_float('/rtx/post/histogram/maxEV', 50) settings.set_bool('/rtx/post/histogram/enabaled', True) settings.set_int('/rtx/post/tonemap/filmIso', 100) # 400 settings.set_int('/rtx/post/tonemap/cameraShutter', 30) settings.set_int('/rtx/post/tonemap/fStop', 4) 
settings.set_int("/rtx/pathtracing/maxBounces", 6) # 6 settings.set_int("/rtx/pathtracing/maxSpecularAndTransmissionBounces", 6) # settings.set_int("/rtx/pathtracing/maxDiffuseBounces", 10) settings.set_int("/rtx/pathtracing/spp", 1) settings.set_int("/rtx/pathtracing/totalSpp", 64) settings.set_int("/rtx/pathtracing/clampSpp", 64) settings.set_int("/rtx/pathtracing/cached/enabled", False) settings.set_bool("/rtx/pathtracing/cached/enabled", False) settings.set_int("/rtx/pathtracing/lightcache/cached/enabled", False) settings.set_bool("/rtx/pathtracing/lightcache/cached/enabled", False) settings.set("/app/hydraEngine/waitIdle", False) def compute_timeline_ratio(human_anim_len, reverse_strategy, experiment_length): """ based on the reverse strategy compute how the system should roll back animations This might be counter-productive in some instances """ if len(human_anim_len) == 0: return 1 if reverse_strategy == "avg": return float(experiment_length) / (sum(human_anim_len) / len(human_anim_len)) elif reverse_strategy == "min": return float(experiment_length) / min(human_anim_len) elif reverse_strategy == "max": return float(experiment_length) / max(human_anim_len) elif reverse_strategy == "half": return 2 elif reverse_strategy == "none": return 1 else: return 1 def pub_and_write_images(simulation_context, viewport_window_list, ros_camera_list, raytracing, my_recorder=None, enable_recorder=True): sleeping(simulation_context, viewport_window_list, raytracing) ctime = omni.timeline.get_timeline_interface().get_current_time() for i, cam, outs in ros_camera_list: print(f"Publishing camera {cam}...") for output in outs: og.Controller.attribute(output+ ".inputs:step").set(1) simulation_context.render() for i, cam, outs in ros_camera_list: for output in outs: og.Controller.attribute(output+ ".inputs:step").set(0) omni.timeline.get_timeline_interface().set_current_time(ctime) if my_recorder and my_recorder._enable_record and enable_recorder: my_recorder._update() print("Writing") def sleeping(simulation_context, viewport_window_list, raytracing, totalSpp=64, spp=1): """ Sleeps the simulation to be sure that the whole frame has been rendered and updated. First we render a couple of frames. In rtx mode we need to wait the fps of the viewport to be reached. In pathtracing mode we need to do "/rtx/pathtracing/spp" rendering steps. e.g. carb.settings.get_settings().get("/rtx/pathtracing/totalSpp") carb.settings.get_settings().get("/rtx/pathtracing/spp") """ # todo is there a better way? 
I don"t think so, this is variable # fixme making sure timeline does not advance timeline = omni.timeline.get_timeline_interface() mytime = timeline.get_current_time() if raytracing: sleep_time = 0 start = time.time() for _ in range(100): for vp in viewport_window_list: if vp.fps == 0: continue sleep_time = max(1 / vp.fps * 1.1, sleep_time) if sleep_time != 0 and time.time() - start > sleep_time * 2: # overly cautious break simulation_context.render() timeline.set_current_time(mytime) else: cnt = totalSpp increase = spp while cnt >= 0: simulation_context.render() timeline.set_current_time(mytime) cnt -= increase simulation_context.render() timeline.set_current_time(mytime) simulation_context.render() timeline.set_current_time(mytime) time.sleep(0.2) def recorder_setup(_recorder_settings, out_path, enabled, skip_cameras=1): my_recorder = extension_custom.MyRecorder() my_recorder.on_startup() my_recorder.set_single_settings(_recorder_settings) my_recorder._dir_name = os.path.join(out_path) my_recorder._enable_record = enabled my_recorder.skip_cameras = skip_cameras return my_recorder def setup_timeline(config): """ It sets up the timeline to have a start time of 0.0, an end time of the experiment length * 2, and a time code per second of the fps :param config: a dictionary of parameters that are used to configure the experiment :return: timeline """ timeline = omni.timeline.get_timeline_interface() timeline.set_start_time(0.0) if "fps" not in config: fps = 30 else: fps = config['fps'].get() if "experiment_length" in config: timeline.set_end_time(config["experiment_length"].get() * 2 / fps) # *2 to have room else: print("No experiment length found, setting it to 3600") timeline.set_end_time(3600 / fps) timeline.set_time_codes_per_second(fps) return timeline
8,162
Python
35.936651
136
0.735849
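A minimal startup sketch for the render-settings and timeline helpers above. It assumes an Isaac Sim standalone app is already running and that this module is importable as utils.simulation_utils; the confuse application name and config values are placeholders.

import confuse
from utils.simulation_utils import (
    simulation_environment_setup,
    set_raytracing_settings,
    setup_timeline,
)

config = confuse.Configuration("grade_experiment", read=False)  # placeholder app name
config.set({"fps": 30, "experiment_length": 300})

simulation_environment_setup(need_ros=False)  # enable the required extensions, skip the roscore check
set_raytracing_settings(60)                   # or set_pathtracing_settings(60) for path-traced renders
timeline = setup_timeline(config)             # end time is experiment_length * 2 / fps
timeline.play()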
eliabntt/GRADE-RR/simulator/utils/human_utils.py
import utils.misc_utils from utils.misc_utils import * def move_humans_to_ground(my_humans_heights: list, body_lists: list, frame: float, meters_per_unit: float, max_height: float): """ Function to keep the human at ground level (0 for now, but can be elaborated) my_human_heights: list of [animation_frames, [vertices, z_loc]]. For every frame of the animation, for every vertex, the z loc body_lists: Using to access the prim, list of prim paths frame: the simulation frame we are in (float or int will get a cast to int) meters_per_unit: meter per unit of distance in the simulation """ stage = omni.usd.get_context().get_stage() for index, height in enumerate(my_humans_heights): z_min = None if height is None: context = omni.usd.get_context() stage = context.get_stage() prim = stage.GetPrimAtPath(body_lists[index]) for i in prim.GetAllChildren(): if "armature" in str(i.GetPath()).lower(): prim = i for i in prim.GetAllChildren(): if "body" in str(i.GetPath()).lower(): prim = i for i in prim.GetAllChildren(): if "mesh" in str(i.GetPath()).lower(): prim = i l = prim.GetPropertyNames() if "points" in l: k = prim.GetAttribute("points").Get() if k is not None: k = np.array(k) z_min = min(k[:, 2]) else: z_min = min(height[int(min(max(frame - 1, 0), len(height) - 1))]) / meters_per_unit if z_min is None: continue if z_min < max_height: loc = stage.GetPrimAtPath(body_lists[index]).GetProperty('xformOp:translate').Get() loc = [loc[0], loc[1], loc[2] - z_min] set_translate(stage.GetPrimAtPath(body_lists[index]), loc) def load_human(human_base_prim_path, n, asset_path, dynamic_prims=[], added_prims=[], correct_texture_paths=False): """ Load the human based on the usd path and add it to the dynamic prims list Follow prim naming convention /human_base_prim_path+n Add also the semantic with the label "human" human_base_prim_path: the base path to which we add the n of the n-th human as per the prim path n: the number of the human asset_path: the path of the ussd of the human dynamic_prims: the list of dynamic prims in the world. Only the body, and the clothes will be added (not the armature) as separate objects added_prims: the list of the number of prims added to the world correct_texture_paths: if True, correct the texture paths to the correct path """ stage = omni.usd.get_context().get_stage() res, _ = omni.kit.commands.execute("CreateReferenceCommand", usd_context=omni.usd.get_context(), path_to=f"{human_base_prim_path}{n}", asset_path=asset_path, instanceable=False) cnt = 0 if res: for child in stage.GetPrimAtPath(f"{human_base_prim_path}{n}").GetAllChildren(): if "armature" in child.GetName().lower(): for sub_child in child.GetAllChildren(): if "armature" not in sub_child.GetName().lower(): dynamic_prims.append(sub_child) cnt += 1 added_prims.append(cnt) clear_properties(f"{human_base_prim_path}{n}") if correct_texture_paths: print("Correcting texture paths, you might want to change utils/misc_utils.py:correct_paths") correct_paths(f"{human_base_prim_path}{n}") else: print("Not correcting texture paths, you might want to check the textures") process_semantics(f"{human_base_prim_path}{n}", "human") else: raise Exception(f"Failed to load human {n} from {asset_path}")
3,553
Python
41.309523
139
0.663102
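A usage sketch for load_human above, assuming an Isaac Sim stage and a human USD produced by the GRADE pipeline; the asset path and prim root are placeholders.

import omni.usd
from utils.human_utils import load_human
from utils.misc_utils import set_translate

dynamic_prims, added_prims = [], []
human_usd = "/data/humans/sample/sample.usd"  # placeholder path
load_human("/World/human", 0, human_usd,
           dynamic_prims=dynamic_prims, added_prims=added_prims,
           correct_texture_paths=False)

# the loaded human lives under /World/human0; move it like any other prim
stage = omni.usd.get_context().get_stage()
set_translate(stage.GetPrimAtPath("/World/human0"), [2.0, 0.0, 0.0])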
eliabntt/GRADE-RR/simulator/utils/objects_utils.py
import utils.misc_utils from utils.misc_utils import * mtl_created_list = [] def setup_shapenet(username, password, csv_location): global database shapenet.settings.ShapenetSettings() if not os.path.exists(csv_location): logged_in = shapenet.login.save_v1_csvs(username, password, csv_location) database = shapenet.globals.get_database() return database def load_object(rng=np.random.default_rng(), obj_name="shapenet", config=None, scale=1): if obj_name == "shapenet": return load_shapenet_object(rng, config, scale) elif obj_name == "google": return load_google_obj(rng, config, scale) def load_shapenet_object(rng=np.random.default_rng(), config=None, scale=1): """ It loads a random object from the ShapeNet database :param rng: a random number generator. If you don't have one, you can use np.random.default_rng() :param config: a dictionary of parameters that can be set by the user :param scale: The scale of the object, defaults to 1 (optional) :return: The path to the object and the synsetId and modelId of the object. """ global database scale /= 100 synsetId = rng.choice(list(database)) if config["synsetId"].get() == "random" else config["synsetId"].get() modelId = rng.choice(list(database[synsetId])) if config["modelId"].get() == "random" else config["modelId"].get() _settings = carb.settings.get_settings() prim = shapenet.shape.addShapePrim(_settings.get("/isaac/shapenet/omniverseServer"), synsetId, modelId, Gf.Vec3d(0, 0, 0), Gf.Rotation(Gf.Vec3d(1, 0, 0), 0), scale, True, True) if type(prim) == str: raise Exception(prim) return str(prim.GetPath()), [synsetId, modelId] def load_google_obj(rng=np.random.default_rng(), config=None, scale = 1): """ It loads a random Google 3D asset from the Google Scanned Object, converts it to USD, and then creates a reference to it in the current stage :param rng: a random number generator :param config: a dictionary of the config file :return: The prim path of the asset and the name of the asset """ google_obj_folder = config['google_obj_folder'].get() if config['google_obj_shortlist'].get() == "": asset = rng.choice(os.listdir(google_obj_folder)) else: with (open(config['google_obj_shortlist'].get(), 'r')) as f: asset = rng.choice(f.read().splitlines()) if not os.path.exists(f"{google_obj_folder}/exported_usd/{asset}/"): os.makedirs(f"{google_obj_folder}/exported_usd/{asset}/") usd_asset_path = f"{google_obj_folder}/exported_usd/{asset}/{asset}.usd" obj_asset_path = f"{google_obj_folder}/{asset}/meshes/model.obj" print(f"Converting {obj_asset_path} to {usd_asset_path}") if not os.path.exists(usd_asset_path): success = asyncio.new_event_loop().run_until_complete(convert_google_obj(obj_asset_path, usd_asset_path)) if not success: raise Exception("Failed to convert obj to usd") stage = omni.usd.get_context().get_stage() prim_path = str(stage.GetDefaultPrim().GetPath()) + "/" + asset insta_count = 0 prim_path_len = len(prim_path) while stage.GetPrimAtPath(prim_path): insta_count += 1 prim_path = f"{prim_path[:prim_path_len]}_{insta_count}" omni.kit.commands.execute('CreateReferenceCommand', usd_context=omni.usd.get_context(), path_to=prim_path, asset_path=usd_asset_path, instanceable=True) texture_list = os.listdir(f"{google_obj_folder}/{asset}/materials/textures")[0] # shader = UsdShade.Shader(stage.GetPrimAtPath(f"{prim_path}/Looks/material_0/material_0")) # shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset) # omni.kit.commands.execute('ChangePropertyCommand', # prop_path=f'{prim_path}/Looks/material_0/material_0.inputs:diffuse_texture', # 
value=f"{google_obj_folder}/{asset}/materials/textures/{texture_list}", # prev=None) global mtl_created_list omni.kit.commands.execute( "CreateAndBindMdlMaterialFromLibrary", mdl_name="OmniPBR.mdl", mtl_name=f"{asset}", mtl_created_list=mtl_created_list, ) mtl_prim = stage.GetPrimAtPath(mtl_created_list[0]) omni.usd.create_material_input( mtl_prim, "diffuse_texture", "my-computer://" + texture_list, # my-computer seems necessary Sdf.ValueTypeNames.Asset, ) obj_shade = UsdShade.Material(mtl_prim) for child in stage.GetPrimAtPath(prim_path).GetAllChildren(): if child.GetTypeName().lower() == "xform": for subchild in child.GetAllChildren(): if subchild.GetTypeName().lower() == "mesh": UsdShade.MaterialBindingAPI(subchild).Bind(obj_shade, UsdShade.Tokens.strongerThanDescendants) set_scale(stage.GetPrimAtPath(prim_path), scale) return str(prim_path), asset async def convert_google_obj(in_path, out_path): """ It converts a Google 3D model to a format that can be used in Omni :param in_path: The path to the file you want to convert :param out_path: The path to the output file :return: A boolean value. """ import omni.kit.asset_converter as assetimport context = omni.kit.asset_converter.AssetConverterContext() converter_manager = omni.kit.asset_converter.get_instance() context.embed_textures = False task = converter_manager.create_converter_task(in_path, out_path, None, context) success = await task.wait_until_finished() return success def load_objects(config, environment, rng, dynamic_prims, scale): """ Load objects in the environment Config should contain `config["obstacles"]` with the various considered keys. In our case those are shapenet and google(scanned_objects) In config we define the # of objects for each class. If the import fails the system tries to load it from another class. For now we do not generate positions that are collision free, so the objects will go through obstacles/humans/camera. config: the config dictionary environment: the environment object rng: the global rng dynamic_prims: the list of dynamic prims that will be used in the main thread """ stage = omni.usd.get_context().get_stage() shapenet_obs = config["obstacles"]["shapenet"].get() google_obs = config["obstacles"]["google"].get() num_obstacles = shapenet_obs + google_obs loc = '' google_obs_used = [] shapenet_obs_used = [] meters_per_unit = environment.meters_per_unit if (num_obstacles > 0): print("Loading obstacles..") for n in range(num_obstacles): print("Loading obstacle {}".format(n)) # set random valid location, use "camera" x, y, z, yaw = position_object(environment, type=2) if google_obs > 0: ob_type = "google" google_obs -= 1 else: ob_type = "shapenet" if loc == '': loc = shapenet.globals.get_local_shape_loc() print("Location is {}".format(loc)) csv_location = loc + "/v1_csv/" database = setup_shapenet(config["shapenet_username"].get(), config["shapenet_password"].get(), csv_location) if database is None: print("Error loading database, resort to google") ob_type = "google" shapenet_obs -= 1 try: my_shape, shape_infos = load_object(rng, ob_type, config, scale) except: print("Error loading object, try with the other type") try: my_shape, shape_infos = load_object(rng, "google" if ob_type == "shapenet" else "shapenet", config, scale) except: print("Error loading object, giving up") continue google_obs_used.append(shape_infos) if ob_type == "google" else shapenet_obs_used.append(shape_infos) print(f"{my_shape} loaded.. 
pose and adding animation") clear_properties(my_shape) add_translate_anim(my_shape, Gf.Vec3d(x[0] / meters_per_unit, y[0] / meters_per_unit, z[0] / meters_per_unit)) add_rotation_anim(my_shape, Gf.Vec3d(rng.uniform(0, 2 * np.pi), rng.uniform(0, 2 * np.pi), rng.uniform(0, 2 * np.pi))) dynamic_prims.append(stage.GetPrimAtPath(my_shape)) num_keys = rng.choice(range(1, config["experiment_length"].get()), rng.integers(1, 10)).astype(float) num_keys.sort() for key in num_keys: key *= 1 x, y, z, yaw = position_object(environment, type=2) add_translate_anim(my_shape, Gf.Vec3d(x[0] / meters_per_unit, y[0] / meters_per_unit, z[0] / meters_per_unit), key) add_rotation_anim(my_shape, Gf.Vec3d(rng.uniform(0, 360), rng.uniform(0, 360), rng.uniform(0, 360)), key) if ob_type == "google": add_colliders(my_shape) add_semantics(stage.GetPrimAtPath(my_shape), ob_type) print("Loading obstacle complete") return google_obs_used, shapenet_obs_used
8,654
Python
40.018957
121
0.681303
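A sketch of the configuration keys consumed by load_object / load_google_obj above. The key names are taken from the code; the values and folder paths are placeholders, and the call assumes a running Isaac Sim session.

import numpy as np
import confuse
from utils.objects_utils import load_object

config = confuse.Configuration("grade_objects", read=False)
config.set({
    "google_obj_folder": "/data/google_scanned_objects",  # placeholder folder
    "google_obj_shortlist": "",                           # empty: pick any asset in the folder
    "synsetId": "random",                                 # only used for shapenet assets
    "modelId": "random",
})

rng = np.random.default_rng(42)
prim_path, asset_name = load_object(rng, obj_name="google", config=config, scale=1.0)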
eliabntt/GRADE-RR/irotate_specific/republish_tf.py
#!/usr/bin/env python
import copy

import rospy
from tf2_msgs.msg import TFMessage


def callback(data, pub):
    """Republish the incoming TF message, dropping the iRotate-specific frames."""
    data_to_pub = TFMessage()
    data_to_pub.transforms = copy.copy(data.transforms)
    cnt = 0
    for i, d in enumerate(data.transforms):
        if ("x_link" in d.child_frame_id or "y_link" in d.child_frame_id
                or "yaw_link" in d.child_frame_id or "base_link" in d.child_frame_id
                or "cameraholder_link" in d.child_frame_id):
            # pop from the copy; cnt compensates for the entries already removed
            data_to_pub.transforms.pop(i - cnt)
            cnt += 1
    pub.publish(data_to_pub)


def listener():
    rospy.init_node('tf_republisher')
    pub = rospy.Publisher("tf", TFMessage, queue_size=1)
    rospy.Subscriber("/tf2", TFMessage, callback, callback_args=(pub))
    rospy.spin()


if __name__ == '__main__':
    listener()
816
Python
29.259258
187
0.650735
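A quick offline check of the filtering logic in callback(): transforms whose child_frame_id contains one of the iRotate-specific link names are dropped before republishing. The snippet assumes it runs in the same file (or imports callback from it); no roscore is needed.

from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped


class FakePub:
    def publish(self, msg):
        print([t.child_frame_id for t in msg.transforms])


msg = TFMessage()
for frame in ["map", "odom", "base_link", "x_link", "camera_link"]:
    t = TransformStamped()
    t.child_frame_id = frame
    msg.transforms.append(t)

callback(msg, FakePub())  # expected output: ['map', 'odom', 'camera_link']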
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/syntheticdata.py
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. """Helper class for obtaining groundtruth data from OmniKit. Support provided for RGB, Depth, Bounding Box (2D Tight, 2D Loose, 3D), segmentation (instance and semantic), and camera parameters. Typical usage example: kit = OmniKitHelper() # Start omniverse kit sd_helper = SyntheticDataHelper() gt = sd_helper.get_groundtruth(['rgb', 'depth', 'boundingBox2DTight'], viewport) """ import math import time import typing import asyncio import carb import omni import numpy as np import builtins from pxr import Usd class SyntheticDataHelper: def __init__(self): self.app = omni.kit.app.get_app_interface() ext_manager = self.app.get_extension_manager() ext_manager.set_extension_enabled("omni.syntheticdata", True) from omni.syntheticdata import sensors, helpers import omni.syntheticdata._syntheticdata as sd # Must be imported after getting app interface self.sd = sd self.sd_interface = self.sd.acquire_syntheticdata_interface() self.carb_settings = carb.settings.acquire_settings_interface() self.sensor_helper_lib = sensors self.generic_helper_lib = helpers self.sensor_helpers = { "rgb": sensors.get_rgb, "depth": sensors.get_distance_to_image_plane, "depthLinear": sensors.get_distance_to_camera, "instanceSegmentation": sensors.get_instance_segmentation, "semanticSegmentation": sensors.get_semantic_segmentation, "boundingBox2DTight": sensors.get_bounding_box_2d_tight, "boundingBox2DLoose": sensors.get_bounding_box_2d_loose, "boundingBox3D": sensors.get_bounding_box_3d, "motion-vector": sensors.get_motion_vector, "normals": sensors.get_normals, "camera": self.get_camera_params, "pose": self.get_pose, "occlusion": sensors.get_occlusion, } self.sensor_types = { "rgb": self.sd.SensorType.Rgb, "depth": self.sd.SensorType.DistanceToImagePlane, "depthLinear": self.sd.SensorType.DistanceToCamera, "instanceSegmentation": self.sd.SensorType.InstanceSegmentation, "semanticSegmentation": self.sd.SensorType.SemanticSegmentation, "boundingBox2DTight": self.sd.SensorType.BoundingBox2DTight, "boundingBox2DLoose": self.sd.SensorType.BoundingBox2DLoose, "boundingBox3D": self.sd.SensorType.BoundingBox3D, "occlusion": self.sd.SensorType.Occlusion, "motion-vector": self.sd.SensorType.MotionVector, "normals": self.sd.SensorType.Normal, } self.sensor_state = {s: False for s in list(self.sensor_helpers.keys())} def get_camera_params(self, viewport): """Get active camera intrinsic and extrinsic parameters. Returns: A dict of the active camera's parameters. pose (numpy.ndarray): camera position in world coordinates, fov (float): horizontal field of view in radians focal_length (float) horizontal_aperture (float) view_projection_matrix (numpy.ndarray(dtype=float64, shape=(4, 4))) resolution (dict): resolution as a dict with 'width' and 'height'. clipping_range (tuple(float, float)): Near and Far clipping values. 
""" stage = omni.usd.get_context().get_stage() prim = stage.GetPrimAtPath(viewport.get_active_camera()) prim_tf = omni.usd.get_world_transform_matrix(prim) current_time = omni.timeline.get_timeline_interface().get_current_time() view_params = self.generic_helper_lib.get_view_params(viewport) hfov = 2 * math.atan(view_params["horizontal_aperture"] / (2 * view_params["focal_length"])) vfov = prim.GetAttribute('verticalAperture').Get() view_proj_mat = self.generic_helper_lib.get_view_proj_mat(view_params) return { "pose": np.array(prim_tf), "hfov": hfov, "vfov": vfov, "ctime": current_time, "focal_length": view_params["focal_length"], "horizontal_aperture": view_params["horizontal_aperture"], "view_projection_matrix": view_proj_mat, "resolution": {"width": view_params["width"], "height": view_params["height"]}, "clipping_range": view_params["clipping_range"], } def get_pose(self): """Get pose of all objects with a semantic label. """ stage = omni.usd.get_context().get_stage() mappings = self.generic_helper_lib.get_instance_mappings() pose = [] timeline = omni.timeline.get_timeline_interface() time = timeline.get_current_time() * timeline.get_time_codes_per_seconds() time = Usd.TimeCode(time) for m in mappings: prim_path = m[1] prim = stage.GetPrimAtPath(prim_path) prim_tf = omni.usd.get_world_transform_matrix(prim, time) pose.append((str(prim_path), m[2], str(m[3]), np.array(prim_tf))) return pose def initialize(self, sensor_names, viewport_api): """Initialize sensors in the list provided. Args: viewport_api (Any): Viewport from which to retrieve/create sensor. sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize. """ for sensor_name in sensor_names: if sensor_name != "camera" and sensor_name != "pose": self.sensor_helper_lib.enable_sensors(viewport_api, [self.sensor_types[sensor_name]]) if builtins.ISAAC_LAUNCHED_FROM_JUPYTER: data = [] while data == []: self.app.update() data = self.sensor_helpers[sensor_name](viewport_api) else: future = asyncio.ensure_future(self.sensor_helper_lib.next_sensor_data_async(viewport_api)) while not future.done(): self.app.update() self.app.update() async def initialize_async(self, sensor_names, viewport_api): """Initialize sensors in the list provided. Async version Args: viewport_api (Any): Viewport from which to retrieve/create sensor. sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize. """ for sensor_name in sensor_names: if sensor_name != "camera" and sensor_name != "pose": await self.sensor_helper_lib.initialize_async(viewport_api, [self.sensor_types[sensor_name]]) await self.sensor_helper_lib.next_sensor_data_async(viewport_api) pass def get_groundtruth(self, sensor_names, viewport_api, verify_sensor_init=True, wait_for_sensor_data=0.1): """Get groundtruth from specified gt_sensors. Args: sensor_names (list): List of strings of sensor names. Valid sensors names: rgb, depth, instanceSegmentation, semanticSegmentation, boundingBox2DTight, boundingBox2DLoose, boundingBox3D, camera viewport_api (Any): Viewport from which to retrieve/create sensor. verify_sensor_init (bool): Additional check to verify creation and initialization of sensors. wait_for_sensor_data (float): Additional time to sleep before returning ground truth so are correctly filled. 
Default is 0.1 seconds Returns: Dict of sensor outputs """ if wait_for_sensor_data > 0: time.sleep(wait_for_sensor_data) # Create and initialize sensors if verify_sensor_init: loop = asyncio.get_event_loop() if loop and loop.is_running(): carb.log_warn("Set verify_sensor_init to false if running with asyncio") pass else: self.initialize(sensor_names, viewport_api) gt = {} sensor_state = {} # Process non-RT-only sensors for sensor in sensor_names: if sensor not in ["camera", "pose"]: if sensor == "instanceSegmentation": gt[sensor] = self.sensor_helpers[sensor](viewport_api, parsed=True, return_mapping=True) elif sensor == "boundingBox3D": gt[sensor] = self.sensor_helpers[sensor](viewport_api, parsed=True, return_corners=True) else: gt[sensor] = self.sensor_helpers[sensor](viewport_api) self.sensor_helper_lib.create_or_retrieve_sensor(viewport_api, self.sensor_types[sensor]) # sensors are always initialized after they are created sensor_state[sensor] = True elif sensor == "pose": sensor_state[sensor] = True gt[sensor] = self.sensor_helpers[sensor]() else: sensor_state[sensor] = True gt[sensor] = self.sensor_helpers[sensor](viewport_api) gt["state"] = sensor_state return gt def get_semantic_ids(self, semantic_data: list = [[]]) -> typing.List[int]: """Returns unique id's for a semantic image Args: semantic_data (list, optional): Semantic Image. Defaults to [[]]. Returns: typing.List[int]: List of unique semantic IDs in image """ return list(np.unique(semantic_data)) def get_semantic_id_map(self, semantic_labels: list = []) -> dict: """ Get map of semantic ID from label """ output = {} if len(semantic_labels) > 0: for label in semantic_labels: idx = self.sd_interface.get_semantic_segmentation_id_from_data("class", label) output[label] = idx return output def get_semantic_label_map(self, semantic_ids: list = []) -> dict: """ Get map of semantic label from ID """ output = {} if len(semantic_ids) > 0: for idx in semantic_ids: label = self.sd_interface.get_semantic_segmentation_data_from_id(idx) output[idx] = label return output def get_mapped_semantic_data( self, semantic_data: list = [[]], user_semantic_label_map: dict = {}, remap_using_base_class=False ) -> dict: """Map semantic segmentation data to IDs specified by user Usage: gt = get_groundtruth() user_semantic_label_map ={"cone":4, "cylinder":5, "cube":6} mapped_data = get_mapped_semantic_data(gt["semanticSegmentation"], user_semantic_label_map) Args: semantic_data (list, optional): Raw semantic image. Defaults to [[]]. user_semantic_label_map (dict, optional): Dictionary of label to id pairs. Defaults to {}. remap_using_base_class (bool, optional): If multiple class labels are found, use the topmost one. Defaults to False. Returns: dict: [description] """ semantic_data_np = np.array(semantic_data) unique_semantic_ids = list(np.unique(semantic_data_np)) unique_semantic_labels_map = self.get_semantic_label_map(unique_semantic_ids) for unique_id, unique_label in unique_semantic_labels_map.items(): label = unique_label if remap_using_base_class: label = unique_label.split(":")[-1] if label in user_semantic_label_map: semantic_data_np[np.where(semantic_data == unique_id)] = user_semantic_label_map[label] return semantic_data_np.tolist()
10,852
Python
37.214789
145
0.69084
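A ground-truth grab sketch for the helper above, assuming it runs inside an Isaac Sim standalone script with an active viewport.

from omni.kit.viewport.utility import get_active_viewport
from omni.isaac.synthetic_utils import SyntheticDataHelper

viewport = get_active_viewport()
sd_helper = SyntheticDataHelper()
sensors = ["rgb", "depth", "semanticSegmentation", "camera"]
sd_helper.initialize(sensors, viewport)

gt = sd_helper.get_groundtruth(sensors, viewport, verify_sensor_init=False)
rgb = gt["rgb"]               # HxWx4 uint8
hfov = gt["camera"]["hfov"]   # horizontal FOV in radians (see get_camera_params)
ids = sd_helper.get_semantic_ids(gt["semanticSegmentation"])
labels = sd_helper.get_semantic_label_map(ids)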
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/visualization.py
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import struct import random import colorsys import numpy as np from PIL import Image, ImageDraw def random_colours(N, enable_random=True, num_channels=3): """ Generate random colors. Generate visually distinct colours by linearly spacing the hue channel in HSV space and then convert to RGB space. """ start = 0 if enable_random: random.seed(10) start = random.random() hues = [(start + i / N) % 1.0 for i in range(N)] colours = [list(colorsys.hsv_to_rgb(h, 0.9, 1.0)) for i, h in enumerate(hues)] if num_channels == 4: for color in colours: color.append(1.0) if enable_random: random.shuffle(colours) return colours def plot_boxes(ax, bboxes, labels=None, colours=None, label_size=10): import matplotlib.pyplot as plt if colours is None: colours = random_colours(len(bboxes)) if labels is None: labels = [""] * len(bboxes) for bb, label, colour in zip(bboxes, labels, colours): maxint = 2 ** (struct.Struct("i").size * 8 - 1) - 1 # if a bbox is not visible, do not draw if bb[0] != maxint and bb[1] != maxint: x = bb[0] y = bb[1] w = bb[2] - x h = bb[3] - y box = plt.Rectangle((x, y), w, h, fill=False, edgecolor=colour) ax.add_patch(box) if label: font = {"family": "sans-serif", "color": colour, "size": label_size} ax.text(bb[0], bb[1], label, fontdict=font) def colorize_depth(depth_image, width, height, num_channels=3): """ Colorizes depth data for visualization. Args: depth_image (numpy.ndarray): Depth data from the sensor. width (int): Width of the viewport. height (int): Height of the viewport. num_channels (int): Specify number of channels i.e. 3 or 4. """ colorized_image = np.zeros((height, width, num_channels)) depth_image[depth_image == 0.0] = 1e-5 depth_image = np.clip(depth_image, 0, 255) depth_image -= np.min(depth_image) depth_image /= np.max(depth_image) - np.min(depth_image) colorized_image[:, :, 0] = depth_image colorized_image[:, :, 1] = depth_image colorized_image[:, :, 2] = depth_image if num_channels == 4: colorized_image[:, :, 3] = 1 colorized_image = (colorized_image * 255).astype(int) return colorized_image def colorize_segmentation(segmentation_image, width, height, num_channels=3, num_colors=None): """ Colorizes segmentation data for visualization. Args: segmentation_image (numpy.ndarray): Segmentation data from the sensor. width (int): Width of the viewport. height (int): Height of the viewport. num_channels (int): Specify number of channels i.e. 3 or 4. num_colors (int): Specify number of colors for consistency across frames. 
""" segmentation_mappings = segmentation_image[:, :, 0] segmentation_list = np.unique(segmentation_mappings) if num_colors is None: num_colors = np.max(segmentation_list) + 1 color_pixels = random_colours(num_colors, True, num_channels) color_pixels = [[color_pixel[i] * 255 for i in range(num_channels)] for color_pixel in color_pixels] segmentation_masks = np.zeros((len(segmentation_list), *segmentation_mappings.shape), dtype=np.bool) index_list = [] for index, segmentation_id in enumerate(segmentation_list): segmentation_masks[index] = segmentation_mappings == segmentation_id index_list.append(segmentation_id) color_image = np.zeros((height, width, num_channels), dtype=np.uint8) for index, mask, colour in zip(index_list, segmentation_masks, color_pixels): color_image[mask] = color_pixels[index] if index > 0 else 0 return color_image def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3): """ Colorizes 2D bounding box data for visualization. Args: bboxes_2d_data (numpy.ndarray): 2D bounding box data from the sensor. bboxes_2d_rgb (numpy.ndarray): RGB data from the sensor to embed bounding box. num_channels (int): Specify number of channels i.e. 3 or 4. """ semantic_id_list = [] bbox_2d_list = [] rgb_img = Image.fromarray(bboxes_2d_rgb) rgb_img_draw = ImageDraw.Draw(rgb_img) for bbox_2d in bboxes_2d_data: if bbox_2d[5] > 0: semantic_id_list.append(bbox_2d[1]) bbox_2d_list.append(bbox_2d) semantic_id_list_np = np.unique(np.array(semantic_id_list)) color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels) for bbox_2d in bbox_2d_list: index = np.where(semantic_id_list_np == bbox_2d[1])[0][0] bbox_color = color_list[index] outline = (int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2])) if num_channels == 4: outline = ( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(255 * bbox_color[3]), ) rgb_img_draw.rectangle([(bbox_2d[6], bbox_2d[7]), (bbox_2d[8], bbox_2d[9])], outline=outline, width=2) bboxes_2d_rgb = np.array(rgb_img) return bboxes_2d_rgb
5,785
Python
39.461538
110
0.626102
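The colorization helpers above also work offline on plain numpy arrays; only the import path assumes the omni.isaac.synthetic_utils extension is available.

import numpy as np
from omni.isaac.synthetic_utils import visualization as vis

h, w = 120, 160
depth = np.random.uniform(0.5, 5.0, size=(h, w)).astype(np.float32)
depth_rgb = vis.colorize_depth(depth, width=w, height=h, num_channels=3)  # HxWx3, values in 0..255

colours = vis.random_colours(5)  # five visually distinct RGB triplets in [0, 1]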
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/tests/test_synthetic_utils.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # # NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html from omni.isaac.core.utils.viewports import set_camera_view import omni.kit.test import omni.kit.commands import carb import carb.tokens import copy import os import asyncio import numpy as np from pxr import Gf, UsdGeom, UsdPhysics import random # Import extension python module we are testing with absolute import path, as if we are external user (other extension) from omni.isaac.synthetic_utils import SyntheticDataHelper from omni.isaac.synthetic_utils.writers import NumpyWriter from omni.isaac.synthetic_utils.writers import KittiWriter from omni.syntheticdata.tests.utils import add_semantics from omni.isaac.core.utils.physics import simulate_async from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.semantics import add_update_semantics from omni.isaac.core.utils.extensions import get_extension_path_from_name from omni.isaac.core.utils.stage import set_stage_up_axis from omni.isaac.core import PhysicsContext from omni.physx.scripts.physicsUtils import add_ground_plane from omni.kit.viewport.utility import get_active_viewport # Having a test class dervived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestSyntheticUtils(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): await omni.usd.get_context().new_stage_async() await omni.kit.app.get_app().next_update_async() self._physics_rate = 60 set_stage_up_axis("z") PhysicsContext(physics_dt=1.0 / self._physics_rate) self._time_step = 1.0 / self._physics_rate carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int(self._physics_rate)) carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True) carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(self._physics_rate)) carb.settings.get_settings().set("/app/asyncRendering", False) carb.settings.get_settings().set("/app/hydraEngine/waitIdle", True) carb.settings.get_settings().set("/rtx/hydra/enableSemanticSchema", True) await omni.kit.app.get_app().next_update_async() # Start Simulation and wait self._timeline = omni.timeline.get_timeline_interface() self._viewport_api = get_active_viewport() self._usd_context = omni.usd.get_context() self._sd_helper = SyntheticDataHelper() self._synthetic_utils_path = get_extension_path_from_name("omni.isaac.synthetic_utils") self._stage = self._usd_context.get_stage() self._camera_path = "/Camera" camera = self._stage.DefinePrim(self._camera_path, "Camera") self._viewport_api.set_active_camera(self._camera_path) pass # After running each test async def tearDown(self): await omni.kit.app.get_app().next_update_async() self._timeline.stop() while omni.usd.get_context().get_stage_loading_status()[2] > 0: print("tearDown, assets still loading, waiting to finish...") await omni.kit.app.get_app().next_update_async() await 
omni.kit.app.get_app().next_update_async() pass async def initialize_sensors(self): # Initialize syntheticdata sensors await omni.kit.app.get_app().next_update_async() await self._sd_helper.initialize_async( [ "rgb", "depth", "instanceSegmentation", "semanticSegmentation", "boundingBox2DTight", "boundingBox2DLoose", "boundingBox3D", ], self._viewport_api, ) await omni.kit.app.get_app().next_update_async() # Acquire a copy of the ground truth. def get_groundtruth(self): gt = self._sd_helper.get_groundtruth( [ "rgb", "depthLinear", "boundingBox2DTight", "boundingBox2DLoose", "instanceSegmentation", "semanticSegmentation", "boundingBox3D", "camera", "pose", ], self._viewport_api, verify_sensor_init=False, ) return copy.deepcopy(gt) async def load_robot_scene(self): assets_root_path = get_assets_root_path() if assets_root_path is None: carb.log_error("Could not find Isaac Sim assets folder") return robot_usd = assets_root_path + "/Isaac/Robots/Carter/carter_v1.usd" add_ground_plane(self._stage, "/physics/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0, 0, -0.25), Gf.Vec3f(1.0)) # setup high-level robot prim self.prim = self._stage.DefinePrim("/robot", "Xform") self.prim.GetReferences().AddReference(robot_usd) add_semantics(self.prim, "robot") rot_mat = Gf.Matrix3d(Gf.Rotation((0, 0, 1), 90)) omni.kit.commands.execute( "TransformPrimCommand", path=self.prim.GetPath(), old_transform_matrix=None, new_transform_matrix=Gf.Matrix4d().SetRotate(rot_mat).SetTranslateOnly(Gf.Vec3d(0, -0.64, 0)), ) # setup scene camera set_camera_view([3.00, 3.0, 3.00], [0, -0.64, 0], self._camera_path, self._viewport_api) await self.initialize_sensors() # Unit test for sensor groundtruth async def test_groundtruth(self): await self.load_robot_scene() self._timeline.play() await omni.kit.app.get_app().next_update_async() await simulate_async(1.0) await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api) gt = self.get_groundtruth() # Validate Depth groundtruth gt_depth = gt["depthLinear"] self.assertAlmostEqual(np.min(gt_depth), 5.11157, delta=0.1) self.assertAlmostEqual(np.max(gt_depth), 7.4313293, delta=0.1) # Validate 2D BBox groundtruth gt_bbox2d = gt["boundingBox2DTight"] self.assertEqual(len(gt_bbox2d), 1) self.assertAlmostEqual(gt_bbox2d[0][6], 432, delta=2) self.assertAlmostEqual(gt_bbox2d[0][7], 138, delta=2) self.assertAlmostEqual(gt_bbox2d[0][8], 844, delta=2) self.assertAlmostEqual(gt_bbox2d[0][9], 542, delta=2) # Validate semantic segmentation groundtruth - 0 (unlabeled) and 1 (robot) gt_semantic = gt["semanticSegmentation"] self.assertEqual(len(np.unique(gt_semantic)), 2) user_semantic_label_map = {"robot": 4, "cylinder": 5, "cube": 6} mapped_data = self._sd_helper.get_mapped_semantic_data(gt_semantic, user_semantic_label_map, True) unique_data = np.unique(mapped_data) self.assertEqual(unique_data[0], 0) self.assertEqual(unique_data[1], 4) # Validate 3D BBox groundtruth gt_bbox3d = gt["boundingBox3D"] self.assertEqual(len(gt_bbox3d), 1) self.assertAlmostEqual(gt_bbox3d[0][6], -0.43041847, delta=0.01) self.assertAlmostEqual(gt_bbox3d[0][7], -0.31312422, delta=0.01) self.assertAlmostEqual(gt_bbox3d[0][8], -0.25173292, delta=0.01) self.assertAlmostEqual(gt_bbox3d[0][9], 0.24220554, delta=0.01) self.assertAlmostEqual(gt_bbox3d[0][10], 0.3131649, delta=0.01) self.assertAlmostEqual(gt_bbox3d[0][11], 0.4119104, delta=0.01) # Validate camera groundtruth - position, fov, focal length, aperature gt_camera = gt["camera"] gt_camera_trans = gt_camera["pose"][3, :3] self.assertAlmostEqual(gt_camera_trans[0], 
3.000, delta=0.001) self.assertAlmostEqual(gt_camera_trans[1], 3.000, delta=0.001) self.assertAlmostEqual(gt_camera_trans[2], 3.000, delta=0.001) self.assertEqual(gt_camera["resolution"]["width"], 1280) self.assertEqual(gt_camera["resolution"]["height"], 720) self.assertAlmostEqual(gt_camera["fov"], 0.4131223226073451, 1e-5) self.assertAlmostEqual(gt_camera["focal_length"], 50.0, 1e-5) self.assertAlmostEqual(gt_camera["horizontal_aperture"], 20.954999923706055, 1e-2) # Validate pose groundtruth - prim path, semantic label, position gt_pose = gt["pose"] self.assertEqual(len(gt_pose), 1) self.assertEqual(gt_pose[0][0], "/robot") self.assertEqual(gt_pose[0][2], "robot") gt_pose_trans = (gt_pose[0])[3][3, :3] self.assertAlmostEqual(gt_pose_trans[0], 0.0, delta=0.001) self.assertAlmostEqual(gt_pose_trans[1], -0.640, delta=0.001) self.assertAlmostEqual(gt_pose_trans[2], 0.0, delta=0.001) pass # Unit test for data writer async def test_writer(self): await self.load_robot_scene() self._timeline.play() await omni.kit.app.get_app().next_update_async() await simulate_async(1.0) await omni.kit.app.get_app().next_update_async() viewport_window = omni.kit.viewport.utility.get_active_viewport_window() # Setting up config for writer sensor_settings = {} sensor_settings_viewport = {"rgb": {"enabled": True}} viewport_name = viewport_window.title sensor_settings[viewport_name] = copy.deepcopy(sensor_settings_viewport) # Initialize data writer output_folder = os.getcwd() + "/output" data_writer = NumpyWriter(output_folder, 4, 100, sensor_settings) data_writer.start_threads() # Get rgb groundtruth gt = self._sd_helper.get_groundtruth(["rgb"], self._viewport_api, verify_sensor_init=False) # Write rgb groundtruth image_id = 1 groundtruth = {"METADATA": {"image_id": str(image_id), "viewport_name": viewport_name}, "DATA": {}} groundtruth["DATA"]["RGB"] = gt["rgb"] data_writer.q.put(groundtruth) # Validate output file output_file_path = os.path.join(output_folder, viewport_name, "rgb", str(image_id) + ".png") data_writer.stop_threads() await asyncio.sleep(0.1) self.assertEqual(os.path.isfile(output_file_path), True) pass # Unit test for data writer async def test_kitti_writer(self): await self.load_robot_scene() self._timeline.play() await omni.kit.app.get_app().next_update_async() await simulate_async(1.0) await omni.kit.app.get_app().next_update_async() viewport_window = omni.kit.viewport.utility.get_active_viewport_window() # Setting up config for writer sensor_settings = {} sensor_settings_viewport = {"rgb": {"enabled": True}} viewport_name = viewport_window.title sensor_settings[viewport_name] = copy.deepcopy(sensor_settings_viewport) # Initialize data writer output_folder_tight = os.getcwd() + "/kitti_tight" output_folder_loose = os.getcwd() + "/kitti_loose" data_writer_tight = KittiWriter( output_folder_tight, 4, 100, train_size=1, classes="robot", bbox_type="BBOX2DTIGHT" ) data_writer_tight.start_threads() data_writer_loose = KittiWriter( output_folder_loose, 4, 100, train_size=1, classes="robot", bbox_type="BBOX2DLOOSE" ) data_writer_loose.start_threads() # Get rgb groundtruth gt = self._sd_helper.get_groundtruth( ["rgb", "boundingBox2DTight", "boundingBox2DLoose"], self._viewport_api, verify_sensor_init=False ) # Write rgb groundtruth image_id = 0 groundtruth = { "METADATA": { "image_id": str(image_id), "viewport_name": viewport_name, "BBOX2DTIGHT": {}, "BBOX2DLOOSE": {}, }, "DATA": {}, } image = gt["rgb"] groundtruth["DATA"]["RGB"] = image groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"] 
groundtruth["METADATA"]["BBOX2DTIGHT"]["WIDTH"] = image.shape[1] groundtruth["METADATA"]["BBOX2DTIGHT"]["HEIGHT"] = image.shape[0] groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"] groundtruth["METADATA"]["BBOX2DLOOSE"]["WIDTH"] = image.shape[1] groundtruth["METADATA"]["BBOX2DLOOSE"]["HEIGHT"] = image.shape[0] for f in range(2): groundtruth["METADATA"]["image_id"] = image_id data_writer_tight.q.put(copy.deepcopy(groundtruth)) data_writer_loose.q.put(copy.deepcopy(groundtruth)) image_id = image_id + 1 # Validate output file data_writer_tight.stop_threads() data_writer_loose.stop_threads() await asyncio.sleep(0.1) for output_folder in [output_folder_tight, output_folder_loose]: self.assertEqual(os.path.isfile(os.path.join(output_folder + "/training/image_2", str(0) + ".png")), True) self.assertEqual(os.path.isfile(os.path.join(output_folder + "/training/label_2", str(0) + ".txt")), True) self.assertEqual(os.path.isfile(os.path.join(output_folder + "/testing/image_2", str(1) + ".png")), True) pass # create a cube. async def add_cube(self, path, size, offset): cubeGeom = UsdGeom.Cube.Define(self._stage, path) cubePrim = self._stage.GetPrimAtPath(path) # use add_semantics to set its class to Cube add_semantics(cubePrim, "cube") cubeGeom.CreateSizeAttr(size) cubeGeom.ClearXformOpOrder() cubeGeom.AddTranslateOp().Set(offset) await omni.kit.app.get_app().next_update_async() UsdPhysics.CollisionAPI.Apply(cubePrim) return cubePrim, cubeGeom # create a scene with a cube. async def load_cube_scene(self): # ensure we are done with all of scene setup. await omni.kit.app.get_app().next_update_async() # check units meters_per_unit = UsdGeom.GetStageMetersPerUnit(self._stage) add_ground_plane(self._stage, "/physics/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0, 0, -25), Gf.Vec3f(1.0)) # Add a cube at a "close" location self.cube_location = Gf.Vec3f(-300.0, 0.0, 50.0) self.cube, self.cube_geom = await self.add_cube("/World/Cube", 100.0, self.cube_location) # setup scene camera set_camera_view([1000, 1000, 1000], [0, 0, 0], self._camera_path, self._viewport_api) await self.initialize_sensors() # Unit test for sensor groundtruth async def frame_lag_test(self, move): # start the scene # wait for update move(Gf.Vec3f(random.random() * 100, random.random() * 100, random.random() * 100)) await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api) # grab ground truth gt1 = self.get_groundtruth() # move the cube move(Gf.Vec3f(random.random() * 100, random.random() * 100, random.random() * 100)) # wait for update await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api) # grab ground truth gt2 = self.get_groundtruth() await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api) gt3 = self.get_groundtruth() # ensure segmentation is identical gt_seg1 = gt1["semanticSegmentation"] gt_seg2 = gt2["semanticSegmentation"] self.assertEqual(len(np.unique(gt_seg1)), len(np.unique(gt_seg2))) # the cube 3d bboxes should be different after update gt_box3d1 = gt1["boundingBox3D"] gt_box3d2 = gt2["boundingBox3D"] gt_box3d3 = gt3["boundingBox3D"] # check the list size self.assertEqual(len(gt_box3d1), len(gt_box3d2)) # check the corners, they should/must move to pass the test. 
self.assertNotEqual(gt_box3d1["corners"].tolist(), gt_box3d2["corners"].tolist()) # Should be no change between these two frames self.assertEqual(gt_box3d2["corners"].tolist(), gt_box3d3["corners"].tolist()) await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api) # stop the scene pass # Test lag by executing a command async def test_oneframelag_kitcommand(self): await self.load_cube_scene() def set_prim_pose(location): omni.kit.commands.execute( "TransformPrimCommand", path=self.cube.GetPath(), old_transform_matrix=None, new_transform_matrix=Gf.Matrix4d() .SetRotate(Gf.Matrix3d(Gf.Rotation((0, 0, 1), 90))) .SetTranslateOnly(Gf.Vec3d(location)), ) for frame in range(50): await self.frame_lag_test(set_prim_pose) pass # Test lag using a USD prim. async def test_oneframelag_usdprim(self): await self.load_cube_scene() def set_prim_pose(location): properties = self.cube.GetPropertyNames() if "xformOp:translate" in properties: translate_attr = self.cube.GetAttribute("xformOp:translate") translate_attr.Set(location) for frame in range(50): await self.frame_lag_test(set_prim_pose) pass async def test_remap_semantics(self): set_camera_view([1000, 1000, 1000], [0, 0, 0], self._camera_path, self._viewport_api) usd_path = self._synthetic_utils_path + "/data/usd/tests/nested_semantics.usd" self.prim = self._stage.DefinePrim("/test_nested", "Xform") self.prim.GetReferences().AddReference(usd_path) await omni.kit.app.get_app().next_update_async() await self.initialize_sensors() gt = self.get_groundtruth() ids = self._sd_helper.get_semantic_ids(gt["semanticSegmentation"]) labels = self._sd_helper.get_semantic_label_map(ids) # make sure remapping with remap_using_base_class True should work even if we don't have nested classes mapped_id_a = self._sd_helper.get_semantic_ids( self._sd_helper.get_mapped_semantic_data( gt["semanticSegmentation"], {"red": 1, "green": 10, "blue": 100}, remap_using_base_class=True ) ) mapped_id_b = self._sd_helper.get_semantic_ids( self._sd_helper.get_mapped_semantic_data( gt["semanticSegmentation"], {"red": 1, "green": 10, "blue": 100}, remap_using_base_class=False ) ) # if labels aren't nested, they should remain the same unique_data_a = np.unique(mapped_id_a).tolist() unique_data_b = np.unique(mapped_id_b).tolist() self.assertListEqual(unique_data_a, unique_data_b) self.assertEqual(unique_data_a[0], 0) self.assertEqual(unique_data_a[1], 1) self.assertEqual(unique_data_a[2], 10) self.assertEqual(unique_data_a[3], 100) async def test_nested_semantics(self): set_camera_view([1000, 1000, 1000], [0, 0, 0], self._camera_path, self._viewport_api) usd_path = self._synthetic_utils_path + "/data/usd/tests/nested_semantics.usd" self.prim = self._stage.DefinePrim("/test_nested", "Xform") add_update_semantics(self.prim, "combined") self.prim.GetReferences().AddReference(usd_path) await omni.kit.app.get_app().next_update_async() await self.initialize_sensors() gt = self.get_groundtruth() ids = self._sd_helper.get_semantic_ids(gt["semanticSegmentation"]) labels = self._sd_helper.get_semantic_label_map(ids) mapped_id_a = self._sd_helper.get_semantic_ids( self._sd_helper.get_mapped_semantic_data( gt["semanticSegmentation"], {"combined": 99}, remap_using_base_class=True ) ) mapped_id_b = self._sd_helper.get_semantic_ids( self._sd_helper.get_mapped_semantic_data( gt["semanticSegmentation"], {"combined": 99}, remap_using_base_class=False ) ) unique_data_a = np.unique(mapped_id_a).tolist() unique_data_b = np.unique(mapped_id_b).tolist() self.assertEqual(unique_data_a[0], 0) 
self.assertEqual(unique_data_a[1], 99) # remap_using_base_class false should result in the mapping not changing self.assertEqual(unique_data_b[0], 0) self.assertEqual(unique_data_b[1], 1) self.assertEqual(unique_data_b[2], 2) self.assertEqual(unique_data_b[3], 3)
21,136
Python
43.876858
142
0.629731
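A pattern sketch for adding a further test next to TestSyntheticUtils above, reusing its fixtures; the expected number of semantic ids (unlabeled background plus the cube from add_cube) is an assumption, not a verified result.

class TestCubeSemantics(TestSyntheticUtils):
    async def test_cube_semantics(self):
        await self.load_cube_scene()
        self._timeline.play()
        await omni.syntheticdata.sensors.next_sensor_data_async(self._viewport_api)
        gt = self.get_groundtruth()
        # unlabeled background + the "cube" class set by add_cube()
        self.assertEqual(len(np.unique(gt["semanticSegmentation"])), 2)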
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/writers/numpy.py
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. """Helper class for writing groundtruth data offline in numpy format. """ import copy import omni import os import numpy as np from PIL import Image from .base import BaseWriter from omni.isaac.core.utils.viewports import get_viewport_names class NumpyWriter(BaseWriter): def __init__(self, data_dir, num_worker_threads, max_queue_size=500, sensor_settings=None): BaseWriter.__init__(self, data_dir, num_worker_threads, max_queue_size) from omni.isaac.synthetic_utils import visualization self.visualization = visualization self.create_output_folders(sensor_settings) def worker(self): """Processes task from queue. Each tasks contains groundtruth data and metadata which is used to transform the output and write it to disk.""" while True: groundtruth = self.q.get() if groundtruth is None: break filename = groundtruth["METADATA"]["image_id"] viewport_name = groundtruth["METADATA"]["viewport_name"] for gt_type, data in groundtruth["DATA"].items(): if gt_type == "RGB": self.save_image(viewport_name, gt_type, data, filename) elif gt_type == "DEPTH": if groundtruth["METADATA"]["DEPTH"]["NPY"]: self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/" np.save(self.depth_folder + filename + ".npy", data) if groundtruth["METADATA"]["DEPTH"]["COLORIZE"]: self.save_image(viewport_name, gt_type, data, filename) elif gt_type == "DEPTHLINEAR": if groundtruth["METADATA"]["DEPTHLINEAR"]["NPY"]: self.depthLinear_folder = self.data_dir + "/" + str(viewport_name) + "/depthLinear/" np.save(self.depthLinear_folder + filename + ".npy", data) if groundtruth["METADATA"]["DEPTHLINEAR"]["COLORIZE"]: self.save_image(viewport_name, gt_type, data, filename) elif gt_type == "INSTANCE": self.save_segmentation( viewport_name, gt_type, data, filename, groundtruth["METADATA"]["INSTANCE"]["WIDTH"], groundtruth["METADATA"]["INSTANCE"]["HEIGHT"], groundtruth["METADATA"]["INSTANCE"]["COLORIZE"], groundtruth["METADATA"]["INSTANCE"]["MAPPINGS"], groundtruth["METADATA"]["INSTANCE"]["NPY"], ) elif gt_type == "SEMANTIC": self.save_segmentation( viewport_name, gt_type, data, filename, groundtruth["METADATA"]["SEMANTIC"]["WIDTH"], groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"], groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"], groundtruth["METADATA"]["SEMANTIC"]["MAPPINGS"], groundtruth["METADATA"]["SEMANTIC"]["NPY"], ) elif gt_type in ["BBOX2DTIGHT", "BBOX2DLOOSE"]: self.save_bbox( viewport_name, gt_type, data, filename, groundtruth["METADATA"][gt_type]["COLORIZE"], groundtruth["DATA"]["RGB"], groundtruth["METADATA"][gt_type]["NPY"], ) elif gt_type in ["BBOX3D"]: self.save_bbox( viewport_name, gt_type, data, filename, groundtruth["METADATA"][gt_type]["COLORIZE"], groundtruth["METADATA"]["BBOX3D_IMAGE"], groundtruth["METADATA"][gt_type]["NPY"], ) elif gt_type in ["MOTIONVECTOR"]: self.save_motion( viewport_name, gt_type, data, filename, groundtruth["METADATA"][gt_type]["COLORIZE"], groundtruth["DATA"]["RGB"], groundtruth["METADATA"][gt_type]["NPY"], ) elif gt_type == "CAMERA": self.camera_folder = self.data_dir + "/" + str(viewport_name) + "/camera/" np.save(self.camera_folder + filename + ".npy", data) 
elif gt_type == "POSES": self.poses_folder = self.data_dir + "/" + str(viewport_name) + "/poses/" np.save(self.poses_folder + filename + ".npy", data) elif gt_type == "NORMALS": self.normals_folder = self.data_dir + "/" + str(viewport_name) + "/normals/" np.save(self.normals_folder + filename + ".npy", data) else: raise NotImplementedError self.q.task_done() def save_motion( self, viewport_name, data_type, data, filename, width=1280, height=720, display_rgb=True, save_npy=True ): self.motion_folder = self.data_dir + "/" + str(viewport_name) + "/motion-vector/" if save_npy: np.save(self.motion_folder + filename + ".npy", data) def save_segmentation( self, viewport_name, data_type, data, filename, width=1280, height=720, display_rgb=True, mappings=True, save_npy=True): self.instance_folder = self.data_dir + "/" + str(viewport_name) + "/instance/" self.semantic_folder = self.data_dir + "/" + str(viewport_name) + "/semantic/" # Save ground truth data locally as npy if not mappings: data = data[0] if data_type == "INSTANCE" and save_npy: np.save(self.instance_folder + filename + ".npy", data) if data_type == "SEMANTIC" and save_npy: np.save(self.semantic_folder + filename + ".npy", data) if mappings: data = data[0] if display_rgb: image_data = np.frombuffer(data, dtype=np.uint8).reshape(*data.shape, -1) num_colors = 50 if data_type == "SEMANTIC" else None color_image = self.visualization.colorize_segmentation(image_data, width, height, 3, num_colors) # color_image = visualize.colorize_instance(image_data) color_image_rgb = Image.fromarray(color_image, "RGB") if data_type == "INSTANCE": color_image_rgb.save(f"{self.instance_folder}/{filename}.png") if data_type == "SEMANTIC": color_image_rgb.save(f"{self.semantic_folder}/{filename}.png") def save_image(self, viewport_name, img_type, image_data, filename): self.rgb_folder = self.data_dir + "/" + str(viewport_name) + "/rgb/" self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/" self.depthLinear_folder = self.data_dir + "/" + str(viewport_name) + "/depthLinear/" if img_type == "RGB": # Save ground truth data locally as png rgb_img = Image.fromarray(image_data, "RGBA") rgb_img.save(f"{self.rgb_folder}/{filename}.png") elif img_type == "DEPTH" or img_type == "DEPTHLINEAR": # Convert linear depth to inverse depth for better visualization image_data = image_data * 100 # Save ground truth data locally as png image_data[image_data == 0.0] = 1e-5 image_data = np.clip(image_data, 0, 255) image_data -= np.min(image_data) if np.max(image_data) > 0: image_data /= np.max(image_data) depth_img = Image.fromarray((image_data * 255.0).astype(np.uint8)) if img_type == "DEPTH": depth_img.save(f"{self.depth_folder}/{filename}.png") if img_type == "DEPTHLINEAR": depth_img.save(f"{self.depthLinear_folder}/{filename}.png") def save_bbox(self, viewport_name, data_type, data, filename, display_rgb=True, rgb_data=None, save_npy=True): self.bbox_2d_tight_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_tight/" self.bbox_2d_loose_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_loose/" self.bbox_3d_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_3d/" # Save ground truth data locally as npy if data_type == "BBOX2DTIGHT" and save_npy: np.save(self.bbox_2d_tight_folder + filename + ".npy", data) if data_type == "BBOX2DLOOSE" and save_npy: np.save(self.bbox_2d_loose_folder + filename + ".npy", data) if data_type == "BBOX3D" and save_npy: np.save(self.bbox_3d_folder + filename + ".npy", data) if display_rgb and rgb_data is 
not None: if "2D" in data_type: color_image = self.visualization.colorize_bboxes(data, rgb_data) color_image_rgb = Image.fromarray(color_image, "RGBA") if data_type == "BBOX2DTIGHT": color_image_rgb.save(f"{self.bbox_2d_tight_folder}/{filename}.png") if data_type == "BBOX2DLOOSE": color_image_rgb.save(f"{self.bbox_2d_loose_folder}/{filename}.png") if "3D" in data_type: rgb_img = Image.fromarray(rgb_data, "RGBA") rgb_img.save(f"{self.bbox_3d_folder}/{filename}.png") def create_output_folders(self, sensor_settings=None): """Checks if the sensor output folder corresponding to each viewport is created. If not, it creates them.""" if not os.path.exists(self.data_dir): os.mkdir(self.data_dir) if sensor_settings is None: sensor_settings = dict() viewport_names = get_viewport_names() sensor_settings_viewport = { "rgb": {"enabled": True}, "depth": {"enabled": True, "colorize": True, "npy": True}, "depthLinear": {"enabled": True, "colorize": True, "npy": True}, "instance": {"enabled": True, "colorize": True, "npy": True}, "semantic": {"enabled": True, "colorize": True, "npy": True}, "bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True}, "bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True}, "camera": {"enabled": True, "npy": True}, "poses": {"enabled": True, "npy": True}, "motion-vector": {"enabled": True, "npy": True, "colorize": True}, "bbox_3d": {"enabled": True, "npy": True, "colorize": True}, "normals": {"enabled": True, "npy": True, "colorize": True}, } for name in viewport_names: sensor_settings[name] = copy.deepcopy(sensor_settings_viewport) for viewport_name in sensor_settings: viewport_folder = self.data_dir + "/" + str(viewport_name) if not os.path.exists(viewport_folder): os.mkdir(viewport_folder) for sensor_name in sensor_settings[viewport_name]: if sensor_settings[viewport_name][sensor_name]["enabled"]: sensor_folder = self.data_dir + "/" + str(viewport_name) + "/" + str(sensor_name) if not os.path.exists(sensor_folder): os.mkdir(sensor_folder)
12,152
Python
51.83913
150
0.534398
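A minimal usage sketch of NumpyWriter (not part of the file above): the import path is assumed from the file location, the viewport name, image id, and array contents are made up for illustration, and it presumes Isaac Sim is running so the per-viewport output folders can be created. The dict layout mirrors what worker() reads.

import numpy as np
from omni.isaac.synthetic_utils.writers.numpy import NumpyWriter  # import path assumed from the file location

writer = NumpyWriter(data_dir="/tmp/gt_out", num_worker_threads=4)
writer.start_threads()

rgb = np.zeros((720, 1280, 4), dtype=np.uint8)    # RGBA frame (placeholder data)
depth = np.ones((720, 1280), dtype=np.float32)    # linear depth (placeholder data)

groundtruth = {
    "METADATA": {
        "image_id": "000000",
        "viewport_name": "Viewport",
        "DEPTH": {"NPY": True, "COLORIZE": True},
    },
    "DATA": {"RGB": rgb, "DEPTH": depth},
}
writer.q.put(groundtruth)   # a worker thread picks the task up and writes it to disk
writer.stop_threads()       # blocks until the queue is drained

Design-wise, the writer never blocks the simulation loop: frames are queued and written asynchronously by the worker threads.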
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/writers/base.py
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Base class for writing groundtruth data offline.
"""
import atexit
import queue
import threading


class BaseWriter:
    def __init__(self, data_dir, num_worker_threads, max_queue_size=500):
        atexit.register(self.stop_threads)
        # Threading for multiple scenes
        self.num_worker_threads = num_worker_threads
        # Initialize queue with a specified size
        self.q = queue.Queue(max_queue_size)
        self.data_dir = data_dir
        self.threads = []

    def start_threads(self):
        """Start worker threads."""
        for _ in range(self.num_worker_threads):
            t = threading.Thread(target=self.worker, daemon=True)
            t.start()
            self.threads.append(t)

    def stop_threads(self):
        """Waits for all tasks to be completed before stopping worker threads."""
        print("Finish writing data...")
        # Block until all tasks are done
        self.q.join()
        print("Done.")

    def worker(self):
        """Processes tasks from the queue. Each task contains groundtruth data and metadata, which are used to transform the output and write it to disk."""
        pass
1,581
Python
32.659574
150
0.679317
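BaseWriter only provides the queue and the thread pool; a concrete writer overrides worker() to drain self.q, exactly as NumpyWriter and KittiWriter do. A toy sketch (import path assumed from the file location, task layout follows the other writers):

from omni.isaac.synthetic_utils.writers.base import BaseWriter


class PrintWriter(BaseWriter):
    """Toy writer that only logs the image id of every queued task."""

    def worker(self):
        while True:
            data = self.q.get()
            if data is None:  # sentinel used to stop a worker
                break
            print("got task", data["METADATA"]["image_id"])
            self.q.task_done()


writer = PrintWriter(data_dir="/tmp/out", num_worker_threads=2)
writer.start_threads()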
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/writers/kitti.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # """Helper class for writing groundtruth data offline in kitti format. """ import csv import os from PIL import Image from .base import BaseWriter import carb class KittiWriter(BaseWriter): def __init__( self, data_dir="kitti_data", num_worker_threads=4, max_queue_size=500, train_size=10, classes=[], bbox_type="BBOX2DLOOSE", ): BaseWriter.__init__(self, data_dir, num_worker_threads, max_queue_size) self.create_output_folders() self.train_size = train_size self.classes = classes self.bbox_type = bbox_type if self.bbox_type != "BBOX2DLOOSE" and self.bbox_type != "BBOX2DTIGHT": carb.log_error( f"bbox_type must be BBOX2DLOOSE or BBOX2DTIGHT, it is currently set to {self.bbox_type} which is not supported, defaulting to BBOX2DLOOSE" ) self.bbox_type = "BBOX2DLOOSE" def worker(self): """Processes task from queue. Each tasks contains groundtruth data and metadata which is used to transform the output and write it to disk.""" while True: data = self.q.get() if data is None: break else: self.save_image(data) if int(data["METADATA"]["image_id"]) < self.train_size: self.save_label(data) self.q.task_done() def save_label(self, data): """Saves the labels for the 2d bounding boxes in Kitti format.""" label_set = [] viewport_width = data["METADATA"][self.bbox_type]["WIDTH"] viewport_height = data["METADATA"][self.bbox_type]["HEIGHT"] for box in data["DATA"][self.bbox_type]: label = [] # 2D bounding box points x_min, y_min, x_max, y_max = int(box[6]), int(box[7]), int(box[8]), int(box[9]) # Check if bounding boxes are in the viewport if ( x_min < 0 or y_min < 0 or x_max > viewport_width or y_max > viewport_height or x_min > viewport_width or y_min > viewport_height or y_max < 0 or x_max < 0 ): continue semantic_label = str(box[2]) # Skip label if not in class list if self.classes != [] and semantic_label not in self.classes: continue # Adding Kitting Data, NOTE: Only class and 2d bbox coordinates are filled in label.append(semantic_label) label.append(f"{0.00:.2f}") label.append(3) label.append(f"{0.00:.2f}") label.append(x_min) label.append(y_min) label.append(x_max) label.append(y_max) for _ in range(7): label.append(f"{0.00:.2f}") label_set.append(label) with open(os.path.join(self.train_label_dir, f"{data['METADATA']['image_id']}.txt"), "w") as annotation_file: writer = csv.writer(annotation_file, delimiter=" ") writer.writerows(label_set) def save_image(self, data): """Saves the RGB image in the correct directory for kitti""" if int(data["METADATA"]["image_id"]) < self.train_size: rgb_img = Image.fromarray(data["DATA"]["RGB"], "RGBA").convert("RGB") rgb_img.save(f"{self.train_folder}/image_2/{data['METADATA']['image_id']}{'.png'}") else: rgb_img = Image.fromarray(data["DATA"]["RGB"], "RGBA").convert("RGB") rgb_img.save(f"{self.test_folder}/image_2/{data['METADATA']['image_id']}{'.png'}") def create_output_folders(self): """Checks if the output folders are created. 
If not, it creates them.""" if not os.path.exists(self.data_dir): os.mkdir(self.data_dir) self.train_folder = os.path.join(self.data_dir, "training") self.test_folder = os.path.join(self.data_dir, "testing") if not os.path.exists(self.train_folder): os.mkdir(self.train_folder) if not os.path.exists(self.test_folder): os.mkdir(self.test_folder) self.train_img_dir = os.path.join(self.train_folder, "image_2") if not os.path.exists(self.train_img_dir): os.mkdir(self.train_img_dir) self.train_label_dir = os.path.join(self.train_folder, "label_2") if not os.path.exists(self.train_label_dir): os.mkdir(self.train_label_dir) if not os.path.exists(os.path.join(self.test_folder, "image_2")): os.mkdir(os.path.join(self.test_folder, "image_2"))
5,135
Python
36.764706
154
0.578384
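A hypothetical instantiation of KittiWriter using the constructor arguments defined above; the directory name, class list, and train size are illustrative. Queued tasks follow the same METADATA/DATA convention as the other writers, and image ids below train_size end up in training/ together with their label files.

from omni.isaac.synthetic_utils.writers.kitti import KittiWriter  # import path assumed from the file location

writer = KittiWriter(
    data_dir="kitti_data",
    num_worker_threads=4,
    train_size=100,               # image ids below this go to training/, the rest to testing/
    classes=["chair", "table"],   # an empty list keeps every semantic label
    bbox_type="BBOX2DTIGHT",      # falls back to BBOX2DLOOSE if an unsupported value is given
)
writer.start_threads()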
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/login.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import time from .globals import * from .shape import get_links, download_file import os import omni.ui as ui WIDGET_WIDTH = 130 # this function is used to make sure the user can log into shapenet.org. It should be used before creating the pickle. def try_login(username, password): import webbot b = webbot.Browser(showWindow=False) b.go_to("shapenet.org/account") b.type(username, into="username") b.type(password, into="password") b.click("Sign in") time.sleep(1) # if we are logged in we should be able to re-open the page and see congratulations! b.refresh() time.sleep(1) page_source = b.get_page_source() login_success = page_source.find("Congratulations!") > -1 b.quit() return login_success # This is the base script to check if the user has a login and gather the CVS files from # http://shapenet.cs.stanford.edu/shapenet/obj-zip/ShapeNetCore.v1/ to save locally def save_v1_csvs(username, password, save_path): import urllib.request if try_login(username, password): if not os.path.exists(save_path): os.makedirs(save_path) url = g_shapenet_url + "1/" file_count_zero = 57 files = ["04379243","03593526","04225987","02958343","02876657","04460130","03001627","02871439","02942699","02691156","03642806","02801938","03991062","04256520","03624134","02946921","04090263","04468005","03761084","03938244","03636649","02747177","03710193","04530566","03790512","03207941","02828884","03948459","04099429","03691459","03337140","02773838","02933112","02818832","02843684","03211117","03928116","03261776","04401088","04330267","03759954","02924116","03797390","04074963","02808440","02880940","03085013","03467517","04554684","02834778","03325088","04004475","02954340"] for index, href in enumerate(files): print(f"{file_count_zero} --Downloading {href} to {save_path}.") file_count_zero = file_count_zero - 1 download_file(save_path + href, url + href + ".csv") return True else: print("Please go to shapenet.org and get a valid login.") return False # this helper function creates a synsetDB entry from a shapenet v1 cvs file. def create_synsetDBEntry(csv_file): import csv readCSV = csv.reader(csv_file, delimiter=",") skipFirst = True synsetDb = {} for row in readCSV: if skipFirst: skipFirst = False continue modelId = row[0] modelDb = modelId[: modelId.find(".")] modelId = modelId[modelId.find(".") + 1 :] wnsynset = row[1] wnlemmas = row[2] up = row[3] front = row[4] name = row[5] tags = row[6] synsetDb[modelId] = (wnsynset, wnlemmas, up, front, name, tags) return synsetDb # This is the script used to create the shapenet_db2.pickle.bz2 file if the user already has shapenet v1 downloaded. def create_db_from_files(path): import glob csv_files = glob.glob(path + "/*.csv") snDb = {} for filename in csv_files: synsetId = filename[-12:-4] with open(filename, encoding="utf8") as csv_file: snDb[synsetId] = create_synsetDBEntry(csv_file) return snDb # save and test the pickled databse. def save_and_testDB(snDb, out_file): # simple sanity check to make sure the input database is valid so we don't write out a crap one. 
if not len(snDb) == 57: return False import bz2 try: import cPickle as pickle except: import pickle sfile = bz2.BZ2File(out_file, "wb") pickle.dump(snDb, sfile) sfile.close() f = bz2.BZ2File(out_file, "rb") new_dict = pickle.load(f) f.close() if len(new_dict) == 57: print("ID Database created successfully!") return True else: print("Failed to create ID Database :(") return False class ShapenetLogin: def __init__(self, shapenetMenu): self._shapenetMenu = shapenetMenu self._models = {} self._login_window = None self.create_login_window() def _on_login_fn(self, widget): csv_location = get_local_shape_loc() + "/v1_csv/" username = self._username.model.get_value_as_string() password = self._password.model.get_value_as_string() logged_in = False if len(username) > 0 and len(password) > 0: logged_in = save_v1_csvs(username, password, csv_location) else: self._login_window.visible = False flags = ui.WINDOW_FLAGS_NO_RESIZE | ui.WINDOW_FLAGS_MODAL flags |= ui.WINDOW_FLAGS_NO_SCROLLBAR self.invalid_window = ui.Window("Username or Password invalid.", width=500, height=0, flags=flags) with self.invalid_window.frame: with ui.VStack(name="root", style={"VStack::root": {"margin": 10}}, height=0, spacing=20): ui.Label("Pelase enter a valid user and password.", alignment=ui.Alignment.LEFT, word_wrap=True) self.invalid_window.visible = True if not logged_in: print(f"Attempting to use local files if they already exist in {csv_location}.") snDb = create_db_from_files(csv_location) if save_and_testDB(snDb, get_local_shape_loc() + g_pickle_file_name): self._shapenetMenu._hide_login_show_add() if pickle_file_exists(): self._login_window.visible = False def create_login_window(self): flags = ui.WINDOW_FLAGS_NO_RESIZE | ui.WINDOW_FLAGS_MODAL flags |= ui.WINDOW_FLAGS_NO_SCROLLBAR self._login_window = ui.Window("Create Shapenet Database Index File", width=500, height=0, flags=flags) with self._login_window.frame: with ui.VStack(): with ui.HStack(height=20): ui.Label("Username or Email: ", alignment=ui.Alignment.CENTER, width=WIDGET_WIDTH) self._username = ui.StringField() ui.Spacer(width=6) ui.Spacer(height=10) with ui.HStack(height=20): ui.Label("Password:", alignment=ui.Alignment.CENTER, width=WIDGET_WIDTH) self._password = ui.StringField() self._password.password_mode = True ui.Spacer(height=10) with ui.HStack(height=20): ui.Button("Sign In to shapenet.org", clicked_fn=lambda b=None: self._on_login_fn(b)) with ui.VStack(name="root", style={"VStack::root": {"margin": 10}}, height=0, spacing=20): password_message = "You password will not be stored by Isaac Sim, it is only used to log into the shapenet.org web page. Password encription is up to the shapenet.org web page." ui.Label(password_message, alignment=ui.Alignment.LEFT, word_wrap=True)
7,339
Python
39.32967
600
0.631421
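If the 57 synset CSV files are already on disk, the ID database can be built without going through the login UI. A sketch mirroring _on_login_fn (import paths assumed; g_pickle_file_name comes from the extension's global constants; running this assumes Isaac Sim so carb/omni are available):

from omni.isaac.shapenet.login import create_db_from_files, save_and_testDB
from omni.isaac.shapenet.globals import get_local_shape_loc, g_pickle_file_name

csv_dir = get_local_shape_loc() + "/v1_csv/"
snDb = create_db_from_files(csv_dir)  # {synsetId: {modelId: (wnsynset, wnlemmas, up, front, name, tags)}}
ok = save_and_testDB(snDb, get_local_shape_loc() + g_pickle_file_name)
print("ID database written" if ok else "ID database creation failed")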
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/globals.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from .global_constants import * import carb.tokens import os import bz2 import omni try: import cPickle as pickle except: import pickle DEBUG_PRINT_ON = False g_local_shape_loc = None g_shapenet_db = None g_pickle_file_full_name = None def pickle_file_exists(): global g_pickle_file_full_name if g_pickle_file_full_name == None: g_pickle_file_full_name = os.path.realpath(__file__)[:-11] + g_pickle_file_name return os.path.exists(g_pickle_file_full_name) def get_database(): global g_shapenet_db if g_shapenet_db == None: if pickle_file_exists(): f = bz2.BZ2File(g_pickle_file_full_name, "rb") g_shapenet_db = pickle.load(f) f.close() else: g_shapenet_db = None omni.kit.app.get_app().print_and_log(f"Missing shapenet database of names at {g_pickle_file_full_name}.") return g_shapenet_db def get_local_shape_loc(): global g_local_shape_loc if g_local_shape_loc == None: env_path = os.getenv("SHAPENET_LOCAL_DIR") if env_path is None: resolved_data_path = carb.tokens.get_tokens_interface().resolve("${data}") g_local_shape_loc = resolved_data_path + "/shapenet" omni.kit.app.get_app().print_and_log( f"env var SHAPENET_LOCAL_DIR not set, using default data dir {g_local_shape_loc}" ) else: g_local_shape_loc = env_path omni.kit.app.get_app().print_and_log(f"Using local env var SHAPENET_LOCAL_DIR {env_path}") return g_local_shape_loc
2,024
Python
29.681818
117
0.658597
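The lookup above is driven by the SHAPENET_LOCAL_DIR environment variable; the menu help text further down warns that the path must not contain periods. A small sketch (the path is illustrative, and calling these helpers assumes Kit is running):

import os

os.environ["SHAPENET_LOCAL_DIR"] = "/data/ShapeNetCore.v2"  # hypothetical local copy, no '.' in the path

from omni.isaac.shapenet.globals import get_local_shape_loc, get_database

print(get_local_shape_loc())  # -> /data/ShapeNetCore.v2
db = get_database()           # None (plus a logged warning) until the pickle index exists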
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/comm_kit.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import requests

from global_constants import g_bind_address


def setup():
    comm = KitCommunication()
    return comm


class KitCommunication(object):
    def __init__(self, url=g_bind_address[0], port=g_bind_address[1]):
        self._address = "http://" + url + ":" + port

    def post_command(self, request_dict):
        try:
            print("POST_COMMAND Trying: ", request_dict)
            resp = requests.post(self._address, json=request_dict)
            print("POST_COMMAND Response: ", resp.status_code)
            if resp.status_code != requests.codes.ok:
                raise KitEngineException(resp.status_code, resp.json())
            return resp.json()
        except requests.exceptions.RequestException as e:
            print(
                "Are you sure kit is running and the omni.isaac.shapenet plugin is loaded? Because I can't find the server!"
            )


class KitEngineException(Exception):
    def __init__(self, status_code, resp_dict):
        resp_msg = resp_dict["message"] if "message" in resp_dict else "Message not available"
        self.message = f"Kit returned response with status: {status_code} ({requests.status_codes._codes[status_code][0]}), message: {resp_msg}"


class KitCommunicationException(Exception):
    def __init__(self, message):
        self.message = message


def test_comm():
    comm = setup()
    command = {}
    command["synsetId"] = "02691156"
    command["modelId"] = "dd9ece07d4bc696c2bafe808edd44356"
    command["pos"] = (10.0, 11.0, 12.0)  # optional
    command["rot"] = ((0.0, 0.0, 1.0), 90.0)  # optional
    command["scale"] = 1.1  # optional
    print(command)
    comm.post_command(command)
2,247
Python
34.124999
144
0.65198
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/extension.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""
This plugin is used to load shapenet objects into Kit. If the shape already exists as a USD on a connected
omniverse server, then it will use that version, unless there is an override. If not on omniverse, the plugin
will convert the obj from a folder on the machine and upload it to omniverse if there is a connection.
"""
import omni.ext
import omni.kit

from .globals import DEBUG_PRINT_ON
from .menu import ShapenetMenu

EXTENSION_NAME = "ShapeNet Loader"


class Extension(omni.ext.IExt):
    def on_startup(self, ext_id: str):
        if DEBUG_PRINT_ON:
            print("\nI STARTED I STARTED!\n")
        self._menu = ShapenetMenu(ext_id)
        if DEBUG_PRINT_ON:
            print("\nafter ShapenetMenu\n")

    def on_shutdown(self):
        self._menu.shutdown()
        self._menu = None

    def get_name(self):
        """Return the name of the extension"""
        return EXTENSION_NAME
1,369
Python
30.860464
82
0.704894
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/utils.py
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import os import carb LABEL_TO_SYNSET = { "table": "04379243", "monitor": "03211117", "phone": "04401088", "watercraft": "04530566", "chair": "03001627", "lamp": "03636649", "speaker": "03691459", "bench": "02828884", "plane": "02691156", "bathtub": "02808440", "bookcase": "02871439", "bag": "02773838", "basket": "02801938", "bowl": "02880940", "bus": "02924116", "cabinet": "02933112", "camera": "02942699", "car": "02958343", "dishwasher": "03207941", "file": "03337140", "knife": "03624134", "laptop": "03642806", "mailbox": "03710193", "microwave": "03761084", "piano": "03928116", "pillow": "03938244", "pistol": "03948459", "printer": "04004475", "rocket": "04099429", "sofa": "04256520", "washer": "04554684", "rifle": "04090263", "can": "02946921", "bottle": "02876657", "bowl": "02880940", "earphone": "03261776", "mug": "03797390", } SYNSET_TO_LABEL = {v: k for k, v in LABEL_TO_SYNSET.items()} def get_local_shape_loc(): g_local_shape_loc = None env_path = os.getenv("SHAPENET_LOCAL_DIR") if env_path == None: resolved_data_path = carb.tokens.get_tokens_interface().resolve("${data}") g_local_shape_loc = resolved_data_path + "/shapenet" print(f"env var SHAPENET_LOCAL_DIR not set, using default data dir {g_local_shape_loc}") else: g_local_shape_loc = env_path print(f"Using local env var SHAPENET_LOCAL_DIR {env_path}") return g_local_shape_loc async def convert(in_file, out_file, load_materials=False): # This import causes conflicts when global import asyncio import omni.kit.asset_converter def progress_callback(progress, total_steps): pass converter_context = omni.kit.asset_converter.AssetConverterContext() # setup converter and flags converter_context.ignore_materials = not load_materials # converter_context.ignore_animation = False # converter_context.ignore_cameras = True # converter_context.single_mesh = True # converter_context.smooth_normals = True # converter_context.preview_surface = False # converter_context.support_point_instancer = False # converter_context.embed_mdl_in_usd = False # converter_context.use_meter_as_world_unit = True # converter_context.create_world_as_default_root_prim = False instance = omni.kit.asset_converter.get_instance() task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context) success = True while True: success = await task.wait_until_finished() if not success: await asyncio.sleep(0.1) else: break return success def shapenet_convert(categories=None, max_models=50, load_materials=False): """Helper to convert shapenet assets to USD Args: categories (list of string): List of ShapeNet categories to convert. max_models (int): Maximum number of models to convert. load_materials (bool): If true, materials will be loaded from shapenet meshes. 
""" import asyncio import pprint if categories is None: print("The following categories and id's are supported:") pprint.pprint(LABEL_TO_SYNSET) raise ValueError(f"No categories specified via --categories argument") # Ensure all categories specified are valid invalid_categories = [] for c in categories: if c not in LABEL_TO_SYNSET.keys() and c not in LABEL_TO_SYNSET.values(): invalid_categories.append(c) if invalid_categories: raise ValueError(f"The following are not valid ShapeNet categories: {invalid_categories}") # This import needs to occur after kit is loaded so that physx can be discovered local_shapenet = get_local_shape_loc() local_shapenet_output = f"{os.path.abspath(local_shapenet)}_nomat" if load_materials: local_shapenet_output = f"{os.path.abspath(local_shapenet)}_mat" os.makedirs(local_shapenet_output, exist_ok=True) synsets = categories if synsets is None: synsets = LABEL_TO_SYNSET.values() for synset in synsets: print(f"\nConverting synset {synset}...") # If synset is specified by label, convert to synset if synset in LABEL_TO_SYNSET: synset = LABEL_TO_SYNSET[synset] model_dirs = os.listdir(os.path.join(local_shapenet, synset)) for i, model_id in enumerate(model_dirs): if i >= max_models: print(f"max models ({max_models}) reached, exiting conversion") break local_path = os.path.join(local_shapenet, synset, model_id, "models/model_normalized.obj") shape_name = "model_normalized_nomat" if load_materials: shape_name = "model_normalized_mat" out_dir = os.path.join(local_shapenet_output, synset, model_id) os.makedirs(out_dir, exist_ok=True) out_path = os.path.join(out_dir, f"{shape_name}.usd") if not os.path.exists(out_path): status = asyncio.get_event_loop().run_until_complete(convert(local_path, out_path, load_materials)) if not status: print(f"ERROR OmniConverterStatus is {status}") print(f"---Added {out_path}")
5,851
Python
34.466666
115
0.644676
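A usage sketch of shapenet_convert (category names and counts are illustrative): it expects the raw ShapeNetCore download under SHAPENET_LOCAL_DIR and must run inside Isaac Sim's Python, since the conversion goes through omni.kit.asset_converter.

from omni.isaac.shapenet.utils import shapenet_convert, LABEL_TO_SYNSET  # import path assumed from the file location

print(sorted(LABEL_TO_SYNSET))        # labels that can be passed as categories
shapenet_convert(
    categories=["chair", "table"],    # labels or raw synset ids are both accepted
    max_models=10,                    # per-synset cap
    load_materials=False,             # USDs are written next to the dataset under <shapenet>_nomat/
)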
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/menu.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import omni.ui as ui from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription from omni.isaac.ui.menu import make_menu_item_description import weakref from .settings import ShapenetSettings from .globals import * EXTENSION_NAME = "ShapeNet Loader" HELP_TEXT = ( " This omni.isaac.shapenet plugin allows you to add ShapeNetCore.V2 models from shapenet.org to your stage in Omniverse Kit.\n\n" " You can use the ShapeNet menu to add shapes.\n\n" " If should already have ShapeNetCore V2 installed locally, this plugin will use the local files. Use the env var SHAPENET_LOCAL_DIR to set that location (IMPORTANT NOTE: Make sure there are no periods, ., in the path name), otherwise, omni.isaac.shapenet will use the default ${data}/shapenet folder. By using local folders, you can edit shapenet models before their conversion to usd. If you want to keep the original file, just save the modified file as " ' "models/modified/model.obj" in that shape\'s /models folder.\n\n' " If the shape is already on the omniverse server at g_omni_shape_loc (defaults to /Projects/shapenet), then that model will be used instead of the locally saved or modified shapenet obj file.\n\n" ) class ShapenetMenu: def __init__(self, ext_id: str): self._window = None self._settings_ui = None self._models = {} self._menu_items = [ make_menu_item_description(ext_id, EXTENSION_NAME, lambda a=weakref.proxy(self): a._create_window()) ] add_menu_items(self._menu_items, "Isaac Utils") # self._create_window() # comment this out to prevent window from being visible until menu is clicked def _create_window(self): if self._window == None: """ build ShapeNet window""" self._window = ui.Window( title="ShapeNet Loader", width=400, height=150, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM, ) self._window.deferred_dock_in("Console", ui.DockPolicy.DO_NOTHING) with self._window.frame: with ui.VStack(): with ui.HStack(height=20): self._models["add_button"] = ui.Button( "Add A Model", width=0, clicked_fn=lambda b=None: self._settings_ui._on_add_model_fn() ) ui.Spacer() self._models["help_button"] = ui.Button( "Help", width=0, clicked_fn=lambda b=None: self._on_help_menu_click() ) with ui.HStack(height=20): self._create_settings_ui() # TODO need this? self._models["add_button"].visible = True self._window.visible = True def _create_settings_ui(self): self._settings_ui = ShapenetSettings() def _on_help_menu_click(self): help_message = HELP_TEXT flags = ui.WINDOW_FLAGS_NO_RESIZE | ui.WINDOW_FLAGS_MODAL flags |= ui.WINDOW_FLAGS_NO_SCROLLBAR self._help_window = ui.Window("Shapenet Help", width=500, height=0, flags=flags) with self._help_window.frame: with ui.VStack(name="root", style={"VStack::root": {"margin": 10}}, height=0, spacing=20): ui.Label(help_message, alignment=ui.Alignment.LEFT, word_wrap=True) def shutdown(self): remove_menu_items(self._menu_items, "Isaac Utils") self._menu_items = None self._settings_ui = None
4,045
Python
46.599999
468
0.632138
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/shape.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import omni.client import omni.kit import omni.usd import asyncio import os from pxr import UsdGeom, Gf, Tf import random import sys from .globals import * def file_exists_on_omni(file_path): result, _ = omni.client.stat(file_path) if result == omni.client.Result.OK: return True return False async def create_folder_on_omni(folder_path): if not file_exists_on_omni(folder_path): result = await omni.client.create_folder_async(folder_path) return result == omni.client.Result.OK async def convert(in_file, out_file): # This import causes conflicts when global? import omni.kit.asset_converter as assetimport # Folders must be created first through usd_ext of omni won't be able to create the files creted in them in the current session. out_folder = out_file[0 : out_file.rfind("/") + 1] # only call create_folder_on_omni if it's connected to an omni server if out_file.startswith("omniverse://"): await create_folder_on_omni(out_folder + "materials") def progress_callback(progress, total_steps): pass converter_context = omni.kit.asset_converter.AssetConverterContext() # setup converter and flags converter_context.as_shapenet = True converter_context.single_mesh = True instance = omni.kit.asset_converter.get_instance() task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context) success = True while True: success = await task.wait_until_finished() if not success: await asyncio.sleep(0.1) else: break return success # This is the main entry point for any function that wants to add a shape to the scene. # Care must be taken when running this on a seperate thread from the main thread because # it calls c++ modules from python which hold the GIL. def addShapePrim( omniverseServer, synsetId, modelId, pos, rot, scale, auto_add_physics, use_convex_decomp, do_not_place=False ): # allow for random ids shapenet_db = get_database() if synsetId == None or synsetId == "random": synsetId = random.choice(list(shapenet_db)) if modelId == None or modelId == "random": modelId = random.choice(list(shapenet_db[synsetId])) # use shapenet v2 for models # Get the local file system path and the omni server path local_folder = get_local_shape_loc() + "/" + synsetId + "/" + modelId + "/" local_path = local_folder + "models/model_normalized.obj" local_modified_path = local_folder + "models/modified/model.obj" global g_omni_shape_loc omni_shape_loc = "omniverse://" + omniverseServer + g_omni_shape_loc (result, entry) = asyncio.new_event_loop().run_until_complete(omni.client.stat_async(omni_shape_loc)) if not result == omni.client.Result.OK: print("Saving converted files locally since omniverse server is not connected") omni_shape_loc = get_local_shape_loc() + "/local-converted-USDs" omni_path = ( omni_shape_loc + "/n" + synsetId + "/i" + modelId + "/" ) # don't forget to add the name at the end and .usd omni_modified_path = omni_shape_loc + "/n" + synsetId + "/i" + modelId + "/modified/" stage = omni.usd.get_context().get_stage() if not stage: return "ERROR Could not get the stage." # Get the name of the shapenet object reference in the stage if it exists # (i.e. 
it has been added already and is used in another location on the stage). synsetID_path = g_root_usd_namespace_path + "/n" + synsetId over_path = synsetID_path + "/i" + modelId # Get the name of the instance we will add with the transform, this is the actual visible prim # instance of the reference to the omniverse file which was converted to local disk after global g_shapenet_db g_shapenet_db = get_database() if g_shapenet_db == None: shape_name = "" print("Please create an Shapenet ID Database with the menu.") else: shape_name = Tf.MakeValidIdentifier(g_shapenet_db[synsetId][modelId][4]) if shape_name == "": shape_name = "empty_shape_name" prim_path = str(stage.GetDefaultPrim().GetPath()) + "/" + shape_name prim_path_len = len(prim_path) shape_name_len = len(shape_name) # if there is only one instance, we don't add a _# postfix, but if there is multiple, then the second instance # starts with a _1 postfix, and further additions increase that number. insta_count = 0 while stage.GetPrimAtPath(prim_path): insta_count += 1 prim_path = f"{prim_path[:prim_path_len]}_{insta_count}" shape_name = f"{shape_name[:shape_name_len]}_{insta_count}" omni_path = omni_path + shape_name + ".usd" omni_modified_path = omni_modified_path + shape_name + ".usd" # If the prim refernce to the omnivers file is not already on # the stage the stage we will need to add it. place_later = False if not stage.GetPrimAtPath(over_path): print(f"-Shapenet is adding {shape_name} to the stage for the first time.") # If the files does not already exist in omniverse we will have to add it there # with our automatic conversion of the original shapenet file. # We need to check if the modified file is on disk, so if it's not on the omni server it will # be added there even if the non modified one already exists on omni. if os.path.exists(local_modified_path) or file_exists_on_omni(omni_modified_path): omni_path = omni_modified_path if not file_exists_on_omni(omni_path): # If the original omniverse file does not exist locally, we will have to pull # it from Stanford's shapenet database on the web. if os.path.exists(local_modified_path): local_path = local_modified_path omni_path = omni_modified_path if not os.path.exists(local_path): # Pull the shapenet files to the local drive for conversion to omni:usd no_model_message = f"The file does not exist at {local_path}, are you sure you have the env var SHAPENET_LOCAL_DIR set and the shapnet database downloaded to it?" print(no_model_message) return f"ERROR {no_model_message}" # Add The file to omniverse here, if you add them asyncronously, then you have to do the # rest of the scene adding later. print(f"---Converting {shape_name}...") status = asyncio.get_event_loop().run_until_complete(convert(local_path, omni_path)) if not status: return f"ERROR OmniConverterStatus is {status}" print(f"---Added to Omniverse as {omni_path}.") # Add the over reference of the omni file to the stage here. print(f"----Adding over of {over_path} to stage.") if not do_not_place and not place_later: over = stage.OverridePrim(over_path) over.GetReferences().AddReference(omni_path) # Add the instance of the shape here. if not do_not_place and not place_later: prim = stage.DefinePrim(prim_path, "Xform") prim.GetReferences().AddInternalReference(over_path) # shapenet v2 models are normalized to 1 meter, and rotated -90deg on the x axis. 
metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage) scaled_scale = scale / metersPerUnit rot = Gf.Rotation(Gf.Vec3d(1, 0, 0), 90) * rot addobject_fn(prim.GetPath(), pos, rot, scaled_scale) # add physics if auto_add_physics: from omni.physx.scripts import utils print("Adding PHYSICS to ShapeNet model") shape_approximation = "convexHull" if use_convex_decomp: shape_approximation = "convexDecomposition" utils.setRigidBody(prim, shape_approximation, False) return prim return None def get_min_max_vert(obj_file_name): min_x = min_y = min_z = sys.float_info.max max_x = max_y = max_z = -sys.float_info.max with open(obj_file_name, "r") as fi: for ln in fi: if ln.startswith("v "): vx = float(ln[2:].partition(" ")[0]) vy = float(ln[2:].partition(" ")[2].partition(" ")[0]) vz = float(ln[2:].partition(" ")[2].partition(" ")[2]) min_x = min(min_x, vx) min_y = min(min_y, vy) min_z = min(min_z, vz) max_x = max(max_x, vx) max_y = max(max_y, vy) max_z = max(max_z, vz) return Gf.Vec3f(min_x, min_y, min_z), Gf.Vec3f(max_x, max_y, max_z) # Got this From Lou Rohan... Thanks Lou! # objectpath - path in omniverse - omni:/Projects/Siggraph2019/AtticWorkflow/Props/table_cloth/table_cloth.usd # objectname - name you want it to be called in the stage # xform - Gf.Matrix4d def addobject_fn(path, position, rotation, scale): # The original model was translated by the centroid, and scaled to be normalized by the length of the # hypotenuse of the bbox translate_mtx = Gf.Matrix4d() rotate_mtx = Gf.Matrix4d() scale_mtx = Gf.Matrix4d() translate_mtx.SetTranslate(position) # centroid/metersPerUnit) rotate_mtx.SetRotate(rotation) scale_mtx = scale_mtx.SetScale(scale) transform_matrix = scale_mtx * rotate_mtx * translate_mtx omni.kit.commands.execute("TransformPrimCommand", path=path, new_transform_matrix=transform_matrix)
9,960
Python
41.751073
178
0.654618
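A sketch of calling the entry point above directly (values are illustrative); it assumes Kit is running with a stage open and that the ShapeNet ID database has already been created. The function returns the placed prim, an "ERROR ..." string, or None.

from pxr import Gf
from omni.isaac.shapenet.shape import addShapePrim

prim = addShapePrim(
    omniverseServer="localhost",
    synsetId="03001627",                  # chair; "random" is also accepted
    modelId="random",
    pos=Gf.Vec3d(0.0, 0.0, 0.5),
    rot=Gf.Rotation(Gf.Vec3d(0, 0, 1), 45.0),
    scale=1.0,
    auto_add_physics=True,                # wraps the prim with a convex-hull rigid body
    use_convex_decomp=False,
)
print(prim.GetPath() if prim is not None and not isinstance(prim, str) else prim)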
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/comm.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # from pxr import Gf import traceback from .globals import * from .shape import addShapePrim def process_request_in_thread(thread_type, responses_queue, menu, request): response = {"success": True, "message": ""} if "omniverseServer" not in request: response["message"] += "No omniverseServer requested. " response["success"] = False else: omniverseServer = request["omniverseServer"] response["message"] += "omniverseServer = " + omniverseServer + ". " if "synsetId" not in request: response["message"] += "No synsetId requested. " response["success"] = False else: synsetId = request["synsetId"] response["message"] += "synsetId = " + synsetId + ". " if "modelId" not in request: response["message"] += "No modelId requested. " response["success"] = False else: modelId = request["modelId"] response["message"] += "modelId = " + modelId + ". " # transfomrs have default values so are optional if "pos" not in request: pos = Gf.Vec3d(0, 0, 0) else: pos = Gf.Vec3d(request["pos"]) response["message"] += f"pos = {pos}. " if "rot" not in request: rot = Gf.Rotation(Gf.Vec3d(0, 1, 0), 0) else: try: rot = Gf.Rotation(request["rot"][0], request["rot"][1]) response["message"] += f"rot = {rot}. " except: rot = Gf.Rotation(Gf.Vec3d(0, 1, 0), 0) traceback.print_exc() if "scale" not in request: scale = 1.0 else: scale = request["scale"] response["message"] += f"scale = {scale}. " # User can change some global variables with outside commands sent through here. global g_omni_shape_loc if "g_omni_shape_loc" in request: g_omni_shape_loc = request["g_omni_shape_loc"] response["message"] += f"g_omni_shape_loc = {g_omni_shape_loc}. " global g_local_shape_loc if "g_local_shape_loc" in request: g_local_shape_loc = request["g_local_shape_loc"] response["message"] += f"g_local_shape_loc = {g_local_shape_loc}. " global g_root_usd_namespace_path if "g_root_usd_namespace_path" in request: g_root_usd_namespace_path = request["g_root_usd_namespace_path"] response["message"] += f"g_root_usd_namespace_path = {g_root_usd_namespace_path}. " if "auto_add_physics" not in request: auto_add_physics = 0 else: auto_add_physics = request["auto_add_physics"] response["message"] += f"auto_add_physics = {auto_add_physics}. " if "use_convex_decomp" not in request: use_convex_decomp = 0 else: use_convex_decomp = request["use_convex_decomp"] response["message"] += f"use_convex_decomp = {use_convex_decomp}. " if "do_not_place" not in request: do_not_place = 0 else: do_not_place = request["do_not_place"] response["message"] += f"do_not_place = {do_not_place}. " if response["success"]: try: # This is where all the work is done once the message is decoded. added_prim = addShapePrim( request["omniverseServer"], request["synsetId"], request["modelId"], pos, rot, scale, auto_add_physics, use_convex_decomp, do_not_place, ) if added_prim is None: response["message"] += "Didn't add object." else: response["message"] += "Added object: " + added_prim.GetPath().pathString except: response["message"] += " had Error, so could not run addShapePrim." response["success"] = False traceback.print_exc() responses_queue.put(response)
4,299
Python
34.53719
91
0.589207
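The handler above only requires omniverseServer, synsetId and modelId; every other key falls back to a default. A request dict covering the optional keys (values illustrative) that could be posted with KitCommunication.post_command() from comm_kit.py:

request = {
    "omniverseServer": "localhost",
    "synsetId": "04379243",            # table; "random" is also accepted
    "modelId": "random",
    "pos": (0.0, 0.0, 0.0),
    "rot": ((0.0, 0.0, 1.0), 90.0),    # (axis, angle in degrees)
    "scale": 0.5,
    "auto_add_physics": 1,
    "use_convex_decomp": 0,
    "do_not_place": 0,
}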
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.shapenet/omni/isaac/shapenet/settings.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import carb from omni.kit.widget.settings import create_setting_widget, SettingType import omni.ui as ui from pxr import Gf from .globals import * from .shape import addShapePrim from .globals import g_default_omni_server class ShapenetSettings: def __init__(self): self._settings = carb.settings.get_settings() # note: Not sure this is the best place to set default values. self._settings.set_default_string("/isaac/shapenet/omniverseServer", g_default_omni_server) self._settings.set_default_string("/isaac/shapenet/synsetId", "random") self._settings.set_default_string("/isaac/shapenet/modelId", "random") self._settings.set_float_array("/isaac/shapenet/pos", [0.0, 0.0, 0.0]) self._settings.set_float_array("/isaac/shapenet/rotaxis", [0.0, 0.0, 0.0]) self._settings.set_default_float("/isaac/shapenet/rotangle", 0.0) self._settings.set_default_float("/isaac/shapenet/scale", 1.0) self._settings.set_default_bool("/isaac/shapenet/auto_add_physics", False) self._settings.set_default_bool("/isaac/shapenet/use_convex_decomp", False) self._build_ui() def _build_ui(self): """ Add Shape Settings """ with ui.CollapsableFrame(title="Add Model Parameters"): with ui.VStack(spacing=2): with ui.HStack(height=20): ui.Label("Omniverse Server", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/omniverseServer", SettingType.STRING) with ui.HStack(height=20): ui.Label("synsetId and modelId", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/synsetId", SettingType.STRING) ui.Spacer() create_setting_widget("/isaac/shapenet/modelId", SettingType.STRING) with ui.HStack(height=20): ui.Label("X Y Z Position", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/pos", SettingType.DOUBLE3) with ui.HStack(height=20): ui.Label("X Y Z Axis Angle", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/rotaxis", SettingType.DOUBLE3) ui.Spacer() create_setting_widget("/isaac/shapenet/rotangle", SettingType.FLOAT) with ui.HStack(height=20): ui.Label("Scale of add", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/scale", SettingType.FLOAT) with ui.HStack(height=20): ui.Label("Automatically add physics", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/auto_add_physics", SettingType.BOOL) with ui.HStack(height=20): ui.Label("Use convex decomponsition", word_wrap=True, width=ui.Percent(35)) create_setting_widget("/isaac/shapenet/use_convex_decomp", SettingType.BOOL) def _on_add_model_fn(self): pos = self.getPos() rot = self.getRot() scale = self.getScale() global g_shapenet_db g_shapenet_db = get_database() if g_shapenet_db == None: print( "Please create an Shapenet ID by logging into shapenet.org with the UI, or by downloading it manually and setting the SHAPENET_LOCAL_DIR environment variable." 
) return synsetId = self.getSynsetId() modelId = self.getModelId() return addShapePrim( self._settings.get("/isaac/shapenet/omniverseServer"), synsetId, modelId, pos, rot, scale, self._settings.get("/isaac/shapenet/auto_add_physics"), self._settings.get("/isaac/shapenet/use_convex_decomp"), ) def getPos(self): pos = self._settings.get("/isaac/shapenet/pos") return Gf.Vec3d(pos[0], pos[1], pos[2]) def getRot(self): axis = self._settings.get("/isaac/shapenet/rotaxis") a = self._settings.get("/isaac/shapenet/rotangle") return Gf.Rotation(Gf.Vec3d(axis[0], axis[1], axis[2]), a) def getScale(self): s = self._settings.get("/isaac/shapenet/scale") return s def getSynsetId(self): s = self._settings.get("/isaac/shapenet/synsetId") return s def getModelId(self): s = self._settings.get("/isaac/shapenet/modelId") return s
5,073
Python
43.902654
175
0.617189
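The UI above only edits carb settings under /isaac/shapenet/, so the same parameters can be prepared programmatically before pressing "Add A Model". A sketch assuming carb's generic set() accepts these value types:

import carb.settings

settings = carb.settings.get_settings()
settings.set("/isaac/shapenet/synsetId", "03001627")
settings.set("/isaac/shapenet/modelId", "random")
settings.set("/isaac/shapenet/pos", [0.0, 0.0, 0.5])
settings.set("/isaac/shapenet/scale", 2.0)
settings.set("/isaac/shapenet/auto_add_physics", True)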
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_recorder/omni/isaac/synthetic_recorder/synthetic_recorder_extension.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import os import gc import time import asyncio import json import carb import carb.events import omni.kit.ui import omni.ui as ui import omni.replicator.core as rep import omni.timeline from omni.kit.viewport.utility import get_active_viewport from omni.kit.window.extensions.utils import open_file_using_os_default from omni.replicator.core import orchestrator from pxr import Semantics from functools import lru_cache from enum import Enum PARAM_TOOLTIPS = { "rgb": "Produces an array of type np.uint8 with shape (width, height, 4), where the four channels correspond to R,G,B,A.", "bounding_box_2d_tight": "Outputs tight 2d bounding box of each entity with semantics in the camera's viewport.\nTight bounding boxes bound only the visible pixels of entities.\nCompletely occluded entities are ommited.\nBounds only visible pixels.", "bounding_box_2d_loose": "Outputs loose 2d bounding box of each entity with semantics in the camera's field of view.\nLoose bounding boxes bound the entire entity regardless of occlusions.\nWill produce the loose 2d bounding box of any prim in the viewport, no matter if is partially occluded or fully occluded.", "semantic_segmentation": "Outputs semantic segmentation of each entity in the camera's viewport that has semantic labels.\nIf colorize is set to True (mapping from color to semantic labels), the image will be a 2d array of types np.uint8 with 4 channels.\nIf colorize is set to False (mapping from semantic id to semantic labels), the image will be a 2d array of types np.uint32 with 1 channel, which is the semantic id of the entities.", "colorize_semantic_segmentation": "If True, semantic segmentation is converted to an image where semantic ids are mapped to colors and saved as a uint8 4 channel PNG image.\nIf False, the output is saved as a uint32 PNG image.", "instance_id_segmentation": "Outputs instance id segmentation of each entity in the camera's viewport.\nThe instance id is unique for each prim in the scene with different paths.\nIf colorize is set to True (mapping from color to usd prim path of that entity), the image will be a 2d array of types np.uint8 with 4 channels.\nIf colorize is set to False (mapping from instance id to usd prim path of that entity), the image will be a 2d array of types np.uint32 with 1 channel, which is the instance id of the entities.", "colorize_instance_id_segmentation": "If True, instance id segmentation is converted to an image where instance ids are mapped to colors and saved as a uint8 4 channel PNG image.\nIf False, the output is saved as a uint32 PNG image.", "instance_segmentation": "Outputs instance segmentation of each entity in the camera' viewport.\nThe main difference between instance id segmentation and instance segmentation are that instance segmentation annotator goes down the hierarchy to the lowest level prim which has semantic labels,\n whereas instance id segmentation always goes down to the leaf prim.\nIf colorize is set to True (mapping from color to usd prim path of that semantic entity), the image will be a 2d array of types np.uint8 with 4 channels.\nIf colorize is set to False 
(mapping from instance id to usd prim path of that semantic entity), the image will be a 2d array of types np.uint32 with 1 channel, which is the instance id of the semantic entities.", "colorize_instance_segmentation": "If True, instance segmentation is converted to an image where instance are mapped to colors and saved as a uint8 4 channel PNG image.\nIf False, the output is saved as a uint32 PNG image.", "distance_to_camera": "Outputs a depth map from objects to camera positions.\nProduces a 2d array of types np.float32 with 1 channel.", "distance_to_image_plane": "Outputs a depth map from objects to image plane of the camera.\nProduces a 2d array of types np.float32 with 1 channel.", "bounding_box_3d": "Outputs 3D bounding box of each entity with semantics in the camera's viewport, generated regardless of occlusion.", "occlusion": "Outputs the occlusion of each entity in the camera's viewport.\nContains the instanceId, semanticId and the occlusionRatio.", "normals": "Produces an array of type np.float32 with shape (height, width, 4).\nThe first three channels correspond to (x, y, z).\nThe fourth channel is unused.", "motion_vectors": "Outputs a 2D array of motion vectors representing the relative motion of a pixel in the camera's viewport between frames.\nProduces a 2darray of types np.float32 with 4 channels.\nEach value is a normalized direction in 3D space.\nThe values represent motion relative to camera space.", "camera_params": "Outputs the camera model (pinhole or fisheye models), view matrix, projection matrix, fisheye nominal width/height, fisheye optical centre, fisheye maximum field of view, fisheye polynomial, near/far clipping range.", "pointcloud": "Outputs a 2D array of shape (N, 3) representing the points sampled on the surface of the prims in the viewport, where N is the number of point.\nPoint positions are in the world space.\nSample resolution is determined by the resolution of the render product.\nTo get the mapping from semantic id to semantic labels, pointcloud annotator is better used with semantic segmentation annotator, and users can extract the idToLabels data from the semantic segmentation annotator.", "pointcloud_include_unlabelled": "If True, pointcloud annotator will capture any prim in the camera's perspective, not matter if it has semantics or not.\nIf False, only prims with semantics will be captured.", "skeleton_data": "Retrieves skeleton data given skeleton prims and camera parameters", "s3_bucket": "The S3 Bucket name to write to. If not provided, disk backend will be used instead.\nThis backend requires that AWS credentials are set up in ~/.aws/credentials.", "s3_region": "If provided, this is the region the S3 bucket will be set to. 
Default: us-east-1", "s3_endpoint": "Gateway endpoint for Amazon S3", } MAX_RESOLUTION = (7680, 4320) # 8K WINDOW_NAME = "Synthetic Data Recorder" MENU_PATH = f"Replicator/{WINDOW_NAME}" class OutWriteType(Enum): OVERWRITE = 0 INCREMENT = 1 TIMESTAMP = 2 @lru_cache() def _ui_get_delete_glyph(): return omni.ui.get_custom_glyph_code("${glyphs}/menu_delete.svg") @lru_cache() def _ui_get_open_folder_glyph(): return omni.ui.get_custom_glyph_code("${glyphs}/folder_open.svg") @lru_cache() def _ui_get_reset_glyph(): return omni.ui.get_custom_glyph_code("${glyphs}/menu_refresh.svg") class SyntheticRecorderExtension(omni.ext.IExt): def on_startup(self, ext_id: str): """Caled to load the extension""" self._ext_id = ext_id self._window = ui.Window(WINDOW_NAME, dockPreference=ui.DockPreference.RIGHT_BOTTOM, visible=True) self._window.deferred_dock_in("Property", omni.ui.DockPolicy.DO_NOTHING) editor_menu = omni.kit.ui.get_editor_menu() if editor_menu: self._menu = editor_menu.add_item(MENU_PATH, self._menu_callback, toggle=True, value=True) self._window.set_visibility_changed_fn(self._visibility_changed_fn) self._writer_name = "BasicWriter" self._custom_writer_name = "MyCustomWriter" self._writer = None self._num_frames = 0 self._rt_subframes = 0 self._control_timeline = False self._orchestrator_status = rep.orchestrator.get_status() self._in_running_state = False # Orchestrator status update callback self._orchestrator_status_cb = rep.orchestrator.register_status_callback(self._on_orchestrator_status_changed) # Stage event callback self._sub_stage_event = ( omni.usd.get_context() .get_stage_event_stream() .create_subscription_to_pop_by_type(int(omni.usd.StageEventType.CLOSING), self._on_stage_closing_event) ) # Editor quit callback self._sub_shutdown = ( omni.kit.app.get_app() .get_shutdown_event_stream() .create_subscription_to_pop_by_type( omni.kit.app.POST_QUIT_EVENT_TYPE, self._on_editor_quit_event, name="omni.isaac.synthetic_recorder::shutdown_callback", order=0, ) ) self._config_dir = os.path.abspath( os.path.join(omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id), "data", "") ) self._last_config_path = os.path.join(self._config_dir, "last_config.json") self._custom_params_path = "" self._config_file = "custom_config.json" self._out_working_dir = os.getcwd() + "/" self._out_dir = "_out_sdrec" self._out_write_type = OutWriteType.OVERWRITE self._use_s3 = False self._s3_params = {"s3_bucket": "", "s3_region": "", "s3_endpoint": ""} self._basic_writer_params = { "rgb": True, "bounding_box_2d_tight": False, "bounding_box_2d_loose": False, "semantic_segmentation": False, "colorize_semantic_segmentation": False, "instance_id_segmentation": False, "colorize_instance_id_segmentation": False, "instance_segmentation": False, "colorize_instance_segmentation": False, "distance_to_camera": False, "distance_to_image_plane": False, "bounding_box_3d": False, "occlusion": False, "normals": False, "motion_vectors": False, "camera_params": False, "pointcloud": False, "pointcloud_include_unlabelled": False, "skeleton_data": False, } self._render_products = [] self._rp_data = [["/OmniverseKit_Persp", 512, 512]] # UI - frames collapsed state self._writer_frame_collapsed = False self._writer_params_frame_collapsed = True self._rp_frame_collapsed = False self._output_frame_collapsed = False self._s3_params_frame_collapsed = True self._config_frame_collapsed = True self._control_frame_collapsed = False self._control_params_frame_collapsed = False # UI - Buttons self._start_stop_button = None 
self._pause_resume_button = None # Load latest or default config values if os.path.isfile(self._last_config_path): self.load_config(self._last_config_path) else: self.load_config(os.path.join(self._config_dir, "default_config.json")) # Build the window ui self._build_window_ui() def _menu_callback(self, menu, value): self._window.visible = not self._window.visible def _visibility_changed_fn(self, visible): omni.kit.ui.get_editor_menu().set_value(MENU_PATH, visible) def _on_orchestrator_status_changed(self, status): new_status = status is not self._orchestrator_status if new_status: self._orchestrator_status = status # Check if the recorder was running and it stopped because it reached the number of requested frames has_finished_recording = self._in_running_state and status is rep.orchestrator.Status.STOPPED if has_finished_recording: asyncio.ensure_future(self._on_orchestrator_finish_async()) def _on_stage_closing_event(self, e: carb.events.IEvent): self._disable_all_buttons() if self._orchestrator_status is not orchestrator.Status.STOPPED: rep.orchestrator.stop() self._clear_recorder() self._enable_buttons(case="reset") def _on_editor_quit_event(self, e: carb.events.IEvent): # Fast shutdown of the extension, stop recorder save config files if self._orchestrator_status is not orchestrator.Status.STOPPED: rep.orchestrator.stop() self._clear_recorder() self.save_config(self._last_config_path) def on_shutdown(self): # Clean shutdown of the extension, called when the extension is unloaded (not called when the editor is closed) if self._orchestrator_status is not orchestrator.Status.STOPPED: rep.orchestrator.stop() self._clear_recorder() self._orchestrator_status_cb.unregister() self.save_config(self._last_config_path) editor_menu = omni.kit.ui.get_editor_menu() if editor_menu: self._menu = editor_menu.remove_item(MENU_PATH) self._menu = None self._window = None gc.collect() def _open_dir(self, path): if not os.path.isdir(path): carb.log_warn(f"Could not open directory {path}.") return open_file_using_os_default(path) def load_config(self, path): if not os.path.isfile(path): carb.log_warn(f"Could not find config file {path}.") return with open(path, "r") as f: config = json.load(f) if "writer_name" in config and config["writer_name"]: self._writer_name = config["writer_name"] if "custom_writer_name" in config and config["custom_writer_name"]: self._custom_writer_name = config["custom_writer_name"] if "num_frames" in config: self._num_frames = config["num_frames"] if "rt_subframes" in config: self._rt_subframes = config["rt_subframes"] if "control_timeline" in config: self._control_timeline = config["control_timeline"] if "config_file" in config and config["config_file"]: self._config_file = config["config_file"] if "custom_params_path" in config and config["custom_params_path"]: self._custom_params_path = config["custom_params_path"] if "out_working_dir" in config and config["out_working_dir"]: self._out_working_dir = config["out_working_dir"] if "out_dir" in config and config["out_dir"]: self._out_dir = config["out_dir"] if "out_write_type" in config: self._out_write_type = OutWriteType[config["out_write_type"]] if "use_s3" in config: self._use_s3 = config["use_s3"] if "s3_params" in config: self._s3_params = config["s3_params"] if "basic_writer_params" in config and isinstance(config["basic_writer_params"], dict): for key, value in config["basic_writer_params"].items(): if key in self._basic_writer_params: self._basic_writer_params[key] = value if "rp_data" in config: self._rp_data = 
config["rp_data"] def _load_config_and_refresh_ui(self, directory, filename): self.load_config(os.path.join(directory, filename)) self._build_window_ui() def save_config(self, path): os.makedirs(os.path.dirname(path), exist_ok=True) if os.path.isfile(path): carb.log_info(f"Overwriting config file {path}.") with open(path, "w") as json_file: config = { "num_frames": self._num_frames, "rt_subframes": self._rt_subframes, "control_timeline": self._control_timeline, "out_write_type": self._out_write_type.name, "use_s3": self._use_s3, "s3_params": self._s3_params, "basic_writer_params": self._basic_writer_params, "rp_data": self._rp_data, } # Only save string values if they are not empty if self._writer_name: config["writer_name"] = self._writer_name if self._custom_writer_name: config["custom_writer_name"] = self._custom_writer_name if self._config_file: config["config_file"] = self._config_file if self._custom_params_path: config["custom_params_path"] = self._custom_params_path if self._out_working_dir: config["out_working_dir"] = self._out_working_dir if self._out_dir: config["out_dir"] = self._out_dir json.dump(config, json_file, indent=4) def _get_custom_params(self, path): custom_params = {} if not os.path.isfile(path): carb.log_warn(f"Could not find params file {path}.") return custom_params with open(path, "r") as f: params = json.load(f) for key in params: custom_params[key] = params[key] return custom_params def _reset_config_dir(self): self._config_dir = os.path.abspath( os.path.join(omni.kit.app.get_app().get_extension_manager().get_extension_path(self._ext_id), "data", "") ) self._build_window_ui() def _reset_out_working_dir(self): self._out_working_dir = os.getcwd() + "/" self._build_window_ui() def _get_dir_next_numerical_suffix(self, path, dir_name): nums = [-1] for file in os.listdir(path): if file.startswith(dir_name) and os.path.isdir(os.path.join(path, file)): file = file[len(dir_name) :] file = file.replace("_", "") if file.isdecimal(): nums.append(int(file)) suffix = "_" + str(max(nums) + 1) return suffix def _get_output_dir(self): out_dir = self._out_dir if self._out_write_type is OutWriteType.INCREMENT: out_dir = out_dir + self._get_dir_next_numerical_suffix(self._out_working_dir, out_dir) elif self._out_write_type is OutWriteType.TIMESTAMP: out_dir = out_dir + time.strftime("_%Y-%m-%d-%H-%M-%S") return os.path.join(self._out_working_dir, out_dir, "") def _check_if_valid_camera(self, path): context = omni.usd.get_context() stage = context.get_stage() prim = stage.GetPrimAtPath(path) if not prim.IsValid(): carb.log_warn(f"{path} is not a valid prim path.") return False if prim.GetTypeName() == "Camera": return True else: carb.log_warn(f"{prim} is not a 'Camera' type.") return False def _check_if_valid_resolution(self, width, height): if 0 <= width <= MAX_RESOLUTION[0] and 0 <= height <= MAX_RESOLUTION[1]: return True else: carb.log_warn( f"Invalid resolution: {width}x{height}. Must be between 1x1 and {MAX_RESOLUTION[0]}x{MAX_RESOLUTION[1]}." 
) return False def _check_if_valid_rp_entry(self, entry): if ( len(entry) == 3 and self._check_if_valid_camera(entry[0]) and self._check_if_valid_resolution(entry[1], entry[2]) ): return True else: return False def _check_if_stage_is_semantically_labeled(self): stage = omni.usd.get_context().get_stage() for prim in stage.Traverse(): if prim.HasAPI(Semantics.SemanticsAPI): return True carb.log_warn("Stage is not semantically labeled, semantics related annotators will not work.") return False def _check_if_stage_has_skeleton_prims(self): stage = omni.usd.get_context().get_stage() for prim in stage.Traverse(): if prim.GetTypeName() == "Skeleton": return True carb.log_warn("Stage does not have any skeleton prims, skeleton annotator will not work.") return False def _remove_semantics_annotators(self, writer_params): disabled_annotators = [] semantics_annotators = [ "bounding_box_2d_tight", "bounding_box_2d_loose", "semantic_segmentation", "instance_id_segmentation", "instance_segmentation", "bounding_box_3d", "occlusion", ] for annotator in semantics_annotators: if annotator in writer_params and writer_params[annotator]: writer_params[annotator] = False disabled_annotators.append(annotator) if disabled_annotators: carb.log_warn(f"Disabled the following semantics related annotators: {disabled_annotators}.") def _update_rp_entry(self, idx, field, value): self._rp_data[idx][field] = value def _remove_rp_entry(self, idx): del self._rp_data[idx] self._build_window_ui() def _add_new_rp_field(self): # If cameras are selected in the stage viewer use them default values context = omni.usd.get_context() stage = context.get_stage() selected_prims = context.get_selection().get_selected_prim_paths() selected_cameras = [path for path in selected_prims if stage.GetPrimAtPath(path).GetTypeName() == "Camera"] if selected_cameras: for path in selected_cameras: self._rp_data.append([path, 512, 512]) else: # Use selected viewport camera as default value active_vp = get_active_viewport() active_cam = active_vp.get_active_camera() self._rp_data.append([str(active_cam), 512, 512]) self._build_window_ui() def _clear_recorder(self): if self._writer: self._writer.detach() self._writer = None self._render_products.clear() def _init_recorder(self) -> bool: if self._writer is None: try: self._writer = rep.WriterRegistry.get(self._writer_name) except Exception as e: carb.log_warn(f"Could not create writer {self._writer_name}: {e}") return False # Set the number of subframes if self._rt_subframes != carb.settings.get_settings().get("/omni/replicator/RTSubframes"): rep.settings.carb_settings("/omni/replicator/RTSubframes", self._rt_subframes) carb.log_info(f"Setting 'RTSubframes' to {self._rt_subframes}.") # Init the default or custom writer with its parameters writer_params = {} if self._writer_name == "BasicWriter": # In case S3 is selected, make sure the s3 parameters are valid if self._use_s3: # s3_bucket is a required parameter, if it is not set, do not initialize the writer if not self._s3_params["s3_bucket"]: carb.log_warn("Could not initialize writer, s3_bucket parameters is missing.") return False # Other S3 parameters are optional, set them to None in case of empty strings for key, value in self._s3_params.items(): if value == "": self._s3_params[key] = None writer_params = {**self._basic_writer_params, **self._s3_params} else: writer_params = {**self._basic_writer_params} # Disable semantics related annotators if the stage is not semantically labeled stage_is_labeled = self._check_if_stage_is_semantically_labeled() 
if not stage_is_labeled: self._remove_semantics_annotators(writer_params) # Disable skeleton annotator if the stage does not have any skeleton prims if writer_params["skeleton_data"] and not self._check_if_stage_has_skeleton_prims(): carb.log_warn("Stage does not have any skeleton prims, disabling 'skeleton_data' annotator.") writer_params["skeleton_data"] = False else: # Custom writers will not get any sanity cheks, it is up to the user to make sure the parameters are valid custom_params = self._get_custom_params(self._custom_params_path) writer_params = {**custom_params} # Output path can suffixed with an increment or a timestamp if the user has enabled the option output_dir = self._get_output_dir() try: self._writer.initialize(output_dir=output_dir, **writer_params) except Exception as e: carb.log_warn(f"Could not initialize writer {self._writer_name}: {e}") return False # Create the render products for rp_entry in self._rp_data: if self._check_if_valid_rp_entry(rp_entry): rp = rep.create.render_product(rp_entry[0], (rp_entry[1], rp_entry[2])) self._render_products.append(rp) else: carb.log_warn(f"Invalid render product entry {rp_entry}.") if not self._render_products: carb.log_warn("No valid render products found to initialize the writer.") return False try: self._writer.attach(self._render_products) except Exception as e: carb.log_warn(f"Could not attach render products to writer: {e}") return False return True async def _on_orchestrator_finish_async(self): if self._control_timeline: await self._set_timeline_state_async(case="reset") await rep.orchestrator.wait_until_complete_async() self._clear_recorder() self._disable_all_buttons() self._enable_buttons(case="stop") self._in_running_state = False async def _set_timeline_state_async(self, case="reset"): timeline = omni.timeline.get_timeline_interface() if case == "reset": if timeline.is_playing(): timeline.stop() timeline.set_current_time(0) await omni.kit.app.get_app().next_update_async() elif case == "pause": if timeline.is_playing(): timeline.pause() elif case == "resume": if not timeline.is_playing(): timeline.play() async def _start_stop_recorder_async(self): if self._orchestrator_status is orchestrator.Status.STOPPED: self._disable_all_buttons() if self._init_recorder(): num_frames = None if self._num_frames <= 0 else self._num_frames await rep.orchestrator.run_async(num_frames=num_frames, start_timeline=self._control_timeline) self._in_running_state = True self._enable_buttons(case="start") else: self._clear_recorder() self._enable_buttons(case="reset") elif self._orchestrator_status in [orchestrator.Status.STARTED, orchestrator.Status.PAUSED]: self._disable_all_buttons() await rep.orchestrator.stop_async() if self._control_timeline: await self._set_timeline_state_async(case="reset") self._clear_recorder() self._in_running_state = False self._enable_buttons(case="stop") else: carb.log_warn( f"Replicator's current state({self._orchestrator_status.name}) is different state than STOPPED, STARTED or PAUSED. Try again in a bit." 
) def _pause_resume_recorder(self): self._pause_resume_button.enabled = False if self._orchestrator_status is orchestrator.Status.STARTED: rep.orchestrator.pause() if self._control_timeline: asyncio.ensure_future(self._set_timeline_state_async(case="pause")) self._pause_resume_button.text = "Resume" elif self._orchestrator_status is orchestrator.Status.PAUSED: rep.orchestrator.resume() if self._control_timeline: asyncio.ensure_future(self._set_timeline_state_async(case="resume")) self._pause_resume_button.text = "Pause" else: carb.log_warn( f"Replicator's current state ({self._orchestrator_status.name}) is different state than STARTED or PAUSED. Try again in a bit." ) self._pause_resume_button.enabled = True def _disable_all_buttons(self): self._start_stop_button.enabled = False self._pause_resume_button.enabled = False def _enable_buttons(self, case="reset"): if case == "reset": self._start_stop_button.text = "Start" self._pause_resume_button.text = "Pause" self._start_stop_button.enabled = True elif case == "start": self._start_stop_button.text = "Stop" self._start_stop_button.enabled = True self._pause_resume_button.enabled = True elif case == "stop": self._start_stop_button.text = "Start" self._pause_resume_button.text = "Pause" self._start_stop_button.enabled = True def _build_config_ui(self): with ui.VStack(spacing=5): with ui.HStack(): ui.Spacer(width=10) ui.Label("Config Directory", tooltip="Config files directory path") with ui.HStack(): ui.Spacer(width=10) config_dir_model = ui.StringField(read_only=False).model config_dir_model.set_value(self._config_dir) def config_dir_changed(model): self._config_dir = model.as_string config_dir_model.add_value_changed_fn(config_dir_changed) ui.Spacer(width=5) ui.Button( f"{_ui_get_open_folder_glyph()}", width=20, clicked_fn=lambda: self._open_dir(self._config_dir), tooltip="Open config directory", ) ui.Button( f"{_ui_get_reset_glyph()}", width=20, clicked_fn=lambda: self._reset_config_dir(), tooltip="Reset config directory to default", ) with ui.HStack(spacing=5): ui.Spacer(width=5) config_file_model = ui.StringField(tooltip="Config file name").model config_file_model.set_value(self._config_file) def config_file_changed(model): self._config_file = model.as_string config_file_model.add_value_changed_fn(config_file_changed) ui.Button( "Load", clicked_fn=lambda: self._load_config_and_refresh_ui(self._config_dir, self._config_file), tooltip="Load and apply selected config file", ) ui.Button( "Save", clicked_fn=lambda: self.save_config(os.path.join(self._config_dir, self._config_file)), tooltip="Save current config to selected file", ) def _build_s3_ui(self): with ui.VStack(spacing=5): with ui.HStack(): ui.Spacer(width=10) ui.Label("Use S3", alignment=ui.Alignment.LEFT, tooltip="Write data to S3 buckets") s3_model = ui.CheckBox().model s3_model.set_value(self._use_s3) def value_changed(m): self._use_s3 = m.as_bool if self._use_s3 and self._out_write_type is OutWriteType.INCREMENT: print(f"Incremental output is not supported for S3. 
Switching to Timestamp.") self._out_write_type = OutWriteType.TIMESTAMP # Rebuild ui to update radio buttons state to TIMESTAMP self._build_window_ui() s3_model.add_value_changed_fn(value_changed) for key, val in self._s3_params.items(): with ui.HStack(): ui.Spacer(width=10) ui.Label(key, alignment=ui.Alignment.LEFT, tooltip=PARAM_TOOLTIPS[key]) model = ui.StringField().model if val: model.set_value(val) else: model.set_value("") def value_changed(m, k=key): self._s3_params[k] = m.as_string model.add_value_changed_fn(value_changed) def _build_output_ui(self): with ui.VStack(spacing=5): with ui.HStack(): ui.Spacer(width=10) ui.Label("Working Directory") with ui.HStack(): ui.Spacer(width=10) out_working_dir_model = ui.StringField().model out_working_dir_model.set_value(self._out_working_dir) def out_working_dir_changed(model): self._out_working_dir = model.as_string out_working_dir_model.add_value_changed_fn(out_working_dir_changed) ui.Spacer(width=5) ui.Button( f"{_ui_get_open_folder_glyph()}", width=20, clicked_fn=lambda: self._open_dir(self._out_working_dir), tooltip="Open working directory", ) ui.Button( f"{_ui_get_reset_glyph()}", width=20, clicked_fn=lambda: self._reset_out_working_dir(), tooltip="Reset directory to default", ) with ui.HStack(spacing=5): ui.Spacer(width=5) out_dir_model = ui.StringField().model out_dir_model.set_value(self._out_dir) def out_dir_changed(model): self._out_dir = model.as_string out_dir_model.add_value_changed_fn(out_dir_changed) write_collection = ui.RadioCollection() write_collection.model.set_value(self._out_write_type.value) def write_collection_changed(model): out_write_type = OutWriteType(model.as_int) if self._use_s3 and out_write_type is OutWriteType.INCREMENT: print(f"Incremental output is not supported for S3. Switching to Timestamp.") self._out_write_type = OutWriteType.TIMESTAMP # Rebuild ui to update radio buttons state to TIMESTAMP self._build_window_ui() else: self._out_write_type = out_write_type write_collection.model.add_value_changed_fn(write_collection_changed) ui.RadioButton( text="Overwrite", radio_collection=write_collection, tooltip="Overwrite data if output folder already exists", ) ui.RadioButton( text="Increment", radio_collection=write_collection, tooltip="Append numerical increments to output folder (e.g., _01, _02). 
NOTE: does not work with S3", ) ui.RadioButton( text="Timestamp", radio_collection=write_collection, tooltip="Append timestamp to output folder (e.g., _YYYY-mm-dd-HH-MM-SS)", ) s3_frame = ui.CollapsableFrame("S3 Bucket", height=0, collapsed=self._s3_params_frame_collapsed) with s3_frame: def on_collapsed_changed(collapsed): self._s3_params_frame_collapsed = collapsed s3_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_s3_ui() def _build_rp_ui(self): with ui.VStack(spacing=5): with ui.HStack(spacing=5): ui.Spacer(width=15) ui.Label("Camera Path", width=200, tooltip="Camera prim to be used as a render product") ui.Spacer(width=15) ui.Label("X", tooltip="X resolution of the render product") ui.Spacer(width=15) ui.Label("Y", tooltip="Y resolution of the render product") for i, entry in enumerate(self._rp_data): with ui.HStack(spacing=5): ui.Spacer(width=10) path_field_model = ui.StringField(width=200).model path_field_model.set_value(entry[0]) path_field_model.add_value_changed_fn(lambda m, idx=i: self._update_rp_entry(idx, 0, m.as_string)) ui.Spacer(width=10) x_field = ui.IntField() x_field.model.set_value(entry[1]) x_field.model.add_value_changed_fn(lambda m, idx=i: self._update_rp_entry(idx, 1, m.as_int)) ui.Spacer(width=10) y_field = ui.IntField() y_field.model.set_value(entry[2]) y_field.model.add_value_changed_fn(lambda m, idx=i: self._update_rp_entry(idx, 2, m.as_int)) ui.Button( f"{_ui_get_delete_glyph()}", width=30, clicked_fn=lambda idx=i: self._remove_rp_entry(idx), tooltip="Remove entry", ) with ui.HStack(spacing=5): ui.Spacer(width=5) ui.Button( "Add New Render Product Entry", clicked_fn=self._add_new_rp_field, tooltip="Create a new entry" ) def _build_params_ui(self): with ui.VStack(spacing=5): with ui.HStack(): ui.Spacer(width=10) ui.Label("Writer") writer_type_collection = ui.RadioCollection() if self._writer_name == "BasicWriter": writer_type_collection.model.set_value(0) else: writer_type_collection.model.set_value(1) def writer_type_collection_changed(model): if model.as_int == 0: self._custom_writer_name = self._writer_name self._writer_name = "BasicWriter" else: self._writer_name = self._custom_writer_name # self._writer_name = "BasicWriter" if model.as_int == 0 else "CustomWriter" self._build_window_ui() writer_type_collection.model.add_value_changed_fn(writer_type_collection_changed) ui.RadioButton( text="Default", radio_collection=writer_type_collection, tooltip="Uses the default BasicWriter" ) ui.RadioButton( text="Custom", radio_collection=writer_type_collection, tooltip="Loads a custom writer by name" ) if self._writer_name == "BasicWriter": self._build_basic_writer_ui() else: self._build_custom_writer_ui() def _build_basic_writer_ui(self): for key, val in self._basic_writer_params.items(): with ui.HStack(spacing=5): ui.Spacer(width=10) ui.Label(key, alignment=ui.Alignment.LEFT, tooltip=PARAM_TOOLTIPS[key]) model = ui.CheckBox().model model.set_value(val) def value_changed(m, k=key): self._basic_writer_params[k] = m.as_bool model.add_value_changed_fn(value_changed) def _build_custom_writer_ui(self): with ui.HStack(spacing=5): ui.Spacer(width=10) ui.Label("Name", tooltip="The name of the custom writer from the registry") writer_name_model = ui.StringField().model writer_name_model.set_value(self._writer_name) def writer_name_changed(m): self._writer_name = m.as_string writer_name_model.add_value_changed_fn(writer_name_changed) with ui.HStack(spacing=5): ui.Spacer(width=10) ui.Label("Parameters Path", tooltip="Path to the json file storing the custom 
writer parameters") path_model = ui.StringField().model path_model.set_value(self._custom_params_path) def path_changed(m): self._custom_params_path = m.as_string path_model.add_value_changed_fn(path_changed) def _build_writer_ui(self): with ui.VStack(spacing=5): rp_frame = ui.CollapsableFrame("Render Products", height=0, collapsed=self._rp_frame_collapsed) with rp_frame: def on_collapsed_changed(collapsed): self._rp_frame_collapsed = collapsed rp_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_rp_ui() params_frame = ui.CollapsableFrame("Parameters", height=0, collapsed=self._writer_params_frame_collapsed) with params_frame: def on_collapsed_changed(collapsed): self._writer_params_frame_collapsed = collapsed params_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_params_ui() output_frame = ui.CollapsableFrame("Output", height=0, collapsed=self._output_frame_collapsed) with output_frame: def on_collapsed_changed(collapsed): self._output_frame_collapsed = collapsed output_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_output_ui() config_frame = ui.CollapsableFrame("Config", height=0, collapsed=self._config_frame_collapsed) with config_frame: def on_collapsed_changed(collapsed): self._config_frame_collapsed = collapsed config_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_config_ui() def _build_control_params_ui(self): with ui.VStack(spacing=10): with ui.HStack(spacing=5): ui.Spacer(width=10) ui.Label("Number of frames", tooltip="If set to 0, data acquisition will run indefinitely") num_frames_model = ui.IntField().model num_frames_model.set_value(self._num_frames) def num_frames_changed(m): self._num_frames = m.as_int num_frames_model.add_value_changed_fn(num_frames_changed) ui.Label("RTSubframes", tooltip="Render extra frames between captures to avoid rendering artifacts") rt_subframes_model = ui.IntField().model rt_subframes_model.set_value(self._rt_subframes) def num_rt_subframes_changed(m): self._rt_subframes = m.as_int rt_subframes_model.add_value_changed_fn(num_rt_subframes_changed) with ui.HStack(spacing=5): ui.Spacer(width=10) ui.Label( "Control Timeline", alignment=ui.Alignment.LEFT, tooltip="Start/Stop/Pause/Reset the timeline with the recorder", ) control_timeline_model = ui.CheckBox().model control_timeline_model.set_value(self._control_timeline) def value_changed(m): self._control_timeline = m.as_bool control_timeline_model.add_value_changed_fn(value_changed) def _build_control_ui(self): with ui.VStack(spacing=5): control_params_frame = ui.CollapsableFrame( "Parameters", height=0, collapsed=self._control_params_frame_collapsed ) with control_params_frame: def on_collapsed_changed(collapsed): self._control_params_frame_collapsed = collapsed control_params_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_control_params_ui() with ui.HStack(spacing=5): ui.Spacer(width=5) self._start_stop_button = ui.Button( "Start", clicked_fn=lambda: asyncio.ensure_future(self._start_stop_recorder_async()), enabled=True, tooltip="Start/stop the recording", ) self._pause_resume_button = ui.Button( "Pause", clicked_fn=self._pause_resume_recorder, enabled=False, tooltip="Pause/resume recording" ) def _build_window_ui(self): with self._window.frame: with ui.ScrollingFrame(): with ui.VStack(spacing=5): writer_frame = ui.CollapsableFrame("Writer", height=0, collapsed=self._writer_frame_collapsed) with writer_frame: def on_collapsed_changed(collapsed): self._writer_frame_collapsed = collapsed 
writer_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_writer_ui() control_frame = ui.CollapsableFrame("Control", height=0, collapsed=self._control_frame_collapsed) with control_frame: def on_collapsed_changed(collapsed): self._control_frame_collapsed = collapsed control_frame.set_collapsed_changed_fn(on_collapsed_changed) self._build_control_ui()
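# --- Editor's note (illustration, not part of the original extension) -------
# load_config() above accepts a JSON file whose keys mirror the attributes set
# in on_startup(). A minimal, hedged sketch of writing such a file; the paths
# and values below are hypothetical examples, not defaults shipped with the
# extension, and only keys read by load_config() are used.
def _example_write_recorder_config(path="/tmp/custom_config.json"):
    import json
    example_config = {
        "writer_name": "BasicWriter",
        "num_frames": 100,                       # 0 or less records indefinitely
        "rt_subframes": 0,
        "control_timeline": False,
        "out_working_dir": "/tmp/",              # hypothetical working directory
        "out_dir": "_out_sdrec",
        "out_write_type": "TIMESTAMP",           # OVERWRITE | INCREMENT | TIMESTAMP
        "use_s3": False,
        "s3_params": {"s3_bucket": "", "s3_region": "", "s3_endpoint": ""},
        "basic_writer_params": {"rgb": True, "distance_to_camera": True},
        "rp_data": [["/OmniverseKit_Persp", 512, 512]],
    }
    with open(path, "w") as f:
        json.dump(example_config, f, indent=4)
# ---------------------------------------------------------------------------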
46,149
Python
45.475327
736
0.587813
eliabntt/GRADE-RR/isaac_internals/exts/omni.isaac.synthetic_recorder/omni/isaac/synthetic_recorder/extension_custom.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # import carb import omni import omni.syntheticdata._syntheticdata as gt import omni.ui as ui from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription import asyncio import atexit import colorsys import copy import queue import random import os import threading import numpy as np import weakref from carb.settings import get_settings from PIL import Image, ImageDraw from omni.isaac.synthetic_utils import visualization as vis from omni.isaac.synthetic_utils import SyntheticDataHelper, NumpyWriter from omni.syntheticdata import sensors, visualize EXTENSION_NAME = "Synthetic Data Recorder 2" class MyRecorder(): def on_startup(self): """Called to load the extension""" self._timeline = omni.timeline.get_timeline_interface() self._display_paths = [] self._interface = gt.acquire_syntheticdata_interface() self._enable_record = False self._enable_timeline_record = False self._counter = 0 # self.sub_update = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._update) self._update_fps = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop( self._fps_info_update, name="omni.kit.debug.frame_info update" ) self._current_fps = "" self._settings = get_settings() self._viewport = omni.kit.viewport.window self._viewport_names = [] self._num_viewports = 0 self.data_writer = None self.sd_helper = SyntheticDataHelper() self.sensor_settings_default = { "rgb": {"enabled": False}, "depth": {"enabled": False, "colorize": False, "npy": False}, "depthLinear": {"enabled": False, "colorize": False, "npy": False}, "instance": {"enabled": False, "colorize": False, "npy": False}, "semantic": {"enabled": False, "colorize": False, "npy": False}, "bbox_2d_tight": {"enabled": False, "colorize": False, "npy": False}, "bbox_2d_loose": {"enabled": False, "colorize": False, "npy": False}, "normals": {"enabled": False, "colorize": False, "npy": False}, "motion-vector": {"enabled": False, "colorize": False, "npy": False}, "bbox_3d": {"enabled": False, "colorize": False, "npy": False}, "camera": {"enabled": False, "colorize": False, "npy": False}, "poses": {"enabled": False, "colorize": False, "npy": False}, } self._sensor_settings = {} self._sensor_settings_single = {} self.set_settings(self.sensor_settings_default) self._viewport_names = [] self._dir_name = '' self._num_threads = 10 self._max_queue_size = 500 self.verify = {} self.skip_cameras = 0 def get_default_settings(self): return self.sensor_settings_default def set_single_settings(self, settings): self._sensor_settings_single = copy.deepcopy(settings) def set_settings(self, settings): for index, viewport_name in enumerate(self._viewport_names): if index >= self.skip_cameras: viewport_name = viewport_name.split(" ")[0] + str(int(index - self.skip_cameras)) self._sensor_settings[viewport_name] = copy.deepcopy(settings) else: continue # def _update(self, e: carb.events.IEvent): def _update(self): tmp = [x for x in self._viewport.get_viewport_window_instances()] if len(tmp) != self._num_viewports: self._num_viewports = len(tmp) self._viewport_names = [x.name for x in tmp] 
self._is_first_run = [True] * self._num_viewports self.set_settings(self._sensor_settings_single) self.verify[self._viewport_names[-1]] = True if not self._timeline.is_playing(): print("Cannot Generate Data! Editor is not playing.") self._enable_record = False return if self._dir_name == "": print("Cannot generate data, empty dir") return data_dir = str(self._dir_name) if self.data_writer is None: self.data_writer = NumpyWriter( data_dir, self._num_threads, self._max_queue_size, self._sensor_settings, ) self.data_writer.start_threads() self._render_mode = str(self._settings.get("/rtx/rendermode")) for index, viewport_name in enumerate(self._viewport_names): if index < self.skip_cameras: continue real_viewport_name = viewport_name viewport_name = viewport_name.split(" ")[0] + str(int(index - self.skip_cameras)) groundtruth = { "METADATA": { "image_id": str(self._counter), "viewport_name": viewport_name, "DEPTH": {}, "DEPTHLINEAR": {}, "INSTANCE": {}, "SEMANTIC": {}, "BBOX2DTIGHT": {}, "BBOX2DLOOSE": {}, "NORMALS": {}, "MOTIONVECTOR": {}, "BBOX3D": {}, "CAMERA": {}, "POSES": {}, }, "DATA": {}, } gt_list = [] if self._sensor_settings[viewport_name]["rgb"]["enabled"]: gt_list.append("rgb") if self._sensor_settings[viewport_name]["depthLinear"]["enabled"]: gt_list.append("depthLinear") if self._sensor_settings[viewport_name]["depth"]["enabled"]: gt_list.append("depth") if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"]: gt_list.append("boundingBox2DTight") if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"]: gt_list.append("boundingBox2DLoose") if self._sensor_settings[viewport_name]["instance"]["enabled"]: gt_list.append("instanceSegmentation") if self._sensor_settings[viewport_name]["semantic"]["enabled"]: gt_list.append("semanticSegmentation") if self._sensor_settings[viewport_name]["motion-vector"]["enabled"]: gt_list.append("motion-vector") if self._sensor_settings[viewport_name]["normals"]["enabled"]: gt_list.append("normals") if self._sensor_settings[viewport_name]["bbox_3d"]["enabled"]: gt_list.append("boundingBox3D") if self._sensor_settings[viewport_name]["poses"]["enabled"]: gt_list.append("pose") if self._sensor_settings[viewport_name]["camera"]["enabled"]: gt_list.append("camera") for j in tmp: if j.name == real_viewport_name: viewport = j break try: gt = self.sd_helper.get_groundtruth(gt_list, viewport.viewport_api, verify_sensor_init=self.verify[real_viewport_name]) self.verify[real_viewport_name] = False except: self.verify[real_viewport_name] = True gt = self.sd_helper.get_groundtruth(gt_list, viewport.viewport_api, verify_sensor_init=self.verify[real_viewport_name]) self.verify[real_viewport_name] = False if self._enable_record == False: continue mappings = [] # RGB if self._sensor_settings[viewport_name]["rgb"]["enabled"] and gt["state"]["rgb"]: groundtruth["DATA"]["RGB"] = gt["rgb"] # Depth if self._sensor_settings[viewport_name]["depth"]["enabled"] and gt["state"]["depth"]: groundtruth["DATA"]["DEPTH"] = gt["depth"].squeeze() groundtruth["METADATA"]["DEPTH"]["COLORIZE"] = self._sensor_settings[viewport_name]["depth"]["colorize"] groundtruth["METADATA"]["DEPTH"]["NPY"] = self._sensor_settings[viewport_name]["depth"]["npy"] # DepthLinear if self._sensor_settings[viewport_name]["depthLinear"]["enabled"] and gt["state"]["depthLinear"]: groundtruth["DATA"]["DEPTHLINEAR"] = gt["depthLinear"].squeeze() groundtruth["METADATA"]["DEPTHLINEAR"]["COLORIZE"] = self._sensor_settings[viewport_name]["depthLinear"]["colorize"] 
groundtruth["METADATA"]["DEPTHLINEAR"]["NPY"] = self._sensor_settings[viewport_name]["depthLinear"]["npy"] # Instance Segmentation if self._sensor_settings[viewport_name]["instance"]["enabled"] and gt["state"]["instanceSegmentation"]: import ipdb; ipdb.set_trace() instance_data = gt["instanceSegmentation"] groundtruth["DATA"]["INSTANCE"] = instance_data try: groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data[0].shape[1] groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data[0].shape[0] mappings = instance_data[1] except: groundtruth["METADATA"]["INSTANCE"]["WIDTH"] = instance_data.shape[1] groundtruth["METADATA"]["INSTANCE"]["HEIGHT"] = instance_data.shape[0] mappings = [] groundtruth["METADATA"]["INSTANCE"]["MAPPINGS"] = self._sensor_settings[viewport_name]["instance"][ "mappings" ] groundtruth["METADATA"]["INSTANCE"]["COLORIZE"] = self._sensor_settings[viewport_name]["instance"][ "colorize" ] groundtruth["METADATA"]["INSTANCE"]["NPY"] = self._sensor_settings[viewport_name]["instance"]["npy"] # Semantic Segmentation if self._sensor_settings[viewport_name]["semantic"]["enabled"] and gt["state"]["semanticSegmentation"]: semantic_data = gt["semanticSegmentation"] semantic_data[semantic_data == 65535] = 0 # deals with invalid semantic id groundtruth["DATA"]["SEMANTIC"] = (semantic_data, mappings) groundtruth["METADATA"]["SEMANTIC"]["WIDTH"] = semantic_data.shape[1] groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"] = semantic_data.shape[0] groundtruth["METADATA"]["SEMANTIC"]["MAPPINGS"] = self._sensor_settings[viewport_name]["instance"][ "mappings" ] groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"] = self._sensor_settings[viewport_name]["semantic"][ "colorize" ] groundtruth["METADATA"]["SEMANTIC"]["NPY"] = self._sensor_settings[viewport_name]["semantic"]["npy"] # 2D Tight BBox if self._sensor_settings[viewport_name]["bbox_2d_tight"]["enabled"] and gt["state"]["boundingBox2DTight"]: groundtruth["DATA"]["BBOX2DTIGHT"] = gt["boundingBox2DTight"] groundtruth["METADATA"]["BBOX2DTIGHT"]["COLORIZE"] = self._sensor_settings[viewport_name][ "bbox_2d_tight" ]["colorize"] groundtruth["METADATA"]["BBOX2DTIGHT"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_tight"][ "npy" ] # 2D Loose BBox if self._sensor_settings[viewport_name]["bbox_2d_loose"]["enabled"] and gt["state"]["boundingBox2DLoose"]: groundtruth["DATA"]["BBOX2DLOOSE"] = gt["boundingBox2DLoose"] groundtruth["METADATA"]["BBOX2DLOOSE"]["COLORIZE"] = self._sensor_settings[viewport_name][ "bbox_2d_loose" ]["colorize"] groundtruth["METADATA"]["BBOX2DLOOSE"]["NPY"] = self._sensor_settings[viewport_name]["bbox_2d_loose"][ "npy" ] # 3D Bounding Box if self._sensor_settings[viewport_name]["bbox_3d"]["enabled"] and gt["state"]["boundingBox3D"]: groundtruth["DATA"]["BBOX3D"] = gt["boundingBox3D"].squeeze() groundtruth["METADATA"]["BBOX3D"]["COLORIZE"] = self._sensor_settings[viewport_name]["bbox_3d"][ "colorize"] groundtruth["METADATA"]["BBOX3D"]["NPY"] = self._sensor_settings[viewport_name]["bbox_3d"]["npy"] groundtruth["METADATA"]["BBOX3D_IMAGE"] =visualize.get_bbox3d(viewport.viewport_api) # Motion vector if self._sensor_settings[viewport_name]["motion-vector"]["enabled"] and gt["state"]["motion-vector"]: groundtruth["DATA"]["MOTIONVECTOR"] = gt["motion-vector"].squeeze() groundtruth["METADATA"]["MOTIONVECTOR"]["COLORIZE"] = \ self._sensor_settings[viewport_name]["motion-vector"][ "colorize"] groundtruth["METADATA"]["MOTIONVECTOR"]["NPY"] = self._sensor_settings[viewport_name]["motion-vector"][ "npy"] # Poses if 
self._sensor_settings[viewport_name]["poses"]["enabled"] and gt["pose"]: groundtruth["DATA"]["POSES"] = gt["pose"] groundtruth["METADATA"]["POSES"]["NPY"] = self._sensor_settings[viewport_name]["poses"]["npy"] # Camera if self._sensor_settings[viewport_name]["camera"]["enabled"] and gt["state"]["camera"]: groundtruth["DATA"]["CAMERA"] = gt["camera"] groundtruth["METADATA"]["CAMERA"]["NPY"] = self._sensor_settings[viewport_name]["camera"]["npy"] # Normals if self._sensor_settings[viewport_name]["normals"]["enabled"] and gt["state"]["normals"]: groundtruth["DATA"]["NORMALS"] = gt["normals"].squeeze() groundtruth["METADATA"]["NORMALS"]["NPY"] = self._sensor_settings[viewport_name]["normals"]["npy"] self.data_writer.q.put(copy.deepcopy(groundtruth)) # self._counter = self._counter + 1 def _fps_info_update(self, event): dt = event.payload["dt"] * 1000.0 self._current_fps = "%1.2fms [%1.2f]" % (dt, 1000.0 / dt)
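# --- Editor's note (illustration, not part of the original file) ------------
# MyRecorder has no UI of its own; a driving script is expected to configure
# it and call _update() every frame. A minimal, hedged usage sketch: the
# output directory below is a hypothetical example, and the timeline is
# assumed to be playing (otherwise _update() returns early).
def _example_drive_recorder():
    recorder = MyRecorder()
    recorder.on_startup()
    settings = recorder.get_default_settings()
    settings["rgb"]["enabled"] = True
    settings["depth"]["enabled"] = True
    settings["depth"]["npy"] = True
    recorder.set_single_settings(settings)
    recorder._dir_name = "/tmp/grade_out"   # hypothetical output directory
    recorder._enable_record = True
    # Call once per rendered frame; ground truth is queued to the NumpyWriter.
    recorder._update()
# ---------------------------------------------------------------------------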
14,898
Python
47.84918
132
0.558196
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/__init__.py
from . import _syntheticdata
from .scripts.extension import *
from .ogn import *
81
Python
19.499995
32
0.765432
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTestRenderProductCameraDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTestRenderProductCamera Synthetic Data node to test the renderProduct camera pipeline """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdTestRenderProductCameraDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTestRenderProductCamera Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cameraApertureOffset inputs.cameraApertureSize inputs.cameraFStop inputs.cameraFisheyeParams inputs.cameraFocalLength inputs.cameraFocusDistance inputs.cameraModel inputs.cameraNearFar inputs.cameraProjection inputs.cameraViewTransform inputs.exec inputs.height inputs.metersPerSceneUnit inputs.renderProductCameraPath inputs.renderProductResolution inputs.stage inputs.swhFrameNumber inputs.traceError inputs.width Outputs: outputs.test Predefined Tokens: tokens.simulation tokens.postRender tokens.onDemand """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cameraApertureOffset', 'float2', 0, None, 'Camera horizontal and vertical aperture offset', {}, True, [0.0, 0.0], False, ''), ('inputs:cameraApertureSize', 'float2', 0, None, 'Camera horizontal and vertical aperture', {}, True, [0.0, 0.0], False, ''), ('inputs:cameraFStop', 'float', 0, None, 'Camera fStop', {}, True, 0.0, False, ''), ('inputs:cameraFisheyeParams', 'float[]', 0, None, 'Camera fisheye projection parameters', {}, True, [], False, ''), ('inputs:cameraFocalLength', 'float', 0, None, 'Camera focal length', {}, True, 0.0, False, ''), ('inputs:cameraFocusDistance', 'float', 0, None, 'Camera focus distance', {}, True, 0.0, False, ''), ('inputs:cameraModel', 'int', 0, None, 'Camera model (pinhole or fisheye models)', {}, True, 0, False, ''), ('inputs:cameraNearFar', 'float2', 0, None, 'Camera near/far clipping range', {}, True, [0.0, 0.0], False, ''), ('inputs:cameraProjection', 'matrix4d', 0, None, 'Camera projection matrix', {}, True, [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]], False, ''), ('inputs:cameraViewTransform', 'matrix4d', 0, None, 'Camera view matrix', {}, True, [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]], False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:height', 'uint', 0, None, 'Height of the frame', {}, True, 0, False, ''), ('inputs:metersPerSceneUnit', 'float', 0, None, 'Scene units to meters scale', {}, True, 0.0, False, ''), ('inputs:renderProductCameraPath', 'token', 0, None, 'RenderProduct camera prim path', {}, True, '', False, ''), ('inputs:renderProductResolution', 'int2', 0, None, 'RenderProduct resolution', {}, True, [0, 0], False, ''), ('inputs:stage', 'token', 0, None, 'Stage in {simulation, postrender, ondemand}', {}, True, '', False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), 
('inputs:traceError', 'bool', 0, None, 'If true print an error message when the frame numbers are out-of-sync', {ogn.MetadataKeys.DEFAULT: 'false'}, True, False, False, ''), ('inputs:width', 'uint', 0, None, 'Width of the frame', {}, True, 0, False, ''), ('outputs:test', 'bool', 0, None, 'Test value : false if failed', {}, True, None, False, ''), ]) class tokens: simulation = "simulation" postRender = "postRender" onDemand = "onDemand" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.cameraProjection = og.Database.ROLE_MATRIX role_data.inputs.cameraViewTransform = og.Database.ROLE_MATRIX role_data.inputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cameraApertureOffset", "cameraApertureSize", "cameraFStop", "cameraFocalLength", "cameraFocusDistance", "cameraModel", "cameraNearFar", "cameraProjection", "cameraViewTransform", "exec", "height", "metersPerSceneUnit", "renderProductCameraPath", "renderProductResolution", "stage", "swhFrameNumber", "traceError", "width", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cameraApertureOffset, self._attributes.cameraApertureSize, self._attributes.cameraFStop, self._attributes.cameraFocalLength, self._attributes.cameraFocusDistance, self._attributes.cameraModel, self._attributes.cameraNearFar, self._attributes.cameraProjection, self._attributes.cameraViewTransform, self._attributes.exec, self._attributes.height, self._attributes.metersPerSceneUnit, self._attributes.renderProductCameraPath, self._attributes.renderProductResolution, self._attributes.stage, self._attributes.swhFrameNumber, self._attributes.traceError, self._attributes.width] self._batchedReadValues = [[0.0, 0.0], [0.0, 0.0], 0.0, 0.0, 0.0, 0, [0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], None, 0, 0.0, "", [0, 0], "", 0, False, 0] @property def cameraFisheyeParams(self): data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) return data_view.get() @cameraFisheyeParams.setter def cameraFisheyeParams(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.cameraFisheyeParams) data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) data_view.set(value) self.cameraFisheyeParams_size = data_view.get_array_size() @property def cameraApertureOffset(self): return self._batchedReadValues[0] @cameraApertureOffset.setter def cameraApertureOffset(self, value): self._batchedReadValues[0] = value @property def cameraApertureSize(self): return self._batchedReadValues[1] @cameraApertureSize.setter def cameraApertureSize(self, value): self._batchedReadValues[1] = value @property def cameraFStop(self): return self._batchedReadValues[2] @cameraFStop.setter def cameraFStop(self, value): self._batchedReadValues[2] = value @property def cameraFocalLength(self): return self._batchedReadValues[3] 
@cameraFocalLength.setter def cameraFocalLength(self, value): self._batchedReadValues[3] = value @property def cameraFocusDistance(self): return self._batchedReadValues[4] @cameraFocusDistance.setter def cameraFocusDistance(self, value): self._batchedReadValues[4] = value @property def cameraModel(self): return self._batchedReadValues[5] @cameraModel.setter def cameraModel(self, value): self._batchedReadValues[5] = value @property def cameraNearFar(self): return self._batchedReadValues[6] @cameraNearFar.setter def cameraNearFar(self, value): self._batchedReadValues[6] = value @property def cameraProjection(self): return self._batchedReadValues[7] @cameraProjection.setter def cameraProjection(self, value): self._batchedReadValues[7] = value @property def cameraViewTransform(self): return self._batchedReadValues[8] @cameraViewTransform.setter def cameraViewTransform(self, value): self._batchedReadValues[8] = value @property def exec(self): return self._batchedReadValues[9] @exec.setter def exec(self, value): self._batchedReadValues[9] = value @property def height(self): return self._batchedReadValues[10] @height.setter def height(self, value): self._batchedReadValues[10] = value @property def metersPerSceneUnit(self): return self._batchedReadValues[11] @metersPerSceneUnit.setter def metersPerSceneUnit(self, value): self._batchedReadValues[11] = value @property def renderProductCameraPath(self): return self._batchedReadValues[12] @renderProductCameraPath.setter def renderProductCameraPath(self, value): self._batchedReadValues[12] = value @property def renderProductResolution(self): return self._batchedReadValues[13] @renderProductResolution.setter def renderProductResolution(self, value): self._batchedReadValues[13] = value @property def stage(self): return self._batchedReadValues[14] @stage.setter def stage(self, value): self._batchedReadValues[14] = value @property def swhFrameNumber(self): return self._batchedReadValues[15] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[15] = value @property def traceError(self): return self._batchedReadValues[16] @traceError.setter def traceError(self, value): self._batchedReadValues[16] = value @property def width(self): return self._batchedReadValues[17] @width.setter def width(self, value): self._batchedReadValues[17] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"test", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def test(self): value = self._batchedWriteValues.get(self._attributes.test) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.test) return 
data_view.get() @test.setter def test(self, value): self._batchedWriteValues[self._attributes.test] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTestRenderProductCameraDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTestRenderProductCameraDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTestRenderProductCameraDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
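# --- Editor's note (illustration, not part of the generated database file) --
# A hedged sketch of how a node's compute() might consume this database class;
# this is not the actual SdTestRenderProductCamera implementation. It only
# touches attributes declared above (inputs.width/height, the render product
# resolution, and outputs.test).
def _example_compute(db: OgnSdTestRenderProductCameraDatabase) -> bool:
    res = db.inputs.renderProductResolution
    # Report whether the declared frame size matches the render product resolution.
    db.outputs.test = int(res[0]) == int(db.inputs.width) and int(res[1]) == int(db.inputs.height)
    return True
# ---------------------------------------------------------------------------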
14,809
Python
45.137072
636
0.628672
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostSemantic3dBoundingBoxFilterDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostSemantic3dBoundingBoxFilter Synthetic Data node to cull the semantic 3d bounding boxes. """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdPostSemantic3dBoundingBoxFilterDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostSemantic3dBoundingBoxFilter Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.gpu inputs.instanceMappingInfoSDPtr inputs.metersPerSceneUnit inputs.rp inputs.sdSemBBox3dCamCornersCudaPtr inputs.sdSemBBoxInfosCudaPtr inputs.viewportNearFar Outputs: outputs.exec outputs.sdSemBBoxInfosCudaPtr Predefined Tokens: tokens.SemanticBoundingBox3DInfosSD tokens.SemanticBoundingBox3DFilterInfosSD """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:instanceMappingInfoSDPtr', 'uint64', 0, None, 'uint buffer pointer containing the following information : [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]', {}, True, 0, False, ''), ('inputs:metersPerSceneUnit', 'float', 0, None, 'Scene units to meters scale', {ogn.MetadataKeys.DEFAULT: '0.01'}, True, 0.01, False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:sdSemBBox3dCamCornersCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the projection of the 3d bounding boxes on the camera plane represented as a float3=(u,v,z,a) for each bounding box corners', {}, True, 0, False, ''), ('inputs:sdSemBBoxInfosCudaPtr', 'uint64', 0, None, 'Cuda buffer containing valid bounding boxes infos', {}, True, 0, False, ''), ('inputs:viewportNearFar', 'float2', 0, None, 'near and far plane (in scene units) used to clip the 3d bounding boxes.', {ogn.MetadataKeys.DEFAULT: '[0.0, -1.0]'}, True, [0.0, -1.0], False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:sdSemBBoxInfosCudaPtr', 'uint64', 0, None, 'Cuda buffer containing valid bounding boxes infos', {}, True, None, False, ''), ]) class tokens: SemanticBoundingBox3DInfosSD = "SemanticBoundingBox3DInfosSD" SemanticBoundingBox3DFilterInfosSD = "SemanticBoundingBox3DFilterInfosSD" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "gpu", "instanceMappingInfoSDPtr", "metersPerSceneUnit", "rp", "sdSemBBox3dCamCornersCudaPtr", "sdSemBBoxInfosCudaPtr", "viewportNearFar", "_setting_locked", 
"_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.gpu, self._attributes.instanceMappingInfoSDPtr, self._attributes.metersPerSceneUnit, self._attributes.rp, self._attributes.sdSemBBox3dCamCornersCudaPtr, self._attributes.sdSemBBoxInfosCudaPtr, self._attributes.viewportNearFar] self._batchedReadValues = [None, 0, 0, 0.01, 0, 0, 0, [0.0, -1.0]] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def gpu(self): return self._batchedReadValues[1] @gpu.setter def gpu(self, value): self._batchedReadValues[1] = value @property def instanceMappingInfoSDPtr(self): return self._batchedReadValues[2] @instanceMappingInfoSDPtr.setter def instanceMappingInfoSDPtr(self, value): self._batchedReadValues[2] = value @property def metersPerSceneUnit(self): return self._batchedReadValues[3] @metersPerSceneUnit.setter def metersPerSceneUnit(self, value): self._batchedReadValues[3] = value @property def rp(self): return self._batchedReadValues[4] @rp.setter def rp(self, value): self._batchedReadValues[4] = value @property def sdSemBBox3dCamCornersCudaPtr(self): return self._batchedReadValues[5] @sdSemBBox3dCamCornersCudaPtr.setter def sdSemBBox3dCamCornersCudaPtr(self, value): self._batchedReadValues[5] = value @property def sdSemBBoxInfosCudaPtr(self): return self._batchedReadValues[6] @sdSemBBoxInfosCudaPtr.setter def sdSemBBoxInfosCudaPtr(self, value): self._batchedReadValues[6] = value @property def viewportNearFar(self): return self._batchedReadValues[7] @viewportNearFar.setter def viewportNearFar(self, value): self._batchedReadValues[7] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "sdSemBBoxInfosCudaPtr", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def sdSemBBoxInfosCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.sdSemBBoxInfosCudaPtr) if value: return value else: 
data_view = og.AttributeValueHelper(self._attributes.sdSemBBoxInfosCudaPtr) return data_view.get() @sdSemBBoxInfosCudaPtr.setter def sdSemBBoxInfosCudaPtr(self, value): self._batchedWriteValues[self._attributes.sdSemBBoxInfosCudaPtr] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostSemantic3dBoundingBoxFilterDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostSemantic3dBoundingBoxFilterDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostSemantic3dBoundingBoxFilterDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
10,643
Python
47.825688
309
0.653575
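A hypothetical usage sketch for the OgnSdPostSemantic3dBoundingBoxFilterDatabase file above (not the shipped Isaac/GRADE-RR node): an OGN node implementation consumes the generated database class by defining compute(db) and accessing the batched properties. Attribute names follow the INTERFACE table; the pass-through body is illustrative only, since the real culling operates on the CUDA buffers referenced by the pointers.

class SdPostSemantic3dBoundingBoxFilterSketch:
    @staticmethod
    def compute(db) -> bool:
        near, far = db.inputs.viewportNearFar   # near/far planes in scene units (default [0.0, -1.0])
        scale = db.inputs.metersPerSceneUnit    # e.g. 0.01 for a centimeter stage
        if db.inputs.instanceMappingInfoSDPtr == 0:
            return False                        # no instance mapping available yet
        # A real implementation would cull the boxes on the GPU using near/far and scale;
        # here we only forward the bounding-box info buffer downstream.
        db.outputs.sdSemBBoxInfosCudaPtr = db.inputs.sdSemBBoxInfosCudaPtr
        return True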
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTestStageManipulationScenariiDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTestStageManipulationScenarii Synthetic Data test node applying randomly some predefined stage manipulation scenarii """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import traceback import sys class OgnSdTestStageManipulationScenariiDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTestStageManipulationScenarii Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.randomSeed inputs.worldPrimPath State: state.frameNumber """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:randomSeed', 'int', 0, None, 'Random seed', {}, True, 0, False, ''), ('inputs:worldPrimPath', 'token', 0, None, 'Path of the world prim : contains every modifiable prim, cannot be modified', {}, True, '', False, ''), ('state:frameNumber', 'uint64', 0, None, 'Current frameNumber (number of invocations)', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ]) class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"randomSeed", "worldPrimPath", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.randomSeed, self._attributes.worldPrimPath] self._batchedReadValues = [0, ""] @property def randomSeed(self): return self._batchedReadValues[0] @randomSeed.setter def randomSeed(self, value): self._batchedReadValues[0] = value @property def worldPrimPath(self): return self._batchedReadValues[1] @worldPrimPath.setter def worldPrimPath(self, value): self._batchedReadValues[1] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = { } """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } def _commit(self): 
_og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) @property def frameNumber(self): data_view = og.AttributeValueHelper(self._attributes.frameNumber) return data_view.get() @frameNumber.setter def frameNumber(self, value): data_view = og.AttributeValueHelper(self._attributes.frameNumber) data_view.set(value) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTestStageManipulationScenariiDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTestStageManipulationScenariiDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTestStageManipulationScenariiDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes) class abi: """Class defining the ABI interface for the node type""" @staticmethod def get_node_type(): get_node_type_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'omni.syntheticdata.SdTestStageManipulationScenarii' @staticmethod def compute(context, node): try: per_node_data = OgnSdTestStageManipulationScenariiDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = OgnSdTestStageManipulationScenariiDatabase(node) per_node_data['_db'] = db except: db = OgnSdTestStageManipulationScenariiDatabase(node) try: compute_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) db.inputs._prefetch() db.inputs._setting_locked = True with og.in_compute(): return OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.inputs._setting_locked = False db.outputs._commit() return False @staticmethod def initialize(context, node): OgnSdTestStageManipulationScenariiDatabase._initialize_per_node_data(node) initialize_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) OgnSdTestStageManipulationScenariiDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 
'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False @staticmethod def initialize_type(node_type): initialize_type_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "omni.syntheticdata") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "graph:simulation,internal:test") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Synthetic Data test node applying randomly some predefined stage manipulation scenarii") node_type.set_metadata(ogn.MetadataKeys.EXCLUSIONS, "tests") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") OgnSdTestStageManipulationScenariiDatabase.INTERFACE.add_to_node_type(node_type) node_type.set_has_state(True) @staticmethod def on_connection_type_resolve(node): on_connection_type_resolve_function = getattr(OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None) if callable(on_connection_type_resolve_function): on_connection_type_resolve_function(node) NODE_TYPE_CLASS = None GENERATOR_VERSION = (1, 17, 2) TARGET_VERSION = (2, 65, 4) @staticmethod def register(node_type_class): OgnSdTestStageManipulationScenariiDatabase.NODE_TYPE_CLASS = node_type_class og.register_node_type(OgnSdTestStageManipulationScenariiDatabase.abi, 1) @staticmethod def deregister(): og.deregister_node_type("omni.syntheticdata.SdTestStageManipulationScenarii")
10,571
Python
52.664974
158
0.660865
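A minimal sketch of how a node implementation is paired with the generated OgnSdTestStageManipulationScenariiDatabase class above: register() stores the implementation class and hands the generated abi to OmniGraph, whose compute() wrapper prefetches batched inputs, locks them, and commits outputs. The scenario logic below is a placeholder, not the real test node; the import path is assumed from the file path of the record above.

from omni.syntheticdata.ogn.OgnSdTestStageManipulationScenariiDatabase import (
    OgnSdTestStageManipulationScenariiDatabase as Db,
)

class StageManipulationScenariiSketch:
    @staticmethod
    def compute(db) -> bool:
        db.state.frameNumber = db.state.frameNumber + 1       # state attribute persists across evaluations
        _ = (db.inputs.randomSeed, db.inputs.worldPrimPath)   # batched inputs were already prefetched by abi.compute
        return True

Db.register(StageManipulationScenariiSketch)   # Db.deregister() removes the node type again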
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdInstanceMappingDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdInstanceMapping Synthetic Data node to expose the scene instances semantic hierarchy information """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdInstanceMappingDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdInstanceMapping Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.lazy inputs.renderResults inputs.swhFrameNumber Outputs: outputs.exec outputs.sdIMInstanceSemanticMap outputs.sdIMInstanceTokens outputs.sdIMMaxSemanticHierarchyDepth outputs.sdIMMinInstanceIndex outputs.sdIMMinSemanticIndex outputs.sdIMNumInstances outputs.sdIMNumSemanticTokens outputs.sdIMNumSemantics outputs.sdIMSemanticLocalTransform outputs.sdIMSemanticTokenMap outputs.sdIMSemanticWorldTransform outputs.swhFrameNumber Predefined Tokens: tokens.InstanceMappingInfoSDhost tokens.InstanceMapSDhost tokens.SemanticLabelTokenSDhost tokens.InstancePrimTokenSDhost tokens.SemanticLocalTransformSDhost tokens.SemanticWorldTransformSDhost """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:lazy', 'bool', 0, None, 'Compute outputs only when connected to a downstream node', {ogn.MetadataKeys.DEFAULT: 'true'}, True, True, False, ''), ('inputs:renderResults', 'uint64', 0, None, 'Render results pointer', {}, True, 0, False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes when the event is received', {}, True, None, False, ''), ('outputs:sdIMInstanceSemanticMap', 'uchar[]', 0, None, 'Raw array of uint16_t of size sdIMNumInstances*sdIMMaxSemanticHierarchyDepth containing the mapping from the instances index to their inherited semantic entities', {}, True, None, False, ''), ('outputs:sdIMInstanceTokens', 'token[]', 0, None, 'Instance array containing the token for every instances', {}, True, None, False, ''), ('outputs:sdIMMaxSemanticHierarchyDepth', 'uint', 0, None, 'Maximal number of semantic entities inherited by an instance', {}, True, None, False, ''), ('outputs:sdIMMinInstanceIndex', 'uint', 0, None, 'Instance id of the first instance in the instance arrays', {}, True, None, False, ''), ('outputs:sdIMMinSemanticIndex', 'uint', 0, None, 'Semantic id of the first semantic entity in the semantic arrays', {}, True, None, False, ''), ('outputs:sdIMNumInstances', 'uint', 0, None, 'Number of instances in the instance arrays', {}, True, None, False, ''), ('outputs:sdIMNumSemanticTokens', 'uint', 0, None, 'Number of semantics token including the semantic entity path, the semantic entity types and if the number of semantic types is greater than one a ', {}, True, None, False, ''), ('outputs:sdIMNumSemantics', 'uint', 0, None, 'Number of semantic entities in the semantic arrays', {}, True, None, False, 
''), ('outputs:sdIMSemanticLocalTransform', 'float[]', 0, None, 'Semantic array of 4x4 float matrices containing the transform from world to local space for every semantic entity', {}, True, None, False, ''), ('outputs:sdIMSemanticTokenMap', 'token[]', 0, None, 'Semantic array of token of size numSemantics * numSemanticTypes containing the mapping from the semantic entities to the semantic entity path and semantic types', {}, True, None, False, ''), ('outputs:sdIMSemanticWorldTransform', 'float[]', 0, None, 'Semantic array of 4x4 float matrices containing the transform from local to world space for every semantic entity', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ]) class tokens: InstanceMappingInfoSDhost = "InstanceMappingInfoSDhost" InstanceMapSDhost = "InstanceMapSDhost" SemanticLabelTokenSDhost = "SemanticLabelTokenSDhost" InstancePrimTokenSDhost = "InstancePrimTokenSDhost" SemanticLocalTransformSDhost = "SemanticLocalTransformSDhost" SemanticWorldTransformSDhost = "SemanticWorldTransformSDhost" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "lazy", "renderResults", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.lazy, self._attributes.renderResults, self._attributes.swhFrameNumber] self._batchedReadValues = [None, True, 0, 0] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def lazy(self): return self._batchedReadValues[1] @lazy.setter def lazy(self, value): self._batchedReadValues[1] = value @property def renderResults(self): return self._batchedReadValues[2] @renderResults.setter def renderResults(self, value): self._batchedReadValues[2] = value @property def swhFrameNumber(self): return self._batchedReadValues[3] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[3] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "sdIMMaxSemanticHierarchyDepth", "sdIMMinInstanceIndex", "sdIMMinSemanticIndex", "sdIMNumInstances", "sdIMNumSemanticTokens", "sdIMNumSemantics", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to 
output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self.sdIMInstanceSemanticMap_size = None self.sdIMInstanceTokens_size = None self.sdIMSemanticLocalTransform_size = None self.sdIMSemanticTokenMap_size = None self.sdIMSemanticWorldTransform_size = None self._batchedWriteValues = { } @property def sdIMInstanceSemanticMap(self): data_view = og.AttributeValueHelper(self._attributes.sdIMInstanceSemanticMap) return data_view.get(reserved_element_count=self.sdIMInstanceSemanticMap_size) @sdIMInstanceSemanticMap.setter def sdIMInstanceSemanticMap(self, value): data_view = og.AttributeValueHelper(self._attributes.sdIMInstanceSemanticMap) data_view.set(value) self.sdIMInstanceSemanticMap_size = data_view.get_array_size() @property def sdIMInstanceTokens(self): data_view = og.AttributeValueHelper(self._attributes.sdIMInstanceTokens) return data_view.get(reserved_element_count=self.sdIMInstanceTokens_size) @sdIMInstanceTokens.setter def sdIMInstanceTokens(self, value): data_view = og.AttributeValueHelper(self._attributes.sdIMInstanceTokens) data_view.set(value) self.sdIMInstanceTokens_size = data_view.get_array_size() @property def sdIMSemanticLocalTransform(self): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticLocalTransform) return data_view.get(reserved_element_count=self.sdIMSemanticLocalTransform_size) @sdIMSemanticLocalTransform.setter def sdIMSemanticLocalTransform(self, value): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticLocalTransform) data_view.set(value) self.sdIMSemanticLocalTransform_size = data_view.get_array_size() @property def sdIMSemanticTokenMap(self): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticTokenMap) return data_view.get(reserved_element_count=self.sdIMSemanticTokenMap_size) @sdIMSemanticTokenMap.setter def sdIMSemanticTokenMap(self, value): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticTokenMap) data_view.set(value) self.sdIMSemanticTokenMap_size = data_view.get_array_size() @property def sdIMSemanticWorldTransform(self): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticWorldTransform) return data_view.get(reserved_element_count=self.sdIMSemanticWorldTransform_size) @sdIMSemanticWorldTransform.setter def sdIMSemanticWorldTransform(self, value): data_view = og.AttributeValueHelper(self._attributes.sdIMSemanticWorldTransform) data_view.set(value) self.sdIMSemanticWorldTransform_size = data_view.get_array_size() @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def sdIMMaxSemanticHierarchyDepth(self): value = self._batchedWriteValues.get(self._attributes.sdIMMaxSemanticHierarchyDepth) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMMaxSemanticHierarchyDepth) return data_view.get() @sdIMMaxSemanticHierarchyDepth.setter def sdIMMaxSemanticHierarchyDepth(self, value): self._batchedWriteValues[self._attributes.sdIMMaxSemanticHierarchyDepth] = value @property def sdIMMinInstanceIndex(self): value = self._batchedWriteValues.get(self._attributes.sdIMMinInstanceIndex) if 
value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMMinInstanceIndex) return data_view.get() @sdIMMinInstanceIndex.setter def sdIMMinInstanceIndex(self, value): self._batchedWriteValues[self._attributes.sdIMMinInstanceIndex] = value @property def sdIMMinSemanticIndex(self): value = self._batchedWriteValues.get(self._attributes.sdIMMinSemanticIndex) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMMinSemanticIndex) return data_view.get() @sdIMMinSemanticIndex.setter def sdIMMinSemanticIndex(self, value): self._batchedWriteValues[self._attributes.sdIMMinSemanticIndex] = value @property def sdIMNumInstances(self): value = self._batchedWriteValues.get(self._attributes.sdIMNumInstances) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMNumInstances) return data_view.get() @sdIMNumInstances.setter def sdIMNumInstances(self, value): self._batchedWriteValues[self._attributes.sdIMNumInstances] = value @property def sdIMNumSemanticTokens(self): value = self._batchedWriteValues.get(self._attributes.sdIMNumSemanticTokens) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMNumSemanticTokens) return data_view.get() @sdIMNumSemanticTokens.setter def sdIMNumSemanticTokens(self, value): self._batchedWriteValues[self._attributes.sdIMNumSemanticTokens] = value @property def sdIMNumSemantics(self): value = self._batchedWriteValues.get(self._attributes.sdIMNumSemantics) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdIMNumSemantics) return data_view.get() @sdIMNumSemantics.setter def sdIMNumSemantics(self, value): self._batchedWriteValues[self._attributes.sdIMNumSemantics] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdInstanceMappingDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdInstanceMappingDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = 
OgnSdInstanceMappingDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
17,311
Python
49.034682
256
0.658714
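A hedged consumer sketch for the OgnSdInstanceMappingDatabase outputs listed above, run after the node has written them (for example at the end of its own compute). The reshape into a (numInstances, hierarchyDepth) table of uint16 indices follows the attribute descriptions but is an assumption for illustration.

import numpy as np

def unpack_instance_mapping(db):
    num_instances = db.outputs.sdIMNumInstances
    depth = db.outputs.sdIMMaxSemanticHierarchyDepth
    raw = np.asarray(db.outputs.sdIMInstanceSemanticMap, dtype=np.uint8)
    # The uchar[] buffer actually holds uint16_t indices of size numInstances * depth (see the attribute doc).
    mapping = raw.view(np.uint16).reshape(num_instances, depth)
    tokens = list(db.outputs.sdIMSemanticTokenMap)   # semantic path / type tokens per semantic entity
    return mapping, tokens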
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostRenderVarDisplayTextureDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostRenderVarDisplayTexture Synthetic Data node to copy the input aov texture into the corresponding visualization texture """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdPostRenderVarDisplayTextureDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostRenderVarDisplayTexture Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cameraFisheyeParams inputs.cameraModel inputs.cameraNearFar inputs.exec inputs.gpu inputs.instanceMapSDCudaPtr inputs.instanceMappingInfoSDPtr inputs.metersPerSceneUnit inputs.mode inputs.parameters inputs.renderVar inputs.renderVarDisplay inputs.rp inputs.sdDisplayHeight inputs.sdDisplayWidth inputs.sdSemBBox3dCamCornersCudaPtr inputs.sdSemBBox3dCamExtentCudaPtr inputs.sdSemBBoxExtentCudaPtr inputs.sdSemBBoxInfosCudaPtr inputs.semanticLabelTokenSDCudaPtr inputs.semanticMapSDCudaPtr inputs.semanticPrimTokenSDCudaPtr inputs.semanticWorldTransformSDCudaPtr inputs.swhFrameNumber Outputs: outputs.cudaPtr outputs.exec outputs.format outputs.height outputs.renderVarDisplay outputs.width Predefined Tokens: tokens.LdrColorSD tokens.Camera3dPositionSD tokens.DistanceToImagePlaneSD tokens.DistanceToCameraSD tokens.InstanceSegmentationSD tokens.SemanticSegmentationSD tokens.NormalSD tokens.TargetMotionSD tokens.BoundingBox2DTightSD tokens.BoundingBox2DLooseSD tokens.BoundingBox3DSD tokens.OcclusionSD tokens.TruncationSD tokens.CrossCorrespondenceSD tokens.SemanticBoundingBox2DExtentTightSD tokens.SemanticBoundingBox2DInfosTightSD tokens.SemanticBoundingBox2DExtentLooseSD tokens.SemanticBoundingBox2DInfosLooseSD tokens.SemanticBoundingBox3DExtentSD tokens.SemanticBoundingBox3DInfosSD tokens.SemanticBoundingBox3DCamCornersSD tokens.SemanticBoundingBox3DDisplayAxesSD tokens.autoMode tokens.colorMode tokens.scaled3dVectorMode tokens.clippedValueMode tokens.normalized3dVectorMode tokens.segmentationMapMode tokens.instanceMapMode tokens.semanticPathMode tokens.semanticLabelMode tokens.semanticBoundingBox2dMode tokens.rawBoundingBox2dMode tokens.semanticProjBoundingBox3dMode tokens.semanticBoundingBox3dMode tokens.rawBoundingBox3dMode tokens.pinhole tokens.perspective tokens.orthographic tokens.fisheyePolynomial """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cameraFisheyeParams', 'float[]', 0, None, 'Camera fisheye projection parameters', {}, True, [], False, ''), ('inputs:cameraModel', 'int', 0, None, 'Camera model (pinhole or fisheye models)', {}, True, 0, False, ''), ('inputs:cameraNearFar', 'float2', 0, None, 'Camera near/far clipping range', {}, True, [0.0, 0.0], False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:instanceMapSDCudaPtr', 'uint64', 0, None, 'cuda 
uint16_t buffer pointer of size numInstances containing the instance parent semantic index', {}, True, 0, False, ''), ('inputs:instanceMappingInfoSDPtr', 'uint64', 0, None, 'uint buffer pointer containing the following information : [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]', {}, True, 0, False, ''), ('inputs:metersPerSceneUnit', 'float', 0, None, 'Scene units to meters scale', {}, True, 0.0, False, ''), ('inputs:mode', 'token', 0, None, 'Display mode', {ogn.MetadataKeys.DEFAULT: '"autoMode"'}, True, 'autoMode', False, ''), ('inputs:parameters', 'float4', 0, None, 'Display parameters', {ogn.MetadataKeys.DEFAULT: '[0.0, 5.0, 0.33, 0.27]'}, True, [0.0, 5.0, 0.33, 0.27], False, ''), ('inputs:renderVar', 'token', 0, None, 'Name of the input RenderVar to display', {}, True, '', False, ''), ('inputs:renderVarDisplay', 'token', 0, None, 'Name of the output display RenderVar', {}, True, '', False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:sdDisplayHeight', 'uint', 0, None, 'Visualization texture Height', {}, True, 0, False, ''), ('inputs:sdDisplayWidth', 'uint', 0, None, 'Visualization texture width', {}, True, 0, False, ''), ('inputs:sdSemBBox3dCamCornersCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the projection of the 3d bounding boxes on the camera plane represented as a float3=(u,v,z,a) for each bounding box corners', {}, True, 0, False, ''), ('inputs:sdSemBBox3dCamExtentCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the 2d extent of the 3d bounding boxes on the camera plane represented as a float6=(u_min,u_max,v_min,v_max,z_min,z_max)', {}, True, 0, False, ''), ('inputs:sdSemBBoxExtentCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the extent of the bounding boxes as a float4=(u_min,v_min,u_max,v_max) for 2D or a float6=(xmin,ymin,zmin,xmax,ymax,zmax) in object space for 3D', {}, True, 0, False, ''), ('inputs:sdSemBBoxInfosCudaPtr', 'uint64', 0, None, 'Cuda buffer containing valid bounding boxes infos', {}, True, 0, False, ''), ('inputs:semanticLabelTokenSDCudaPtr', 'uint64', 0, None, 'cuda uint64_t buffer pointer of size numSemantics containing the semantic label token', {}, True, 0, False, ''), ('inputs:semanticMapSDCudaPtr', 'uint64', 0, None, 'cuda uint16_t buffer pointer of size numSemantics containing the semantic parent semantic index', {}, True, 0, False, ''), ('inputs:semanticPrimTokenSDCudaPtr', 'uint64', 0, None, 'cuda uint64_t buffer pointer of size numSemantics containing the semantic path token', {}, True, 0, False, ''), ('inputs:semanticWorldTransformSDCudaPtr', 'uint64', 0, None, 'cuda float44 buffer pointer of size numSemantics containing the world semantic transform', {}, True, 0, False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:cudaPtr', 'uint64', 0, None, 'Display texture CUDA pointer', {}, True, None, False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:format', 'uint64', 0, None, 'Display texture format', {}, True, None, False, ''), ('outputs:height', 'uint', 0, None, 'Display texture height', {}, True, None, False, ''), ('outputs:renderVarDisplay', 'token', 0, None, 'Name of the output display RenderVar', {}, True, None, False, ''), ('outputs:width', 'uint', 0, None, 'Display texture width', {}, True, None, False, ''), ]) class tokens: LdrColorSD = "LdrColorSD" Camera3dPositionSD = "Camera3dPositionSD" 
DistanceToImagePlaneSD = "DistanceToImagePlaneSD" DistanceToCameraSD = "DistanceToCameraSD" InstanceSegmentationSD = "InstanceSegmentationSD" SemanticSegmentationSD = "SemanticSegmentationSD" NormalSD = "NormalSD" TargetMotionSD = "TargetMotionSD" BoundingBox2DTightSD = "BoundingBox2DTightSD" BoundingBox2DLooseSD = "BoundingBox2DLooseSD" BoundingBox3DSD = "BoundingBox3DSD" OcclusionSD = "OcclusionSD" TruncationSD = "TruncationSD" CrossCorrespondenceSD = "CrossCorrespondenceSD" SemanticBoundingBox2DExtentTightSD = "SemanticBoundingBox2DExtentTightSD" SemanticBoundingBox2DInfosTightSD = "SemanticBoundingBox2DInfosTightSD" SemanticBoundingBox2DExtentLooseSD = "SemanticBoundingBox2DExtentLooseSD" SemanticBoundingBox2DInfosLooseSD = "SemanticBoundingBox2DInfosLooseSD" SemanticBoundingBox3DExtentSD = "SemanticBoundingBox3DExtentSD" SemanticBoundingBox3DInfosSD = "SemanticBoundingBox3DInfosSD" SemanticBoundingBox3DCamCornersSD = "SemanticBoundingBox3DCamCornersSD" SemanticBoundingBox3DDisplayAxesSD = "SemanticBoundingBox3DDisplayAxesSD" autoMode = "autoMode" colorMode = "colorMode" scaled3dVectorMode = "scaled3dVectorMode" clippedValueMode = "clippedValueMode" normalized3dVectorMode = "normalized3dVectorMode" segmentationMapMode = "segmentationMapMode" instanceMapMode = "instanceMapMode" semanticPathMode = "semanticPathMode" semanticLabelMode = "semanticLabelMode" semanticBoundingBox2dMode = "semanticBoundingBox2dMode" rawBoundingBox2dMode = "rawBoundingBox2dMode" semanticProjBoundingBox3dMode = "semanticProjBoundingBox3dMode" semanticBoundingBox3dMode = "semanticBoundingBox3dMode" rawBoundingBox3dMode = "rawBoundingBox3dMode" pinhole = "pinhole" perspective = "perspective" orthographic = "orthographic" fisheyePolynomial = "fisheyePolynomial" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cameraModel", "cameraNearFar", "exec", "gpu", "instanceMapSDCudaPtr", "instanceMappingInfoSDPtr", "metersPerSceneUnit", "mode", "parameters", "renderVar", "renderVarDisplay", "rp", "sdDisplayHeight", "sdDisplayWidth", "sdSemBBox3dCamCornersCudaPtr", "sdSemBBox3dCamExtentCudaPtr", "sdSemBBoxExtentCudaPtr", "sdSemBBoxInfosCudaPtr", "semanticLabelTokenSDCudaPtr", "semanticMapSDCudaPtr", "semanticPrimTokenSDCudaPtr", "semanticWorldTransformSDCudaPtr", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cameraModel, self._attributes.cameraNearFar, self._attributes.exec, self._attributes.gpu, self._attributes.instanceMapSDCudaPtr, self._attributes.instanceMappingInfoSDPtr, self._attributes.metersPerSceneUnit, self._attributes.mode, self._attributes.parameters, self._attributes.renderVar, self._attributes.renderVarDisplay, self._attributes.rp, self._attributes.sdDisplayHeight, self._attributes.sdDisplayWidth, self._attributes.sdSemBBox3dCamCornersCudaPtr, 
self._attributes.sdSemBBox3dCamExtentCudaPtr, self._attributes.sdSemBBoxExtentCudaPtr, self._attributes.sdSemBBoxInfosCudaPtr, self._attributes.semanticLabelTokenSDCudaPtr, self._attributes.semanticMapSDCudaPtr, self._attributes.semanticPrimTokenSDCudaPtr, self._attributes.semanticWorldTransformSDCudaPtr, self._attributes.swhFrameNumber] self._batchedReadValues = [0, [0.0, 0.0], None, 0, 0, 0, 0.0, "autoMode", [0.0, 5.0, 0.33, 0.27], "", "", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] @property def cameraFisheyeParams(self): data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) return data_view.get() @cameraFisheyeParams.setter def cameraFisheyeParams(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.cameraFisheyeParams) data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) data_view.set(value) self.cameraFisheyeParams_size = data_view.get_array_size() @property def cameraModel(self): return self._batchedReadValues[0] @cameraModel.setter def cameraModel(self, value): self._batchedReadValues[0] = value @property def cameraNearFar(self): return self._batchedReadValues[1] @cameraNearFar.setter def cameraNearFar(self, value): self._batchedReadValues[1] = value @property def exec(self): return self._batchedReadValues[2] @exec.setter def exec(self, value): self._batchedReadValues[2] = value @property def gpu(self): return self._batchedReadValues[3] @gpu.setter def gpu(self, value): self._batchedReadValues[3] = value @property def instanceMapSDCudaPtr(self): return self._batchedReadValues[4] @instanceMapSDCudaPtr.setter def instanceMapSDCudaPtr(self, value): self._batchedReadValues[4] = value @property def instanceMappingInfoSDPtr(self): return self._batchedReadValues[5] @instanceMappingInfoSDPtr.setter def instanceMappingInfoSDPtr(self, value): self._batchedReadValues[5] = value @property def metersPerSceneUnit(self): return self._batchedReadValues[6] @metersPerSceneUnit.setter def metersPerSceneUnit(self, value): self._batchedReadValues[6] = value @property def mode(self): return self._batchedReadValues[7] @mode.setter def mode(self, value): self._batchedReadValues[7] = value @property def parameters(self): return self._batchedReadValues[8] @parameters.setter def parameters(self, value): self._batchedReadValues[8] = value @property def renderVar(self): return self._batchedReadValues[9] @renderVar.setter def renderVar(self, value): self._batchedReadValues[9] = value @property def renderVarDisplay(self): return self._batchedReadValues[10] @renderVarDisplay.setter def renderVarDisplay(self, value): self._batchedReadValues[10] = value @property def rp(self): return self._batchedReadValues[11] @rp.setter def rp(self, value): self._batchedReadValues[11] = value @property def sdDisplayHeight(self): return self._batchedReadValues[12] @sdDisplayHeight.setter def sdDisplayHeight(self, value): self._batchedReadValues[12] = value @property def sdDisplayWidth(self): return self._batchedReadValues[13] @sdDisplayWidth.setter def sdDisplayWidth(self, value): self._batchedReadValues[13] = value @property def sdSemBBox3dCamCornersCudaPtr(self): return self._batchedReadValues[14] @sdSemBBox3dCamCornersCudaPtr.setter def sdSemBBox3dCamCornersCudaPtr(self, value): self._batchedReadValues[14] = value @property def sdSemBBox3dCamExtentCudaPtr(self): return self._batchedReadValues[15] @sdSemBBox3dCamExtentCudaPtr.setter def sdSemBBox3dCamExtentCudaPtr(self, value): self._batchedReadValues[15] = value @property def sdSemBBoxExtentCudaPtr(self): return 
self._batchedReadValues[16] @sdSemBBoxExtentCudaPtr.setter def sdSemBBoxExtentCudaPtr(self, value): self._batchedReadValues[16] = value @property def sdSemBBoxInfosCudaPtr(self): return self._batchedReadValues[17] @sdSemBBoxInfosCudaPtr.setter def sdSemBBoxInfosCudaPtr(self, value): self._batchedReadValues[17] = value @property def semanticLabelTokenSDCudaPtr(self): return self._batchedReadValues[18] @semanticLabelTokenSDCudaPtr.setter def semanticLabelTokenSDCudaPtr(self, value): self._batchedReadValues[18] = value @property def semanticMapSDCudaPtr(self): return self._batchedReadValues[19] @semanticMapSDCudaPtr.setter def semanticMapSDCudaPtr(self, value): self._batchedReadValues[19] = value @property def semanticPrimTokenSDCudaPtr(self): return self._batchedReadValues[20] @semanticPrimTokenSDCudaPtr.setter def semanticPrimTokenSDCudaPtr(self, value): self._batchedReadValues[20] = value @property def semanticWorldTransformSDCudaPtr(self): return self._batchedReadValues[21] @semanticWorldTransformSDCudaPtr.setter def semanticWorldTransformSDCudaPtr(self, value): self._batchedReadValues[21] = value @property def swhFrameNumber(self): return self._batchedReadValues[22] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[22] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaPtr", "exec", "format", "height", "renderVarDisplay", "width", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def cudaPtr(self): value = self._batchedWriteValues.get(self._attributes.cudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cudaPtr) return data_view.get() @cudaPtr.setter def cudaPtr(self, value): self._batchedWriteValues[self._attributes.cudaPtr] = value @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def format(self): value = self._batchedWriteValues.get(self._attributes.format) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.format) return data_view.get() @format.setter def format(self, value): self._batchedWriteValues[self._attributes.format] = value @property def height(self): value = self._batchedWriteValues.get(self._attributes.height) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.height) return data_view.get() @height.setter def height(self, value): self._batchedWriteValues[self._attributes.height] = 
value @property def renderVarDisplay(self): value = self._batchedWriteValues.get(self._attributes.renderVarDisplay) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.renderVarDisplay) return data_view.get() @renderVarDisplay.setter def renderVarDisplay(self, value): self._batchedWriteValues[self._attributes.renderVarDisplay] = value @property def width(self): value = self._batchedWriteValues.get(self._attributes.width) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.width) return data_view.get() @width.setter def width(self, value): self._batchedWriteValues[self._attributes.width] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostRenderVarDisplayTextureDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostRenderVarDisplayTextureDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostRenderVarDisplayTextureDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
24,183
Python
45.597302
858
0.652938
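Illustrative only: the generated tokens class on OgnSdPostRenderVarDisplayTextureDatabase mirrors the node's render-var and display-mode string constants, which helps avoid typos when configuring the display node from a script. The node path below is a made-up placeholder, and og.Controller usage is a sketch rather than the extension's own setup code.

import omni.graph.core as og
from omni.syntheticdata.ogn.OgnSdPostRenderVarDisplayTextureDatabase import (
    OgnSdPostRenderVarDisplayTextureDatabase as DisplayDb,
)

NODE_PATH = "/Render/PostProcess/SDGPipeline/PostRenderVarDisplayTexture"  # placeholder prim path

og.Controller.set(
    og.Controller.attribute(NODE_PATH + ".inputs:renderVar"),
    DisplayDb.tokens.SemanticSegmentationSD,      # which AOV to visualize
)
og.Controller.set(
    og.Controller.attribute(NODE_PATH + ".inputs:mode"),
    DisplayDb.tokens.segmentationMapMode,         # how to colorize it
)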
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTestStageSynchronizationDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTestStageSynchronization Synthetic Data node to test the pipeline stage synchronization """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn class OgnSdTestStageSynchronizationDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTestStageSynchronization Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.gpu inputs.randomMaxProcessingTimeUs inputs.randomSeed inputs.renderResults inputs.rp inputs.swhFrameNumber inputs.tag inputs.traceError Outputs: outputs.exec outputs.fabricSWHFrameNumber outputs.swhFrameNumber """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'OnDemand connection : trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'PostRender connection : pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:randomMaxProcessingTimeUs', 'uint', 0, None, 'Maximum number of micro-seconds to randomly (uniformely) wait for in order to simulate varying workload', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:randomSeed', 'uint', 0, None, 'Random seed for the randomization', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:renderResults', 'uint64', 0, None, 'OnDemand connection : pointer to render product results', {}, True, 0, False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'PostRender connection : pointer to render product for this view', {}, True, 0, False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('inputs:tag', 'token', 0, None, 'A tag to identify the node', {}, True, '', False, ''), ('inputs:traceError', 'bool', 0, None, 'If true print an error message when the frame numbers are out-of-sync', {ogn.MetadataKeys.DEFAULT: 'false'}, True, False, False, ''), ('outputs:exec', 'execution', 0, None, 'OnDemand connection : trigger', {}, True, None, False, ''), ('outputs:fabricSWHFrameNumber', 'uint64', 0, None, 'Fabric frame number from the fabric', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ]) @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "gpu", "randomMaxProcessingTimeUs", "randomSeed", "renderResults", "rp", "swhFrameNumber", "tag", "traceError", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): 
"""Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.gpu, self._attributes.randomMaxProcessingTimeUs, self._attributes.randomSeed, self._attributes.renderResults, self._attributes.rp, self._attributes.swhFrameNumber, self._attributes.tag, self._attributes.traceError] self._batchedReadValues = [None, 0, 0, 0, 0, 0, 0, "", False] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def gpu(self): return self._batchedReadValues[1] @gpu.setter def gpu(self, value): self._batchedReadValues[1] = value @property def randomMaxProcessingTimeUs(self): return self._batchedReadValues[2] @randomMaxProcessingTimeUs.setter def randomMaxProcessingTimeUs(self, value): self._batchedReadValues[2] = value @property def randomSeed(self): return self._batchedReadValues[3] @randomSeed.setter def randomSeed(self, value): self._batchedReadValues[3] = value @property def renderResults(self): return self._batchedReadValues[4] @renderResults.setter def renderResults(self, value): self._batchedReadValues[4] = value @property def rp(self): return self._batchedReadValues[5] @rp.setter def rp(self, value): self._batchedReadValues[5] = value @property def swhFrameNumber(self): return self._batchedReadValues[6] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[6] = value @property def tag(self): return self._batchedReadValues[7] @tag.setter def tag(self, value): self._batchedReadValues[7] = value @property def traceError(self): return self._batchedReadValues[8] @traceError.setter def traceError(self, value): self._batchedReadValues[8] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "fabricSWHFrameNumber", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def fabricSWHFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.fabricSWHFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.fabricSWHFrameNumber) return data_view.get() @fabricSWHFrameNumber.setter def fabricSWHFrameNumber(self, value): 
self._batchedWriteValues[self._attributes.fabricSWHFrameNumber] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTestStageSynchronizationDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTestStageSynchronizationDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTestStageSynchronizationDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
10,958
Python
45.634042
297
0.634331
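A sketch only, in the spirit of the OgnSdTestStageSynchronizationDatabase node above: copy the frame number through and report a mismatch when inputs:traceError is enabled. The real node also reads the Fabric frame number from the render results and waits a random time (randomMaxProcessingTimeUs) to simulate varying workload.

class StageSynchronizationSketch:
    @staticmethod
    def compute(db) -> bool:
        frame = db.inputs.swhFrameNumber
        fabric_frame = frame   # assumption: the real node obtains this from the Fabric render results
        db.outputs.swhFrameNumber = frame
        db.outputs.fabricSWHFrameNumber = fabric_frame
        if db.inputs.traceError and fabric_frame != frame:
            db.log_error(f"[{db.inputs.tag}] frame numbers out of sync: {frame} vs {fabric_frame}")
        return True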
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdRenderVarDisplayTextureDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdRenderVarDisplayTexture Synthetic Data node to expose texture handle of a visualization AOV """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn class OgnSdRenderVarDisplayTextureDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdRenderVarDisplayTexture Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.renderResults inputs.renderVarDisplay inputs.swhFrameNumber Outputs: outputs.cudaPtr outputs.exec outputs.format outputs.handlePtr outputs.height outputs.swhFrameNumber outputs.width """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:renderResults', 'uint64', 0, None, 'Render results pointer', {}, True, 0, False, ''), ('inputs:renderVarDisplay', 'token', 0, None, 'Name of the renderVar', {}, True, '', False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:cudaPtr', 'uint64', 0, None, 'Display texture CUDA pointer', {}, True, None, False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:format', 'uint64', 0, None, 'Display texture format', {}, True, None, False, ''), ('outputs:handlePtr', 'uint64', 0, None, 'Display texture handle reference', {}, True, None, False, ''), ('outputs:height', 'uint', 0, None, 'Display texture height', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ('outputs:width', 'uint', 0, None, 'Display texture width', {}, True, None, False, ''), ]) @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "renderResults", "renderVarDisplay", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.renderResults, self._attributes.renderVarDisplay, self._attributes.swhFrameNumber] self._batchedReadValues = [None, 0, "", 0] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def renderResults(self): return self._batchedReadValues[1] @renderResults.setter def 
renderResults(self, value): self._batchedReadValues[1] = value @property def renderVarDisplay(self): return self._batchedReadValues[2] @renderVarDisplay.setter def renderVarDisplay(self, value): self._batchedReadValues[2] = value @property def swhFrameNumber(self): return self._batchedReadValues[3] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[3] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaPtr", "exec", "format", "handlePtr", "height", "swhFrameNumber", "width", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def cudaPtr(self): value = self._batchedWriteValues.get(self._attributes.cudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cudaPtr) return data_view.get() @cudaPtr.setter def cudaPtr(self, value): self._batchedWriteValues[self._attributes.cudaPtr] = value @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def format(self): value = self._batchedWriteValues.get(self._attributes.format) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.format) return data_view.get() @format.setter def format(self, value): self._batchedWriteValues[self._attributes.format] = value @property def handlePtr(self): value = self._batchedWriteValues.get(self._attributes.handlePtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.handlePtr) return data_view.get() @handlePtr.setter def handlePtr(self, value): self._batchedWriteValues[self._attributes.handlePtr] = value @property def height(self): value = self._batchedWriteValues.get(self._attributes.height) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.height) return data_view.get() @height.setter def height(self, value): self._batchedWriteValues[self._attributes.height] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value @property def width(self): value = self._batchedWriteValues.get(self._attributes.width) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.width) return data_view.get() 
@width.setter def width(self, value): self._batchedWriteValues[self._attributes.width] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdRenderVarDisplayTextureDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdRenderVarDisplayTextureDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdRenderVarDisplayTextureDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
10,872
Python
43.379592
165
0.617274
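Note on usage: the generated database above is only the accessor layer; the actual node logic lives in a separate implementation class whose compute method receives a db instance and goes through the properties declared in INTERFACE. A minimal sketch, assuming a hypothetical implementation class (the class name and the pass-through logic are illustrative, not part of the generated file):

# Hypothetical node implementation consuming OgnSdRenderVarDisplayTextureDatabase.
# Attribute names (inputs.renderVarDisplay, outputs.swhFrameNumber, ...) come from
# the INTERFACE table above; the logic itself is only an illustration.
class OgnSdRenderVarDisplayTextureExample:
    @staticmethod
    def compute(db) -> bool:
        render_var = db.inputs.renderVarDisplay   # batched input, prefetched before compute
        frame = db.inputs.swhFrameNumber
        if not render_var:
            return False
        db.outputs.swhFrameNumber = frame         # batched output, committed after compute
        return True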
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostRenderVarTextureToBufferDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostRenderVarTextureToBuffer Expose a device renderVar buffer a texture one. """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn class OgnSdPostRenderVarTextureToBufferDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostRenderVarTextureToBuffer Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.gpu inputs.renderVar inputs.renderVarBufferSuffix inputs.rp Outputs: outputs.renderVar """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:gpu', 'uint64', 0, None, 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:renderVar', 'token', 0, None, 'Name of the device renderVar to expose on the host', {}, True, '', False, ''), ('inputs:renderVarBufferSuffix', 'string', 0, None, 'Suffix appended to the renderVar name', {ogn.MetadataKeys.DEFAULT: '"buffer"'}, True, 'buffer', False, ''), ('inputs:rp', 'uint64', 0, None, 'Pointer to render product for this view', {}, True, 0, False, ''), ('outputs:renderVar', 'token', 0, None, 'Name of the resulting renderVar on the host', {}, True, None, False, ''), ]) class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"gpu", "renderVar", "renderVarBufferSuffix", "rp", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.gpu, self._attributes.renderVar, self._attributes.renderVarBufferSuffix, self._attributes.rp] self._batchedReadValues = [0, "", "buffer", 0] @property def gpu(self): return self._batchedReadValues[0] @gpu.setter def gpu(self, value): self._batchedReadValues[0] = value @property def renderVar(self): return self._batchedReadValues[1] @renderVar.setter def renderVar(self, value): self._batchedReadValues[1] = value @property def renderVarBufferSuffix(self): return self._batchedReadValues[2] @renderVarBufferSuffix.setter def renderVarBufferSuffix(self, value): self._batchedReadValues[2] = value @property def rp(self): return self._batchedReadValues[3] @rp.setter def rp(self, value): self._batchedReadValues[3] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if 
len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"renderVar", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def renderVar(self): value = self._batchedWriteValues.get(self._attributes.renderVar) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.renderVar) return data_view.get() @renderVar.setter def renderVar(self, value): self._batchedWriteValues[self._attributes.renderVar] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostRenderVarTextureToBufferDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostRenderVarTextureToBufferDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostRenderVarTextureToBufferDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
7,180
Python
47.52027
168
0.644568
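From the attribute descriptions above ("Suffix appended to the renderVar name" / "Name of the resulting renderVar on the host"), the output name is the input renderVar name plus the buffer suffix. A small sketch of that naming rule; the concatenation is inferred from the descriptions, not read from the node's actual implementation:

# Illustrative helper: the expected host-side renderVar name for a given db instance.
def expected_output_render_var(db) -> str:
    return db.inputs.renderVar + db.inputs.renderVarBufferSuffix   # e.g. "<renderVar>" + "buffer"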
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdRenderProductCameraDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdRenderProductCamera Synthetic Data node to expose the camera data """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdRenderProductCameraDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdRenderProductCamera Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.renderProductPath inputs.renderResults inputs.swhFrameNumber Outputs: outputs.cameraApertureOffset outputs.cameraApertureSize outputs.cameraFStop outputs.cameraFisheyeParams outputs.cameraFocalLength outputs.cameraFocusDistance outputs.cameraModel outputs.cameraNearFar outputs.cameraProjection outputs.cameraViewTransform outputs.exec outputs.metersPerSceneUnit outputs.renderProductResolution outputs.swhFrameNumber """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:renderProductPath', 'token', 0, None, 'RenderProduct prim path', {}, True, '', False, ''), ('inputs:renderResults', 'uint64', 0, None, 'Render results', {}, True, 0, False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:cameraApertureOffset', 'float2', 0, None, 'Camera horizontal and vertical aperture offset', {}, True, None, False, ''), ('outputs:cameraApertureSize', 'float2', 0, None, 'Camera horizontal and vertical aperture', {}, True, None, False, ''), ('outputs:cameraFStop', 'float', 0, None, 'Camera fStop', {}, True, None, False, ''), ('outputs:cameraFisheyeParams', 'float[]', 0, None, 'Camera fisheye projection parameters', {}, True, None, False, ''), ('outputs:cameraFocalLength', 'float', 0, None, 'Camera focal length', {}, True, None, False, ''), ('outputs:cameraFocusDistance', 'float', 0, None, 'Camera focus distance', {}, True, None, False, ''), ('outputs:cameraModel', 'int', 0, None, 'Camera model (pinhole or fisheye models)', {}, True, None, False, ''), ('outputs:cameraNearFar', 'float2', 0, None, 'Camera near/far clipping range', {}, True, None, False, ''), ('outputs:cameraProjection', 'matrix4d', 0, None, 'Camera projection matrix', {}, True, None, False, ''), ('outputs:cameraViewTransform', 'matrix4d', 0, None, 'Camera view matrix', {}, True, None, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes for each newFrame event received', {}, True, None, False, ''), ('outputs:metersPerSceneUnit', 'float', 0, None, 'Scene units to meters scale', {}, True, None, False, ''), ('outputs:renderProductResolution', 'int2', 0, None, 'RenderProduct resolution', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ]) @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = 
og.Database.ROLE_EXECUTION role_data.outputs.cameraProjection = og.Database.ROLE_MATRIX role_data.outputs.cameraViewTransform = og.Database.ROLE_MATRIX role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "renderProductPath", "renderResults", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.renderProductPath, self._attributes.renderResults, self._attributes.swhFrameNumber] self._batchedReadValues = [None, "", 0, 0] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def renderProductPath(self): return self._batchedReadValues[1] @renderProductPath.setter def renderProductPath(self, value): self._batchedReadValues[1] = value @property def renderResults(self): return self._batchedReadValues[2] @renderResults.setter def renderResults(self, value): self._batchedReadValues[2] = value @property def swhFrameNumber(self): return self._batchedReadValues[3] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[3] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cameraApertureOffset", "cameraApertureSize", "cameraFStop", "cameraFocalLength", "cameraFocusDistance", "cameraModel", "cameraNearFar", "cameraProjection", "cameraViewTransform", "exec", "metersPerSceneUnit", "renderProductResolution", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self.cameraFisheyeParams_size = None self._batchedWriteValues = { } @property def cameraFisheyeParams(self): data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) return data_view.get(reserved_element_count=self.cameraFisheyeParams_size) @cameraFisheyeParams.setter def cameraFisheyeParams(self, value): data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) data_view.set(value) self.cameraFisheyeParams_size = data_view.get_array_size() @property def cameraApertureOffset(self): value = self._batchedWriteValues.get(self._attributes.cameraApertureOffset) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraApertureOffset) return 
data_view.get() @cameraApertureOffset.setter def cameraApertureOffset(self, value): self._batchedWriteValues[self._attributes.cameraApertureOffset] = value @property def cameraApertureSize(self): value = self._batchedWriteValues.get(self._attributes.cameraApertureSize) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraApertureSize) return data_view.get() @cameraApertureSize.setter def cameraApertureSize(self, value): self._batchedWriteValues[self._attributes.cameraApertureSize] = value @property def cameraFStop(self): value = self._batchedWriteValues.get(self._attributes.cameraFStop) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraFStop) return data_view.get() @cameraFStop.setter def cameraFStop(self, value): self._batchedWriteValues[self._attributes.cameraFStop] = value @property def cameraFocalLength(self): value = self._batchedWriteValues.get(self._attributes.cameraFocalLength) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraFocalLength) return data_view.get() @cameraFocalLength.setter def cameraFocalLength(self, value): self._batchedWriteValues[self._attributes.cameraFocalLength] = value @property def cameraFocusDistance(self): value = self._batchedWriteValues.get(self._attributes.cameraFocusDistance) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraFocusDistance) return data_view.get() @cameraFocusDistance.setter def cameraFocusDistance(self, value): self._batchedWriteValues[self._attributes.cameraFocusDistance] = value @property def cameraModel(self): value = self._batchedWriteValues.get(self._attributes.cameraModel) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraModel) return data_view.get() @cameraModel.setter def cameraModel(self, value): self._batchedWriteValues[self._attributes.cameraModel] = value @property def cameraNearFar(self): value = self._batchedWriteValues.get(self._attributes.cameraNearFar) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraNearFar) return data_view.get() @cameraNearFar.setter def cameraNearFar(self, value): self._batchedWriteValues[self._attributes.cameraNearFar] = value @property def cameraProjection(self): value = self._batchedWriteValues.get(self._attributes.cameraProjection) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraProjection) return data_view.get() @cameraProjection.setter def cameraProjection(self, value): self._batchedWriteValues[self._attributes.cameraProjection] = value @property def cameraViewTransform(self): value = self._batchedWriteValues.get(self._attributes.cameraViewTransform) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cameraViewTransform) return data_view.get() @cameraViewTransform.setter def cameraViewTransform(self, value): self._batchedWriteValues[self._attributes.cameraViewTransform] = value @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def metersPerSceneUnit(self): value = self._batchedWriteValues.get(self._attributes.metersPerSceneUnit) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.metersPerSceneUnit) return data_view.get() 
@metersPerSceneUnit.setter def metersPerSceneUnit(self, value): self._batchedWriteValues[self._attributes.metersPerSceneUnit] = value @property def renderProductResolution(self): value = self._batchedWriteValues.get(self._attributes.renderProductResolution) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.renderProductResolution) return data_view.get() @renderProductResolution.setter def renderProductResolution(self, value): self._batchedWriteValues[self._attributes.renderProductResolution] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdRenderProductCameraDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdRenderProductCameraDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdRenderProductCameraDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
16,236
Python
45.127841
309
0.631683
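The camera node exposes both cameraViewTransform and cameraProjection as matrix4d outputs. A downstream consumer could combine them into a single view-projection matrix, for example to project world-space points; a minimal sketch, assuming a row-vector convention (view followed by projection), which is an assumption and not stated by the node:

import numpy as np

def view_projection(db) -> np.ndarray:
    # Both outputs carry the matrix role (4x4); reshape covers flat or nested layouts.
    view = np.asarray(db.outputs.cameraViewTransform, dtype=np.float64).reshape(4, 4)
    proj = np.asarray(db.outputs.cameraProjection, dtype=np.float64).reshape(4, 4)
    return view @ proj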
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostCompRenderVarTexturesDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostCompRenderVarTextures Synthetic Data node to compose a front renderVar texture into a back renderVar texture """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdPostCompRenderVarTexturesDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostCompRenderVarTextures Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cudaPtr inputs.format inputs.gpu inputs.height inputs.mode inputs.parameters inputs.renderVar inputs.rp inputs.width Predefined Tokens: tokens.line tokens.grid """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cudaPtr', 'uint64', 0, None, 'Front texture CUDA pointer', {}, True, 0, False, ''), ('inputs:format', 'uint64', 0, None, 'Front texture format', {}, True, 0, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:height', 'uint', 0, None, 'Front texture height', {}, True, 0, False, ''), ('inputs:mode', 'token', 0, None, 'Mode : grid, line', {ogn.MetadataKeys.DEFAULT: '"line"'}, True, 'line', False, ''), ('inputs:parameters', 'float3', 0, None, 'Parameters', {ogn.MetadataKeys.DEFAULT: '[0, 0, 0]'}, True, [0, 0, 0], False, ''), ('inputs:renderVar', 'token', 0, None, 'Name of the back RenderVar', {ogn.MetadataKeys.DEFAULT: '"LdrColor"'}, True, 'LdrColor', False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:width', 'uint', 0, None, 'Front texture width', {}, True, 0, False, ''), ]) class tokens: line = "line" grid = "grid" class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaPtr", "format", "gpu", "height", "mode", "parameters", "renderVar", "rp", "width", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cudaPtr, self._attributes.format, self._attributes.gpu, self._attributes.height, self._attributes.mode, self._attributes.parameters, self._attributes.renderVar, self._attributes.rp, self._attributes.width] self._batchedReadValues = [0, 0, 0, 0, "line", [0, 0, 0], "LdrColor", 0, 0] @property def cudaPtr(self): return self._batchedReadValues[0] @cudaPtr.setter def cudaPtr(self, value): self._batchedReadValues[0] = value @property def format(self): return self._batchedReadValues[1] @format.setter def format(self, value): self._batchedReadValues[1] = value @property def gpu(self): return self._batchedReadValues[2] @gpu.setter def 
gpu(self, value): self._batchedReadValues[2] = value @property def height(self): return self._batchedReadValues[3] @height.setter def height(self, value): self._batchedReadValues[3] = value @property def mode(self): return self._batchedReadValues[4] @mode.setter def mode(self, value): self._batchedReadValues[4] = value @property def parameters(self): return self._batchedReadValues[5] @parameters.setter def parameters(self, value): self._batchedReadValues[5] = value @property def renderVar(self): return self._batchedReadValues[6] @renderVar.setter def renderVar(self, value): self._batchedReadValues[6] = value @property def rp(self): return self._batchedReadValues[7] @rp.setter def rp(self, value): self._batchedReadValues[7] = value @property def width(self): return self._batchedReadValues[8] @width.setter def width(self, value): self._batchedReadValues[8] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = { } """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostCompRenderVarTexturesDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostCompRenderVarTexturesDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostCompRenderVarTexturesDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
7,980
Python
43.837078
265
0.63183
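The database also exposes the predefined tokens (line, grid) as class attributes, which is a convenient way to compare the mode input instead of hard-coding strings. A short sketch of such a branch; the composition code itself is omitted and only the token comparison is taken from the generated file:

class OgnSdPostCompRenderVarTexturesExample:
    @staticmethod
    def compute(db) -> bool:
        if db.inputs.mode == db.tokens.grid:
            pass  # grid overlay composition would go here (illustrative branch)
        else:
            pass  # default "line" mode
        return True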
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdOnNewRenderProductFrameDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdOnNewRenderProductFrame Synthetic Data postprocess node to execute pipeline after the NewFrame event has been received on the given renderProduct """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdOnNewRenderProductFrameDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdOnNewRenderProductFrame Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.renderProductDataPtrs inputs.renderProductPath inputs.renderProductPaths inputs.swhFrameNumber Outputs: outputs.cudaStream outputs.exec outputs.renderProductPath outputs.renderResults outputs.swhFrameNumber """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, 'Received', 'Executes for each newFrame event received', {}, True, None, False, ''), ('inputs:renderProductDataPtrs', 'uint64[]', 0, None, 'HydraRenderProduct data pointers.', {}, True, [], False, ''), ('inputs:renderProductPath', 'token', 0, None, 'Path of the renderProduct to wait for being rendered', {}, True, '', False, ''), ('inputs:renderProductPaths', 'token[]', 0, None, 'Render product path tokens.', {}, True, [], False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:cudaStream', 'uint64', 0, None, 'Cuda stream', {}, True, None, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes for each newFrame event received', {}, True, None, False, ''), ('outputs:renderProductPath', 'token', 0, None, 'Path of the renderProduct to wait for being rendered', {}, True, None, False, ''), ('outputs:renderResults', 'uint64', 0, None, 'Render results', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ]) @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "renderProductPath", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.renderProductPath, self._attributes.swhFrameNumber] self._batchedReadValues = [None, "", 0] @property def renderProductDataPtrs(self): data_view = 
og.AttributeValueHelper(self._attributes.renderProductDataPtrs) return data_view.get() @renderProductDataPtrs.setter def renderProductDataPtrs(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.renderProductDataPtrs) data_view = og.AttributeValueHelper(self._attributes.renderProductDataPtrs) data_view.set(value) self.renderProductDataPtrs_size = data_view.get_array_size() @property def renderProductPaths(self): data_view = og.AttributeValueHelper(self._attributes.renderProductPaths) return data_view.get() @renderProductPaths.setter def renderProductPaths(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.renderProductPaths) data_view = og.AttributeValueHelper(self._attributes.renderProductPaths) data_view.set(value) self.renderProductPaths_size = data_view.get_array_size() @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def renderProductPath(self): return self._batchedReadValues[1] @renderProductPath.setter def renderProductPath(self, value): self._batchedReadValues[1] = value @property def swhFrameNumber(self): return self._batchedReadValues[2] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[2] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaStream", "exec", "renderProductPath", "renderResults", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def cudaStream(self): value = self._batchedWriteValues.get(self._attributes.cudaStream) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cudaStream) return data_view.get() @cudaStream.setter def cudaStream(self, value): self._batchedWriteValues[self._attributes.cudaStream] = value @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def renderProductPath(self): value = self._batchedWriteValues.get(self._attributes.renderProductPath) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.renderProductPath) return data_view.get() @renderProductPath.setter def renderProductPath(self, value): self._batchedWriteValues[self._attributes.renderProductPath] = value @property def renderResults(self): value = self._batchedWriteValues.get(self._attributes.renderResults) if value: return value else: data_view = 
og.AttributeValueHelper(self._attributes.renderResults) return data_view.get() @renderResults.setter def renderResults(self, value): self._batchedWriteValues[self._attributes.renderResults] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdOnNewRenderProductFrameDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdOnNewRenderProductFrameDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdOnNewRenderProductFrameDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
11,145
Python
46.228813
145
0.637057
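This node mainly forwards per-frame information once the NewFrame event has fired; the input and output attribute sets mirror each other (renderProductPath, swhFrameNumber). A hypothetical pass-through sketch; whether the real node also fills outputs such as renderResults or cudaStream is not shown in this excerpt:

class OgnSdOnNewRenderProductFrameExample:
    @staticmethod
    def compute(db) -> bool:
        db.outputs.renderProductPath = db.inputs.renderProductPath
        db.outputs.swhFrameNumber = db.inputs.swhFrameNumber
        return True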
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTestPrintRawArrayDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTestPrintRawArray Synthetic Data test node printing the input linear array """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy import sys import traceback class OgnSdTestPrintRawArrayDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTestPrintRawArray Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.bufferSize inputs.data inputs.elementCount inputs.elementType inputs.exec inputs.height inputs.mode inputs.randomSeed inputs.referenceNumUniqueRandomValues inputs.referenceSWHFrameNumbers inputs.referenceTolerance inputs.referenceValues inputs.swhFrameNumber inputs.width Outputs: outputs.exec outputs.swhFrameNumber State: state.initialSWHFrameNumber Predefined Tokens: tokens.uint16 tokens.int16 tokens.uint32 tokens.int32 tokens.float32 tokens.token tokens.printFormatted tokens.printReferences """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:bufferSize', 'uint', 0, None, 'Size (in bytes) of the buffer (0 if the input is a texture)', {}, True, 0, False, ''), ('inputs:data', 'uchar[]', 0, None, 'Buffer array data', {ogn.MetadataKeys.DEFAULT: '[]'}, True, [], False, ''), ('inputs:elementCount', 'int', 0, None, 'Number of array element', {ogn.MetadataKeys.DEFAULT: '1'}, True, 1, False, ''), ('inputs:elementType', 'token', 0, None, 'Type of the array element', {ogn.MetadataKeys.DEFAULT: '"uint8"'}, True, 'uint8', False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:height', 'uint', 0, None, 'Height (0 if the input is a buffer)', {}, True, 0, False, ''), ('inputs:mode', 'token', 0, None, 'Mode in [printFormatted, printReferences, testReferences]', {ogn.MetadataKeys.DEFAULT: '"printFormatted"'}, True, 'printFormatted', False, ''), ('inputs:randomSeed', 'int', 0, None, 'Random seed', {}, True, 0, False, ''), ('inputs:referenceNumUniqueRandomValues', 'int', 0, None, 'Number of reference unique random values to compare', {ogn.MetadataKeys.DEFAULT: '7'}, True, 7, False, ''), ('inputs:referenceSWHFrameNumbers', 'uint[]', 0, None, 'Reference swhFrameNumbers relative to the first one', {ogn.MetadataKeys.DEFAULT: '[11, 17, 29]'}, True, [11, 17, 29], False, ''), ('inputs:referenceTolerance', 'float', 0, None, 'Reference tolerance', {ogn.MetadataKeys.DEFAULT: '0.1'}, True, 0.1, False, ''), ('inputs:referenceValues', 'float[]', 0, None, 'Reference data point values', {}, True, [], False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Frame number', {}, True, 0, False, ''), ('inputs:width', 'uint', 0, None, 'Width (0 if the input is a buffer)', {}, True, 0, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes when the event is received', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'FrameNumber just rendered', {}, True, None, False, ''), ('state:initialSWHFrameNumber', 'int64', 0, None, 
'Initial swhFrameNumber', {ogn.MetadataKeys.DEFAULT: '-1'}, True, -1, False, ''), ]) class tokens: uint16 = "uint16" int16 = "int16" uint32 = "uint32" int32 = "int32" float32 = "float32" token = "token" printFormatted = "printFormatted" printReferences = "printReferences" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"bufferSize", "elementCount", "elementType", "exec", "height", "mode", "randomSeed", "referenceNumUniqueRandomValues", "referenceTolerance", "swhFrameNumber", "width", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.bufferSize, self._attributes.elementCount, self._attributes.elementType, self._attributes.exec, self._attributes.height, self._attributes.mode, self._attributes.randomSeed, self._attributes.referenceNumUniqueRandomValues, self._attributes.referenceTolerance, self._attributes.swhFrameNumber, self._attributes.width] self._batchedReadValues = [0, 1, "uint8", None, 0, "printFormatted", 0, 7, 0.1, 0, 0] @property def data(self): data_view = og.AttributeValueHelper(self._attributes.data) return data_view.get() @data.setter def data(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.data) data_view = og.AttributeValueHelper(self._attributes.data) data_view.set(value) self.data_size = data_view.get_array_size() @property def referenceSWHFrameNumbers(self): data_view = og.AttributeValueHelper(self._attributes.referenceSWHFrameNumbers) return data_view.get() @referenceSWHFrameNumbers.setter def referenceSWHFrameNumbers(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.referenceSWHFrameNumbers) data_view = og.AttributeValueHelper(self._attributes.referenceSWHFrameNumbers) data_view.set(value) self.referenceSWHFrameNumbers_size = data_view.get_array_size() @property def referenceValues(self): data_view = og.AttributeValueHelper(self._attributes.referenceValues) return data_view.get() @referenceValues.setter def referenceValues(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.referenceValues) data_view = og.AttributeValueHelper(self._attributes.referenceValues) data_view.set(value) self.referenceValues_size = data_view.get_array_size() @property def bufferSize(self): return self._batchedReadValues[0] @bufferSize.setter def bufferSize(self, value): self._batchedReadValues[0] = value @property def elementCount(self): return self._batchedReadValues[1] @elementCount.setter def elementCount(self, value): self._batchedReadValues[1] = value @property def elementType(self): return self._batchedReadValues[2] @elementType.setter def elementType(self, value): self._batchedReadValues[2] = value @property def exec(self): return self._batchedReadValues[3] @exec.setter def exec(self, value): self._batchedReadValues[3] = value @property def height(self): return self._batchedReadValues[4] 
@height.setter def height(self, value): self._batchedReadValues[4] = value @property def mode(self): return self._batchedReadValues[5] @mode.setter def mode(self, value): self._batchedReadValues[5] = value @property def randomSeed(self): return self._batchedReadValues[6] @randomSeed.setter def randomSeed(self, value): self._batchedReadValues[6] = value @property def referenceNumUniqueRandomValues(self): return self._batchedReadValues[7] @referenceNumUniqueRandomValues.setter def referenceNumUniqueRandomValues(self, value): self._batchedReadValues[7] = value @property def referenceTolerance(self): return self._batchedReadValues[8] @referenceTolerance.setter def referenceTolerance(self, value): self._batchedReadValues[8] = value @property def swhFrameNumber(self): return self._batchedReadValues[9] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[9] = value @property def width(self): return self._batchedReadValues[10] @width.setter def width(self, value): self._batchedReadValues[10] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) 
@property def initialSWHFrameNumber(self): data_view = og.AttributeValueHelper(self._attributes.initialSWHFrameNumber) return data_view.get() @initialSWHFrameNumber.setter def initialSWHFrameNumber(self, value): data_view = og.AttributeValueHelper(self._attributes.initialSWHFrameNumber) data_view.set(value) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTestPrintRawArrayDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTestPrintRawArrayDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTestPrintRawArrayDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes) class abi: """Class defining the ABI interface for the node type""" @staticmethod def get_node_type(): get_node_type_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'get_node_type', None) if callable(get_node_type_function): return get_node_type_function() return 'omni.syntheticdata.SdTestPrintRawArray' @staticmethod def compute(context, node): try: per_node_data = OgnSdTestPrintRawArrayDatabase.PER_NODE_DATA[node.node_id()] db = per_node_data.get('_db') if db is None: db = OgnSdTestPrintRawArrayDatabase(node) per_node_data['_db'] = db except: db = OgnSdTestPrintRawArrayDatabase(node) try: compute_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'compute', None) if callable(compute_function) and compute_function.__code__.co_argcount > 1: return compute_function(context, node) db.inputs._prefetch() db.inputs._setting_locked = True with og.in_compute(): return OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS.compute(db) except Exception as error: stack_trace = "".join(traceback.format_tb(sys.exc_info()[2].tb_next)) db.log_error(f'Assertion raised in compute - {error}\n{stack_trace}', add_context=False) finally: db.inputs._setting_locked = False db.outputs._commit() return False @staticmethod def initialize(context, node): OgnSdTestPrintRawArrayDatabase._initialize_per_node_data(node) initialize_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'initialize', None) if callable(initialize_function): initialize_function(context, node) @staticmethod def release(node): release_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'release', None) if callable(release_function): release_function(node) OgnSdTestPrintRawArrayDatabase._release_per_node_data(node) @staticmethod def update_node_version(context, node, old_version, new_version): update_node_version_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'update_node_version', None) if callable(update_node_version_function): return update_node_version_function(context, node, old_version, new_version) return False @staticmethod def initialize_type(node_type): initialize_type_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'initialize_type', None) needs_initializing = True if callable(initialize_type_function): needs_initializing = initialize_type_function(node_type) if needs_initializing: node_type.set_metadata(ogn.MetadataKeys.EXTENSION, "omni.syntheticdata") node_type.set_metadata(ogn.MetadataKeys.TOKENS, "[\"uint16\", \"int16\", \"uint32\", \"int32\", \"float32\", 
\"token\", \"printFormatted\", \"printReferences\"]") node_type.set_metadata(ogn.MetadataKeys.CATEGORIES, "graph:action,internal:test") node_type.set_metadata(ogn.MetadataKeys.DESCRIPTION, "Synthetic Data test node printing the input linear array") node_type.set_metadata(ogn.MetadataKeys.EXCLUSIONS, "tests") node_type.set_metadata(ogn.MetadataKeys.LANGUAGE, "Python") OgnSdTestPrintRawArrayDatabase.INTERFACE.add_to_node_type(node_type) node_type.set_has_state(True) @staticmethod def on_connection_type_resolve(node): on_connection_type_resolve_function = getattr(OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS, 'on_connection_type_resolve', None) if callable(on_connection_type_resolve_function): on_connection_type_resolve_function(node) NODE_TYPE_CLASS = None GENERATOR_VERSION = (1, 17, 2) TARGET_VERSION = (2, 65, 4) @staticmethod def register(node_type_class): OgnSdTestPrintRawArrayDatabase.NODE_TYPE_CLASS = node_type_class og.register_node_type(OgnSdTestPrintRawArrayDatabase.abi, 1) @staticmethod def deregister(): og.deregister_node_type("omni.syntheticdata.SdTestPrintRawArray")
18,880
Python
45.851117
375
0.628284
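Unlike the previous excerpts, this file also includes the abi wrapper that instantiates the database, prefetches inputs, calls NODE_TYPE_CLASS.compute(db) and commits outputs, plus register()/deregister(). A registered implementation could decode the raw uchar[] input according to inputs.elementType; the token-to-dtype mapping below is an assumption based on the predefined tokens listed above, not part of the generated file:

import numpy as np

# Hypothetical dtype mapping for the elementType tokens (uint16 ... float32).
_DTYPES = {
    "uint16": np.uint16,
    "int16": np.int16,
    "uint32": np.uint32,
    "int32": np.int32,
    "float32": np.float32,
}

def decode_raw_array(db) -> np.ndarray:
    dtype = _DTYPES.get(db.inputs.elementType, np.uint8)
    raw = np.asarray(db.inputs.data, dtype=np.uint8)   # inputs.data is a uchar[] buffer
    return np.frombuffer(raw.tobytes(), dtype=dtype)

A node class exposing such logic in a compute(db) staticmethod would be hooked up via OgnSdTestPrintRawArrayDatabase.register(NodeClass), as the abi section above shows.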
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTextureToLinearArrayDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTextureToLinearArray SyntheticData node to copy the input texture into a linear array buffer """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdTextureToLinearArrayDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTextureToLinearArray Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cudaMipmappedArray inputs.format inputs.height inputs.hydraTime inputs.mipCount inputs.outputHeight inputs.outputWidth inputs.simTime inputs.stream inputs.width Outputs: outputs.data outputs.height outputs.hydraTime outputs.simTime outputs.stream outputs.width """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cudaMipmappedArray', 'uint64', 0, None, 'Pointer to the CUDA Mipmapped Array', {}, True, 0, False, ''), ('inputs:format', 'uint64', 0, None, 'Format', {}, True, 0, False, ''), ('inputs:height', 'uint', 0, None, 'Height', {}, True, 0, False, ''), ('inputs:hydraTime', 'double', 0, None, 'Hydra time in stage', {}, True, 0.0, False, ''), ('inputs:mipCount', 'uint', 0, None, 'Mip Count', {}, True, 0, False, ''), ('inputs:outputHeight', 'uint', 0, None, 'Requested output height', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:outputWidth', 'uint', 0, None, 'Requested output width', {ogn.MetadataKeys.DEFAULT: '0'}, True, 0, False, ''), ('inputs:simTime', 'double', 0, None, 'Simulation time', {}, True, 0.0, False, ''), ('inputs:stream', 'uint64', 0, None, 'Pointer to the CUDA Stream', {}, True, 0, False, ''), ('inputs:width', 'uint', 0, None, 'Width', {}, True, 0, False, ''), ('outputs:data', 'float4[]', 0, None, 'Buffer array data', {ogn.MetadataKeys.MEMORY_TYPE: 'cuda', ogn.MetadataKeys.DEFAULT: '[]'}, True, [], False, ''), ('outputs:height', 'uint', 0, None, 'Buffer array height', {}, True, None, False, ''), ('outputs:hydraTime', 'double', 0, None, 'Hydra time in stage', {}, True, None, False, ''), ('outputs:simTime', 'double', 0, None, 'Simulation time', {}, True, None, False, ''), ('outputs:stream', 'uint64', 0, None, 'Pointer to the CUDA Stream', {}, True, None, False, ''), ('outputs:width', 'uint', 0, None, 'Buffer array width', {}, True, None, False, ''), ]) class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaMipmappedArray", "format", "height", "hydraTime", "mipCount", "outputHeight", "outputWidth", "simTime", "stream", "width", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cudaMipmappedArray, 
self._attributes.format, self._attributes.height, self._attributes.hydraTime, self._attributes.mipCount, self._attributes.outputHeight, self._attributes.outputWidth, self._attributes.simTime, self._attributes.stream, self._attributes.width] self._batchedReadValues = [0, 0, 0, 0.0, 0, 0, 0, 0.0, 0, 0] @property def cudaMipmappedArray(self): return self._batchedReadValues[0] @cudaMipmappedArray.setter def cudaMipmappedArray(self, value): self._batchedReadValues[0] = value @property def format(self): return self._batchedReadValues[1] @format.setter def format(self, value): self._batchedReadValues[1] = value @property def height(self): return self._batchedReadValues[2] @height.setter def height(self, value): self._batchedReadValues[2] = value @property def hydraTime(self): return self._batchedReadValues[3] @hydraTime.setter def hydraTime(self, value): self._batchedReadValues[3] = value @property def mipCount(self): return self._batchedReadValues[4] @mipCount.setter def mipCount(self, value): self._batchedReadValues[4] = value @property def outputHeight(self): return self._batchedReadValues[5] @outputHeight.setter def outputHeight(self, value): self._batchedReadValues[5] = value @property def outputWidth(self): return self._batchedReadValues[6] @outputWidth.setter def outputWidth(self, value): self._batchedReadValues[6] = value @property def simTime(self): return self._batchedReadValues[7] @simTime.setter def simTime(self, value): self._batchedReadValues[7] = value @property def stream(self): return self._batchedReadValues[8] @stream.setter def stream(self, value): self._batchedReadValues[8] = value @property def width(self): return self._batchedReadValues[9] @width.setter def width(self, value): self._batchedReadValues[9] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"height", "hydraTime", "simTime", "stream", "width", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self.data_size = 0 self._batchedWriteValues = { } @property def data(self): data_view = og.AttributeValueHelper(self._attributes.data) return data_view.get(reserved_element_count=self.data_size, on_gpu=True) @data.setter def data(self, value): data_view = og.AttributeValueHelper(self._attributes.data) data_view.set(value, on_gpu=True) self.data_size = data_view.get_array_size() @property def height(self): value = self._batchedWriteValues.get(self._attributes.height) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.height) return data_view.get() @height.setter def height(self, value): self._batchedWriteValues[self._attributes.height] = value @property def hydraTime(self): value = 
self._batchedWriteValues.get(self._attributes.hydraTime) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.hydraTime) return data_view.get() @hydraTime.setter def hydraTime(self, value): self._batchedWriteValues[self._attributes.hydraTime] = value @property def simTime(self): value = self._batchedWriteValues.get(self._attributes.simTime) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.simTime) return data_view.get() @simTime.setter def simTime(self, value): self._batchedWriteValues[self._attributes.simTime] = value @property def stream(self): value = self._batchedWriteValues.get(self._attributes.stream) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.stream) return data_view.get() @stream.setter def stream(self, value): self._batchedWriteValues[self._attributes.stream] = value @property def width(self): value = self._batchedWriteValues.get(self._attributes.width) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.width) return data_view.get() @width.setter def width(self, value): self._batchedWriteValues[self._attributes.width] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTextureToLinearArrayDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTextureToLinearArrayDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTextureToLinearArrayDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
12,156
Python
41.957597
320
0.608342
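The OgnSdTextureToLinearArrayDatabase class above is the object a node implementation receives as db in its compute entry point. The sketch below is illustrative only and not part of the repository: the class name OgnSdTextureToLinearArray and the compute(db) signature follow the usual OGN pattern, and the texture-to-buffer copy itself is assumed to happen in the node's native CUDA implementation, so only the metadata pass-through is shown.

class OgnSdTextureToLinearArray:
    @staticmethod
    def compute(db) -> bool:
        # Batched input reads resolve through ValuesForInputs; a requested
        # output resolution of 0 falls back to the source texture size.
        width = db.inputs.outputWidth or db.inputs.width
        height = db.inputs.outputHeight or db.inputs.height

        # outputs.data (the float4[] CUDA buffer) is filled by the native
        # implementation; here only the accompanying metadata is forwarded.
        db.outputs.width = width
        db.outputs.height = height
        db.outputs.simTime = db.inputs.simTime
        db.outputs.hydraTime = db.inputs.hydraTime
        db.outputs.stream = db.inputs.stream
        return True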
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdTestInstanceMappingDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdTestInstanceMapping Synthetic Data node to test the instance mapping pipeline """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdTestInstanceMappingDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdTestInstanceMapping Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.instanceMapPtr inputs.instancePrimPathPtr inputs.minInstanceIndex inputs.minSemanticIndex inputs.numInstances inputs.numSemantics inputs.semanticLabelTokenPtrs inputs.semanticLocalTransformPtr inputs.semanticMapPtr inputs.semanticPrimPathPtr inputs.semanticWorldTransformPtr inputs.stage inputs.swhFrameNumber inputs.testCaseIndex Outputs: outputs.exec outputs.semanticFilterPredicate outputs.success Predefined Tokens: tokens.simulation tokens.postRender tokens.onDemand """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:instanceMapPtr', 'uint64', 0, None, 'Array pointer of numInstances uint16_t containing the semantic index of the instance prim first semantic prim parent', {}, True, 0, False, ''), ('inputs:instancePrimPathPtr', 'uint64', 0, None, 'Array pointer of numInstances uint64_t containing the prim path tokens for every instance prims', {}, True, 0, False, ''), ('inputs:minInstanceIndex', 'uint', 0, None, 'Instance index of the first instance prim in the instance arrays', {}, True, 0, False, ''), ('inputs:minSemanticIndex', 'uint', 0, None, 'Semantic index of the first semantic prim in the semantic arrays', {}, True, 0, False, ''), ('inputs:numInstances', 'uint', 0, None, 'Number of instances prim in the instance arrays', {}, True, 0, False, ''), ('inputs:numSemantics', 'uint', 0, None, 'Number of semantic prim in the semantic arrays', {}, True, 0, False, ''), ('inputs:semanticLabelTokenPtrs', 'uint64[]', 0, None, 'Array containing for every input semantic filters the corresponding array pointer of numSemantics uint64_t representing the semantic label of the semantic prim', {}, True, [], False, ''), ('inputs:semanticLocalTransformPtr', 'uint64', 0, None, 'Array pointer of numSemantics 4x4 float matrices containing the transform from world to object space for every semantic prims', {}, True, 0, False, ''), ('inputs:semanticMapPtr', 'uint64', 0, None, 'Array pointer of numSemantics uint16_t containing the semantic index of the semantic prim first semantic prim parent', {}, True, 0, False, ''), ('inputs:semanticPrimPathPtr', 'uint64', 0, None, 'Array pointer of numSemantics uint32_t containing the prim part of the prim path tokens for every semantic prims', {}, True, 0, False, ''), ('inputs:semanticWorldTransformPtr', 'uint64', 0, None, 'Array pointer of numSemantics 4x4 float matrices containing the transform from local to world space for every semantic entity', {}, True, 0, False, ''), 
('inputs:stage', 'token', 0, None, 'Stage in {simulation, postrender, ondemand}', {}, True, '', False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('inputs:testCaseIndex', 'int', 0, None, 'Test case index', {ogn.MetadataKeys.DEFAULT: '-1'}, True, -1, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes when the event is received', {}, True, None, False, ''), ('outputs:semanticFilterPredicate', 'token', 0, None, 'The semantic filter predicate : a disjunctive normal form of semantic type and label', {}, True, None, False, ''), ('outputs:success', 'bool', 0, None, 'Test value : false if failed', {}, True, None, False, ''), ]) class tokens: simulation = "simulation" postRender = "postRender" onDemand = "onDemand" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "instanceMapPtr", "instancePrimPathPtr", "minInstanceIndex", "minSemanticIndex", "numInstances", "numSemantics", "semanticLocalTransformPtr", "semanticMapPtr", "semanticPrimPathPtr", "semanticWorldTransformPtr", "stage", "swhFrameNumber", "testCaseIndex", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.instanceMapPtr, self._attributes.instancePrimPathPtr, self._attributes.minInstanceIndex, self._attributes.minSemanticIndex, self._attributes.numInstances, self._attributes.numSemantics, self._attributes.semanticLocalTransformPtr, self._attributes.semanticMapPtr, self._attributes.semanticPrimPathPtr, self._attributes.semanticWorldTransformPtr, self._attributes.stage, self._attributes.swhFrameNumber, self._attributes.testCaseIndex] self._batchedReadValues = [None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", 0, -1] @property def semanticLabelTokenPtrs(self): data_view = og.AttributeValueHelper(self._attributes.semanticLabelTokenPtrs) return data_view.get() @semanticLabelTokenPtrs.setter def semanticLabelTokenPtrs(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.semanticLabelTokenPtrs) data_view = og.AttributeValueHelper(self._attributes.semanticLabelTokenPtrs) data_view.set(value) self.semanticLabelTokenPtrs_size = data_view.get_array_size() @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def instanceMapPtr(self): return self._batchedReadValues[1] @instanceMapPtr.setter def instanceMapPtr(self, value): self._batchedReadValues[1] = value @property def instancePrimPathPtr(self): return self._batchedReadValues[2] @instancePrimPathPtr.setter def instancePrimPathPtr(self, value): self._batchedReadValues[2] = value @property def minInstanceIndex(self): return self._batchedReadValues[3] @minInstanceIndex.setter def minInstanceIndex(self, value): self._batchedReadValues[3] = value @property def minSemanticIndex(self): return 
self._batchedReadValues[4] @minSemanticIndex.setter def minSemanticIndex(self, value): self._batchedReadValues[4] = value @property def numInstances(self): return self._batchedReadValues[5] @numInstances.setter def numInstances(self, value): self._batchedReadValues[5] = value @property def numSemantics(self): return self._batchedReadValues[6] @numSemantics.setter def numSemantics(self, value): self._batchedReadValues[6] = value @property def semanticLocalTransformPtr(self): return self._batchedReadValues[7] @semanticLocalTransformPtr.setter def semanticLocalTransformPtr(self, value): self._batchedReadValues[7] = value @property def semanticMapPtr(self): return self._batchedReadValues[8] @semanticMapPtr.setter def semanticMapPtr(self, value): self._batchedReadValues[8] = value @property def semanticPrimPathPtr(self): return self._batchedReadValues[9] @semanticPrimPathPtr.setter def semanticPrimPathPtr(self, value): self._batchedReadValues[9] = value @property def semanticWorldTransformPtr(self): return self._batchedReadValues[10] @semanticWorldTransformPtr.setter def semanticWorldTransformPtr(self, value): self._batchedReadValues[10] = value @property def stage(self): return self._batchedReadValues[11] @stage.setter def stage(self, value): self._batchedReadValues[11] = value @property def swhFrameNumber(self): return self._batchedReadValues[12] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[12] = value @property def testCaseIndex(self): return self._batchedReadValues[13] @testCaseIndex.setter def testCaseIndex(self, value): self._batchedReadValues[13] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "semanticFilterPredicate", "success", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def semanticFilterPredicate(self): value = self._batchedWriteValues.get(self._attributes.semanticFilterPredicate) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticFilterPredicate) return data_view.get() @semanticFilterPredicate.setter def semanticFilterPredicate(self, value): self._batchedWriteValues[self._attributes.semanticFilterPredicate] = value @property def success(self): value = self._batchedWriteValues.get(self._attributes.success) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.success) 
return data_view.get() @success.setter def success(self, value): self._batchedWriteValues[self._attributes.success] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdTestInstanceMappingDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdTestInstanceMappingDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdTestInstanceMappingDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
14,821
Python
46.812903
516
0.647257
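As a rough illustration of how the generated accessors and the predefined tokens might be used together (the helper name run_test_case and its logic are hypothetical, not taken from the extension):

def run_test_case(db) -> bool:
    # The stage token selects which pipeline stage the test runs in.
    if db.inputs.stage == OgnSdTestInstanceMappingDatabase.tokens.simulation:
        db.outputs.success = True  # nothing to validate in this sketch
    else:
        # A real test would dereference instanceMapPtr / semanticMapPtr and
        # compare them with the expectation selected by testCaseIndex.
        db.outputs.success = db.inputs.testCaseIndex >= 0
    return db.outputs.success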
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostSemantic3dBoundingBoxCameraProjectionDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostSemantic3dBoundingBoxCameraProjection Synthetic Data node to project 3d bounding boxes data in camera space. """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdPostSemantic3dBoundingBoxCameraProjectionDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostSemantic3dBoundingBoxCameraProjection Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cameraFisheyeParams inputs.cameraModel inputs.cameraNearFar inputs.exec inputs.gpu inputs.instanceMappingInfoSDPtr inputs.metersPerSceneUnit inputs.renderProductResolution inputs.rp inputs.sdSemBBoxExtentCudaPtr inputs.sdSemBBoxInfosCudaPtr inputs.semanticWorldTransformSDCudaPtr inputs.viewportNearFar inputs.viewportResolution Outputs: outputs.exec outputs.sdSemBBox3dCamCornersCudaPtr outputs.sdSemBBox3dCamExtentCudaPtr Predefined Tokens: tokens.SemanticBoundingBox3DInfosSD tokens.SemanticBoundingBox3DCamCornersSD tokens.SemanticBoundingBox3DCamExtentSD """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cameraFisheyeParams', 'float[]', 0, None, 'Camera fisheye projection parameters', {}, True, [], False, ''), ('inputs:cameraModel', 'int', 0, None, 'Camera model (pinhole or fisheye models)', {}, True, 0, False, ''), ('inputs:cameraNearFar', 'float2', 0, None, 'Camera near/far clipping range', {}, True, [0.0, 0.0], False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:instanceMappingInfoSDPtr', 'uint64', 0, None, 'uint buffer pointer containing the following information : [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]', {}, True, 0, False, ''), ('inputs:metersPerSceneUnit', 'float', 0, None, 'Scene units to meters scale', {ogn.MetadataKeys.DEFAULT: '0.01'}, True, 0.01, False, ''), ('inputs:renderProductResolution', 'int2', 0, None, 'RenderProduct resolution', {}, True, [0, 0], False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:sdSemBBoxExtentCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the extent of the bounding boxes as a float4=(u_min,v_min,u_max,v_max) for 2D or a float6=(xmin,ymin,zmin,xmax,ymax,zmax) in object space for 3D', {}, True, 0, False, ''), ('inputs:sdSemBBoxInfosCudaPtr', 'uint64', 0, None, 'Cuda buffer containing valid bounding boxes infos', {}, True, 0, False, ''), ('inputs:semanticWorldTransformSDCudaPtr', 'uint64', 0, None, 'cuda float44 buffer pointer of size numSemantics containing the world semantic transform', {}, True, 0, False, ''), ('inputs:viewportNearFar', 'float2', 0, None, 'near and far plane (in scene units) used to clip the 3d bounding boxes.', {ogn.MetadataKeys.DEFAULT: '[1.0, 
10000000.0]'}, True, [1.0, 10000000.0], False, ''), ('inputs:viewportResolution', 'int2', 0, None, 'viewport width and height (in pixels) used to clip the 3d bounding boxes.', {ogn.MetadataKeys.DEFAULT: '[65536, 65536]'}, True, [65536, 65536], False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:sdSemBBox3dCamCornersCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the projection of the 3d bounding boxes on the camera plane represented as a float4=(u,v,z,a) for each bounding box corners', {}, True, None, False, ''), ('outputs:sdSemBBox3dCamExtentCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the 2d extent of the 3d bounding boxes on the camera plane represented as a float6=(u_min,u_max,v_min,v_max,z_min,z_max)', {}, True, None, False, ''), ]) class tokens: SemanticBoundingBox3DInfosSD = "SemanticBoundingBox3DInfosSD" SemanticBoundingBox3DCamCornersSD = "SemanticBoundingBox3DCamCornersSD" SemanticBoundingBox3DCamExtentSD = "SemanticBoundingBox3DCamExtentSD" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cameraModel", "cameraNearFar", "exec", "gpu", "instanceMappingInfoSDPtr", "metersPerSceneUnit", "renderProductResolution", "rp", "sdSemBBoxExtentCudaPtr", "sdSemBBoxInfosCudaPtr", "semanticWorldTransformSDCudaPtr", "viewportNearFar", "viewportResolution", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cameraModel, self._attributes.cameraNearFar, self._attributes.exec, self._attributes.gpu, self._attributes.instanceMappingInfoSDPtr, self._attributes.metersPerSceneUnit, self._attributes.renderProductResolution, self._attributes.rp, self._attributes.sdSemBBoxExtentCudaPtr, self._attributes.sdSemBBoxInfosCudaPtr, self._attributes.semanticWorldTransformSDCudaPtr, self._attributes.viewportNearFar, self._attributes.viewportResolution] self._batchedReadValues = [0, [0.0, 0.0], None, 0, 0, 0.01, [0, 0], 0, 0, 0, 0, [1.0, 10000000.0], [65536, 65536]] @property def cameraFisheyeParams(self): data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) return data_view.get() @cameraFisheyeParams.setter def cameraFisheyeParams(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.cameraFisheyeParams) data_view = og.AttributeValueHelper(self._attributes.cameraFisheyeParams) data_view.set(value) self.cameraFisheyeParams_size = data_view.get_array_size() @property def cameraModel(self): return self._batchedReadValues[0] @cameraModel.setter def cameraModel(self, value): self._batchedReadValues[0] = value @property def cameraNearFar(self): return self._batchedReadValues[1] @cameraNearFar.setter def cameraNearFar(self, value): self._batchedReadValues[1] = value @property def exec(self): return self._batchedReadValues[2] @exec.setter def exec(self, value): self._batchedReadValues[2] = value @property 
def gpu(self): return self._batchedReadValues[3] @gpu.setter def gpu(self, value): self._batchedReadValues[3] = value @property def instanceMappingInfoSDPtr(self): return self._batchedReadValues[4] @instanceMappingInfoSDPtr.setter def instanceMappingInfoSDPtr(self, value): self._batchedReadValues[4] = value @property def metersPerSceneUnit(self): return self._batchedReadValues[5] @metersPerSceneUnit.setter def metersPerSceneUnit(self, value): self._batchedReadValues[5] = value @property def renderProductResolution(self): return self._batchedReadValues[6] @renderProductResolution.setter def renderProductResolution(self, value): self._batchedReadValues[6] = value @property def rp(self): return self._batchedReadValues[7] @rp.setter def rp(self, value): self._batchedReadValues[7] = value @property def sdSemBBoxExtentCudaPtr(self): return self._batchedReadValues[8] @sdSemBBoxExtentCudaPtr.setter def sdSemBBoxExtentCudaPtr(self, value): self._batchedReadValues[8] = value @property def sdSemBBoxInfosCudaPtr(self): return self._batchedReadValues[9] @sdSemBBoxInfosCudaPtr.setter def sdSemBBoxInfosCudaPtr(self, value): self._batchedReadValues[9] = value @property def semanticWorldTransformSDCudaPtr(self): return self._batchedReadValues[10] @semanticWorldTransformSDCudaPtr.setter def semanticWorldTransformSDCudaPtr(self, value): self._batchedReadValues[10] = value @property def viewportNearFar(self): return self._batchedReadValues[11] @viewportNearFar.setter def viewportNearFar(self, value): self._batchedReadValues[11] = value @property def viewportResolution(self): return self._batchedReadValues[12] @viewportResolution.setter def viewportResolution(self, value): self._batchedReadValues[12] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "sdSemBBox3dCamCornersCudaPtr", "sdSemBBox3dCamExtentCudaPtr", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def sdSemBBox3dCamCornersCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.sdSemBBox3dCamCornersCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdSemBBox3dCamCornersCudaPtr) return data_view.get() @sdSemBBox3dCamCornersCudaPtr.setter def sdSemBBox3dCamCornersCudaPtr(self, value): self._batchedWriteValues[self._attributes.sdSemBBox3dCamCornersCudaPtr] = value @property def 
sdSemBBox3dCamExtentCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.sdSemBBox3dCamExtentCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdSemBBox3dCamExtentCudaPtr) return data_view.get() @sdSemBBox3dCamExtentCudaPtr.setter def sdSemBBox3dCamExtentCudaPtr(self, value): self._batchedWriteValues[self._attributes.sdSemBBox3dCamExtentCudaPtr] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostSemantic3dBoundingBoxCameraProjectionDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostSemantic3dBoundingBoxCameraProjectionDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostSemantic3dBoundingBoxCameraProjectionDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
15,079
Python
49.266667
494
0.659195
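The attribute descriptions above determine the per-entity computation: each object-space extent is pushed through the semantic world transform and the camera view-projection, and the projected corners are reduced to a camera-plane extent. A minimal CPU/numpy sketch of that reduction, assuming row-vector 4x4 matrices and host copies of the CUDA buffers:

import numpy as np

def bbox3d_camera_extent(extent, world_tf, view_proj):
    # extent = (xmin, ymin, zmin, xmax, ymax, zmax) in object space.
    xmin, ymin, zmin, xmax, ymax, zmax = extent
    corners = np.array([[x, y, z, 1.0]
                        for x in (xmin, xmax)
                        for y in (ymin, ymax)
                        for z in (zmin, zmax)])
    cam = corners @ world_tf @ view_proj   # project the 8 corners
    cam = cam / cam[:, 3:4]                # perspective divide
    u, v, z = cam[:, 0], cam[:, 1], cam[:, 2]
    # Matches the float6 layout (u_min, u_max, v_min, v_max, z_min, z_max)
    # documented for outputs:sdSemBBox3dCamExtentCudaPtr.
    return np.array([u.min(), u.max(), v.min(), v.max(), z.min(), z.max()])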
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostInstanceMappingDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostInstanceMapping Synthetic Data node to compute and store scene instances semantic hierarchy information """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn class OgnSdPostInstanceMappingDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostInstanceMapping Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.gpu inputs.rp inputs.semanticFilterName inputs.swhFrameNumber Outputs: outputs.exec outputs.instanceMapSDCudaPtr outputs.instanceMappingInfoSDPtr outputs.instancePrimTokenSDCudaPtr outputs.semanticLabelTokenSDCudaPtr outputs.semanticLocalTransformSDCudaPtr outputs.semanticMapSDCudaPtr outputs.semanticPrimTokenSDCudaPtr outputs.semanticWorldTransformSDCudaPtr Predefined Tokens: tokens.InstanceMappingInfoSDhost tokens.SemanticMapSD tokens.SemanticMapSDhost tokens.SemanticPrimTokenSD tokens.SemanticPrimTokenSDhost tokens.InstanceMapSD tokens.InstanceMapSDhost tokens.InstancePrimTokenSD tokens.InstancePrimTokenSDhost tokens.SemanticLabelTokenSD tokens.SemanticLabelTokenSDhost tokens.SemanticLocalTransformSD tokens.SemanticLocalTransformSDhost tokens.SemanticWorldTransformSD tokens.SemanticWorldTransformSDhost """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:semanticFilterName', 'token', 0, None, 'Name of the semantic filter to apply to the semanticLabelToken', {ogn.MetadataKeys.DEFAULT: '"default"'}, True, 'default', False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:instanceMapSDCudaPtr', 'uint64', 0, None, 'cuda uint16_t buffer pointer of size numInstances containing the instance parent semantic index', {}, True, None, False, ''), ('outputs:instanceMappingInfoSDPtr', 'uint64', 0, None, 'uint buffer pointer containing the following information : [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]', {}, True, None, False, ''), ('outputs:instancePrimTokenSDCudaPtr', 'uint64', 0, None, 'cuda uint64_t buffer pointer of size numInstances containing the instance path token', {}, True, None, False, ''), ('outputs:semanticLabelTokenSDCudaPtr', 'uint64', 0, None, 'cuda uint64_t buffer pointer of size numSemantics containing the semantic label token', {}, True, None, False, ''), ('outputs:semanticLocalTransformSDCudaPtr', 'uint64', 0, None, 'cuda float44 buffer pointer of size numSemantics containing the local semantic transform', {}, True, None, False, ''), ('outputs:semanticMapSDCudaPtr', 
'uint64', 0, None, 'cuda uint16_t buffer pointer of size numSemantics containing the semantic parent semantic index', {}, True, None, False, ''), ('outputs:semanticPrimTokenSDCudaPtr', 'uint64', 0, None, 'cuda uint32_t buffer pointer of size numSemantics containing the prim part of the semantic path token', {}, True, None, False, ''), ('outputs:semanticWorldTransformSDCudaPtr', 'uint64', 0, None, 'cuda float44 buffer pointer of size numSemantics containing the world semantic transform', {}, True, None, False, ''), ]) class tokens: InstanceMappingInfoSDhost = "InstanceMappingInfoSDhost" SemanticMapSD = "SemanticMapSD" SemanticMapSDhost = "SemanticMapSDhost" SemanticPrimTokenSD = "SemanticPrimTokenSD" SemanticPrimTokenSDhost = "SemanticPrimTokenSDhost" InstanceMapSD = "InstanceMapSD" InstanceMapSDhost = "InstanceMapSDhost" InstancePrimTokenSD = "InstancePrimTokenSD" InstancePrimTokenSDhost = "InstancePrimTokenSDhost" SemanticLabelTokenSD = "SemanticLabelTokenSD" SemanticLabelTokenSDhost = "SemanticLabelTokenSDhost" SemanticLocalTransformSD = "SemanticLocalTransformSD" SemanticLocalTransformSDhost = "SemanticLocalTransformSDhost" SemanticWorldTransformSD = "SemanticWorldTransformSD" SemanticWorldTransformSDhost = "SemanticWorldTransformSDhost" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "gpu", "rp", "semanticFilterName", "swhFrameNumber", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.gpu, self._attributes.rp, self._attributes.semanticFilterName, self._attributes.swhFrameNumber] self._batchedReadValues = [None, 0, 0, "default", 0] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def gpu(self): return self._batchedReadValues[1] @gpu.setter def gpu(self, value): self._batchedReadValues[1] = value @property def rp(self): return self._batchedReadValues[2] @rp.setter def rp(self, value): self._batchedReadValues[2] = value @property def semanticFilterName(self): return self._batchedReadValues[3] @semanticFilterName.setter def semanticFilterName(self, value): self._batchedReadValues[3] = value @property def swhFrameNumber(self): return self._batchedReadValues[4] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[4] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues 
= newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "instanceMapSDCudaPtr", "instanceMappingInfoSDPtr", "instancePrimTokenSDCudaPtr", "semanticLabelTokenSDCudaPtr", "semanticLocalTransformSDCudaPtr", "semanticMapSDCudaPtr", "semanticPrimTokenSDCudaPtr", "semanticWorldTransformSDCudaPtr", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def instanceMapSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.instanceMapSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.instanceMapSDCudaPtr) return data_view.get() @instanceMapSDCudaPtr.setter def instanceMapSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.instanceMapSDCudaPtr] = value @property def instanceMappingInfoSDPtr(self): value = self._batchedWriteValues.get(self._attributes.instanceMappingInfoSDPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.instanceMappingInfoSDPtr) return data_view.get() @instanceMappingInfoSDPtr.setter def instanceMappingInfoSDPtr(self, value): self._batchedWriteValues[self._attributes.instanceMappingInfoSDPtr] = value @property def instancePrimTokenSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.instancePrimTokenSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.instancePrimTokenSDCudaPtr) return data_view.get() @instancePrimTokenSDCudaPtr.setter def instancePrimTokenSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.instancePrimTokenSDCudaPtr] = value @property def semanticLabelTokenSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticLabelTokenSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticLabelTokenSDCudaPtr) return data_view.get() @semanticLabelTokenSDCudaPtr.setter def semanticLabelTokenSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.semanticLabelTokenSDCudaPtr] = value @property def semanticLocalTransformSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticLocalTransformSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticLocalTransformSDCudaPtr) return data_view.get() @semanticLocalTransformSDCudaPtr.setter def semanticLocalTransformSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.semanticLocalTransformSDCudaPtr] = value @property def semanticMapSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticMapSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticMapSDCudaPtr) return data_view.get() @semanticMapSDCudaPtr.setter def semanticMapSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.semanticMapSDCudaPtr] = value @property def 
semanticPrimTokenSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticPrimTokenSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticPrimTokenSDCudaPtr) return data_view.get() @semanticPrimTokenSDCudaPtr.setter def semanticPrimTokenSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.semanticPrimTokenSDCudaPtr] = value @property def semanticWorldTransformSDCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticWorldTransformSDCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticWorldTransformSDCudaPtr) return data_view.get() @semanticWorldTransformSDCudaPtr.setter def semanticWorldTransformSDCudaPtr(self, value): self._batchedWriteValues[self._attributes.semanticWorldTransformSDCudaPtr] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostInstanceMappingDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostInstanceMappingDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostInstanceMappingDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
15,781
Python
48.628931
299
0.662949
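Once the buffers published by this node are copied back to the host, the instance-to-semantic hierarchy can be walked with plain array indexing. A hedged numpy sketch (the function name and the assumption that the pointers were already wrapped as host arrays are mine, not the extension's):

import numpy as np

def instance_label_tokens(mapping_info, instance_map, semantic_label_tokens):
    # mapping_info follows the documented layout:
    # [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]
    num_instances = mapping_info[0]
    # instance_map[i] is the semantic index of the instance's first semantic
    # parent; indexing the label-token buffer resolves it to a label token.
    instance_map = np.asarray(instance_map)[:num_instances]
    return np.asarray(semantic_label_tokens)[instance_map]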
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdLinearArrayToTextureDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdLinearArrayToTexture Synthetic Data node to copy the input buffer array into a texture for visualization """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdLinearArrayToTextureDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdLinearArrayToTexture Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.data inputs.exec inputs.height inputs.sdDisplayCudaMipmappedArray inputs.sdDisplayFormat inputs.sdDisplayHeight inputs.sdDisplayStream inputs.sdDisplayWidth inputs.stream inputs.width Outputs: outputs.cudaPtr outputs.exec outputs.format outputs.handlePtr outputs.height outputs.stream outputs.width """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:data', 'float4[]', 0, None, 'Buffer array data', {ogn.MetadataKeys.MEMORY_TYPE: 'cuda'}, True, [], False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:height', 'uint', 0, None, 'Buffer array height', {}, True, 0, False, ''), ('inputs:sdDisplayCudaMipmappedArray', 'uint64', 0, None, 'Visualization texture CUDA mipmapped array pointer', {}, True, 0, False, ''), ('inputs:sdDisplayFormat', 'uint64', 0, None, 'Visualization texture format', {}, True, 0, False, ''), ('inputs:sdDisplayHeight', 'uint', 0, None, 'Visualization texture Height', {}, True, 0, False, ''), ('inputs:sdDisplayStream', 'uint64', 0, None, 'Visualization texture CUDA stream pointer', {}, True, 0, False, ''), ('inputs:sdDisplayWidth', 'uint', 0, None, 'Visualization texture width', {}, True, 0, False, ''), ('inputs:stream', 'uint64', 0, None, 'Pointer to the CUDA Stream', {}, True, 0, False, ''), ('inputs:width', 'uint', 0, None, 'Buffer array width', {}, True, 0, False, ''), ('outputs:cudaPtr', 'uint64', 0, None, 'Display texture CUDA pointer', {}, True, None, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes when the event is received', {}, True, None, False, ''), ('outputs:format', 'uint64', 0, None, 'Display texture format', {}, True, None, False, ''), ('outputs:handlePtr', 'uint64', 0, None, 'Display texture handle reference', {}, True, None, False, ''), ('outputs:height', 'uint', 0, None, 'Display texture height', {}, True, None, False, ''), ('outputs:stream', 'uint64', 0, None, 'Output texture CUDA stream pointer', {}, True, None, False, ''), ('outputs:width', 'uint', 0, None, 'Display texture width', {}, True, None, False, ''), ]) @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "height", "sdDisplayCudaMipmappedArray", "sdDisplayFormat", "sdDisplayHeight", 
"sdDisplayStream", "sdDisplayWidth", "stream", "width", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.height, self._attributes.sdDisplayCudaMipmappedArray, self._attributes.sdDisplayFormat, self._attributes.sdDisplayHeight, self._attributes.sdDisplayStream, self._attributes.sdDisplayWidth, self._attributes.stream, self._attributes.width] self._batchedReadValues = [None, 0, 0, 0, 0, 0, 0, 0, 0] @property def data(self): data_view = og.AttributeValueHelper(self._attributes.data) return data_view.get(on_gpu=True) @data.setter def data(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.data) data_view = og.AttributeValueHelper(self._attributes.data) data_view.set(value, on_gpu=True) self.data_size = data_view.get_array_size() @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def height(self): return self._batchedReadValues[1] @height.setter def height(self, value): self._batchedReadValues[1] = value @property def sdDisplayCudaMipmappedArray(self): return self._batchedReadValues[2] @sdDisplayCudaMipmappedArray.setter def sdDisplayCudaMipmappedArray(self, value): self._batchedReadValues[2] = value @property def sdDisplayFormat(self): return self._batchedReadValues[3] @sdDisplayFormat.setter def sdDisplayFormat(self, value): self._batchedReadValues[3] = value @property def sdDisplayHeight(self): return self._batchedReadValues[4] @sdDisplayHeight.setter def sdDisplayHeight(self, value): self._batchedReadValues[4] = value @property def sdDisplayStream(self): return self._batchedReadValues[5] @sdDisplayStream.setter def sdDisplayStream(self, value): self._batchedReadValues[5] = value @property def sdDisplayWidth(self): return self._batchedReadValues[6] @sdDisplayWidth.setter def sdDisplayWidth(self, value): self._batchedReadValues[6] = value @property def stream(self): return self._batchedReadValues[7] @stream.setter def stream(self, value): self._batchedReadValues[7] = value @property def width(self): return self._batchedReadValues[8] @width.setter def width(self, value): self._batchedReadValues[8] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaPtr", "exec", "format", "handlePtr", "height", "stream", "width", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = 
node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def cudaPtr(self): value = self._batchedWriteValues.get(self._attributes.cudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.cudaPtr) return data_view.get() @cudaPtr.setter def cudaPtr(self, value): self._batchedWriteValues[self._attributes.cudaPtr] = value @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def format(self): value = self._batchedWriteValues.get(self._attributes.format) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.format) return data_view.get() @format.setter def format(self, value): self._batchedWriteValues[self._attributes.format] = value @property def handlePtr(self): value = self._batchedWriteValues.get(self._attributes.handlePtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.handlePtr) return data_view.get() @handlePtr.setter def handlePtr(self, value): self._batchedWriteValues[self._attributes.handlePtr] = value @property def height(self): value = self._batchedWriteValues.get(self._attributes.height) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.height) return data_view.get() @height.setter def height(self, value): self._batchedWriteValues[self._attributes.height] = value @property def stream(self): value = self._batchedWriteValues.get(self._attributes.stream) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.stream) return data_view.get() @stream.setter def stream(self, value): self._batchedWriteValues[self._attributes.stream] = value @property def width(self): value = self._batchedWriteValues.get(self._attributes.width) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.width) return data_view.get() @width.setter def width(self, value): self._batchedWriteValues[self._attributes.width] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdLinearArrayToTextureDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdLinearArrayToTextureDatabase.ValuesForOutputs(node, 
self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdLinearArrayToTextureDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
13,538
Python
42.533762
320
0.613385
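A compute for this node would mostly forward the visualization-texture metadata while the buffer-to-texture copy runs on the CUDA stream; note that the generated data accessor already fetches inputs:data on the GPU (AttributeValueHelper.get(on_gpu=True)). The following skeleton is a non-authoritative sketch of that metadata handling:

class OgnSdLinearArrayToTexture:
    @staticmethod
    def compute(db) -> bool:
        if db.inputs.width == 0 or db.inputs.height == 0:
            return False  # nothing to copy
        # Forward the display-texture description provided by the host.
        db.outputs.width = db.inputs.sdDisplayWidth
        db.outputs.height = db.inputs.sdDisplayHeight
        db.outputs.format = db.inputs.sdDisplayFormat
        db.outputs.stream = db.inputs.sdDisplayStream
        return True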
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdPostSemanticBoundingBoxDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdPostSemanticBoundingBox Synthetic Data node to compute the bounding boxes of the scene semantic entities. """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdPostSemanticBoundingBoxDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdPostSemanticBoundingBox Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.exec inputs.gpu inputs.instanceMapSDCudaPtr inputs.instanceMappingInfoSDPtr inputs.renderProductResolution inputs.renderVar inputs.rp inputs.semanticLocalTransformSDCudaPtr inputs.semanticMapSDCudaPtr Outputs: outputs.exec outputs.sdSemBBoxExtentCudaPtr outputs.sdSemBBoxInfosCudaPtr Predefined Tokens: tokens.BoundingBox2DLooseSD tokens.BoundingBox2DTightSD tokens.SemanticBoundingBox2DExtentLooseSD tokens.SemanticBoundingBox2DInfosLooseSD tokens.SemanticBoundingBox2DExtentTightSD tokens.SemanticBoundingBox2DInfosTightSD tokens.BoundingBox3DSD tokens.SemanticBoundingBox3DExtentSD tokens.SemanticBoundingBox3DInfosSD """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:gpu', 'uint64', 0, 'gpuFoundations', 'Pointer to shared context containing gpu foundations', {}, True, 0, False, ''), ('inputs:instanceMapSDCudaPtr', 'uint64', 0, None, 'cuda uint16_t buffer pointer of size numInstances containing the instance parent semantic index', {}, True, 0, False, ''), ('inputs:instanceMappingInfoSDPtr', 'uint64', 0, None, 'uint buffer pointer containing the following information : [numInstances, minInstanceId, numSemantics, minSemanticId, numProtoSemantic]', {}, True, 0, False, ''), ('inputs:renderProductResolution', 'int2', 0, None, 'RenderProduct resolution', {}, True, [0, 0], False, ''), ('inputs:renderVar', 'token', 0, None, 'Name of the BoundingBox RenderVar to process', {}, True, '', False, ''), ('inputs:rp', 'uint64', 0, 'renderProduct', 'Pointer to render product for this view', {}, True, 0, False, ''), ('inputs:semanticLocalTransformSDCudaPtr', 'uint64', 0, None, 'cuda float44 buffer pointer of size numSemantics containing the local semantic transform', {}, True, 0, False, ''), ('inputs:semanticMapSDCudaPtr', 'uint64', 0, None, 'cuda uint16_t buffer pointer of size numSemantics containing the semantic parent semantic index', {}, True, 0, False, ''), ('outputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('outputs:sdSemBBoxExtentCudaPtr', 'uint64', 0, None, 'Cuda buffer containing the extent of the bounding boxes as a float4=(u_min,v_min,u_max,v_max) for 2D or a float6=(xmin,ymin,zmin,xmax,ymax,zmax) in object space for 3D', {}, True, None, False, ''), ('outputs:sdSemBBoxInfosCudaPtr', 'uint64', 0, None, 'Cuda buffer containing valid bounding boxes infos', {}, True, None, False, ''), ]) class tokens: BoundingBox2DLooseSD = "BoundingBox2DLooseSD" 
BoundingBox2DTightSD = "BoundingBox2DTightSD" SemanticBoundingBox2DExtentLooseSD = "SemanticBoundingBox2DExtentLooseSD" SemanticBoundingBox2DInfosLooseSD = "SemanticBoundingBox2DInfosLooseSD" SemanticBoundingBox2DExtentTightSD = "SemanticBoundingBox2DExtentTightSD" SemanticBoundingBox2DInfosTightSD = "SemanticBoundingBox2DInfosTightSD" BoundingBox3DSD = "BoundingBox3DSD" SemanticBoundingBox3DExtentSD = "SemanticBoundingBox3DExtentSD" SemanticBoundingBox3DInfosSD = "SemanticBoundingBox3DInfosSD" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "gpu", "instanceMapSDCudaPtr", "instanceMappingInfoSDPtr", "renderProductResolution", "renderVar", "rp", "semanticLocalTransformSDCudaPtr", "semanticMapSDCudaPtr", "_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.exec, self._attributes.gpu, self._attributes.instanceMapSDCudaPtr, self._attributes.instanceMappingInfoSDPtr, self._attributes.renderProductResolution, self._attributes.renderVar, self._attributes.rp, self._attributes.semanticLocalTransformSDCudaPtr, self._attributes.semanticMapSDCudaPtr] self._batchedReadValues = [None, 0, 0, 0, [0, 0], "", 0, 0, 0] @property def exec(self): return self._batchedReadValues[0] @exec.setter def exec(self, value): self._batchedReadValues[0] = value @property def gpu(self): return self._batchedReadValues[1] @gpu.setter def gpu(self, value): self._batchedReadValues[1] = value @property def instanceMapSDCudaPtr(self): return self._batchedReadValues[2] @instanceMapSDCudaPtr.setter def instanceMapSDCudaPtr(self, value): self._batchedReadValues[2] = value @property def instanceMappingInfoSDPtr(self): return self._batchedReadValues[3] @instanceMappingInfoSDPtr.setter def instanceMappingInfoSDPtr(self, value): self._batchedReadValues[3] = value @property def renderProductResolution(self): return self._batchedReadValues[4] @renderProductResolution.setter def renderProductResolution(self, value): self._batchedReadValues[4] = value @property def renderVar(self): return self._batchedReadValues[5] @renderVar.setter def renderVar(self, value): self._batchedReadValues[5] = value @property def rp(self): return self._batchedReadValues[6] @rp.setter def rp(self, value): self._batchedReadValues[6] = value @property def semanticLocalTransformSDCudaPtr(self): return self._batchedReadValues[7] @semanticLocalTransformSDCudaPtr.setter def semanticLocalTransformSDCudaPtr(self, value): self._batchedReadValues[7] = value @property def semanticMapSDCudaPtr(self): return self._batchedReadValues[8] @semanticMapSDCudaPtr.setter def semanticMapSDCudaPtr(self, value): self._batchedReadValues[8] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in 
self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "sdSemBBoxExtentCudaPtr", "sdSemBBoxInfosCudaPtr", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedWriteValues = { } @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def sdSemBBoxExtentCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.sdSemBBoxExtentCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdSemBBoxExtentCudaPtr) return data_view.get() @sdSemBBoxExtentCudaPtr.setter def sdSemBBoxExtentCudaPtr(self, value): self._batchedWriteValues[self._attributes.sdSemBBoxExtentCudaPtr] = value @property def sdSemBBoxInfosCudaPtr(self): value = self._batchedWriteValues.get(self._attributes.sdSemBBoxInfosCudaPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.sdSemBBoxInfosCudaPtr) return data_view.get() @sdSemBBoxInfosCudaPtr.setter def sdSemBBoxInfosCudaPtr(self, value): self._batchedWriteValues[self._attributes.sdSemBBoxInfosCudaPtr] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdPostSemanticBoundingBoxDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdPostSemanticBoundingBoxDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdPostSemanticBoundingBoxDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
12,640
Python
48.18677
349
0.662104
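The generated database above only provides attribute access; the node's compute logic lives in a separate implementation class. Below is a minimal, hypothetical sketch (class name and logic invented for illustration, not the shipped SdPostSemanticBoundingBox implementation) of how such a compute(db) entry point typically consumes the ValuesForInputs/ValuesForOutputs accessors, following the same pattern as the Python nodes later in this listing.

# Hypothetical sketch only -- not the shipped node implementation.
import omni.graph.core as og


class OgnSdPostSemanticBoundingBoxSketch:
    @staticmethod
    def compute(db) -> bool:
        # Inputs are read through the batched ValuesForInputs properties (prefetched per evaluation).
        width, height = db.inputs.renderProductResolution
        if db.inputs.instanceMappingInfoSDPtr == 0 or width == 0 or height == 0:
            return False  # nothing to process this frame
        # Outputs are staged in ValuesForOutputs and committed after compute() returns.
        db.outputs.sdSemBBoxExtentCudaPtr = 0  # placeholder: a real node would write a device pointer here
        db.outputs.sdSemBBoxInfosCudaPtr = 0
        db.outputs.exec = og.ExecutionAttributeState.ENABLED
        return True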
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/OgnSdInstanceMappingPtrDatabase.py
"""Support for simplified access to data on nodes of type omni.syntheticdata.SdInstanceMappingPtr Synthetic Data node to expose the scene instances semantic hierarchy information """ import omni.graph.core as og import omni.graph.core._omni_graph_core as _og import omni.graph.tools.ogn as ogn import numpy class OgnSdInstanceMappingPtrDatabase(og.Database): """Helper class providing simplified access to data on nodes of type omni.syntheticdata.SdInstanceMappingPtr Class Members: node: Node being evaluated Attribute Value Properties: Inputs: inputs.cudaPtr inputs.exec inputs.renderResults inputs.semanticFilerTokens inputs.swhFrameNumber Outputs: outputs.exec outputs.instanceMapPtr outputs.instancePrimPathPtr outputs.minInstanceIndex outputs.minSemanticIndex outputs.numInstances outputs.numSemantics outputs.semanticLabelTokenPtrs outputs.semanticLocalTransformPtr outputs.semanticMapPtr outputs.semanticPrimPathPtr outputs.semanticWorldTransformPtr outputs.swhFrameNumber Predefined Tokens: tokens.InstanceMappingInfoSDhost tokens.InstancePrimTokenSDhost tokens.InstancePrimTokenSD tokens.SemanticPrimTokenSDhost tokens.SemanticPrimTokenSD tokens.InstanceMapSDhost tokens.InstanceMapSD tokens.SemanticMapSDhost tokens.SemanticMapSD tokens.SemanticWorldTransformSDhost tokens.SemanticWorldTransformSD tokens.SemanticLocalTransformSDhost tokens.SemanticLocalTransformSD tokens.SemanticLabelTokenSDhost tokens.SemanticLabelTokenSD """ # This is an internal object that provides per-class storage of a per-node data dictionary PER_NODE_DATA = {} # This is an internal object that describes unchanging attributes in a generic way # The values in this list are in no particular order, as a per-attribute tuple # Name, Type, ExtendedTypeIndex, UiName, Description, Metadata, # Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg # You should not need to access any of this data directly, use the defined database interfaces INTERFACE = og.Database._get_interface([ ('inputs:cudaPtr', 'bool', 0, None, 'If true, return cuda device pointer instead of host pointer', {ogn.MetadataKeys.DEFAULT: 'false'}, True, False, False, ''), ('inputs:exec', 'execution', 0, None, 'Trigger', {}, True, None, False, ''), ('inputs:renderResults', 'uint64', 0, None, 'Render results pointer', {}, True, 0, False, ''), ('inputs:semanticFilerTokens', 'token[]', 0, None, 'Tokens identifying the semantic filters applied to the output semantic labels. 
Each token should correspond to an activated SdSemanticFilter node', {ogn.MetadataKeys.DEFAULT: '[]'}, True, [], False, ''), ('inputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, 0, False, ''), ('outputs:exec', 'execution', 0, 'Received', 'Executes when the event is received', {}, True, None, False, ''), ('outputs:instanceMapPtr', 'uint64', 0, None, 'Array pointer of numInstances uint16_t containing the semantic index of the instance prim first semantic prim parent', {}, True, None, False, ''), ('outputs:instancePrimPathPtr', 'uint64', 0, None, 'Array pointer of numInstances uint64_t containing the prim path tokens for every instance prims', {}, True, None, False, ''), ('outputs:minInstanceIndex', 'uint', 0, None, 'Instance index of the first instance prim in the instance arrays', {}, True, None, False, ''), ('outputs:minSemanticIndex', 'uint', 0, None, 'Semantic index of the first semantic prim in the semantic arrays', {}, True, None, False, ''), ('outputs:numInstances', 'uint', 0, None, 'Number of instances prim in the instance arrays', {}, True, None, False, ''), ('outputs:numSemantics', 'uint', 0, None, 'Number of semantic prim in the semantic arrays', {}, True, None, False, ''), ('outputs:semanticLabelTokenPtrs', 'uint64[]', 0, None, 'Array containing for every input semantic filters the corresponding array pointer of numSemantics uint64_t representing the semantic label of the semantic prim', {}, True, None, False, ''), ('outputs:semanticLocalTransformPtr', 'uint64', 0, None, 'Array pointer of numSemantics 4x4 float matrices containing the transform from world to object space for every semantic prims', {}, True, None, False, ''), ('outputs:semanticMapPtr', 'uint64', 0, None, 'Array pointer of numSemantics uint16_t containing the semantic index of the semantic prim first semantic prim parent', {}, True, None, False, ''), ('outputs:semanticPrimPathPtr', 'uint64', 0, None, 'Array pointer of numSemantics uint32_t containing the prim part of the prim path tokens for every semantic prims', {}, True, None, False, ''), ('outputs:semanticWorldTransformPtr', 'uint64', 0, None, 'Array pointer of numSemantics 4x4 float matrices containing the transform from local to world space for every semantic entity', {}, True, None, False, ''), ('outputs:swhFrameNumber', 'uint64', 0, None, 'Fabric frame number', {}, True, None, False, ''), ]) class tokens: InstanceMappingInfoSDhost = "InstanceMappingInfoSDhost" InstancePrimTokenSDhost = "InstancePrimTokenSDhost" InstancePrimTokenSD = "InstancePrimTokenSD" SemanticPrimTokenSDhost = "SemanticPrimTokenSDhost" SemanticPrimTokenSD = "SemanticPrimTokenSD" InstanceMapSDhost = "InstanceMapSDhost" InstanceMapSD = "InstanceMapSD" SemanticMapSDhost = "SemanticMapSDhost" SemanticMapSD = "SemanticMapSD" SemanticWorldTransformSDhost = "SemanticWorldTransformSDhost" SemanticWorldTransformSD = "SemanticWorldTransformSD" SemanticLocalTransformSDhost = "SemanticLocalTransformSDhost" SemanticLocalTransformSD = "SemanticLocalTransformSD" SemanticLabelTokenSDhost = "SemanticLabelTokenSDhost" SemanticLabelTokenSD = "SemanticLabelTokenSD" @classmethod def _populate_role_data(cls): """Populate a role structure with the non-default roles on this node type""" role_data = super()._populate_role_data() role_data.inputs.exec = og.Database.ROLE_EXECUTION role_data.outputs.exec = og.Database.ROLE_EXECUTION return role_data class ValuesForInputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"cudaPtr", "exec", "renderResults", "swhFrameNumber", 
"_setting_locked", "_batchedReadAttributes", "_batchedReadValues"} """Helper class that creates natural hierarchical access to input attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self._batchedReadAttributes = [self._attributes.cudaPtr, self._attributes.exec, self._attributes.renderResults, self._attributes.swhFrameNumber] self._batchedReadValues = [False, None, 0, 0] @property def semanticFilerTokens(self): data_view = og.AttributeValueHelper(self._attributes.semanticFilerTokens) return data_view.get() @semanticFilerTokens.setter def semanticFilerTokens(self, value): if self._setting_locked: raise og.ReadOnlyError(self._attributes.semanticFilerTokens) data_view = og.AttributeValueHelper(self._attributes.semanticFilerTokens) data_view.set(value) self.semanticFilerTokens_size = data_view.get_array_size() @property def cudaPtr(self): return self._batchedReadValues[0] @cudaPtr.setter def cudaPtr(self, value): self._batchedReadValues[0] = value @property def exec(self): return self._batchedReadValues[1] @exec.setter def exec(self, value): self._batchedReadValues[1] = value @property def renderResults(self): return self._batchedReadValues[2] @renderResults.setter def renderResults(self, value): self._batchedReadValues[2] = value @property def swhFrameNumber(self): return self._batchedReadValues[3] @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedReadValues[3] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _prefetch(self): readAttributes = self._batchedReadAttributes newValues = _og._prefetch_input_attributes_data(readAttributes) if len(readAttributes) == len(newValues): self._batchedReadValues = newValues class ValuesForOutputs(og.DynamicAttributeAccess): LOCAL_PROPERTY_NAMES = {"exec", "instanceMapPtr", "instancePrimPathPtr", "minInstanceIndex", "minSemanticIndex", "numInstances", "numSemantics", "semanticLocalTransformPtr", "semanticMapPtr", "semanticPrimPathPtr", "semanticWorldTransformPtr", "swhFrameNumber", "_batchedWriteValues"} """Helper class that creates natural hierarchical access to output attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) self.semanticLabelTokenPtrs_size = None self._batchedWriteValues = { } @property def semanticLabelTokenPtrs(self): data_view = og.AttributeValueHelper(self._attributes.semanticLabelTokenPtrs) return data_view.get(reserved_element_count=self.semanticLabelTokenPtrs_size) @semanticLabelTokenPtrs.setter def semanticLabelTokenPtrs(self, value): data_view = og.AttributeValueHelper(self._attributes.semanticLabelTokenPtrs) data_view.set(value) self.semanticLabelTokenPtrs_size = data_view.get_array_size() @property def exec(self): value = self._batchedWriteValues.get(self._attributes.exec) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.exec) return 
data_view.get() @exec.setter def exec(self, value): self._batchedWriteValues[self._attributes.exec] = value @property def instanceMapPtr(self): value = self._batchedWriteValues.get(self._attributes.instanceMapPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.instanceMapPtr) return data_view.get() @instanceMapPtr.setter def instanceMapPtr(self, value): self._batchedWriteValues[self._attributes.instanceMapPtr] = value @property def instancePrimPathPtr(self): value = self._batchedWriteValues.get(self._attributes.instancePrimPathPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.instancePrimPathPtr) return data_view.get() @instancePrimPathPtr.setter def instancePrimPathPtr(self, value): self._batchedWriteValues[self._attributes.instancePrimPathPtr] = value @property def minInstanceIndex(self): value = self._batchedWriteValues.get(self._attributes.minInstanceIndex) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.minInstanceIndex) return data_view.get() @minInstanceIndex.setter def minInstanceIndex(self, value): self._batchedWriteValues[self._attributes.minInstanceIndex] = value @property def minSemanticIndex(self): value = self._batchedWriteValues.get(self._attributes.minSemanticIndex) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.minSemanticIndex) return data_view.get() @minSemanticIndex.setter def minSemanticIndex(self, value): self._batchedWriteValues[self._attributes.minSemanticIndex] = value @property def numInstances(self): value = self._batchedWriteValues.get(self._attributes.numInstances) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.numInstances) return data_view.get() @numInstances.setter def numInstances(self, value): self._batchedWriteValues[self._attributes.numInstances] = value @property def numSemantics(self): value = self._batchedWriteValues.get(self._attributes.numSemantics) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.numSemantics) return data_view.get() @numSemantics.setter def numSemantics(self, value): self._batchedWriteValues[self._attributes.numSemantics] = value @property def semanticLocalTransformPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticLocalTransformPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticLocalTransformPtr) return data_view.get() @semanticLocalTransformPtr.setter def semanticLocalTransformPtr(self, value): self._batchedWriteValues[self._attributes.semanticLocalTransformPtr] = value @property def semanticMapPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticMapPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticMapPtr) return data_view.get() @semanticMapPtr.setter def semanticMapPtr(self, value): self._batchedWriteValues[self._attributes.semanticMapPtr] = value @property def semanticPrimPathPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticPrimPathPtr) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.semanticPrimPathPtr) return data_view.get() @semanticPrimPathPtr.setter def semanticPrimPathPtr(self, value): self._batchedWriteValues[self._attributes.semanticPrimPathPtr] = value @property def semanticWorldTransformPtr(self): value = self._batchedWriteValues.get(self._attributes.semanticWorldTransformPtr) if value: return value else: data_view = 
og.AttributeValueHelper(self._attributes.semanticWorldTransformPtr) return data_view.get() @semanticWorldTransformPtr.setter def semanticWorldTransformPtr(self, value): self._batchedWriteValues[self._attributes.semanticWorldTransformPtr] = value @property def swhFrameNumber(self): value = self._batchedWriteValues.get(self._attributes.swhFrameNumber) if value: return value else: data_view = og.AttributeValueHelper(self._attributes.swhFrameNumber) return data_view.get() @swhFrameNumber.setter def swhFrameNumber(self, value): self._batchedWriteValues[self._attributes.swhFrameNumber] = value def __getattr__(self, item: str): if item in self.LOCAL_PROPERTY_NAMES: return object.__getattribute__(self, item) else: return super().__getattr__(item) def __setattr__(self, item: str, new_value): if item in self.LOCAL_PROPERTY_NAMES: object.__setattr__(self, item, new_value) else: super().__setattr__(item, new_value) def _commit(self): _og._commit_output_attributes_data(self._batchedWriteValues) self._batchedWriteValues = { } class ValuesForState(og.DynamicAttributeAccess): """Helper class that creates natural hierarchical access to state attributes""" def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface): """Initialize simplified access for the attribute data""" context = node.get_graph().get_default_graph_context() super().__init__(context, node, attributes, dynamic_attributes) def __init__(self, node): super().__init__(node) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT) self.inputs = OgnSdInstanceMappingPtrDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT) self.outputs = OgnSdInstanceMappingPtrDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes) dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE) self.state = OgnSdInstanceMappingPtrDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
18,594
Python
47.550914
292
0.651178
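Most outputs of SdInstanceMappingPtr are raw array addresses; the attribute descriptions above give the element type and count for each. The snippet below is an illustrative helper, not part of the repo, assuming inputs:cudaPtr is left False so the returned addresses are host pointers; it only shows how such a pointer/count pair can be viewed as a NumPy array.

# Illustrative helper (not part of the repo): view a host pointer such as outputs:instanceMapPtr,
# documented above as numInstances uint16_t values, as a NumPy array.
import ctypes

import numpy as np


def wrap_uint16_host_array(host_ptr: int, count: int) -> np.ndarray:
    """Return a NumPy view over `count` uint16 values located at `host_ptr`."""
    if host_ptr == 0 or count == 0:
        return np.empty(0, dtype=np.uint16)
    buffer = (ctypes.c_uint16 * count).from_address(host_ptr)
    return np.ctypeslib.as_array(buffer)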
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/python/nodes/OgnSdTestPrintRawArray.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.graph.core as og

import numpy as np
import random


class OgnSdTestPrintRawArray:
    @staticmethod
    def compute(db) -> bool:
        if db.state.initialSWHFrameNumber < 0:
            db.state.initialSWHFrameNumber = db.inputs.swhFrameNumber
        frameNumber = db.inputs.swhFrameNumber - db.state.initialSWHFrameNumber

        rd_seed = db.inputs.randomSeed + ((frameNumber * 17) % 491)
        random.seed(rd_seed)

        db.outputs.swhFrameNumber = db.inputs.swhFrameNumber
        db.outputs.exec = og.ExecutionAttributeState.ENABLED

        elemenType = np.uint8
        if db.inputs.elementType == db.tokens.uint16:
            elemenType = np.uint16
        elif db.inputs.elementType == db.tokens.int16:
            elemenType = np.int16
        elif db.inputs.elementType == db.tokens.uint32:
            elemenType = np.uint32
        elif db.inputs.elementType == db.tokens.int32:
            elemenType = np.int32
        elif db.inputs.elementType == db.tokens.float32:
            elemenType = np.float32
        elif db.inputs.elementType == db.tokens.token:
            elemenType = np.uint64
        elementCount = db.inputs.elementCount

        data = db.inputs.data
        data = data.view(elemenType)

        if db.inputs.mode == db.tokens.printFormatted:
            is2DArray = db.inputs.bufferSize == 0
            if not is2DArray:
                data = data.reshape(data.shape[0] // elementCount, elementCount) if elementCount > 1 else data
            else:
                data = (
                    data.reshape(db.inputs.height, db.inputs.width, elementCount)
                    if elementCount > 1
                    else data.reshape(db.inputs.height, db.inputs.width)
                )
            print("OgnSdPrintRawArray : ", db.inputs.swhFrameNumber)
            print(data)
        elif (frameNumber in db.inputs.referenceSWHFrameNumbers) and (data.shape[0] >= db.inputs.referenceNumUniqueRandomValues):
            if db.inputs.mode == db.tokens.printReferences:
                ref_values = data.astype(np.float32)
                random.shuffle(ref_values)
                ref_values = ref_values[:db.inputs.referenceNumUniqueRandomValues]
                print(ref_values)
            else:
                ref_values = data.astype(np.float32)
                random.shuffle(ref_values)
                ref_values = ref_values[:db.inputs.referenceNumUniqueRandomValues]
                frame_offset = np.where(db.inputs.referenceSWHFrameNumbers == frameNumber)[0][0]
                reference_offset = frame_offset * db.inputs.referenceNumUniqueRandomValues
                err = np.square(
                    ref_values - db.inputs.referenceValues[reference_offset:reference_offset + db.inputs.referenceNumUniqueRandomValues]
                ).max()
                if err >= db.inputs.referenceTolerance:
                    print(f"OgnSdTestPrintRawArray [Error]")

        return True
3,401
Python
41.524999
153
0.633931
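The formatted-print branch of this node amounts to reinterpreting a raw byte buffer with the requested element type and reshaping it to (height, width, elementCount). A standalone NumPy illustration of that decoding, with made-up buffer dimensions:

# Plain-NumPy illustration of the decode performed above (buffer contents and sizes are made up).
import numpy as np

raw = np.arange(2 * 3 * 4, dtype=np.float32).tobytes()  # stand-in for the node's raw uint8 input buffer
data = np.frombuffer(raw, dtype=np.uint8)

element_type, element_count, height, width = np.float32, 4, 2, 3
decoded = data.view(element_type).reshape(height, width, element_count)
print(decoded.shape)  # (2, 3, 4)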
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/python/nodes/OgnSdTestStageManipulationScenarii.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.usd
import omni.graph.core as og

from pxr import Gf, Semantics, UsdGeom

import numpy as np


class OgnSdTestStageManipulationScenarii:

    _prim_names = ["Sphere", "Capsule", "Plane", "Torus", "Cube", "Cone"]
    _sem_types = ["type", "class", "genre"]
    _sem_labels = ["sphere", "capsule", "plane", "torus", "cube", "ball", "cone"]

    @staticmethod
    def add_semantics(prim, semantic_label, semantic_type="class"):
        sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
        sem.CreateSemanticTypeAttr()
        sem.CreateSemanticDataAttr()
        sem.GetSemanticTypeAttr().Set(semantic_type)
        sem.GetSemanticDataAttr().Set(semantic_label)

    @staticmethod
    def get_random_transform(rng):
        tf = np.eye(4)
        tf[:3, :3] = Gf.Matrix3d(Gf.Rotation(rng.rand(3).tolist(), rng.rand(3).tolist()))
        tf[3, :3] = rng.rand(3).tolist()
        return Gf.Matrix4d(tf)

    @staticmethod
    def compute(db) -> bool:
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        if not stage:
            return False

        rng = np.random.default_rng(db.inputs.randomSeed + ((db.state.frameNumber * 23) % 1579))

        world_prim = stage.GetPrimAtPath(db.inputs.worldPrimPath)
        if not world_prim:
            world_prim = stage.DefinePrim(db.inputs.worldPrimPath)
            if world_prim:
                world_xform_prim = UsdGeom.Xformable(world_prim) if world_prim else None
                if world_xform_prim:
                    world_xform_prim.AddTransformOp().Set(OgnSdTestStageManipulationScenarii.get_random_transform(rng))
        if not world_prim:
            return False

        db.state.frameNumber += 1

        num_manipulations = rng.randint(0, 3)
        for manip_index in range(num_manipulations):

            prims = world_prim.GetChildren()
            prims.append(world_prim)
            prim = rng.choice(prims)
            if not prim:
                continue

            manipulation = rng.randint(0, 38)

            if manipulation < 11:
                """create a new children prim"""
                prim_name = rng.choice(OgnSdTestStageManipulationScenarii._prim_names)
                prim_path = prim.GetPath().pathString + "/" + prim_name + "_" + str(db.state.frameNumber) + "_" + str(manip_index)
                new_prim = stage.DefinePrim(prim_path, prim_name)
                new_prim_color_attr = new_prim.GetAttribute("primvars:displayColor") if new_prim else None
                if new_prim_color_attr:
                    new_prim_color_attr.Set([rng.rand(3).tolist()])
                xform_prim = UsdGeom.Xformable(new_prim) if new_prim else None
                if xform_prim:
                    xform_prim.AddScaleOp().Set((175.0 * rng.random(), 175.0 * rng.random(), 175.0 * rng.random()))
                    xform_prim.AddTransformOp().Set(OgnSdTestStageManipulationScenarii.get_random_transform(rng))
            elif (manipulation >= 11) and (manipulation < 12):
                """remove the prim"""
                stage.RemovePrim(prim.GetPath())
            elif (manipulation >= 12) and (manipulation < 23):
                """move the prim"""
                xform_prim = UsdGeom.Xformable(prim)
                if xform_prim:
                    xform_prim.ClearXformOpOrder()
                    xform_prim.AddTransformOp().Set(OgnSdTestStageManipulationScenarii.get_random_transform(rng))
            elif (manipulation >= 23) and (manipulation < 31):
                """add semantic to the prim"""
                OgnSdTestStageManipulationScenarii.add_semantics(
                    prim,
                    rng.choice(OgnSdTestStageManipulationScenarii._sem_labels),
                    rng.choice(OgnSdTestStageManipulationScenarii._sem_types),
                )
            elif (manipulation >= 31) and (manipulation < 39):
                """change color of the prim"""
                prim_color_attr = prim.GetAttribute("primvars:displayColor")
                if prim_color_attr:
                    prim_color_attr.Set([rng.rand(3).tolist()])

        return True
4,542
Python
42.266666
189
0.606781
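The add_semantics helper above is how semantic labels get attached during these manipulation scenarios. A minimal standalone usage sketch (assuming the pxr USD Python bindings with the Semantics schema are importable, e.g. inside the Isaac Sim Python environment):

# Minimal usage sketch of the Semantics API mirrored by add_semantics above.
from pxr import Semantics, Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
cube = UsdGeom.Cube.Define(stage, "/World/Cube").GetPrim()

sem = Semantics.SemanticsAPI.Apply(cube, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set("cube")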
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdRenderProductCamera.py
import omni.kit.test import omni.graph.core as og import omni.graph.core.tests as ogts import os class TestOgn(ogts.OmniGraphTestCase): async def test_data_access(self): from omni.syntheticdata.ogn.OgnSdRenderProductCameraDatabase import OgnSdRenderProductCameraDatabase test_file_name = "OgnSdRenderProductCameraTemplate.usda" usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name) if not os.path.exists(usd_path): self.assertTrue(False, f"{usd_path} not found for loading test") (result, error) = await ogts.load_test_file(usd_path) self.assertTrue(result, f'{error} on {usd_path}') test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdRenderProductCamera") database = OgnSdRenderProductCameraDatabase(test_node) self.assertTrue(test_node.is_valid()) node_type_name = test_node.get_type_name() self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 2) def _attr_error(attribute: og.Attribute, usd_test: bool) -> str: test_type = "USD Load" if usd_test else "Database Access" return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error" self.assertTrue(test_node.get_attribute_exists("inputs:exec")) attribute = test_node.get_attribute("inputs:exec") db_value = database.inputs.exec self.assertTrue(test_node.get_attribute_exists("inputs:renderProductPath")) attribute = test_node.get_attribute("inputs:renderProductPath") db_value = database.inputs.renderProductPath expected_value = "" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:renderResults")) attribute = test_node.get_attribute("inputs:renderResults") db_value = database.inputs.renderResults expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:swhFrameNumber")) attribute = test_node.get_attribute("inputs:swhFrameNumber") db_value = database.inputs.swhFrameNumber expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("outputs:cameraApertureOffset")) attribute = test_node.get_attribute("outputs:cameraApertureOffset") db_value = database.outputs.cameraApertureOffset self.assertTrue(test_node.get_attribute_exists("outputs:cameraApertureSize")) attribute = test_node.get_attribute("outputs:cameraApertureSize") db_value = database.outputs.cameraApertureSize self.assertTrue(test_node.get_attribute_exists("outputs:cameraFStop")) attribute = test_node.get_attribute("outputs:cameraFStop") db_value = database.outputs.cameraFStop self.assertTrue(test_node.get_attribute_exists("outputs:cameraFisheyeParams")) attribute = test_node.get_attribute("outputs:cameraFisheyeParams") db_value = database.outputs.cameraFisheyeParams self.assertTrue(test_node.get_attribute_exists("outputs:cameraFocalLength")) attribute = test_node.get_attribute("outputs:cameraFocalLength") db_value = database.outputs.cameraFocalLength self.assertTrue(test_node.get_attribute_exists("outputs:cameraFocusDistance")) attribute = test_node.get_attribute("outputs:cameraFocusDistance") db_value = 
database.outputs.cameraFocusDistance self.assertTrue(test_node.get_attribute_exists("outputs:cameraModel")) attribute = test_node.get_attribute("outputs:cameraModel") db_value = database.outputs.cameraModel self.assertTrue(test_node.get_attribute_exists("outputs:cameraNearFar")) attribute = test_node.get_attribute("outputs:cameraNearFar") db_value = database.outputs.cameraNearFar self.assertTrue(test_node.get_attribute_exists("outputs:cameraProjection")) attribute = test_node.get_attribute("outputs:cameraProjection") db_value = database.outputs.cameraProjection self.assertTrue(test_node.get_attribute_exists("outputs:cameraViewTransform")) attribute = test_node.get_attribute("outputs:cameraViewTransform") db_value = database.outputs.cameraViewTransform self.assertTrue(test_node.get_attribute_exists("outputs:exec")) attribute = test_node.get_attribute("outputs:exec") db_value = database.outputs.exec self.assertTrue(test_node.get_attribute_exists("outputs:metersPerSceneUnit")) attribute = test_node.get_attribute("outputs:metersPerSceneUnit") db_value = database.outputs.metersPerSceneUnit self.assertTrue(test_node.get_attribute_exists("outputs:renderProductResolution")) attribute = test_node.get_attribute("outputs:renderProductResolution") db_value = database.outputs.renderProductResolution self.assertTrue(test_node.get_attribute_exists("outputs:swhFrameNumber")) attribute = test_node.get_attribute("outputs:swhFrameNumber") db_value = database.outputs.swhFrameNumber
5,723
Python
50.567567
108
0.710292
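Every generated TestOgn* file in this listing repeats the same three checks per attribute: the attribute exists on the node, the value loaded from the template USD matches the declared default, and the database accessor returns the same value. A hypothetical helper (not present in the repo) that names this pattern:

# Hypothetical refactoring sketch of the repeated per-attribute check; not part of the repo.
import omni.graph.core as og
import omni.graph.core.tests as ogts


def check_attribute_default(test_case, node, db_port, attr_name, expected_value):
    """Assert the attribute exists and that the USD-loaded and database values both match the default."""
    test_case.assertTrue(node.get_attribute_exists(attr_name))
    attribute = node.get_attribute(attr_name)
    ogts.verify_values(expected_value, og.Controller.get(attribute), f"{attr_name}: USD load value error")
    ogts.verify_values(expected_value, getattr(db_port, attr_name.split(":")[-1]), f"{attr_name}: database access value error")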
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdPostRenderVarTextureToBuffer.py
import omni.kit.test
import omni.graph.core as og
import omni.graph.core.tests as ogts
import os


class TestOgn(ogts.OmniGraphTestCase):

    async def test_data_access(self):
        from omni.syntheticdata.ogn.OgnSdPostRenderVarTextureToBufferDatabase import OgnSdPostRenderVarTextureToBufferDatabase
        test_file_name = "OgnSdPostRenderVarTextureToBufferTemplate.usda"
        usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name)
        if not os.path.exists(usd_path):
            self.assertTrue(False, f"{usd_path} not found for loading test")
        (result, error) = await ogts.load_test_file(usd_path)
        self.assertTrue(result, f'{error} on {usd_path}')
        test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdPostRenderVarTextureToBuffer")
        database = OgnSdPostRenderVarTextureToBufferDatabase(test_node)
        self.assertTrue(test_node.is_valid())
        node_type_name = test_node.get_type_name()
        self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1)

        def _attr_error(attribute: og.Attribute, usd_test: bool) -> str:
            test_type = "USD Load" if usd_test else "Database Access"
            return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error"

        self.assertTrue(test_node.get_attribute_exists("inputs:gpu"))
        attribute = test_node.get_attribute("inputs:gpu")
        db_value = database.inputs.gpu
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        self.assertTrue(test_node.get_attribute_exists("inputs:renderVar"))
        attribute = test_node.get_attribute("inputs:renderVar")
        db_value = database.inputs.renderVar
        expected_value = ""
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        self.assertTrue(test_node.get_attribute_exists("inputs:renderVarBufferSuffix"))
        attribute = test_node.get_attribute("inputs:renderVarBufferSuffix")
        db_value = database.inputs.renderVarBufferSuffix
        expected_value = "buffer"
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        self.assertTrue(test_node.get_attribute_exists("inputs:rp"))
        attribute = test_node.get_attribute("inputs:rp")
        db_value = database.inputs.rp
        expected_value = 0
        actual_value = og.Controller.get(attribute)
        ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True))
        ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))

        self.assertTrue(test_node.get_attribute_exists("outputs:renderVar"))
        attribute = test_node.get_attribute("outputs:renderVar")
        db_value = database.outputs.renderVar
3,241
Python
50.460317
126
0.691762
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/__init__.py
"""====== GENERATED BY omni.graph.tools - DO NOT EDIT ======""" import omni.graph.tools as ogt ogt.import_tests_in_directory(__file__, __name__)
145
Python
35.499991
63
0.634483
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdPostCompRenderVarTextures.py
import omni.kit.test import omni.graph.core as og import omni.graph.core.tests as ogts import os class TestOgn(ogts.OmniGraphTestCase): async def test_data_access(self): from omni.syntheticdata.ogn.OgnSdPostCompRenderVarTexturesDatabase import OgnSdPostCompRenderVarTexturesDatabase test_file_name = "OgnSdPostCompRenderVarTexturesTemplate.usda" usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name) if not os.path.exists(usd_path): self.assertTrue(False, f"{usd_path} not found for loading test") (result, error) = await ogts.load_test_file(usd_path) self.assertTrue(result, f'{error} on {usd_path}') test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdPostCompRenderVarTextures") database = OgnSdPostCompRenderVarTexturesDatabase(test_node) self.assertTrue(test_node.is_valid()) node_type_name = test_node.get_type_name() self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1) def _attr_error(attribute: og.Attribute, usd_test: bool) -> str: test_type = "USD Load" if usd_test else "Database Access" return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error" self.assertTrue(test_node.get_attribute_exists("inputs:cudaPtr")) attribute = test_node.get_attribute("inputs:cudaPtr") db_value = database.inputs.cudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:format")) attribute = test_node.get_attribute("inputs:format") db_value = database.inputs.format expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:gpu")) attribute = test_node.get_attribute("inputs:gpu") db_value = database.inputs.gpu expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:height")) attribute = test_node.get_attribute("inputs:height") db_value = database.inputs.height expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:mode")) attribute = test_node.get_attribute("inputs:mode") db_value = database.inputs.mode expected_value = "line" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:parameters")) attribute = test_node.get_attribute("inputs:parameters") db_value = database.inputs.parameters expected_value = [0, 0, 0] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:renderVar")) attribute = test_node.get_attribute("inputs:renderVar") db_value = database.inputs.renderVar 
expected_value = "LdrColor" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:rp")) attribute = test_node.get_attribute("inputs:rp") db_value = database.inputs.rp expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:width")) attribute = test_node.get_attribute("inputs:width") db_value = database.inputs.width expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False))
5,147
Python
50.999999
120
0.682728
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdTestInstanceMapping.py
import omni.kit.test import omni.graph.core as og import omni.graph.core.tests as ogts import os class TestOgn(ogts.OmniGraphTestCase): async def test_data_access(self): from omni.syntheticdata.ogn.OgnSdTestInstanceMappingDatabase import OgnSdTestInstanceMappingDatabase test_file_name = "OgnSdTestInstanceMappingTemplate.usda" usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name) if not os.path.exists(usd_path): self.assertTrue(False, f"{usd_path} not found for loading test") (result, error) = await ogts.load_test_file(usd_path) self.assertTrue(result, f'{error} on {usd_path}') test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdTestInstanceMapping") database = OgnSdTestInstanceMappingDatabase(test_node) self.assertTrue(test_node.is_valid()) node_type_name = test_node.get_type_name() self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1) def _attr_error(attribute: og.Attribute, usd_test: bool) -> str: test_type = "USD Load" if usd_test else "Database Access" return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error" self.assertTrue(test_node.get_attribute_exists("inputs:exec")) attribute = test_node.get_attribute("inputs:exec") db_value = database.inputs.exec self.assertTrue(test_node.get_attribute_exists("inputs:instanceMapPtr")) attribute = test_node.get_attribute("inputs:instanceMapPtr") db_value = database.inputs.instanceMapPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:instancePrimPathPtr")) attribute = test_node.get_attribute("inputs:instancePrimPathPtr") db_value = database.inputs.instancePrimPathPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:minInstanceIndex")) attribute = test_node.get_attribute("inputs:minInstanceIndex") db_value = database.inputs.minInstanceIndex expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:minSemanticIndex")) attribute = test_node.get_attribute("inputs:minSemanticIndex") db_value = database.inputs.minSemanticIndex expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:numInstances")) attribute = test_node.get_attribute("inputs:numInstances") db_value = database.inputs.numInstances expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:numSemantics")) attribute = test_node.get_attribute("inputs:numSemantics") db_value = database.inputs.numSemantics expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, 
_attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticLabelTokenPtrs")) attribute = test_node.get_attribute("inputs:semanticLabelTokenPtrs") db_value = database.inputs.semanticLabelTokenPtrs expected_value = [] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticLocalTransformPtr")) attribute = test_node.get_attribute("inputs:semanticLocalTransformPtr") db_value = database.inputs.semanticLocalTransformPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticMapPtr")) attribute = test_node.get_attribute("inputs:semanticMapPtr") db_value = database.inputs.semanticMapPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticPrimPathPtr")) attribute = test_node.get_attribute("inputs:semanticPrimPathPtr") db_value = database.inputs.semanticPrimPathPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticWorldTransformPtr")) attribute = test_node.get_attribute("inputs:semanticWorldTransformPtr") db_value = database.inputs.semanticWorldTransformPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:stage")) attribute = test_node.get_attribute("inputs:stage") db_value = database.inputs.stage expected_value = "" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:swhFrameNumber")) attribute = test_node.get_attribute("inputs:swhFrameNumber") db_value = database.inputs.swhFrameNumber expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:testCaseIndex")) attribute = test_node.get_attribute("inputs:testCaseIndex") db_value = database.inputs.testCaseIndex expected_value = -1 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("outputs:exec")) attribute = test_node.get_attribute("outputs:exec") db_value = database.outputs.exec 
self.assertTrue(test_node.get_attribute_exists("outputs:semanticFilterPredicate")) attribute = test_node.get_attribute("outputs:semanticFilterPredicate") db_value = database.outputs.semanticFilterPredicate self.assertTrue(test_node.get_attribute_exists("outputs:success")) attribute = test_node.get_attribute("outputs:success") db_value = database.outputs.success
8,424
Python
53.354838
108
0.698362
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdInstanceMappingPtr.py
import omni.kit.test import omni.graph.core as og import omni.graph.core.tests as ogts import os class TestOgn(ogts.OmniGraphTestCase): async def test_data_access(self): from omni.syntheticdata.ogn.OgnSdInstanceMappingPtrDatabase import OgnSdInstanceMappingPtrDatabase test_file_name = "OgnSdInstanceMappingPtrTemplate.usda" usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name) if not os.path.exists(usd_path): self.assertTrue(False, f"{usd_path} not found for loading test") (result, error) = await ogts.load_test_file(usd_path) self.assertTrue(result, f'{error} on {usd_path}') test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdInstanceMappingPtr") database = OgnSdInstanceMappingPtrDatabase(test_node) self.assertTrue(test_node.is_valid()) node_type_name = test_node.get_type_name() self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1) def _attr_error(attribute: og.Attribute, usd_test: bool) -> str: test_type = "USD Load" if usd_test else "Database Access" return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error" self.assertTrue(test_node.get_attribute_exists("inputs:cudaPtr")) attribute = test_node.get_attribute("inputs:cudaPtr") db_value = database.inputs.cudaPtr expected_value = False actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:exec")) attribute = test_node.get_attribute("inputs:exec") db_value = database.inputs.exec self.assertTrue(test_node.get_attribute_exists("inputs:renderResults")) attribute = test_node.get_attribute("inputs:renderResults") db_value = database.inputs.renderResults expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticFilerTokens")) attribute = test_node.get_attribute("inputs:semanticFilerTokens") db_value = database.inputs.semanticFilerTokens expected_value = [] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:swhFrameNumber")) attribute = test_node.get_attribute("inputs:swhFrameNumber") db_value = database.inputs.swhFrameNumber expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("outputs:exec")) attribute = test_node.get_attribute("outputs:exec") db_value = database.outputs.exec self.assertTrue(test_node.get_attribute_exists("outputs:instanceMapPtr")) attribute = test_node.get_attribute("outputs:instanceMapPtr") db_value = database.outputs.instanceMapPtr self.assertTrue(test_node.get_attribute_exists("outputs:instancePrimPathPtr")) attribute = test_node.get_attribute("outputs:instancePrimPathPtr") db_value = database.outputs.instancePrimPathPtr self.assertTrue(test_node.get_attribute_exists("outputs:minInstanceIndex")) attribute = test_node.get_attribute("outputs:minInstanceIndex") db_value = database.outputs.minInstanceIndex 
self.assertTrue(test_node.get_attribute_exists("outputs:minSemanticIndex")) attribute = test_node.get_attribute("outputs:minSemanticIndex") db_value = database.outputs.minSemanticIndex self.assertTrue(test_node.get_attribute_exists("outputs:numInstances")) attribute = test_node.get_attribute("outputs:numInstances") db_value = database.outputs.numInstances self.assertTrue(test_node.get_attribute_exists("outputs:numSemantics")) attribute = test_node.get_attribute("outputs:numSemantics") db_value = database.outputs.numSemantics self.assertTrue(test_node.get_attribute_exists("outputs:semanticLabelTokenPtrs")) attribute = test_node.get_attribute("outputs:semanticLabelTokenPtrs") db_value = database.outputs.semanticLabelTokenPtrs self.assertTrue(test_node.get_attribute_exists("outputs:semanticLocalTransformPtr")) attribute = test_node.get_attribute("outputs:semanticLocalTransformPtr") db_value = database.outputs.semanticLocalTransformPtr self.assertTrue(test_node.get_attribute_exists("outputs:semanticMapPtr")) attribute = test_node.get_attribute("outputs:semanticMapPtr") db_value = database.outputs.semanticMapPtr self.assertTrue(test_node.get_attribute_exists("outputs:semanticPrimPathPtr")) attribute = test_node.get_attribute("outputs:semanticPrimPathPtr") db_value = database.outputs.semanticPrimPathPtr self.assertTrue(test_node.get_attribute_exists("outputs:semanticWorldTransformPtr")) attribute = test_node.get_attribute("outputs:semanticWorldTransformPtr") db_value = database.outputs.semanticWorldTransformPtr self.assertTrue(test_node.get_attribute_exists("outputs:swhFrameNumber")) attribute = test_node.get_attribute("outputs:swhFrameNumber") db_value = database.outputs.swhFrameNumber
5,966
Python
50.886956
106
0.708683
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/ogn/tests/TestOgnSdPostRenderVarDisplayTexture.py
import omni.kit.test import omni.graph.core as og import omni.graph.core.tests as ogts import os class TestOgn(ogts.OmniGraphTestCase): async def test_data_access(self): from omni.syntheticdata.ogn.OgnSdPostRenderVarDisplayTextureDatabase import OgnSdPostRenderVarDisplayTextureDatabase test_file_name = "OgnSdPostRenderVarDisplayTextureTemplate.usda" usd_path = os.path.join(os.path.dirname(__file__), "usd", test_file_name) if not os.path.exists(usd_path): self.assertTrue(False, f"{usd_path} not found for loading test") (result, error) = await ogts.load_test_file(usd_path) self.assertTrue(result, f'{error} on {usd_path}') test_node = og.Controller.node("/TestGraph/Template_omni_syntheticdata_SdPostRenderVarDisplayTexture") database = OgnSdPostRenderVarDisplayTextureDatabase(test_node) self.assertTrue(test_node.is_valid()) node_type_name = test_node.get_type_name() self.assertEqual(og.GraphRegistry().get_node_type_version(node_type_name), 1) def _attr_error(attribute: og.Attribute, usd_test: bool) -> str: test_type = "USD Load" if usd_test else "Database Access" return f"{node_type_name} {test_type} Test - {attribute.get_name()} value error" self.assertTrue(test_node.get_attribute_exists("inputs:cameraFisheyeParams")) attribute = test_node.get_attribute("inputs:cameraFisheyeParams") db_value = database.inputs.cameraFisheyeParams expected_value = [] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:cameraModel")) attribute = test_node.get_attribute("inputs:cameraModel") db_value = database.inputs.cameraModel expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:cameraNearFar")) attribute = test_node.get_attribute("inputs:cameraNearFar") db_value = database.inputs.cameraNearFar expected_value = [0.0, 0.0] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:exec")) attribute = test_node.get_attribute("inputs:exec") db_value = database.inputs.exec self.assertTrue(test_node.get_attribute_exists("inputs:gpu")) attribute = test_node.get_attribute("inputs:gpu") db_value = database.inputs.gpu expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:instanceMapSDCudaPtr")) attribute = test_node.get_attribute("inputs:instanceMapSDCudaPtr") db_value = database.inputs.instanceMapSDCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:instanceMappingInfoSDPtr")) attribute = test_node.get_attribute("inputs:instanceMappingInfoSDPtr") db_value = database.inputs.instanceMappingInfoSDPtr expected_value = 0 actual_value = og.Controller.get(attribute) 
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:metersPerSceneUnit")) attribute = test_node.get_attribute("inputs:metersPerSceneUnit") db_value = database.inputs.metersPerSceneUnit expected_value = 0.0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:mode")) attribute = test_node.get_attribute("inputs:mode") db_value = database.inputs.mode expected_value = "autoMode" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:parameters")) attribute = test_node.get_attribute("inputs:parameters") db_value = database.inputs.parameters expected_value = [0.0, 5.0, 0.33, 0.27] actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:renderVar")) attribute = test_node.get_attribute("inputs:renderVar") db_value = database.inputs.renderVar expected_value = "" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:renderVarDisplay")) attribute = test_node.get_attribute("inputs:renderVarDisplay") db_value = database.inputs.renderVarDisplay expected_value = "" actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:rp")) attribute = test_node.get_attribute("inputs:rp") db_value = database.inputs.rp expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdDisplayHeight")) attribute = test_node.get_attribute("inputs:sdDisplayHeight") db_value = database.inputs.sdDisplayHeight expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdDisplayWidth")) attribute = test_node.get_attribute("inputs:sdDisplayWidth") db_value = database.inputs.sdDisplayWidth expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdSemBBox3dCamCornersCudaPtr")) attribute = test_node.get_attribute("inputs:sdSemBBox3dCamCornersCudaPtr") db_value = database.inputs.sdSemBBox3dCamCornersCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) 
ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdSemBBox3dCamExtentCudaPtr")) attribute = test_node.get_attribute("inputs:sdSemBBox3dCamExtentCudaPtr") db_value = database.inputs.sdSemBBox3dCamExtentCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdSemBBoxExtentCudaPtr")) attribute = test_node.get_attribute("inputs:sdSemBBoxExtentCudaPtr") db_value = database.inputs.sdSemBBoxExtentCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:sdSemBBoxInfosCudaPtr")) attribute = test_node.get_attribute("inputs:sdSemBBoxInfosCudaPtr") db_value = database.inputs.sdSemBBoxInfosCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticLabelTokenSDCudaPtr")) attribute = test_node.get_attribute("inputs:semanticLabelTokenSDCudaPtr") db_value = database.inputs.semanticLabelTokenSDCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticMapSDCudaPtr")) attribute = test_node.get_attribute("inputs:semanticMapSDCudaPtr") db_value = database.inputs.semanticMapSDCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticPrimTokenSDCudaPtr")) attribute = test_node.get_attribute("inputs:semanticPrimTokenSDCudaPtr") db_value = database.inputs.semanticPrimTokenSDCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:semanticWorldTransformSDCudaPtr")) attribute = test_node.get_attribute("inputs:semanticWorldTransformSDCudaPtr") db_value = database.inputs.semanticWorldTransformSDCudaPtr expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) self.assertTrue(test_node.get_attribute_exists("inputs:swhFrameNumber")) attribute = test_node.get_attribute("inputs:swhFrameNumber") db_value = database.inputs.swhFrameNumber expected_value = 0 actual_value = og.Controller.get(attribute) ogts.verify_values(expected_value, actual_value, _attr_error(attribute, True)) ogts.verify_values(expected_value, db_value, _attr_error(attribute, False)) 
self.assertTrue(test_node.get_attribute_exists("outputs:cudaPtr")) attribute = test_node.get_attribute("outputs:cudaPtr") db_value = database.outputs.cudaPtr self.assertTrue(test_node.get_attribute_exists("outputs:exec")) attribute = test_node.get_attribute("outputs:exec") db_value = database.outputs.exec self.assertTrue(test_node.get_attribute_exists("outputs:format")) attribute = test_node.get_attribute("outputs:format") db_value = database.outputs.format self.assertTrue(test_node.get_attribute_exists("outputs:height")) attribute = test_node.get_attribute("outputs:height") db_value = database.outputs.height self.assertTrue(test_node.get_attribute_exists("outputs:renderVarDisplay")) attribute = test_node.get_attribute("outputs:renderVarDisplay") db_value = database.outputs.renderVarDisplay self.assertTrue(test_node.get_attribute_exists("outputs:width")) attribute = test_node.get_attribute("outputs:width") db_value = database.outputs.width
13206
Python
54.259414
124
0.699606
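The generated OGN test above repeats the same six-line check for every input attribute (exists, read via `og.Controller`, read via the generated database, compare both against the expected default). As a hedged illustration only — this helper is hypothetical and not part of the repository — the pattern could be factored out; the `og`/`ogts` calls are exactly the ones the test itself uses and are assumed to keep those signatures:

```python
# Hypothetical refactoring of the repeated attribute check in the generated test.
# `og`, `ogts`, `test_node` and `database` are assumed to behave as in the file above.
import omni.graph.core as og
import omni.graph.core.tests as ogts


def check_input_attribute(test_case, test_node, database, name, expected_value, node_type_name):
    """Assert an input attribute exists and matches its expected default, both through
    og.Controller (USD load path) and through the generated database accessor."""
    test_case.assertTrue(test_node.get_attribute_exists(name))
    attribute = test_node.get_attribute(name)
    db_value = getattr(database.inputs, name.split(":")[-1])
    actual_value = og.Controller.get(attribute)
    ogts.verify_values(expected_value, actual_value,
                       f"{node_type_name} USD Load Test - {name} value error")
    ogts.verify_values(expected_value, db_value,
                       f"{node_type_name} Database Access Test - {name} value error")


# Example call, mirroring one block of the generated test:
# check_input_attribute(self, test_node, database, "inputs:cameraModel", 0, node_type_name)
```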
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/viewport_legacy.py
from pathlib import Path from pxr import Sdf import carb.settings import omni.ui as ui import omni.usd from .SyntheticData import SyntheticData from .visualizer_window import VisualizerWindow import weakref CURRENT_PATH = Path(__file__).parent.absolute() ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("data") BUTTON_STYLE = { "height": 22, "width": 26, "style": {"Button": {"padding": 4, "background_color": 0x80303030}}, "image_height": 14, "image_width": 26, } MENU_FLAGS = {"flags": ui.WINDOW_FLAGS_POPUP | ui.WINDOW_FLAGS_NO_TITLE_BAR, "auto_resize": True} class ViewportLegacy: _g_visualizers = {} _g_iface = None @staticmethod def create_update_subscription(): import omni.kit.viewport_legacy ViewportLegacy._g_iface = omni.kit.viewport_legacy.get_viewport_interface() if ViewportLegacy._g_iface is None: return import omni.kit.app event_stream = omni.kit.app.get_app().get_update_event_stream() return event_stream.create_subscription_to_pop(ViewportLegacy._on_update, name="omni.syntheticdata update") @staticmethod def close_viewports(): visualizers, ViewportLegacy._g_visualizers = ViewportLegacy._g_visualizers, {} if visualizers: for visualizer, vp_delegate in visualizers.values(): visualizer.close() vp_delegate.destroy() @staticmethod def _on_update(dt): stage = omni.usd.get_context().get_stage() if stage is None: return # retrieve the list of active viewports viewport_names = set([ViewportLegacy._g_iface.get_viewport_window_name(vp) for vp in ViewportLegacy._g_iface.get_instance_list()]) visualizers = ViewportLegacy._g_visualizers # remove obsolete extension viewports data for vp_name in set(visualizers.keys()).difference(viewport_names): visualizer, vp_delegate = visualizers[vp_name] visualizer.close() vp_delegate.destroy() del visualizers[vp_name] # create missing extension viewports data for vp_name in viewport_names.difference(set(visualizers.keys())): vp_delegate = ViewportLegacy(vp_name) visualizer_window = VisualizerWindow(vp_name, vp_delegate) vp_delegate.set_visualizer_window(weakref.proxy(visualizer_window)) visualizers[vp_name] = visualizer_window, vp_delegate # update all valid viewport for vp_name, vis_and_delegate in visualizers.items(): legacy_vp = ViewportLegacy._g_iface.get_viewport_window(ViewportLegacy._g_iface.get_instance(vp_name)) if legacy_vp: visualizer, vp_delegate = vis_and_delegate camera_path = legacy_vp.get_active_camera() vp_delegate._update_legacy_buttons(Sdf.Path(camera_path).name, legacy_vp.is_visible()) visualizer.update(legacy_vp.get_render_product_path(), stage) def __init__(self, name: str): self.__window_name = name self.__visualizer_window = None # initialize ui self.__menus = None self.__btns = {"window": ui.Window(name, detachable=False)} with self.__btns["window"].frame: with ui.VStack(): ui.Spacer(height=4) with ui.HStack(height=0, width=0): self.__btns["spacer"] = ui.Spacer(width=300) self.__btns["icon"] = ui.Button( tooltip="Synthetic Data Sensors", image_url=f"{ICON_PATH}/sensor_icon.svg", **BUTTON_STYLE ) self.__btns["icon"].set_mouse_pressed_fn(lambda x, y, *_: self._show_legacy_ui_menu(x, y)) def __del__(self): self.destroy() def destroy(self): self.__btns = None self.__menus = None self.__window_name = None self.__visualizer_window = None def set_visualizer_window(self, visualizer_window): self.__visualizer_window = visualizer_window def _update_legacy_buttons(self, cam_name: str, is_visible: bool): # update the buttons in a legacy viewport (dependent on camera name length) render_mode = 
carb.settings.get_settings().get("/rtx/rendermode") render_spacing = 15 if render_mode == "RaytracedLighting": render_spacing = 12 elif render_mode == "PathTracing": render_spacing = 31 spacing = 5 + (len(cam_name) + render_spacing) * 15 self.__btns["spacer"].width = ui.Length(max(300, spacing)) self.__btns["window"].visible = is_visible def _build_legacy_ui_menu(self): self.__menus = ui.Window(f"{self.__window_name}-sensor-menu", **MENU_FLAGS) with self.__menus.frame: with ui.VStack(width=200, spacing=5): render_product_combo_model = self.__visualizer_window.render_product_combo_model if render_product_combo_model: with ui.HStack(height=40): ui.Label("RenderProduct", width=150) ui.ComboBox(render_product_combo_model) render_var_combo_model = self.__visualizer_window.render_var_combo_model if render_var_combo_model: with ui.HStack(height=40): ui.Label("RenderVar", width=150) ui.ComboBox(render_var_combo_model) with ui.HStack(height=20): model = ui.FloatSlider(name="angle", min=-100.0, max=100.0).model model.add_value_changed_fn( lambda m: render_var_combo_model.set_combine_angle(m.get_value_as_float()) ) model = ui.FloatSlider(name="x", min=-100.0, max=100.0).model model.add_value_changed_fn( lambda m: render_var_combo_model.set_combine_divide_x(m.get_value_as_float()) ) model = ui.FloatSlider(name="y", min=-100.0, max=100.0).model model.add_value_changed_fn( lambda m: render_var_combo_model.set_combine_divide_y(m.get_value_as_float()) ) with ui.HStack(height=40): ui.Label("Synthetic Data Sensors", width=150) btn = ui.Button("Clear All") selection_stack = ui.VStack(spacing=5) btn.set_clicked_fn(lambda ss=selection_stack: self._clear_all(ss)) selection_stack.clear() with selection_stack: self._build_ui_sensor_selection() self.__menus.visible = False # callback to reset the sensor selection def _clear_all(self, selection_stack): self.__visualizer_window.visualization_activation.clear() selection_stack.clear() with selection_stack: self._build_ui_sensor_selection() def _show_window(self): self.__visualizer_window.toggle_enable_visualization() SyntheticData.disable_async_rendering() def _build_ui_sensor_selection(self): for sensor_label, sensor in SyntheticData.get_registered_visualization_template_names_for_display(): with ui.HStack(): ui.Label(sensor_label, width=300) cb = ui.CheckBox( width=0, style={"font_size": 24, "margin": 3}, style_type_name_override="Options.CheckBox" ) cb.model.set_value(sensor in self.__visualizer_window.visualization_activation) cb.model.add_value_changed_fn(lambda c, s=sensor: self.__visualizer_window.on_sensor_item_clicked(c.as_bool, s)) ui.Button("Show", height=40, clicked_fn=lambda: self._show_window()) def _show_legacy_ui_menu(self, x, y): self.__menus = None self._build_legacy_ui_menu() self.__menus.position_x = x self.__menus.position_y = y self.__menus.visible = True @property def render_product_path(self): legacy_vp = ViewportLegacy._g_iface.get_viewport_window(ViewportLegacy._g_iface.get_instance(self.__window_name)) return legacy_vp.get_render_product_path() if legacy_vp else None @render_product_path.setter def render_product_path(self, prim_path: str): legacy_vp = ViewportLegacy._g_iface.get_viewport_window(ViewportLegacy._g_iface.get_instance(self.__window_name)) if legacy_vp: legacy_vp.set_render_product_path(prim_path) @property def usd_context(self): legacy_vp = ViewportLegacy._g_iface.get_viewport_window(ViewportLegacy._g_iface.get_instance(self.__window_name)) usd_context_name = legacy_vp.get_usd_context_name() if hasattr(legacy_vp, 
'get_usd_context_name') else '' return omni.usd.get_context(usd_context_name)
9000
Python
41.658768
138
0.597889
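`viewport_legacy.py` drives the synthetic-data visualizer UI from a per-frame update subscription (`ViewportLegacy.create_update_subscription`) and tears everything down with `close_viewports`. A minimal, hedged sketch of how an extension might own that subscription follows; the `omni.ext.IExt` lifecycle and the import path are assumptions for illustration, only the two `ViewportLegacy` calls come from the file above:

```python
# Hedged sketch: an extension holding the update subscription created by ViewportLegacy.
# The omni.ext.IExt lifecycle is assumed; the module path is inferred from the file layout.
import omni.ext
from omni.syntheticdata.scripts.viewport_legacy import ViewportLegacy


class SyntheticDataUIExtension(omni.ext.IExt):
    def on_startup(self, ext_id):
        # Keep the subscription handle alive for the extension's lifetime;
        # dropping it would silently stop the per-frame viewport updates.
        self._update_sub = ViewportLegacy.create_update_subscription()

    def on_shutdown(self):
        self._update_sub = None          # releases the update subscription
        ViewportLegacy.close_viewports()  # closes per-viewport visualizer windows
```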
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/helpers.py
import math from functools import lru_cache import numpy.lib.recfunctions as rfn import carb import numpy as np import omni.usd from pxr import UsdGeom, UsdShade, Semantics from .. import _syntheticdata EPS = 1e-8 @lru_cache() def _get_syntheticdata_iface(): return _syntheticdata.acquire_syntheticdata_interface() def _interpolate(p, a, b): p0 = 1.0 - p return [int(p0 * a[0] + p * b[0]), int(p0 * a[1] + p * b[1]), int(p0 * a[2] + p * b[2]), 255] def get_bbox_3d_corners(extents): """Return transformed points in the following order: [LDB, RDB, LUB, RUB, LDF, RDF, LUF, RUF] where R=Right, L=Left, D=Down, U=Up, B=Back, F=Front and LR: x-axis, UD: y-axis, FB: z-axis. Args: extents (numpy.ndarray): A structured numpy array containing the fields: [`x_min`, `y_min`, `x_max`, `y_max`, `transform`. Returns: (numpy.ndarray): Transformed corner coordinates with shape `(N, 8, 3)`. """ rdb = [extents["x_max"], extents["y_min"], extents["z_min"]] ldb = [extents["x_min"], extents["y_min"], extents["z_min"]] lub = [extents["x_min"], extents["y_max"], extents["z_min"]] rub = [extents["x_max"], extents["y_max"], extents["z_min"]] ldf = [extents["x_min"], extents["y_min"], extents["z_max"]] rdf = [extents["x_max"], extents["y_min"], extents["z_max"]] luf = [extents["x_min"], extents["y_max"], extents["z_max"]] ruf = [extents["x_max"], extents["y_max"], extents["z_max"]] tfs = extents["transform"] corners = np.stack((ldb, rdb, lub, rub, ldf, rdf, luf, ruf), 0) corners_homo = np.pad(corners, ((0, 0), (0, 1), (0, 0)), constant_values=1.0) return np.einsum("jki,ikl->ijl", corners_homo, tfs)[..., :3] def reduce_bboxes_2d(bboxes, instance_mappings): """ Reduce 2D bounding boxes of leaf nodes to prims with a semantic label. Args: bboxes (numpy.ndarray): A structured numpy array containing the fields: `[ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")]` instance_mappings (numpy.ndarray): A structured numpy array containing the fields: `[("uniqueId", np.int32), ("name", "O"), ("semanticId", "<u4"), ("semanticLabel", "O"), ("instanceIds", "O"), ("metadata", "O")]` Returns: (numpy.ndarray): A structured numpy array containing the fields: `[ ("uniqueId", np.int32), ("name", "O"), ("semanticLabel", "O"), ("instanceIds", "O"), ("semanticId", "<u4"), ("metadata", "O"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")]` """ bboxes = bboxes[bboxes["x_min"] < 2147483647] reduced_bboxes = [] for im in instance_mappings: if im["instanceIds"]: # if mapping has descendant instance ids mask = np.isin(bboxes["instanceId"], im["instanceIds"]) bbox_masked = bboxes[mask] if len(bbox_masked) > 0: reduced_bboxes.append( ( im["uniqueId"], im["name"], im["semanticLabel"], im["metadata"], im["instanceIds"], im["semanticId"], np.min(bbox_masked["x_min"]), np.min(bbox_masked["y_min"]), np.max(bbox_masked["x_max"]), np.max(bbox_masked["y_max"]), ) ) return np.array( reduced_bboxes, dtype=[("uniqueId", np.int32), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O")] + bboxes.dtype.descr[1:], ) def reduce_bboxes_3d(bboxes, instance_mappings): """ Reduce 3D bounding boxes of leaf nodes to prims with a semantic label. 
Args: bboxes (numpy.ndarray): A structured numpy array containing the fields: `[ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("z_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ("z_max", "<i4"), ("transform", "<f4", (4, 4))]` instance_mappings (numpy.ndarray): A structured numpy array containing the fields: `[("uniqueId", np.int32), ("name", "O"), ("semanticId", "<u4"), ("semanticLabel", "O"), ("instanceIds", "<u4"), ("metadata", "O")]` Returns: (numpy.ndarray): A structured numpy array containing the fields: `[("uniqueId", np.int32) ("name", "O"), ("semanticLabel", "O"), ("instanceIds", "O"), ("metadata", "O"), ("semanticId", "<u4"),("x_min", "<i4"), ("y_min", "<i4"), ("z_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ("z_max", "<i4"), ("transform", "<f4", (4, 4))]` If `corners` field is supplied in `bboxes` argument, the field will be updated accordingly. """ current_time = omni.timeline.get_timeline_interface().get_current_time() reduced_bboxes = [] stage = omni.usd.get_context().get_stage() if "corners" in bboxes.dtype.names: corners = bboxes["corners"] else: # TODO if not corners, use extents corners = get_bbox_3d_corners(bboxes) max_instance_id = bboxes["instanceId"].max() idx_lut = np.zeros(max_instance_id + 1, dtype=np.uint32) for i, bb_id in enumerate(bboxes["instanceId"]): idx_lut[bb_id] = i for i, im in enumerate(instance_mappings): prim = stage.GetPrimAtPath(im["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(current_time)) tf_inv = np.linalg.inv(tf) # filter instance ids that corresponding to invisible bounding boxes (not filtered in the instance mapping) instIds = [instId for instId in im["instanceIds"] if instId < len(idx_lut)] if len(instIds) == 0: continue idxs = idx_lut[instIds] children_corners = corners[idxs] children_corners_homo = np.pad(children_corners.reshape(-1, 3), ((0, 0), (0, 1)), constant_values=1.0) corners_local = np.einsum("bj,jk->bk", children_corners_homo, tf_inv)[:, :3] corners_local_min = corners_local[..., :3].reshape(-1, 3).min(0) corners_local_max = corners_local[..., :3].reshape(-1, 3).max(0) extents_local = np.stack([corners_local_min, corners_local_max]) row = [ im["uniqueId"], im["name"], im["semanticLabel"], im["metadata"], im["instanceIds"], im["semanticId"], *extents_local.reshape(-1), tf, ] if "corners" in bboxes.dtype.names: world_corners = get_bbox_3d_corners( { "x_min": [extents_local[0, 0]], "x_max": [extents_local[1, 0]], "y_min": [extents_local[0, 1]], "y_max": [extents_local[1, 1]], "z_min": [extents_local[0, 2]], "z_max": [extents_local[1, 2]], "transform": [tf], } ) row.append(world_corners) reduced_bboxes.append(tuple(row)) return np.array( reduced_bboxes, dtype=[("uniqueId", np.int32), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O")] + bboxes.dtype.descr[1:], ) def merge_sensors( bounding_box_2d_tight=None, bounding_box_2d_loose=None, bounding_box_3d=None, occlusion_quadrants=None ): """ Merge sensor structured array outputs. 
Args: bounding_box_2d_tight (numpy.ndarray, optional): A structured numpy array containing the fields: `[("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("semanticId", "<u4"), ("metadata", "O"), ("instanceIds", "O"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")]` bounding_box_2d_loose (numpy.ndarray, optional): A structured numpy array containing the fields: `[("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("instanceId", "<u4"), ("semanticId", "<u4"), ("metadata", "O"), ("instanceIds", "O"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")]` bounding_box_3d (numpy.ndarray, optional): A structured numpy array containing the fields: `[("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("semanticId", "<u4"), ("metadata", "O"), ("instanceIds", "O"), ("x_min", "<i4"), ("y_min", "<i4"), ("z_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ("z_max", "<i4"), ("transform", "<f4", (4, 4))]` occlusion_quadrants (numpy.ndarray, optional): A structured numpy array containing the fields: [("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"),("semanticId", "<u4"), ("metadata", "O"), ("instanceIds", "O"), ("occlusion_quadrant", "O")] Returns: (numpy.ndarray): A structured array containing merged data from the arguments supplied. """ arrays = [] array_suffixes = [] defaults = {"x_min": -1, "x_max": -1, "y_min": -1, "y_max": -1, "z_min": -1, "z_max": -1} # Add valid arrays to merge list and set suffixes if bounding_box_2d_tight is not None: arrays.append(bounding_box_2d_tight) array_suffixes.append("_bbox2d_tight") if bounding_box_2d_loose is not None: arrays.append(bounding_box_2d_loose) array_suffixes.append("_bbox2d_loose") if bounding_box_3d is not None: arrays.append(bounding_box_3d) array_suffixes.append("_bbox3d") if occlusion_quadrants is not None: arrays.append(occlusion_quadrants) array_suffixes.append("_occ") if not arrays: return None r0 = arrays.pop() r0_suf = array_suffixes.pop() while arrays: r1 = arrays.pop() r1_suf = array_suffixes.pop() # Add suffixes r0.dtype.names = [f"{n}{r0_suf}" if n in defaults.keys() else n for n in r0.dtype.names] r1.dtype.names = [f"{n}{r1_suf}" if n in defaults.keys() else n for n in r1.dtype.names] defaults_suf = {} defaults_suf.update({f"{k}{r0_suf}": v for k, v in defaults.items()}) defaults_suf.update({f"{k}{r1_suf}": v for k, v in defaults.items()}) r0 = rfn.join_by( ["uniqueId", "name", "semanticId", "semanticLabel", "metadata", "instanceIds"], r0, r1, defaults=defaults_suf, r1postfix=r0_suf, r2postfix=r1_suf, jointype="outer", usemask=False, ) r0_suf = "" return r0 def get_projection_matrix(fov, aspect_ratio, z_near, z_far): """ Calculate the camera projection matrix. Args: fov (float): Field of View (in radians) aspect_ratio (float): Image aspect ratio (Width / Height) z_near (float): distance to near clipping plane z_far (float): distance to far clipping plane Returns: (numpy.ndarray): View projection matrix with shape `(4, 4)` """ a = -1.0 / math.tan(fov / 2) b = -a * aspect_ratio c = z_far / (z_far - z_near) d = z_near * z_far / (z_far - z_near) return np.array([[a, 0.0, 0.0, 0.0], [0.0, b, 0.0, 0.0], [0.0, 0.0, c, 1.0], [0.0, 0.0, d, 0.0]]) def get_view_proj_mat(view_params): """ Get View Projection Matrix. 
Args: view_params (dict): dictionary containing view parameters """ z_near, z_far = view_params["clipping_range"] view_matrix = np.linalg.inv(view_params["view_to_world"]) fov = 2 * math.atan(view_params["horizontal_aperture"] / (2 * view_params["focal_length"])) projection_mat = get_projection_matrix(fov, view_params["aspect_ratio"], z_near, z_far) return np.dot(view_matrix, projection_mat) def project_pinhole(points, view_params): """ Project 3D points to 2D camera view using a pinhole camera model. Args: points (numpy.ndarray): Array of points in world frame of shape (num_points, 3). view_params: Returns: (numpy.ndarray): Image-space points of shape (num_points, 3) """ view_proj_matrix = get_view_proj_mat(view_params) homo = np.pad(points, ((0, 0), (0, 1)), constant_values=1.0) tf_points = np.dot(homo, view_proj_matrix) tf_points = tf_points / (tf_points[..., -1:]) tf_points[..., :2] = 0.5 * (tf_points[..., :2] + 1) return tf_points[..., :3] def _parse_semantic_schemas(prim): """ Return the class name and (type, data) pairs of metadata """ schemas = [s.split(":")[1] for s in prim.GetAppliedSchemas() if "SemanticsAPI" in s] metadata = [] semantic_class = None for schema in schemas: sem = Semantics.SemanticsAPI.Get(prim, schema) sem_type = sem.GetSemanticTypeAttr().Get() sem_data = sem.GetSemanticDataAttr().Get() if sem_type == "class": semantic_class = sem_data else: metadata.append((sem_type, sem_data)) return semantic_class, metadata def _parse_instance_mappings(cur_prim): """ Recursively parse instance mappings. """ # TODO @jlafleche currently not compatible with two labels per prim instance_mappings = [] children = cur_prim.GetChildren() instance_ids = _get_syntheticdata_iface().get_instance_segmentation_id(cur_prim.GetPath().pathString).tolist() if children: for child in children: child_instance_ids, child_instance_mappings = _parse_instance_mappings(child) instance_ids += child_instance_ids instance_mappings += child_instance_mappings has_prim_semantics = cur_prim.HasAPI(Semantics.SemanticsAPI) material = UsdShade.MaterialBindingAPI(cur_prim).ComputeBoundMaterial()[0].GetPrim() has_material_semantics = material and material.HasAPI(Semantics.SemanticsAPI) if has_prim_semantics and instance_ids: semantic_class, metadata = _parse_semantic_schemas(cur_prim) if semantic_class: semantic_id = _get_syntheticdata_iface().get_semantic_segmentation_id_from_data("class", semantic_class) instance_mappings.append((str(cur_prim.GetPath()), semantic_id, semantic_class, instance_ids, metadata)) elif has_material_semantics and instance_ids: semantic_class, metadata = _parse_semantic_schemas(material) if semantic_class: semantic_id = _get_syntheticdata_iface().get_semantic_segmentation_id_from_data("class", semantic_class) instance_mappings.append((str(cur_prim.GetPath()), semantic_id, semantic_class, instance_ids, metadata)) return instance_ids, instance_mappings def get_instance_mappings(): """ Get instance mappings. Uses update number as frame ID for caching. """ app = omni.kit.app.get_app_interface() frame_id = app.get_update_number() mappings = _get_instance_mappings(frame_id) return mappings @lru_cache(maxsize=1) def _get_instance_mappings(frame_id=None): """ Get instance mappings. Uses `frame_id` for caching. 
""" stage = omni.usd.get_context().get_stage() """ Use the C++ API to retrieve the instance mapping """ # _, instance_mappings = _parse_instance_mappings(stage.GetPseudoRoot()) # mappings_raw = [(i + 1, *im) for i, im in enumerate(instance_mappings)] mappings_raw = _get_syntheticdata_iface().get_instance_mapping_list() mappings = np.array( mappings_raw, dtype=[ ("uniqueId", np.int32), ("name", "O"), ("semanticId", np.int32), ("semanticLabel", "O"), ("instanceIds", "O"), ("metadata", "O"), ], ) return mappings def reduce_occlusion(occlusion_data, instance_mappings): """ Reduce occlusion value of leaf nodes to prims with a semantic label. Args: sensor_data (numpy.ndarray): A structured numpy array with the fields: [("instanceId", "<u4"), ("semanticId", "<u4"), ("occlusionRatio", "<f4")], where occlusion ranges from 0 (not occluded) to 1 (fully occluded). Returns: (numpy.ndarray): A structured numpy array with the fields: [("uniqueId", np.int32) ("name", "O"), ("semanticLabel", "O"), ("instanceIds", "O"), ("semanticId", "<u4"), ("metadata", "O"), ("occlusionRatio", "<f4")] """ mapped_data = [] occlusion_data = occlusion_data[~np.isnan(occlusion_data["occlusionRatio"])] for im in instance_mappings: if im["instanceIds"]: # if mapping has descendant instance ids mask = np.isin(occlusion_data["instanceId"], im["instanceIds"]) if mask.sum() > 1: carb.log_warn( f"[syntheticdata.viz] Mapping on {im['name']} contains multiple child meshes, occlusion value may be incorrect." ) occ = occlusion_data[mask] if len(occ) > 0: mapped_data.append( ( im["uniqueId"], im["name"], im["semanticLabel"], im["metadata"], im["instanceIds"], im["semanticId"], np.mean(occ["occlusionRatio"]), ) ) return np.array( mapped_data, dtype=[("uniqueId", np.int32), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O")] + occlusion_data.dtype.descr[1:], ) def _join_struct_arrays(arrays): """ Join N numpy structured arrays. """ n = len(arrays[0]) assert all([len(a) == n for a in arrays]) dtypes = sum(([d for d in a.dtype.descr if d[0]] for a in arrays), []) joined = np.empty(n, dtype=dtypes) for a in arrays: joined[list(a.dtype.names)] = a return joined def _fish_eye_map_to_sphere(screen, screen_norm, theta, max_fov): """ Utility function to map a sample from a disk on the image plane to a sphere. """ direction = np.array([[0, 0, -1]] * screen.shape[0], dtype=np.float) extent = np.zeros(screen.shape[0], dtype=np.float) # A real fisheye have some maximum FOV after which the lens clips. valid_mask = theta <= max_fov # Map to a disk: screen / R normalizes the polar direction in screen space. xy = screen[valid_mask] screen_norm_mask = screen_norm[valid_mask] > 1e-5 xy[screen_norm_mask] = xy[screen_norm_mask] / screen_norm[valid_mask, None] # Map disk to a sphere cos_theta = np.cos(theta[valid_mask]) sin_theta = np.sqrt(1.0 - cos_theta ** 2) # Todo: is this right? Do we assume z is negative (RH coordinate system)? z = -cos_theta xy = xy * sin_theta[:, None] direction[valid_mask] = np.stack([xy[valid_mask, 0], xy[valid_mask, 1], z], axis=1) extent[valid_mask] = 1.0 # < far clip is not a plane, it's a sphere! 
return direction, extent def project_fish_eye_map_to_sphere(direction): z = direction[:, 2:] cos_theta = -z theta = np.arccos(np.clip(cos_theta, 0.0, 1.0)) theta = np.arccos(cos_theta) # TODO currently projecting outside of max FOV sin_theta = np.sqrt(1.0 - cos_theta * cos_theta + EPS) xy = direction[:, :2] / (sin_theta + EPS) return xy, theta def fish_eye_polynomial(ndc, view_params): """ FTheta camera model based on DW src/rigconfiguration/CameraModelsNoEigen.hpp """ # Convert NDC pixel position to screen space... well almost. It is screen space but the extent of x is [-0.5, 0.5] # and the extent of y is [-0.5/aspectRatio, 0.5/aspectRatio]. screen = ndc - 0.5 aspect_ratio = view_params["aspect_ratio"] screen[:, 1] /= -aspect_ratio # The FTheta polynomial works at a nominal resolution. So far we have done calculations in NDC to be # resolution independent. Here we scale by the nominal resolution in X. screen = (screen - view_params["ftheta"]["c_ndc"]) * view_params["ftheta"]["width"] # Compute the radial distance on the screen from its center point r = np.sqrt(screen[:, 0] ** 2 + screen[:, 1] ** 2) theta = ftheta_distortion(view_params["ftheta"], r) max_fov = math.radians(view_params["ftheta"]["max_fov"] / 2) return _fish_eye_map_to_sphere(screen, r, theta, max_fov) def project_fish_eye_polynomial(points, view_params): """ Project F-Theta camera model. Args: points (numpy.ndarray): Array of points in world frame of shape (num_points, 3). view_params (dict): dictionary containing view parameters Returns: (numpy.ndarray): Image-space points of shape (num_points, 3) """ points_h = np.pad(points, ((0, 0), (0, 1)), constant_values=1) points_cam_frame = np.einsum("jk,kl->jl", points_h, view_params["world_to_view"])[..., :3] directions = points_cam_frame / np.linalg.norm(points_cam_frame + EPS, axis=1)[:, None] xy, theta = project_fish_eye_map_to_sphere(directions) r = _ftheta_distortion_solver(view_params["ftheta"], theta) screen = xy * r screen = screen / view_params["ftheta"]["width"] + view_params["ftheta"]["c_ndc"] screen[:, 1] *= -view_params["aspect_ratio"] ndc = screen + 0.5 ndc = np.pad(ndc, ((0, 0), (0, 1)), constant_values=0) return ndc def get_view_params(viewport): """ Get view parameters. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Returns: (dict): Dictionary containing view parameters. 
""" stage = omni.usd.get_context().get_stage() camera = stage.GetPrimAtPath(viewport.camera_path) current_time = omni.timeline.get_timeline_interface().get_current_time() view_to_world = UsdGeom.Imageable(camera).ComputeLocalToWorldTransform(current_time) world_to_view = view_to_world.GetInverse() width, height = viewport.resolution projection_type = camera.GetAttribute("cameraProjectionType").Get(current_time) if projection_type == "fisheyePolynomial": ftheta = { "width": camera.GetAttribute("fthetaWidth").Get(), "height": camera.GetAttribute("fthetaHeight").Get(), "cx": camera.GetAttribute("fthetaCx").Get(), "cy": camera.GetAttribute("fthetaCy").Get(), "poly_a": camera.GetAttribute("fthetaPolyA").Get(), "poly_b": camera.GetAttribute("fthetaPolyB").Get(), "poly_c": camera.GetAttribute("fthetaPolyC").Get(), "poly_d": camera.GetAttribute("fthetaPolyD").Get(), "poly_e": camera.GetAttribute("fthetaPolyE").Get(), "max_fov": camera.GetAttribute("fthetaMaxFov").Get(), } ftheta["edge_fov"] = ftheta_distortion(ftheta, ftheta["width"] / 2) ftheta["c_ndc"] = np.array( [ (ftheta["cx"] - ftheta["width"] / 2) / ftheta["width"], (ftheta["height"] / 2 - ftheta["cy"]) / ftheta["width"], ] ) else: ftheta = None view_params = { "view_to_world": np.array(view_to_world), "world_to_view": np.array(world_to_view), "projection_type": projection_type, "ftheta": ftheta, "width": width, "height": height, "aspect_ratio": width / height, "clipping_range": camera.GetAttribute("clippingRange").Get(current_time), "horizontal_aperture": camera.GetAttribute("horizontalAperture").Get(current_time), "focal_length": camera.GetAttribute("focalLength").Get(current_time), } return view_params def image_to_world(image_coordinates, view_params): """ Map each image coordinate to a corresponding direction vector. Args: pixel (numpy.ndarray): Pixel coordinates of shape (num_pixels, 2) view_params (dict): dictionary containing view parameters Returns (numpy.ndarray): Direction vectors of shape (num_pixels, 3) """ ndc = image_coordinates / np.array([view_params["width"], view_params["height"]]) direction, extent = fish_eye_polynomial(ndc, view_params) view_to_world = view_params["view_to_world"] origin = np.matmul(np.array([0, 0, 0, 1]), view_to_world)[:3] direction = np.matmul(np.pad(direction, ((0, 0), (0, 1)), constant_values=0), view_to_world)[:, :3] direction /= np.linalg.norm(direction, axis=1, keepdims=True) return origin, direction def world_to_image(points, viewport, view_params=None): """ Project world coordinates to image-space. Args: points (numpy.ndarray): Array of points in world frame of shape (num_points, 3). viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. view_params (dict, Optional): View parameters dictionary obtained from `omni.syntheticdata.helpers.get_view_params(viewport)`. Will be computed for current viewport state if not provided. Returns: (numpy.ndarray): Image-space points of shape (num_points, 3) """ if view_params is None: view_params = get_view_params(viewport) if view_params["projection_type"] == "pinhole" or view_params["projection_type"] is None: points_image_space = project_pinhole(points, view_params) elif view_params["projection_type"] == "fisheyePolynomial": points_image_space = project_fish_eye_polynomial(points, view_params) else: raise ValueError(f"Projection type {view_params['projection_type']} is not currently supported.") return points_image_space def ftheta_distortion(ftheta, x): """ F-Theta distortion. 
""" return ftheta["poly_a"] + x * ( ftheta["poly_b"] + x * (ftheta["poly_c"] + x * (ftheta["poly_d"] + x * ftheta["poly_e"])) ) def ftheta_distortion_prime(ftheta, x): """ Derivative to f_theta_distortion. """ return ftheta["poly_b"] + x * (2 * ftheta["poly_c"] + x * (3 * ftheta["poly_d"] + x * 4 * ftheta["poly_e"])) def _ftheta_distortion_solver(ftheta, y): # Guess by linear approximation. 2 loops provides sufficient precision in working range. ratio = ftheta["width"] / 2 / ftheta["edge_fov"] guess = y * ratio for i in range(2): guessed_y = ftheta_distortion(ftheta, guess) dy = y - guessed_y dx = ftheta_distortion_prime(ftheta, guess) mask = dx != 0 guess[mask] += dy[mask] / dx[mask] guess[~mask] += dy[~mask] * ratio return guess
24371
Python
37.08125
117
0.636289
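The pinhole path in `helpers.py` chains `get_projection_matrix` → `get_view_proj_mat` → `project_pinhole` to map world points into [0, 1] image space. The following standalone sketch restates those same formulas with plain numpy so they can be checked outside Kit; the camera numbers (aperture, focal length, clipping range, identity pose) are made up for illustration and merely mimic what `get_view_params()` would read off a USD camera prim:

```python
# Standalone re-statement of the pinhole projection formulas from helpers.py.
import math
import numpy as np


def projection_matrix(fov, aspect_ratio, z_near, z_far):
    # Same construction as helpers.get_projection_matrix.
    a = -1.0 / math.tan(fov / 2)
    b = -a * aspect_ratio
    c = z_far / (z_far - z_near)
    d = z_near * z_far / (z_far - z_near)
    return np.array([[a, 0, 0, 0], [0, b, 0, 0], [0, 0, c, 1], [0, 0, d, 0]], dtype=np.float64)


# Made-up camera: identity pose, 20.955 mm horizontal aperture, 24 mm focal length,
# 16:9 aspect ratio, clipping range (0.01, 10000).
view_to_world = np.eye(4)
fov = 2 * math.atan(20.955 / (2 * 24.0))
view_proj = np.dot(np.linalg.inv(view_to_world), projection_matrix(fov, 16 / 9, 0.01, 10000.0))

# Project a point 5 units in front of the camera (camera looks down -Z).
points = np.array([[0.0, 0.0, -5.0]])
homo = np.pad(points, ((0, 0), (0, 1)), constant_values=1.0)
ndc = np.dot(homo, view_proj)
ndc = ndc / ndc[..., -1:]
ndc[..., :2] = 0.5 * (ndc[..., :2] + 1)   # x, y now in [0, 1] image space
print(ndc[..., :3])  # a centered point lands at ~(0.5, 0.5, depth)
```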
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/sensors.py
import carb import omni.usd import omni.kit from pxr import UsdGeom import numpy as np import asyncio from .. import _syntheticdata from . import helpers from .SyntheticData import * def get_synthetic_data(): sdg = SyntheticData.Get() if not sdg: SyntheticData.Initialize() sdg = SyntheticData.Get() assert sdg return sdg async def next_sensor_data_async(viewport=None, waitSimFrame: bool = False, inViewportId: int = None): """Wait for frame complete event from Kit for specific viewport. """ # next_sensor_data_async API previously passed inViewportId as ViewportHandle # This is actually incorrect and bad due to waiting on that handle, which can # change for a variety of reasons between the retrieval of the handle and # the wait on it below. if hasattr(viewport, "frame_info"): inViewportId = viewport.frame_info.get("viewport_handle") else: if inViewportId is None: if isinstance(viewport, int): inViewportId = viewport else: inViewportId = 0 viewport = None carb.log_warn( f"Depreacted usage of next_sensor_data_async with inViewportId={inViewportId}, pass the Viewport instead") app = omni.kit.app.get_app() # wait for the next pre_update call pre_f = asyncio.Future() def on_pre_event(e: carb.events.IEvent): if not pre_f.done(): swhFrameNumber = e.payload["SWHFrameNumber"] # drivesim legacy name if not swhFrameNumber: swhFrameNumber = e.payload["frameNumber"] pre_f.set_result(swhFrameNumber) sub_pre = app.get_pre_update_event_stream().create_subscription_to_pop(on_pre_event, name="omni.kit.app._pre_update_async") # wait the next frame to be rendered render_f = asyncio.Future() def on_render_event(e: carb.events.IEvent): # Grab the ViewportHandle to match from the Viewport if we have it or the legacy inViewportId cur_viewport_handle = viewport.frame_info.get("viewport_handle") if viewport else inViewportId viewId = e.payload["viewport_handle"] frameNumber = e.payload["swh_frame_number"] if ((viewId == cur_viewport_handle) and (not waitSimFrame or (pre_f.done() and (frameNumber >= pre_f.result())))): if not render_f.done(): render_f.set_result(frameNumber) sub_render = ( omni.usd.get_context() .get_rendering_event_stream() .create_subscription_to_pop_by_type( int(omni.usd.StageRenderingEventType.NEW_FRAME), on_render_event, name="omni.syntheticdata.sensors._next_sensor_data_async", order=0, ) ) MAX_NUM_SKIPPED_UPDATE = 150 num_skipped_update = 0 while (num_skipped_update < MAX_NUM_SKIPPED_UPDATE) and (not render_f.done()): await app.next_update_async() num_skipped_update += 1 if num_skipped_update >= MAX_NUM_SKIPPED_UPDATE: raise SyntheticDataException(f"waiting for next frame failed.") def enable_sensors(viewport, sensor_types): """ activate the host buffer copy nodes for given sensor NB: This function is deprecated """ for sensor_type in sensor_types: rendervar_name = SyntheticData.convert_sensor_type_to_rendervar(sensor_type.name) get_synthetic_data().activate_node_template(rendervar_name + "ExportRawArray", 0, [viewport.render_product_path]) def disable_sensors(viewport, sensor_types): """ deactivate the host buffer copy nodes for given sensor NB: This function is deprecated """ for sensor_type in sensor_types: rendervar_name = SyntheticData.convert_sensor_type_to_rendervar(sensor_type.name) get_synthetic_data().deactivate_node_template(rendervar_name + "ExportRawArray", 0, [viewport.render_product_path]) def create_or_retrieve_sensor(viewport, sensor_type): """ Retrieve a sensor for the specified viewport and sensor type. If the sensor does not exist, it is created. 
Note that the sensor will be uninitialized until a frame is rendered after the sensor is created. NB: This function is deprecated and the asynchronous version below (create_or_retrieve_sensor_async) should be used instead to ensure sensors are properly initialized by the renderer after creation Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. sensor_type (omni.syntheticdata._syntheticdata.SensorType): Type of sensor to retrieve/create. """ enable_sensors(viewport, [sensor_type]) return sensor_type async def create_or_retrieve_sensor_async(viewport, sensor_type): """ Retrieve a sensor for the specified viewport and sensor type. If the sensor does not exist, it is created. Note that the sensor will be uninitialized until a frame is rendered after the sensor is created. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. sensor_type (omni.syntheticdata._syntheticdata.SensorType): Type of sensor to retrieve/create. """ enable_sensors(viewport, [sensor_type]) await next_sensor_data_async(viewport, True) return sensor_type async def initialize_async(viewport, sensor_types): """ Initialize sensors in the list provided. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize. """ await omni.kit.app.get_app_interface().next_update_async() enable_sensors(viewport, sensor_types) await next_sensor_data_async(viewport, True) def get_sensor_array(viewport, sensor_type, elemType, elemCount, is2DArray): """ Retrieve the sensor array data from the last sensor node evaluation. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. sensor_type : Sensor type to retrieve the data from. is2DArray : True if the array to be retrieved is a 2d array """ output_names = ["outputs:data"] if is2DArray: output_names.append("outputs:width") output_names.append("outputs:height") else: output_names.append("outputs:bufferSize") rendervar_name = SyntheticData.convert_sensor_type_to_rendervar(sensor_type.name) outputs = get_synthetic_data().get_node_attributes(rendervar_name + "ExportRawArray", output_names, viewport.render_product_path) data = outputs["outputs:data"] if outputs and ("outputs:data" in outputs) else None if is2DArray: height = outputs["outputs:height"] if outputs and ("outputs:height" in outputs) else 0 width = outputs["outputs:width"] if outputs and ("outputs:width" in outputs) else 0 bufferSize = height * width * elemCount * np.dtype(elemType).itemsize else: bufferSize = outputs["outputs:bufferSize"] if outputs and ("outputs:bufferSize" in outputs) else 0 if (data is None) or (len(data) < np.dtype(elemType).itemsize): if is2DArray: shape = (0, 0, elemCount) if elemCount > 1 else (0, 0) else: shape = (0, elemCount) if elemCount > 1 else (0) return np.empty(shape, elemType) assert bufferSize == len(data) data = data.view(elemType) assert len(data) > 0 if not is2DArray: return data.reshape(data.shape[0] // elemCount, elemCount) if elemCount > 1 else data return data.reshape(height, width, elemCount) if elemCount > 1 else data.reshape(height, width) def get_rgb(viewport): """ Get RGB sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. 
Return: (numpy.ndarray): A uint8 array of shape (height, width, 4) """ return get_sensor_array(viewport, _syntheticdata.SensorType.Rgb, np.uint8, 4, True) def get_depth(viewport): """ Get Inverse Depth sensor output. *** DEPRECATED *** Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape (height, width, 1). """ return get_sensor_array(viewport, _syntheticdata.SensorType.Depth, np.float32, 1, True) def get_depth_linear(viewport): """ Get Linear Depth sensor output. *** DEPRECATED *** Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape (height, width, 1). """ return get_sensor_array(viewport, _syntheticdata.SensorType.DepthLinear, np.float32, 1, True) def get_distance_to_image_plane(viewport): """ Get distance to image plane sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape (height, width, 1). """ return get_sensor_array(viewport, _syntheticdata.SensorType.DistanceToImagePlane, np.float32, 1, True) def get_distance_to_camera(viewport): """ Get distance to camera sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape (height, width, 1). """ return get_sensor_array(viewport, _syntheticdata.SensorType.DistanceToCamera, np.float32, 1, True) def get_camera_3d_position(viewport): """ Get camera space 3d position sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape (height, width, 4). """ return get_sensor_array(viewport, _syntheticdata.SensorType.Camera3dPosition, np.float32, 4, True) def get_bounding_box_3d(viewport, parsed=False, return_corners=False, camera_frame=False, instance_mappings=None): """ Get bounding box 3D sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. parsed (bool): If True, return a single bounding box for each prim with a semantic schema. Otherwise, a bounding box will be provided for each leaf prim. include_corners (bool): if True, calculate and return the 8 corners of each 3D bounding box. Corners are returned in the order: [LDB, RDB, LUB, RUB, LDF, RDF, LUF, RUF] where L=Left, R=Right, D=Down, U=Up, B=Back, F=Front and LR: x-axis, UD: y-axis, FB: z-axis. camera_frame (bool): If True, the transforms and corners will be returned in the camera's reference frame. Otherwise, coordinates are returned with respect to the world frame. Note: The coordinate system is right-handed. instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. Return: (numpy.ndarray): A structured array with the fields: `[('instanceId', '<u4'), ('semanticId', '<u4'), ("metadata", "O"), ('x_min', '<f4'), ('y_min', '<f4'), ('z_min', '<f4'), ('x_max', '<f4'), ('y_max', '<f4'), ('z_max', '<f4'), ('transform', '<f4', (4, 4))]`. If `return_corners` is `True`, an additional field `('corners', '<f4', (8, 3)` is returned. 
""" BoundingBox3DValuesType = np.dtype( [ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<f4"), ("y_min", "<f4"), ("z_min", "<f4"), ("x_max", "<f4"), ("y_max", "<f4"), ("z_max", "<f4"), ("transform", "<f4", (4, 4)), ] ) bboxes_3d_data = get_sensor_array( viewport, _syntheticdata.SensorType.BoundingBox3D, BoundingBox3DValuesType, 1, False ) # Return immediately if empty if bboxes_3d_data.size == 0: return bboxes_3d_data if return_corners: corners = helpers.get_bbox_3d_corners(bboxes_3d_data) corners_struc = np.zeros(len(corners), dtype=[("corners", np.float32, (8, 3))]) corners_struc["corners"] = corners bboxes_3d_data = helpers._join_struct_arrays([bboxes_3d_data, corners_struc]) if parsed: if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() bboxes_3d_data = helpers.reduce_bboxes_3d(bboxes_3d_data, instance_mappings) if camera_frame: stage = omni.usd.get_context().get_stage() camera = stage.GetPrimAtPath(viewport.camera_path) current_time = omni.timeline.get_timeline_interface().get_current_time() tf_mat = np.array(UsdGeom.Camera(camera).ComputeLocalToWorldTransform(current_time)) view_matrix = np.linalg.inv(tf_mat) bboxes_3d_data["transform"] = np.einsum("ijk,kl->ijl", bboxes_3d_data["transform"], view_matrix) if return_corners: corners_homo = np.pad(bboxes_3d_data["corners"], ((0, 0), (0, 0), (0, 1)), constant_values=1.0) bboxes_3d_data["corners"] = np.einsum("ijk,kl->ijl", corners_homo, view_matrix)[..., :3] return bboxes_3d_data def get_bounding_box_2d_tight(viewport, instance_mappings=None): """ Get Bounding Box 2D Tight sensor output. Tight bounding boxes only bound the visible or unoccluded portions of an object. If an object is completely occluded, it is omitted from the returned array. Bounds units are in pixels. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. Return: (np.ndarray): A structured numpy array with the fields: `[('name', 'O'), ('semanticLabel', 'O'), ('instanceId', '<u4'), ('semanticId', '<u4'), ("metadata", "O"), ('x_min', '<i4'), ('y_min', '<i4'), ('x_max', '<i4'), ('y_max', '<i4')]` """ BoundingBox2DValuesType = np.dtype( [ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ] ) bboxes_2d_data = get_sensor_array( viewport, _syntheticdata.SensorType.BoundingBox2DTight, BoundingBox2DValuesType, 1, is2DArray=False ) if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() bboxes_2d_data = helpers.reduce_bboxes_2d(bboxes_2d_data, instance_mappings) return bboxes_2d_data def get_bounding_box_2d_loose(viewport, instance_mappings=None): """ Get Bounding Box 2D Loose sensor output. Loose bounding boxes bound the full extents of an object, even if totally occluded. Bounds units are in pixels. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. 
Return: (np.ndarray): A structured numpy array with the fields: `[('name', 'O'), ('semanticLabel', 'O'), ('instanceId', '<u4'), ('semanticId', '<u4'), ("metadata", "O"), ('x_min', '<i4'), ('y_min', '<i4'), ('x_max', '<i4'), ('y_max', '<i4')]` """ BoundingBox2DValuesType = np.dtype( [ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ] ) bboxes_2d_data = get_sensor_array( viewport, _syntheticdata.SensorType.BoundingBox2DLoose, BoundingBox2DValuesType, 1, is2DArray=False ) if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() bboxes_2d_data = helpers.reduce_bboxes_2d(bboxes_2d_data, instance_mappings) return bboxes_2d_data def get_semantic_segmentation(viewport): """ Get semantic segmentation sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: output (np.ndarray): A uint32 array of shape `(height, width)`. """ return get_sensor_array(viewport, _syntheticdata.SensorType.SemanticSegmentation, np.uint32, 1, True) def get_instance_segmentation(viewport, parsed=False, return_mapping=False, instance_mappings=None): """ Get instance segmentation sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. parsed (bool): If True, map each leaf prim to a parent with a semantic schema applied. Otherwise, each leaf prim is returned as a unique instance. return_mapping (bool): Whether to also return an array mapping instance IDs to their corresponding prims. instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. Return: output (np.ndarray): A uint32 array of shape `(height, width)`. mapping (list): (optional) If `return_mapping` is True, there will be an additional array containing the mapping of instance IDs to their corresponding prims. Each row corresponds to a prim with a SemanticSchema of Type="class". The mapping is provided in the following format: (ID (int), path (str), semanticID (int), semanticLabel (str), descendentIDs (list of int)) """ instance_data = get_sensor_array(viewport, _syntheticdata.SensorType.InstanceSegmentation, np.uint32, 1, True) if parsed: if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() if len(instance_mappings) == 0: return instance_data instances_list = [(im[0], im[4]) for im in instance_mappings][::-1] if len(instances_list) == 0: carb.log_warn("[omni.syntheticdata.visualize] No instances found.") return instance_data max_instance_id_list = max([max(il[1]) for il in instances_list]) max_instance_id = instance_data.max() lut = np.zeros(max(max_instance_id, max_instance_id_list) + 1, dtype=np.uint32) # todo this avoids that overlapping instances are mapped to the same id, but it's not clear if this is the correct way for uid, il in instances_list: for j in il: if lut[j] == 0: lut[j] = uid instance_data = np.take(lut, instance_data) if return_mapping: if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() return instance_data, instance_mappings else: return instance_data def get_normals(viewport): """ Get Normals sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape `(height, width, 3)` with values in the range of `(-1., 1.)`. 
""" normals = get_sensor_array(viewport, _syntheticdata.SensorType.Normal, np.float32, 4, True)[..., :3] # Return (0, 0, 0) for background pixels # HACK: background is returned as (-0., -0., 1.), so use negative sign of 0 as background indicator bkg_mask = np.all(normals == np.array([0.0, 0.0, -1.0]), axis=-1) & np.all( np.copysign(np.ones_like(normals), normals) == -np.ones(3), axis=-1 ) normals[bkg_mask] = 0.0 return normals def get_motion_vector(viewport): """ Get Motion Vector sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: TOCHECK : this does not describe what the legacy interface was returning (numpy.ndarray): A float32 array of shape `(height, width, 3)` with values in the range of `(-1., 1.)`. """ return get_sensor_array(viewport, _syntheticdata.SensorType.MotionVector, np.float32, 4, True) def get_cross_correspondence(viewport): """ Get Cross Correspondence sensor output. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Return: (numpy.ndarray): A float32 array of shape `(height, width, 4)` with values in the range of `(-1., 1.)`. """ return get_sensor_array(viewport, _syntheticdata.SensorType.CrossCorrespondence, np.float32, 4, True) def get_occlusion(viewport, parsed=False, instance_mappings=None): """Get Occlusion values. Returns occlusion of instances as a ratio from 0. to 1. Note that this sensor is only applied to leaf prims. For example, if an asset is composed of multiple sub-meshes, an occlusion value will be calculated for each sub-mesh. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. parsed (bool): If True, map occlusion values to prims with a semantic class. If the mapped prim has more than one child with an occlusion value, a naive average will be performed. Note that this value will likely not be accurate. instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. Returns: (numpy.ndarray): A structured numpy array with the fields: [('instanceId', '<u4'), ('semanticId', '<u4'), ('occlusionRatio', '<f4')], where occlusion ranges from 0 (not occluded) to 1 (fully occluded). If `parsed` is True, the additional fields [('name', 'O'), ('semanticLabel', 'O'), ("metadata", "O")] are returned. """ OcclusionType = np.dtype([("instanceId", "<u4"), ("semanticId", "<u4"), ("occlusionRatio", "<f4")]) data = get_sensor_array(viewport, _syntheticdata.SensorType.Occlusion, OcclusionType, 1, is2DArray=False) if parsed: if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() return helpers.reduce_occlusion(data, instance_mappings) return data def get_semantic_data(instance_mappings=None): """ Get Semantic Data. Args: instance_mappings (numpy.ndarray, optional): A structured array returned by `helpers.get_instance_mappings`. If not provided (default), a new instance mappings will be computed. 
Returns: (numpy.ndarray): A structured numpy array with the fields: [('uniqueId', '<i4'), ('name', 'O'), ('semanticLabel', 'O'), ('metadata', 'O')] """ if instance_mappings is None: instance_mappings = helpers.get_instance_mappings() output = [] for row in instance_mappings: output.append((row[0], row[1], row[3], row[5])) output = np.array(output, dtype=[("uniqueId", np.int32), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O")]) return output def get_occlusion_quadrant(viewport, return_bounding_boxes=False): """ Get Occlusion Quadrant. Uses loose and tight bounding boxes to return the occluded quadrant of all prims with semantic class. Note that the label "fully-visible" specifies that the full height and width of the prim's bounds can be determined, and the prim may still be partially occluded. Args: viewport (opaque Viewport instance): Viewport from which to retrieve/create sensor. Returns: (numpy.ndarray): A structured numpy array with the fields: [('name', 'O'), ('semanticLabel', 'O'), ('instanceId', '<u4'), ('semanticId', '<u4'), ('occlusion_quadrant', 'O')], where occlusion_quadrant is a string from ['bottom', 'top', 'right', 'left', 'bottom-right', 'bottom-left', 'top-right', 'top-left', 'fully-visible', 'fully-occluded']. If `return_bounding_boxes` is True, the fields `x_min`, `y_min`, `x_max`, `y_max` for with suffixes `_bbox2d_tight` and `_bbox2d_loose` will be returned as well. """ tight_data = get_bounding_box_2d_tight(viewport) loose_data = get_bounding_box_2d_loose(viewport) merged_data = helpers.merge_sensors(bounding_box_2d_tight=tight_data, bounding_box_2d_loose=loose_data) is_fully_occluded = merged_data["x_min_bbox2d_tight"] == -1 is_occluded_left = (merged_data["x_min_bbox2d_tight"] > merged_data["x_min_bbox2d_loose"]) & ~is_fully_occluded is_occluded_right = (merged_data["x_max_bbox2d_tight"] < merged_data["x_max_bbox2d_loose"]) & ~is_fully_occluded is_occluded_top = (merged_data["y_min_bbox2d_tight"] > merged_data["y_min_bbox2d_loose"]) & ~is_fully_occluded is_occluded_bottom = (merged_data["y_max_bbox2d_tight"] < merged_data["y_max_bbox2d_loose"]) & ~is_fully_occluded is_occluded_bottom_left = is_occluded_bottom & is_occluded_left is_occluded_bottom_right = is_occluded_bottom & is_occluded_right is_occluded_top_right = is_occluded_top & is_occluded_right is_occluded_top_left = is_occluded_top & is_occluded_left label = np.array(["fully-visible"] * len(merged_data), dtype=[("occlusion_quadrant", "O")]) label[is_occluded_top] = "top" label[is_occluded_bottom] = "bottom" label[is_occluded_right] = "right" label[is_occluded_left] = "left" label[is_occluded_bottom_left] = "bottom-left" label[is_occluded_bottom_right] = "bottom-right" label[is_occluded_top_left] = "top-left" label[is_occluded_top_right] = "top-right" label[is_fully_occluded] = "fully-occluded" if return_bounding_boxes: output = helpers._join_struct_arrays([merged_data, label]) else: output = helpers._join_struct_arrays( [merged_data[["uniqueId", "name", "semanticLabel", "metadata", "instanceIds"]], label] ) return output
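The following is a minimal usage sketch (not part of the original module) showing how a few of the sensor helpers above can be queried from a running Omniverse Kit / Isaac Sim session. It assumes the omni.syntheticdata extension is loaded and that at least one legacy viewport window exists; note that sensor outputs may be empty until a frame has rendered after the sensors are first created.

# Sketch only: query parsed instance segmentation, occlusion and occlusion quadrants.
import numpy as np

import omni.kit.viewport_legacy
from omni.syntheticdata import sensors

# Grab the first legacy viewport window (assumes at least one exists).
vp_iface = omni.kit.viewport_legacy.get_viewport_interface()
viewport = vp_iface.get_viewport_window(vp_iface.get_instance_list()[0])

# Instance segmentation mapped to prims with a semantic schema, plus the
# instance mappings used for the parsing.
instances, mappings = sensors.get_instance_segmentation(
    viewport, parsed=True, return_mapping=True
)
print("instance ids in frame:", np.unique(instances))

# Per-prim occlusion ratios, reduced to semantically labelled parents.
occlusion = sensors.get_occlusion(viewport, parsed=True)

# Occluded quadrant of each semantic prim, derived from tight/loose 2D boxes.
quadrants = sensors.get_occlusion_quadrant(viewport)
for row in quadrants:
    print(row["semanticLabel"], row["occlusion_quadrant"])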
24,981
Python
39.621138
120
0.686922
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/visualize.py
import random import colorsys import numpy as np import carb from PIL import Image, ImageDraw from . import helpers from . import sensors # Colorize Helpers def colorize_distance(image_data): height, width = image_data.shape[:2] colorized_image = np.zeros((height, width, 4)) image_data[image_data == 0.0] = 1e-5 image_data = np.clip(image_data, 0, 255) image_data -= np.min(image_data) image_data /= np.max(image_data) + 1e-8 colorized_image[:, :, 0] = image_data colorized_image[:, :, 1] = image_data colorized_image[:, :, 2] = image_data colorized_image[:, :, 3] = 1 colorized_image = (colorized_image * 255).astype(int) return colorized_image def colorize_segmentation(segmentation_image): segmentation_ids = np.unique(segmentation_image) num_colours = len(segmentation_ids) # This is to avoid generating lots of colours for semantic classes not in frame lut = np.array([segmentation_ids, list(range(num_colours))]) new_segmentation_image = lut[1, np.searchsorted(lut[0, :], segmentation_image)] colours = np.array([[0.0] * 4] + random_colours(num_colours)) segmentation_image_rgba = (colours[new_segmentation_image] * 255).astype(int) return segmentation_image_rgba def colorize_semantic_from_instance(instance_image, instance_mappings): if len(instance_mappings) == 0: blank = np.zeros_like(instance_image) return colorize_segmentation(blank) semantic_instances = {} for im in instance_mappings[::-1]: semantic_instances.setdefault(im["semanticId"], []).extend(im["instanceIds"]) max_semantic_instance_id = np.max([max(il) for _, il in semantic_instances.items()]) max_instance_id = instance_image.max() lut = np.zeros(max(max_semantic_instance_id, max_instance_id) + 1, dtype=np.uint32) for i, (_, il) in enumerate(semantic_instances.items()): lut[np.array(il)] = i + 1 # +1 to differentiate from background re_instanced = np.take(lut, instance_image) colours = np.array([[0.0] * 4] + random_colours(len(semantic_instances))) instance_rgba = (colours[re_instanced] * 255).astype(np.uint8) return instance_rgba def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb): semantic_id_list = [] bbox_2d_list = [] for bbox_2d in bboxes_2d_data: if bbox_2d["semanticId"] > 0: semantic_id_list.append(bbox_2d["semanticId"]) bbox_2d_list.append(bbox_2d) semantic_id_list_np = np.unique(np.array(semantic_id_list)) color_list = random_colours(len(semantic_id_list_np.tolist())) img = Image.fromarray(bboxes_2d_rgb) draw = ImageDraw.Draw(img) for bbox_2d in bbox_2d_list: index = np.where(semantic_id_list_np == bbox_2d["semanticId"])[0][0] bbox_color = color_list[index] draw.rectangle( xy=[(bbox_2d["x_min"], bbox_2d["y_min"]), (bbox_2d["x_max"], bbox_2d["y_max"])], outline=( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(255 * bbox_color[3]), ), width=4, ) return np.asarray(img) def colorize_bboxes_3d(bboxes_3d_corners, rgb): """bboxes_3d_corners: in the local camera frame""" height, width = rgb.shape[:2] # FILTER BOXES mask_uv = ~np.any(np.all(bboxes_3d_corners < 0, axis=1), axis=1) & ~np.any( np.all(bboxes_3d_corners > 1, axis=1), axis=1 ) mask_z = np.all(np.all(bboxes_3d_corners[..., 2:] >= 0, axis=1), axis=1) & np.all( np.all(bboxes_3d_corners[..., 2:] <= 1, axis=1), axis=1 ) bboxes_3d_corners = bboxes_3d_corners[mask_uv & mask_z] bboxes_3d_corners = bboxes_3d_corners[..., :2].reshape(-1, 8, 2) * np.array([[width, height]]) face_idx_list = [[0, 1, 3, 2], [4, 5, 7, 6], [2, 3, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7, 5]] colours = random_colours(len(face_idx_list)) master_overlay = np.zeros_like(rgb) 
master_overlay_img = Image.fromarray(master_overlay) for face_idxs, colour in zip(face_idx_list, colours): overlay = Image.new("RGBA", (width, height)) draw = ImageDraw.Draw(overlay) colour = [int(c * 255) for c in colour] for p in bboxes_3d_corners: draw.polygon([tuple(xy) for xy in p[face_idxs]], fill=tuple([*colour[:3], 120])) draw.line([tuple(xy) for xy in p[face_idxs]], width=3, fill=tuple(colour)) master_overlay_img = Image.alpha_composite(master_overlay_img, overlay) rgb_img = Image.fromarray(rgb) rgb_img = Image.alpha_composite(rgb_img, master_overlay_img) return np.asarray(rgb_img) def random_colours(N): """ Generate random colors. Generate visually distinct colours by linearly spacing the hue channel in HSV space and then convert to RGB space. """ colour_rand = random.Random(2018) # Produces consistent random colours start = colour_rand.random() hues = [(start + i / N) % 1.0 for i in range(N)] colours = [list(colorsys.hsv_to_rgb(h, 0.9, 1.0)) + [1.0] for i, h in enumerate(hues)] colour_rand.shuffle(colours) return colours def get_bbox2d_tight(viewport): rgb_data = sensors.get_rgb(viewport) bboxes_2d_data = sensors.get_bounding_box_2d_tight(viewport) bboxes_2d_rgb = colorize_bboxes(bboxes_2d_data, rgb_data) return bboxes_2d_rgb def get_bbox2d_loose(viewport): rgb_data = sensors.get_rgb(viewport) bboxes_2d_data = sensors.get_bounding_box_2d_loose(viewport) bboxes_2d_rgb = colorize_bboxes(bboxes_2d_data, rgb_data) return bboxes_2d_rgb def get_normals(viewport): normals = sensors.get_normals(viewport) background_mask = np.sum(normals, axis=-1) == 0.0 # normalize from [-1, 1] to [0, 255] normals = (normals + 1.0) / 2 * 255 # Set background alpha to 0. normals = np.pad(normals, ((0, 0), (0, 0), (0, 1)), constant_values=255) normals[background_mask, 3] = 0.0 return normals.astype(np.uint8) def get_motion_vector(viewport): motion_vector = sensors.get_motion_vector(viewport) _min, _max = motion_vector.min(), motion_vector.max() motion_vector = (motion_vector - _min) / (_max - _min) * 255.0 return motion_vector.astype(np.uint8) def get_cross_correspondence(viewport): cross_correspondence = sensors.get_cross_correspondence(viewport) # normalize from [-1, 1] to [0, 255] # invalid values of -1 convert to 0 cross_correspondence = ((cross_correspondence + 1.0) / 2) * 255 return cross_correspondence.astype(np.uint8) def get_instance_segmentation(viewport, mode=None): if not mode: carb.log_info('[omni.syntheticdata.visualize] No semantic mode provided, defaulting to "parsed"') mode = "parsed" if mode == "raw": instance_data = sensors.get_instance_segmentation(viewport, parsed=False) elif mode == "parsed": instance_data = sensors.get_instance_segmentation(viewport, parsed=True) else: raise NotImplementedError instance_image = colorize_segmentation(instance_data) return instance_image def get_semantic_segmentation(viewport, mode=""): if not mode: carb.log_info('[omni.syntheticdata.visualize] No semantic mode provided, defaulting to "parsed"') mode = "instance_map" # s = time.time() if mode == "raw": semantic_data = sensors.get_semantic_segmentation(viewport) semantic_image = colorize_segmentation(semantic_data) elif mode == "parsed": instance_data = sensors.get_instance_segmentation(viewport) instance_mappings = helpers.get_instance_mappings() semantic_image = colorize_semantic_from_instance(instance_data, instance_mappings) else: raise NotImplementedError return semantic_image def get_bbox3d(viewport, mode="parsed"): rgb_data = sensors.get_rgb(viewport) bbox_3d_data = 
sensors.get_bounding_box_3d(viewport, parsed=(mode == "parsed"), return_corners=True) if bbox_3d_data.size == 0: carb.log_info("[omni.syntheticdata.visualize] No 3D bounding boxes found.") return rgb_data bbox_3d_corners = bbox_3d_data["corners"] projected_corners = helpers.world_to_image(bbox_3d_corners.reshape(-1, 3), viewport).reshape(-1, 8, 3) bboxes_3d_rgb = colorize_bboxes_3d(projected_corners, rgb_data) return bboxes_3d_rgb # *** DEPRECATED *** def get_depth(viewport, mode="linear"): if mode == "linear": depth_data = sensors.get_depth_linear(viewport) depth_data[depth_data == depth_data.max()] = 0.0 elif mode == "inverse_depth": depth_data = sensors.get_depth(viewport) else: raise ValueError(f"Mode {mode} is invalid. Choose between " "['linear', 'inverse_depth'].") return colorize_distance(depth_data.squeeze()) def get_distance(viewport, mode="image_plane"): if mode == "image_plane": distance_data = sensors.get_distance_to_image_plane(viewport) distance_data[distance_data == distance_data.max()] = 0.0 elif mode == "camera": distance_data = sensors.get_distance_to_camera(viewport) distance_data[distance_data == distance_data.max()] = 0.0 else: raise ValueError(f"Mode {mode} is invalid. Choose between " "['image_plane', 'camera'].") return colorize_distance(distance_data.squeeze())
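A short, hypothetical sketch of how the visualization helpers above can be used to dump colorized overlays to disk. It assumes a running Kit session with omni.syntheticdata loaded and a legacy viewport; the output file names are illustrative only.

# Sketch only: save a few colorized sensor overlays as PNG files.
import numpy as np
from PIL import Image

import omni.kit.viewport_legacy
from omni.syntheticdata import visualize

vp_iface = omni.kit.viewport_legacy.get_viewport_interface()
viewport = vp_iface.get_viewport_window(vp_iface.get_instance_list()[0])

# Each helper returns an RGBA image as a numpy array.
outputs = {
    "bbox2d_tight.png": visualize.get_bbox2d_tight(viewport),
    "semantic.png": visualize.get_semantic_segmentation(viewport, mode="parsed"),
    "normals.png": visualize.get_normals(viewport),
    "distance.png": visualize.get_distance(viewport, mode="image_plane"),
}
for name, img in outputs.items():
    Image.fromarray(np.asarray(img).astype(np.uint8)).save(name)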
9,390
Python
37.966805
106
0.646858
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/extension.py
from pxr import Tf, Trace, Usd import carb.settings import omni.kit import omni.ext # legacy extension export from . import helpers from . import visualize from . import sensors from .SyntheticData import * EXTENSION_NAME = "Synthetic Data" _extension_instance = None class Extension(omni.ext.IExt): def __init__(self): self.__viewport_legacy_event_sub = None self.__viewport_legacy_close = None self.__extension_loaded = None self.__menu_container = None def __menubar_core_loaded(self): from .menu import SynthDataMenuContainer self.__menu_container = SynthDataMenuContainer() def __menubar_core_unloaded(self): if self.__menu_container: self.__menu_container.destroy() self.__menu_container = None def __viewport_legcy_loaded(self): from .viewport_legacy import ViewportLegacy self.__viewport_legacy_event_sub = ViewportLegacy.create_update_subscription() self.__viewport_legacy_close = ViewportLegacy.close_viewports def __viewport_legcy_unloaded(self): if self.__viewport_legacy_event_sub: self.__viewport_legacy_event_sub = None if self.__viewport_legacy_close: self.__viewport_legacy_close() self.__viewport_legacy_close = None def on_startup(self, ext_id): global _extension_instance _extension_instance = self carb.log_info("[omni.syntheticdata] SyntheticData startup") manager = omni.kit.app.get_app().get_extension_manager() self.__extension_loaded = ( manager.subscribe_to_extension_enable( lambda _: self.__menubar_core_loaded(), lambda _: self.__menubar_core_unloaded(), ext_name="omni.kit.viewport.menubar.core", hook_name=f"{ext_id} omni.kit.viewport.menubar.core listener", ), manager.subscribe_to_extension_enable( lambda _: self.__viewport_legcy_loaded(), lambda _: self.__viewport_legcy_unloaded(), ext_name="omni.kit.window.viewport", hook_name=f"{ext_id} omni.kit.window.viewport listener", ) ) self._stage_event_sub = ( omni.usd.get_context() .get_stage_event_stream() .create_subscription_to_pop(self._on_stage_event, name="omni.syntheticdata stage update") ) self._usd_event_listener = None # self._usd_event_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_usd_changed, None) # force settings settings = carb.settings.get_settings() settings.set("/rtx/hydra/enableSemanticSchema", True) # TODO : deprecate if settings.get_as_bool("/app/asyncRendering") or settings.get_as_bool("/app/asyncRenderingLowLatency"): carb.log_warn(f"SyntheticData extension is not supporting asyncRendering") stageHistoryFrameCount = settings.get_as_int("/app/settings/flatCacheStageFrameHistoryCount") if not stageHistoryFrameCount or (int(stageHistoryFrameCount) < 3): carb.log_warn(f"SyntheticData extension needs at least a stageFrameHistoryCount of 3") if settings.get_as_bool("/rtx/gatherColorToDisplayDevice") and settings.get_as_bool("/renderer/multiGpu/enabled"): carb.log_error("SyntheticData extension does not support /rtx/gatherColorToDisplayDevice=true with multiple GPUs.") SyntheticData.Initialize() # @Trace.TraceFunction # def _on_usd_changed(self, notice, stage): # if notice.GetResyncedPaths(): # self._viewports = {} def _on_stage_event(self, event): if event.type == int(omni.usd.StageEventType.CLOSING): if self.__viewport_legacy_close: self.__viewport_legacy_close() # FIXME : this cause rendering issues (added for unittests) SyntheticData.Get().reset(False) # this is fishy but if we reset the graphs in the closing event the rendering is not happy elif event.type == int(omni.usd.StageEventType.OPENED): SyntheticData.Get().reset(False) def on_shutdown(self): global _extension_instance _extension_instance 
= None
        self.__extension_loaded = None
        self._stage_event_sub = None
        self.__viewport_legcy_unloaded()
        self.__menubar_core_unloaded()
        if self._usd_event_listener:
            self._usd_event_listener.Revoke()
            self._usd_event_listener = None
        SyntheticData.Reset()

    def get_name(self):
        return EXTENSION_NAME

    @staticmethod
    def get_instance():
        return _extension_instance
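As a hedged illustration, the same start-up checks performed in on_startup above can be replicated from a user script before recording synthetic data. The settings keys and thresholds below are the ones read and written by the extension; the wrapper function itself is hypothetical.

# Sketch only: pre-flight settings checks mirroring Extension.on_startup.
import carb.settings

def check_syntheticdata_settings():
    settings = carb.settings.get_settings()

    # Semantic schema must be enabled for semantic sensors to produce output.
    settings.set("/rtx/hydra/enableSemanticSchema", True)

    # Async rendering is not supported by the synthetic data pipeline.
    if settings.get_as_bool("/app/asyncRendering") or settings.get_as_bool("/app/asyncRenderingLowLatency"):
        carb.log_warn("asyncRendering is enabled: omni.syntheticdata does not support it")

    # The pipeline needs at least 3 frames of stage history.
    history = settings.get_as_int("/app/settings/flatCacheStageFrameHistoryCount")
    if not history or int(history) < 3:
        carb.log_warn("flatCacheStageFrameHistoryCount should be >= 3 for omni.syntheticdata")

check_syntheticdata_settings()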
4,736
Python
37.201613
127
0.635769
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/SyntheticData.py
from sqlite3 import connect from pxr import Sdf, Usd, UsdRender import carb import omni.graph.core as og import omni.usd import omni.kit from dataclasses import dataclass, field """ SyntheticData class is the prototype interface implementation (will be eventually integrated in SynthetiData C++ interface ) - contains the definition of all omnigraphs - expose methods for the user to - add / remove custom nodes to graphs """ _sdg_iface = None class SyntheticDataException(Exception): def __init__(self, message="error"): self.message = message super().__init__(self.message) class SyntheticDataStage: # stage is set automatically from the node connections' stages AUTO = -1 # global simulation : node scheduled in the simulation graph SIMULATION = 0 # prerender : node scheduled in the prerender graph PRE_RENDER = 1 # postrender : node scheduled in the postrender graph for a specific renderproduct POST_RENDER = 2 # on demand : node scheduled in the postprocess graph ON_DEMAND = 3 class SyntheticData: _graphPathRoot = "/Render" _graphName = "SDGPipeline" _simulationGraphPath = "Simulation/" + _graphName _preRenderGraphPath = "PreRender/" + _graphName _postRenderGraphPath = "PostRender/" + _graphName _postProcessGraphPath = "PostProcess/" + _graphName _rendererTemplateName = "GpuInteropEntry" _renderVarBuffSuffix = "buff" _renderVarHostSuffix = "host" _renderVarToHostTemplateName = "PostRenderVarToHost" _renderProductAttributeName = "inputs:renderProductPath" _instanceMappingCtrl = "InstanceMappingPre" _defaultSemanticFilterName = "DefaultSemanticFilter" # graph registry : contains node templates used to construct a graph # node template name / id # list containing : # - node type # - list of template dependencies description : # - connection node template name or renderVar name # - index of the render product in the list provided during activation # - dictionnary of inputs / outputs mapping # - node attributes name/value dictionnary to be set during the activation # @dataclass class NodeConnectionTemplate: node_template_id: str render_product_idxs: tuple = (0,) attributes_mapping: dict = field(default_factory=dict) @dataclass class NodeTemplate: pipeline_stage: int node_type_id: str connections: list = field(default_factory=list) attributes: dict = field(default_factory=dict) _ogn_templates_registry = { # --- Camera "RenderProductCameraPrimPath": NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdSimRenderProductCamera" ), "PostRenderProductCamera": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdRenderProductCamera", [ NodeConnectionTemplate( _rendererTemplateName, attributes_mapping={ "outputs:rp": "inputs:renderResults", "outputs:swhFrameNumber": "inputs:swhFrameNumber", "outputs:exec": "inputs:exec" }), NodeConnectionTemplate("RenderProductCameraPrimPath", attributes_mapping={ "outputs:exec": "inputs:exec"}) ] ), # --- GPUInterop _rendererTemplateName: NodeTemplate(SyntheticDataStage.POST_RENDER, "omni.graph.nodes.GpuInteropRenderProductEntry"), # --- InstanceMapping _instanceMappingCtrl : NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdSimInstanceMapping", attributes={"inputs:needTransform": False, "inputs:semanticFilterPredicate":"*:*"} ), _defaultSemanticFilterName: NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdSemanticFilter", attributes={"inputs:name": "default", "inputs:predicate": "*:*"} ), "InstanceMappingTransforms": NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdSimInstanceMapping", [ 
NodeConnectionTemplate(_instanceMappingCtrl, render_product_idxs=()) ], {"inputs:needTransform": True} ), "InstanceMappingPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostInstanceMapping", [ NodeConnectionTemplate("InstanceIdTokenMapSD"), NodeConnectionTemplate(_instanceMappingCtrl, attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()) ], {}, ), # --- NoOp node used to expose the semantic transforms renderVars "InstanceMappingPostWithTransforms": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdNoOp", [ NodeConnectionTemplate("InstanceMappingTransforms", attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()), NodeConnectionTemplate("InstanceMappingPost", attributes_mapping={"outputs:exec": "inputs:exec"}) ], {}, ), # --- BoundingBoxes "BoundingBox2DTightReduction": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostSemanticBoundingBox", [ NodeConnectionTemplate("BoundingBox2DTightSD"), NodeConnectionTemplate("InstanceMappingPost") ], {"inputs:renderVar": "BoundingBox2DTightSD"}, ), "BoundingBox2DLooseReduction": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostSemanticBoundingBox", [ NodeConnectionTemplate("BoundingBox2DLooseSD"), NodeConnectionTemplate("InstanceMappingPost") ], {"inputs:renderVar": "BoundingBox2DLooseSD"}, ), "BoundingBox3DReduction": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostSemanticBoundingBox", [ NodeConnectionTemplate("BoundingBox3DSD"), NodeConnectionTemplate("InstanceMappingTransforms", attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()), NodeConnectionTemplate("InstanceMappingPost") ], {"inputs:renderVar": "BoundingBox3DSD"}, ), "BoundingBox3DCameraProjection": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostSemantic3dBoundingBoxCameraProjection", [ NodeConnectionTemplate("BoundingBox3DSD"), NodeConnectionTemplate("BoundingBox3DReduction"), NodeConnectionTemplate("PostRenderProductCamera"), NodeConnectionTemplate("InstanceMappingTransforms", attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()), NodeConnectionTemplate("InstanceMappingPost") ] ), "BoundingBox3DFilter": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostSemantic3dBoundingBoxFilter", [ NodeConnectionTemplate("BoundingBox3DSD"), NodeConnectionTemplate("BoundingBox3DCameraProjection"), NodeConnectionTemplate("PostRenderProductCamera"), NodeConnectionTemplate("BoundingBox3DReduction"), NodeConnectionTemplate("InstanceMappingPost") ] ), # --- PostRenderVarDisplay "LdrColorDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("LdrColorSD")], {"inputs:renderVar": "LdrColorSD"}, ), "DistanceToImagePlaneDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("DistanceToImagePlaneSD")], { "inputs:renderVar": "DistanceToImagePlaneSD", "inputs:parameters": [0.0, 100.0, 0.0, 0.0] }, ), "DistanceToCameraDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("DistanceToCameraSD")], { "inputs:renderVar": "DistanceToCameraSD", "inputs:parameters": [0.0, 100.0, 0.0, 0.0] }, ), "Camera3dPositionDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", 
[NodeConnectionTemplate("Camera3dPositionSD")], {"inputs:renderVar": "Camera3dPositionSD"}, ), "NormalDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("NormalSD")], {"inputs:renderVar": "NormalSD"}, ), "CrossCorrespondenceDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("CrossCorrespondenceSD")], {"inputs:renderVar": "CrossCorrespondenceSD"}, ), "TargetMotionDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("TargetMotionSD")], { "inputs:renderVar": "TargetMotionSD", "inputs:parameters": [1.0, 5.0, 0.0, 0.0] }, ), "InstanceIdSegmentationDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("InstanceSegmentationSD")], {"inputs:renderVar": "InstanceSegmentationSD", "inputs:renderVarDisplay": "RawInstanceSegmentationSDDisplay", "inputs:mode": "segmentationMapMode"}, ), "InstanceSegmentationDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [ NodeConnectionTemplate("InstanceSegmentationSD"), NodeConnectionTemplate("InstanceMappingPost") ], {"inputs:renderVar": "InstanceSegmentationSD", "inputs:mode": "semanticPathMode"}, ), "SemanticSegmentationDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [ NodeConnectionTemplate("InstanceSegmentationSD"), NodeConnectionTemplate("InstanceMappingPost"), ], {"inputs:renderVar": "InstanceSegmentationSD", "inputs:renderVarDisplay": "SemanticSegmentationSDDisplay", "inputs:mode": "semanticLabelMode"}, ), "SemanticIdSegmentationDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [NodeConnectionTemplate("SemanticSegmentationSD")], {"inputs:renderVar": "SemanticSegmentationSD", "inputs:renderVarDisplay": "RawSemanticSegmentationSDDisplay", "inputs:mode": "segmentationMapMode"}, ), "BoundingBox2DTightDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [ NodeConnectionTemplate("LdrColorSD"), NodeConnectionTemplate("InstanceMappingPost"), NodeConnectionTemplate("BoundingBox2DTightReduction"), ], {"inputs:renderVar": "LdrColorSD", "inputs:renderVarDisplay": "BoundingBox2DTightSDDisplay", "inputs:mode": "semanticBoundingBox2dMode"}, ), "BoundingBox2DLooseDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [ NodeConnectionTemplate("LdrColorSD"), NodeConnectionTemplate("InstanceMappingPost"), NodeConnectionTemplate("BoundingBox2DLooseReduction"), ], {"inputs:renderVar": "LdrColorSD", "inputs:renderVarDisplay": "BoundingBox2DLooseSDDisplay", "inputs:mode": "semanticBoundingBox2dMode"}, ), "BoundingBox3DDisplayPost": NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarDisplayTexture", [ NodeConnectionTemplate("LdrColorSD"), NodeConnectionTemplate("Camera3dPositionSD"), NodeConnectionTemplate("PostRenderProductCamera"), NodeConnectionTemplate("InstanceMappingPost"), NodeConnectionTemplate("BoundingBox3DFilter"), NodeConnectionTemplate("BoundingBox3DCameraProjection"), NodeConnectionTemplate("BoundingBox3DReduction"), ], { "inputs:renderVar": "LdrColorSD", "inputs:renderVarDisplay": "BoundingBox3DSDDisplay", 
"inputs:mode": "semanticBoundingBox3dMode", "inputs:parameters": [0.0, 5.0, 0.027, 0.27] }, ), # --- PostProcess "PostProcessDispatcher": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdOnNewFrame" ), "PostProcessDispatch": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdOnNewRenderProductFrame", [NodeConnectionTemplate("PostProcessDispatcher", render_product_idxs=())] ), "PostProcessRenderProductCamera": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdRenderProductCamera", [ NodeConnectionTemplate("PostProcessDispatch"), NodeConnectionTemplate("RenderProductCameraPrimPath", attributes_mapping={ "outputs:exec": "inputs:exec"}), NodeConnectionTemplate(_rendererTemplateName, attributes_mapping={ "outputs:exec": "inputs:exec"}) # provide the renderResults ] ), "InstanceMapping": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdInstanceMapping", [ NodeConnectionTemplate("PostProcessDispatch"), NodeConnectionTemplate("InstanceMappingPost", attributes_mapping={"outputs:exec": "inputs:exec"}) ] ), "InstanceMappingWithTransforms": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdInstanceMapping", [ NodeConnectionTemplate("PostProcessDispatch"), NodeConnectionTemplate("InstanceMappingTransforms", attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()), NodeConnectionTemplate("InstanceMappingPost", attributes_mapping={"outputs:exec": "inputs:exec"}) ] ), "InstanceMappingPtr": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdInstanceMappingPtr", [ NodeConnectionTemplate("PostProcessDispatch"), NodeConnectionTemplate("InstanceMappingPost", attributes_mapping={"outputs:exec": "inputs:exec"}) ] ), "InstanceMappingPtrWithTransforms": NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdInstanceMappingPtr", [ NodeConnectionTemplate("PostProcessDispatch"), NodeConnectionTemplate("InstanceMappingTransforms", attributes_mapping={"outputs:exec": "inputs:exec"}, render_product_idxs=()), NodeConnectionTemplate("InstanceMappingPost", attributes_mapping={"outputs:exec": "inputs:exec"}) ] ) } # set of rendervars associated to the node exposing them : # - renderVar generated by the renderer are exposed by the GpuInteropEntry # - others renderVars are generated by some postRender nodes # FIXME : the list of renderer rendervars should be queried from the renderer _ogn_rendervars = { # renderer renderVars "LdrColorSD": _rendererTemplateName, "Camera3dPositionSD": _rendererTemplateName, "DistanceToImagePlaneSD": _rendererTemplateName, "DistanceToCameraSD": _rendererTemplateName, "DepthSD": _rendererTemplateName, "DepthLinearSD": _rendererTemplateName, "InstanceSegmentationSD": _rendererTemplateName, "SemanticSegmentationSD": _rendererTemplateName, "NormalSD": _rendererTemplateName, "TargetMotionSD": _rendererTemplateName, "BoundingBox2DTightSD": _rendererTemplateName, "BoundingBox2DLooseSD": _rendererTemplateName, "BoundingBox3DSD": _rendererTemplateName, "OcclusionSD": _rendererTemplateName, "TruncationSD": _rendererTemplateName, "CrossCorrespondenceSD": _rendererTemplateName, "InstanceIdTokenMapSD": _rendererTemplateName, "SemanticIdTokenMapSD": _rendererTemplateName, # postRender nodes rendervars "InstanceMappingInfoSDhost": "InstanceMappingPost", "SemanticMapSD": "InstanceMappingPost", "SemanticMapSDhost": "InstanceMappingPost", "SemanticPrimTokenSD": "InstanceMappingPost", "SemanticPrimTokenSDhost": "InstanceMappingPost", "InstanceMapSD": "InstanceMappingPost", 
"InstanceMapSDhost": "InstanceMappingPost", "InstancePrimTokenSD": "InstanceMappingPost", "InstancePrimTokenSDhost": "InstanceMappingPost", "SemanticLabelTokenSD": "InstanceMappingPost", "SemanticLabelTokenSDhost": "InstanceMappingPost", "SemanticLocalTransformSD": "InstanceMappingPostWithTransforms", "SemanticLocalTransformSDhost": "InstanceMappingPostWithTransforms", "SemanticWorldTransformSD": "InstanceMappingPostWithTransforms", "SemanticWorldTransformSDhost": "InstanceMappingPostWithTransforms", "SemanticBoundingBox2DExtentTightSD": "BoundingBox2DTightReduction", "SemanticBoundingBox2DInfosTightSD": "BoundingBox2DTightReduction", "SemanticBoundingBox2DExtentLooseSD": "BoundingBox2DLooseReduction", "SemanticBoundingBox2DInfosLooseSD": "BoundingBox2DLooseReduction", "SemanticBoundingBox3DExtentSD": "BoundingBox3DReduction", "SemanticBoundingBox3DInfosSD": "BoundingBox3DReduction", "SemanticBoundingBox3DCamCornersSD": "BoundingBox3DCameraProjection", "SemanticBoundingBox3DCamExtentSD": "BoundingBox3DCameraProjection", "SemanticBoundingBox3DFilterInfosSD": "BoundingBox3DFilter" } _ogn_post_display_types = [ "omni.syntheticdata.SdPostRenderVarDisplayTexture" ] _ogn_display_types = [ "omni.syntheticdata.SdRenderVarDisplayTexture", "omni.syntheticdata.SdLinearArrayToTexture" ] """lst: List of omnigraph node types conforming the display api. Todo : use reflexivity on the node outputs.""" @staticmethod def register_display_rendervar_templates() -> None: """Automatically register SdRenderVarDisplayTexture node template for all registerd nodes whose type is in the post display type. The function is called for every statically registered nodes during the interface initialization . It may be called after having registered nodes whose type is omni.syntheticdata.SdPostRenderVarDisplayTexture. """ ogn_registry_keys = [key for key in SyntheticData._ogn_templates_registry.keys()] for tplName in ogn_registry_keys: tplParams = SyntheticData._ogn_templates_registry[tplName] tplNameDisplay = tplName[:-11] + "Display" if (tplParams.node_type_id in SyntheticData._ogn_post_display_types) and (tplNameDisplay not in SyntheticData._ogn_templates_registry): SyntheticData.register_node_template(SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdRenderVarDisplayTexture", [ SyntheticData.NodeConnectionTemplate("PostProcessDispatch"), SyntheticData.NodeConnectionTemplate(tplName) ], ), template_name=tplNameDisplay ) @staticmethod def register_combine_rendervar_templates() -> None: """Automatically register SdPostCompRenderVarTextures node template for all registerd nodes whose type is in the post display type list. The function is called for every statically registered nodes during the interface initialization . It may be called after having registered nodes whose type is in the post display type list. """ ogn_registry_keys = [key for key in SyntheticData._ogn_templates_registry.keys()] for tplName in ogn_registry_keys: tplParams = SyntheticData._ogn_templates_registry[tplName] if (tplParams.node_type_id in SyntheticData._ogn_post_display_types) and ( tplName + "Combine" not in SyntheticData._ogn_templates_registry ): SyntheticData.register_combine_rendervar_template(tplName) @staticmethod def register_combine_rendervar_template(template_name: str) -> None: """Automatically register SdPostCompRenderVarTextures node template for the given template name. 
Args: template_name: name of the node template for which registering a SdPostCompRenderVarTextures template """ if not template_name in SyntheticData._ogn_templates_registry: raise SyntheticDataException(f'graph node template "{template_name}" not registered') # cannot combine node results from the ondemand graph if SyntheticData._ogn_templates_registry[template_name].pipeline_stage > SyntheticDataStage.POST_RENDER: return templateParams = SyntheticData._ogn_templates_registry[template_name] if templateParams.node_type_id not in SyntheticData._ogn_post_display_types: raise SyntheticDataException(f'graph node template "{template_name}" not registered as a display node') templateNameCombine = template_name + "Combine" if templateNameCombine not in SyntheticData._ogn_templates_registry: SyntheticData.register_node_template(SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostCompRenderVarTextures", [ SyntheticData.NodeConnectionTemplate(SyntheticData._rendererTemplateName), SyntheticData.NodeConnectionTemplate( template_name, attributes_mapping={ "outputs:cudaPtr": "inputs:cudaPtr", "outputs:width": "inputs:width", "outputs:height": "inputs:height", "outputs:format": "inputs:format" } ) ] ), template_name=templateNameCombine, ) @staticmethod def register_device_rendervar_to_host_templates(rendervars: list) -> None: """Automatically register SdPostRenderVarToHost node templates for the given rendervars Args: rendervars: list of renderVar names to register the rendervar device to host copy node template """ # copy the rendervars list since the registration may modify the list rendervars_copy = rendervars.copy() for rv in rendervars_copy: rv_host = rv+SyntheticData._renderVarHostSuffix if rv.endswith(SyntheticData._renderVarHostSuffix) or (rv_host in SyntheticData._ogn_rendervars): continue template_name = rv + "PostCopyToHost" if template_name not in SyntheticData._ogn_templates_registry: SyntheticData.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarToHost", [ SyntheticData.NodeConnectionTemplate(rv), SyntheticData.NodeConnectionTemplate( SyntheticData._rendererTemplateName, attributes_mapping={ "outputs:rp": "inputs:rp", "outputs:gpu": "inputs:gpu" } ) ], { "inputs:renderVar": rv, "inputs:renderVarHostSuffix" : SyntheticData._renderVarHostSuffix } ), rendervars=[rv_host], template_name=template_name, ) @staticmethod def register_device_rendervar_tex_to_buff_templates(rendervars: list) -> None: """Automatically register SdPostRenderVarTextureToBuffer node templates for the given rendervars Args: rendervars: list of renderVar names to register the rendervar device texture to buffer copy node template """ # copy the rendervars list since the registration may modify the list rendervars_copy = rendervars.copy() for rv in rendervars_copy: rv_buff = rv+SyntheticData._renderVarBuffSuffix if rv.endswith(SyntheticData._renderVarBuffSuffix) or (rv_buff in SyntheticData._ogn_rendervars): continue template_name = rv + "PostCopyToBuff" if template_name not in SyntheticData._ogn_templates_registry: SyntheticData.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdPostRenderVarTextureToBuffer", [ SyntheticData.NodeConnectionTemplate(rv), SyntheticData.NodeConnectionTemplate( SyntheticData._rendererTemplateName, attributes_mapping={ "outputs:rp": "inputs:rp", "outputs:gpu": "inputs:gpu" } ) ], { "inputs:renderVar": rv, "inputs:renderVarBufferSuffix" : 
SyntheticData._renderVarBuffSuffix } ), rendervars=[rv_buff], template_name=template_name, ) @staticmethod def register_export_rendervar_ptr_templates(rendervars: list) -> None: """Automatically register SdRenderVarPtr node templates for the given rendervars Args: rendervars: list of renderVar names to register the ptr node template """ for rv in rendervars: template_name = rv + "Ptr" if template_name not in SyntheticData._ogn_templates_registry: SyntheticData.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdRenderVarPtr", [ SyntheticData.NodeConnectionTemplate(rv, (0,), None), SyntheticData.NodeConnectionTemplate("PostProcessDispatch") ], {"inputs:renderVar": rv} ), template_name=template_name, ) @staticmethod def register_export_rendervar_array_templates(rendervars: list) -> None: """Automatically register SdRenderVarToRawArray node templates for the given rendervars Args: rendervars: list of renderVar names to register the export raw array node template """ for rv in rendervars: template_name = rv + "ExportRawArray" if template_name not in SyntheticData._ogn_templates_registry: SyntheticData.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdRenderVarToRawArray", [ SyntheticData.NodeConnectionTemplate(rv, (0,), None), SyntheticData.NodeConnectionTemplate("PostProcessDispatch") ], {"inputs:renderVar": rv} ), template_name=template_name, ) @staticmethod def convert_sensor_type_to_rendervar(legacy_type_name: str) -> None: """Convert of legacy sensor type name to its rendervar name Args: legacy_type_name: legacy sensor type name to convert Returns: the name of the renderVar correspoding to the legacy name """ if legacy_type_name == "Rgb": return "LdrColorSD" elif legacy_type_name == "MotionVector": return "TargetMotionSD" else: return legacy_type_name + "SD" @staticmethod def disable_async_rendering(): """Disable asynchronous rendering Since asyncRendering is not supported by the fabric, graphs are currently not compatible with this mode. 
""" settings = carb.settings.get_settings() if settings.get("/app/asyncRendering") or settings.get("/app/asyncRenderingLowLatency"): carb.log_warn(f"SyntheticData is not supporting asyncRendering : switching it off.") settings.set("/app/asyncRendering", False) settings.set("/app/asyncRenderingLowLatency", False) @staticmethod def _has_rendervar(renderProductPath: str, renderVar: str, usdStage: Usd.Stage = None) -> bool: if not usdStage: usdStage = omni.usd.get_context().get_stage() if not usdStage: raise SyntheticDataException("No stage provided or in use by default UsdContext") renderProductPrim = usdStage.GetPrimAtPath(renderProductPath) if not renderProductPrim: raise SyntheticDataException(f"invalid renderProduct {renderProductPath}") renderVarPrimPath = f"/Render/Vars/{renderVar}" renderVarPrim = usdStage.GetPrimAtPath(renderVarPrimPath) if not renderVarPrim: return False renderProductRenderVarRel = renderProductPrim.GetRelationship("orderedVars") if not renderProductRenderVarRel: return False return renderVarPrimPath in renderProductRenderVarRel.GetTargets() @staticmethod def _add_rendervar(renderProductPath: str, renderVar: str, usdStage: Usd.Stage = None) -> None: # FIXME : we have to use the legacy Viewport interface to modify the renderproduct, otherwise changes may be overwritten vp_1found = False try: import omni.kit.viewport_legacy vp_iface = omni.kit.viewport_legacy.get_viewport_interface() viewports = vp_iface.get_instance_list() for viewport in viewports: vpw = vp_iface.get_viewport_window(viewport) if vpw.get_render_product_path() == renderProductPath: vpw.add_aov(renderVar, False) vp_1found = True except ImportError: pass # Both Viewport-1 and Viewport-2 won't share a common renderProductPath if vp_1found: return if not usdStage: usdStage = omni.usd.get_context().get_stage() if not usdStage: raise SyntheticDataException("No stage provided or in use by default UsdContext") with Usd.EditContext(usdStage, usdStage.GetSessionLayer()): renderProductPrim = usdStage.GetPrimAtPath(renderProductPath) if not renderProductPrim: raise SyntheticDataException(f"invalid renderProduct {renderProductPath}") renderVarPrimPath = f"/Render/Vars/{renderVar}" renderVarPrim = usdStage.GetPrimAtPath(renderVarPrimPath) if not renderVarPrim: renderVarPrim = usdStage.DefinePrim(renderVarPrimPath) if not renderVarPrim: raise SyntheticDataException(f"cannot create renderVar {renderVarPrimPath}") renderVarPrim.CreateAttribute("sourceName", Sdf.ValueTypeNames.String).Set(renderVar) renderVarPrim.SetMetadata("hide_in_stage_window", True) renderVarPrim.SetMetadata("no_delete", True) renderProductRenderVarRel = renderProductPrim.GetRelationship("orderedVars") if not renderProductRenderVarRel: renderProductRenderVarRel = renderProductPrim.CreateRelationship("orderedVars") if not renderProductRenderVarRel: raise SyntheticDataException( f"cannot set orderedVars relationship for renderProduct {renderProductPath}") renderProductRenderVarRel.AddTarget(renderVarPrimPath) @staticmethod def _remove_rendervar(renderProductPath: str, renderVar: str, usdStage: Usd.Stage = None) -> None: # we should not remove the LdrColor since it is the default renderVar if renderVar == "LdrColor": return # FIXME : we have to use the legacy Viewport interface to modify the renderproduct, otherwise changes may be overwritten vp_1found = False try: import omni.kit.viewport_legacy vp_iface = omni.kit.viewport_legacy.get_viewport_interface() viewports = vp_iface.get_instance_list() for viewport in viewports: vpw = 
vp_iface.get_viewport_window(viewport) if vpw.get_render_product_path() == renderProductPath: vpw.add_aov(renderVar, False) vp_1found = True except ImportError: pass # Both Viewport-1 and Viewport-2 won't share a common renderProductPath if vp_1found: return if not usdStage: usdStage = omni.usd.get_context().get_stage() if not usdStage: raise SyntheticDataException("No stage provided or in use by default UsdContext") with Usd.EditContext(usdStage, usdStage.GetSessionLayer()): renderProductPrim = usdStage.GetPrimAtPath(renderProductPath) if not renderProductPrim: raise SyntheticDataException(f"invalid renderProduct {renderProductPath}") renderVarPrimPath = f"/Render/Vars/{renderVar}" renderProductRenderVarRel = renderProductPrim.GetRelationship("orderedVars") if not renderProductRenderVarRel: return renderProductRenderVarRel.RemoveTarget(renderVarPrimPath) @staticmethod def get_registered_visualization_template_names() -> list: """Get the registered node template names which types are in the display type list Returns: list of registered template names which types are in the display type list """ registeredTemplateName = [] for name, val in SyntheticData._ogn_templates_registry.items(): if val.node_type_id in SyntheticData._ogn_display_types: registeredTemplateName.append(name) return registeredTemplateName @staticmethod def get_registered_visualization_template_names_for_display() -> list: """Get the registered node template names which types are in the display type list and their display name Returns: list of tuples of registered template names which types are in the display type list and their display name """ for sensor in SyntheticData.get_registered_visualization_template_names(): # by convention visualization sensors end with "Display" yield (sensor[0:-7] if sensor.endswith("Display") else sensor, sensor) @staticmethod def _get_graph_path(stage: int, renderProductPath: str = None) -> str: # simulation stages live in the same graph if stage == SyntheticDataStage.SIMULATION: return f"{SyntheticData._graphPathRoot}/{SyntheticData._simulationGraphPath}" elif stage == SyntheticDataStage.PRE_RENDER: # check if the renderProductPath has already an associated graph usdStage = omni.usd.get_context().get_stage() prim = usdStage.GetPrimAtPath(renderProductPath) ogpreprocesspath_attribute = prim.GetAttribute("ogPreProcessPath") if ogpreprocesspath_attribute: return f"{ogpreprocesspath_attribute.Get()}/{SyntheticData._graphName}" else: return f"{renderProductPath}/{SyntheticData._preRenderGraphPath}" # postprocess stages live in the same graph elif stage == SyntheticDataStage.ON_DEMAND: return f"{SyntheticData._graphPathRoot}/{SyntheticData._postProcessGraphPath}" elif stage == SyntheticDataStage.POST_RENDER: # check if the renderProductPath has already an associated graph usdStage = omni.usd.get_context().get_stage() prim = usdStage.GetPrimAtPath(renderProductPath) ogpostprocesspath_attribute = prim.GetAttribute("ogPostProcessPath") if ogpostprocesspath_attribute: return f"{ogpostprocesspath_attribute.Get()}/{SyntheticData._graphName}" else: return f"{renderProductPath}/{SyntheticData._postRenderGraphPath}" @staticmethod def _get_node_path(templateName: str, renderProductPath: str = None) -> str: if templateName not in SyntheticData._ogn_templates_registry: raise SyntheticDataException(f'graph node template "{templateName}" not registered') nodeStage = SyntheticData._ogn_templates_registry[templateName].pipeline_stage graphPath = SyntheticData._get_graph_path(nodeStage, renderProductPath) # 
prefix the node name by the renderproduct name for nodes living in the same graph # (simulation and postprocess graphs) nodeName = templateName if renderProductPath: renderProductName = renderProductPath.split("/")[-1] nodeName = f"{renderProductName}_{nodeName}" return f"{graphPath}/{nodeName}" @staticmethod def _unregister_node_template_rec(templateList: list) -> None: if not templateList: return templateDependenciesList = [] for templateName in templateList: if templateName not in SyntheticData._ogn_templates_registry: continue dependencyNames = [] for rv, tpl in SyntheticData._ogn_rendervars.items(): if tpl == templateName: dependencyNames.append(rv) for rv in dependencyNames: SyntheticData._ogn_rendervars.pop(rv) dependencyNames.append(templateName) SyntheticData._ogn_templates_registry.pop(templateName) for otherTemplateName, otherTemplateVal in SyntheticData._ogn_templates_registry.items(): for otherTemplateConnection in otherTemplateVal.connections: if otherTemplateConnection.node_template_id in dependencyNames: templateDependenciesList.append(otherTemplateName) SyntheticData._unregister_node_template_rec(templateDependenciesList) @staticmethod def _connect_nodes(srcNode, dstNode, connectionMap, enable) -> bool: success = True for srcAttrName, dstAttrName in connectionMap.items(): if (not srcNode.get_attribute_exists(srcAttrName)) or (not dstNode.get_attribute_exists(dstAttrName)): carb.log_error( f"SyntheticData failed to (dis)connect node {srcNode.get_prim_path()}:{srcAttrName} to {dstNode.get_prim_path()}:{dstAttrName}" ) success = False # best effort continue dstAttr = dstNode.get_attribute(dstAttrName) srcAttr = srcNode.get_attribute(srcAttrName) if enable: srcAttr.connect(dstAttr, True) else: srcAttr.disconnect(dstAttr, True) return success @staticmethod def _auto_connect_nodes(srcNode, dstNode, enable, srcIndex=0) -> bool: """Connect a source node to destination node The connections are made by matching outputs / inputs node attributes names In case of outputs attributes name clashing, the first node in the list is connected Optionnally outputs attributes name could be indexed : terminated by underscore followed by the srcNode list index (no leading zero) Indexed outputs attributes names take precedence """ success = False for attr in srcNode.get_attributes(): srcAttrName = attr.get_name() if not srcAttrName.startswith("outputs:"): continue dstAttrName = "inputs:%s_%d" % (srcAttrName[8:], srcIndex) if ( not dstNode.get_attribute_exists(dstAttrName) or dstNode.get_attribute(dstAttrName).get_upstream_connection_count() ): dstAttrName = "inputs:%s" % srcAttrName[8:] if ( not dstNode.get_attribute_exists(dstAttrName) or dstNode.get_attribute(dstAttrName).get_upstream_connection_count() ): continue dstAttr = dstNode.get_attribute(dstAttrName) srcAttr = srcNode.get_attribute(srcAttrName) if enable: srcAttr.connect(dstAttr, True) else: srcAttr.disconnect(dstAttr, True) success = True return success @staticmethod def Initialize(): """Initialize interface singleton instance.""" global _sdg_iface if _sdg_iface is None: SyntheticData.register_device_rendervar_tex_to_buff_templates(SyntheticData._ogn_rendervars) SyntheticData.register_device_rendervar_to_host_templates(SyntheticData._ogn_rendervars) SyntheticData.register_display_rendervar_templates() SyntheticData.register_combine_rendervar_templates() SyntheticData.register_export_rendervar_ptr_templates(SyntheticData._ogn_rendervars) SyntheticData.register_export_rendervar_array_templates(SyntheticData._ogn_rendervars) _sdg_iface = 
SyntheticData() @staticmethod def Get(): """Get the interface singleton instance.""" global _sdg_iface return _sdg_iface @staticmethod def Reset(): """Reset the interface singleton """ global _sdg_iface if _sdg_iface: _sdg_iface.reset() _sdg_iface = None @staticmethod def register_node_template(node_template: NodeTemplate, rendervars: list = None, template_name: str = None) -> str: """Register a node template. Add a node template in the node registry. After the template has been added it may be activated for being executed in its associated stage. Args: node_template : template to be added to the registry rendervars : list of renderVar the node is producing template_name : unique name id of the template Returns: the unique name id of the registered template """ # check type if og.GraphRegistry().get_node_type_version(node_template.node_type_id) is None: raise SyntheticDataException( f"failed to register node template. Type {node_template.node_type_id} is not in the registry") # check template_name if template_name is None: numTypeTemplates = 0 for template in SyntheticData._ogn_templates_registry.values(): if template.node_type_id == node_template.node_type_id: numTypeTemplates += 1 template_name = "%s_%04d" % (node_template.node_type_id.split(".")[-1], numTypeTemplates) elif template_name in SyntheticData._ogn_templates_registry: raise SyntheticDataException( f"failed to register node template. Template {template_name} is already in the registry") elif template_name in SyntheticData._ogn_rendervars: raise SyntheticDataException( f"failed to register node template. Template {template_name} is already registered as a renderVar") # check connections autoStage = SyntheticDataStage.POST_RENDER if rendervars else SyntheticDataStage.SIMULATION i_connections = node_template.connections if node_template.connections else [] for conn in i_connections: conn_name = conn.node_template_id if conn_name in SyntheticData._ogn_rendervars: conn_name = SyntheticData._ogn_rendervars[conn_name] if conn_name not in SyntheticData._ogn_templates_registry: raise SyntheticDataException( f"failed to register node template. Connection template name {conn_name} is not in the registry") conn_stage = SyntheticData._ogn_templates_registry[conn_name].pipeline_stage autoStage = max(autoStage, conn_stage) conn_map = conn.attributes_mapping if conn.attributes_mapping else {} if not type(conn_map) is dict: raise SyntheticDataException( f"failed to register node template. connection attributes map is not a dictionnary") # check stage if node_template.pipeline_stage == SyntheticDataStage.AUTO: node_template.pipeline_stage = autoStage if node_template.pipeline_stage < autoStage: raise SyntheticDataException( f"failed to register node template. Stage {node_template.pipeline_stage} is not compatible with the connections") # check and register renderVars if rendervars: if node_template.pipeline_stage != SyntheticDataStage.POST_RENDER: raise SyntheticDataException( f"failed to register node template. Only postRender nodes may produce renderVars") for rv in rendervars: if (rv in SyntheticData._ogn_templates_registry) or (rv in SyntheticData._ogn_rendervars): raise SyntheticDataException(f"failed to register node template. RenderVar {rv} already registered") else: SyntheticData._ogn_rendervars[rv] = template_name SyntheticData._ogn_templates_registry[template_name] = node_template return template_name @staticmethod def is_node_template_registered(template_name: str) -> bool: """Check if a node template has already been registered. 
Args: template_name: name of the node template to check Returns: True if the template_name specifie a node template within the registry, False otherwise """ return template_name in SyntheticData._ogn_templates_registry @staticmethod def unregister_node_template(template_name: str) -> None: """Unregister a node template. Remove a node template from the registry and all its dependencies. After removing a template, it cannot be activated anymore, nor its dependent templates. """ SyntheticData._unregister_node_template_rec([template_name]) def _reset_node_graph(self, nodeGraph): graph = nodeGraph.get_wrapped_graph() for node in graph.get_nodes(): graph.destroy_node(node.get_prim_path(), True) orchestration_graph = nodeGraph.get_graph() orchestration_graph.destroy_node(nodeGraph.get_prim_path(), True) def _clear_empty_graphs(self): emptyGraph = [] for graphPath, nodeGraph in self._nodeGraphs.items(): if nodeGraph.get_wrapped_graph().get_nodes(): emptyGraph.append(graphPath) for graphPath in emptyGraph: self._reset_node_graph(nodeGraph) self._nodeGraphs.pop(graphPath) def _set_process_path(self, renderProductPath, graphPath, processPathAttribueName): if not renderProductPath: raise SyntheticDataException("invalid renderProductPath") usdStage = omni.usd.get_context().get_stage() prim = usdStage.GetPrimAtPath(renderProductPath) ogprocesspath_attribute = prim.GetAttribute(processPathAttribueName) if not ogprocesspath_attribute: assert graphPath.endswith("/" + SyntheticData._graphName) ogProcessPath = graphPath[: -len("/" + SyntheticData._graphName)] prim.CreateAttribute(processPathAttribueName, Sdf.ValueTypeNames.String).Set(ogProcessPath) def _get_or_create_graph(self, path: str, stage: int, renderProductPath: object) -> object: if path in self._nodeGraphs: return self._nodeGraphs[path] settings = carb.settings.get_settings() use_legacy_simulation_pipeline = settings.get("/persistent/omnigraph/useLegacySimulationPipeline") pipelineStage = og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION executionModel = "push" backingType = og.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED if (stage == SyntheticDataStage.PRE_RENDER) and (not use_legacy_simulation_pipeline): pipelineStage = og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_PRERENDER backingType = og.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED # GRAPH_BACKING_TYPE_FLATCACHE_WITHOUT_HISTORY elif (stage == SyntheticDataStage.POST_RENDER) and (not use_legacy_simulation_pipeline): pipelineStage = og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_POSTRENDER # GRAPH_BACKING_TYPE_FLATCACHE_WITHOUT_HISTORY backingType = backingType = og.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED elif (stage == SyntheticDataStage.ON_DEMAND) or (stage == SyntheticDataStage.ON_DEMAND): pipelineStage = og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION # ONDEMAND (FIXME) executionModel = "execution" usdStage = omni.usd.get_context().get_stage() primExistWorkaround = not usdStage.GetPrimAtPath(path) orchestration_graphs = og.get_global_orchestration_graphs_in_pipeline_stage(pipelineStage) nodeGraph = orchestration_graphs[0].create_graph_as_node( path.replace("/", "_"), path, executionModel, True, primExistWorkaround, backingType, pipelineStage, ) if stage == SyntheticDataStage.PRE_RENDER: self._set_process_path(renderProductPath, path, "ogPreProcessPath") elif stage == SyntheticDataStage.POST_RENDER: self._set_process_path(renderProductPath, path, "ogPostProcessPath") self._nodeGraphs[path] = nodeGraph return nodeGraph def _activate_node_rec(self, 
templateName: str, renderProductIndex: int = -1, renderProductPaths: list = None, render_var_activations: dict = None) -> None: renderProductPath = renderProductPaths[renderProductIndex] if renderProductIndex > -1 else None # renderVar template if templateName in SyntheticData._ogn_rendervars: renderVarName = templateName templateName = SyntheticData._ogn_rendervars[templateName] if (not render_var_activations is None) and renderProductPath and (templateName == SyntheticData._rendererTemplateName): if renderProductPath not in render_var_activations: render_var_activations[renderProductPath]={renderVarName:0} elif renderVarName not in render_var_activations[renderProductPath]: render_var_activations[renderProductPath][renderVarName]=0 render_var_activations[renderProductPath][renderVarName]+=1 if templateName not in SyntheticData._ogn_templates_registry: raise SyntheticDataException(f"graph node template depends on unregistered template {templateName}") nodePath = SyntheticData._get_node_path(templateName, renderProductPath) if nodePath in self._graphNodes: return templateName template = SyntheticData._ogn_templates_registry[templateName] nodeStage = template.pipeline_stage graphPath = SyntheticData._get_graph_path(nodeStage, renderProductPath) nodeGraph = self._get_or_create_graph(graphPath, nodeStage, renderProductPath) nodeType = template.node_type_id usdStage = omni.usd.get_context().get_stage() primExistWorkaround = not usdStage.GetPrimAtPath(nodePath) self._graphNodes[nodePath] = nodeGraph.get_wrapped_graph().create_node(nodePath, nodeType, primExistWorkaround) node = self._graphNodes[nodePath] # setup static attributes for attrName, attrVal in template.attributes.items(): if node.get_attribute_exists(attrName): node.get_attribute(attrName).set(attrVal) else: carb.log_error(f"SyntheticData failed to set node {nodePath} static attribute {attrName}") # do not return error : the default value in the ogn spec will be used # set inputs:renderProductPathPath if renderProductPath and node.get_attribute_exists(SyntheticData._renderProductAttributeName): node.get_attribute(SyntheticData._renderProductAttributeName).set(renderProductPath) # recursive call for upstream connections for connIndex in range(len(template.connections)): connection = template.connections[connIndex] connTemplateName = connection.node_template_id connRenderProductPaths = [renderProductPaths[idx] for idx in connection.render_product_idxs] if ( renderProductPaths and connection.render_product_idxs) else None # activate the template connTemplateName = self._activate_node_rec(connTemplateName, 0 if connRenderProductPaths else - 1, connRenderProductPaths, render_var_activations) # setup connection attributes connRenderProductPath = connRenderProductPaths[0] if connRenderProductPaths else None connNodePath = SyntheticData._get_node_path(connTemplateName, connRenderProductPath) connNode = self._graphNodes[connNodePath] connMap = connection.attributes_mapping if not connMap is None: if connMap: SyntheticData._connect_nodes(connNode, node, connMap, True) else: SyntheticData._auto_connect_nodes(connNode, node, True, connIndex) return templateName def _deactivate_node_rec( self, templateName: str, renderProductIndex: int = -1, renderProductPaths: list = None, render_var_deactivations: dict = None, only_automatically_activated_nodes: bool = True, manual_deactivation: bool = True ) -> None: renderProductPath = renderProductPaths[renderProductIndex] if renderProductIndex > -1 else None if templateName in SyntheticData._ogn_rendervars: 
renderVarName = templateName templateName = SyntheticData._ogn_rendervars[templateName] if (not render_var_deactivations is None) and renderProductPath and (templateName == SyntheticData._rendererTemplateName): if renderProductPath not in render_var_deactivations: render_var_deactivations[renderProductPath]={renderVarName:0} elif renderVarName not in render_var_deactivations[renderProductPath]: render_var_deactivations[renderProductPath][renderVarName]=0 render_var_deactivations[renderProductPath][renderVarName]+=1 nodePath = SyntheticData._get_node_path(templateName, renderProductPath) # prevent automatically deactivating manually activated node if (nodePath not in self._graphNodes) or (not manual_deactivation and only_automatically_activated_nodes and (nodePath in self._activatedNodePaths)): return templateName node = self._graphNodes[nodePath] template = SyntheticData._ogn_templates_registry[templateName] # abort if the node has a downstream connection for attr in node.get_attributes(): if attr.get_downstream_connection_count(): return templateName node.get_graph().destroy_node(nodePath, True) self._graphNodes.pop(nodePath) # remove unused connections for connection in template.connections: connTemplateName = connection.node_template_id connRenderProductPaths = [renderProductPaths[idx] for idx in connection.render_product_idxs] if ( renderProductPaths and connection.render_product_idxs) else None # deactivate the template self._deactivate_node_rec(connTemplateName, 0 if connRenderProductPaths else -1, connRenderProductPaths, render_var_deactivations, only_automatically_activated_nodes, False) return templateName def _set_node_attributes(self, nodePath, attributes) -> None: if not attributes: return if not nodePath in self._graphNodes: raise SyntheticDataException(f"invalid node {nodePath}") node = self._graphNodes[nodePath] for attrName, attrVal in attributes.items(): if node.get_attribute_exists(attrName): og.Controller(attribute=node.get_attribute(attrName)).set(value=attrVal) else: raise SyntheticDataException(f"invalid node attribute {nodePath}.{attrName}") def _get_node_attributes(self, nodePath, attribute_names: list, gpu=False) -> dict: if not nodePath in self._graphNodes: return None node = self._graphNodes[nodePath] attributes = {} for attrName in attribute_names: if node.get_attribute_exists(attrName): attributes[attrName] = og.Controller(attribute=node.get_attribute(attrName)).get(on_gpu=gpu) return attributes def __init__(self) -> None: self._nodeGraphs = {} self._graphNodes = {} self._activatedNodePaths = [] self._render_product_var_activations = {} def reset(self, usd=True, remove_activated_render_vars=False) -> None: """Reset the SyntheticData instance Args: usd : if true reset the graph in the usd stage session layer remove_activated_render_vars : if True and usd is True remove the render vars activated by the node activation If the stage is valid it will destroy every graph created. 
""" stage = omni.usd.get_context().get_stage() if stage and usd: session_layer = stage.GetSessionLayer() with Usd.EditContext(stage, session_layer): for nodeGraph in self._nodeGraphs.values(): self._reset_node_graph(nodeGraph) if remove_activated_render_vars: for rp, rvs in self._render_product_var_activations.items(): for rv, num_act in rvs.items(): if num_act[1] and (num_act[0] > 0): self._remove_rendervar(rp,rv,stage) self._render_product_var_activations = {} self._activatedNodePaths = [] self._graphNodes = {} self._nodeGraphs = {} def get_graph(self, stage: int = SyntheticDataStage.ON_DEMAND, renderProductPath: str = None) -> object: """Return the graph at a given stage, for a given renderProduct. Gives access to the SyntheticData graphs. Args: stage : SyntheticDataStage of the queried graph renderProductPath : (for POST_RENDER stage only) the renderProductPath for which to get the POST_RENDER graph Returns: the graph at the given stage for the given renderProductPath. """ if renderProductPath and stage != SyntheticDataStage.POST_RENDER: raise SyntheticDataException("invalid graph") graphPath = SyntheticData._get_graph_path(stage, renderProductPath) return self._get_or_create_graph(graphPath, stage, renderProductPath) def activate_node_template( self, template_name: str, render_product_path_index: int = -1, render_product_paths: list = None, attributes: dict = None, stage: Usd.Stage = None, activate_render_vars: bool = True ) -> None: """Activate a registered node. Create a node instance for the given node template and all its missing dependencies (including nodes and renderVar). The node will be executed during the next stage execution. Args: template_name : name of the node template to be activate render_product_path_index : if the node template is associated to a render product, index of the associated render product in the render product path list render_product_paths : render product path list to be used for specifying the render product of the node template and its dependencies to activate attributes : dictionnary of attributes to set to the activated "template_name" node stage : the stage to change, if None use the stage of the current usd context activate_render_vars : if True activate the required render_vars, if False it is the user responsability to activate the required render_vars Return: A dictionnary containing for every render products the list of render var dependencies of this activation NB : if activate_render_vars is True those render vars are added """ if (template_name not in SyntheticData._ogn_templates_registry) and (template_name not in SyntheticData._ogn_rendervars): raise SyntheticDataException(f'graph node template "{template_name}" unregistered') node_path = SyntheticData._get_node_path( template_name, render_product_paths[render_product_path_index] if render_product_path_index > -1 else None ) if node_path in self._activatedNodePaths: return if not stage: stage = omni.usd.get_context().get_stage() if not stage: raise SyntheticDataException("invalid USD stage") session_layer = stage.GetSessionLayer() with Usd.EditContext(stage, session_layer): render_var_activations = {} self._activate_node_rec(template_name, render_product_path_index, render_product_paths, render_var_activations) self._set_node_attributes(node_path, attributes) self._activatedNodePaths.append(node_path) # maintain the render_vars activation number for every render products activated_render_vars = {} for rp, rvs in render_var_activations.items(): if rp not in 
self._render_product_var_activations: self._render_product_var_activations[rp]={} for rv, num in rvs.items(): need_activation = not self._has_rendervar(rp,rv,stage) if rv not in self._render_product_var_activations[rp]: self._render_product_var_activations[rp][rv] = [num, need_activation and activate_render_vars] else: self._render_product_var_activations[rp][rv][0] += num self._render_product_var_activations[rp][rv][1] = need_activation and activate_render_vars if need_activation: if rp not in activated_render_vars: activated_render_vars[rp]=[] if rv not in activated_render_vars[rp]: activated_render_vars[rp].append(rv) if activate_render_vars: for rp, rvs in activated_render_vars.items(): for rv in rvs: SyntheticData._add_rendervar(rp, rv, stage) return activated_render_vars def is_node_template_activated( self, template_name: str, render_product_path: str = None, only_manually_activated: bool = False ) -> None: """Query the activation status of a node template. Args: template_name : name of the node template to query the activation status render_product_path : render product path for which to check the template activation status (None if not applicable) only_manually_activated: if True check the activation for only the explicitely activated templates ( exclude the automatically activated template ) Return: True if the node template is currently activated and, if only_explicitely_activated is True, if it has been explicitely activated """ node_path = SyntheticData._get_node_path(template_name, render_product_path) return node_path in self._activatedNodePaths if only_manually_activated else node_path in self._graphNodes def deactivate_node_template( self, template_name: str, render_product_path_index: int = -1, render_product_paths: list = [], stage: Usd.Stage = None, deactivate_render_vars: bool = False, recurse_only_automatically_activated: bool = True ) -> None: """Deactivate a registered node. Delete a node instance for the given node template and all its automatically activated dependencies with no more downstream connections. The node won't be executed anymore starting with the next stage execution. Args: template_name : name of the node template to deactivate render_product_path_index : if the node template is associated to a render product, index of the associated render product in the render product path list render_product_paths : render product path list to be used for specifying the render product of the node template and its dependencies to deactivate stage : the stage to change, if None use the stage of the current usd context deactivate_render_vars : if True deactivate the render_vars that have been activated in a call to activate_node_template and which are not used anymore by the managed graphs. Beware that in some cases, some of these render vars maybe actually used by other graphs, hence it is False by default if False it is the user responsability to deactivate the unused render_vars. recurse_only_automatically_activated : if True recursively deactivate only automatically activated upstream nodes without other connections if False recursively deactivate all upstream nodes without other connections Return: A dictionnary containing for every render products path the list of render var dependencies that have been activated by activate_node_template and are not used anymore by the managed graphs. 
NB : if deactivate_render_vars is True those render vars are removed """ if not stage: stage = omni.usd.get_context().get_stage() if not stage: raise SyntheticDataException("invalid USD stage") session_layer = stage.GetSessionLayer() with Usd.EditContext(stage, session_layer): render_var_deactivations = {} self._deactivate_node_rec(template_name, render_product_path_index, render_product_paths, render_var_deactivations, recurse_only_automatically_activated) node_path = SyntheticData._get_node_path( template_name, render_product_paths[render_product_path_index] if render_product_path_index > -1 else None ) if (node_path in self._activatedNodePaths) and (node_path not in self._graphNodes): self._activatedNodePaths.remove(node_path) # maintain the render_vars activation number for every render products deactivated_render_vars = {} for rp, rvs in render_var_deactivations.items(): valid_rp = rp in self._render_product_var_activations for rv, num in rvs.items(): valid_rv = valid_rp and rv in self._render_product_var_activations[rp] if valid_rv and (self._render_product_var_activations[rp][rv][0] <= num): if self._render_product_var_activations[rp][rv][1]: if rp not in deactivated_render_vars: deactivated_render_vars[rp]=[rv] else: deactivated_render_vars[rp].append(rv) self._render_product_var_activations[rp].pop(rv) elif valid_rv: self._render_product_var_activations[rp][rv][0] -= num if deactivate_render_vars: for rp, rvs in deactivated_render_vars.items(): for rv in rvs: SyntheticData._remove_rendervar(rp, rv, stage) return deactivated_render_vars def connect_node_template(self, src_template_name: str, dst_template_name: str, render_product_path: str=None, connection_map: dict=None): """Connect the given source node template to the destination node template Args: src_template_name : name of the source node template dst_template_name : name of the destination node template render_product_path : render product path of the node templates (None if the node are not specific to a render product) connection_map : attribute mapping for the source inputs to the destination outputs. (None for an automatic mapping based on names) """ src_node_path = SyntheticData._get_node_path(src_template_name, render_product_path) if src_node_path not in self._graphNodes: raise SyntheticDataException(f'cannot connect node template : "{src_node_path}" not activated') else: src_node = self._graphNodes[src_node_path] dst_node_path = SyntheticData._get_node_path(dst_template_name, render_product_path) if dst_node_path not in self._graphNodes: raise SyntheticDataException(f'cannot connect node template : "{dst_node_path}" not activated') else: dst_node = self._graphNodes[dst_node_path] if connection_map: SyntheticData._connect_nodes(src_node, dst_node, connection_map, True) else: SyntheticData._auto_connect_nodes(src_node, dst_node, True) def disconnect_node_template(self, src_template_name: str, dst_template_name: str, render_product_path: str=None, connection_map: dict=None): """Disconnect the given source node template to the destination node template Args: src_template_name : name of the source node template dst_template_name : name of the destination node template render_product_path : render product path of the node templates (None if the node are not specific to a render product) connection_map : attribute mapping for the source inputs to the destination outputs. 
(None for an automatic mapping based on names) """ src_node_path = SyntheticData._get_node_path(src_template_name, render_product_path) if src_node_path not in self._graphNodes: raise SyntheticDataException(f'cannot disconnect node template : "{src_node_path}" not activated') else: src_node = self._graphNodes[src_node_path] dst_node_path = SyntheticData._get_node_path(dst_template_name, render_product_path) if dst_node_path not in self._graphNodes: raise SyntheticDataException(f'cannot disconnect node template : "{dst_node_path}" not activated') else: dst_node = self._graphNodes[dst_node_path] if connection_map: SyntheticData._connect_nodes(src_node, dst_node, connection_map, False) else: SyntheticData._auto_connect_nodes(src_node, dst_node, False) def set_node_attributes(self, template_name: str, attributes: dict, render_product_path: str=None) -> None: """Set the value of an activated node attribute. The function may be used to set the value of multiple activated node input attributes before the execution of its stage. Args: template_name : name of the activated node render_product_path : if the activated node is associated to a render product, provide its path attributes : dictionnary of attribute name/value to set """ node_path = SyntheticData._get_node_path(template_name, render_product_path) self._set_node_attributes(node_path, attributes) def get_node_attributes( self, template_name: str, attribute_names: list, render_product_path=None, gpu=False ) -> dict: """Get the value of several activated node's attributes. The function may be used to retrieve the value of multiple activated node output attributes after the execution of its graph. Args: template_name : name of the activated node attribute_names : list of node attribute names to retrieve the value render_product_path : if the activated node is associated to a render product, provide its path gpu : for array data attribute, get a gpu data Returns: A dictionnary of attribute name/value for every successfully retrieved attributes None if the node is not a valid activated node """ node_path = SyntheticData._get_node_path(template_name, render_product_path) return self._get_node_attributes(node_path, attribute_names, gpu) def set_instance_mapping_semantic_filter(self, predicate="*:*"): """Set the semantic filter predicate to be applied to the instance mapping. Contrary to the default semantic filter this filter affect the instance mapping. All semantic data filtered at this level is not available in the instance mapping. Args: predicate : a semantic filter predicate. predicate examples : "typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelD" "typeA : * ; * : labelA" """ SyntheticData._ogn_templates_registry[SyntheticData._instanceMappingCtrl].attributes["inputs:semanticFilterPredicate"] = predicate node_path = SyntheticData._get_node_path(SyntheticData._instanceMappingCtrl) if node_path in self._graphNodes: self.set_node_attributes(SyntheticData._instanceMappingCtrl, {"inputs:semanticFilterPredicate":predicate}) def set_default_semantic_filter(self, predicate="*:*", hierarchical_labels=False, matching_labels=True): """Set the default semantic filter predicate. Args: predicate : a semantic filter predicate. 
            hierarchical_labels : option to propagate semantic labels within the hierarchy, from parent to children
            matching_labels : option to remove from the set of labels those that do not match the predicate

            predicate examples :
                "typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelD"
                "typeA : * ; * : labelA"
        """
        node_path = SyntheticData._get_node_path(SyntheticData._defaultSemanticFilterName)
        attributes = {"inputs:predicate": predicate,
                      "inputs:hierarchicalLabels": hierarchical_labels,
                      "inputs:matchingLabels": matching_labels}
        if node_path in self._graphNodes:
            self.set_node_attributes(SyntheticData._defaultSemanticFilterName, attributes)
        else:
            self.activate_node_template(SyntheticData._defaultSemanticFilterName, attributes=attributes)

    def enable_rendervar(self, render_product_path: str, render_var: str, usd_stage: Usd.Stage = None) -> None:
        """Explicitly enable the computation of a render_var for a given render_product.

        Args:
            render_product_path : the render_product for which to enable the given render_var computation
            render_var : the name of the render_var to enable
            usd_stage : usd stage
        """
        SyntheticData._add_rendervar(render_product_path, render_var, usd_stage)

    def disable_rendervar(self, render_product_path: str, render_var: str, usd_stage: Usd.Stage = None) -> None:
        """Explicitly disable the computation of a render_var for a given render_product.

        Args:
            render_product_path : the render_product for which to disable the given render_var computation
            render_var : the name of the render_var to disable
            usd_stage : usd stage
        """
        SyntheticData._remove_rendervar(render_product_path, render_var, usd_stage)

    def is_rendervar_used(self, render_product_path: str, render_var: str) -> bool:
        """Query the used status of a render var for a render product.

        Args:
            render_product_path: the path of the render product
            render_var: the name of the render_var
        Returns:
            True if the given render var is currently in use by the activated syntheticData nodes for the given render product
        """
        if (render_product_path in self._render_product_var_activations) and (render_var in self._render_product_var_activations[render_product_path]):
            return self._render_product_var_activations[render_product_path][render_var][0] > 0
        else:
            return False

    def is_rendervar_enabled(self, render_product_path: str, render_var: str, only_sdg_activated: bool = False, usd_stage: Usd.Stage = None) -> bool:
        """Query the enabled status of a render var for a render product.

        Args:
            render_product_path: the path of the render product
            render_var: the name of the render_var
            only_sdg_activated: consider only the render var automatically enabled by a call to activate_node_template
            usd_stage: the usd stage (if None use the current usd context stage)
        Returns:
            True if the given render var is currently enabled for the given render product
            and, if only_sdg_activated is True, if it has been enabled by a call to activate_node_template
        """
        if only_sdg_activated:
            if (render_product_path in self._render_product_var_activations) and (render_var in self._render_product_var_activations[render_product_path]):
                return self._render_product_var_activations[render_product_path][render_var][1]
            else:
                return False
        else:
            return SyntheticData._has_rendervar(render_product_path, render_var, usd_stage)
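A minimal usage sketch of the activation API documented above, assuming it runs inside a Kit/Isaac Sim process with omni.syntheticdata loaded; the template name "MyTemplate", the render product path, and the attribute names are placeholders, not real registered templates:

# Minimal usage sketch (placeholder names; not part of the module above).
from omni.syntheticdata import SyntheticData

sdg_iface = SyntheticData.Get()                        # singleton access
render_product_path = "/Render/RenderProduct_TEX0"     # hypothetical render product prim path

# activate a registered template for this render product; missing dependencies
# (upstream nodes and render vars) are created automatically
activated_vars = sdg_iface.activate_node_template(
    "MyTemplate", 0, [render_product_path], attributes={"inputs:someFlag": True}
)

# after the corresponding graph stage has executed, read back output attributes
outputs = sdg_iface.get_node_attributes("MyTemplate", ["outputs:data"], render_product_path)

# tear down: remove the node and, optionally, the render vars its activation enabled
sdg_iface.deactivate_node_template("MyTemplate", 0, [render_product_path], deactivate_render_vars=True)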
81,667
Python
48.405929
202
0.633095
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/model.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ['RenderProductModel', 'RenderVarModel'] import omni.usd import omni.ui as ui from .SyntheticData import SyntheticData from pxr import Usd class RenderProductItem(ui.AbstractItem): def __init__(self, model): super().__init__() self.model = model class RenderProductModel(ui.AbstractItemModel): def __init__(self, viewport_name: str, viewport_api): super().__init__() # Omniverse interfaces self._viewport_api = viewport_api self._stage_update = omni.stageupdate.get_stage_update_interface() self._stage_subscription = self._stage_update.create_stage_update_node( "RenderProductModel_" + viewport_name, None, None, None, self._on_prim_created, None, self._on_prim_removed, ) # The list of the cameras is here self._render_products = [] # The current index of the editable_combo box self._current_index = ui.SimpleIntModel() self._current_index.add_value_changed_fn(self._current_index_changed) # Iterate the stage and get all the renderProduct stage = viewport_api.usd_context.get_stage() if stage: for prim in Usd.PrimRange(stage.GetPseudoRoot()): if prim.IsA("UsdRenderProduct"): self._render_products.append( RenderProductItem(ui.SimpleStringModel(prim.GetPath().pathString)) ) def destroy(self): self._viewport_api = None def get_item_children(self, item): return self._render_products def get_item_value_model(self, item, column_id): if item is None: return self._current_index return item.model def _on_prim_created(self, path): self._render_products.append(RenderProductItem(ui.SimpleStringModel(path))) self._item_changed(None) def _on_prim_removed(self, path): render_products = [rp.model.as_string for rp in self._render_products] if path in render_products: index = render_products.index(path) del self._render_products[index] self._current_index.as_int = 0 self._item_changed(None) def _current_index_changed(self, model): index = model.as_int render_product_path = self._render_products[index].model.as_string self._viewport_api.render_product_path = render_product_path self._item_changed(None) class RenderVarItem(ui.AbstractItem): def __init__(self, model): super().__init__() self.model = model class RenderVarModel(ui.AbstractItemModel): def _create_item(self, name): return RenderVarItem(ui.SimpleStringModel(name)) def __init__(self, viewport_api): super().__init__() self._viewport_api = viewport_api self._render_vars = [ self._create_item(rv[0:-7]) for rv in SyntheticData.get_registered_visualization_template_names() ] self._default_index_int = 0 self._current_index = ui.SimpleIntModel() self._current_index.add_value_changed_fn(self._current_index_changed) self._previous_index_int = self._current_index.as_int self._combine_params = [0, 0, -100] def destroy(self): self._viewport_api = None def get_item_children(self, item): return self._render_vars def get_item_value_model(self, item, column_id): if item is None: return self._current_index return item.model def _current_index_changed(self, model): index = model.as_int isdg = SyntheticData.Get() if isdg: render_prod_path = self.get_render_product_path() stage = self._viewport_api.usd_context.get_stage() if 
self._render_vars[self._previous_index_int].model.as_string != "LdrColor":
                isdg.deactivate_node_template(
                    self._render_vars[self._previous_index_int].model.as_string + "DisplayPostCombine",
                    0, [render_prod_path], stage
                )
            if self._render_vars[index].model.as_string != "LdrColor":
                isdg.activate_node_template(
                    self._render_vars[index].model.as_string + "DisplayPostCombine",
                    0, [render_prod_path], None, stage
                )
            SyntheticData.disable_async_rendering()
        self._previous_index_int = index
        self.update_combine()
        self._item_changed(None)

    def set_default_item(self):
        self._current_index.set_value(self._default_index_int)

    def get_render_product_path(self):
        render_prod_path = self._viewport_api.render_product_path
        # XXX: Issue with Viewport-2 and omni.kit.hydra_texture
        # The default product path is returned as a string that isn't the prim-path
        # We can work around it by noting the path isn't absolute and fixing it up in that case.
        if render_prod_path and (not render_prod_path.startswith('/')):
            render_prod_path = f'/Render/RenderProduct_{render_prod_path}'
        return render_prod_path

    def set_combine_angle(self, angle):
        self._combine_params[0] = angle
        self.update_combine()

    def set_combine_divide_x(self, divide):
        self._combine_params[1] = divide
        self.update_combine()

    def set_combine_divide_y(self, divide):
        self._combine_params[2] = divide
        self.update_combine()

    def get_combine_angle(self):
        return self._combine_params[0]

    def get_combine_divide_x(self):
        return self._combine_params[1]

    def get_combine_divide_y(self):
        return self._combine_params[2]

    def update_combine(self):
        if self._render_vars[self._previous_index_int].model.as_string == "LdrColor":
            return
        isdg = SyntheticData.Get()
        if isdg:
            isdg.set_node_attributes(
                self._render_vars[self._previous_index_int].model.as_string + "DisplayPostCombine",
                {"inputs:parameters": self._combine_params},
                self.get_render_product_path()
            )
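The prim-path workaround in get_render_product_path can be reused on its own; a small sketch of the same normalization (the helper name is mine, not part of this module):

# Sketch of the normalization applied by get_render_product_path (hypothetical helper name).
def normalize_render_product_path(render_prod_path: str) -> str:
    # Viewport-2 / omni.kit.hydra_texture may report a non-absolute token instead of the
    # prim path; prefix it so it points at the render product prim under /Render.
    if render_prod_path and not render_prod_path.startswith('/'):
        render_prod_path = f'/Render/RenderProduct_{render_prod_path}'
    return render_prod_path

assert normalize_render_product_path("TEX0") == "/Render/RenderProduct_TEX0"
assert normalize_render_product_path("/Render/RenderProduct_TEX0") == "/Render/RenderProduct_TEX0"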
6,627
Python
35.021739
132
0.622001
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/menu.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ["SynthDataMenuContainer"] from omni.kit.viewport.menubar.core import ( ComboBoxModel, ComboBoxItem, ComboBoxMenuDelegate, CheckboxMenuDelegate, IconMenuDelegate, SliderMenuDelegate, ViewportMenuContainer, ViewportMenuItem, ViewportMenuSeparator ) from .SyntheticData import SyntheticData from .visualizer_window import VisualizerWindow import carb import omni.ui as ui from pathlib import Path import weakref ICON_PATH = Path(carb.tokens.get_tokens_interface().resolve("${omni.syntheticdata}")).joinpath("data") UI_STYLE = {"Menu.Item.Icon::SyntheticData": {"image_url": str(ICON_PATH.joinpath("sensor_icon.svg"))}} class SensorAngleModel(ui.AbstractValueModel): def __init__(self, getter, setter, *args, **kwargs): super().__init__(*args, **kwargs) self.__getter = getter self.__setter = setter def destroy(self): self.__getter = None self.__setter = None def get_value_as_float(self) -> float: return self.__getter() def get_value_as_int(self) -> int: return int(self.get_value_as_float()) def set_value(self, value): value = float(value) if self.get_value_as_float() != value: self.__setter(value) self._value_changed() class SensorVisualizationModel(ui.AbstractValueModel): def __init__(self, sensor: str, visualizer_window, *args, **kwargs): super().__init__(*args, **kwargs) self.__sensor = sensor self.__visualizer_window = visualizer_window def destroy(self): self.__visualizer_window = None def get_value_as_bool(self) -> bool: try: return bool(self.__sensor in self.__visualizer_window.visualization_activation) except: return False def get_value_as_int(self) -> int: return 1 if self.get_value_as_bool() else 0 def set_value(self, enabled): enabled = bool(enabled) if self.get_value_as_bool() != enabled: self.__visualizer_window.on_sensor_item_clicked(enabled, self.__sensor) self._value_changed() class SynthDataMenuContainer(ViewportMenuContainer): def __init__(self): super().__init__(name="SyntheticData", delegate=IconMenuDelegate("SyntheticData"), # tooltip="Synthetic Data Sensors"), order=-10, style=UI_STYLE) self.__hide_on_click = False self.__visualizer_window = None self.__sensor_models = set() def __del__(self): self.destroy() def destroy(self): self.__sensor_models = set() if self.__visualizer_window: self.__visualizer_window.close() self.__visualizer_window = None super().destroy() def build_fn(self, desc: dict): viewport_api = desc.get("viewport_api") if not viewport_api: raise RuntimeError("Need a viewport_api") if self.__visualizer_window: self.__visualizer_window.close() self.__visualizer_window = None name = f"{viewport_api.usd_context_name}" self.__visualizer_window = VisualizerWindow(name, viewport_api) with self: self.add_render_settings_items() ViewportMenuSeparator() self.add_angles_items() ViewportMenuSeparator() self.add_sensor_selection() ViewportMenuSeparator() ViewportMenuItem(name="Clear All", hide_on_click=self.__hide_on_click, onclick_fn=self.clear_all) ViewportMenuItem(name="Show Window", hide_on_click=self.__hide_on_click, onclick_fn=self.show_window) super().build_fn(desc) def add_render_settings_items(self): render_product_combo_model = 
self.__visualizer_window.render_product_combo_model if render_product_combo_model: ViewportMenuItem( "RenderProduct", delegate=ComboBoxMenuDelegate(model=render_product_combo_model), hide_on_click=self.__hide_on_click, ) render_var_combo_model = self.__visualizer_window.render_var_combo_model if render_var_combo_model: ViewportMenuItem( "RenderVar", delegate=ComboBoxMenuDelegate(model=render_var_combo_model), hide_on_click=self.__hide_on_click, ) def add_angles_items(self): render_var_combo_model = self.__visualizer_window.render_var_combo_model if render_var_combo_model: ViewportMenuItem( name="Angle", hide_on_click=self.__hide_on_click, delegate=SliderMenuDelegate( model=SensorAngleModel(render_var_combo_model.get_combine_angle, render_var_combo_model.set_combine_angle), min=-100.0, max=100.0, tooltip="Set Combine Angle", ), ) ViewportMenuItem( name="X", hide_on_click=self.__hide_on_click, delegate=SliderMenuDelegate( model=SensorAngleModel(render_var_combo_model.get_combine_divide_x, render_var_combo_model.set_combine_divide_x), min=-100.0, max=100.0, tooltip="Set Combine Divide X", ), ) ViewportMenuItem( name="Y", hide_on_click=self.__hide_on_click, delegate=SliderMenuDelegate( model=SensorAngleModel(render_var_combo_model.get_combine_divide_y, render_var_combo_model.set_combine_divide_y), min=-100.0, max=100.0, tooltip="Set Combine Divide Y", ), ) def add_sensor_selection(self): for sensor_label, sensor in SyntheticData.get_registered_visualization_template_names_for_display(): model = SensorVisualizationModel(sensor, weakref.proxy(self.__visualizer_window)) self.__sensor_models.add(model) ViewportMenuItem( name=sensor_label, hide_on_click=self.__hide_on_click, delegate=CheckboxMenuDelegate(model=model, tooltip=f'Enable "{sensor}" visualization') ) def clear_all(self, *args, **kwargs): for smodel in self.__sensor_models: smodel.set_value(False) # XXX: This isn't really neccessary if self.__visualizer_window: self.__visualizer_window.visualization_activation.clear() def show_window(self, *args, **kwargs): self.__visualizer_window.toggle_enable_visualization() SyntheticData.disable_async_rendering()
7,377
Python
34.301435
113
0.591975
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/scripts/visualizer_window.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # __all__ = ["VisualizerWindow"] import omni.ui as ui from .SyntheticData import SyntheticData from .model import RenderProductModel, RenderVarModel import math DEBUG_VIEW = False class VisualizerWindow: def __init__(self, name, viewport_api): # create the window self._visualize_window = ui.Window(name + " Sensors Output ", width=800, height=600) self._visualize_window.set_width_changed_fn(lambda _: self._update_visualization_ui()) self._visualize_window.set_height_changed_fn(lambda _: self._update_visualization_ui()) self._visualize_window.visible = False self._render_product_combo_model = RenderProductModel(name, viewport_api) if DEBUG_VIEW else None self._render_var_combo_model = RenderVarModel(viewport_api) self._render_product_path = self._render_var_combo_model.get_render_product_path() # activated visualization contains the set of display node that have been activated through the UI self._visualization_activation = set() # visualisation_data contain the image provider for all currently activated display node self._activated_visualization_data = {} if hasattr(viewport_api, 'subscribe_to_frame_change'): self.__frame_changed_sub = viewport_api.subscribe_to_frame_change(self.__frame_changed) def __frame_changed(self, viewport_api): render_product = self._render_var_combo_model.get_render_product_path() self.update(render_product, viewport_api.stage) def close(self): self.__frame_changed_sub = None if self._visualize_window: self._visualize_window.visible = False self._visualize_window = None if self._render_product_combo_model: self._render_product_combo_model = None if self._render_var_combo_model: self._render_var_combo_model = None self._visualization_activation = set() self._activated_visualization_data = {} @property def render_product_combo_model(self): return self._render_product_combo_model @property def render_var_combo_model(self): return self._render_var_combo_model @property def visualization_activation(self): return self._visualization_activation # callback function for handling sensor selection def on_sensor_item_clicked(self, checked, sensor): if checked: self._visualization_activation.add(sensor) else: self._visualization_activation.remove(sensor) # visualization callback def toggle_enable_visualization(self): if self._visualize_window: self._visualize_window.visible = not self._visualize_window.visible def update(self, render_product_path: str, stage): sdg_iface = SyntheticData.Get() if render_product_path != self._render_product_path: for sensor in self._activated_visualization_data: sdg_iface.deactivate_node_template(sensor,0,[render_product_path]) self._visualization_activation = set() self._activated_visualization_data = {} self._render_product_path = render_product_path self._render_var_combo_model.set_default_item() # update the activated sensors visualization_activation = self._visualization_activation.copy() # NB this is not threadsafe to_activate = visualization_activation.difference(set(self._activated_visualization_data.keys())) to_deactivate = set(self._activated_visualization_data.keys()).difference(visualization_activation) 
self._activated_visualization_data = {} for sensor in visualization_activation: self._activated_visualization_data[sensor] = None for sensor in to_activate: sdg_iface.activate_node_template(sensor, 0, [render_product_path], None, stage) for sensor in to_deactivate: sdg_iface.deactivate_node_template(sensor, 0, [render_product_path], stage) # update the visualization window if self._visualize_window.visible: for sensor in self._activated_visualization_data: # create image provider from the sensor texture data self._activated_visualization_data[sensor] = ui.ImageProvider() display_output_names = ["outputs:handlePtr", "outputs:width", "outputs:height", "outputs:format"] display_outputs = sdg_iface.get_node_attributes(sensor, display_output_names, render_product_path) if display_outputs and all(o in display_outputs for o in display_output_names): self._activated_visualization_data[sensor].set_image_data( display_outputs["outputs:handlePtr"], display_outputs["outputs:width"], display_outputs["outputs:height"], ui.TextureFormat(display_outputs["outputs:format"]) ) self._update_visualization_ui() def _update_visualization_ui(self): num_sensors = len(self._activated_visualization_data) if num_sensors == 0: rows, columns = 0, 0 else: # Attempt a responsive layout to the number of enabled sensors columns = math.ceil(math.sqrt(num_sensors)) rows = math.ceil(num_sensors / columns) if self._visualize_window.height > self._visualize_window.width: columns, rows = rows, columns enabled_sensors = list(self._activated_visualization_data.keys()) with self._visualize_window.frame: with ui.VStack(): idx = 0 for _ in range(rows): with ui.HStack(): for col in range(columns): sensor = enabled_sensors[idx] with ui.VStack(): ui.Label(sensor, alignment=ui.Alignment.CENTER, height=20) ui.ImageWithProvider( self._activated_visualization_data[sensor], alignment=ui.Alignment.CENTER_TOP ) ui.Spacer(height=20) idx += 1 if col < columns - 1: # Add a spacer if inner grid edge ui.Spacer(width=3) if idx >= len(enabled_sensors): break
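The grid sizing in _update_visualization_ui is plain math and can be checked outside Kit; a standalone sketch with a hypothetical helper name:

# Sketch of the responsive grid used to lay out the sensor tiles: a roughly square grid,
# flipped when the window is taller than it is wide.
import math

def grid_shape(num_sensors: int, window_width: float, window_height: float):
    if num_sensors == 0:
        return 0, 0
    columns = math.ceil(math.sqrt(num_sensors))
    rows = math.ceil(num_sensors / columns)
    if window_height > window_width:
        columns, rows = rows, columns
    return rows, columns

print(grid_shape(5, 800, 600))   # (2, 3): 2 rows of up to 3 tiles in a landscape window
print(grid_shape(5, 600, 800))   # (3, 2): a portrait window favours more rows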
7,055
Python
44.52258
114
0.619277
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/__init__.py
""" Presence of this file allows the tests directory to be imported as a module so that all of its contents can be scanned to automatically add tests that are placed into this directory. """ scan_for_test_modules = True from .sensors.test_bbox3d import * from .sensors.test_bbox2d_loose import * from .sensors.test_bbox2d_tight import * from .sensors.test_distance_to_camera import * from .sensors.test_distance_to_image_plane import * from .sensors.test_depth import * # *** DEPRECATED *** from .sensors.test_depth_linear import * # *** DEPRECATED *** from .sensors.test_motion_vector import * from .sensors.test_normals import * from .sensors.test_occlusion import * from .sensors.test_rgb import * from .sensors.test_instance_seg import * from .sensors.test_semantic_seg import * from .sensors.test_cross_correspondence import * from .sensors.test_swh_frame_number import * from .sensors.test_renderproduct_camera import * from .sensors.test_rendervar_buff_host_ptr import * from .sensors.test_semantic_filter import * from .helpers.test_instance_mapping import * from .helpers.test_projection import * from .helpers.test_bboxes import * from .visualize.test_semantic_seg import * from .visualize.test_flattener import * from .pipeline.test_instance_mapping import * from .pipeline.test_swh_frame_number import * from .pipeline.test_renderproduct_camera import * from .graph.test_graph_manipulation import *
1,417
Python
37.324323
103
0.774876
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/utils.py
import random

import numpy as np
from pxr import Gf, Semantics


def add_semantics(prim, semantic_label, semantic_type="class"):
    if not prim.HasAPI(Semantics.SemanticsAPI):
        sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
        sem.CreateSemanticTypeAttr()
        sem.CreateSemanticDataAttr()
        sem.GetSemanticTypeAttr().Set(semantic_type)
        sem.GetSemanticDataAttr().Set(semantic_label)


def get_random_transform():
    camera_tf = np.eye(4)
    camera_tf[:3, :3] = Gf.Matrix3d(Gf.Rotation(np.random.rand(3).tolist(), np.random.rand(3).tolist()))
    camera_tf[3, :3] = np.random.rand(3).tolist()
    return Gf.Matrix4d(camera_tf)
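A usage sketch for add_semantics, assuming a pxr build that ships the Semantics schema (as in Kit/Isaac Sim); the stage and prim here are created purely for illustration:

# Usage sketch for add_semantics (relies on the helper defined above and the Semantics schema).
from pxr import Usd, Semantics

stage = Usd.Stage.CreateInMemory()
cube = stage.DefinePrim("/World/Cube", "Cube")
add_semantics(cube, "cube")          # default semantic_type is "class"
add_semantics(cube, "prop")          # no-op: the API was already applied to this prim

# read back through the applied multi-apply schema instance named "Semantics"
sem = Semantics.SemanticsAPI(cube, "Semantics")
print(sem.GetSemanticTypeAttr().Get(), sem.GetSemanticDataAttr().Get())  # expected: class cube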
666
Python
30.761903
104
0.689189
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/pipeline/test_renderproduct_camera.py
import carb from pxr import Gf, UsdGeom, UsdLux, Sdf import omni.hydratexture import omni.kit.test from omni.syntheticdata import SyntheticData, SyntheticDataStage # Test the instance mapping pipeline class TestRenderProductCamera(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) def render_product_path(self, hydra_texture) -> str: '''Return a string to the UsdRender.Product used by the texture''' render_product = hydra_texture.get_render_product_path() if render_product and (not render_product.startswith('/')): render_product = '/Render/RenderProduct_' + render_product return render_product def register_test_rp_cam_pipeline(self): sdg_iface = SyntheticData.Get() if not sdg_iface.is_node_template_registered("TestSimRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdTestRenderProductCamera", attributes={"inputs:stage":"simulation"} ), template_name="TestSimRpCam" ) if not sdg_iface.is_node_template_registered("TestPostRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdTestRenderProductCamera", [SyntheticData.NodeConnectionTemplate("PostRenderProductCamera")], attributes={"inputs:stage":"postRender"} ), template_name="TestPostRpCam" ) if not sdg_iface.is_node_template_registered("TestOnDemandRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdTestRenderProductCamera", [SyntheticData.NodeConnectionTemplate("PostProcessRenderProductCamera")], attributes={"inputs:stage":"onDemand"} ), template_name="TestOnDemandRpCam" ) def activate_test_rp_cam_pipeline(self, test_case_index): sdg_iface = SyntheticData.Get() attributes = { "inputs:renderProductCameraPath": self._camera_path, "inputs:width": self._resolution[0], "inputs:height": self._resolution[1], "inputs:traceError": True } sdg_iface.activate_node_template("TestSimRpCam", 0, [self.render_product_path(self._hydra_texture_0)], attributes) sdg_iface.activate_node_template("TestPostRpCam", 0, [self.render_product_path(self._hydra_texture_0)], attributes) sdg_iface.activate_node_template("TestOnDemandRpCam", 0, [self.render_product_path(self._hydra_texture_0)],attributes) async def wait_for_num_frames(self, num_frames, max_num_frames=5000): self._hydra_texture_rendered_counter = 0 wait_frames_left = max_num_frames while (self._hydra_texture_rendered_counter < num_frames) and (wait_frames_left > 0): await omni.kit.app.get_app().next_update_async() wait_frames_left -= 1 async def setUp(self): self._settings = carb.settings.acquire_settings_interface() self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface() self._usd_context_name = '' self._usd_context = omni.usd.get_context(self._usd_context_name) await self._usd_context.new_stage_async() # camera self._camera_path = "/TestRPCamera" UsdGeom.Camera.Define(omni.usd.get_context().get_stage(), self._camera_path) self._resolution = [980,540] # renderer renderer = "rtx" if renderer not in self._usd_context.get_attached_hydra_engine_names(): omni.usd.add_hydra_engine(renderer, self._usd_context) # create the hydra textures self._hydra_texture_0 = self._hydra_texture_factory.create_hydra_texture( "TEX0", width=self._resolution[0], height=self._resolution[1], usd_context_name=self._usd_context_name, usd_camera_path=self._camera_path, hydra_engine_name=renderer, 
is_async=self._settings.get("/app/asyncRendering") ) self._hydra_texture_rendered_counter = 0 def on_hydra_texture_0(event: carb.events.IEvent): self._hydra_texture_rendered_counter += 1 self._hydra_texture_rendered_counter_sub = self._hydra_texture_0.get_event_stream().create_subscription_to_push_by_type( omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED, on_hydra_texture_0, name='async rendering test drawable update', ) self.register_test_rp_cam_pipeline() async def tearDown(self): self._hydra_texture_rendered_counter_sub = None self._hydra_texture_0 = None self._usd_context.close_stage() omni.usd.release_all_hydra_engines(self._usd_context) self._hydra_texture_factory = None self._settings = None wait_iterations = 6 for _ in range(wait_iterations): await omni.kit.app.get_app().next_update_async() async def test_case_0(self): print("test actual camera pipeline here.") self.activate_test_rp_cam_pipeline(0) await self.wait_for_num_frames(33)
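The registration-then-activation pattern this test relies on, distilled; it assumes a running Kit process with omni.syntheticdata loaded, "MyPostRpCam" and the render product path are placeholders, and the node type and connection come from the test above:

# Distilled register/activate sketch (placeholder template name and render product path).
from omni.syntheticdata import SyntheticData, SyntheticDataStage

sdg_iface = SyntheticData.Get()
if not sdg_iface.is_node_template_registered("MyPostRpCam"):
    sdg_iface.register_node_template(
        SyntheticData.NodeTemplate(
            SyntheticDataStage.POST_RENDER,                      # stage the node executes in
            "omni.syntheticdata.SdTestRenderProductCamera",      # node type used by this test
            [SyntheticData.NodeConnectionTemplate("PostRenderProductCamera")],  # upstream dependency
            attributes={"inputs:stage": "postRender"},
        ),
        template_name="MyPostRpCam",
    )

# once registered, the template is activated per render product like any built-in one
sdg_iface.activate_node_template("MyPostRpCam", 0, ["/Render/RenderProduct_TEX0"],
                                 {"inputs:traceError": True})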
5,694
Python
41.819549
128
0.612048
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/pipeline/test_swh_frame_number.py
import carb from pxr import Gf, UsdGeom, UsdLux, Sdf import omni.hydratexture import omni.kit.test from omni.syntheticdata import SyntheticData, SyntheticDataStage # Test the Fabric frame number synchronization class TestSWHFrameNumber(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) def render_product_path(self, hydra_texture) -> str: '''Return a string to the UsdRender.Product used by the texture''' render_product = hydra_texture.get_render_product_path() if render_product and (not render_product.startswith('/')): render_product = '/Render/RenderProduct_' + render_product return render_product async def wait_for_num_sims(self, num_sims, max_num_sims=5000): self._hydra_texture_rendered_counter = 0 wait_sims_left = max_num_sims while (self._hydra_texture_rendered_counter < num_sims) and (wait_sims_left > 0): await omni.kit.app.get_app().next_update_async() wait_sims_left -= 1 async def setUp(self): self._settings = carb.settings.acquire_settings_interface() self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface() self._usd_context_name = '' self._usd_context = omni.usd.get_context(self._usd_context_name) await self._usd_context.new_stage_async() # Setup the scene stage = omni.usd.get_context().get_stage() world_prim = UsdGeom.Xform.Define(stage,"/World") UsdGeom.Xformable(world_prim).AddTranslateOp().Set((0, 0, 0)) UsdGeom.Xformable(world_prim).AddRotateXYZOp().Set((0, 0, 0)) capsule0_prim = stage.DefinePrim("/World/Capsule0", "Capsule") UsdGeom.Xformable(capsule0_prim).AddTranslateOp().Set((100, 0, 0)) UsdGeom.Xformable(capsule0_prim).AddScaleOp().Set((30, 30, 30)) UsdGeom.Xformable(capsule0_prim).AddRotateXYZOp().Set((-90, 0, 0)) capsule0_prim.GetAttribute("primvars:displayColor").Set([(0.3, 1, 0)]) capsule1_prim = stage.DefinePrim("/World/Capsule1", "Capsule") UsdGeom.Xformable(capsule1_prim).AddTranslateOp().Set((-100, 0, 0)) UsdGeom.Xformable(capsule1_prim).AddScaleOp().Set((30, 30, 30)) UsdGeom.Xformable(capsule1_prim).AddRotateXYZOp().Set((-90, 0, 0)) capsule1_prim.GetAttribute("primvars:displayColor").Set([(0, 1, 0.3)]) spherelight = UsdLux.SphereLight.Define(stage, "/SphereLight") spherelight.GetIntensityAttr().Set(30000) spherelight.GetRadiusAttr().Set(30) camera_1 = stage.DefinePrim("/Camera1", "Camera") camera_1.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") UsdGeom.Xformable(camera_1).AddTranslateOp().Set((0, 250, 0)) UsdGeom.Xformable(camera_1).AddRotateXYZOp().Set((-90, 0, 0)) # renderer renderer = "rtx" if renderer not in self._usd_context.get_attached_hydra_engine_names(): omni.usd.add_hydra_engine(renderer, self._usd_context) # create the hydra textures self._hydra_texture_0 = self._hydra_texture_factory.create_hydra_texture( "TEX0", 1920, 1080, self._usd_context_name, hydra_engine_name=renderer, is_async=self._settings.get("/app/asyncRendering") ) render_product_path_0 = self.render_product_path(self._hydra_texture_0) self._hydra_texture_rendered_counter = 0 def on_hydra_texture_0(event: carb.events.IEvent): self._hydra_texture_rendered_counter += 1 self._hydra_texture_rendered_counter_sub = self._hydra_texture_0.get_event_stream().create_subscription_to_push_by_type( omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED, on_hydra_texture_0, name='async rendering test drawable update', ) self._hydra_texture_1 = self._hydra_texture_factory.create_hydra_texture( "TEX1", 512, 512, self._usd_context_name, str(camera_1.GetPath()), 
hydra_engine_name=renderer, is_async=self._settings.get("/app/asyncRendering") ) render_product_path_1 = self.render_product_path(self._hydra_texture_1) # SyntheticData singleton interface sdg_iface = SyntheticData.Get() # Register node templates in the SyntheticData register # (a node template is a template for creating a node specified by its type and its connections) # # to illustrate we are using the generic omni.syntheticdata.SdTestStageSynchronization node type which supports every stage of the SyntheticData pipeline. When executed it logs the fabric frame number. # # register a node template in the simulation stage # NB : this node template has no connections if not sdg_iface.is_node_template_registered("TestSyncSim"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, # node tempalte stage "omni.syntheticdata.SdTestStageSynchronization", # node template type attributes={ "inputs:tag":"0", "inputs:randomSeed": 13, "inputs:randomMaxProcessingTimeUs": 33333, "inputs:traceError": True } ), # node template default attribute values (when differs from the default value specified in the .ogn) template_name="TestSyncSim" # node template name ) # register a node template in the postrender stage # NB : this template may be activated for several different renderproducts if not sdg_iface.is_node_template_registered("TestSyncPost"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncSim node (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("TestSyncSim", (), None), # connected to a LdrColorSD rendervar (the renderVar will be activated when activating this template) SyntheticData.NodeConnectionTemplate("LdrColorSD"), # connected to a BoundingBox3DSD rendervar (the renderVar will be activated when activating this template) SyntheticData.NodeConnectionTemplate("BoundingBox3DSD") ], attributes={ "inputs:randomSeed": 27, "inputs:randomMaxProcessingTimeUs": 33333, "inputs:traceError": True } ), template_name="TestSyncPost" # node template name ) # register a node template in the postprocess stage # NB : this template may be activated for several different renderproducts if not sdg_iface.is_node_template_registered("TestSyncOnDemand"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncSim node (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("TestSyncSim", (), None), # connected to a PostProcessDispatch node : the PostProcessDispatch node trigger the execution of its downstream connections for every rendered frame # (a PostProcessDispatch node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("PostProcessDispatch") ], attributes={ "inputs:randomSeed": 51, "inputs:randomMaxProcessingTimeUs": 33333, "inputs:traceError": True } # node template default attribute values (when differs from the default value specified in the .ogn) ), template_name="TestSyncOnDemand" # node template name ) # register a node template in the postprocess stage # NB : this template may be activated for any combination of renderproduct pairs if not 
sdg_iface.is_node_template_registered("TestSyncCross"): # register an accumulator which trigger once when all its upstream connections have triggered sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.graph.action.SyncGate", # node template type # node template connections [ # connected to the PostProcessDispatcher for the synchronization value SyntheticData.NodeConnectionTemplate( "PostProcessDispatcher", (), {"outputs:swhFrameNumber":"inputs:syncValue"} ), # connected to a TestSyncOnDemand node for the first renderproduct (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncOnDemand", (0,), {"outputs:exec":"inputs:execIn"} ), # connected to a TestSyncOnDemand node for the second renderproduct (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncOnDemand", (1,), {"outputs:exec":"inputs:execIn"} ), ] ), template_name="TestSyncAccum" # node template name ) sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncAccum node (a TestSyncAccum node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncAccum", (0,1), { "outputs:execOut":"inputs:exec", "outputs:syncValue":"inputs:swhFrameNumber" } ), ], attributes={ "inputs:randomSeed": 62, "inputs:randomMaxProcessingTimeUs": 33333, "inputs:traceError": True } ), template_name="TestSyncCross" # node template name ) # Activate the node templates for the renderproducts # this will create the node (and all their missing dependencies) within the associated graphs # # activate the TestSyncSim sdg_iface.activate_node_template("TestSyncSim") # wait for the next update to make sure the simulation node is activated when activating the post-render and post-process nodes # activate the TestSyncPost for the renderpoduct renderpoduct_0 # this will also activate the LdrColorSD and BoundingBox3DSD renderVars for the renderpoduct renderpoduct_0 # this will set the tag node attribute to "1" sdg_iface.activate_node_template("TestSyncPost", 0, [render_product_path_0],{"inputs:tag":"1"}) # activate the TestSyncPost for the renderpoduct renderpoduct_1 # this will also activate the LdrColorSD and BoundingBox3DSD renderVars for the renderpoduct renderpoduct_1 # NB TestSyncSim has already been activated # this will set the tag node attribute to "2" sdg_iface.activate_node_template("TestSyncPost", 0, [render_product_path_1],{"inputs:tag":"2"}) # FIXME : wait a couple of simulation updates as a workaround of an issue with the first # syncGate not being activated await self.wait_for_num_sims(3) # activate the TestSyncCross for the renderpoducts [renderproduct_0, renderproduct_1] # this will also activate : # - TestSyncAccum for the renderpoducts [renderproduct_0, renderproduct_1] # - PostProcessDispatch for the renderpoduct renderproduct_0 # - TestSyncOnDemand for the renderproduct renderproduct_0 # - TestSyncOnDemand for the renderproduct renderproduct_1 # - PostProcessDispatch for the renderpoduct renderproduct_1 # this will set the tag node attribute to "5" and processingTime to 30000 sdg_iface.activate_node_template("TestSyncCross", 0, [render_product_path_0,render_product_path_1],{"inputs:tag":"5"}) # Set some specific attributes to nodes that have been 
automatically activated # set the tag to the TestSyncOnDemand for renderproduct renderproduct_0 sdg_iface.set_node_attributes("TestSyncOnDemand",{"inputs:tag":"3"},render_product_path_0) # set the tag to the TestSyncOnDemand for renderproduct renderproduct_1 sdg_iface.set_node_attributes("TestSyncOnDemand",{"inputs:tag":"4"},render_product_path_1) # setup members self._num_sims = 555 async def tearDown(self): self._hydra_texture_rendered_counter_sub = None self._hydra_texture_0 = None self._hydra_texture_1 = None self._usd_context.close_stage() omni.usd.release_all_hydra_engines(self._usd_context) self._hydra_texture_factory = None self._settings = None wait_iterations = 6 for _ in range(wait_iterations): await omni.kit.app.get_app().next_update_async() async def test_pipline(self): """ Test swh frame synhronization """ await self.wait_for_num_sims(self._num_sims)
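The connection declarations above compress a lot of behaviour; this sketch pulls out the two NodeConnectionTemplate forms used in this test (explicit attribute mapping, and render_product_idxs selecting which of the activation's render products the upstream node is bound to). The objects would normally be placed in a NodeTemplate's connection list before registration:

# Sketch of the two NodeConnectionTemplate forms used above (names as in this test).
from omni.syntheticdata import SyntheticData

# explicit attribute mapping: upstream "outputs:swhFrameNumber" feeds this node's "inputs:syncValue"
sync_conn = SyntheticData.NodeConnectionTemplate(
    "PostProcessDispatcher", (), {"outputs:swhFrameNumber": "inputs:syncValue"}
)

# render_product_idxs (0, 1) binds the upstream TestSyncAccum instance that spans both
# render products of the activation, with its exec/sync outputs mapped onto this node's inputs
accum_conn = SyntheticData.NodeConnectionTemplate(
    "TestSyncAccum", (0, 1),
    {"outputs:execOut": "inputs:exec", "outputs:syncValue": "inputs:swhFrameNumber"},
)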
15,183
Python
51.178694
209
0.595534
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/pipeline/test_instance_mapping.py
import carb from pxr import Gf, UsdGeom, UsdLux, Sdf import omni.hydratexture import omni.kit.test from omni.syntheticdata import SyntheticData, SyntheticDataStage # Test the instance mapping pipeline class TestInstanceMapping(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) def render_product_path(self, hydra_texture) -> str: '''Return a string to the UsdRender.Product used by the texture''' render_product = hydra_texture.get_render_product_path() if render_product and (not render_product.startswith('/')): render_product = '/Render/RenderProduct_' + render_product return render_product def register_test_instance_mapping_pipeline(self): sdg_iface = SyntheticData.Get() if not sdg_iface.is_node_template_registered("TestSimSWHFrameNumber"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdUpdateSwFrameNumber" ), template_name="TestSimSWHFrameNumber" ) if not sdg_iface.is_node_template_registered("TestSimInstanceMapping"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdTestInstanceMapping", [ SyntheticData.NodeConnectionTemplate("TestSimSWHFrameNumber", ()) ], {"inputs:stage":"simulation"} ), template_name="TestSimInstanceMapping" ) if not sdg_iface.is_node_template_registered("TestOnDemandInstanceMapping"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdTestInstanceMapping", [ SyntheticData.NodeConnectionTemplate("InstanceMappingPtrWithTransforms"), SyntheticData.NodeConnectionTemplate("TestSimInstanceMapping", (), attributes_mapping={"outputs:exec": "inputs:exec"}) ], {"inputs:stage":"ondemand"} ), template_name="TestOnDemandInstanceMapping" ) def activate_test_instance_mapping_pipeline(self, case_index): sdg_iface = SyntheticData.Get() sdg_iface.activate_node_template("TestSimInstanceMapping", attributes={"inputs:testCaseIndex":case_index}) sdg_iface.activate_node_template("TestOnDemandInstanceMapping", 0, [self.render_product_path(self._hydra_texture_0)], {"inputs:testCaseIndex":case_index}) sdg_iface.connect_node_template("TestSimInstanceMapping", "InstanceMappingPre", None, {"outputs:semanticFilterPredicate":"inputs:semanticFilterPredicate"}) async def wait_for_num_frames(self, num_frames, max_num_frames=5000): self._hydra_texture_rendered_counter = 0 wait_frames_left = max_num_frames while (self._hydra_texture_rendered_counter < num_frames) and (wait_frames_left > 0): await omni.kit.app.get_app().next_update_async() wait_frames_left -= 1 async def setUp(self): self._settings = carb.settings.acquire_settings_interface() self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface() self._usd_context_name = '' self._usd_context = omni.usd.get_context(self._usd_context_name) await self._usd_context.new_stage_async() # renderer renderer = "rtx" if renderer not in self._usd_context.get_attached_hydra_engine_names(): omni.usd.add_hydra_engine(renderer, self._usd_context) # create the hydra textures self._hydra_texture_0 = self._hydra_texture_factory.create_hydra_texture( "TEX0", 1920, 1080, self._usd_context_name, hydra_engine_name=renderer, is_async=self._settings.get("/app/asyncRendering") ) self._hydra_texture_rendered_counter = 0 def on_hydra_texture_0(event: carb.events.IEvent): self._hydra_texture_rendered_counter += 1 self._hydra_texture_rendered_counter_sub = 
self._hydra_texture_0.get_event_stream().create_subscription_to_push_by_type( omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED, on_hydra_texture_0, name='async rendering test drawable update', ) self.register_test_instance_mapping_pipeline() async def tearDown(self): self._hydra_texture_rendered_counter_sub = None self._hydra_texture_0 = None self._usd_context.close_stage() omni.usd.release_all_hydra_engines(self._usd_context) self._hydra_texture_factory = None self._settings = None wait_iterations = 6 for _ in range(wait_iterations): await omni.kit.app.get_app().next_update_async() async def test_case_0(self): self.activate_test_instance_mapping_pipeline(0) await self.wait_for_num_frames(11) async def test_case_1(self): self.activate_test_instance_mapping_pipeline(1) await self.wait_for_num_frames(11) async def test_case_2(self): self.activate_test_instance_mapping_pipeline(2) await self.wait_for_num_frames(11) async def test_case_3(self): self.activate_test_instance_mapping_pipeline(3) await self.wait_for_num_frames(11) async def test_case_4(self): self.activate_test_instance_mapping_pipeline(4) await self.wait_for_num_frames(11)
6,115
Python
40.605442
142
0.601962
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_motion_vector.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from PIL import Image from time import time from pathlib import Path import carb import numpy as np from numpy.lib.arraysetops import unique import unittest import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestMotionVector(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" self.output_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "output" def writeDataToImage(self, data, name): if not os.path.isdir(self.output_image_path): os.mkdir(self.output_image_path) data = ((data + 1.0) / 2) * 255 outputPath = str(self.output_image_path) + "/" + name + ".png" print("Writing data to " + outputPath) Image.fromarray(data.astype(np.uint8), "RGBA").save(outputPath) # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.MotionVector) async def test_empty(self): """ Test motion vector sensor on empty stage. 
""" await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_motion_vector(self.viewport) allChannelsAreZero = np.allclose(data, 0, atol=0.001) if not allChannelsAreZero: self.writeDataToImage(data, "test_empty") assert allChannelsAreZero async def test_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_motion_vector(self.viewport) assert data.dtype == np.float32 async def test_unmoving_cube(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) cube.GetAttribute("primvars:displayColor").Set([(0, 0, 1)]) UsdGeom.Xformable(cube).AddTranslateOp() cube.GetAttribute("xformOp:translate").Set((350, 365, 350), time=0) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_motion_vector(self.viewport) # 4th channel will wary based on geo hit, so we ignore checking it here rgbChannelsAreZero = np.allclose(data[:, [0, 1, 2]], 0, atol=0.001) if not rgbChannelsAreZero: self.writeDataToImage(data, "test_unmoving_cube") assert rgbChannelsAreZero @unittest.skip("OM-44310") async def test_partially_disoccluding_cube(self): # disabling temporarly the test for OMNI-GRAPH support : OM-44310 stage = omni.usd.get_context().get_stage() stage.SetStartTimeCode(0) stage.SetEndTimeCode(100) cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(10) cube.GetAttribute("primvars:displayColor").Set([(0, 0, 1)]) # add translation down to create disocclusion due to fetching from out of screen bounds UsdGeom.Xformable(cube).AddTranslateOp() cube.GetAttribute("xformOp:translate").Set((480, 487, 480), time=0) cube.GetAttribute("xformOp:translate").Set((480, 480, 480), time=0.001) # add rotation around up vector to create disocclusion due to fetching from an incompatible surface UsdGeom.Xformable(cube).AddRotateYOp() cube.GetAttribute("xformOp:rotateY").Set(40, time=0) cube.GetAttribute("xformOp:rotateY").Set(70, time=0.001) await omni.kit.app.get_app().next_update_async() # Render one frame itl = omni.timeline.get_timeline_interface() itl.play() await syn.sensors.next_sensor_data_async(self.viewport, True) data = syn.sensors.get_motion_vector(self.viewport) golden_image = np.load(self.golden_image_path / "motion_partially_disoccluding_cube.npz")["array"] # normalize xy (mvec) to zw channels' value range # x100 seems like a good number to bring mvecs to ~1 data[:, [0, 1]] *= 100 golden_image[:, [0, 1]] *= 100 std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) # OM-41605 - using higher std dev here to make linux run succeed std_dev_tolerance = 0.12 print("Calculated std.dev: " + str(std_dev), " Std dev tolerance: " + str(std_dev_tolerance)) if std_dev >= std_dev_tolerance: self.writeDataToImage(golden_image, "test_partially_disoccluding_cube_golden") self.writeDataToImage(data, "test_partially_disoccluding_cube") np.savez_compressed(self.output_image_path / "motion_partially_disoccluding_cube.npz", array=data) assert std_dev < std_dev_tolerance # After running each test async def tearDown(self): pass
6,086
Python
41.566433
141
0.661847
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_occlusion.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math from time import time from pathlib import Path import carb import numpy as np import unittest import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import UsdGeom, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestOcclusion(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" # Before running each test async def setUp(self): await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() self.viewport = get_active_viewport() # Initialize Sensors syn.sensors.enable_sensors( self.viewport, [ syn._syntheticdata.SensorType.BoundingBox2DLoose, syn._syntheticdata.SensorType.BoundingBox2DTight, syn._syntheticdata.SensorType.Occlusion, ], ) await syn.sensors.next_sensor_data_async(self.viewport,True) async def test_fields_exist(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_occlusion(self.viewport) valid_dtype = [("instanceId", "<u4"), ("semanticId", "<u4"), ("occlusionRatio", "<f4")] assert data.dtype == np.dtype(valid_dtype) async def test_fields_exist_parsed(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_occlusion(self.viewport, parsed=True) valid_dtype = [ ("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O"), ("semanticId", "<u4"), ("occlusionRatio", "<f4"), ] assert data.dtype == np.dtype(valid_dtype) async def test_occlusion(self): path = os.path.join(FILE_DIR, "../data/scenes/occlusion.usda") await omni.usd.get_context().open_stage_async(path) syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) occlusion_out = syn.sensors.get_occlusion(self.viewport, parsed=True) for row in occlusion_out: gt = float(row["semanticLabel"]) / 100.0 assert math.isclose(gt, row["occlusionRatio"], abs_tol=0.015), f"Expected {gt}, got {row['occlusionRatio']}" async def test_self_occlusion(self): path = os.path.join(FILE_DIR, "../data/scenes/torus_sphere.usda") await omni.usd.get_context().open_stage_async(path) syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) occlusion_out = syn.sensors.get_occlusion(self.viewport) occlusion_out_ratios = np.sort(occlusion_out["occlusionRatio"]) assert np.allclose(occlusion_out_ratios, [0.0, 0.6709], atol=0.05) async def test_full_occlusion(self): path = os.path.join(FILE_DIR, "../data/scenes/cube_full_occlusion.usda") await 
omni.usd.get_context().open_stage_async(path) syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) occlusion_out = syn.sensors.get_occlusion(self.viewport) occlusion_out_ratios = np.sort(occlusion_out["occlusionRatio"]) assert np.allclose(occlusion_out_ratios, [0.0, 1.0], atol=0.05) async def test_occlusion_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) path = os.path.join(FILE_DIR, "../data/scenes/occlusion.usda") await omni.usd.get_context().open_stage_async(path) syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) occlusion_out = syn.sensors.get_occlusion(self.viewport, parsed=True) for row in occlusion_out: gt = float(row["semanticLabel"]) / 100.0 assert math.isclose(gt, row["occlusionRatio"], abs_tol=0.015), f"Expected {gt}, got {row['occlusionRatio']}" async def test_occlusion_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ # Set the rendering mode to be ray traced lighting. settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") path = os.path.join(FILE_DIR, "../data/scenes/occlusion.usda") await omni.usd.get_context().open_stage_async(path) syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) occlusion_out = syn.sensors.get_occlusion(self.viewport, parsed=True) for row in occlusion_out: gt = float(row["semanticLabel"]) / 100.0 assert math.isclose(gt, row["occlusionRatio"], abs_tol=0.015), f"Expected {gt}, got {row['occlusionRatio']}" async def test_occlusion_ftheta(self): """ Basic funtionality test of the sensor under ftheta camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) path = os.path.join(FILE_DIR, "../data/scenes/occlusion.usda") await omni.usd.get_context().open_stage_async(path) await omni.kit.app.get_app().next_update_async() stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((100, 200, 300)) self.viewport.camera_path = camera.GetPath() syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) # Camera type should not affect occlusion. occlusion_out = syn.sensors.get_occlusion(self.viewport, parsed=True) data = np.array([row['occlusionRatio'] for row in occlusion_out]) # np.savez_compressed(self.golden_image_path / 'occlusion_ftheta.npz', array=data) golden = np.load(self.golden_image_path / "occlusion_ftheta.npz")["array"] assert np.isclose(data, golden, atol=1e-3).all() async def test_occlusion_spherical(self): """ Basic funtionality test of the sensor under spherical camera. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) path = os.path.join(FILE_DIR, "../data/scenes/occlusion.usda") await omni.usd.get_context().open_stage_async(path) await omni.kit.app.get_app().next_update_async() stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((100, 200, 300)) self.viewport.camera_path = camera.GetPath() syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion]) await syn.sensors.next_sensor_data_async(self.viewport,True) # Camera type should not affect occlusion. occlusion_out = syn.sensors.get_occlusion(self.viewport, parsed=True) data = np.array([row['occlusionRatio'] for row in occlusion_out]) # np.savez_compressed(self.golden_image_path / 'occlusion_spherical.npz', array=data) golden = np.load(self.golden_image_path / "occlusion_spherical.npz")["array"] assert np.isclose(data, golden, atol=1e-1).all() @unittest.skip("OM-44310") async def test_occlusion_quadrant(self): # disabling temporarly the test for OMNI-GRAPH support : OM-44310 # Test quadrant sensor. It takes loose and tight bounding boxes to # return the type of occlusion # Expected occlusion value for time=1, 2, 3... TESTS = [ "fully-occluded", "left", "right", "bottom", "top", "fully-visible", # corner occlusion "fully-visible", # corner occlusion "bottom-right", "bottom-left", "top-right", "top-left", "fully-visible", ] path = os.path.join(FILE_DIR, "../data/scenes/occlusion_quadrant.usda") await omni.usd.get_context().open_stage_async(path) await omni.kit.app.get_app().next_update_async() syn.sensors.enable_sensors( self.viewport, [ syn._syntheticdata.SensorType.BoundingBox2DLoose, syn._syntheticdata.SensorType.BoundingBox2DTight, syn._syntheticdata.SensorType.Occlusion, ], ) await syn.sensors.next_sensor_data_async(self.viewport,True) timeline_iface = omni.timeline.acquire_timeline_interface() timeline_iface.set_time_codes_per_second(1) for time, gt in enumerate(TESTS): timeline_iface.set_current_time(time) await omni.kit.app.get_app().next_update_async() # Investigate these in OM-31155 sensor_out = syn.sensors.get_occlusion_quadrant(self.viewport) result = sensor_out["occlusion_quadrant"][0] assert result == gt, f"Got {result}, expected {gt}" # After running each test async def tearDown(self): pass
11,419
Python
43.784314
141
0.644715
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_renderproduct_camera.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import carb from pxr import Gf, UsdGeom, Sdf, UsdLux from omni.kit.viewport.utility import get_active_viewport, create_viewport_window import omni.kit.test from omni.syntheticdata import SyntheticData, SyntheticDataStage # Test the RenderProductCamera nodes class TestRenderProductCamera(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) async def setUp(self): settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) self.numLoops = 7 self.multiViewport = False # Setup the scene await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() # Setup viewports / renderproduct # first default viewport with the default perspective camera viewport_0 = get_active_viewport() resolution_0 = viewport_0.resolution camera_0 = UsdGeom.Camera.Define(stage, "/Camera0").GetPrim() viewport_0.camera_path = camera_0.GetPath() render_product_path_0 = viewport_0.render_product_path self.render_product_path_0 = render_product_path_0 # second viewport with a ftheta camera if self.multiViewport: resolution_1 = (512, 512) viewport_window = create_viewport_window(width=resolution_1[0], height=resolution_1[1]) viewport_1 = viewport_window.viewport_api viewport_1.resolution = resolution_1 camera_1 = UsdGeom.Camera.Define(stage, "/Camera1").GetPrim() viewport_1.camera_path = camera_1.GetPath() render_product_path_1 = viewport_1.render_product_path self.render_product_path_1 = render_product_path_1 # SyntheticData singleton interface sdg_iface = SyntheticData.Get() if not sdg_iface.is_node_template_registered("TestSimRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdTestRenderProductCamera", attributes={"inputs:stage":"simulation"} ), template_name="TestSimRpCam" ) if not sdg_iface.is_node_template_registered("TestPostRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, "omni.syntheticdata.SdTestRenderProductCamera", [SyntheticData.NodeConnectionTemplate("PostRenderProductCamera")], attributes={"inputs:stage":"postRender"} ), template_name="TestPostRpCam" ) if not sdg_iface.is_node_template_registered("TestOnDemandRpCam"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdTestRenderProductCamera", [SyntheticData.NodeConnectionTemplate("PostProcessRenderProductCamera")], attributes={"inputs:stage":"onDemand"} ), template_name="TestOnDemandRpCam" ) attributes_0 = { "inputs:renderProductCameraPath":camera_0.GetPath().pathString, "inputs:width":resolution_0[0], "inputs:height":resolution_0[1] } sdg_iface.activate_node_template("TestSimRpCam", 0, [render_product_path_0], attributes_0) sdg_iface.activate_node_template("TestPostRpCam", 0, [render_product_path_0], attributes_0) sdg_iface.activate_node_template("TestOnDemandRpCam", 0, [render_product_path_0],attributes_0) if self.multiViewport: attributes_1 = { "inputs:renderProductCameraPath":camera_1.GetPath().pathString, "inputs:width":resolution_1[0], "inputs:height":resolution_1[1] } sdg_iface.activate_node_template("TestSimRpCam", 0, 
[render_product_path_1], attributes_1) sdg_iface.activate_node_template("TestPostRpCam", 0, [render_product_path_1], attributes_1) sdg_iface.activate_node_template("TestOnDemandRpCam", 0, [render_product_path_1],attributes_1) async def test_renderproduct_camera(self): """ Test render product camera pipeline """ sdg_iface = SyntheticData.Get() test_outname = "outputs:test" test_attributes_names = [test_outname] for _ in range(3): await omni.kit.app.get_app().next_update_async() for _ in range(self.numLoops): await omni.kit.app.get_app().next_update_async() assert sdg_iface.get_node_attributes("TestSimRpCam", test_attributes_names, self.render_product_path_0)[test_outname] assert sdg_iface.get_node_attributes("TestPostRpCam", test_attributes_names, self.render_product_path_0)[test_outname] assert sdg_iface.get_node_attributes("TestOnDemandRpCam", test_attributes_names, self.render_product_path_0)[test_outname] if self.multiViewport: assert sdg_iface.get_node_attributes("TestSimRpCam", test_attributes_names, self.render_product_path_1)[test_outname] assert sdg_iface.get_node_attributes("TestPostRpCam", test_attributes_names, self.render_product_path_1)[test_outname] assert sdg_iface.get_node_attributes("TestOnDemandRpCam", test_attributes_names, self.render_product_path_1)[test_outname] async def tearDown(self): pass
6,091
Python
46.224806
138
0.631752
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_swh_frame_number.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import carb from pxr import Gf, UsdGeom, UsdLux, Sdf import unittest import omni.kit.test from omni.kit.viewport.utility import get_active_viewport, create_viewport_window from omni.syntheticdata import SyntheticData, SyntheticDataStage # Test the Fabric frame number synchronization class TestSWHFrameNumber(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) async def setUp(self): settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) # Setup the scene await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() world_prim = UsdGeom.Xform.Define(stage,"/World") UsdGeom.Xformable(world_prim).AddTranslateOp().Set((0, 0, 0)) UsdGeom.Xformable(world_prim).AddRotateXYZOp().Set((0, 0, 0)) capsule0_prim = stage.DefinePrim("/World/Capsule0", "Capsule") UsdGeom.Xformable(capsule0_prim).AddTranslateOp().Set((100, 0, 0)) UsdGeom.Xformable(capsule0_prim).AddScaleOp().Set((30, 30, 30)) UsdGeom.Xformable(capsule0_prim).AddRotateXYZOp().Set((-90, 0, 0)) capsule0_prim.GetAttribute("primvars:displayColor").Set([(0.3, 1, 0)]) capsule1_prim = stage.DefinePrim("/World/Capsule1", "Capsule") UsdGeom.Xformable(capsule1_prim).AddTranslateOp().Set((-100, 0, 0)) UsdGeom.Xformable(capsule1_prim).AddScaleOp().Set((30, 30, 30)) UsdGeom.Xformable(capsule1_prim).AddRotateXYZOp().Set((-90, 0, 0)) capsule1_prim.GetAttribute("primvars:displayColor").Set([(0, 1, 0.3)]) spherelight = UsdLux.SphereLight.Define(stage, "/SphereLight") spherelight.GetIntensityAttr().Set(30000) spherelight.GetRadiusAttr().Set(30) # first default viewport with the default perspective camera viewport_0 = get_active_viewport() render_product_path_0 = viewport_0.render_product_path # second viewport with a ftheta camera viewport_1_window = create_viewport_window(width=512, height=512) viewport_1 = viewport_1_window.viewport_api camera_1 = stage.DefinePrim("/Camera1", "Camera") camera_1.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") UsdGeom.Xformable(camera_1).AddTranslateOp().Set((0, 250, 0)) UsdGeom.Xformable(camera_1).AddRotateXYZOp().Set((-90, 0, 0)) viewport_1.camera_path = camera_1.GetPath() render_product_path_1 = viewport_1.render_product_path # SyntheticData singleton interface sdg_iface = SyntheticData.Get() # Register node templates in the SyntheticData register # (a node template is a template for creating a node specified by its type and its connections) # # to illustrate we are using the generic omni.syntheticdata.SdTestStageSynchronization node type which supports every stage of the SyntheticData pipeline. When executed it logs the fabric frame number. 
# # register a node template in the simulation stage # NB : this node template has no connections if not sdg_iface.is_node_template_registered("TestSyncSim"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, # node tempalte stage "omni.syntheticdata.SdTestStageSynchronization", # node template type attributes={"inputs:tag":"0"}), # node template default attribute values (when differs from the default value specified in the .ogn) template_name="TestSyncSim" # node template name ) # register a node template in the postrender stage # NB : this template may be activated for several different renderproducts if not sdg_iface.is_node_template_registered("TestSyncPost"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.POST_RENDER, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncSim node (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("TestSyncSim", (), None), # connected to a LdrColorSD rendervar (the renderVar will be activated when activating this template) SyntheticData.NodeConnectionTemplate("LdrColorSD"), # connected to a BoundingBox3DSD rendervar (the renderVar will be activated when activating this template) SyntheticData.NodeConnectionTemplate("BoundingBox3DSD") ]), template_name="TestSyncPost" # node template name ) # register a node template in the postprocess stage # NB : this template may be activated for several different renderproducts if not sdg_iface.is_node_template_registered("TestSyncOnDemand"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncSim node (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("TestSyncSim", (), None), # connected to a PostProcessDispatch node : the PostProcessDispatch node trigger the execution of its downstream connections for every rendered frame # (a PostProcessDispatch node will be activated when activating this template) SyntheticData.NodeConnectionTemplate("PostProcessDispatch") ] ), template_name="TestSyncOnDemand" # node template name ) # register a node template in the postprocess stage # NB : this template may be activated for any combination of renderproduct pairs if not sdg_iface.is_node_template_registered("TestSyncCross"): # register an accumulator which trigger once when all its upstream connections have triggered sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.graph.action.SyncGate", # node template type # node template connections [ # connected to the PostProcessDispatcher for the synchronization value SyntheticData.NodeConnectionTemplate( "PostProcessDispatcher", (), {"outputs:swhFrameNumber":"inputs:syncValue"} ), # connected to a TestSyncOnDemand node for the first renderproduct (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncOnDemand", (0,), {"outputs:exec":"inputs:execIn"} ), # connected to a TestSyncOnDemand node for the second renderproduct (a TestSyncSim node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncOnDemand", (1,), {"outputs:exec":"inputs:execIn"} ), ] ), 
template_name="TestSyncAccum" # node template name ) sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, # node template stage "omni.syntheticdata.SdTestStageSynchronization", # node template type # node template connections [ # connected to a TestSyncAccum node (a TestSyncAccum node will be activated when activating this template) SyntheticData.NodeConnectionTemplate( "TestSyncAccum", (0,1), { "outputs:execOut":"inputs:exec", "outputs:syncValue":"inputs:swhFrameNumber" } ), ] ), template_name="TestSyncCross" # node template name ) # Activate the node templates for the renderproducts # this will create the node (and all their missing dependencies) within the associated graphs # # activate the TestSyncPost for the renderpoduct renderpoduct_0 # this will also activate the TestSyncSim node and the LdrColorSD and BoundingBox3DSD renderVars for the renderpoduct renderpoduct_0 # this will set the tag node attribute to "1" sdg_iface.activate_node_template("TestSyncPost", 0, [render_product_path_0],{"inputs:tag":"1"}) # activate the TestSyncPost for the renderpoduct renderpoduct_1 # this will also activate the LdrColorSD and BoundingBox3DSD renderVars for the renderpoduct renderpoduct_1 # NB TestSyncSim has already been activated # this will set the tag node attribute to "2" sdg_iface.activate_node_template("TestSyncPost", 0, [render_product_path_1],{"inputs:tag":"2"}) # activate the TestSyncCross for the renderpoducts [renderproduct_0, renderproduct_1] # this will also activate : # - TestSyncAccum for the renderpoducts [renderproduct_0, renderproduct_1] # - PostProcessDispatch for the renderpoduct renderproduct_0 # - TestSyncOnDemand for the renderproduct renderproduct_0 # - TestSyncOnDemand for the renderproduct renderproduct_1 # - PostProcessDispatch for the renderpoduct renderproduct_1 # this will set the tag node attribute to "5" sdg_iface.activate_node_template("TestSyncCross", 0, [render_product_path_0,render_product_path_1],{"inputs:tag":"5"}) # Set some specific attributes to nodes that have been automatically activated # set the tag to the TestSyncOnDemand for renderproduct renderproduct_0 sdg_iface.set_node_attributes("TestSyncOnDemand",{"inputs:tag":"3"},render_product_path_0) # set the tag to the TestSyncOnDemand for renderproduct renderproduct_1 sdg_iface.set_node_attributes("TestSyncOnDemand",{"inputs:tag":"4"},render_product_path_1) # setup members self.render_product_path_0 = render_product_path_0 self.render_product_path_1 = render_product_path_1 self.numLoops = 33 async def run_loop(self): sdg_iface = SyntheticData.Get() render_product_path_0 = self.render_product_path_0 render_product_path_1 = self.render_product_path_1 test_attributes_names = ["outputs:swhFrameNumber","outputs:fabricSWHFrameNumber"] # ensuring that the setup is taken into account for _ in range(5): await omni.kit.app.get_app().next_update_async() for _ in range(self.numLoops): await omni.kit.app.get_app().next_update_async() # test the post-render pipeline synchronization sync_post_attributes = sdg_iface.get_node_attributes( "TestSyncPost",test_attributes_names,render_product_path_0) assert sync_post_attributes and all(attr in sync_post_attributes for attr in test_attributes_names) assert sync_post_attributes["outputs:swhFrameNumber"] == sync_post_attributes["outputs:fabricSWHFrameNumber"] # test the on-demand pipeline synchronization sync_ondemand_attributes = sdg_iface.get_node_attributes( "TestSyncOnDemand",test_attributes_names,render_product_path_1) assert 
sync_ondemand_attributes and all(attr in sync_ondemand_attributes for attr in test_attributes_names) assert sync_ondemand_attributes["outputs:swhFrameNumber"] == sync_ondemand_attributes["outputs:fabricSWHFrameNumber"] # test the on-demand cross renderproduct synchronization sync_cross_ondemand_attributes = sdg_iface.get_node_attributes( "TestSyncCross",test_attributes_names,render_product_path_0) assert sync_cross_ondemand_attributes and all(attr in sync_cross_ondemand_attributes for attr in test_attributes_names) assert sync_cross_ondemand_attributes["outputs:swhFrameNumber"] == sync_cross_ondemand_attributes["outputs:fabricSWHFrameNumber"] async def test_sync_idle(self): """ Test swh frame synhronization with : - asyncRendering Off - waitIdle On """ settings = carb.settings.get_settings() settings.set_bool("/app/asyncRendering",False) settings.set_int("/app/settings/flatCacheStageFrameHistoryCount",3) settings.set_bool("/app/renderer/waitIdle",True) settings.set_bool("/app/hydraEngine/waitIdle",True) await self.run_loop() @unittest.skip("DRIVE-3247 : SyntheticData does not support async rendering.") async def test_sync(self): """ Test swh frame synhronization with : - asyncRendering Off - waitIdle Off """ settings = carb.settings.get_settings() settings.set_bool("/app/asyncRendering",False) settings.set_int("/app/settings/flatCacheStageFrameHistoryCount",3) settings.set_bool("/app/renderer/waitIdle",False) settings.set_bool("/app/hydraEngine/waitIdle",False) await self.run_loop() @unittest.skip("DRIVE-3247 : SyntheticData does not support async rendering.") async def test_async(self): """ Test swh frame synhronization with : - asyncRendering On - waitIdle Off """ settings = carb.settings.get_settings() settings.set_bool("/app/asyncRendering",True) settings.set_int("/app/settings/flatCacheStageFrameHistoryCount",3) settings.set_bool("/app/renderer/waitIdle",False) settings.set_bool("/app/hydraEngine/waitIdle",False) await self.run_loop() async def tearDown(self): # reset to the default params settings = carb.settings.get_settings() settings.set_bool("/app/asyncRendering",False) settings.set_bool("/app/renderer/waitIdle",True) settings.set_bool("/app/hydraEngine/waitIdle",True)
15,648
Python
53.908772
209
0.62647
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_distance_to_image_plane.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from time import time import carb import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestDistanceToImagePlane(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async( self.viewport, syn._syntheticdata.SensorType.DistanceToImagePlane ) async def test_parsed_empty(self): """ Test distance-to-image-plane sensor on empty stage. """ # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_image_plane(self.viewport) assert np.all(data > 1000) async def test_parsed_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_image_plane(self.viewport) assert data.dtype == np.float32 async def test_distances(self): stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_image_plane(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position assert np.isclose(data.min(), (n - 1) / 100, atol=1e-5) async def test_distances_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" # Set the rendering mode to be pathtracing settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_image_plane(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position assert np.isclose(data.min(), (n - 1) / 100, atol=1e-5) async def test_distances_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ # Set the rendering mode to be pathtracing settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_image_plane(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position assert np.isclose(data.min(), (n - 1) / 100, atol=1e-5) # After running each test async def tearDown(self): pass
5,548
Python
38.635714
141
0.628875
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_semantic_filter.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import unittest import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from omni.syntheticdata import SyntheticData from ..utils import add_semantics import numpy as np # Test the semantic filter class TestSemanticFilter(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) async def setUp(self): await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() # scene # /World [belong_to:world] # /Cube [class:cube] # /Sphere [class:sphere] # /Sphere [class:sphere] # /Capsule [class:capsule] # /Cube [class:cube] # /Capsule [class:capsule] # /Nothing [belong_to:nothing] world_prim = stage.DefinePrim("/World", "Plane") add_semantics(world_prim, "world", "belong_to") world_cube_prim = stage.DefinePrim("/World/Cube", "Cube") add_semantics(world_cube_prim, "cube", "class") world_cube_sphere_prim = stage.DefinePrim("/World/Cube/Sphere", "Sphere") add_semantics(world_cube_sphere_prim, "sphere", "class") world_sphere_prim = stage.DefinePrim("/World/Sphere", "Sphere") add_semantics(world_sphere_prim, "sphere", "class") world_capsule_prim = stage.DefinePrim("/World/Capsule", "Capsule") add_semantics(world_capsule_prim, "capsule", "class") cube_prim = stage.DefinePrim("/Cube", "Cube") add_semantics(cube_prim, "cube", "class") capsule_prim = stage.DefinePrim("/Capsule", "Capsule") add_semantics(capsule_prim, "capsule", "class") nothing_prim = stage.DefinePrim("/Nothing", "Plane") add_semantics(nothing_prim, "nothing", "belong_to") self.render_product_path = get_active_viewport().render_product_path SyntheticData.Get().activate_node_template("SemanticLabelTokenSDExportRawArray", 0, [self.render_product_path]) await omni.kit.app.get_app().next_update_async() def fetch_semantic_label_tokens(self): output_names = ["outputs:data","outputs:bufferSize"] outputs = SyntheticData.Get().get_node_attributes("SemanticLabelTokenSDExportRawArray", output_names, self.render_product_path) assert outputs return outputs["outputs:data"].view(np.uint64) async def check_num_valid_labels(self, expected_num_valid_labels): wait_iterations = 6 for _ in range(wait_iterations): await omni.kit.app.get_app().next_update_async() num_valid_labels = np.count_nonzero(self.fetch_semantic_label_tokens()) assert num_valid_labels == expected_num_valid_labels async def test_semantic_filter_all(self): SyntheticData.Get().set_default_semantic_filter("*:*", True) await self.check_num_valid_labels(8) async def test_semantic_filter_no_world(self): SyntheticData.Get().set_default_semantic_filter("!belong_to:world", True) # /Cube /Capsule /Nothing await self.check_num_valid_labels(3) async def test_semantic_filter_all_class_test(self): SyntheticData.Get().set_default_semantic_filter("class:*", True) await self.check_num_valid_labels(6) async def test_semantic_filter_all_class_no_cube_test(self): SyntheticData.Get().set_default_semantic_filter("class:!cube&*", True) await self.check_num_valid_labels(3) async def test_semantic_filter_only_sphere_or_cube_test(self): SyntheticData.Get().set_default_semantic_filter("class:cube|sphere", True) await self.check_num_valid_labels(4) async def test_semantic_filter_sphere_and_cube_test(self): SyntheticData.Get().set_default_semantic_filter("class:cube&sphere", True) # /World/Cube/Sphere 
await self.check_num_valid_labels(1) async def test_semantic_filter_world_and_sphere_test(self): SyntheticData.Get().set_default_semantic_filter("class:sphere,belong_to:world", True) await self.check_num_valid_labels(2) async def test_semantic_filter_no_belong_test(self): SyntheticData.Get().set_default_semantic_filter("belong_to:!*", True) # /Cube /Capsule await self.check_num_valid_labels(2) async def test_semantic_filter_world_or_capsule_test(self): SyntheticData.Get().set_default_semantic_filter("belong_to:world;class:capsule", True) await self.check_num_valid_labels(6) async def test_semantic_filter_belong_to_nohierarchy(self): SyntheticData.Get().set_default_semantic_filter("belong_to:*", False) await self.check_num_valid_labels(2) async def tearDown(self): SyntheticData.Get().set_default_semantic_filter("*:*")
5,054
Python
39.766129
135
0.661061
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_cross_correspondence.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from PIL import Image from time import time from pathlib import Path import carb import numpy as np from numpy.lib.arraysetops import unique import omni.kit.test from pxr import Gf, UsdGeom from omni.kit.viewport.utility import get_active_viewport, next_viewport_frame_async, create_viewport_window # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 cameras = ["/World/Cameras/CameraFisheyeLeft", "/World/Cameras/CameraPinhole", "/World/Cameras/CameraFisheyeRight"] # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test # This test has to run last and thus it's prefixed as such to force that: # - This is because it has to create additional viewports which makes the test # get stuck if it's not the last one in the OV process session class ZZHasToRunLast_TestCrossCorrespondence(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" self.output_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "output" self.StdDevTolerance = 0.1 self.sensorViewport = None # Before running each test async def setUp(self): global cameras np.random.seed(1234) # Load the scene scenePath = os.path.join(FILE_DIR, "../data/scenes/cross_correspondence.usda") await omni.usd.get_context().open_stage_async(scenePath) await omni.kit.app.get_app().next_update_async() # Get the main-viewport as the sensor-viewport self.sensorViewport = get_active_viewport() await next_viewport_frame_async(self.sensorViewport) # Setup viewports resolution = self.sensorViewport.resolution viewport_windows = [None] * 2 x_pos, y_pos = 12, 75 for i in range(len(viewport_windows)): viewport_windows[i] = create_viewport_window(width=resolution[0], height=resolution[1], position_x=x_pos, position_y=y_pos) viewport_windows[i].width = 500 viewport_windows[i].height = 500 x_pos += 500 # Setup cameras self.sensorViewport.camera_path = cameras[0] for i in range(len(viewport_windows)): viewport_windows[i].viewport_api.camera_path = cameras[i + 1] # Use default viewport for sensor target as otherwise sensor enablement doesn't work # also the test will get stuck # Initialize Sensor await syn.sensors.create_or_retrieve_sensor_async( self.sensorViewport, syn._syntheticdata.SensorType.CrossCorrespondence ) async def test_golden_image(self): # Render one frame await syn.sensors.next_sensor_data_async(self.sensorViewport,True) data = syn.sensors.get_cross_correspondence(self.sensorViewport) golden_image = np.load(self.golden_image_path / "cross_correspondence.npz")["array"] # normalize xy (uv offset) to zw channels' value range # x100 seems like a good number to bring uv offset to ~1 data[:, [0, 1]] *= 100 golden_image[:, [0, 1]] *= 100 std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) if std_dev >= self.StdDevTolerance: if not os.path.isdir(self.output_image_path): os.mkdir(self.output_image_path) 
np.savez_compressed(self.output_image_path / "cross_correspondence.npz", array=data) golden_image = ((golden_image + 1.0) / 2) * 255 data = ((data + 1.0) / 2) * 255 Image.fromarray(golden_image.astype(np.uint8), "RGBA").save( self.output_image_path / "cross_correspondence_golden.png" ) Image.fromarray(data.astype(np.uint8), "RGBA").save(self.output_image_path / "cross_correspondence.png") self.assertTrue(std_dev < self.StdDevTolerance) # After running each test async def tearDown(self): pass
4,577
Python
41.785046
141
0.668779
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_stage_manipulation.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import carb import random from pxr import Gf, UsdGeom, UsdLux, Sdf import unittest import omni.kit.test from omni.syntheticdata import SyntheticData, SyntheticDataStage from omni.kit.viewport.utility import get_active_viewport FILE_DIR = os.path.dirname(os.path.realpath(__file__)) # Test the ogn node repeatability under stage manipulation class TestStageManipulation(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) async def setUp(self): path = os.path.join(FILE_DIR, "../data/scenes/scene_instance_test.usda") await omni.usd.get_context().open_stage_async(path) #await omni.usd.get_context().new_stage_async() viewport = get_active_viewport() self.render_product_path = viewport.render_product_path # SyntheticData singleton interface sdg_iface = SyntheticData.Get() if not sdg_iface.is_node_template_registered("TestStageManipulationScenarii"): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.SIMULATION, "omni.syntheticdata.SdTestStageManipulationScenarii", attributes={"inputs:worldPrimPath":"/World"} ), template_name="TestStageManipulationScenarii" # node template name ) render_vars = [ #"SemanticMapSD", #"SemanticPrimTokenSD", #"InstanceMapSD", #"InstancePrimTokenSD", #"SemanticLabelTokenSD", #"SemanticLocalTransformSD", #"SemanticWorldTransformSD", "SemanticBoundingBox2DExtentTightSD", #"SemanticBoundingBox2DInfosTightSD", "SemanticBoundingBox2DExtentLooseSD", #"SemanticBoundingBox2DInfosLooseSD", "SemanticBoundingBox3DExtentSD", "SemanticBoundingBox3DInfosSD" ] for rv in render_vars: template_name = "TestRawArray" + rv if not sdg_iface.is_node_template_registered(template_name): sdg_iface.register_node_template( SyntheticData.NodeTemplate( SyntheticDataStage.ON_DEMAND, "omni.syntheticdata.SdTestPrintRawArray", [SyntheticData.NodeConnectionTemplate(rv + "ExportRawArray")] ), template_name=template_name ) self.num_loops = 37 async def render_var_test(self, render_var, ref_values, num_references_values, element_type, rand_seed=0, mode="printReferences"): sdg_iface = SyntheticData.Get() sdg_iface.activate_node_template("TestStageManipulationScenarii") sdg_iface.activate_node_template("TestRawArray" + render_var, 0, [self.render_product_path], {"inputs:elementType": element_type, "inputs:referenceValues": ref_values, "inputs:randomSeed": rand_seed, "inputs:mode": mode, "inputs:referenceNumUniqueRandomValues": num_references_values}) for _ in range(self.num_loops): await omni.kit.app.get_app().next_update_async() sdg_iface.deactivate_node_template("TestRawArray" + render_var, 0, [self.render_product_path]) sdg_iface.deactivate_node_template("TestStageManipulationScenarii") @unittest.skip("Unimplemented") async def test_semantic_map(self): await self.render_var_test("SemanticMapSD", [], "uint16", 2) async def test_semantic_bbox3d_extent(self): await self.render_var_test("SemanticBoundingBox3DExtentSD", [ 87.556404, 223.83577, -129.42677, -155.79227, -49.999996, 421.41083, 88.13742, -50.000004, 49.999905, 39.782856, -50.000004, -155.52794, -16.202198, -50.0, 136.29709, -104.94976, -155.52792, 87.556404, -50.000008, 223.83577, 49.99991, -87.8103, -50.0, -50.00001, 276.29846, 50.000004, 421.41083, -50.0, 60.42457, 223.83574, -129.42676, 312.2204, 277.44424, -50.000004, -37.84166, 87.556404, 
188.92877, 136.2971, 50.000004 ], 13, "float32", 3, mode="testReferences") # async def test_semantic_bbox3d_infos(self): # await self.render_var_test("SemanticBoundingBox3DInfosSD", # [ # -50.000008, 57.119793, 49.9999, -50.000004, -50.000015, -50.000004, 62.03122, # -50.000008, -50.000004, -50.000004, -50.0, 50.0, -50.0, 57.119793, # 9.5100141e-01, -4.7552836e-01, 6.1506079e+02, 1.0000000e+00, -1.0000000e+00, 1.3421423e+03, 4.9999901e+01 # ], 11, "int32", 4, mode="printReferences") async def test_semantic_bbox2d_extent_loose(self): await self.render_var_test("SemanticBoundingBox2DExtentLooseSD", [ 733, 479, 532, 507, 460, 611, 763, 309, 17, 827, 789, 698, 554, 947, 789, 581, 534, 156, 582, 323, 825, 298, 562, 959, 595, 299, 117, 445, 572, 31, 622, 609, 228 ], 11, "int32", 5, mode="testReferences") async def test_semantic_bbox2d_extent_tight(self): await self.render_var_test("SemanticBoundingBox2DExtentTightSD", [ 0.0000000e+00, 5.0700000e+02, 1.1600000e+02, 7.4600000e+02, 5.9500000e+02, 2.1474836e+09, 2.1474836e+09, 2.5300000e+02, 3.6100000e+02, 1.7000000e+01, 0.0000000e+00, 2.1474836e+09, 2.1474836e+09, 2.1474836e+09, 2.1474836e+09, 2.1474836e+09, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 2.1474836e+09, 0.0000000e+00, 2.1474836e+09, 0.0000000e+00, 3.1000000e+01, 5.3900000e+02, 2.3600000e+02, 2.1474836e+09, 5.7200000e+02, 8.9200000e+02, 9.0500000e+02, 5.6200000e+02, 5.1300000e+02, 0.0000000e+00 ], 11, "int32", 9, mode="testReferences") async def tearDown(self): pass
6,181
Python
48.456
177
0.621582
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_bbox3d.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import unittest import uuid import math import shutil import asyncio from time import time import carb.tokens import carb.settings import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom, Usd, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from .. import utils FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 TMP = carb.tokens.get_tokens_interface().resolve("${temp}") # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestBBox3D(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.BoundingBox3D) async def test_parsed_empty(self): """ Test 3D bounding box on empty stage. """ bbox3d_data = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) assert not bool(bbox3d_data) async def test_fields_exist(self): """ Test the correctness of the output dtype. """ stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") utils.add_semantics(cube, "cube") # Render one frame await syn.sensors.next_sensor_data_async(self.viewport, True) bbox3d_data_raw = syn.sensors.get_bounding_box_3d(self.viewport, parsed=False, return_corners=False) bbox3d_data_parsed = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) raw_dtype = np.dtype( [ ("instanceId", "<u4"), ("semanticId", "<u4"), ("x_min", "<f4"), ("y_min", "<f4"), ("z_min", "<f4"), ("x_max", "<f4"), ("y_max", "<f4"), ("z_max", "<f4"), ("transform", "<f4", (4, 4)), ] ) parsed_dtype = np.dtype( [ ("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O"), ("semanticId", "<u4"), ("x_min", "<f4"), ("y_min", "<f4"), ("z_min", "<f4"), ("x_max", "<f4"), ("y_max", "<f4"), ("z_max", "<f4"), ("transform", "<f4", (4, 4)), ("corners", "<f4", (8, 3)), ] ) assert bbox3d_data_raw.dtype == raw_dtype assert bbox3d_data_parsed.dtype == parsed_dtype async def test_parsed_nested_Y_pathtracing(self): """ Test 3D bounding box with nested semantics and transforms, Y-Up, in pathtracing mode. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Y") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) await omni.kit.app.get_app().next_update_async() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) async def test_parsed_nested_Y_ray_traced_lighting(self): """ Test 3D bounding box with nested semantics and transforms, Y-Up, in ray traced lighting mode. """ # Set the rendering mode to be ray traced lighting. 
settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Y") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) async def test_parsed_nested_Y(self): """ Test 3D bounding box with nested semantics and transforms, Y-Up. 
""" # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Y") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) async def test_parsed_nested_Z(self): """ Test 3D bounding box with nested semantics and transforms, Z-Up. 
""" # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Z") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True, return_corners=True) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) @unittest.skip("OM-45008") async def test_camera_frame_simple_ftheta(self): """ Test 3D bounding box in a simple scene under ftheta camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() # TEST SIMPLE SCENE cube = stage.DefinePrim("/Cube", "Cube") cube.GetAttribute("size").Set(2.0) UsdGeom.Xformable(cube).AddTranslateOp().Set((10.0, 1.0, 2)) utils.add_semantics(cube, "cube") camera = stage.DefinePrim("/Camera", "Camera") camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") UsdGeom.Xformable(camera).AddTranslateOp().Set((10.0, 0.0, 0.0)) self.viewport.camera_path = camera.GetPath() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) # TODO: find the correct value of distorted result. # The f theta will distort the result. 
extents = Gf.Range3d([-1.0, 0, 1], [1.0, 2.0, 3]) corners = np.array([[extents.GetCorner(i) for i in range(8)]]) assert not np.allclose(bbox3d_data[0]["corners"], corners) @unittest.skip("OM-45008") async def test_camera_frame_simple_spherical(self): """ Test 3D bounding box in a simple scene under fisheye spherical camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() # TEST SIMPLE SCENE cube = stage.DefinePrim("/Cube", "Cube") cube.GetAttribute("size").Set(2.0) UsdGeom.Xformable(cube).AddTranslateOp().Set((10.0, 1.0, 2)) utils.add_semantics(cube, "cube") camera = stage.DefinePrim("/Camera", "Camera") camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical") UsdGeom.Xformable(camera).AddTranslateOp().Set((10.0, 0.0, 0.0)) self.viewport.camera_path = camera.GetPath() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) # TODO: find the correct value of distorted result. # The spherical camera will distort the result. extents = Gf.Range3d([-1.0, 0, 1], [1.0, 2.0, 3]) corners = np.array([[extents.GetCorner(i) for i in range(8)]]) assert not np.allclose(bbox3d_data[0]["corners"], corners) async def test_camera_frame_simple(self): """ Test 3D bounding box in a simple scene. """ stage = omni.usd.get_context().get_stage() # TEST SIMPLE SCENE cube = stage.DefinePrim("/Cube", "Cube") cube.GetAttribute("size").Set(2.0) UsdGeom.Xformable(cube).AddTranslateOp().Set((10.0, 0.0, 10.0)) utils.add_semantics(cube, "cube") camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((10.0, 0.0, 0.0)) self.viewport.camera_path = camera.GetPath() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) extents = Gf.Range3d([-1.0, -1.0, 9.0], [1.0, 1.0, 11.0]) corners = np.array([[extents.GetCorner(i) for i in range(8)]]) assert np.allclose(bbox3d_data[0]["corners"], corners) tf = np.eye(4) tf[3, 2] = 10.0 assert np.allclose(bbox3d_data[0]["transform"], tf) async def test_camera_frame_reference(self): """ Test 3D bounding box in a simple scene. 
""" ref_path = os.path.join(TMP, f"ref_stage{uuid.uuid1()}.usd") ref_stage = Usd.Stage.CreateNew(ref_path) world = ref_stage.DefinePrim("/World", "Xform") world_tf = utils.get_random_transform() UsdGeom.Xformable(world).AddTransformOp().Set(world_tf) cube = ref_stage.DefinePrim("/World/Cube", "Cube") cube.GetAttribute("size").Set(2.0) cube_tf = Gf.Matrix4d().SetTranslateOnly((10.0, 0.0, 10.0)) UsdGeom.Xformable(cube).AddTransformOp().Set(cube_tf) utils.add_semantics(cube, "cube") camera = ref_stage.DefinePrim("/World/Camera", "Camera") camera_tf = cube_tf UsdGeom.Xformable(camera).AddTransformOp().Set(camera_tf) ref_stage.Save() # omni.usd.get_context().new_stage() # await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() rig = stage.DefinePrim("/Rig", "Xform") rig_tf = utils.get_random_transform() UsdGeom.Xformable(rig).AddTransformOp().Set(rig_tf) ref = stage.DefinePrim("/Rig/Ref") ref.GetReferences().AddReference(ref_path, "/World") self.viewport.camera_path = "/Rig/Ref/Camera" # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data_world = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=False ) bbox3d_data_camera = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) extents = Gf.Range3d([-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]) corners = np.array([[extents.GetCorner(i) for i in range(8)]]) assert np.allclose(bbox3d_data_camera[0]["corners"], corners) combined_tf = np.matmul(cube_tf, np.matmul(world_tf, rig_tf)) corners_tf = np.matmul(np.pad(corners.reshape(-1, 3), ((0, 0), (0, 1)), constant_values=1), combined_tf) corners_tf = corners_tf[:, :3].reshape(-1, 8, 3) assert np.allclose(bbox3d_data_world[0]["corners"], corners_tf) # tf = np.eye(4) # tf[3, 2] = 10.0 assert np.allclose(bbox3d_data_world[0]["transform"], combined_tf) pt_camera_min = [bbox3d_data_camera[0][f"{a}_min"] for a in ["x", "y", "z"]] pt_camera_min = np.array([*pt_camera_min, 1.0]) pt_camera_max = [bbox3d_data_camera[0][f"{a}_max"] for a in ["x", "y", "z"]] pt_camera_max = np.array([*pt_camera_max, 1.0]) assert np.allclose(np.matmul(pt_camera_min, bbox3d_data_camera[0]["transform"])[:3], corners[0, 0]) assert np.allclose(np.matmul(pt_camera_max, bbox3d_data_camera[0]["transform"])[:3], corners[0, 7]) async def test_camera_frame_Y(self): # TEST NESTED TRANSFORMS, UP AXIS # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Y") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") camera = stage.DefinePrim("/World/Camera", "Camera") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(camera).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) # Move camera with random transform camera_tf = utils.get_random_transform() UsdGeom.Xformable(camera).AddTransformOp().Set(Gf.Matrix4d(camera_tf)) camera_tf_inv = 
np.linalg.inv(camera_tf) self.viewport.camera_path = camera.GetPath() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf) gf_corners = np.dot(gf_corners, camera_tf_inv)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) async def test_camera_frame_Z(self): # TEST NESTED TRANSFORMS, UP AXIS # Create 2 cubes (size=1) under a parent prim stage = omni.usd.get_context().get_stage() UsdGeom.SetStageUpAxis(stage, "Z") parent = stage.DefinePrim("/World/Parent", "Xform") child1 = stage.DefinePrim("/World/Parent/Child1", "Cube") child2 = stage.DefinePrim("/World/Parent/Child2", "Cube") camera = stage.DefinePrim("/World/Camera", "Camera") child1.GetAttribute("size").Set(1.0) child2.GetAttribute("size").Set(1.0) utils.add_semantics(parent, "parent") utils.add_semantics(child1, "child1") utils.add_semantics(child2, "child2") UsdGeom.Xformable(parent).ClearXformOpOrder() UsdGeom.Xformable(child1).ClearXformOpOrder() UsdGeom.Xformable(child2).ClearXformOpOrder() UsdGeom.Xformable(camera).ClearXformOpOrder() UsdGeom.Xformable(parent).AddRotateYOp().Set(45) UsdGeom.Xformable(child1).AddTranslateOp().Set((-0.5, 0.5, 0.0)) UsdGeom.Xformable(child1).AddRotateYOp().Set(45) UsdGeom.Xformable(child2).AddTranslateOp().Set((0.5, -0.5, 0.0)) # Move camera with random transform camera_tf = np.eye(4) camera_tf[:3, :3] = Gf.Matrix3d(Gf.Rotation(np.random.rand(3).tolist(), np.random.rand(3).tolist())) camera_tf[3, :3] = np.random.rand(1, 3) UsdGeom.Xformable(camera).AddTransformOp().Set(Gf.Matrix4d(camera_tf)) camera_tf_inv = np.linalg.inv(camera_tf) self.viewport.camera_path = camera.GetPath() # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox3d_data = syn.sensors.get_bounding_box_3d( self.viewport, parsed=True, return_corners=True, camera_frame=True ) parent_bbox = [row for row in bbox3d_data if row["name"] == parent.GetPath()][0] child1_bbox = [row for row in bbox3d_data if row["name"] == child1.GetPath()][0] child2_bbox = [row for row in bbox3d_data if row["name"] == child2.GetPath()][0] # Only takes into account child transforms a = 
math.cos(math.pi / 4) parent_bounds = [[-a - 0.5, -1.0, -a], [1.0, 1.0, a]] child1_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] child2_bounds = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]] # Doesn't take into account transforms for bbox, bounds in zip([parent_bbox, child1_bbox, child2_bbox], [parent_bounds, child1_bounds, child2_bounds]): self.assertAlmostEqual(bbox["x_min"], bounds[0][0], places=5) self.assertAlmostEqual(bbox["y_min"], bounds[0][1], places=5) self.assertAlmostEqual(bbox["z_min"], bounds[0][2], places=5) self.assertAlmostEqual(bbox["x_max"], bounds[1][0], places=5) self.assertAlmostEqual(bbox["y_max"], bounds[1][1], places=5) self.assertAlmostEqual(bbox["z_max"], bounds[1][2], places=5) prim = stage.GetPrimAtPath(bbox["name"]) tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(0.0)) gf_range = Gf.Range3f(*bounds) gf_corners = np.array([gf_range.GetCorner(i) for i in range(8)]) gf_corners = np.pad(gf_corners, ((0, 0), (0, 1)), constant_values=1.0) gf_corners = np.dot(gf_corners, tf) gf_corners = np.dot(gf_corners, camera_tf_inv)[:, :3] assert np.allclose(bbox["corners"], gf_corners, atol=1e-5) @unittest.skip("OM-46398") async def test_bbox_3d_scene_instance(self): """ Test sensor on scene instance. """ path = os.path.join(FILE_DIR, "../data/scenes/scene_instance_test.usda") await omni.usd.get_context().open_stage_async(path) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_bounding_box_3d_(self.viewport) # should be 3 prims in the scene # TODO: add more complicated test assert len(data) == 3 # After running each test async def tearDown(self): pass
30,376
Python
46.169255
141
0.604655
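The 3D bounding box tests above all verify the returned corners the same way: take the 8 corners of a local-space Gf.Range3f and push them through the prim's local-to-world transform. Below is a minimal standalone sketch of that pattern, assuming pxr and numpy are importable; the helper name expected_world_corners is illustrative and not part of the original suite.

import numpy as np
from pxr import Gf, UsdGeom


def expected_world_corners(prim, local_bounds, time_code=0.0):
    """Return the 8 corners of local_bounds ([[min], [max]]) in world space."""
    tf = np.array(UsdGeom.Imageable(prim).ComputeLocalToWorldTransform(time_code))
    gf_range = Gf.Range3f(*local_bounds)
    corners = np.array([gf_range.GetCorner(i) for i in range(8)])
    # Promote to homogeneous coordinates; Gf matrices use the row-vector
    # convention, so points are transformed as (point @ matrix).
    corners = np.pad(corners, ((0, 0), (0, 1)), constant_values=1.0)
    return np.dot(corners, tf)[:, :3]

np.allclose(bbox["corners"], expected_world_corners(prim, bounds), atol=1e-5) then mirrors the assertion made at the end of each nested-transform test above.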
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_depth.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from time import time import carb import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestDepth(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.Depth) async def test_parsed_empty(self): """ Test depth sensor on empty stage. """ # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) assert data.sum() == 0 async def test_parsed_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) assert data.dtype == np.float32 async def test_distances(self): stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) assert np.isclose(data.min(), 0, atol=1e-5) # The front of the cube is 1 ahead of its center position assert np.isclose(data.max(), 1 / (n - 1), atol=1e-5) async def test_distances_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" # Set the rendering mode to be pathtracing settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) assert np.isclose(data.min(), 0, atol=1e-5) # The front of the cube is 1 ahead of its center position assert np.isclose(data.max(), 1 / (n - 1), atol=1e-5) async def test_distances_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ # Set the rendering mode to be pathtracing settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) assert np.isclose(data.min(), 0, atol=1e-5) # The front of the cube is 1 ahead of its center position assert np.isclose(data.max(), 1 / (n - 1), atol=1e-5) async def test_ftheta_camera(self): """ Test the functionality of the sensor under f-theta camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() # Add a cube at the centre of the scene cube_prim = stage.DefinePrim("/Cube", "Cube") add_semantics(cube_prim, "cube") cube = UsdGeom.Cube(cube_prim) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_depth(self.viewport) await omni.kit.app.get_app().next_update_async() # Centre of the data should be half of the cube edge's length, adjusted to correct scale. edge_length = cube.GetSizeAttr().Get() assert np.isclose(1 / (edge_length - 1), data.max(), atol=1e-3) assert np.isclose(1 / (np.sqrt(((edge_length) ** 2)*2) - 1), data[data > 0].min(), atol=1e-1) # After running each test async def tearDown(self): pass
7,003
Python
39.252873
141
0.625875
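The depth assertions above (data.max() == 1 / (n - 1)) imply that the cube's nearest face sits n - 1 units in front of the camera (the default USD Cube size is 2, so its half extent is 1) and that get_depth returns an inverse-distance value. A small sketch of that relationship, under that inferred assumption:

def expected_max_depth(cube_center_distance, half_size=1.0):
    # Assumption inferred from the assertions above: the depth output behaves
    # like an inverse distance, so the nearest face at distance d yields 1 / d.
    nearest = cube_center_distance - half_size
    return 1.0 / nearest


for n in range(10, 100, 10):
    # Matches `assert np.isclose(data.max(), 1 / (n - 1), atol=1e-5)` in test_distances.
    assert abs(expected_max_depth(n) - 1.0 / (n - 1)) < 1e-9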
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_semantic_seg.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from time import time from pathlib import Path import carb import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics import unittest FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestSemanticSeg(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() await syn.sensors.initialize_async( self.viewport, [ syn._syntheticdata.SensorType.SemanticSegmentation, syn._syntheticdata.SensorType.InstanceSegmentation ] ) async def test_empty(self): """ Test semantic segmentation on empty stage. """ data = syn.sensors.get_semantic_segmentation(self.viewport) assert data.sum() == 0 async def test_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) assert data.dtype == np.uint32 async def test_cube(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) # np.savez_compressed(self.golden_image_path / 'semantic_seg_cube.npz', array=data) golden_image = np.load(self.golden_image_path / "semantic_seg_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 0.1 async def test_cube_sphere(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) sphere_prim = stage.DefinePrim("/Sphere", "Sphere") UsdGeom.XformCommonAPI(sphere_prim).SetTranslate((300, 0, 0)) add_semantics(sphere_prim, "sphere") sphere = UsdGeom.Sphere(sphere_prim) sphere.GetRadiusAttr().Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_instance_segmentation(self.viewport) # np.savez_compressed(self.golden_image_path / 'instance_seg_cube.npz', array=data) assert len(data) != 0 async def test_cube_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) golden_image = np.load(self.golden_image_path / "semantic_seg_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 0.1 async def test_cube_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) golden_image = np.load(self.golden_image_path / "semantic_seg_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 0.1 async def test_cube_ftheta(self): """ Basic funtionality test of the sensor under f theta camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await omni.kit.app.get_app().next_update_async() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((100, 100, 100)) self.viewport.camera_path = camera.GetPath() await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) # np.savez_compressed(self.golden_image_path / 'semantic_seg_cube_ftheta.npz', array=data) golden_image = np.load(self.golden_image_path / "semantic_seg_cube_ftheta.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 0.1 async def test_cube_spherical(self): """ Basic funtionality test of the sensor under spherical camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await omni.kit.app.get_app().next_update_async() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be spherical fish eye camera. 
camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((100, 100, 100)) self.viewport.camera_path = camera.GetPath() await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) # np.savez_compressed(self.golden_image_path / 'semantic_seg_cube_spherical.npz', array=data) golden_image = np.load(self.golden_image_path / "semantic_seg_cube_spherical.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 0.1 @unittest.skip("OM-46393") async def test_geom_subset(self): """ Test sensor on GeomSubset. """ path = os.path.join(FILE_DIR, "../data/scenes/streetlamp_03_golden.usd") await omni.usd.get_context().open_stage_async(path) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) assert len(data) != 0 @unittest.skip("OM-46394") async def test_sem_seg_scene_instance(self): """ Test sensor on scene instance. """ path = os.path.join(FILE_DIR, "../data/scenes/scene_instance_test.usda") await omni.usd.get_context().open_stage_async(path) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_semantic_segmentation(self.viewport) # TODO add more complicated test assert len(data) != 0 # After running each test async def tearDown(self): pass
9,115
Python
39.878924
141
0.64531
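Every golden-image check in the segmentation tests above computes the same quantity: the root-mean-square difference between the rendered array and a stored .npz golden array (the tests name it std_dev, but it is an RMS error rather than a standard deviation). A small illustrative helper; the name rms_error is not part of the original code.

import numpy as np


def rms_error(data, golden):
    """Root-mean-square difference between a rendered output and a golden array."""
    return np.sqrt(np.square(data.astype(float) - golden.astype(float)).mean())


# Usage mirroring test_cube above:
#   golden = np.load(golden_image_path / "semantic_seg_cube.npz")["array"]
#   assert rms_error(data, golden) < 0.1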
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_bbox2d_tight.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from time import time import unittest import carb import numpy as np import omni.kit.test from pxr import Gf, UsdGeom from omni.kit.viewport.utility import get_active_viewport # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestBBox2DTight(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async( self.viewport, syn._syntheticdata.SensorType.BoundingBox2DTight ) async def test_parsed_empty(self): """ Test 2D bounding box on empty stage. """ bbox2d_data = syn.sensors.get_bounding_box_2d_tight(self.viewport) assert not bool(bbox2d_data) async def test_fields_exist(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox2d_data = syn.sensors.get_bounding_box_2d_tight(self.viewport) valid_dtype = [ ("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"), ("metadata", "O"), ("instanceIds", "O"), ("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4"), ] assert bbox2d_data.dtype == np.dtype(valid_dtype) async def test_cube(self): """ Basic test for the sensor. """ stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox2d_data = syn.sensors.get_bounding_box_2d_tight(self.viewport) assert bbox2d_data[0] x_min, y_min, x_max, y_max = bbox2d_data[0][6], bbox2d_data[0][7], bbox2d_data[0][8], bbox2d_data[0][9] assert x_min == 301 assert y_min == 21 assert x_max == 978 assert y_max == 698 @unittest.skip("OM-46398") async def test_bbox_2d_tight_scene_instance(self): """ Test sensor on scene instance. """ settings = carb.settings.get_settings() if settings.get("/rtx/hydra/enableSemanticSchema"): path = os.path.join(FILE_DIR, "../data/scenes/scene_instance_test.usda") await omni.usd.get_context().open_stage_async(path) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_bounding_box_2d_tight(self.viewport) # should be 3 prims in the scene. # TODO: Add more complicated test assert len(data) == 3 async def test_cube_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox2d_data = syn.sensors.get_bounding_box_2d_tight(self.viewport) x_min, y_min, x_max, y_max = bbox2d_data[0][6], bbox2d_data[0][7], bbox2d_data[0][8], bbox2d_data[0][9] assert x_min == 301 assert y_min == 21 assert x_max == 978 assert y_max == 698 async def test_cube_ray_traced_lighting(self): """ Basic test for the sensor, but in ray traced lighting mode. """ # Set the rendering mode to be ray traced lighting. settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) bbox2d_data = syn.sensors.get_bounding_box_2d_tight(self.viewport) x_min, y_min, x_max, y_max = bbox2d_data[0][6], bbox2d_data[0][7], bbox2d_data[0][8], bbox2d_data[0][9] assert x_min == 301 assert y_min == 21 assert x_max == 978 assert y_max == 698 # After running each test async def tearDown(self): pass
6,495
Python
35.088889
141
0.610162
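The assertions above read the box extents positionally (bbox2d_data[0][6] through [9]). Because the parsed result is a numpy structured array with the dtype asserted in test_fields_exist, the same values can be read by field name, which is less fragile. A short illustrative sketch; bbox_extents is not part of the original suite.

def bbox_extents(bbox_row):
    """Return (x_min, y_min, x_max, y_max) from one parsed 2D bounding box record."""
    return (int(bbox_row["x_min"]), int(bbox_row["y_min"]),
            int(bbox_row["x_max"]), int(bbox_row["y_max"]))


# Equivalent to bbox2d_data[0][6], bbox2d_data[0][7], bbox2d_data[0][8], bbox2d_data[0][9]
# as used in test_cube, test_cube_pathtracing and test_cube_ray_traced_lighting above.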
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_distance_to_camera.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os from time import time from pathlib import Path import carb import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestDistanceToCamera(omni.kit.test.AsyncTestCase): # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.DistanceToCamera) async def test_parsed_empty(self): """ Test distance-to-camera sensor on empty stage. """ # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) assert np.all(data > 1000) async def test_parsed_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) assert data.dtype == np.float32 async def test_distances(self): stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position # TODO get a more precise calculation of eye distance assert np.isclose(data.min(), (n - 1) / 100, atol=1e-1) async def test_distances_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" # Set the rendering mode to be pathtracing settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position # TODO get a more precise calculation of eye distance assert np.isclose(data.min(), (n - 1) / 100, atol=1e-1) async def test_distances_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ # Set the rendering mode to be pathtracing settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() for n in range(10, 100, 10): cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") # n = 5 UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -n)) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) assert data.max() > 1000 # The front of the cube is 1 ahead of its center position # TODO get a more precise calculation of eye distance assert np.isclose(data.min(), (n - 1) / 100, atol=1e-1) async def test_ftheta_camera(self): """ Test the functionality of the sensor under f-theta camera. """ settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() # Add a cube at the centre of the scene cube_prim = stage.DefinePrim("/Cube", "Cube") add_semantics(cube_prim, "cube") cube = UsdGeom.Cube(cube_prim) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) await omni.kit.app.get_app().next_update_async() # Centre of the data should be half of the cube edge's length, adjusted to correct scale. edge_length = (cube.GetSizeAttr().Get() - 1) / 100 # The max should be sqrt(((edge_length / 2) ** 2) * 2), which a pinhole camera won't see. assert np.isclose(np.sqrt(((edge_length / 2) ** 2)*2), data[data != np.inf].max(), atol=1e-3) async def test_spherical_camera(self): """ Test the functionality of the sensor under fisheye spherical camera. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be spherical camera camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical") # Set the Camera at the centre of the stage. UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() sphere_prim = stage.DefinePrim("/Sphere", "Sphere") add_semantics(sphere_prim, "sphere") sphere = UsdGeom.Sphere(sphere_prim) sphere.GetRadiusAttr().Set(20) # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_distance_to_camera(self.viewport) # np.savez_compressed(self.golden_image_path / 'distance_to_camera_spherical.npz', array=data) golden_image = np.load(self.golden_image_path / "distance_to_camera_spherical.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 # After running each test async def tearDown(self): pass
8,962
Python
40.688372
141
0.632113
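test_ftheta_camera above checks the farthest visible cube point (half an edge along each of two axes, something a pinhole camera would not see) against np.sqrt(((edge_length / 2) ** 2) * 2), where edge_length = (size - 1) / 100 is the test's own scale adjustment. Worked out for the default cube as a plain-arithmetic sketch:

import math

size = 2.0                                    # default USD Cube size attribute
edge_length = (size - 1) / 100                # 0.01, the scaled edge used in the test
expected_farthest = math.sqrt(((edge_length / 2) ** 2) * 2)   # half-edge along two axes
assert abs(expected_farthest - 0.005 * math.sqrt(2)) < 1e-12  # ~0.00707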
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_normals.py
# NOTE: # omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests # For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html import os import math import asyncio from time import time from pathlib import Path import carb import numpy as np import omni.kit.test from omni.kit.viewport.utility import get_active_viewport from pxr import Gf, UsdGeom, Sdf # Import extension python module we are testing with absolute import path, as if we are external user (other extension) import omni.syntheticdata as syn from ..utils import add_semantics FILE_DIR = os.path.dirname(os.path.realpath(__file__)) TIMEOUT = 200 # Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test class TestNormals(omni.kit.test.AsyncTestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden" # Before running each test async def setUp(self): np.random.seed(1234) # Setup viewport self.viewport = get_active_viewport() # Initialize Sensor await omni.usd.get_context().new_stage_async() stage = omni.usd.get_context().get_stage() await omni.kit.app.get_app().next_update_async() await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.Normal) async def test_parsed_empty(self): """ Test normals sensor on empty stage. """ # Render one frame await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) assert np.allclose(data, 0, 1e-3) async def test_parsed_dtype(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) assert data.dtype == np.float32 async def test_neg_z(self): """ Test that negative z faces are distinct from background """ stage = omni.usd.get_context().get_stage() camera = stage.DefinePrim("/Camera", "Camera") UsdGeom.Xformable(camera).AddRotateYOp().Set(180) UsdGeom.Xformable(camera).AddTranslateOp().Set((0.0, 0.0, 20.0)) self.viewport.camera_path = camera.GetPath() await omni.kit.app.get_app().next_update_async() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) assert len(np.unique(data)) == 2 async def test_rotated_cube(self): stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) # np.savez_compressed(self.golden_image_path / 'normals_cube.npz', array=data) golden_image = np.load(self.golden_image_path / "normals_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 async def test_rotated_cube_pathtracing(self): """ Basic funtionality test of the sensor, but in path tracing mode. 
""" settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) # np.savez_compressed(self.golden_image_path / 'normals_cube.npz', array=data) golden_image = np.load(self.golden_image_path / "normals_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 async def test_rotated_cube_ray_traced_lighting(self): """ Basic funtionality test of the sensor, but in ray traced lighting. """ # Set the rendering mode to be ray traced lighting. settings_interface = carb.settings.get_settings() settings_interface.set_string("/rtx/rendermode", "RayTracedLighting") stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) # np.savez_compressed(self.golden_image_path / 'normals_cube.npz', array=data) golden_image = np.load(self.golden_image_path / "normals_cube.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 async def test_rotated_cube_ftheta(self): """ Basic funtionality test of the sensor in f theta camera. """ # Set the mode to path traced for f theta camera. settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) await omni.kit.app.get_app().next_update_async() # Setting up camera. camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((200, 200, 200)) self.viewport.camera_path = camera.GetPath() await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) # np.savez_compressed(self.golden_image_path / 'normals_cube_ftheta.npz', array=data) golden_image = np.load(self.golden_image_path / "normals_cube_ftheta.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 async def test_rotated_cube_spherical(self): """ Basic funtionality test of the sensor in fisheye spherical camera. """ # Set the mode to path traced. settings = carb.settings.get_settings() settings.set_string("/rtx/rendermode", "PathTracing") settings.set_int("/rtx/pathtracing/spp", 32) settings.set_int("/persistent/app/viewport/displayOptions", 0) stage = omni.usd.get_context().get_stage() cube = stage.DefinePrim("/Cube", "Cube") add_semantics(cube, "cube") cube.GetAttribute("size").Set(100) # Setting up camera. camera = stage.DefinePrim("/Camera", "Camera") # Set the camera to be polynomial fish eye camera. 
camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical") # Set the Camera's position UsdGeom.Xformable(camera).AddTranslateOp().Set((200, 200, 200)) self.viewport.camera_path = camera.GetPath() await syn.sensors.next_sensor_data_async(self.viewport,True) data = syn.sensors.get_normals(self.viewport) # np.savez_compressed(self.golden_image_path / 'normals_cube_spherical.npz', array=data) golden_image = np.load(self.golden_image_path / "normals_cube_spherical.npz")["array"] std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean()) assert std_dev < 2 # After running each test async def tearDown(self): pass
8,401
Python
40.800995
141
0.653256
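The normals tests above rely on golden-array comparisons. An optional extra sanity check, not part of the original suite, is sketched below; it assumes get_normals returns an (H, W, 3) or (H, W, 4) array whose background pixels are zero vectors (as test_parsed_empty suggests) and whose foreground normals are unit length.

import numpy as np


def assert_unit_normals(normals, atol=1e-2):
    """Check that every non-zero normal vector has approximately unit norm."""
    vecs = normals[..., :3].reshape(-1, 3)
    norms = np.linalg.norm(vecs, axis=1)
    foreground = norms > atol        # background pixels are assumed to be zero vectors
    assert np.allclose(norms[foreground], 1.0, atol=atol)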
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_rgb.py
# NOTE:
#   omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
#   For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
import asyncio
from time import time
from pathlib import Path
import unittest

from PIL import Image
import carb
import numpy as np
from numpy.lib.arraysetops import unique
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, UsdGeom, Sdf, UsdLux

# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn

from ..utils import add_semantics

FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TIMEOUT = 200


# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestRGB(omni.kit.test.AsyncTestCase):
    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)
        self.golden_image_path = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "data" / "golden"

    # Before running each test
    async def setUp(self):
        np.random.seed(1234)
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        # Setup viewport
        self.viewport = get_active_viewport()

        # Initialize Sensor
        await omni.usd.get_context().new_stage_async()
        stage = omni.usd.get_context().get_stage()
        await omni.kit.app.get_app().next_update_async()
        await syn.sensors.create_or_retrieve_sensor_async(self.viewport, syn._syntheticdata.SensorType.Rgb)

    async def test_empty(self):
        """
        Test RGB sensor on empty stage.
        """
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.sensors.get_rgb(self.viewport)
        std_dev = np.sqrt(np.square(data - np.zeros_like(data)).astype(float).mean())
        assert std_dev < 2

    async def test_cube(self):
        """
        Test RGB sensor on stage with cube.
        """
        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        cube.GetAttribute("size").Set(100)
        cube.GetAttribute("primvars:displayColor").Set([(0, 0, 1)])
        await omni.kit.app.get_app().next_update_async()

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.sensors.get_rgb(self.viewport)
        golden_image = np.asarray(Image.open(str(self.golden_image_path / "rgb_cube.png")))
        std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean())
        assert std_dev < 2

    async def test_dtype(self):
        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.sensors.get_rgb(self.viewport)
        assert data.dtype == np.uint8

    @unittest.skip("OM-44741")
    async def test_cube_polynomial(self):
        """
        Test RGB sensor on stage with cube.
        """
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        cube.GetAttribute("size").Set(100)
        cube.GetAttribute("primvars:displayColor").Set([(0, 0, 1)])
        await omni.kit.app.get_app().next_update_async()
        # TODO: Add a light

        camera = stage.DefinePrim("/Camera", "Camera")
        # Set the camera to be a polynomial fisheye camera
        camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 200))
        self.viewport.camera_path = camera.GetPath()

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.sensors.get_rgb(self.viewport)
        # image = Image.fromarray(data)
        # image.save(str(self.golden_image_path / "rgb_cube_ftheta.png"))
        golden_image = np.asarray(Image.open(str(self.golden_image_path / "rgb_cube_ftheta.png")))
        std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean())
        assert std_dev < 2

    @unittest.skip("OM-44741")
    async def test_cube_spherical(self):
        """
        Test RGB sensor on stage with cube.
        """
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        cube.GetAttribute("size").Set(100)
        cube.GetAttribute("primvars:displayColor").Set([(0, 0, 1)])
        await omni.kit.app.get_app().next_update_async()
        # TODO: Add a light

        camera = stage.DefinePrim("/Camera", "Camera")
        # Set the camera to be a spherical fisheye camera
        camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 200))
        self.viewport.camera_path = camera.GetPath()

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.sensors.get_rgb(self.viewport)
        # image = Image.fromarray(data)
        # image.save(str(self.golden_image_path / "rgb_cube_spherical.png"))
        golden_image = np.asarray(Image.open(str(self.golden_image_path / "rgb_cube_spherical.png")))
        std_dev = np.sqrt(np.square(data - golden_image).astype(float).mean())
        assert std_dev < 2

    # After running each test
    async def tearDown(self):
        pass
6,388
Python
38.196319
141
0.648716
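The RGB tests above repeat a single golden-image check: render a frame, load a stored reference image, and compare them through a root-mean-square error with a threshold of 2. A minimal standalone sketch of that comparison, using only numpy and PIL outside Isaac Sim (the file name and threshold come from the tests; the helper name and the cast-to-float order are illustrative choices):

import numpy as np
from PIL import Image

def rmse_against_golden(rendered: np.ndarray, golden_path: str) -> float:
    """Root-mean-square error between a rendered frame and a stored golden image."""
    golden = np.asarray(Image.open(golden_path))
    diff = rendered.astype(float) - golden.astype(float)
    return float(np.sqrt(np.square(diff).mean()))

# Usage mirroring the tests: accept the frame if the error stays below 2.
# rendered = syn.sensors.get_rgb(viewport)
# assert rmse_against_golden(rendered, "data/golden/rgb_cube.png") < 2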
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_rendervar_buff_host_ptr.py
# NOTE:
#   omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
#   For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import unittest

import numpy as np
import ctypes

import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import UsdGeom, UsdLux

# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn

from ..utils import add_semantics


# Test the following SyntheticData nodes :
# - SdPostRenderVarTextureToBuffer : node to convert a texture device rendervar into a buffer device rendervar
# - SdPostRenderVarToHost : node to readback a device rendervar into a host rendervar
# - SdRenderVarPtr : node to expose in the action graph, raw device / host pointers on the renderVars
#
# the tests consist of pulling the ptr data and comparing it with the data output by :
# - SdRenderVarToRawArray
#
class TestRenderVarBuffHostPtr(omni.kit.test.AsyncTestCase):

    _tolerance = 1.1
    _outputs_ptr = ["outputs:dataPtr", "outputs:width", "outputs:height", "outputs:bufferSize", "outputs:format"]
    _outputs_arr = ["outputs:data", "outputs:width", "outputs:height", "outputs:bufferSize", "outputs:format"]

    @staticmethod
    def _assert_equal_tex_infos(out_a, out_b):
        assert((out_a["outputs:width"] == out_b["outputs:width"]) and (out_a["outputs:height"] == out_b["outputs:height"]) and (out_a["outputs:format"] == out_b["outputs:format"]))

    @staticmethod
    def _assert_equal_buff_infos(out_a, out_b):
        assert((out_a["outputs:bufferSize"] == out_b["outputs:bufferSize"]))

    @staticmethod
    def _assert_equal_data(data_a, data_b):
        assert(np.amax(np.square(data_a - data_b)) < TestRenderVarBuffHostPtr._tolerance)

    def _get_raw_array(self, rv):
        return syn.SyntheticData.Get().get_node_attributes(rv + "ExportRawArray", TestRenderVarBuffHostPtr._outputs_arr, self.render_product)

    def _get_ptr_array(self, rv, ptr_suffix):
        ptr_outputs = syn.SyntheticData.Get().get_node_attributes(rv + ptr_suffix, TestRenderVarBuffHostPtr._outputs_ptr, self.render_product)
        c_ptr = ctypes.cast(ptr_outputs["outputs:dataPtr"], ctypes.POINTER(ctypes.c_ubyte))
        ptr_outputs["outputs:dataPtr"] = np.ctypeslib.as_array(c_ptr, shape=(ptr_outputs["outputs:bufferSize"],))
        return ptr_outputs

    def _assert_equal_rv_ptr(self, rv: str, ptr_suffix: str, texture=None):
        arr_out = self._get_raw_array(rv)
        ptr_out = self._get_ptr_array(rv, ptr_suffix)
        if not texture is None:
            if texture:
                TestRenderVarBuffHostPtr._assert_equal_tex_infos(arr_out, ptr_out)
            else:
                TestRenderVarBuffHostPtr._assert_equal_buff_infos(arr_out, ptr_out)
        TestRenderVarBuffHostPtr._assert_equal_data(arr_out["outputs:data"], ptr_out["outputs:dataPtr"])

    def _assert_equal_rv_arr(self, rv: str, ptr_suffix: str, texture=None):
        arr_out_a = self._get_raw_array(rv)
        arr_out_b = self._get_raw_array(rv + ptr_suffix)
        if not texture is None:
            if texture:
                TestRenderVarBuffHostPtr._assert_equal_tex_infos(arr_out_a, arr_out_b)
            else:
                TestRenderVarBuffHostPtr._assert_equal_buff_infos(arr_out_a, arr_out_b)
        TestRenderVarBuffHostPtr._assert_equal_data(arr_out_a["outputs:data"], arr_out_b["outputs:data"])

    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)

    async def setUp(self):
        await omni.usd.get_context().new_stage_async()
        stage = omni.usd.get_context().get_stage()

        world_prim = UsdGeom.Xform.Define(stage, "/World")
        UsdGeom.Xformable(world_prim).AddTranslateOp().Set((0, 0, 0))
        UsdGeom.Xformable(world_prim).AddRotateXYZOp().Set((0, 0, 0))

        sphere_prim = stage.DefinePrim("/World/Sphere", "Sphere")
        add_semantics(sphere_prim, "sphere")
        UsdGeom.Xformable(sphere_prim).AddTranslateOp().Set((0, 0, 0))
        UsdGeom.Xformable(sphere_prim).AddScaleOp().Set((77, 77, 77))
        UsdGeom.Xformable(sphere_prim).AddRotateXYZOp().Set((-90, 0, 0))
        sphere_prim.GetAttribute("primvars:displayColor").Set([(1, 0.3, 1)])

        capsule0_prim = stage.DefinePrim("/World/Sphere/Capsule0", "Capsule")
        add_semantics(capsule0_prim, "capsule0")
        UsdGeom.Xformable(capsule0_prim).AddTranslateOp().Set((3, 0, 0))
        UsdGeom.Xformable(capsule0_prim).AddRotateXYZOp().Set((0, 0, 0))
        capsule0_prim.GetAttribute("primvars:displayColor").Set([(0.3, 1, 0)])

        capsule1_prim = stage.DefinePrim("/World/Sphere/Capsule1", "Capsule")
        add_semantics(capsule1_prim, "capsule1")
        UsdGeom.Xformable(capsule1_prim).AddTranslateOp().Set((-3, 0, 0))
        UsdGeom.Xformable(capsule1_prim).AddRotateXYZOp().Set((0, 0, 0))
        capsule1_prim.GetAttribute("primvars:displayColor").Set([(0, 1, 0.3)])

        capsule2_prim = stage.DefinePrim("/World/Sphere/Capsule2", "Capsule")
        add_semantics(capsule2_prim, "capsule2")
        UsdGeom.Xformable(capsule2_prim).AddTranslateOp().Set((0, 3, 0))
        UsdGeom.Xformable(capsule2_prim).AddRotateXYZOp().Set((0, 0, 0))
        capsule2_prim.GetAttribute("primvars:displayColor").Set([(0.7, 0.1, 0.4)])

        capsule3_prim = stage.DefinePrim("/World/Sphere/Capsule3", "Capsule")
        add_semantics(capsule3_prim, "capsule3")
        UsdGeom.Xformable(capsule3_prim).AddTranslateOp().Set((0, -3, 0))
        UsdGeom.Xformable(capsule3_prim).AddRotateXYZOp().Set((0, 0, 0))
        capsule3_prim.GetAttribute("primvars:displayColor").Set([(0.1, 0.7, 0.4)])

        spherelight = UsdLux.SphereLight.Define(stage, "/SphereLight")
        spherelight.GetIntensityAttr().Set(30000)
        spherelight.GetRadiusAttr().Set(30)

        self.viewport = get_active_viewport()
        self.render_product = self.viewport.render_product_path

    async def test_host_arr(self):
        render_vars = [
            "BoundingBox2DLooseSD",
            "SemanticLocalTransformSD"
        ]
        for rv in render_vars:
            syn.SyntheticData.Get().activate_node_template(rv + "ExportRawArray", 0, [self.render_product])
            syn.SyntheticData.Get().activate_node_template(rv + "hostExportRawArray", 0, [self.render_product])
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await omni.kit.app.get_app().next_update_async()
        for rv in render_vars:
            self._assert_equal_rv_arr(rv, "host", False)

    async def test_buff_arr(self):
        render_vars = [
            "Camera3dPositionSD",
            "DistanceToImagePlaneSD",
        ]
        for rv in render_vars:
            syn.SyntheticData.Get().activate_node_template(rv + "ExportRawArray", 0, [self.render_product])
            syn.SyntheticData.Get().activate_node_template(rv + "buffExportRawArray", 0, [self.render_product])
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await omni.kit.app.get_app().next_update_async()
        for rv in render_vars:
            self._assert_equal_rv_arr(rv, "buff")

    async def test_host_ptr(self):
        render_vars = [
            "BoundingBox2DTightSD",
            "BoundingBox3DSD",
            "InstanceMapSD"
        ]
        for rv in render_vars:
            syn.SyntheticData.Get().activate_node_template(rv + "ExportRawArray", 0, [self.render_product])
            syn.SyntheticData.Get().activate_node_template(rv + "hostPtr", 0, [self.render_product])
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await omni.kit.app.get_app().next_update_async()
        for rv in render_vars:
            self._assert_equal_rv_ptr(rv, "hostPtr", False)

    async def test_host_ptr_tex(self):
        render_vars = [
            "NormalSD",
            "DistanceToCameraSD"
        ]
        for rv in render_vars:
            syn.SyntheticData.Get().activate_node_template(rv + "ExportRawArray", 0, [self.render_product])
            syn.SyntheticData.Get().activate_node_template(rv + "hostPtr", 0, [self.render_product])
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await omni.kit.app.get_app().next_update_async()
        for rv in render_vars:
            self._assert_equal_rv_ptr(rv, "hostPtr", True)

    async def test_buff_host_ptr(self):
        render_vars = [
            "LdrColorSD",
            "InstanceSegmentationSD",
        ]
        for rv in render_vars:
            syn.SyntheticData.Get().activate_node_template(rv + "ExportRawArray", 0, [self.render_product])
            syn.SyntheticData.Get().activate_node_template(rv + "buffhostPtr", 0, [self.render_product])
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        await omni.kit.app.get_app().next_update_async()
        for rv in render_vars:
            self._assert_equal_rv_ptr(rv, "buffhostPtr", True)

    # After running each test
    async def tearDown(self):
        pass
9,320
Python
47.546875
180
0.652575
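The pointer-based checks in the file above hinge on one idiom: turning a raw host pointer plus a buffer size (as returned by the SdRenderVarPtr node outputs) into a numpy view before comparing it with the raw-array export. A self-contained sketch of that ctypes pattern, runnable outside Kit; the backing buffer here is a plain ctypes array standing in for the render-var host memory, and the helper name is illustrative:

import ctypes
import numpy as np

def view_from_pointer(data_ptr: int, buffer_size: int) -> np.ndarray:
    """Wrap a raw pointer and a size in bytes into a uint8 numpy view, without copying."""
    c_ptr = ctypes.cast(data_ptr, ctypes.POINTER(ctypes.c_ubyte))
    return np.ctypeslib.as_array(c_ptr, shape=(buffer_size,))

# Example with a stand-in host buffer of 8 bytes.
backing = (ctypes.c_ubyte * 8)(*range(8))
view = view_from_pointer(ctypes.addressof(backing), len(backing))
assert view.tolist() == list(range(8))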
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/sensors/test_bbox2d_loose.py
# NOTE:
#   omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
#   For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
import asyncio
from time import time
import unittest

import carb
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import Gf, UsdGeom, Sdf

# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn

from ..utils import add_semantics

FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TIMEOUT = 200


# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestBBox2DLoose(omni.kit.test.AsyncTestCase):
    # Before running each test
    async def setUp(self):
        np.random.seed(1234)
        # Setup viewport
        self.viewport = get_active_viewport()

        # Initialize Sensor
        await omni.usd.get_context().new_stage_async()
        stage = omni.usd.get_context().get_stage()
        await omni.kit.app.get_app().next_update_async()
        await syn.sensors.create_or_retrieve_sensor_async(
            self.viewport, syn._syntheticdata.SensorType.BoundingBox2DLoose
        )

    async def test_parsed_empty(self):
        """
        Test 2D bounding box on empty stage.
        """
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        assert not bool(bbox2d_data)

    async def test_bbox_2d_loose_fields_exist(self):
        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)

        valid_dtype = [
            ("uniqueId", "<i4"),
            ("name", "O"),
            ("semanticLabel", "O"),
            ("metadata", "O"),
            ("instanceIds", "O"),
            ("semanticId", "<u4"),
            ("x_min", "<i4"),
            ("y_min", "<i4"),
            ("x_max", "<i4"),
            ("y_max", "<i4"),
        ]
        assert bbox2d_data.dtype == np.dtype(valid_dtype)

    async def test_bbox_2d_loose_cube(self):
        """
        Basic test for the sensor.
        """
        stage = omni.usd.get_context().get_stage()

        camera = stage.DefinePrim("/Camera", "Camera")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0))
        self.viewport.camera_path = camera.GetPath()
        await omni.kit.app.get_app().next_update_async()

        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10))

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)

        assert bbox2d_data['x_min'] == 301
        assert bbox2d_data['y_min'] == 21
        assert bbox2d_data['x_max'] == 978
        assert bbox2d_data['y_max'] == 698

    async def test_cube_pathtracing(self):
        """
        Basic functionality test of the sensor, but in path tracing mode.
        """
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        stage = omni.usd.get_context().get_stage()

        camera = stage.DefinePrim("/Camera", "Camera")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0))
        self.viewport.camera_path = camera.GetPath()
        await omni.kit.app.get_app().next_update_async()

        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10))

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        assert bbox2d_data['x_min'] == 301
        assert bbox2d_data['y_min'] == 21
        assert bbox2d_data['x_max'] == 978
        assert bbox2d_data['y_max'] == 698

    async def test_cube_ray_traced_lighting(self):
        """
        Basic test for the sensor, but in ray traced lighting mode.
        """
        # Set the rendering mode to be ray traced lighting.
        settings_interface = carb.settings.get_settings()
        settings_interface.set_string("/rtx/rendermode", "RayTracedLighting")

        stage = omni.usd.get_context().get_stage()

        camera = stage.DefinePrim("/Camera", "Camera")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0))
        self.viewport.camera_path = camera.GetPath()
        await omni.kit.app.get_app().next_update_async()

        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10))

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        assert bbox2d_data['x_min'] == 301
        assert bbox2d_data['y_min'] == 21
        assert bbox2d_data['x_max'] == 978
        assert bbox2d_data['y_max'] == 698

    async def test_cube_ftheta(self):
        """
        Basic functionality test of the sensor in ftheta camera.
        """
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        stage = omni.usd.get_context().get_stage()

        camera = stage.DefinePrim("/Camera", "Camera")
        # Set the camera to be polynomial fish eye camera.
        camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyePolynomial")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0))
        self.viewport.camera_path = camera.GetPath()
        await omni.kit.app.get_app().next_update_async()

        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10))

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        assert bbox2d_data['x_min'] == 612
        assert bbox2d_data['y_min'] == 325
        assert bbox2d_data['x_max'] == 671
        assert bbox2d_data['y_max'] == 384

    async def test_cube_spherical(self):
        """
        Basic functionality test of the sensor in fisheye spherical camera.
        """
        settings = carb.settings.get_settings()
        settings.set_string("/rtx/rendermode", "PathTracing")
        settings.set_int("/rtx/pathtracing/spp", 32)
        settings.set_int("/persistent/app/viewport/displayOptions", 0)

        stage = omni.usd.get_context().get_stage()

        camera = stage.DefinePrim("/Camera", "Camera")
        # Set the camera to be fisheye spherical camera.
        camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set("fisheyeSpherical")
        UsdGeom.Xformable(camera).AddTranslateOp().Set((0, 0, 0))
        self.viewport.camera_path = camera.GetPath()
        await omni.kit.app.get_app().next_update_async()

        cube = stage.DefinePrim("/Cube", "Cube")
        add_semantics(cube, "cube")
        UsdGeom.XformCommonAPI(cube).SetTranslate((0, 0, -10))

        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        bbox2d_data = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        assert bbox2d_data['x_min'] == 617
        assert bbox2d_data['y_min'] == 335
        assert bbox2d_data['x_max'] == 662
        assert bbox2d_data['y_max'] == 384

    # After running each test
    async def tearDown(self):
        pass
8,240
Python
36.630137
141
0.622816
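The bounding-box tests above assert exact pixel extents on the structured array returned by get_bounding_box_2d_loose. A short sketch of consuming that structured dtype outside a test, for example to crop the detected region out of a rendered frame; the field names come from the dtype listed in test_bbox_2d_loose_fields_exist, while the stand-in frame and box values are illustrative:

import numpy as np

# Structured array with the same fields the sensor returns.
bbox_dtype = [("uniqueId", "<i4"), ("name", "O"), ("semanticLabel", "O"),
              ("metadata", "O"), ("instanceIds", "O"), ("semanticId", "<u4"),
              ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")]
boxes = np.array([(0, "/Cube", "cube", "", [], 1, 301, 21, 978, 698)], dtype=bbox_dtype)

rgb = np.zeros((720, 1280, 4), dtype=np.uint8)  # stand-in frame
for box in boxes:
    crop = rgb[box["y_min"]:box["y_max"], box["x_min"]:box["x_max"]]
    print(box["semanticLabel"], crop.shape)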