omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/extension.py

# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
import math
import time
import datetime
import omni.ext
import carb
import omni.kit.app
import omni.timeline
import omni.usd
from omni.kit.viewport.utility import get_active_viewport, capture_viewport_to_file
from .capture_options import *
from .capture_progress import *
from .video_generation import VideoGenerationHelper
from .helper import get_num_pattern_file_path
VIDEO_FRAMES_DIR_NAME = "frames"
DEFAULT_IMAGE_FRAME_TYPE_FOR_VIDEO = ".png"
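# Module-level singleton: assigned in CaptureExtension.on_startup() and exposed via CaptureExtension.get_instance().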
capture_instance = None
class CaptureExtension(omni.ext.IExt):
def on_startup(self):
global capture_instance
capture_instance = self
self._options = CaptureOptions()
self._progress = CaptureProgress()
self._progress_window = CaptureProgressWindow()
self._renderer = None
import omni.renderer_capture
self._renderer = omni.renderer_capture.acquire_renderer_capture_interface()
self._viewport_api = None
self._app = omni.kit.app.get_app_interface()
self._timeline = omni.timeline.get_timeline_interface()
self._usd_context = omni.usd.get_context()
self._selection = self._usd_context.get_selection()
self._settings = carb.settings.get_settings()
self._show_default_progress_window = True
self._progress_update_fn = None
self._forward_one_frame_fn = None
self._capture_finished_fn = None
carb.log_warn("Deprecated notice: Please be noted that omni.kit.capture has been replaced with omni.kit.capture.viewport and will be removed in future releases. " \
"Please update the use of this extension to the new one.")
def on_shutdown(self):
self._progress = None
self._progress_window = None
global capture_instance
capture_instance = None
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
@property
def progress(self):
return self._progress
@property
def show_default_progress_window(self):
return self._show_default_progress_window
@show_default_progress_window.setter
def show_default_progress_window(self, value):
self._show_default_progress_window = value
@property
def progress_update_fn(self):
return self._progress_update_fn
@progress_update_fn.setter
def progress_update_fn(self, value):
self._progress_update_fn = value
@property
def forward_one_frame_fn(self):
return self._forward_one_frame_fn
@forward_one_frame_fn.setter
def forward_one_frame_fn(self, value):
self._forward_one_frame_fn = value
@property
def capture_finished_fn(self):
return self._capture_finished_fn
@capture_finished_fn.setter
def capture_finished_fn(self, value):
self._capture_finished_fn = value
def start(self):
self._prepare_folder_and_counters()
self._prepare_viewport()
self._start_internal()
def pause(self):
        pause_button = getattr(self, "_ui_pause_button", None)
        if self._progress.capture_status == CaptureStatus.CAPTURING and (pause_button is None or pause_button.text == "Pause"):
self._progress.capture_status = CaptureStatus.PAUSED
def resume(self):
if self._progress.capture_status == CaptureStatus.PAUSED:
self._progress.capture_status = CaptureStatus.CAPTURING
def cancel(self):
if (
self._progress.capture_status == CaptureStatus.CAPTURING
or self._progress.capture_status == CaptureStatus.PAUSED
):
self._progress.capture_status = CaptureStatus.CANCELLED
def _update_progress_hook(self):
if self._progress_update_fn is not None:
self._progress_update_fn(
self._progress.capture_status,
self._progress.progress,
self._progress.elapsed_time,
self._progress.estimated_time_remaining,
self._progress.current_frame_time,
self._progress.average_frame_time,
self._progress.encoding_time,
self._frame_counter,
self._total_frame_count,
)
def _get_index_for_image(self, dir, file_name, image_suffix):
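        """Return the next free numeric suffix for files named '<file_name><N><image_suffix>' in 'dir'.

        For example, with file_name 'Capture', suffix '.png' and existing 'Capture1.png' and
        'Capture3.png', this returns 4.
        """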
        def is_int(string_val):
            try:
                int(string_val)
                return True
            except ValueError:
                return False
images = os.listdir(dir)
name_len = len(file_name)
suffix_len = len(image_suffix)
max_index = 0
for item in images:
if item.startswith(file_name) and item.endswith(image_suffix):
num_part = item[name_len : (len(item) - suffix_len)]
if is_int(num_part):
num = int(num_part)
if max_index < num:
max_index = num
return max_index + 1
def _float_to_time(self, ft):
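        # Convert a fractional-hour float to a datetime.time, e.g. 13.5 -> 13:30:00.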
hour = int(ft)
ft = (ft - hour) * 60
minute = int(ft)
ft = (ft - minute) * 60
second = int(ft)
ft = (ft - second) * 1000000
microsecond = int(ft)
return datetime.time(hour, minute, second, microsecond)
def _is_environment_sunstudy_player(self):
if self._options.sunstudy_player is not None:
return type(self._options.sunstudy_player).__module__ == "omni.kit.environment.core.sunstudy_player.player"
else:
carb.log_warn("Sunstudy player type check is valid only when the player is available.")
return False
def _update_sunstudy_player_time(self):
if self._is_environment_sunstudy_player():
self._options.sunstudy_player.current_time = self._sunstudy_current_time
else:
self._options.sunstudy_player.update_time(self._sunstudy_current_time)
def _set_sunstudy_player_time(self, current_time):
if self._is_environment_sunstudy_player():
self._options.sunstudy_player.current_time = current_time
else:
date_time = self._options.sunstudy_player.get_date_time()
time_to_set = self._float_to_time(current_time)
new_date_time = datetime.datetime(
date_time.year, date_time.month, date_time.day,
time_to_set.hour, time_to_set.minute, time_to_set.second
)
self._options.sunstudy_player.set_date_time(new_date_time)
def _prepare_sunstudy_counters(self):
self._total_frame_count = self._options.fps * self._options.sunstudy_movie_length_in_seconds
duration = self._options.sunstudy_end_time - self._options.sunstudy_start_time
self._sunstudy_iterations_per_frame = self._options.ptmb_subframes_per_frame
self._sunstudy_delta_time_per_iteration = duration / float(self._total_frame_count * self._sunstudy_iterations_per_frame)
self._sunstudy_current_time = self._options.sunstudy_start_time
self._set_sunstudy_player_time(self._sunstudy_current_time)
def _prepare_folder_and_counters(self):
self._workset_dir = self._options.output_folder
if not self._make_sure_directory_existed(self._workset_dir):
carb.log_warn(f"Capture failed due to unable to create folder {self._workset_dir}")
self._finish()
return
if self._options.is_capturing_nth_frames():
if self._options.capture_every_Nth_frames == 1:
frames_folder = self._options.file_name + "_frames"
else:
frames_folder = self._options.file_name + "_" + str(self._options.capture_every_Nth_frames) + "th_frames"
self._nth_frames_dir = os.path.join(self._workset_dir, frames_folder)
if not self._make_sure_directory_existed(self._nth_frames_dir):
carb.log_warn(f"Capture failed due to unable to create folder {self._nth_frames_dir}")
self._finish()
return
if self._options.is_video():
self._frames_dir = os.path.join(self._workset_dir, self._options.file_name + "_" + VIDEO_FRAMES_DIR_NAME)
if not self._make_sure_directory_existed(self._frames_dir):
                carb.log_warn(
                    f"Capture failed: unable to create folder {self._frames_dir} to save the video frames."
                )
self._finish()
return
self._video_name = self._options.get_full_path()
self._frame_pattern_prefix = os.path.join(self._frames_dir, self._options.file_name)
if self._options.is_capturing_frame():
self._start_time = float(self._options.start_frame) / self._options.fps
self._end_time = float(self._options.end_frame + 1) / self._options.fps
self._time = self._start_time
self._frame = self._options.start_frame
self._start_number = self._frame
self._total_frame_count = round((self._end_time - self._start_time) * self._options.fps)
else:
self._start_time = self._options.start_time
self._end_time = self._options.end_time
self._time = self._options.start_time
self._frame = int(self._options.start_time * self._options.fps)
self._start_number = self._frame
self._total_frame_count = math.ceil((self._end_time - self._start_time) * self._options.fps)
if self._options.movie_type == CaptureMovieType.SUNSTUDY:
self._prepare_sunstudy_counters()
else:
if self._options.is_capturing_nth_frames():
self._frame_pattern_prefix = self._nth_frames_dir
if self._options.is_capturing_frame():
self._start_time = float(self._options.start_frame) / self._options.fps
self._end_time = float(self._options.end_frame + 1) / self._options.fps
self._time = self._start_time
self._frame = self._options.start_frame
self._start_number = self._frame
self._total_frame_count = round((self._end_time - self._start_time) * self._options.fps)
else:
self._start_time = self._options.start_time
self._end_time = self._options.end_time
self._time = self._options.start_time
self._frame = int(self._options.start_time * self._options.fps)
self._start_number = self._frame
self._total_frame_count = math.ceil((self._end_time - self._start_time) * self._options.fps)
if self._options.movie_type == CaptureMovieType.SUNSTUDY:
self._prepare_sunstudy_counters()
else:
index = self._get_index_for_image(self._workset_dir, self._options.file_name, self._options.file_type)
self._frame_pattern_prefix = os.path.join(self._workset_dir, self._options.file_name + str(index))
self._start_time = self._timeline.get_current_time()
self._end_time = self._timeline.get_current_time()
self._time = self._timeline.get_current_time()
self._frame = 1
self._start_number = self._frame
self._total_frame_count = 1
self._subframe = 0
self._sample_count = 0
self._frame_counter = 0
self._real_time_settle_latency_frames_done = 0
self._last_skipped_frame_path = ""
self._path_trace_iterations = 0
self._time_rate = 1.0 / self._options.fps
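        # With e.g. fps=24, ptmb_fso=0.0, ptmb_fsc=0.5 and 4 motion-blur subframes per frame,
        # the subframe time rate below works out to (1/24) * 0.5 / 4 seconds per subframe.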
self._time_subframe_rate = (
self._time_rate * (self._options.ptmb_fsc - self._options.ptmb_fso) / self._options.ptmb_subframes_per_frame
)
def _prepare_viewport(self):
viewport_api = get_active_viewport()
if viewport_api is None:
return False
self._record_current_window_status(viewport_api)
viewport_api.camera_path = self._options.camera
viewport_api.resolution = (self._options.res_width, self._options.res_height)
self._settings.set_bool("/persistent/app/captureFrame/viewport", True)
self._settings.set_bool("/app/captureFrame/setAlphaTo1", not self._options.save_alpha)
if self._options.file_type == ".exr":
self._settings.set_bool("/app/captureFrame/hdr", self._options.hdr_output)
else:
self._settings.set_bool("/app/captureFrame/hdr", False)
if self._options.save_alpha:
self._settings.set_bool("/rtx/post/backgroundZeroAlpha/enabled", True)
self._settings.set_bool("/rtx/post/backgroundZeroAlpha/backgroundComposite", False)
self._selection.clear_selected_prim_paths()
if self._options.render_preset == CaptureRenderPreset.RAY_TRACE:
self._switch_renderer = not (self._saved_active_render == "rtx" and self._saved_render_mode == "RaytracedLighting")
if self._switch_renderer:
viewport_api.set_hd_engine("rtx", "RaytracedLighting")
carb.log_info("Switching to RayTracing Mode")
elif self._options.render_preset == CaptureRenderPreset.PATH_TRACE:
self._switch_renderer = not (self._saved_active_render == "rtx" and self._saved_render_mode == "PathTracing")
if self._switch_renderer:
viewport_api.set_hd_engine("rtx", "PathTracing")
carb.log_info("Switching to PathTracing Mode")
elif self._options.render_preset == CaptureRenderPreset.IRAY:
self._switch_renderer = not (self._saved_active_render == "iray" and self._saved_render_mode == "iray")
if self._switch_renderer:
viewport_api.set_hd_engine("iray", "iray")
carb.log_info("Switching to IRay Mode")
else:
self._switch_renderer = False
carb.log_info("Keeping current Render Mode")
if self._options.debug_material_type == CaptureDebugMaterialType.SHADED:
self._settings.set_int("/rtx/debugMaterialType", -1)
elif self._options.debug_material_type == CaptureDebugMaterialType.WHITE:
self._settings.set_int("/rtx/debugMaterialType", 0)
else:
carb.log_info("Keeping current debug mateiral type")
# set it to 0 to ensure we accumulate as many samples as requested even across subframes (for motion blur)
self._settings.set_int("/rtx/pathtracing/totalSpp", 0)
# don't show light and grid during capturing
self._settings.set_int("/persistent/app/viewport/displayOptions", 0)
# disable async rendering for capture, otherwise it won't capture images correctly
self._settings.set_bool("/app/asyncRendering", False)
self._settings.set_bool("/app/asyncRenderingLowLatency", False)
        # Rendering to some image buffers additionally requires explicitly setting `set_capture_sync(True)`, on top of
        # disabling the `/app/asyncRendering` setting. This can otherwise cause images to hold corrupted buffer
        # information by erroneously assuming a complete image buffer is available when only a first partial subframe
        # has been rendered (as in the case of EXR):
self._renderer.set_capture_sync( self._options.file_type == ".exr" )
        # Frames to wait for the async settings above to take effect; they need to be detected by the viewport,
        # which then notifies the renderer not to do async rendering
self._frames_to_disable_async_rendering = 2
# Normally avoid using a high /rtx/pathtracing/spp setting since it causes GPU
# timeouts for large sample counts. But a value larger than 1 can be useful in Multi-GPU setups
self._settings.set_int(
"/rtx/pathtracing/spp", min(self._options.spp_per_iteration, self._options.path_trace_spp)
)
# Setting resetPtAccumOnlyWhenExternalFrameCounterChanges ensures we control accumulation explicitly
        # by simply changing the /rtx/externalFrameCounter value
self._settings.set_bool("/rtx-transient/resetPtAccumOnlyWhenExternalFrameCounterChanges", True)
if self._options.render_preset == CaptureRenderPreset.IRAY:
self._settings.set("/rtx/iray/progressive_rendering_max_samples", self._options.path_trace_spp)
# Enable syncLoads in materialDB and Hydra. This is needed to make sure texture updates finish before we start the rendering
self._settings.set("/rtx/materialDb/syncLoads", True)
self._settings.set("/rtx/hydra/materialSyncLoads", True)
self._settings.set("/rtx-transient/resourcemanager/texturestreaming/async", False)
self._settings.set("/rtx-transient/resourcemanager/texturestreaming/streamingBudgetMB", 0)
return True
def _show_progress_window(self):
return (
self._options.is_video() or self._options.is_capturing_nth_frames() or (self._options.show_single_frame_progress and self._options.is_capturing_pathtracing_single_frame())
) and self.show_default_progress_window
def _start_internal(self):
# If texture streaming is enabled, set the preroll to at least 1 frame
if self._settings.get("/rtx-transient/resourcemanager/enableTextureStreaming"):
self._options.preroll_frames = max(self._options.preroll_frames, 1)
        # set USD time codes per second to the target frame rate
self._saved_timecodes_per_second = self._timeline.get_time_codes_per_seconds()
self._timeline.set_time_codes_per_second(self._options.fps)
        # if we want preroll, set the timeline's current time back by the preroll frames' duration,
        # and rely on the timeline to do the preroll using the given timecode
if self._options.preroll_frames > 0:
self._timeline.set_current_time(self._start_time - self._options.preroll_frames / self._options.fps)
self._settings.set("/iray/current_frame_time", self._start_time - self._options.preroll_frames / self._options.fps)
else:
self._timeline.set_current_time(self._start_time)
self._settings.set("/iray/current_frame_time", self._start_time)
# change timeline to be in play state
self._timeline.play()
# disable automatic time update in timeline so that movie capture tool can control time step
self._timeline.set_auto_update(False)
self._update_sub = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update, order=1000000)
)
self._progress.start_capturing(self._end_time - self._start_time, self._time_rate, self._options.preroll_frames)
# always show single frame capture progress for PT mode
self._options.show_single_frame_progress = True
if self._show_progress_window():
self._progress_window.show(
self._progress,
self._options.is_capturing_pathtracing_single_frame(),
show_pt_subframes=self._options.render_preset == CaptureRenderPreset.PATH_TRACE,
show_pt_iterations=self._options.render_preset == CaptureRenderPreset.IRAY
)
def _record_current_window_status(self, viewport_api):
assert viewport_api is not None, "No viewport to record to"
self._viewport_api = viewport_api
self._saved_camera = viewport_api.camera_path
self._saved_hydra_engine = viewport_api.hydra_engine
self._saved_render_mode = viewport_api.render_mode
resolution = viewport_api.resolution
self._saved_resolution_width = int(resolution[0])
self._saved_resolution_height = int(resolution[1])
self._saved_capture_frame_viewport = self._settings.get("/persistent/app/captureFrame/viewport")
self._saved_active_render = self._settings.get("/renderer/active")
self._saved_debug_material_type = self._settings.get("/rtx/debugMaterialType")
self._saved_total_spp = int(self._settings.get("/rtx/pathtracing/totalSpp"))
self._saved_spp = int(self._settings.get("/rtx/pathtracing/spp"))
self._saved_reset_pt_accum_only = self._settings.get("/rtx-transient/resetPtAccumOnlyWhenExternalFrameCounterChanges")
self._saved_display_options = self._settings.get("/persistent/app/viewport/displayOptions")
self._saved_async_rendering = self._settings.get("/app/asyncRendering")
self._saved_async_renderingLatency = self._settings.get("/app/asyncRenderingLowLatency")
self._saved_background_zero_alpha = self._settings.get("/rtx/post/backgroundZeroAlpha/enabled")
self._saved_background_zero_alpha_comp = self._settings.get("/rtx/post/backgroundZeroAlpha/backgroundComposite")
if self._options.render_preset == CaptureRenderPreset.IRAY:
self._saved_iray_sample_limit = int(self._settings.get("/rtx/iray/progressive_rendering_max_samples"))
if self._options.movie_type == CaptureMovieType.SUNSTUDY:
self._saved_sunstudy_current_time = self._options.sunstudy_current_time
self._saved_timeline_current_time = self._timeline.get_current_time()
self._saved_rtx_sync_load_setting = self._settings.get("/rtx/materialDb/syncLoads")
self._saved_hydra_sync_load_setting = self._settings.get("/rtx/hydra/materialSyncLoads")
self._saved_async_texture_streaming = self._settings.get("/rtx-transient/resourcemanager/texturestreaming/async")
self._saved_texture_streaming_budget = self._settings.get("/rtx-transient/resourcemanager/texturestreaming/streamingBudgetMB")
def _restore_window_status(self):
viewport_api, self._viewport_api = self._viewport_api, None
assert viewport_api is not None, "No viewport to restore to"
viewport_api.camera_path = self._saved_camera
viewport_api.resolution = (self._saved_resolution_width, self._saved_resolution_height)
if self._switch_renderer:
viewport_api.set_hd_engine(self._saved_hydra_engine, self._saved_render_mode)
self._settings.set_bool("/persistent/app/captureFrame/viewport", self._saved_capture_frame_viewport)
self._settings.set_int("/rtx/debugMaterialType", self._saved_debug_material_type)
self._settings.set_int("/rtx/pathtracing/totalSpp", self._saved_total_spp)
self._settings.set_int("/rtx/pathtracing/spp", self._saved_spp)
self._settings.set_bool("/rtx-transient/resetPtAccumOnlyWhenExternalFrameCounterChanges", self._saved_reset_pt_accum_only)
self._settings.set_int("/persistent/app/viewport/displayOptions", self._saved_display_options)
self._settings.set_bool("/app/asyncRendering", self._saved_async_rendering)
self._settings.set_bool("/app/asyncRenderingLowLatency", self._saved_async_renderingLatency)
self._renderer.set_capture_sync(not self._saved_async_rendering)
self._settings.set_bool("/rtx/post/backgroundZeroAlpha/enabled", self._saved_background_zero_alpha)
self._settings.set_bool(
"/rtx/post/backgroundZeroAlpha/backgroundComposite", self._saved_background_zero_alpha_comp
)
if self._options.render_preset == CaptureRenderPreset.IRAY:
self._settings.set("/rtx/iray/progressive_rendering_max_samples", self._saved_iray_sample_limit)
if self._options.movie_type == CaptureMovieType.SUNSTUDY:
self._set_sunstudy_player_time(self._saved_sunstudy_current_time)
self._settings.set("/rtx/materialDb/syncLoads", self._saved_rtx_sync_load_setting)
self._settings.set("/rtx/hydra/materialSyncLoads", self._saved_hydra_sync_load_setting)
self._settings.set("/rtx-transient/resourcemanager/texturestreaming/async", self._saved_async_texture_streaming)
self._settings.set("/rtx-transient/resourcemanager/texturestreaming/streamingBudgetMB", self._saved_texture_streaming_budget)
self._settings.set("/app/captureFrame/hdr", False)
def _clean_pngs_in_directory(self, directory):
self._clean_files_in_directory(directory, ".png")
def _clean_files_in_directory(self, directory, suffix):
images = os.listdir(directory)
for item in images:
if item.endswith(suffix):
os.remove(os.path.join(directory, item))
def _make_sure_directory_existed(self, directory):
if not os.path.exists(directory):
try:
os.makedirs(directory, exist_ok=True)
except OSError as error:
carb.log_warn(f"Directory cannot be created: {dir}")
return False
return True
def _finish(self):
if (
self._progress.capture_status == CaptureStatus.FINISHING
or self._progress.capture_status == CaptureStatus.CANCELLED
):
self._update_sub = None
self._restore_window_status()
self._sample_count = 0
self._start_number = 0
self._frame_counter = 0
self._path_trace_iterations = 0
self._progress.capture_status = CaptureStatus.NONE
# restore timeline settings
# stop timeline, but re-enable auto update
timeline = self._timeline
timeline.set_auto_update(True)
timeline.stop()
timeline.set_time_codes_per_second(self._saved_timecodes_per_second)
self._timeline.set_current_time(self._saved_timeline_current_time)
self._settings.set("/iray/current_frame_time", self._saved_timeline_current_time)
if self._show_progress_window():
self._progress_window.close()
if self._capture_finished_fn is not None:
self._capture_finished_fn()
def _wait_for_image_writing(self):
        # Wait for the last frame to be written to disk.
        # Tests on scenes of varying complexity show a wait time of 0.2 to 1 second,
        # so a 5 second maximum should be enough, and we can quit early by checking the last
        # frame every 0.1 seconds.
SECONDS_TO_WAIT = 5
SECONDS_EACH_TIME = 0.1
seconds_tried = 0.0
carb.log_info("Waiting for frames to be ready for encoding.")
while seconds_tried < SECONDS_TO_WAIT:
if os.path.isfile(self._frame_path) and os.access(self._frame_path, os.R_OK):
break
else:
time.sleep(SECONDS_EACH_TIME)
seconds_tried += SECONDS_EACH_TIME
if seconds_tried >= SECONDS_TO_WAIT:
carb.log_warn(f"Wait time out. To start encoding with images already have.")
def _capture_viewport(self, frame_path: str):
capture_viewport_to_file(self._viewport_api, file_path=frame_path)
carb.log_info(f"Capturing {frame_path}")
def _get_current_frame_output_path(self):
frame_path = ""
if self._options.is_video():
frame_path = get_num_pattern_file_path(
self._frames_dir,
self._options.file_name,
self._options.file_name_num_pattern,
self._frame,
DEFAULT_IMAGE_FRAME_TYPE_FOR_VIDEO,
self._options.renumber_negative_frame_number_from_0,
abs(self._start_number)
)
else:
if self._options.is_capturing_nth_frames():
if self._frame_counter % self._options.capture_every_Nth_frames == 0:
frame_path = get_num_pattern_file_path(
self._frame_pattern_prefix,
self._options.file_name,
self._options.file_name_num_pattern,
self._frame,
self._options.file_type,
self._options.renumber_negative_frame_number_from_0,
abs(self._start_number)
)
else:
frame_path = self._frame_pattern_prefix + self._options.file_type
return frame_path
def _handle_skipping_frame(self, dt):
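        """Skip the current frame when it already exists on disk and overwriting is disabled.

        Returns True when the existing frame was skipped and the capture state advanced past it,
        False when the frame should be rendered (a warning is logged if it will be overwritten).
        """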
if not self._options.overwrite_existing_frames:
if os.path.exists(self._frame_path):
carb.log_warn(f"Frame {self._frame_path} exists, skip it...")
self._settings.set_int("/rtx/pathtracing/spp", 1)
self._subframe = 0
self._sample_count = 0
self._path_trace_iterations = 0
can_continue = True
if self._forward_one_frame_fn is not None:
can_continue = self._forward_one_frame_fn(dt)
elif self._options.movie_type == CaptureMovieType.SUNSTUDY and \
(self._options.is_video() or self._options.is_capturing_nth_frames()):
self._sunstudy_current_time += self._sunstudy_delta_time_per_iteration * self._sunstudy_iterations_per_frame
self._update_sunstudy_player_time()
else: # movie type is SEQUENCE
self._time = self._start_time + (self._frame - self._start_number) * self._time_rate
self._timeline.set_current_time(self._time)
self._settings.set("/iray/current_frame_time", self._time)
self._frame += 1
self._frame_counter += 1
# check if capture ends
if self._forward_one_frame_fn is not None:
if can_continue is False:
if self._options.is_video():
self._progress.capture_status = CaptureStatus.TO_START_ENCODING
elif self._options.is_capturing_nth_frames():
self._progress.capture_status = CaptureStatus.FINISHING
else:
if self._time >= self._end_time or self._frame_counter >= self._total_frame_count:
if self._options.is_video():
self._progress.capture_status = CaptureStatus.TO_START_ENCODING
elif self._options.is_capturing_nth_frames():
self._progress.capture_status = CaptureStatus.FINISHING
return True
else:
if os.path.exists(self._frame_path) and self._last_skipped_frame_path != self._frame_path:
carb.log_warn(f"Frame {self._frame_path} will be overwritten.")
self._last_skipped_frame_path = self._frame_path
return False
def _handle_real_time_capture_settle_latency(self):
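        """Let RTX real-time rendering settle for the configured number of frames before capturing.

        Returns True while still settling (the caller should wait another update), False once
        the current frame can be captured.
        """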
if (self._options.render_preset == CaptureRenderPreset.RAY_TRACE
and self._options.real_time_settle_latency_frames > 0):
self._real_time_settle_latency_frames_done += 1
if self._real_time_settle_latency_frames_done > self._options.real_time_settle_latency_frames:
self._real_time_settle_latency_frames_done = 0
return False
else:
self._subframe = 0
self._sample_count = 0
return True
return False
def _on_update(self, e):
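        """Per-update state machine that drives the capture.

        CAPTURING renders and saves frames; video captures then go through
        TO_START_ENCODING -> ENCODING before FINISHING, image sequences go straight
        to FINISHING, and CANCELLED tears everything down via _finish().
        """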
dt = e.payload["dt"]
if self._progress.capture_status == CaptureStatus.FINISHING:
self._finish()
self._update_progress_hook()
elif self._progress.capture_status == CaptureStatus.CANCELLED:
# carb.log_warn("video recording cancelled")
self._update_progress_hook()
self._finish()
elif self._progress.capture_status == CaptureStatus.ENCODING:
if VideoGenerationHelper().encoding_done:
self._progress.capture_status = CaptureStatus.FINISHING
self._update_progress_hook()
self._progress.add_encoding_time(dt)
elif self._progress.capture_status == CaptureStatus.TO_START_ENCODING:
if VideoGenerationHelper().is_encoding is False:
self._wait_for_image_writing()
if self._options.renumber_negative_frame_number_from_0 is True and self._start_number < 0:
video_frame_start_num = 0
else:
video_frame_start_num = self._start_number
started = VideoGenerationHelper().generating_video(
self._video_name,
self._frames_dir,
self._options.file_name,
self._options.file_name_num_pattern,
video_frame_start_num,
self._total_frame_count,
self._options.fps,
)
if started:
self._progress.capture_status = CaptureStatus.ENCODING
self._update_progress_hook()
self._progress.add_encoding_time(dt)
else:
carb.log_warn("Movie capture failed to encode the capture images.")
self._progress.capture_status = CaptureStatus.FINISHING
elif self._progress.capture_status == CaptureStatus.CAPTURING:
if self._frames_to_disable_async_rendering >= 0:
self._frames_to_disable_async_rendering -= 1
return
if self._progress.is_prerolling():
self._progress.prerolled_frames += 1
self._settings.set_int("/rtx/pathtracing/spp", 1)
left_preroll_frames = self._options.preroll_frames - self._progress.prerolled_frames
self._timeline.set_current_time(self._start_time - left_preroll_frames / self._options.fps)
self._settings.set("/iray/current_frame_time", self._start_time - left_preroll_frames / self._options.fps)
return
self._frame_path = self._get_current_frame_output_path()
if self._handle_skipping_frame(dt):
return
self._settings.set_int(
"/rtx/pathtracing/spp", min(self._options.spp_per_iteration, self._options.path_trace_spp)
)
if self._options.render_preset == CaptureRenderPreset.IRAY:
iterations_done = int(self._settings.get("/iray/progression"))
self._path_trace_iterations = iterations_done
self._sample_count = iterations_done
else:
self._sample_count += self._options.spp_per_iteration
self._timeline.set_prerolling(False)
self._settings.set_int("/rtx/externalFrameCounter", self._frame)
# update progress timers
if self._options.is_capturing_pathtracing_single_frame():
self._progress.add_single_frame_capture_time(self._subframe, self._options.ptmb_subframes_per_frame, dt)
else:
self._progress.add_frame_time(self._frame, dt, self._subframe + 1, self._path_trace_iterations)
self._update_progress_hook()
            # capture frame when we reach the sample count for this frame and are rendering the last subframe
            # Note: _sample_count can exceed path_trace_spp when spp_per_iteration > 1
if (self._sample_count >= self._options.path_trace_spp) and (
self._subframe == self._options.ptmb_subframes_per_frame - 1
):
if self._handle_real_time_capture_settle_latency():
return
if self._options.is_video():
self._capture_viewport(self._frame_path)
else:
if self._options.is_capturing_nth_frames():
if self._frame_counter % self._options.capture_every_Nth_frames == 0:
self._capture_viewport(self._frame_path)
else:
self._progress.capture_status = CaptureStatus.FINISHING
self._capture_viewport(self._frame_path)
            # reset the accumulation counters for the *next* frame (since otherwise we would capture only the first sample)
if self._sample_count >= self._options.path_trace_spp:
self._sample_count = 0
self._path_trace_iterations = 0
self._subframe += 1
if self._subframe == self._options.ptmb_subframes_per_frame:
self._subframe = 0
self._frame += 1
self._frame_counter += 1
self._time = self._start_time + (self._frame - self._start_number) * self._time_rate
can_continue = False
if self._forward_one_frame_fn is not None:
can_continue = self._forward_one_frame_fn(dt)
elif self._options.movie_type == CaptureMovieType.SUNSTUDY and \
(self._options.is_video() or self._options.is_capturing_nth_frames()):
self._sunstudy_current_time += self._sunstudy_delta_time_per_iteration
self._update_sunstudy_player_time()
else:
cur_time = (
self._time + (self._options.ptmb_fso * self._time_rate) + self._time_subframe_rate * self._subframe
)
self._timeline.set_current_time(cur_time)
self._settings.set("/iray/current_frame_time", cur_time)
if self._forward_one_frame_fn is not None:
if can_continue == False:
if self._options.is_video():
self._progress.capture_status = CaptureStatus.TO_START_ENCODING
elif self._options.is_capturing_nth_frames():
self._progress.capture_status = CaptureStatus.FINISHING
else:
if self._time >= self._end_time or self._frame_counter >= self._total_frame_count:
if self._options.is_video():
self._progress.capture_status = CaptureStatus.TO_START_ENCODING
elif self._options.is_capturing_nth_frames():
self._progress.capture_status = CaptureStatus.FINISHING
@staticmethod
def get_instance():
global capture_instance
return capture_instance
omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/capture_options.py

# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from enum import Enum, IntEnum
class CaptureMovieType(IntEnum):
SEQUENCE = 0
SUNSTUDY = 1
PLAYLIST = 2
class CaptureRangeType(IntEnum):
FRAMES = 0
SECONDS = 1
class CaptureRenderPreset(IntEnum):
PATH_TRACE = 0
RAY_TRACE = 1
IRAY = 2
class CaptureDebugMaterialType(IntEnum):
SHADED = 0
WHITE = 1
class CaptureOptions:
""" All Capture options that will be used when capturing.
Note: When adding an attribute make sure it is exposed via the constructor.
Not doing this will cause erorrs when serializing and deserializing this object.
"""
def __init__(
self,
camera="camera",
range_type=CaptureRangeType.FRAMES,
capture_every_nth_frames=-1,
fps="24",
start_frame=1,
end_frame=40,
start_time=0,
end_time=10,
res_width=1920,
res_height=1080,
render_preset=CaptureRenderPreset.PATH_TRACE,
debug_material_type=CaptureDebugMaterialType.SHADED,
spp_per_iteration=1,
path_trace_spp=1,
ptmb_subframes_per_frame=1,
ptmb_fso=0.0,
ptmb_fsc=1.0,
output_folder="",
file_name="Capture",
file_name_num_pattern=".####",
file_type=".tga",
save_alpha=False,
hdr_output=False,
show_pathtracing_single_frame_progress=False,
preroll_frames=0,
overwrite_existing_frames=False,
movie_type=CaptureMovieType.SEQUENCE,
sunstudy_start_time=0.0,
sunstudy_current_time=0.0,
sunstudy_end_time=0.0,
sunstudy_movie_length_in_seconds=0,
sunstudy_player=None,
real_time_settle_latency_frames=0,
renumber_negative_frame_number_from_0=False
):
self._camera = camera
self._range_type = range_type
self._capture_every_nth_frames = capture_every_nth_frames
self._fps = fps
self._start_frame = start_frame
self._end_frame = end_frame
self._start_time = start_time
self._end_time = end_time
self._res_width = res_width
self._res_height = res_height
self._render_preset = render_preset
self._debug_material_type = debug_material_type
self._spp_per_iteration = spp_per_iteration
self._path_trace_spp = path_trace_spp
self._ptmb_subframes_per_frame = ptmb_subframes_per_frame
self._ptmb_fso = ptmb_fso
self._ptmb_fsc = ptmb_fsc
self._output_folder = output_folder
self._file_name = file_name
self._file_name_num_pattern = file_name_num_pattern
self._file_type = file_type
self._save_alpha = save_alpha
self._hdr_output = hdr_output
self._show_pathtracing_single_frame_progress = show_pathtracing_single_frame_progress
self._preroll_frames = preroll_frames
self._overwrite_existing_frames = overwrite_existing_frames
self._movie_type = movie_type
self._sunstudy_start_time = sunstudy_start_time
self._sunstudy_current_time = sunstudy_current_time
self._sunstudy_end_time = sunstudy_end_time
self._sunstudy_movie_length_in_seconds = sunstudy_movie_length_in_seconds
self._sunstudy_player = sunstudy_player
self._real_time_settle_latency_frames = real_time_settle_latency_frames
self._renumber_negative_frame_number_from_0 = renumber_negative_frame_number_from_0
def to_dict(self):
data = vars(self)
return {key.lstrip("_"): value for key, value in data.items()}
@classmethod
def from_dict(cls, options):
return cls(**options)
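    # Typical round trip (mirrors tests/test_capture_options.py):
    #   options = CaptureOptions(camera="my_camera")
    #   restored = CaptureOptions.from_dict(options.to_dict())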
@property
def camera(self):
return self._camera
@camera.setter
def camera(self, value):
self._camera = value
@property
def range_type(self):
return self._range_type
@range_type.setter
def range_type(self, value):
self._range_type = value
@property
def capture_every_Nth_frames(self):
return self._capture_every_nth_frames
@capture_every_Nth_frames.setter
def capture_every_Nth_frames(self, value):
self._capture_every_nth_frames = value
@property
def fps(self):
return self._fps
@fps.setter
def fps(self, value):
self._fps = value
@property
def start_frame(self):
return self._start_frame
@start_frame.setter
def start_frame(self, value):
self._start_frame = value
@property
def end_frame(self):
return self._end_frame
@end_frame.setter
def end_frame(self, value):
self._end_frame = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def res_width(self):
return self._res_width
@res_width.setter
def res_width(self, value):
self._res_width = value
@property
def res_height(self):
return self._res_height
@res_height.setter
def res_height(self, value):
self._res_height = value
@property
def render_preset(self):
return self._render_preset
@render_preset.setter
def render_preset(self, value):
self._render_preset = value
@property
def debug_material_type(self):
return self._debug_material_type
@debug_material_type.setter
def debug_material_type(self, value):
self._debug_material_type = value
@property
def spp_per_iteration(self):
return self._spp_per_iteration
@spp_per_iteration.setter
def spp_per_iteration(self, value):
self._spp_per_iteration = value
@property
def path_trace_spp(self):
return self._path_trace_spp
@path_trace_spp.setter
def path_trace_spp(self, value):
self._path_trace_spp = value
@property
def ptmb_subframes_per_frame(self):
return self._ptmb_subframes_per_frame
@ptmb_subframes_per_frame.setter
def ptmb_subframes_per_frame(self, value):
self._ptmb_subframes_per_frame = value
@property
def ptmb_fso(self):
return self._ptmb_fso
@ptmb_fso.setter
def ptmb_fso(self, value):
self._ptmb_fso = value
@property
def ptmb_fsc(self):
return self._ptmb_fsc
@ptmb_fsc.setter
def ptmb_fsc(self, value):
self._ptmb_fsc = value
@property
def output_folder(self):
return self._output_folder
@output_folder.setter
def output_folder(self, value):
self._output_folder = value
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value):
self._file_name = value
@property
def file_name_num_pattern(self):
return self._file_name_num_pattern
@file_name_num_pattern.setter
def file_name_num_pattern(self, value):
self._file_name_num_pattern = value
@property
def file_type(self):
return self._file_type
@file_type.setter
def file_type(self, value):
self._file_type = value
@property
def save_alpha(self):
return self._save_alpha
@save_alpha.setter
def save_alpha(self, value):
self._save_alpha = value
@property
def hdr_output(self):
return self._hdr_output
@hdr_output.setter
def hdr_output(self, value):
self._hdr_output = value
@property
def show_pathtracing_single_frame_progress(self):
return self._show_pathtracing_single_frame_progress
@show_pathtracing_single_frame_progress.setter
def show_pathtracing_single_frame_progress(self, value):
self._show_pathtracing_single_frame_progress = value
@property
def preroll_frames(self):
return self._preroll_frames
@preroll_frames.setter
def preroll_frames(self, value):
self._preroll_frames = value
@property
def overwrite_existing_frames(self):
return self._overwrite_existing_frames
@overwrite_existing_frames.setter
def overwrite_existing_frames(self, value):
self._overwrite_existing_frames = value
@property
def movie_type(self):
return self._movie_type
@movie_type.setter
def movie_type(self, value):
self._movie_type = value
@property
def sunstudy_start_time(self):
return self._sunstudy_start_time
@sunstudy_start_time.setter
def sunstudy_start_time(self, value):
self._sunstudy_start_time = value
@property
def sunstudy_current_time(self):
return self._sunstudy_current_time
@sunstudy_current_time.setter
def sunstudy_current_time(self, value):
self._sunstudy_current_time = value
@property
def sunstudy_end_time(self):
return self._sunstudy_end_time
@sunstudy_end_time.setter
def sunstudy_end_time(self, value):
self._sunstudy_end_time = value
@property
def sunstudy_movie_length_in_seconds(self):
return self._sunstudy_movie_length_in_seconds
@sunstudy_movie_length_in_seconds.setter
def sunstudy_movie_length_in_seconds(self, value):
self._sunstudy_movie_length_in_seconds = value
@property
def sunstudy_player(self):
return self._sunstudy_player
@sunstudy_player.setter
def sunstudy_player(self, value):
self._sunstudy_player = value
@property
def real_time_settle_latency_frames(self):
return self._real_time_settle_latency_frames
@real_time_settle_latency_frames.setter
def real_time_settle_latency_frames(self, value):
self._real_time_settle_latency_frames = value
@property
def renumber_negative_frame_number_from_0(self):
return self._renumber_negative_frame_number_from_0
@renumber_negative_frame_number_from_0.setter
def renumber_negative_frame_number_from_0(self, value):
self._renumber_negative_frame_number_from_0 = value
def is_video(self):
return self.file_type == ".mp4"
def is_capturing_nth_frames(self):
return self._capture_every_nth_frames > 0
def is_capturing_pathtracing_single_frame(self):
return self.is_video() is False and self.is_capturing_nth_frames() is False and self.render_preset == CaptureRenderPreset.PATH_TRACE
def is_capturing_frame(self):
return self._range_type == CaptureRangeType.FRAMES
def get_full_path(self):
return os.path.join(self._output_folder, self._file_name + self._file_type)
omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/helper.py

# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
def get_num_pattern_file_path(frames_dir, file_name, num_pattern, frame_num, file_type, renumber_frames=False, renumber_offset=0):
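    """Build the output path for a numbered frame image.

    'num_pattern' (e.g. ".####") controls the zero padding, so for example
    get_num_pattern_file_path("/tmp", "Capture", ".####", 7, ".png") yields "/tmp/Capture.0007.png".
    Negative frame numbers keep their sign (e.g. "Capture.-0003.png") unless 'renumber_frames'
    shifts them by 'renumber_offset'.
    """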
if renumber_frames:
renumbered_frames = frame_num + renumber_offset
else:
renumbered_frames = frame_num
abs_frame_num = abs(renumbered_frames)
padding_length = len(num_pattern.strip(".")[len(str(abs_frame_num)) :])
if renumbered_frames >= 0:
padded_string = "0" * padding_length + str(renumbered_frames)
else:
padded_string = "-" + "0" * padding_length + str(abs_frame_num)
filename = ".".join((file_name, padded_string, file_type.strip(".")))
frame_path = os.path.join(frames_dir, filename)
return frame_path
omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/tests/__init__.py

from .test_capture_options import TestCaptureOptions
# from .test_capture_hdr import TestCaptureHdr
omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/tests/test_capture_hdr.py

from typing import Type
import os.path
import omni.kit.test
import carb
import carb.settings
import carb.tokens
import pathlib
import omni.kit.capture.capture_options as _capture_options
from omni.kit.viewport.utility import get_active_viewport, capture_viewport_to_file, create_viewport_window
OUTPUTS_DIR = omni.kit.test.get_test_output_path()
class TestCaptureHdr(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
settings = carb.settings.get_settings()
settings.set("/app/asyncRendering", False)
settings.set("/app/asyncRenderingLowLatency", False)
settings.set("/app/captureFrame/hdr", True)
# Setup viewport
self._usd_context = ''
await omni.usd.get_context(self._usd_context).new_stage_async()
async def test_hdr_capture(self):
viewport_api = get_active_viewport(self._usd_context)
if viewport_api is None:
return
# Wait until the viewport has valid resources
await viewport_api.wait_for_rendered_frames()
capture_filename = "capture.hdr_test.exr"
filePath = pathlib.Path(OUTPUTS_DIR).joinpath(capture_filename)
capture_viewport_to_file(viewport_api, str(filePath))
await omni.kit.app.get_app_interface().next_update_async()
await omni.kit.app.get_app_interface().next_update_async()
await omni.kit.app.get_app_interface().next_update_async()
assert os.path.isfile(str(filePath))
# Make sure we do not crash in the unsupported multi-view case
async def do_test_hdr_multiview_capture(self, test_legacy: bool):
viewport_api = get_active_viewport(self._usd_context)
if viewport_api is None:
return
# Wait until the viewport has valid resources
await viewport_api.wait_for_rendered_frames()
second_vp_window = create_viewport_window('Viewport 2', width=256, height=256)
await second_vp_window.viewport_api.wait_for_rendered_frames()
capture_filename = "capture.hdr_test.multiview.exr"
filePath = pathlib.Path(OUTPUTS_DIR).joinpath(capture_filename)
# Multiview HDR output is not yet fully supported, confirm this exits gracefully
capture_viewport_to_file(viewport_api, str(filePath))
await omni.kit.app.get_app_interface().next_update_async()
await omni.kit.app.get_app_interface().next_update_async()
await omni.kit.app.get_app_interface().next_update_async()
assert os.path.isfile(str(filePath))
second_vp_window.destroy()
del second_vp_window
async def test_hdr_multiview_capture_legacy(self):
await self.do_test_hdr_multiview_capture(True)
async def test_hdr_multiview_capture(self):
await self.do_test_hdr_multiview_capture(False)
omniverse-code/kit/exts/omni.kit.capture/omni/kit/capture/tests/test_capture_options.py

from typing import Type
import omni.kit.test
import omni.kit.capture.capture_options as _capture_options
class TestCaptureOptions(omni.kit.test.AsyncTestCase):
async def test_capture_options_serialisation(self):
options = _capture_options.CaptureOptions()
data_dict = options.to_dict()
self.assertIsInstance(data_dict, dict)
async def test_capture_options_deserialisation(self):
options = _capture_options.CaptureOptions()
data_dict = options.to_dict()
regenerated_options = _capture_options.CaptureOptions.from_dict(data_dict)
self.assertIsInstance(regenerated_options, _capture_options.CaptureOptions)
async def test_capture_options_values_persisted(self):
options = _capture_options.CaptureOptions(camera="my_camera")
data_dict = options.to_dict()
regenerated_options = _capture_options.CaptureOptions.from_dict(data_dict)
self.assertEqual(regenerated_options.camera, "my_camera")
async def test_adding_random_attribute_fails(self):
""" Test that adding new attributes without making them configurable via the __init__ will raise an exception
"""
options = _capture_options.CaptureOptions()
options._my_new_value = "foo"
data_dict = options.to_dict()
with self.assertRaises(TypeError, msg="__init__() got an unexpected keyword argument 'my_new_value'"):
            regenerated = _capture_options.CaptureOptions.from_dict(data_dict)
omniverse-code/kit/exts/omni.kit.capture/docs/CHANGELOG.md

# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.5.1] - 2022-06-22
### Changed
- Add declaration of deprecation: this extension has been replaced with omni.kit.capture.viewport will be removed in future releases.
## [0.5.0] - 2022-05-23
### Changed
- Support legacy and new Viewport API
- Add dependency on omni.kit.viewport.utility
## [0.4.9] - 2021-11-27
### Changed
- Changed the way sunstudy updates its player by supporting the new player in environment.sunstudy
## [0.4.8] - 2021-09-08
### Changed
- Merge fixes from the release/102 branch; fixes include:
- Fix the iray capture produces duplicated images issue
- Fix corrupted EXR image capture under certain circumstances, due to the rendering not behaving according to the value of the `asyncRendering` setting
- Backport required features of the plugin related to validation of frame counts and erroneous selection of Hydra engine when switching between RTX and Iray
- Add current frame index and total frame count to the notification system during frame capture
## [0.4.4] - 2021-07-05
### Added
- Added current frame index and total frame count to the notification system during frame capture.
## [0.4.3] - 2021-07-04
### Fixed
- Fixed the issue that prevented switching to Iray mode to capture images
## [0.4.2] - 2021-06-14
### Added
- Added support to renumber negative frame numbers from zero
### Changed
- Negative frame number format changed from ".00-x.png" to ".-000x.png"
## [0.4.1] - 2021-06-09
### Added
- Added the handling of path tracing iterations for capturing in Iray mode; also added a new entry on the progress window showing the number of iterations done for Iray capturing
## [0.4.0] - 2021-05-25
### Fixed
- Fixed the issue that the animation timeline's current time was reset to zero after capture; it is now restored to the time before capture
## [0.3.9] - 2021-05-20
### Added
- Added a settle latency for capture in RTX real-time mode, i.e. a number of frames to let the renderer settle before each frame is captured, to improve image quality
## [0.3.8] - 2021-05-11
### Added
- Added support for the overwrite-existing-frames option; if a frame path already exists, a warning now states whether it will be skipped or overwritten
- Added capture end callback in case users want it
### Fixed
- Fixed the issue that capture didn't end in time when there were skipped frames
- Fixed the issue that all frames in the output folder with a number greater than the start frame number were encoded into the final mp4 file; now only captured frames are added
## [0.3.7] - 2021-04-29
### Added
- Add support for the new sunstudy movie type
## [0.3.6] - 2021-04-15
### Changed
- Change the image name pattern for mp4 capture to match the capture-every-Nth-frames name pattern
## [0.3.5] - 2021-04-10
### Added
- Add support for skipping existing frames when capturing a sequence
## [0.3.4] - 2021-04-02
### Added
- Add preroll frames support so that the capture can run a given number of frames before actual capturing starts. To reduce rendering cost
for the preroll frames, "/rtx/pathtracing/spp" for path tracing will be set to 1
## [0.3.3] - 2021-03-01
### Added
- Add progress report of single frame capture in PT mode
- Update progress timers for video captures per iteration instead of per frame
## [0.3.2] - 2021-02-03
### Fixed
- Fix the issue of excessive time precision
## [0.3.1] - 2021-02-03
### Fixed
- Fixed the issue where PhysX and Flow simulation speed was much faster than normal in the captured movie
- Fixed the issue where path tracing options were wrongly applied in ray tracing mode during capture
## [0.3.0] - 2020-12-03
### Added
- Add support to capture in IRay mode
## [0.2.1] - 2020-11-30
### Removed
- Removed verbose warning during startup
## [0.2.0] - 2020-11-19
### Added
- Added functionality to capture with Kit Next
## [0.1.7] - 2020-10-19
### Fixed
- Hide lights and grid during capturing
## [0.1.6] - 2020-10-08
### Fixed
- Correct the update of motion blur subframe time
- Make sure the frame count is right when the motion blur shutter open/close difference is not 1
## [0.1.5] - 2020-10-07
### Fixed
- Pass through the right file name
- Make sure padding is applied for files
omniverse-code/kit/exts/omni.kit.capture/docs/README.md

# Omniverse Kit Image and Video Capture
Extension to handle the capturing and recording of the viewport.
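
## Example usage

A minimal capture sketch, assuming the extension is enabled and that the modules are importable under the `omni.kit.capture` package shown in this repository; the camera path, output folder and other option values are illustrative only:

```python
from omni.kit.capture.extension import CaptureExtension
from omni.kit.capture.capture_options import CaptureOptions, CaptureRangeType, CaptureRenderPreset

# Grab the singleton registered by the extension at startup.
capture = CaptureExtension.get_instance()

# Configure an .mp4 capture of frames 1-40 from an example camera.
capture.options = CaptureOptions(
    camera="/World/Camera",              # assumed camera prim path
    range_type=CaptureRangeType.FRAMES,
    start_frame=1,
    end_frame=40,
    fps=24,
    res_width=1920,
    res_height=1080,
    render_preset=CaptureRenderPreset.RAY_TRACE,
    output_folder="C:/captures",         # assumed output location
    file_name="MyCapture",
    file_type=".mp4",                    # ".mp4" makes is_video() true, so frames are encoded into a movie
)
capture.capture_finished_fn = lambda: print("Capture finished")
capture.start()
```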
omniverse-code/kit/exts/omni.kit.window.splash_close_example/config/extension.toml

[package]
version = "0.2.0"
category = "Internal"
[core]
# Load as late as possible
order = 10000
[dependencies]
"omni.kit.window.splash" = { optional = true }
[[python.module]]
name = "omni.kit.window.splash_close_example"
[[test]]
waiver = "Simple example extension" # OM-48132
omniverse-code/kit/exts/omni.kit.window.splash_close_example/omni/kit/window/splash_close_example/extension.py

import carb
import omni.ext
class SplashCloseExtension(omni.ext.IExt):
def on_startup(self):
try:
import omni.splash
splash_iface = omni.splash.acquire_splash_screen_interface()
except (ImportError, ModuleNotFoundError):
splash_iface = None
if not splash_iface:
return
splash_iface.close_all()
def on_shutdown(self):
pass
omniverse-code/kit/exts/omni.kit.window.splash_close_example/docs/index.rst

omni.kit.window.splash_close_example
####################################

Example extension that closes the splash screen during application startup
omniverse-code/kit/exts/omni.kit.property.light/omni/kit/property/light/scripts/light_properties.py

import os
import carb
import omni.ext
from pathlib import Path
from pxr import Sdf, UsdLux, Usd
TEST_DATA_PATH = ""
class LightPropertyExtension(omni.ext.IExt):
def __init__(self):
self._registered = False
super().__init__()
def on_startup(self, ext_id):
self._register_widget()
manager = omni.kit.app.get_app().get_extension_manager()
extension_path = manager.get_extension_path(ext_id)
global TEST_DATA_PATH
TEST_DATA_PATH = Path(extension_path).joinpath("data").joinpath("tests")
def on_shutdown(self):
if self._registered:
self._unregister_widget()
def _register_widget(self):
import omni.kit.window.property as p
from omni.kit.window.property.property_scheme_delegate import PropertySchemeDelegate
from .prim_light_widget import LightSchemaAttributesWidget
w = p.get_window()
if w:
# https://github.com/PixarAnimationStudios/USD/commit/7540fdf3b2aa6b6faa0fce8e7b4c72b756286f51
w.register_widget(
"prim",
"light",
LightSchemaAttributesWidget(
"Light",
# https://github.com/PixarAnimationStudios/USD/commit/7540fdf3b2aa6b6faa0fce8e7b4c72b756286f51
UsdLux.LightAPI if hasattr(UsdLux, 'LightAPI') else UsdLux.Light,
[
UsdLux.CylinderLight,
UsdLux.DiskLight,
UsdLux.DistantLight,
UsdLux.DomeLight,
UsdLux.GeometryLight,
UsdLux.RectLight,
UsdLux.SphereLight,
UsdLux.ShapingAPI,
UsdLux.ShadowAPI,
UsdLux.LightFilter,
# https://github.com/PixarAnimationStudios/USD/commit/9eda37ec9e1692dd290efd9a26526e0d2c21bb03
UsdLux.PortalLight if hasattr(UsdLux, 'PortalLight') else UsdLux.LightPortal,
UsdLux.ListAPI,
],
[
"color",
"enableColorTemperature",
"colorTemperature",
"intensity",
"exposure",
"normalize",
"angle",
"radius",
"height",
"width",
"radius",
"length",
"texture:file",
"texture:format",
"diffuse",
"specular",
"shaping:focus",
"shaping:focusTint",
"shaping:cone:angle",
"shaping:cone:softness",
"shaping:ies:file",
"shaping:ies:angleScale",
"shaping:ies:normalize",
"collection:shadowLink:expansionRule",
"collection:shadowLink:excludes",
"collection:shadowLink:includes",
"collection:lightLink:expansionRule",
"collection:lightLink:excludes",
"collection:lightLink:includes",
"light:enableCaustics",
"visibleInPrimaryRay",
"disableFogInteraction",
"isProjector",
] \
# https://github.com/PixarAnimationStudios/USD/commit/c8cd344af6be342911e50d2350c228ed329be6b2
# USD v22.08 adds this to LightAPI as an API schema override of CollectionAPI
+ (["collection:lightLink:includeRoot"] if Usd.GetVersion() >= (0,22,8) else []),
[],
),
)
self._registered = True
def _unregister_widget(self):
import omni.kit.window.property as p
w = p.get_window()
if w:
w.unregister_widget("prim", "light")
self._registered = False
omniverse-code/kit/exts/omni.kit.property.light/omni/kit/property/light/scripts/__init__.py

from .light_properties import *
omniverse-code/kit/exts/omni.kit.property.light/omni/kit/property/light/scripts/prim_light_widget.py

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Set
import omni.ui as ui
import omni.usd
from omni.kit.property.usd.usd_property_widget import MultiSchemaPropertiesWidget, UsdPropertyUiEntry
from omni.kit.property.usd.usd_property_widget import create_primspec_bool
from pxr import Kind, Sdf, Usd, UsdLux
import carb
PERSISTENT_SETTINGS_PREFIX = "/persistent"
class LightSchemaAttributesWidget(MultiSchemaPropertiesWidget):
def __init__(self, title: str, schema, schema_subclasses: list, include_list: list = [], exclude_list: list = []):
"""
Constructor.
Args:
title (str): Title of the widgets on the Collapsable Frame.
schema: The USD IsA schema or applied API schema to filter attributes.
schema_subclasses (list): list of subclasses
include_list (list): list of additional schema named to add
exclude_list (list): list of additional schema named to remove
"""
super().__init__(title, schema, schema_subclasses, include_list, exclude_list)
self._settings = carb.settings.get_settings()
self._setting_path = PERSISTENT_SETTINGS_PREFIX + "/app/usd/usdLuxUnprefixedCompat"
self._subscription = self._settings.subscribe_to_node_change_events(self._setting_path, self._on_change)
self.lux_attributes: Set[str] = set(['inputs:angle', 'inputs:color', 'inputs:temperature', 'inputs:diffuse', 'inputs:specular', 'inputs:enableColorTemperature',
'inputs:exposure', 'inputs:height', 'inputs:width', 'inputs:intensity', 'inputs:length',
'inputs:normalize', 'inputs:radius', 'inputs:shadow:color', 'inputs:shadow:distance', 'inputs:shadow:enable', 'inputs:shadow:falloff',
'inputs:shadow:falloffGamma', 'inputs:shaping:cone:angle', 'inputs:shaping:cone:softness', 'inputs:shaping:focus',
'inputs:shaping:focusTint', 'inputs:shaping:ies:angleScale', 'inputs:shaping:ies:file', 'inputs:shaping:ies:normalize', 'inputs:texture:format'])
# custom attributes
def is_prim_light_primary_visible_supported(prim):
return (
prim.IsA(UsdLux.DomeLight)
or prim.IsA(UsdLux.DiskLight)
or prim.IsA(UsdLux.RectLight)
or prim.IsA(UsdLux.SphereLight)
or prim.IsA(UsdLux.CylinderLight)
or prim.IsA(UsdLux.DistantLight)
)
def is_prim_light_disable_fog_interaction_supported(prim):
return (
prim.IsA(UsdLux.DomeLight)
or prim.IsA(UsdLux.DiskLight)
or prim.IsA(UsdLux.RectLight)
or prim.IsA(UsdLux.SphereLight)
or prim.IsA(UsdLux.CylinderLight)
or prim.IsA(UsdLux.DistantLight)
)
def is_prim_light_caustics_supported(prim):
return prim.IsA(UsdLux.DiskLight) or prim.IsA(UsdLux.RectLight) or prim.IsA(UsdLux.SphereLight)
def is_prim_light_is_projector_supported(prim):
return prim.IsA(UsdLux.RectLight)
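        # Default-value builder for "visibleInPrimaryRay": dome and distant lights default to
        # visible (True); all other supported light types default to False.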
def add_vipr(attribute_name, value_dict):
anchor_prim = self._get_prim(self._payload[-1])
if anchor_prim and (anchor_prim.IsA(UsdLux.DomeLight) or anchor_prim.IsA(UsdLux.DistantLight)):
return UsdPropertyUiEntry("visibleInPrimaryRay", "", create_primspec_bool(True), Usd.Attribute)
else:
return UsdPropertyUiEntry("visibleInPrimaryRay", "", create_primspec_bool(False), Usd.Attribute)
self.add_custom_schema_attribute("visibleInPrimaryRay", is_prim_light_primary_visible_supported, add_vipr, "", {})
self.add_custom_schema_attribute("disableFogInteraction", is_prim_light_disable_fog_interaction_supported, None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("light:enableCaustics", is_prim_light_caustics_supported, None, "", create_primspec_bool(False))
self.add_custom_schema_attribute("isProjector", is_prim_light_is_projector_supported, None, "", create_primspec_bool(False))
def _on_change(self, item, event_type):
self.request_rebuild()
def on_new_payload(self, payload):
"""
See PropertyWidget.on_new_payload
"""
if not super().on_new_payload(payload):
return False
if not self._payload or len(self._payload) == 0:
return False
used = []
for prim_path in self._payload:
prim = self._get_prim(prim_path)
# https://github.com/PixarAnimationStudios/USD/commit/7540fdf3b2aa6b6faa0fce8e7b4c72b756286f51
if self._schema().IsTyped() and not prim.IsA(self._schema):
return False
if self._schema().IsAPISchema() and not prim.HasAPI(self._schema):
return False
used += [attr for attr in prim.GetAttributes() if attr.GetName() in self._schema_attr_names and not attr.IsHidden()]
if self.is_custom_schema_attribute_used(prim):
used.append(None)
return used
    def has_authored_inputs_attr(self, prim):
        # True if any of the "inputs:"-prefixed UsdLux attributes are authored on the prim.
        attrs = {a.GetName() for a in prim.GetAuthoredAttributes()}
        return bool(attrs.intersection(self.lux_attributes))
def _customize_props_layout(self, attrs):
from omni.kit.property.usd.custom_layout_helper import (
CustomLayoutFrame,
CustomLayoutGroup,
CustomLayoutProperty,
)
from omni.kit.window.property.templates import (
SimplePropertyWidget,
LABEL_WIDTH,
LABEL_HEIGHT,
HORIZONTAL_SPACING,
)
self.add_custom_schema_attributes_to_props(attrs)
frame = CustomLayoutFrame(hide_extra=False)
anchor_prim = self._get_prim(self._payload[-1])
# TODO -
# add shadow values (Shadow Enable, Shadow Include, Shadow Exclude, Shadow Color, Shadow Distance, Shadow Falloff, Shadow Falloff Gamma)
# add UsdLux.DomeLight portals (see UsdLux.DomeLight GetPortalsRel)
# add filters (see UsdLux.Light / UsdLux.LightAPI GetFiltersRel)
self.usdLuxUnprefixedCompat = self._settings.get(PERSISTENT_SETTINGS_PREFIX + "/app/usd/usdLuxUnprefixedCompat")
has_authored_inputs = self.has_authored_inputs_attr(anchor_prim)
with frame:
with CustomLayoutGroup("Main"):
self._create_property("color", "Color", anchor_prim, has_authored_inputs)
self._create_property("enableColorTemperature", "Enable Color Temperature", anchor_prim, has_authored_inputs)
self._create_property("colorTemperature", "Color Temperature", anchor_prim, has_authored_inputs)
self._create_property("intensity", "Intensity", anchor_prim, has_authored_inputs)
self._create_property("exposure", "Exposure", anchor_prim, has_authored_inputs)
self._create_property("normalize", "Normalize Power", anchor_prim, has_authored_inputs)
if anchor_prim and anchor_prim.IsA(UsdLux.DistantLight):
self._create_property("angle", "Angle", anchor_prim, has_authored_inputs)
if anchor_prim and anchor_prim.IsA(UsdLux.DiskLight):
self._create_property("radius", "Radius", anchor_prim, has_authored_inputs)
if anchor_prim and anchor_prim.IsA(UsdLux.RectLight):
self._create_property("height", "Height", anchor_prim, has_authored_inputs)
self._create_property("width", "Width", anchor_prim, has_authored_inputs)
self._create_property("texture:file", "Texture File", anchor_prim, has_authored_inputs)
if anchor_prim and anchor_prim.IsA(UsdLux.SphereLight):
self._create_property("radius", "Radius", anchor_prim, has_authored_inputs)
CustomLayoutProperty("treatAsPoint", "Treat As Point")
if anchor_prim and anchor_prim.IsA(UsdLux.CylinderLight):
self._create_property("length", "Length", anchor_prim, has_authored_inputs)
self._create_property("radius", "Radius", anchor_prim, has_authored_inputs)
CustomLayoutProperty("treatAsLine", "Treat As Line")
if anchor_prim and anchor_prim.IsA(UsdLux.DomeLight):
self._create_property("texture:file", "Texture File", anchor_prim, has_authored_inputs)
self._create_property("texture:format", "Texture Format", anchor_prim, has_authored_inputs)
self._create_property("diffuse", "Diffuse Multiplier", anchor_prim, has_authored_inputs)
self._create_property("specular", "Specular Multiplier", anchor_prim, has_authored_inputs)
CustomLayoutProperty("visibleInPrimaryRay", "Visible In Primary Ray")
CustomLayoutProperty("disableFogInteraction", "Disable Fog Interaction")
CustomLayoutProperty("light:enableCaustics", "Enable Caustics")
CustomLayoutProperty("isProjector", "Projector light type")
with CustomLayoutGroup("Shaping", collapsed=True):
self._create_property("shaping:focus", "Focus", anchor_prim, has_authored_inputs)
self._create_property("shaping:focusTint", "Focus Tint", anchor_prim, has_authored_inputs)
self._create_property("shaping:cone:angle", "Cone Angle", anchor_prim, has_authored_inputs)
self._create_property("shaping:cone:softness", "Cone Softness", anchor_prim, has_authored_inputs)
self._create_property("shaping:ies:file", "File", anchor_prim, has_authored_inputs)
self._create_property("shaping:ies:angleScale", "AngleScale", anchor_prim, has_authored_inputs)
self._create_property("shaping:ies:normalize", "Normalize", anchor_prim, has_authored_inputs)
with CustomLayoutGroup("Light Link"):
CustomLayoutProperty("collection:lightLink:includeRoot", "Light Link Include Root")
CustomLayoutProperty("collection:lightLink:expansionRule", "Light Link Expansion Rule")
CustomLayoutProperty("collection:lightLink:includes", "Light Link Includes")
CustomLayoutProperty("collection:lightLink:excludes", "Light Link Excludes")
with CustomLayoutGroup("Shadow Link"):
CustomLayoutProperty("collection:shadowLink:includeRoot", "Shadow Link Include Root")
CustomLayoutProperty("collection:shadowLink:expansionRule", "Shadow Link Expansion Rule")
CustomLayoutProperty("collection:shadowLink:includes", "Shadow Link Includes")
CustomLayoutProperty("collection:shadowLink:excludes", "Shadow Link Excludes")
return frame.apply(attrs)
def _create_property(self, name: str, display_name: str, prim, has_authored_inputs):
from omni.kit.property.usd.custom_layout_helper import (
CustomLayoutProperty
)
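        # Use the "inputs:"-prefixed attribute name when the prim already has authored "inputs:"
        # attributes, when unprefixed-compatibility mode is off, or when the unprefixed attribute
        # does not exist on the prim; otherwise fall back to the legacy unprefixed name.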
if has_authored_inputs or not self.usdLuxUnprefixedCompat or not prim.HasAttribute(name):
prefixed_name = "inputs:" + name
return CustomLayoutProperty(prefixed_name, display_name)
return CustomLayoutProperty(name, display_name)
| 11,860 | Python | 54.167442 | 168 | 0.649494 |
omniverse-code/kit/exts/omni.kit.property.light/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.6] - 2022-07-25
### Changes
- Refactored unittests to make use of content_browser test helpers
## [1.0.5] - 2022-01-10
### Changes
- Updated Shadow Link & Light Link. Now has include/exclude
## [1.0.4] - 2021-02-19
### Changes
- Added UI test
## [1.0.3] - 2021-01-11
### Changes
- visibleInPrimaryRay default value changed to True
## [1.0.2] - 2020-12-09
### Changes
- Added extension icon
- Added readme
- Updated preview image
## [1.0.1] - 2020-10-22
### Changes
- Improved layout
## [1.0.0] - 2020-09-17
### Changes
- Created
| 638 | Markdown | 17.794117 | 80 | 0.65674 |
omniverse-code/kit/exts/omni.kit.property.light/docs/README.md | # omni.kit.property.light
## Introduction
Property window extensions are for viewing and editing Usd Prim Attributes
## This extension supports editing of these USD types:
- UsdLux.CylinderLight
- UsdLux.DiskLight
- UsdLux.DistantLight
- UsdLux.DomeLight
- UsdLux.GeometryLight
- UsdLux.RectLight
- UsdLux.SphereLight
- UsdLux.ShapingAPI
- UsdLux.ShadowAPI
- UsdLux.LightFilter
- UsdLux.LightPortal
### and supports editing of these USD APIs:
- UsdLux.ListAPI
| 467 | Markdown | 17.719999 | 74 | 0.785867 |
omniverse-code/kit/exts/omni.kit.property.light/docs/index.rst | omni.kit.property.light
###########################
Property Light Values
.. toctree::
:maxdepth: 1
CHANGELOG
| 121 | reStructuredText | 9.166666 | 27 | 0.528926 |
omniverse-code/kit/exts/omni.kit.audiodeviceenum/config/extension.toml | [package]
title = "Kit Audio Device Enumerator"
category = "Audio"
feature = true
version = "1.0.0"
description = "An audio device enumeration API which is available from python and C++"
authors = ["NVIDIA"]
keywords = ["audio", "device"]
[dependencies]
"carb.audio" = {}
[[native.plugin]]
path = "bin/*.plugin"
[[python.module]]
name = "omni.kit.audiodeviceenum"
| 368 | TOML | 18.421052 | 86 | 0.690217 |
omniverse-code/kit/exts/omni.kit.audiodeviceenum/omni/kit/audiodeviceenum/__init__.py | """
This module contains bindings to the C++ omni::audio::IAudioDeviceEnum
interface. This provides functionality for enumerating available audio devices
and collecting some basic information on each one.
Sound devices attached to the system may change at any point due to user
activity (ie: connecting or unplugging a USB audio device). When enumerating
devices, it is important to collect all device information directly instead
of caching it.
The device information is suitable to be used to display to a user in a menu
to allow them to choose a device to use by name.
"""
from ._audiodeviceenum import *
# Cached audio device enumerator instance pointer
def get_audio_device_enum_interface() -> IAudioDeviceEnum:
"""
helper method to retrieve a cached version of the IAudioDeviceEnum interface.
Returns:
The cached :class:`omni.kit.audiodeviceenum.IAudioDeviceEnum` interface. This will
only be retrieved on the first call. All subsequent calls will return the cached
interface object.
"""
if not hasattr(get_audio_device_enum_interface, "audio_device_enum"):
get_audio_device_enum_interface.audio_device_enum = acquire_audio_device_enum_interface()
return get_audio_device_enum_interface.audio_device_enum
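# Illustrative sketch (not part of the original module): how the cached interface above is
# typically combined with the enumeration API to list playback devices. It assumes the
# Direction enum re-exported by the bindings import above; treat it as an example, not as
# part of this extension's public API.
def _example_list_playback_devices():
    """Example only: return a list of (name, description) tuples for playback devices."""
    dev = get_audio_device_enum_interface()
    devices = []
    for index in range(dev.get_device_count(Direction.PLAYBACK)):
        name = dev.get_device_name(Direction.PLAYBACK, index)
        desc = dev.get_device_description(Direction.PLAYBACK, index)
        # Format queries may open the device, so skip them on direct-hardware (ALSA) backends.
        if not dev.is_direct_hardware_backend():
            channels = dev.get_device_channel_count(Direction.PLAYBACK, index)
            frame_rate = dev.get_device_frame_rate(Direction.PLAYBACK, index)
            desc = f"{desc} ({channels} channels @ {frame_rate} Hz)"
        devices.append((name, desc))
    return devices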
| 1,351 | Python | 39.969696 | 97 | 0.720207 |
omniverse-code/kit/exts/omni.kit.audiodeviceenum/omni/kit/audiodeviceenum/_audio.pyi | """
This module contains bindings to the C++ omni::audio::IAudioDeviceEnum
interface. This provides functionality for enumerating available audio
devices and collecting some basic information on each one.
Sound devices attached to the system may change at any point due to user
activity (ie: connecting or unplugging a USB audio device). When enumerating
devices, it is important to collect all device information directly instead
of caching it.
The device information is suitable to be used to display to a user in a menu
to allow them to choose a device to use by name.
"""
import omni.kit.audiodeviceenum._audio
import typing
__all__ = [
"Direction",
"IAudioDeviceEnum",
"SampleType",
"acquire_audio_device_enum_interface"
]
class Direction():
"""
Members:
PLAYBACK : audio playback devices only.
CAPTURE : audio capture devices only.
"""
def __init__(self, arg0: int) -> None: ...
def __int__(self) -> int: ...
@property
def name(self) -> str:
"""
(self: handle) -> str
:type: str
"""
CAPTURE: omni.kit.audiodeviceenum._audio.Direction # value = Direction.CAPTURE
PLAYBACK: omni.kit.audiodeviceenum._audio.Direction # value = Direction.PLAYBACK
__members__: dict # value = {'PLAYBACK': Direction.PLAYBACK, 'CAPTURE': Direction.CAPTURE}
pass
class IAudioDeviceEnum():
"""
This interface contains functions for audio device enumeration. This is able to
enumerate all audio devices attached to the system at any given point and collect the
information for each device. This is only intended to collect the device information
needed to display to the user for device selection purposes. If a device is to be
chosen based on certain needs (ie: channel count, frame rate, etc), it should be done
directly through the audio playback or capture context during creation. This is able
to collect information for both playback and capture devices.
    All the functions in this interface are in the omni.kit.audiodeviceenum.IAudioDeviceEnum class.
    To retrieve this object, use the get_audio_device_enum_interface() method:
        >>> import omni.kit.audiodeviceenum as ade
        >>> dev = ade.get_audio_device_enum_interface()
        >>> count = dev.get_device_count(ade.Direction.PLAYBACK)
        >>> desc = dev.get_device_description(ade.Direction.PLAYBACK, 0)
"""
def get_device_channel_count(self, dir: Direction, index: int) -> int:
"""
Retrieves the maximum channel count for a requested device.
This retrieves the maximum channel count for a requested device. This count
is the maximum number of channels that the device can natively handle without
having to trim or reprocess the data. Using a device with a different channel
count than its maximum is allowed but will result in extra processing time to
upmix or downmix channels in the stream. Note that downmixing channel counts
(ie: 7.1 to stereo) will often result in multiple channels being blended
together and can result in an unexpected final signal in certain cases.
This function will open the audio device to test on some systems. The
caller should ensure that isDirectHardwareBackend() returns false
before calling this.
Args:
dir: the audio direction to get the maximum channel count for.
index: the index of the device to retrieve the channel count for. This should
be between 0 and one less than the most recent return value of getDeviceCount().
Returns:
If successful, this returns the maximum channel count of the requested device.
If the requested device is out of range of those connected to the system, 0
is returned.
"""
def get_device_count(self, dir: Direction) -> int:
"""
Retrieves the total number of devices attached to the system of a requested type.
Args:
dir: the audio direction to get the device count for.
Returns:
If successful, this returns the total number of connected audio devices of the
requested type.
If there are no devices of the requested type connected to the system, 0 is
returned.
"""
def get_device_description(self, dir: Direction, index: int) -> object:
"""
Retrieves a descriptive string for a requested audio device.
This retrieves a descriptive string for the requested device. This string is
suitable for display to a user in a menu or selection list.
Args:
dir: the audio direction to get the description string for.
index: the index of the device to retrieve the description for. This should be
between 0 and one less than the most recent return value of getDeviceCount().
Returns:
If successful, this returns a python string describing the requested device.
If the requested device is out of range of those connected to the system, this
returns None.
"""
def get_device_frame_rate(self, dir: Direction, index: int) -> int:
"""
Retrieves the preferred frame rate of a requested device.
This retrieves the preferred frame rate of a requested device. The preferred
frame rate is the rate at which the device natively wants to process audio data.
Using the device at other frame rates may be possible but would require extra
processing time. Using a device at a different frame rate than its preferred
one may also result in degraded quality depending on what the processing versus
preferred frame rate is.
This function will open the audio device to test on some systems. The
caller should ensure that isDirectHardwareBackend() returns false
before calling this.
Args:
dir: the audio direction to get the preferred frame rate for.
index: the index of the device to retrieve the frame rate for. This should
be between 0 and one less than the most recent return value of
getDeviceCount().
Returns:
If successful, this returns the preferred frame rate of the requested device.
If the requested device was out of range of those connected to the system, 0
is returned.
"""
def get_device_id(self, dir: Direction, index: int) -> object:
"""
Retrieves the unique identifier for the requested device.
Args:
dir: the audio direction to get the device name for.
index: the index of the device to retrieve the identifier for. This
should be between 0 and one less than the most recent return
value of getDeviceCount().
Returns:
If successful, this returns a python string containing the unique identifier
of the requested device.
If the requested device is out of range of those connected to the system, this
returns None.
"""
def get_device_name(self, dir: Direction, index: int) -> object:
"""
Retrieves the friendly name of a requested device.
Args:
dir: the audio direction to get the device name for.
index: the index of the device to retrieve the name for. This should be between
0 and one less than the most recent return value of getDeviceCount().
Returns:
If successful, this returns a python string containing the friendly name of the
requested device.
If the requested device is out of range of those connected to the system, this
returns None.
"""
def get_device_sample_size(self, dir: Direction, index: int) -> int:
"""
Retrieves the native sample size for a requested device.
This retrieves the bits per sample that a requested device prefers to process
its data at. It may be possible to use the device at a different sample size,
but that would likely result in extra processing time. Using a device at a
different sample rate than its native could degrade the quality of the final
signal.
This function will open the audio device to test on some systems. The
caller should ensure that isDirectHardwareBackend() returns false
before calling this.
Args:
dir: the audio direction to get the native sample size for.
index: the index of the device to retrieve the sample size for. This should
be between 0 and one less than the most recent return value of getDeviceCount().
Returns:
If successful, this returns the native sample size in bits per sample of the
requested device.
If the requested device is out of range of those connected to the system, 0
is returned.
"""
def get_device_sample_type(self, dir: Direction, index: int) -> SampleType:
"""
Retrieves the native sample data type for a requested device.
This retrieves the sample data type that a requested device prefers to process
its data in. It may be possible to use the device with a different data type,
but that would likely result in extra processing time. Using a device with a
different sample data type than its native could degrade the quality of the
final signal.
This function will open the audio device to test on some systems. The
caller should ensure that isDirectHardwareBackend() returns false
before calling this.
Args:
dir: the audio direction to get the native sample data type for.
index: the index of the device to retrieve the sample data type for. This should
be between 0 and one less than the most recent return value of getDeviceCount().
Returns:
If successful, this returns the native sample data type of the requested device.
If the requested device is out of range of those connected to the system,
UNKNOWN is returned.
"""
def is_direct_hardware_backend(self) -> bool:
"""
        Check if the audio device backend uses direct hardware access.
        A direct hardware audio backend is capable of exclusively locking audio
        devices, so devices are not guaranteed to open successfully and opening
        devices to test their format may be disruptive to the system.
        ALSA is the only 'direct hardware' backend that's currently supported.
        Some devices under ALSA will exclusively lock the audio device; these
        may fail to open because they're busy.
        Additionally, some devices under ALSA can fail to open because they're
        misconfigured (Ubuntu's default ALSA configuration can contain
        misconfigured devices).
        In addition to this, opening some devices under ALSA can take a
        substantial amount of time (over 100ms).
        For these reasons, it is important to verify that you are not using a
        'direct hardware' backend if you are going to call certain functions in
        this interface.
        Args:
            No arguments.
        Returns:
            This returns `True` if this backend has direct hardware access.
            This will be returned when ALSA is in use.
            This returns `False` if the backend is an audio mixing server.
            This will be returned when Pulse Audio or Window Audio Services
            are in use.
"""
pass
class SampleType():
"""
Members:
UNKNOWN : could not determine the same type or an invalid device index.
PCM_SIGNED_INTEGER : signed integer PCM samples.
PCM_UNSIGNED_INTEGER : unsigned integer PCM samples.
PCM_FLOAT : single precision floating point PCM samples.
COMPRESSED : a compressed sample format.
"""
def __init__(self, arg0: int) -> None: ...
def __int__(self) -> int: ...
@property
def name(self) -> str:
"""
(self: handle) -> str
:type: str
"""
COMPRESSED: omni.kit.audiodeviceenum._audio.SampleType # value = SampleType.COMPRESSED
PCM_FLOAT: omni.kit.audiodeviceenum._audio.SampleType # value = SampleType.PCM_FLOAT
PCM_SIGNED_INTEGER: omni.kit.audiodeviceenum._audio.SampleType # value = SampleType.PCM_SIGNED_INTEGER
PCM_UNSIGNED_INTEGER: omni.kit.audiodeviceenum._audio.SampleType # value = SampleType.PCM_UNSIGNED_INTEGER
UNKNOWN: omni.kit.audiodeviceenum._audio.SampleType # value = SampleType.UNKNOWN
__members__: dict # value = {'UNKNOWN': SampleType.UNKNOWN, 'PCM_SIGNED_INTEGER': SampleType.PCM_SIGNED_INTEGER, 'PCM_UNSIGNED_INTEGER': SampleType.PCM_UNSIGNED_INTEGER, 'PCM_FLOAT': SampleType.PCM_FLOAT, 'COMPRESSED': SampleType.COMPRESSED}
pass
def acquire_audio_device_enum_interface(plugin_name: str = None, library_path: str = None) -> IAudioDeviceEnum:
pass
| 14,603 | unknown | 46.570032 | 245 | 0.60241 |
omniverse-code/kit/exts/omni.kit.audiodeviceenum/omni/kit/audiodeviceenum/tests/test_device.py | import omni.kit.test
import omni.kit.audiodeviceenum
def get_name_from_sample_type(sample_type): # pragma: no cover
if sample_type == omni.kit.audiodeviceenum.SampleType.UNKNOWN:
return "UNKNOWN"
if sample_type == omni.kit.audiodeviceenum.SampleType.PCM_SIGNED_INTEGER:
return "INT PCM"
if sample_type == omni.kit.audiodeviceenum.SampleType.PCM_UNSIGNED_INTEGER:
return "UINT PCM"
if sample_type == omni.kit.audiodeviceenum.SampleType.PCM_FLOAT:
return "FLOAT PCM"
if sample_type == omni.kit.audiodeviceenum.SampleType.COMPRESSED:
return "COMPRESSED"
class TestAudio(omni.kit.test.AsyncTestCase): # pragma: no cover
async def test_audio_device(self):
audio = omni.kit.audiodeviceenum.get_audio_device_enum_interface()
self.assertIsNotNone(audio)
count = audio.get_device_count(omni.kit.audiodeviceenum.Direction.PLAYBACK)
self.assertGreaterEqual(count, 0)
printDevices = 0
if printDevices != 0:
print("Found ", count, " Available Playback Devices:")
if count > 0:
for i in range(0, count):
desc = audio.get_device_description(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertIsNotNone(desc)
name = audio.get_device_name(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertIsNotNone(name)
uniqueId = audio.get_device_id(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertIsNotNone(uniqueId)
if not audio.is_direct_hardware_backend():
frame_rate = audio.get_device_frame_rate(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertGreater(frame_rate, 0)
channel_count = audio.get_device_channel_count(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertGreater(channel_count, 0)
sample_size = audio.get_device_sample_size(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
self.assertGreater(sample_size, 0)
sample_type = audio.get_device_sample_type(omni.kit.audiodeviceenum.Direction.PLAYBACK, i)
if printDevices != 0:
str_desc = " found the device '" + name + "' "
str_desc += "{" + str(channel_count) + " channels "
str_desc += "@ " + str(frame_rate) + "Hz "
str_desc += str(sample_size) + "-bit " + get_name_from_sample_type(sample_type)
print(str_desc)
print(" " + desc)
else:
if printDevices != 0:
print(" found the device '" + name + "' ")
print(" " + desc)
# make sure out of range values fail.
desc = audio.get_device_description(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertIsNone(desc)
name = audio.get_device_name(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertIsNone(name)
uniqueId = audio.get_device_id(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertIsNone(uniqueId)
frame_rate = audio.get_device_frame_rate(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertEqual(frame_rate, 0)
channel_count = audio.get_device_channel_count(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertEqual(channel_count, 0)
sample_size = audio.get_device_sample_size(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertEqual(sample_size, 0)
sample_type = audio.get_device_sample_type(omni.kit.audiodeviceenum.Direction.PLAYBACK, count)
self.assertEqual(sample_type, omni.kit.audiodeviceenum.SampleType.UNKNOWN)
count = audio.get_device_count(omni.kit.audiodeviceenum.Direction.CAPTURE)
self.assertGreaterEqual(count, 0)
if printDevices != 0:
print("\nFound ", count, " Available Capture Devices:")
if count > 0:
for i in range(0, count):
desc = audio.get_device_description(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertIsNotNone(desc)
name = audio.get_device_name(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertIsNotNone(name)
uniqueId = audio.get_device_id(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertIsNotNone(uniqueId)
if not audio.is_direct_hardware_backend():
frame_rate = audio.get_device_frame_rate(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertGreater(frame_rate, 0)
channel_count = audio.get_device_channel_count(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertGreater(channel_count, 0)
sample_size = audio.get_device_sample_size(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
self.assertGreater(sample_size, 0)
sample_type = audio.get_device_sample_type(omni.kit.audiodeviceenum.Direction.CAPTURE, i)
if printDevices != 0:
str_desc = " found the device '" + name + "' "
str_desc += "{" + str(channel_count) + " channels "
str_desc += "@ " + str(frame_rate) + "Hz "
str_desc += str(sample_size) + "-bit " + get_name_from_sample_type(sample_type)
print(str_desc)
print(" " + desc)
else:
if printDevices != 0:
print(" found the device '" + name + "' ")
print(" " + desc)
# make sure out of range values fail.
desc = audio.get_device_description(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertIsNone(desc)
name = audio.get_device_name(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertIsNone(name)
uniqueId = audio.get_device_id(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertIsNone(uniqueId)
frame_rate = audio.get_device_frame_rate(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertEqual(frame_rate, 0)
channel_count = audio.get_device_channel_count(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertEqual(channel_count, 0)
sample_size = audio.get_device_sample_size(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertEqual(sample_size, 0)
sample_type = audio.get_device_sample_type(omni.kit.audiodeviceenum.Direction.CAPTURE, count)
self.assertEqual(sample_type, omni.kit.audiodeviceenum.SampleType.UNKNOWN)
| 6,979 | Python | 47.137931 | 114 | 0.60854 |
omniverse-code/kit/exts/omni.kit.audiodeviceenum/omni/kit/audiodeviceenum/tests/__init__.py | from .test_device import * # pragma: no cover
| 48 | Python | 15.333328 | 46 | 0.6875 |
omniverse-code/kit/exts/omni.kit.window.cursor/omni/kit/window/cursor/cursor.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from pathlib import Path
from carb import windowing, log_warn
import omni.ext
import weakref
EXTEND_CURSOR_GRAB_OPEN = "Grab_open"
EXTEND_CURSOR_GRAB_CLOSE = "Grab_close"
EXTEND_CURSOR_PAN_FILE = "cursorPan.png"
EXTEND_CURSOR_PAN_CLOSE_FILE = "cursorPanClosed.png"
main_window_cursor = None
def get_main_window_cursor():
return main_window_cursor
def get_icon_path(path: str):
extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
icon_path = Path(extension_path).joinpath("data").joinpath("icons").joinpath(path)
return str(icon_path)
class WindowCursor(omni.ext.IExt):
def __init__(self):
self._imgui_renderer = None
self._app_window = None
super().__init__()
def on_startup(self, ext_id):
global main_window_cursor
main_window_cursor = weakref.proxy(self)
try:
import omni.kit.imgui_renderer
import omni.appwindow
self._imgui_renderer = omni.kit.imgui_renderer.acquire_imgui_renderer_interface()
self._app_window = omni.appwindow.get_default_app_window()
except Exception:
            # Both of these are required for further API usage, which can't happen now, so clear them both out
self._imgui_renderer, self._app_window = None, None
log_warn("omni.kit.window.cursor failed to initialize properly, changing cursor with it will not work")
import traceback
log_warn(f"{traceback.format_exc()}")
self._app_ready_sub = omni.kit.app.get_app().get_startup_event_stream().create_subscription_to_pop_by_type(
omni.kit.app.EVENT_APP_READY,
self._on_app_ready,
)
def on_shutdown(self):
if self._imgui_renderer:
self._imgui_renderer.unregister_cursor_shape_extend(EXTEND_CURSOR_GRAB_OPEN)
self._imgui_renderer.unregister_cursor_shape_extend(EXTEND_CURSOR_GRAB_CLOSE)
global main_window_cursor
main_window_cursor = None
self._imgui_renderer = None
self._app_window = None
self._app_ready_sub = None
def _on_app_ready(self, event):
if self._imgui_renderer:
self._imgui_renderer.register_cursor_shape_extend(EXTEND_CURSOR_GRAB_OPEN, get_icon_path(EXTEND_CURSOR_PAN_FILE))
self._imgui_renderer.register_cursor_shape_extend(EXTEND_CURSOR_GRAB_CLOSE, get_icon_path(EXTEND_CURSOR_PAN_CLOSE_FILE))
def override_cursor_shape_extend(self, shape_name: str):
if self._app_window and self._imgui_renderer:
self._imgui_renderer.set_cursor_shape_override_extend(self._app_window, shape_name)
return True
return False
def override_cursor_shape(self, shape: windowing.CursorStandardShape):
if self._app_window and self._imgui_renderer:
self._imgui_renderer.set_cursor_shape_override(self._app_window, shape)
return True
return False
def clear_overridden_cursor_shape(self):
if self._app_window and self._imgui_renderer:
self._imgui_renderer.clear_cursor_shape_override(self._app_window)
return True
return False
def get_cursor_shape_override_extend(self):
if self._app_window and self._imgui_renderer:
return self._imgui_renderer.get_cursor_shape_override_extend(self._app_window)
return None
def get_cursor_shape_override(self):
if self._app_window and self._imgui_renderer:
return self._imgui_renderer.get_cursor_shape_override(self._app_window)
return None
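# Illustrative sketch (not part of the original module): typical use of the override API above
# from another extension, using only the public methods and the shape names registered here.
def _example_override_and_restore_cursor():
    """Example only: override the main window cursor, then restore the default shape."""
    cursor = get_main_window_cursor()
    if cursor is None:
        return
    # Switch to one of the extended shapes registered by this extension ("Grab_open" / "Grab_close")...
    cursor.override_cursor_shape_extend(EXTEND_CURSOR_GRAB_OPEN)
    # ...or to a standard carb.windowing shape.
    cursor.override_cursor_shape(windowing.CursorStandardShape.CROSSHAIR)
    # Restore the default cursor when done.
    cursor.clear_overridden_cursor_shape()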
| 4,062 | Python | 37.695238 | 132 | 0.674545 |
omniverse-code/kit/exts/omni.kit.window.cursor/omni/kit/window/cursor/__init__.py | from .cursor import *
| 22 | Python | 10.499995 | 21 | 0.727273 |
omniverse-code/kit/exts/omni.kit.window.cursor/omni/kit/window/cursor/tests/tests.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb.imgui
import carb.input
import carb.windowing
import omni.appwindow
import omni.kit.app
import omni.kit.test
import omni.kit.window.cursor
import omni.kit.imgui_renderer
import omni.ui as ui
class TestCursorShape(omni.kit.test.AsyncTestCase):
async def setUp(self):
app_window = omni.appwindow.get_default_app_window()
self._mouse = app_window.get_mouse()
self._app = omni.kit.app.get_app()
self._cursor = omni.kit.window.cursor.get_main_window_cursor()
self._input_provider = carb.input.acquire_input_provider()
self._imgui = carb.imgui.acquire_imgui()
self._windowing = carb.windowing.acquire_windowing_interface()
self._os_window = app_window.get_window()
self._imgui_renderer = omni.kit.imgui_renderer.acquire_imgui_renderer_interface()
await self._build_test_windows()
async def tearDown(self):
self._os_window = None
self._windowing = None
self._imgui = None
self._window = None
self._dock_window_1 = None
self._dock_window_2 = None
self._imgui_renderer = None
async def test_cursor_shape(self):
main_dockspace = ui.Workspace.get_window("DockSpace")
width = main_dockspace.width
height = main_dockspace.height
await self._wait()
await self._move_and_test_cursor_shape((width / 2, 1), carb.imgui.MouseCursor.ARROW)
# Put on StringField and test IBeam shape
await self._move_and_test_cursor_shape((width / 2, height / 4), carb.imgui.MouseCursor.TEXT_INPUT)
# Put on vertical split and test HORIZONTAL_RESIZE
await self._move_and_test_cursor_shape(
(width / 2 + 1, height * 3 / 4),
carb.imgui.MouseCursor.RESIZE_EW,
)
# Put on horizontal split and test VERTICAL_RESIZE
await self._move_and_test_cursor_shape(
(width * 3 / 4, height / 2 + 1),
carb.imgui.MouseCursor.RESIZE_NS,
)
await self._wait()
# test override_cursor_shape_extend and get_cursor_shape_override_extend
test_extend_shapes = [
"IBeam",
"Grab_close",
"Crosshair",
"Grab_open",
]
for cursor_shape in test_extend_shapes:
self._cursor.override_cursor_shape_extend(cursor_shape)
await self._wait(100)
cur_cursor = self._cursor.get_cursor_shape_override_extend()
self.assertEqual(cur_cursor, cursor_shape)
# test override_cursor_shape and get_cursor_shape_override
test_shapes = [
carb.windowing.CursorStandardShape.CROSSHAIR,
carb.windowing.CursorStandardShape.IBEAM,
]
for cursor_shape in test_shapes:
self._cursor.override_cursor_shape(cursor_shape)
await self._wait(100)
cur_cursor = self._cursor.get_cursor_shape_override()
self.assertEqual(cur_cursor, cursor_shape)
all_shapes = self._imgui_renderer.get_all_cursor_shape_names()
print(all_shapes)
self._cursor.clear_overridden_cursor_shape()
await self._wait()
async def _move_and_test_cursor_shape(self, pos, cursor: carb.imgui.MouseCursor):
# Available options:
# carb.imgui.MouseCursor.ARROW: carb.windowing.CursorStandardShape.ARROW,
# carb.imgui.MouseCursor.TEXT_INPUT: carb.windowing.CursorStandardShape.IBEAM,
# carb.imgui.MouseCursor.RESIZE_NS: carb.windowing.CursorStandardShape.VERTICAL_RESIZE,
# carb.imgui.MouseCursor.RESIZE_EW: carb.windowing.CursorStandardShape.HORIZONTAL_RESIZE,
# carb.imgui.MouseCursor.HAND: carb.windowing.CursorStandardShape.HAND,
# carb.imgui.MouseCursor.CROSSHAIR: carb.windowing.CursorStandardShape.CROSSHAIR,
self._input_provider.buffer_mouse_event(self._mouse, carb.input.MouseEventType.MOVE, pos, 0, pos)
self._windowing.set_cursor_position(self._os_window, carb.Int2(*[int(p) for p in pos]))
await self._wait()
imgui_cursor = self._imgui.get_mouse_cursor()
self.assertEqual(cursor, imgui_cursor, f"Expect {cursor} but got {imgui_cursor}")
# build a dockspace with windows/widgets to test various cursor shape from imgui
async def _build_test_windows(self):
import omni.ui as ui
self._window = ui.Window("CursorShapeTest")
with self._window.frame:
with ui.ZStack():
with ui.Placer(offset_x=0, offset_y=10):
ui.StringField()
self._dock_window_1 = ui.Window("DockWindow1")
self._dock_window_2 = ui.Window("DockWindow2")
await self._app.next_update_async()
main_dockspace = ui.Workspace.get_window("DockSpace")
window_handle = ui.Workspace.get_window("CursorShapeTest")
dock_window_handle1 = ui.Workspace.get_window("DockWindow1")
dock_window_handle2 = ui.Workspace.get_window("DockWindow2")
window_handle.dock_in(main_dockspace, ui.DockPosition.SAME)
dock_window_handle1.dock_in(window_handle, ui.DockPosition.BOTTOM, 0.5)
dock_window_handle2.dock_in(dock_window_handle1, ui.DockPosition.RIGHT, 0.5)
async def _wait(self, num=8):
for i in range(num):
await self._app.next_update_async() | 5,770 | Python | 38.8 | 106 | 0.656672 |
omniverse-code/kit/exts/omni.kit.window.cursor/omni/kit/window/cursor/tests/__init__.py | from .tests import *
| 21 | Python | 9.999995 | 20 | 0.714286 |
omniverse-code/kit/exts/omni.kit.window.cursor/docs/CHANGELOG.md | # CHANGELOG
This document records all notable changes to ``omni.kit.window.cursor`` extension.
This project adheres to `Semantic Versioning <https://semver.org/>`.
## [1.1.1] - 2022-09-26
### Changed
- Store weakly referenced proxy to extension object.
- Handle possibility of lower-level API failure better.
- Return value of success for changing / restoring the cursor.
## [1.1.0] - 2022-05-06
### Changed
- Using `IImGuiRenderer` to override the mouse cursor
## [1.0.1] - 2021-03-01
### Added
- Added tests.
## [1.0.0] - 2020-11-06
### Added
- Initial extensions.
| 575 | Markdown | 20.333333 | 82 | 0.697391 |
omniverse-code/kit/exts/omni.kit.widget.live/config/extension.toml | [package]
version = "2.0.3"
title = "Kit Live Mode Control Widget"
category = "Internal"
changelog = "docs/CHANGELOG.md"
feature = true
keywords = ["live", "session"]
authors = ["NVIDIA"]
repository = ""
[dependencies]
"omni.usd" = {}
"omni.client" = {}
"omni.ui" = {}
"omni.kit.usd.layers" = {}
"omni.kit.menu.utils" = {}
"omni.kit.widget.live_session_management" = {}
[[python.module]]
name = "omni.kit.widget.live"
[settings]
exts."omni.kit.widget.live".enable_server_tests = false
[[test]]
args = [
"--/app/asyncRendering=false",
"--/renderer/enabled=pxr",
"--/renderer/active=pxr",
"--/app/file/ignoreUnsavedOnExit=true",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/persistent/app/omniverse/filepicker/options_menu/show_details=false",
"--no-window"
]
dependencies = [
"omni.hydra.pxr",
"omni.kit.commands",
"omni.kit.selection",
"omni.kit.renderer.capture",
"omni.kit.mainwindow",
"omni.kit.ui_test",
"omni.kit.test_suite.helpers"
]
stdoutFailPatterns.include = []
stdoutFailPatterns.exclude = [] | 1,101 | TOML | 21.958333 | 77 | 0.654859 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/extension.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.usd
import omni.ext
import omni.kit.app
from .live_state_menu import LiveStateMenu
from .icons import Icons
from .style import Styles
class OmniLiveWidgetExtension(omni.ext.IExt):
def on_startup(self, ext_id):
extension_path = omni.kit.app.get_app_interface().get_extension_manager().get_extension_path(ext_id)
Icons.on_startup(extension_path)
Styles.on_startup()
usd_context = omni.usd.get_context()
self._live_state_menu = LiveStateMenu(usd_context)
self._live_state_menu.register_menu_widgets()
def on_shutdown(self):
self._live_state_menu.unregister_menu_widgets()
Icons.on_shutdown()
| 1,112 | Python | 33.781249 | 108 | 0.739209 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/live_state_menu.py | import carb
import omni.usd
import omni.ui as ui
import omni.kit.usd.layers as layers
import omni.kit.widget.live_session_management as lsm
from functools import partial
from omni.kit.menu.utils import MenuItemDescription, MenuAlignment
from omni.ui import color as cl
from typing import Union
from .style import Styles
class LiveStateDelegate(ui.MenuDelegate):
def __init__(self, layers_interface, **kwargs):
super().__init__(**kwargs)
self._layers = layers_interface
self._live_syncing = layers_interface.get_live_syncing()
self._usd_context = self._live_syncing.usd_context
self._live_background = None
self._drop_down_background = None
self._live_button = None
self._drop_down_button = None
self._live_session_user_list_widget = None
self._layers_event_subs = []
for event in [
layers.LayerEventType.LIVE_SESSION_STATE_CHANGED,
layers.LayerEventType.LIVE_SESSION_USER_JOINED,
layers.LayerEventType.LIVE_SESSION_USER_LEFT,
]:
layers_event_sub = self._layers.get_event_stream().create_subscription_to_pop_by_type(
event, self._on_layer_event, name=f"omni.kit.widget.live {str(event)}"
)
self._layers_event_subs.append(layers_event_sub)
self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop(
self._on_stage_event, name="omni.kit.widget.live stage event"
)
def destroy(self):
if self._live_button:
self._live_button.set_clicked_fn(None)
self._live_button = None
if self._drop_down_button:
self._drop_down_button.set_clicked_fn(None)
self._drop_down_button = None
if self._live_session_user_list_widget:
self._live_session_user_list_widget.destroy()
self._live_session_user_list_widget = None
self._live_syncing = None
self._layers_event_subs = []
self._stage_event_sub = None
def _on_layer_event(self, event: carb.events.IEvent):
payload = layers.get_layer_event_payload(event)
if not payload:
return
if payload.event_type == layers.LayerEventType.LIVE_SESSION_STATE_CHANGED:
if not payload.is_layer_influenced(self._usd_context.get_stage_url()):
return
self.__update_live_state()
elif (
payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_JOINED or
payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_LEFT
):
if not payload.is_layer_influenced(self._usd_context.get_stage_url()):
return
self.__update_live_tooltip()
def _on_stage_event(self, event: carb.events.IEvent):
if event.type == int(omni.usd.StageEventType.OPENED):
if self._live_session_user_list_widget:
self._live_session_user_list_widget.track_layer(self._usd_context.get_stage_url())
def _on_live_widget_button_clicked(self, button, show_options):
menu_widget = lsm.stop_or_show_live_session_widget(
self._live_syncing.usd_context,
not show_options,
False,
show_options
)
if not menu_widget:
return
# Try to align it with the button.
drop_down_x = button.screen_position_x
drop_down_y = button.screen_position_y
drop_down_height = button.computed_height
        # FIXME: The width of the context menu cannot be retrieved, so a fixed width is used here.
menu_widget.show_at(
drop_down_x - 94,
drop_down_y + drop_down_height + 2
)
def build_item(self, item: ui.MenuHelper):
margin = 2
with ui.HStack(width=0, style={"margin" : 0}):
with ui.HStack(content_clipping=1, width=0, style=Styles.LIVE_STATE_ITEM_STYLE):
with ui.VStack():
ui.Spacer(height=margin)
self.__build_user_list()
ui.Spacer(height=margin)
ui.Spacer(width=2 * margin)
with ui.VStack():
ui.Spacer(height=margin)
with ui.ZStack(width=0):
self._live_background = ui.Rectangle(width=50, name="offline")
with ui.HStack(width=50):
ui.Spacer()
with ui.VStack(width=0):
ui.Spacer()
ui.Image(width=14, height=14, name="lightning")
ui.Spacer()
ui.Spacer(width=margin)
ui.Label("LIVE", width=0)
ui.Spacer()
self._live_button = ui.InvisibleButton(width=50, identifier="live_button")
self._live_button.set_clicked_fn(
partial(self._on_live_widget_button_clicked, self._live_button, False)
)
ui.Spacer(height=margin)
ui.Spacer(width=margin)
with ui.VStack():
ui.Spacer(height=margin)
with ui.ZStack(width=0):
self._drop_down_background = ui.Rectangle(width=16, name="offline")
with ui.HStack(width=16):
ui.Spacer()
with ui.VStack(width=0):
ui.Spacer()
ui.Image(width=14, height=14, name="arrow_down")
ui.Spacer()
ui.Spacer()
self._drop_down_button = ui.InvisibleButton(width=16, identifier="drop_down_button")
self._drop_down_button.set_clicked_fn(
partial(self._on_live_widget_button_clicked, self._drop_down_button, True)
)
ui.Spacer(height=margin)
ui.Spacer(width=8)
self.__update_live_state()
def get_menu_alignment(self):
return MenuAlignment.RIGHT
def update_menu_item(self, menu_item: Union[ui.Menu, ui.MenuItem], menu_refresh: bool):
if isinstance(menu_item, ui.MenuItem):
menu_item.visible = False
def __update_live_tooltip(self):
current_session = self._live_syncing.get_current_live_session()
if current_session:
peer_users_count = len(current_session.peer_users)
if peer_users_count > 0:
self._live_background.set_tooltip(
f"Leave Session {current_session.name}\n{peer_users_count + 1} Total Users in Session"
)
else:
self._live_background.set_tooltip(
f"Leave Session {current_session.name}"
)
def __update_live_state(self):
if self._live_background:
current_session = self._live_syncing.get_current_live_session()
if current_session:
self._live_background.name = "live"
self.__update_live_tooltip()
self._drop_down_background.name = "live"
else:
self._live_background.name = "offline"
self._live_background.set_tooltip("Start Session")
self._drop_down_background.name = "offline"
def __build_user_list(self):
        def is_follow_enabled():
            settings = carb.settings.get_settings()
            enabled = settings.get("/app/liveSession/enableMenuFollowUser")
            # Honor the setting only when it is explicitly set to a boolean; default to True otherwise.
            if enabled is True or enabled is False:
                return enabled
            return True
stage_url = self._usd_context.get_stage_url()
self._live_session_user_list_widget = lsm.LiveSessionUserList(
self._usd_context, stage_url,
follow_user_with_double_click=is_follow_enabled(),
allow_timeline_settings=True,
maximum_users=10
)
class LiveStateMenu:
def __init__(self, usd_context):
self._live_menu_name = "Live State Widget"
self._menu_list = [MenuItemDescription(name="placeholder", show_fn=lambda: False)]
self._usd_context = usd_context
self._layers = layers.get_layers(self._usd_context)
self._layer_state_delegate = None
def register_menu_widgets(self):
self._layer_state_delegate = LiveStateDelegate(self._layers)
omni.kit.menu.utils.add_menu_items(self._menu_list, name=self._live_menu_name, delegate=self._layer_state_delegate)
def unregister_menu_widgets(self):
omni.kit.menu.utils.remove_menu_items(self._menu_list, self._live_menu_name)
self._menu_list = None
if self._layer_state_delegate:
self._layer_state_delegate.destroy()
self._layer_state_delegate = None
self._layers = None
self._usd_context = None
| 9,094 | Python | 39.066079 | 123 | 0.55905 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/__init__.py | from .extension import OmniLiveWidgetExtension | 46 | Python | 45.999954 | 46 | 0.913043 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/tests/base.py | import carb
def enable_server_tests():
settings = carb.settings.get_settings()
return settings.get_as_bool("/exts/omni.kit.widget.live/enable_server_tests")
| 168 | Python | 20.124997 | 81 | 0.72619 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/tests/test_live_widget.py | import omni.kit.test
from pathlib import Path
import omni.usd
import omni.client
import omni.kit.app
import omni.kit.usd.layers as layers
import omni.kit.clipboard
from omni.ui.tests.test_base import OmniUiTest
from pxr import Usd, Sdf
from omni.kit.usd.layers.tests.mock_utils import MockLiveSyncingApi, join_new_simulated_user, quit_simulated_user
CURRENT_PATH = Path(__file__).parent.joinpath("../../../../../data")
class TestLiveWidget(OmniUiTest):
# Before running each test
async def setUp(self):
self.previous_retry_values = omni.client.set_retries(0, 0, 0)
self.app = omni.kit.app.get_app()
self.usd_context = omni.usd.get_context()
self.layers = layers.get_layers(self.usd_context)
self.live_syncing = self.layers.get_live_syncing()
await omni.usd.get_context().new_stage_async()
self._golden_img_dir = CURRENT_PATH.absolute().resolve().joinpath("tests")
self.simulated_user_names = []
self.simulated_user_ids = []
self.test_session_name = "test"
self.stage_url = "omniverse://__faked_omniverse_server__/test/live_session.usd"
async def tearDown(self):
omni.client.set_retries(*self.previous_retry_values)
async def wait(self, frames=10):
for i in range(frames):
await self.app.next_update_async()
async def __create_simulated_users(self, count=2):
for i in range(count):
user_name = f"test{i}"
user_id = user_name
self.simulated_user_names.append(user_name)
self.simulated_user_ids.append(user_id)
join_new_simulated_user(user_name, user_id)
await self.wait(2)
async def test_menu_setup(self):
import omni.kit.ui_test as ui_test
await self.usd_context.new_stage_async()
menu_widget = ui_test.get_menubar()
menu = menu_widget.find_menu("Live State Widget")
self.assertTrue(menu)
async def __create_fake_stage(self, join_test_session=True):
format = Sdf.FileFormat.FindByExtension(".usd")
        # Sdf.Layer.New will not save the layer, so it won't fail.
        # This can be used to test a layer identifier with the omniverse scheme without
        # touching a real server.
layer = Sdf.Layer.New(format, self.stage_url)
stage = Usd.Stage.Open(layer.identifier)
session = self.live_syncing.create_live_session(self.test_session_name, layer_identifier=layer.identifier)
self.assertTrue(session)
if join_test_session:
await self.usd_context.attach_stage_async(stage)
self.live_syncing.join_live_session(session)
return stage, layer
@MockLiveSyncingApi
async def test_open_stage_with_live_session(self):
import omni.kit.ui_test as ui_test
await self.usd_context.new_stage_async()
_, layer = await self.__create_fake_stage(False)
menu_widget = ui_test.get_menubar()
menu = menu_widget.find_menu("Live State Widget")
self.assertTrue(menu)
await menu.bring_to_front()
await menu.click(menu.center + ui_test.Vec2(50, 0), False, 10)
        # Hard-code the position of the click point, as the menu position changes after running test_join_live_session_users.
        # It is unclear why the menu position changes after running test_join_live_session_users.
# await menu.click(ui_test.Vec2(1426, 5), False, 10)
await ui_test.human_delay(100)
await ui_test.select_context_menu("Join With Session Link")
stage_url_with_session = f"{layer.identifier}?live_session_name=test"
omni.kit.clipboard.copy(stage_url_with_session)
window = ui_test.find("JOIN LIVE SESSION WITH LINK")
self.assertTrue(window is not None)
input_field = window.find("**/StringField[*].name=='new_session_link_field'")
self.assertTrue(input_field)
confirm_button = window.find("**/Button[*].name=='confirm_button'")
self.assertTrue(confirm_button)
cancel_button = window.find("**/Button[*].name=='cancel_button'")
self.assertTrue(cancel_button)
# Invalid session name will fail to join
await ui_test.human_delay(100)
input_field.model.set_value("111111")
await confirm_button.click()
await ui_test.human_delay(100)
self.assertFalse(self.live_syncing.is_stage_in_live_session())
self.assertTrue(window.window.visible)
input_field.model.set_value("")
await confirm_button.click()
await ui_test.human_delay(100)
self.assertFalse(self.live_syncing.is_stage_in_live_session())
self.assertTrue(window.window.visible)
await cancel_button.click()
self.assertFalse(self.live_syncing.is_stage_in_live_session())
self.assertFalse(window.window.visible)
window.window.visible = True
await self.wait()
# Valid session link
paste_button = window.find("**/ToolButton[*].name=='paste_button'")
self.assertTrue(paste_button)
await paste_button.click()
self.assertEqual(input_field.model.get_value_as_string(), stage_url_with_session)
await confirm_button.click()
self.assertFalse(window.window.visible)
await ui_test.human_delay(300)
self.assertTrue(self.live_syncing.is_stage_in_live_session())
await menu.click(menu.center + ui_test.Vec2(70, 0), False, 10)
await ui_test.human_delay(100)
# Copy session link
omni.kit.clipboard.copy("unkonwn")
await ui_test.select_context_menu("Share Session Link")
window = ui_test.find("SHARE LIVE SESSION LINK")
self.assertTrue(window is not None)
confirm_button = window.find("**/InvisibleButton[*].name=='confirm_button'")
self.assertTrue(confirm_button)
await confirm_button.click()
stage_url_with_session = f"{layer.identifier}?live_session_name=test"
live_session_link = omni.kit.clipboard.paste()
        self.assertEqual(live_session_link, stage_url_with_session)
self.assertFalse(window.window.visible)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
@MockLiveSyncingApi(user_name="test", user_id="test")
async def test_join_live_session_users(self):
await self.__create_fake_stage(join_test_session=True)
await self.wait()
await self.__create_simulated_users()
await self.wait()
await self.finalize_test(
golden_img_dir=self._golden_img_dir,
hide_menu_bar = False,
threshold=1e-4
)
self.live_syncing.stop_all_live_sessions()
await self.wait()
@MockLiveSyncingApi(user_name="test", user_id="test")
async def test_leave_live_session_users(self):
await self.__create_fake_stage(join_test_session=True)
await self.wait()
await self.__create_simulated_users()
await self.wait()
quit_simulated_user(self.simulated_user_ids[0])
await self.wait()
await self.finalize_test(
golden_img_dir=self._golden_img_dir,
hide_menu_bar=False,
threshold=1e-4
)
self.live_syncing.stop_all_live_sessions()
await self.wait()
@MockLiveSyncingApi(user_name="test", user_id="test")
async def test_join_live_session_over_max_users(self):
await self.__create_fake_stage(join_test_session=True)
await self.wait()
await self.__create_simulated_users(26)
await self.wait()
await self.finalize_test(golden_img_dir=self._golden_img_dir, hide_menu_bar=False)
self.live_syncing.stop_all_live_sessions()
await self.wait()
| 7,798 | Python | 36.676328 | 119 | 0.645037 |
omniverse-code/kit/exts/omni.kit.widget.live/omni/kit/widget/live/tests/__init__.py | from .test_live_widget import TestLiveWidget | 44 | Python | 43.999956 | 44 | 0.863636 |
omniverse-code/kit/exts/omni.kit.widget.live/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [2.0.3] - 2022-09-29
### Changes
- Use new omni.kit.usd.layers interfaces.
## [2.0.2] - 2022-08-30
### Changes
- Show menu options for join.
## [2.0.1] - 2022-07-19
### Changes
- Show app name for user in the live session.
## [2.0.0] - 2022-06-29
### Changes
- Refactoring live widget to move all code into python.
## [1.0.0] - 2022-06-13
### Changes
- Initialize live widget changelog. | 476 | Markdown | 20.681817 | 80 | 0.655462 |
omniverse-code/kit/exts/omni.kit.widget.live/docs/index.rst | omni.kit.widget.live
####################
Omniverse Kit Live Mode Control Widget
| 82 | reStructuredText | 15.599997 | 38 | 0.609756 |
omniverse-code/kit/exts/omni.kit.viewport.ready/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "Viewport Ready"
description="Extension to inject a omni.ui element into the Viewport until rendering has begun"
# Keywords for the extension
keywords = ["kit", "viewport", "utility", "ready"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
category = "Viewport"
[dependencies]
"omni.ui" = {}
"omni.usd" = {}
"omni.kit.window.viewport" = {optional = true} # Load after legacy omni.kit.window.viewport, but don't require it
"omni.kit.viewport.window" = {optional = true} # Load after new omni.kit.viewport.window, but don't require it
# Main python module this extension provides; it will be publicly available as "import omni.kit.viewport.ready".
[[python.module]]
name = "omni.kit.viewport.ready"
[settings]
exts."omni.kit.viewport.ready".startup.enabled = true
exts."omni.kit.viewport.ready".startup.viewport = "Viewport"
exts."omni.kit.viewport.ready".startup.show_once = true
exts."omni.kit.viewport.ready".message = "RTX Loading"
exts."omni.kit.viewport.ready".font_size = 72
exts."omni.kit.viewport.ready".background_color = 0
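# Note (illustrative): any of the settings above can also be overridden per launch
# using Kit's standard "--/" command-line syntax (the same form used in the [[test]]
# args below), e.g. --/exts/omni.kit.viewport.ready/message="Loading Renderer".
# The example value is a placeholder, not a recommended default.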
[[test]]
args = [
"--/app/window/width=512",
"--/app/window/height=512",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--no-window"
]
dependencies = [
"omni.kit.renderer.capture"
]
[documentation]
pages = [
"docs/Overview.md",
"docs/CHANGELOG.md",
]
| 1,990 | TOML | 30.603174 | 115 | 0.720603 |
omniverse-code/kit/exts/omni.kit.viewport.ready/omni/kit/viewport/ready/extension.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ext
import omni.kit.app
import carb
class ViewportReadyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
self.__vp_ready = None
self.__vp_extension_hooks = None
# Create a default ready-delegate when exts/omni.kit.viewport.ready/startup/enabled is set
if carb.settings.get_settings().get("exts/omni.kit.viewport.ready/startup/enabled"):
manager = omni.kit.app.get_app().get_extension_manager()
self.__vp_extension_hooks = (
manager.subscribe_to_extension_enable(
self._create_viewport_ready,
self._remove_viewport_ready,
ext_name="omni.kit.window.viewport",
hook_name="omni.kit.viewport.ready.LoadListener-1",
),
manager.subscribe_to_extension_enable(
self._create_viewport_ready,
self._remove_viewport_ready,
ext_name="omni.kit.viewport.window",
hook_name="omni.kit.viewport.ready.LoadListener-2",
)
)
def on_shutdown(self):
self._remove_viewport_ready()
def _create_viewport_ready(self, *args):
async def viewport_ready_ui():
# Honor setting to force this message to only ever show once in a session if requested.
# This needs to happen in the async loop as subscribe_to_extension_enable can call _create_viewport_ready
            # before it has returned and assigned hooks to self.__vp_extension_hooks
settings = carb.settings.get_settings()
show_once = bool(settings.get("exts/omni.kit.viewport.ready/startup/show_once"))
if show_once:
self.__vp_extension_hooks = None
import omni.ui
from .viewport_ready import ViewportReady, ViewportReadyDelegate
max_wait = 100
viewport_name = settings.get("exts/omni.kit.viewport.ready/startup/viewport") or "Viewport"
viewport_window = omni.ui.Workspace.get_window(viewport_name)
while viewport_window is None and max_wait:
await omni.kit.app.get_app().next_update_async()
viewport_window = omni.ui.Workspace.get_window(viewport_name)
max_wait = max_wait - 1
# Create a simple delegate that responds to on_complete and clears the objects created
class DefaultDelegate(ViewportReadyDelegate):
def on_complete(delegate_self):
# self is ViewportReadyExtension instance
self.__vp_ready = None
super().on_complete()
self.__vp_ready = ViewportReady(DefaultDelegate())
import asyncio
asyncio.ensure_future(viewport_ready_ui())
def _remove_viewport_ready(self, *args):
self.__vp_ready = None
self.__vp_extension_hooks = None
| 3,381 | Python | 44.093333 | 117 | 0.623188 |
omniverse-code/kit/exts/omni.kit.viewport.ready/omni/kit/viewport/ready/__init__.py | from .extension import ViewportReadyExtension
| 46 | Python | 22.499989 | 45 | 0.891304 |
omniverse-code/kit/exts/omni.kit.viewport.ready/omni/kit/viewport/ready/viewport_ready.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['ViewportReadyProgressDelegate', 'ViewportReadyDelegate', 'ViewportReady']
import omni.ui
import omni.usd
import carb
class ViewportReadyProgressDelegate:
def __init__(self, settings: carb.settings.ISettings):
self.__progress = None
self.__label = None
self.__shader_count = None
self.__enabled_mask_id = None
self.setup_updates(settings)
def __del__(self):
self.destroy()
def setup_updates(self, settings: carb.settings.ISettings):
"""Setup the ViewportReadyProgressDelegate to get progress updates"""
try:
import re
import omni.activity.core
import omni.activity.profiler
            self.__re_matcher = re.compile(r"^.*\.hlsl \(UID: [0-9]+\)")
persisted_shader_count = settings.get("/persistent/exts/omni.kit.viewport.ready/shader_count")
# Guard against divide by 0 error in activity_event which shouldn't happen but did once somehow.
self.__shader_count = (0, persisted_shader_count if persisted_shader_count else 1)
profiler = omni.activity.profiler.get_activity_profiler()
if profiler:
self.__enabled_mask_id = profiler.enable_capture_mask(omni.activity.profiler.CAPTURE_MASK_STARTUP)
activity = omni.activity.core.get_instance()
self.__activity = (activity, activity.create_callback_to_pop(self.activity_event))
except (ImportError, RuntimeError):
required_exts = "omni.activity.profiler and omni.activity.pump"
carb.log_error(f"{required_exts} must be enabled with /exts/omni.kit.viewport.ready/activity_progress=true")
def destroy(self):
self.__activity = None
progress, self.__progress = self.__progress, None
if progress:
progress.destroy()
label, self.__label = self.__label, None
if label:
label.destroy()
# Save the total number of shaders processed as the progress max hint for the next launch
if self.__shader_count:
carb.settings.get_settings().set("/persistent/exts/omni.kit.viewport.ready/shader_count", self.__shader_count[0])
# Restore the profile mask to whatever it was before
enabled_mask_id, self.__enabled_mask_id = self.__enabled_mask_id, None
if enabled_mask_id is not None:
profiler = omni.activity.profiler.get_activity_profiler()
profiler.disable_capture_mask(enabled_mask_id)
def activity_event(self, node, root_node: bool = True):
"""Event handler for omni.activity messages"""
# This is a rather delicate method relying on strings being constant to filter properly
node_name = node.name or ""
if not root_node:
if self.__re_matcher.search(node_name) is not None:
self.__shader_count = self.__shader_count[0] + 1, self.__shader_count[1]
self.set_progress(self.__shader_count[0] / self.__shader_count[1], f"Compiling: {node_name}")
elif node_name == "Ray Tracing Pipeline":
for node_id in range(node.child_count):
self.activity_event(node.get_child(node_id), False)
def set_progress(self, amount: float, message: str):
if self.__progress:
self.__progress.model.set_value(amount)
if self.__label:
self.__label.text = message
def build_ui(self, settings: carb.settings.ISettings, font_size: float, color: omni.ui.color):
"""Build the progress ui with font and color style hints"""
margin = settings.get("/exts/omni.kit.viewport.ready/activity_progress/margin")
margin = omni.ui.Percent(20 if margin is None else margin)
omni.ui.Spacer(width=margin)
with omni.ui.VStack():
omni.ui.Spacer(height=5)
self.__progress = omni.ui.ProgressBar(height=5,
alignment=omni.ui.Alignment.CENTER_BOTTOM,
style={"border_width": 2, "border_radius": 5,
"color": color})
omni.ui.Spacer(height=5)
self.__label = omni.ui.Label("", alignment=omni.ui.Alignment.CENTER_TOP, style={"font_size": font_size})
omni.ui.Spacer(width=margin)
class ViewportReadyDelegate:
def __init__(self, viewport_name: str = 'Viewport', usd_context_name: str = '', viewport_handle: int = None):
self.__viewport_name = viewport_name
self.__usd_context_name = usd_context_name
self.__viewport_handle = viewport_handle
self.__frame = None
        # The signal RTX sends that the first frame is ready could actually come before omni.ui has called the ui build fn
self.__destroyed = False
# Fill in some default based on carb.settings
settings = carb.settings.get_settings()
self.__message = settings.get("/exts/omni.kit.viewport.ready/message") or "RTX Loading"
self.__font_size = settings.get("/exts/omni.kit.viewport.ready/font_size") or 72
if settings.get("/exts/omni.kit.viewport.ready/activity_progress/enabled"):
self.__progress = ViewportReadyProgressDelegate(settings)
else:
self.__progress = None
@property
def message(self) -> str:
'''Return the string for the default omni.ui.Label.'''
if self.__message == "RTX Loading":
active = {
"rtx": "RTX",
"iray": "Iray",
"pxr": "Storm",
"index": "Index"
}.get(carb.settings.get_settings().get("/renderer/active"), None)
if active:
return f"{active} Loading"
return self.__message
@property
def font_size(self) -> float:
'''Return the font-size for the default omni.ui.Label.'''
return self.__font_size
@property
def usd_context_name(self) -> str:
'''Return the omni.usd.UsdContext name this delegate is waiting for completion on.'''
return self.__usd_context_name
@property
def viewport_name(self) -> str:
'''Return the name of the omni.ui.Window this delegate is waiting for completion on.'''
return self.__viewport_name
@property
def viewport_handle(self) -> int:
'''Return the ViewportHandle this delegate is waiting for completion on or None for any Viewport.'''
return self.__viewport_handle
def on_complete(self):
'''Callback function invoked when first rendered frame is delivered'''
# Make sure to always call destroy if any exception is thrown during log_complete
try:
self.log_complete()
finally:
self.destroy()
def log_complete(self, msg: str = None, prefix_time_since_start: bool = True):
'''Callback function that will log to info (and stdout if /app/enableStdoutOutput is enabled)'''
if msg is None:
msg = 'RTX ready'
if prefix_time_since_start:
import omni.kit.app
seconds = omni.kit.app.get_app().get_time_since_start_s()
msg = "[{0:.3f}s] ".format(seconds) + msg
if carb.settings.get_settings().get('/app/enableStdoutOutput'):
# Going to sys.stdout directly is cleaner when launched with -info
import sys
sys.stdout.write(msg + '\n')
else:
carb.log_info(msg)
def build_label(self) -> omni.ui.Widget:
'''Simple method to override the basic label showing progress.'''
return omni.ui.Label(self.message, alignment=omni.ui.Alignment.CENTER, style={"font_size": self.font_size})
def build_progress(self) -> None:
settings = carb.settings.get_settings()
color = settings.get("/exts/omni.kit.viewport.ready/activity_progress/color")
color = omni.ui.color("#76b900" if color is None else color)
font_size = settings.get("/exts/omni.kit.viewport.ready/activity_progress/font_size")
if font_size is None:
font_size = self.__font_size / 6
omni.ui.Spacer(height=5)
with omni.ui.HStack():
self.__progress.build_ui(settings, font_size, color)
def build_frame(self) -> None:
'''Method to override the basic frame showing progress.'''
# If RTX is ready before omni.ui is calling the build function, then do nothing
if self.__destroyed:
return
self.__frame = omni.ui.ZStack()
with self.__frame:
bg_color = carb.settings.get_settings().get('/exts/omni.kit.viewport.ready/background_color')
if bg_color:
omni.ui.Rectangle(style={"background_color": bg_color})
with omni.ui.VStack():
kwargs = {}
if self.__progress:
kwargs = {"height": omni.ui.Percent(35)}
omni.ui.Spacer(**kwargs)
self.build_label()
if self.__progress:
self.build_progress()
omni.ui.Spacer()
def build_ui(self) -> None:
'''Method to build the progress/info state in the Viewport window.'''
# If RTX is ready before build_ui is called then do nothing
if self.__destroyed:
return
if self.__frame is None:
viewport_window = omni.ui.Workspace.get_window(self.viewport_name)
if viewport_window:
frame = None
if hasattr(viewport_window, 'get_frame'):
frame = viewport_window.get_frame('omni.kit.viewport.ready.ViewportReadyDelegate')
else:
# TODO: Use omni.kit.viewport.utility get_viewport_window to manage omni.ui Wrapper on legacy viewport
if hasattr(viewport_window, 'frame'):
frame = viewport_window.frame
if frame:
frame.set_build_fn(self.build_frame)
def __del__(self):
self.destroy()
def destroy(self):
self.__destroyed = True
progress, self.__progress = self.__progress, None
if progress:
progress.destroy()
if self.__frame:
self.__frame.visible = False
if hasattr(self.__frame, 'clear'):
self.__frame.clear()
self.__frame.destroy()
self.__frame = None
class ViewportReady:
def __init__(self, viewport_ready: ViewportReadyDelegate = None):
self.__viewport_ready = viewport_ready if viewport_ready else ViewportReadyDelegate()
usd_context_name = self.__viewport_ready.usd_context_name
usd_context = omni.usd.get_context(usd_context_name)
if usd_context:
# Build the UI objects now
self.__viewport_ready.build_ui()
            # Subscribe to the event to wait for frame delivery
self.__new_frame_sub = usd_context.get_rendering_event_stream().create_subscription_to_push_by_type(
int(omni.usd.StageRenderingEventType.NEW_FRAME),
self.__on_event,
name=f"omni.kit.viewport.ready.ViewportReady"
)
else:
raise RuntimeError(f'No omni.usd.UsdContext found with name "{usd_context_name}"')
def __del__(self):
self.destroy()
def destroy(self):
self.__new_frame_sub = None
self.__viewport_ready = None
def __on_event(self, e: carb.events.IEvent):
waiting_for = self.__viewport_ready.viewport_handle
if (waiting_for is None) or (waiting_for == e.payload["viewport_handle"]):
if self.__viewport_complete():
# Avoid wrapping callback in try/catch by storing it first, destroying, then calling it
viewport_ready, self.__viewport_ready = self.__viewport_ready, None
self.destroy()
viewport_ready.on_complete()
del viewport_ready
def __viewport_complete(self):
usd_context_name = self.__viewport_ready.usd_context_name
window_name = self.__viewport_ready.viewport_name
try:
from omni.kit.viewport.window import get_viewport_window_instances
# Get every ViewportWindow, regardless of UsdContext it is attached to
for window in get_viewport_window_instances(usd_context_name):
if window.title == window_name:
return window.viewport_api.frame_info.get('viewport_handle', None) is not None
except ImportError:
pass
try:
import omni.kit.viewport_legacy as vp_legacy
vp_iface = vp_legacy.get_viewport_interface()
viewport_handle = vp_iface.get_instance(window_name)
if viewport_handle:
vp_window = vp_iface.get_viewport_window(viewport_handle)
if vp_window:
return bool(vp_window.get_drawable_ldr_resource() or vp_window.get_drawable_hdr_resource())
except ImportError:
pass
return False
| 13,578 | Python | 42.107936 | 125 | 0.602887 |
omniverse-code/kit/exts/omni.kit.viewport.ready/omni/kit/viewport/ready/tests/__init__.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
__all__ = ['TestViewportReady']
import omni.kit.test
from ..viewport_ready import ViewportReadyDelegate, ViewportReady
import omni.usd
import omni.ui
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
import carb
EXTENSION_PATH = Path(carb.tokens.get_tokens_interface().resolve("${omni.kit.viewport.ready}")).resolve().absolute()
TEST_PATH = EXTENSION_PATH.joinpath("data", "tests")
WIDTH, HEIGHT = 512, 512
class TestViewportReady(OmniUiTest):
async def setUp(self):
await super().setUp()
async def tearDown(self):
await super().tearDown()
@staticmethod
def __test_image_name(base_name: str):
settings = carb.settings.get_settings()
if settings.get("/exts/omni.kit.viewport.ready/activity_progress/enabled"):
base_name += "_progress"
if settings.get("/exts/omni.kit.viewport.window/startup/windowName"):
base_name += "_viewport"
return base_name + ".png"
async def test_ready_delegate_begin(self):
'''Test the Viewport readys status message is placed inside a ui.Window named Viewport'''
await self.create_test_area(WIDTH, HEIGHT)
test_window = omni.ui.Window(
"Viewport",
dockPreference=omni.ui.DockPreference.DISABLED,
flags=omni.ui.WINDOW_FLAGS_NO_SCROLLBAR | omni.ui.WINDOW_FLAGS_NO_TITLE_BAR | omni.ui.WINDOW_FLAGS_NO_RESIZE,
width=WIDTH,
height=HEIGHT
)
await self.wait_n_updates()
delegate = ViewportReadyDelegate()
delegate.build_ui()
await self.wait_n_updates()
await self.finalize_test(golden_img_dir=TEST_PATH, golden_img_name=self.__test_image_name("test_ready_delegate_begin"))
async def test_ready_delegate_end(self):
'''Test the Viewport readys status message is removed from the Viewport'''
await self.create_test_area(WIDTH, HEIGHT)
test_window = omni.ui.Window(
"Viewport",
dockPreference=omni.ui.DockPreference.DISABLED,
flags=omni.ui.WINDOW_FLAGS_NO_SCROLLBAR | omni.ui.WINDOW_FLAGS_NO_TITLE_BAR | omni.ui.WINDOW_FLAGS_NO_RESIZE,
width=WIDTH,
height=HEIGHT
)
await self.wait_n_updates()
delegate = ViewportReadyDelegate()
delegate.build_ui()
await self.wait_n_updates()
delegate.on_complete()
await self.wait_n_updates()
await self.finalize_test(golden_img_dir=TEST_PATH, golden_img_name=self.__test_image_name("test_ready_delegate_end"))
async def test_settings(self):
'''Test carb.settings usage for default delegate'''
# Make sure the values being set aren't the defaults!
delegate = ViewportReadyDelegate()
self.assertNotEqual(delegate.message, 'TEST MESSAGE')
self.assertNotEqual(delegate.font_size, 128)
import carb
settings = carb.settings.get_settings()
# Save all the defaults for restoration for later tests
preferences = [
'/exts/omni.kit.viewport.ready/message',
'/exts/omni.kit.viewport.ready/font_size',
]
defaults = {k: settings.get(k) for k in preferences}
try:
settings.set('/exts/omni.kit.viewport.ready/message', 'TEST MESSAGE')
settings.set('/exts/omni.kit.viewport.ready/font_size', 128)
delegate = ViewportReadyDelegate(viewport_name='Not a Viewport', usd_context_name='Not a UsdContext')
self.assertEqual(delegate.message, 'TEST MESSAGE')
self.assertEqual(delegate.font_size, 128)
self.assertEqual(delegate.viewport_name, 'Not a Viewport')
self.assertEqual(delegate.usd_context_name, 'Not a UsdContext')
except Exception as e:
raise e
finally:
# Restore the defaults
for k, v in defaults.items():
settings.set(k, v)
async def test_throw_in_log_message(self):
'''Test the Viewport readys status message is removed from the Viewport when log_message throws an Exception'''
await self.create_test_area(WIDTH, HEIGHT)
test_window = omni.ui.Window(
"Viewport",
dockPreference=omni.ui.DockPreference.DISABLED,
flags=omni.ui.WINDOW_FLAGS_NO_SCROLLBAR | omni.ui.WINDOW_FLAGS_NO_TITLE_BAR | omni.ui.WINDOW_FLAGS_NO_RESIZE,
width=WIDTH,
height=HEIGHT
)
await self.wait_n_updates()
log_complete_threw = False
class LocalExpection(RuntimeError):
pass
class ThrowingViewportReadyDelegate(ViewportReadyDelegate):
def log_complete(self):
nonlocal log_complete_threw
log_complete_threw = True
raise LocalExpection("Expected to throw")
try:
delegate = ThrowingViewportReadyDelegate()
delegate.build_ui()
await self.wait_n_updates()
await self.capture_and_compare(golden_img_dir=TEST_PATH, golden_img_name=self.__test_image_name("test_ready_delegate_threw_begin"))
delegate.on_complete()
except LocalExpection:
pass
self.assertTrue(log_complete_threw)
await self.wait_n_updates()
await self.finalize_test(golden_img_dir=TEST_PATH, golden_img_name=self.__test_image_name("test_ready_delegate_threw_end"))
| 5,879 | Python | 38.463087 | 143 | 0.647899 |
omniverse-code/kit/exts/omni.kit.viewport.ready/docs/CHANGELOG.md | # CHANGELOG
This document records all notable changes to the `omni.kit.viewport.ready` extension.
This project adheres to [Semantic Versioning](https://semver.org/).
## [1.0.2] - 2022-10-28
### Fixed
- Possibility that RTX has sent a ready event before omni.ui has called the build_fn callback.
### Added
- Setting to force the message to only ever appear once in application session (on by default).
- Setting to specify a background color for the "RTX Loading" frame (transparent by default).
## [1.0.1] - 2022-04-22
### Added
- Log to stdout when requested
### Fixed
- Early exit wait loop after Viewport created
## [1.0.0] - 2022-04-12
### Added
- Initial release
| 672 | Markdown | 29.590908 | 95 | 0.720238 |
omniverse-code/kit/exts/omni.kit.viewport.ready/docs/README.md | # Viewport Ready extension [omni.kit.viewport.ready]
Utility extension to place a message (or omni.ui objects) in the Viewport until a rendered frame has been delivered.
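Below is a minimal usage sketch (not part of the original README) of driving the overlay directly with a custom delegate. `ViewportReady` and `ViewportReadyDelegate` are the classes shipped in `viewport_ready.py`; the subclass name and the printed message are illustrative assumptions only.

```python
from omni.kit.viewport.ready.viewport_ready import ViewportReady, ViewportReadyDelegate


class MyDelegate(ViewportReadyDelegate):
    """Hypothetical delegate reacting to delivery of the first rendered frame."""

    def on_complete(self):
        # Called once the first frame arrives; the base class logs and tears
        # the overlay down.
        print("Viewport is ready")
        super().on_complete()


# Build the loading overlay in the "Viewport" window; it is removed
# automatically when the first rendered frame is delivered.
ready = ViewportReady(MyDelegate(viewport_name="Viewport", usd_context_name=""))
```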
| 170 | Markdown | 55.999981 | 116 | 0.8 |
omniverse-code/kit/exts/omni.volume_nodes/ogn/docs/SaveVDB.rst | .. _omni_volume_SaveVDB_1:
.. _omni_volume_SaveVDB:
.. ================================================================================
.. THIS PAGE IS AUTO-GENERATED. DO NOT MANUALLY EDIT.
.. ================================================================================
:orphan:
.. meta::
:title: Save VDB
:keywords: lang-en omnigraph node Omni Volume WriteOnly volume save-v-d-b
Save VDB
========
.. <description>
Saves a VDB from a memory buffer to file.
.. </description>
Installation
------------
To use this node enable :ref:`omni.volume_nodes<ext_omni_volume_nodes>` in the Extension Manager.
Inputs
------
.. csv-table::
    :header: "Name", "Type", "Description", "Default"
:widths: 20, 20, 50, 10
"inputs:assetPath", "``token``", "Path to VDB file to save.", ""
"inputs:compressionMode", "``token``", "The compression mode to use when encoding", "None"
"", "*allowedTokens*", "None,Blosc,Zip", ""
"", "*default*", "None", ""
"inputs:data", "``uint[]``", "Data to save to file in NanoVDB or OpenVDB memory format.", "[]"
"inputs:execIn", "``execution``", "Input execution", "None"
Outputs
-------
.. csv-table::
    :header: "Name", "Type", "Description", "Default"
:widths: 20, 20, 50, 10
"outputs:execOut", "``execution``", "Output execution", "None"
Metadata
--------
.. csv-table::
:header: "Name", "Value"
:widths: 30,70
"Unique ID", "omni.volume.SaveVDB"
"Version", "1"
"Extension", "omni.volume_nodes"
"Has State?", "True"
"Implementation Language", "C++"
"Default Memory Type", "cpu"
"Generated Code Exclusions", "tests"
"tags", "VDB"
"uiName", "Save VDB"
"__tokens", "{""none"": ""None"", ""blosc"": ""Blosc"", ""zip"": ""Zip""}"
"Categories", "Omni Volume"
"Generated Class Name", "SaveVDBDatabase"
"Python Module", "omni.volume_nodes"
| 1,894 | reStructuredText | 24.266666 | 98 | 0.541183 |
omniverse-code/kit/exts/omni.volume_nodes/ogn/docs/LoadVDB.rst | .. _omni_volume_LoadVDB_1:
.. _omni_volume_LoadVDB:
.. ================================================================================
.. THIS PAGE IS AUTO-GENERATED. DO NOT MANUALLY EDIT.
.. ================================================================================
:orphan:
.. meta::
:title: Load VDB
:keywords: lang-en omnigraph node Omni Volume ReadOnly volume load-v-d-b
Load VDB
========
.. <description>
Loads a VDB from file and puts it in a memory buffer.
.. </description>
Installation
------------
To use this node enable :ref:`omni.volume_nodes<ext_omni_volume_nodes>` in the Extension Manager.
Inputs
------
.. csv-table::
    :header: "Name", "Type", "Description", "Default"
:widths: 20, 20, 50, 10
"inputs:assetPath", "``token``", "Path to VDB file to load.", ""
"inputs:execIn", "``execution``", "Input execution", "None"
"inputs:gridName", "``token``", "Optional name of the grid to extract. All grids are extracted if this is left empty.", "None"
Outputs
-------
.. csv-table::
    :header: "Name", "Type", "Description", "Default"
:widths: 20, 20, 50, 10
"outputs:data", "``uint[]``", "Data loaded from VDB file in NanoVDB memory format.", "None"
"outputs:execOut", "``execution``", "Output execution", "None"
Metadata
--------
.. csv-table::
:header: "Name", "Value"
:widths: 30,70
"Unique ID", "omni.volume.LoadVDB"
"Version", "1"
"Extension", "omni.volume_nodes"
"Has State?", "True"
"Implementation Language", "C++"
"Default Memory Type", "cpu"
"Generated Code Exclusions", "tests"
"tags", "VDB"
"uiName", "Load VDB"
"__tokens", "{}"
"Categories", "Omni Volume"
"Generated Class Name", "LoadVDBDatabase"
"Python Module", "omni.volume_nodes"
| 1,789 | reStructuredText | 23.520548 | 131 | 0.553382 |
omniverse-code/kit/exts/omni.volume_nodes/omni/volume_nodes/ogn/SaveVDBDatabase.py | """Support for simplified access to data on nodes of type omni.volume.SaveVDB
Saves a VDB from a memory buffer to file.
"""
import numpy
import omni.graph.core as og
import omni.graph.core._omni_graph_core as _og
import omni.graph.tools.ogn as ogn
class SaveVDBDatabase(og.Database):
"""Helper class providing simplified access to data on nodes of type omni.volume.SaveVDB
Class Members:
node: Node being evaluated
Attribute Value Properties:
Inputs:
inputs.assetPath
inputs.compressionMode
inputs.data
inputs.execIn
Outputs:
outputs.execOut
Predefined Tokens:
tokens.none
tokens.blosc
tokens.zip
"""
# Imprint the generator and target ABI versions in the file for JIT generation
GENERATOR_VERSION = (1, 41, 3)
TARGET_VERSION = (2, 139, 12)
# This is an internal object that provides per-class storage of a per-node data dictionary
PER_NODE_DATA = {}
# This is an internal object that describes unchanging attributes in a generic way
# The values in this list are in no particular order, as a per-attribute tuple
# Name, Type, ExtendedTypeIndex, UiName, Description, Metadata,
# Is_Required, DefaultValue, Is_Deprecated, DeprecationMsg
# You should not need to access any of this data directly, use the defined database interfaces
INTERFACE = og.Database._get_interface([
('inputs:assetPath', 'token', 0, None, 'Path to VDB file to save.', {}, True, "", False, ''),
('inputs:compressionMode', 'token', 0, None, 'The compression mode to use when encoding', {ogn.MetadataKeys.ALLOWED_TOKENS: 'None,Blosc,Zip', 'default': 'None', ogn.MetadataKeys.ALLOWED_TOKENS_RAW: '{"none": "None", "blosc": "Blosc", "zip": "Zip"}'}, False, None, False, ''),
('inputs:data', 'uint[]', 0, None, 'Data to save to file in NanoVDB or OpenVDB memory format.', {}, True, [], False, ''),
('inputs:execIn', 'execution', 0, None, 'Input execution', {}, True, None, False, ''),
('outputs:execOut', 'execution', 0, None, 'Output execution', {}, True, None, False, ''),
])
class tokens:
none = "None"
blosc = "Blosc"
zip = "Zip"
@classmethod
def _populate_role_data(cls):
"""Populate a role structure with the non-default roles on this node type"""
role_data = super()._populate_role_data()
role_data.inputs.execIn = og.AttributeRole.EXECUTION
role_data.outputs.execOut = og.AttributeRole.EXECUTION
return role_data
class ValuesForInputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { }
"""Helper class that creates natural hierarchical access to input attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedReadAttributes = []
self._batchedReadValues = []
@property
def assetPath(self):
data_view = og.AttributeValueHelper(self._attributes.assetPath)
return data_view.get()
@assetPath.setter
def assetPath(self, value):
if self._setting_locked:
raise og.ReadOnlyError(self._attributes.assetPath)
data_view = og.AttributeValueHelper(self._attributes.assetPath)
data_view.set(value)
@property
def compressionMode(self):
data_view = og.AttributeValueHelper(self._attributes.compressionMode)
return data_view.get()
@compressionMode.setter
def compressionMode(self, value):
if self._setting_locked:
raise og.ReadOnlyError(self._attributes.compressionMode)
data_view = og.AttributeValueHelper(self._attributes.compressionMode)
data_view.set(value)
@property
def data(self):
data_view = og.AttributeValueHelper(self._attributes.data)
return data_view.get()
@data.setter
def data(self, value):
if self._setting_locked:
raise og.ReadOnlyError(self._attributes.data)
data_view = og.AttributeValueHelper(self._attributes.data)
data_view.set(value)
self.data_size = data_view.get_array_size()
@property
def execIn(self):
data_view = og.AttributeValueHelper(self._attributes.execIn)
return data_view.get()
@execIn.setter
def execIn(self, value):
if self._setting_locked:
raise og.ReadOnlyError(self._attributes.execIn)
data_view = og.AttributeValueHelper(self._attributes.execIn)
data_view.set(value)
def _prefetch(self):
readAttributes = self._batchedReadAttributes
newValues = _og._prefetch_input_attributes_data(readAttributes)
if len(readAttributes) == len(newValues):
self._batchedReadValues = newValues
class ValuesForOutputs(og.DynamicAttributeAccess):
LOCAL_PROPERTY_NAMES = { }
"""Helper class that creates natural hierarchical access to output attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
self._batchedWriteValues = { }
@property
def execOut(self):
data_view = og.AttributeValueHelper(self._attributes.execOut)
return data_view.get()
@execOut.setter
def execOut(self, value):
data_view = og.AttributeValueHelper(self._attributes.execOut)
data_view.set(value)
def _commit(self):
_og._commit_output_attributes_data(self._batchedWriteValues)
self._batchedWriteValues = { }
class ValuesForState(og.DynamicAttributeAccess):
"""Helper class that creates natural hierarchical access to state attributes"""
def __init__(self, node: og.Node, attributes, dynamic_attributes: og.DynamicAttributeInterface):
"""Initialize simplified access for the attribute data"""
context = node.get_graph().get_default_graph_context()
super().__init__(context, node, attributes, dynamic_attributes)
def __init__(self, node):
super().__init__(node)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_INPUT)
self.inputs = SaveVDBDatabase.ValuesForInputs(node, self.attributes.inputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
self.outputs = SaveVDBDatabase.ValuesForOutputs(node, self.attributes.outputs, dynamic_attributes)
dynamic_attributes = self.dynamic_attribute_data(node, og.AttributePortType.ATTRIBUTE_PORT_TYPE_STATE)
self.state = SaveVDBDatabase.ValuesForState(node, self.attributes.state, dynamic_attributes)
| 7,422 | Python | 42.409356 | 283 | 0.644301 |
omniverse-code/kit/exts/usdrt.gf.tests/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in UI
title = "usdrt.Gf tests"
description="Tests for usdrt.Gf library"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Runtime"
# Keywords for the extension
keywords = ["usdrt", "runtime", "tests"]
# Location of change log file in target (final) folder of extension, relative to the root. Can also be just a content
# of it instead of file path. More info on writing changelog: https://keepachangelog.com/en/1.0.0/
# changelog="docs/CHANGELOG.md"
[dependencies]
"omni.usd.libs" = {}
# Main python module this extension provides, it will be publicly available as "import usdrt.gf.tests".
[[python.module]]
name = "usdrt.gf.tests"
[[test]]
name="pxrGf"
dependencies = [
"omni.kit.test"
]
| 1,116 | TOML | 26.924999 | 117 | 0.729391 |
omniverse-code/kit/exts/omni.kit.property.audio/omni/kit/property/audio/scripts/audio_properties.py | import os
import carb
import omni.ext
from pathlib import Path
from .audio_settings_widget import AudioSettingsWidget
from pxr import Sdf, OmniAudioSchema, UsdMedia
TEST_DATA_PATH = ""
class AudioPropertyExtension(omni.ext.IExt):
def __init__(self):
self._registered = False
super().__init__()
def on_startup(self, ext_id):
self._register_widget()
manager = omni.kit.app.get_app().get_extension_manager()
extension_path = manager.get_extension_path(ext_id)
global TEST_DATA_PATH
TEST_DATA_PATH = Path(extension_path).joinpath("data").joinpath("tests")
def on_shutdown(self):
if self._registered:
self._unregister_widget()
def _register_widget(self):
import omni.kit.window.property as p
from omni.kit.window.property.property_scheme_delegate import PropertySchemeDelegate
from omni.kit.property.usd.usd_property_widget import SchemaPropertiesWidget, MultiSchemaPropertiesWidget
w = p.get_window()
if w:
w.register_widget("prim", "media", SchemaPropertiesWidget("Media", UsdMedia.SpatialAudio, False))
w.register_widget("prim", "audio_sound", SchemaPropertiesWidget("Sound", OmniAudioSchema.OmniSound, False))
w.register_widget("prim", "audio_listener", SchemaPropertiesWidget("Listener", OmniAudioSchema.OmniListener, False))
w.register_widget("layers", "audio_settings", AudioSettingsWidget())
self._registered = True
def _unregister_widget(self):
import omni.kit.window.property as p
w = p.get_window()
if w:
w.unregister_widget("prim", "media")
w.unregister_widget("prim", "audio_sound")
w.unregister_widget("prim", "audio_listener")
w.unregister_widget("layers", "audio_settings")
self._registered = False
| 1,899 | Python | 36.999999 | 128 | 0.657188 |
omniverse-code/kit/exts/omni.kit.property.audio/omni/kit/property/audio/scripts/__init__.py | from .audio_properties import *
| 32 | Python | 15.499992 | 31 | 0.78125 |
omniverse-code/kit/exts/omni.kit.property.audio/omni/kit/property/audio/scripts/audio_settings_widget.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.kit.app
import omni.kit.ui
import omni.ui as ui
from pxr import Usd
import omni.usd
import omni.usd.audio
from omni.kit.property.usd.usd_property_widget import UsdPropertiesWidgetBuilder
from omni.kit.window.property.templates import SimplePropertyWidget
class AudioSettingsWidget(SimplePropertyWidget):
def __init__(self):
super().__init__(title="Audio Settings", collapsed=False)
self._stage = None
self._audio = omni.usd.audio.get_stage_audio_interface()
self._listener_setting_model = None
self._events = self._audio.get_metadata_change_stream()
if self._events is not None:
self._stage_event_sub = self._events.create_subscription_to_pop(
self._on_metadata_event, name="audio settings window"
)
self._doppler_setting = None
self._distance_delay_setting = None
self._interaural_delay_setting = None
self._concurrent_voices_setting = None
self._speed_of_sound_setting = None
self._doppler_scale_setting = None
self._doppler_limit_setting = None
self._spatial_timescale_setting = None
self._nonspatial_timescale_setting = None
def __del__(self):
self._stage_event_sub = None
self._events = None
def on_new_payload(self, payload):
if not super().on_new_payload(payload):
return False
# stage is not part of LayerItem payload
self.set_stage(omni.usd.get_context().get_stage())
return payload is not None
class ListenerComboBoxNotifier(omni.ui.AbstractItemModel):
class MinimalItem(omni.ui.AbstractItem):
def __init__(self, text):
super().__init__()
self.model = omni.ui.SimpleStringModel(text)
def __init__(self, refresh_items_callback, set_callback):
super().__init__()
self._current_index = omni.ui.SimpleIntModel()
self._current_index.add_value_changed_fn(self._changed_model)
self._set_callback = set_callback
self._refresh_items_callback = refresh_items_callback
self._options = []
self.refresh_items()
self._path = ""
def get_value_as_string(self):
return self._path
def refresh_items(self):
self._options = ["Active Camera"]
self._options.extend(self._refresh_items_callback())
self._items = [
AudioSettingsWidget.ListenerComboBoxNotifier.MinimalItem(text) for text in self._options
]
return self._items
def _changed_model(self, model):
self._set_callback(model.as_int - 1)
self._item_changed(None)
def set_value(self, value):
# item 0 will always be "Active Camera" => handle 'None' or an empty string specially
# as if it were that value.
if value is None or value == "":
self._current_index.as_int = 0
self._path = ""
return
for i in range(0, len(self._options)):
if self._options[i] == value:
self._current_index.as_int = i
break
self._path = value
def get_item_children(self, item):
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self._current_index
return item.model
class DefaultsComboBoxNotifier(omni.ui.AbstractItemModel):
class MinimalItem(omni.ui.AbstractItem):
def __init__(self, text):
super().__init__()
self.model = omni.ui.SimpleStringModel(text)
def __init__(self, set_callback):
super().__init__()
self._options = [
["on", omni.usd.audio.FeatureDefault.ON],
["off", omni.usd.audio.FeatureDefault.OFF],
["force on", omni.usd.audio.FeatureDefault.FORCE_ON],
["force off", omni.usd.audio.FeatureDefault.FORCE_OFF],
]
self._current_index = omni.ui.SimpleIntModel()
self._current_index.add_value_changed_fn(self._changed_model)
self._set_callback = set_callback
self._items = [
AudioSettingsWidget.DefaultsComboBoxNotifier.MinimalItem(text) for (text, value) in self._options
]
def _changed_model(self, model):
self._set_callback(self._options[model.as_int][1])
self._item_changed(None)
def set_value(self, value):
            for i in range(0, len(self._options)):
if self._options[i][1] == value:
self._current_index.as_int = i
break
def get_item_children(self, item):
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self._current_index
return item.model
class ChangeNotifier(omni.ui.AbstractValueModel):
def __init__(self, update_callback):
super(AudioSettingsWidget.ChangeNotifier, self).__init__()
self._update_callback = update_callback
self._value = 0
def get_value_as_string(self):
return str(self._value)
def get_value_as_int(self):
return int(self._value)
def get_value_as_float(self):
return float(self._value)
def begin_edit(self):
pass
class SlowChangeNotifier(ChangeNotifier):
def __init__(self, update_callback):
super(AudioSettingsWidget.SlowChangeNotifier, self).__init__(update_callback)
def set_value(self, value):
self._value = value
self._value_changed()
def end_edit(self):
self._update_callback(self._value)
pass
class FastChangeNotifier(ChangeNotifier):
def __init__(self, update_callback):
super(AudioSettingsWidget.FastChangeNotifier, self).__init__(update_callback)
def set_value(self, value):
self._value = value
self._value_changed()
self._update_callback(self._value)
def end_edit(self):
pass
def set_stage(self, stage: Usd.Stage):
self._stage = stage
def _caption(self, text, width=150):
"""Create a formated heading"""
with omni.ui.ZStack():
omni.ui.Rectangle(name="caption", width=width, style={"background_color": 0x454545})
omni.ui.Label(text, name="caption")
def _create_tooltip(self, text):
"""Create a tooltip in a fixed style"""
with omni.ui.VStack(width=400, style={"Label": {"color": 0xFF3B494B}}):
omni.ui.Label(text, word_wrap=True)
def _refresh_active_listener(self):
if not self._listener_setting_model:
return
active_listener = self._audio.get_active_listener()
if active_listener is None:
self._listener_setting_model.set_value(None)
else:
self._listener_setting_model.set_value(str(active_listener.GetPath()))
def _refresh(self):
self._refresh_active_listener()
if not self._listener_setting_model:
return
if self._doppler_setting:
self._doppler_setting.model.set_value(self._audio.get_doppler_default())
if self._distance_delay_setting:
self._distance_delay_setting.model.set_value(self._audio.get_distance_delay_default())
if self._interaural_delay_setting:
self._interaural_delay_setting.model.set_value(self._audio.get_interaural_delay_default())
if self._concurrent_voices_setting:
self._concurrent_voices_setting.model.set_value(self._audio.get_concurrent_voices())
if self._speed_of_sound_setting:
self._speed_of_sound_setting.model.set_value(self._audio.get_speed_of_sound())
if self._doppler_scale_setting:
self._doppler_scale_setting.model.set_value(self._audio.get_doppler_scale())
if self._doppler_limit_setting:
self._doppler_limit_setting.model.set_value(self._audio.get_doppler_limit())
if self._spatial_timescale_setting:
self._spatial_timescale_setting.model.set_value(self._audio.get_spatial_time_scale())
if self._nonspatial_timescale_setting:
self._nonspatial_timescale_setting.model.set_value(self._audio.get_nonspatial_time_scale())
def _on_metadata_event(self, event):
if event.type == int(omni.usd.audio.EventType.METADATA_CHANGE):
self._refresh()
elif event.type == int(omni.usd.audio.EventType.LISTENER_LIST_CHANGE):
if self._listener_setting_model is not None:
self._listener_setting_model.refresh_items()
elif event.type == int(omni.usd.audio.EventType.ACTIVE_LISTENER_CHANGE):
self._refresh_active_listener()
def _refresh_listeners(self):
list = []
count = self._audio.get_listener_count()
for i in range(0, count):
list.append(str(self._audio.get_listener_by_index(i).GetPath()))
return list
def _set_active_listener(self, index):
# a negative index means the active camera should be used as the listener.
if index < 0:
self._audio.set_active_listener(None)
# all other indices are assumed to be an index into the listener list.
else:
prim = self._audio.get_listener_by_index(index)
if prim is None:
return
self._audio.set_active_listener(prim)
def _add_label(self, label: str):
filter_text = self._filter.name
UsdPropertiesWidgetBuilder._create_label(label, {}, {"highlight": filter_text})
self._any_item_visible = True
def build_items(self):
def item_visible(label: str) -> bool:
return not self._filter or self._filter.matches(label)
with omni.ui.VStack(height=0, spacing=8):
if item_visible("Active Listener"):
with ui.HStack():
self._add_label("Active Listener")
self._listener_setting_model = AudioSettingsWidget.ListenerComboBoxNotifier(
self._refresh_listeners, self._set_active_listener
)
self._listener_setting = omni.ui.ComboBox(
self._listener_setting_model,
tooltip_fn=lambda: self._create_tooltip(
"The path to the active Listener prim in the current USD scene. "
+ "Spatial audio calculations use the active listener as the position "
+ "(and optionally orientation) in 3D space, where the audio is heard "
+ "from. This value must be set to a valid Listener prim if 'Use "
+ "active camera as listener' is unchecked."
),
)
if item_visible("Doppler default"):
with ui.HStack():
self._add_label("Doppler default")
self._doppler_setting_model = AudioSettingsWidget.DefaultsComboBoxNotifier(
self._audio.set_doppler_default
)
self._doppler_setting = omni.ui.ComboBox(
self._doppler_setting_model,
tooltip_fn=lambda: self._create_tooltip(
"This sets the value that Sound prims will use for enableDoppler "
+ "when that parameter is set to 'default'. This also allows the "
+ "setting to be forced on or off on all prims for testing purposes, "
+ "using the 'force on' and 'force off' values"
),
)
if item_visible("Distance delay default"):
with ui.HStack():
self._add_label("Distance delay default")
self._distance_delay_setting_model = AudioSettingsWidget.DefaultsComboBoxNotifier(
self._audio.set_distance_delay_default
)
self._distance_delay_setting = omni.ui.ComboBox(
self._distance_delay_setting_model,
tooltip_fn=lambda: self._create_tooltip(
"The value that Sound prims will use for enableDistanceDelay when "
+ "that parameter is set to 'default'. This also allows the setting "
+ "to be forced on or off on all prims for testing purposes, using "
+ "the 'force on' and 'force off' values"
),
)
if item_visible("Interaural delay default"):
with ui.HStack():
self._add_label("Interaural delay default")
self._interaural_delay_setting_model = AudioSettingsWidget.DefaultsComboBoxNotifier(
self._audio.set_interaural_delay_default
)
self._interaural_delay_setting = omni.ui.ComboBox(
self._interaural_delay_setting_model,
tooltip_fn=lambda: self._create_tooltip(
"The value that Sound prims will use for enableInterauralDelay when "
+ "that parameter is set to 'default'. This also allows the setting "
+ "to be forced on or off on all prims for testing purposes, using "
+ "the 'force on' and 'force off' values"
),
)
if item_visible("Concurrent voices"):
with ui.HStack():
self._add_label("Concurrent voices")
self._concurrent_voices_setting_notifier = AudioSettingsWidget.SlowChangeNotifier(
self._audio.set_concurrent_voices
)
self._concurrent_voices_setting = omni.ui.IntDrag(
self._concurrent_voices_setting_notifier,
min=2,
max=4096,
tooltip_fn=lambda: self._create_tooltip(
"The number of sounds in a scene that can be played concurrently. "
                            + "In a scene where this is set to N and N + 1 sounds are "
                            + "played concurrently, the N + 1th sound will be simulated "
                            + "instead of playing on the audio device. "
+ "The simulated voice will begin playing again when fewer than N "
+ "voices are playing"
),
)
if item_visible("Speed of sound"):
with ui.HStack():
self._add_label("Speed of sound")
self._speed_of_sound_setting_notifier = AudioSettingsWidget.FastChangeNotifier(
self._audio.set_speed_of_sound
)
self._speed_of_sound_setting = omni.ui.FloatDrag(
self._speed_of_sound_setting_notifier,
min=0.0001,
max=float("inf"),
step=1.0,
tooltip_fn=lambda: self._create_tooltip(
"Sets the speed of sound in the medium surrounding the listener "
+ "(typically air). This is measured in meters per second. This would "
+ "typically be adjusted when doing an underwater scene (as an "
+ "example). The speed of sound in dry air at sea level is "
+ "approximately 340.0m/s."
),
)
if item_visible("Doppler scale"):
with ui.HStack():
self._add_label("Doppler scale")
self._doppler_scale_setting_notifier = AudioSettingsWidget.FastChangeNotifier(
self._audio.set_doppler_scale
)
self._doppler_scale_setting = omni.ui.FloatDrag(
self._doppler_scale_setting_notifier,
min=0.0001,
max=float("inf"),
tooltip_fn=lambda: self._create_tooltip(
"The scaler that can exaggerate or lessen the Doppler effect. Setting "
+ "this above 1.0 will exaggerate the Doppler effect. Setting this "
+ "below 1.0 will lessen the Doppler effect."
),
)
if item_visible("Doppler limit"):
with ui.HStack():
self._add_label("Doppler limit")
self._doppler_limit_setting_notifier = AudioSettingsWidget.FastChangeNotifier(
self._audio.set_doppler_limit
)
self._doppler_limit_setting = omni.ui.FloatDrag(
self._doppler_limit_setting_notifier,
min=1.0,
max=float("inf"),
tooltip_fn=lambda: self._create_tooltip(
"A Limit on the maximum Doppler pitch shift that can be applied to "
+ "a playing voice. Since supersonic spatial audio is not handled, a "
+ "maximum frequency shift must be set for prims that move toward the "
+ "listener at or faster than the speed of sound."
),
)
if item_visible("Spatial time scale"):
with ui.HStack():
self._add_label("Spatial time scale")
self._spatial_timescale_setting_notifier = AudioSettingsWidget.FastChangeNotifier(
self._audio.set_spatial_time_scale
)
self._spatial_timescale_setting = omni.ui.FloatDrag(
self._spatial_timescale_setting_notifier,
min=0.0001,
max=float("inf"),
tooltip_fn=lambda: self._create_tooltip(
"The timescale modifier for all spatial voices. Each spatial Sound "
+ "prim multiplies its timeScale attribute by this value. For "
+ "example, setting this to 0.5 will play all spatial sounds at half "
+ "speed and setting this to 2.0 will play all spatial sounds at "
+ "double speed. This affects delay times for the distance delay "
+ "effect. This feature is intended to allow time-dilation to be "
+ "performed with the sound effects in the scene without affecting "
+ "non-spatial elements like the background music."
),
)
if item_visible("Non-spatial time scale"):
with ui.HStack():
self._add_label("Non-spatial time scale")
self._nonspatial_timescale_setting_notifier = AudioSettingsWidget.FastChangeNotifier(
self._audio.set_nonspatial_time_scale
)
self._nonspatial_timescale_setting = omni.ui.FloatDrag(
self._nonspatial_timescale_setting_notifier,
min=0.0001,
max=float("inf"),
tooltip_fn=lambda: self._create_tooltip(
"The timescale modifier for all non-spatial voices. Each non-spatial "
+ "Sound prim multiplies its timeScale attribute by this value. For "
+ "example, setting this to 0.5 will play all non-spatial sounds at "
+ "half speed and setting this to 2.0 will play all non-spatial "
+ "sounds at double speed."
),
)
self._refresh()
| 21,187 | Python | 44.176972 | 113 | 0.539576 |
omniverse-code/kit/exts/omni.kit.property.audio/omni/kit/property/audio/tests/__init__.py | from .test_audio import *
from .test_layer_audio import *
| 58 | Python | 18.66666 | 31 | 0.741379 |
omniverse-code/kit/exts/omni.kit.property.audio/omni/kit/property/audio/tests/test_layer_audio.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
import omni.kit.commands
import omni.kit.test
import omni.ui as ui
from omni.kit import ui_test
from omni.ui.tests.test_base import OmniUiTest
from omni.kit.test_suite.helpers import open_stage, get_test_data_path, select_prims, wait_stage_loading, arrange_windows
class TestLayerAudioWidget(OmniUiTest): # pragma: no cover
# Before running each test
async def setUp(self):
await super().setUp()
await arrange_windows("Layer")
from omni.kit.property.audio.scripts.audio_properties import TEST_DATA_PATH
self._usd_path = TEST_DATA_PATH.absolute()
usd_context = omni.usd.get_context()
test_file_path = self._usd_path.joinpath("audio_test.usda").absolute()
usd_context.open_stage(str(test_file_path))
# After running each test
async def tearDown(self):
await super().tearDown()
async def test_layer_sound_ui(self):
await ui_test.find("Layer").focus()
await ui_test.find("Layer//Frame/**/TreeView[*]").find(f"**/Label[*].text=='Root Layer (Authoring Layer)'").click()
await ui_test.human_delay(50)
| 1,565 | Python | 39.153845 | 123 | 0.714377 |
omniverse-code/kit/exts/omni.kit.property.audio/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.6] - 2022-08-17
### Changes
- Updated golden image due to SdfAssetPath widget change.
## [1.0.5] - 2021-07-16
### Changes
- Fixed audio active listener tooltip color
## [1.0.4] - 2021-02-16
### Changes
- Added UI image test
## [1.0.3] - 2020-12-09
### Changes
- Added extension icon
- Added readme
- Updated preview image
## [1.0.2] - 2020-11-10
### Changes
- Moved audio settings window onto layer property window
## [1.0.1] - 2020-11-03
### Changes
- Moved audio settings window into property window
## [1.0.0] - 2020-10-13
### Changes
- Created
| 657 | Markdown | 18.352941 | 80 | 0.665145 |
omniverse-code/kit/exts/omni.kit.property.audio/docs/README.md | # omni.kit.property.audio
## Introduction
Property window extensions are for viewing and editing Usd Prim Attributes.
## This extension supports editing of these Usd Types:
- UsdMedia.SpatialAudio
- AudioSchema.Sound
- AudioSchema.Listener
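The snippet below is a condensed sketch of how this extension registers those widgets (mirroring `audio_properties.py` in this extension); it is shown for orientation only and is not additional public API.

```python
import omni.kit.window.property as p
from omni.kit.property.usd.usd_property_widget import SchemaPropertiesWidget
from pxr import UsdMedia, OmniAudioSchema

w = p.get_window()
if w:
    # One widget per supported schema; the property window shows the matching
    # group when a prim using that schema is selected.
    w.register_widget("prim", "media", SchemaPropertiesWidget("Media", UsdMedia.SpatialAudio, False))
    w.register_widget("prim", "audio_sound", SchemaPropertiesWidget("Sound", OmniAudioSchema.OmniSound, False))
    w.register_widget("prim", "audio_listener", SchemaPropertiesWidget("Listener", OmniAudioSchema.OmniListener, False))
```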
| 244 | Markdown | 17.846152 | 74 | 0.795082 |
omniverse-code/kit/exts/omni.kit.property.audio/docs/index.rst | omni.kit.property.audio
###########################
Property Audio Values
.. toctree::
:maxdepth: 1
CHANGELOG
| 121 | reStructuredText | 9.166666 | 27 | 0.528926 |
omniverse-code/kit/exts/omni.kit.test_async_rendering/omni/kit/test_async_rendering/__init__.py | from .test_async_rendering import *
| 36 | Python | 17.499991 | 35 | 0.777778 |
omniverse-code/kit/exts/omni.kit.test_async_rendering/omni/kit/test_async_rendering/test_async_rendering.py | import pathlib
import asyncio
import carb
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
import omni.hydratexture
import omni.usd
from typing import Callable
# FIXME: omni.ui.ImageProvider holds the carb.Format conversion routine
import omni.ui
EXTENSION_FOLDER_PATH = pathlib.Path(omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__))
DATA_DIR = EXTENSION_FOLDER_PATH.joinpath("data/tests")
class AsyncRenderingTest(omni.kit.test.AsyncTestCase):
async def setUp(self):
self._settings = carb.settings.acquire_settings_interface()
self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface()
self._usd_context_name = ''
self._usd_context = omni.usd.get_context(self._usd_context_name)
await self._usd_context.new_stage_async()
async def tearDown(self):
print("Tearing down..")
self._usd_context.close_stage()
omni.usd.release_all_hydra_engines(self._usd_context)
self._hydra_texture_factory = None
self._settings = None
wait_iterations = 6
for i in range(wait_iterations):
await omni.kit.app.get_app().next_update_async()
async def test_1_simple_context_attach(self):
renderer = "rtx"
if renderer not in self._usd_context.get_attached_hydra_engine_names():
omni.usd.add_hydra_engine(renderer, self._usd_context)
test_usd_asset = DATA_DIR.joinpath("simple_cubes_mat.usda")
print("Opening '%s'" % (test_usd_asset))
await self._usd_context.open_stage_async(str(test_usd_asset))
app = omni.kit.app.get_app()
self._update_counter = 0
self._render_counter = 0
self._asset_loaded = False
def on_update(event):
self._update_counter += 1
self._app_update_sub = app.get_update_event_stream().create_subscription_to_pop(on_update, name='async rendering test update')
is_async = self._settings.get("/app/asyncRendering")
print("Async is %s" % (is_async))
self._hydra_texture = self._hydra_texture_factory.create_hydra_texture(
"test_viewport",
1280,
720,
self._usd_context_name,
"/test_cam",
renderer,
is_async=is_async
)
def _on_stage_event(event):
ASSETS_LOADED = int(omni.usd.StageEventType.ASSETS_LOADED)
if (self._asset_loaded == False) and (event.type is ASSETS_LOADED):
self._asset_loaded = True
print("Assets loaded. UI frame: %d" % (self._update_counter))
self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop(_on_stage_event, name="stage events")
def on_drawable_changed(event: carb.events.IEvent):
# Renderer counter should start after usd assets are completely loaded in async mode
if self._asset_loaded or (is_async == False):
# +1 because the frame number is zero-based.
self._render_counter = self._hydra_texture.get_frame_info(event.payload['result_handle']).get('frame_number') + 1
print("Rendered %d frames (UI frames drawn: %d)" % (self._render_counter, self._update_counter))
self._drawable_changed_sub = self._hydra_texture.get_event_stream().create_subscription_to_push_by_type(
omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED,
on_drawable_changed,
name='async rendering test drawable update',
)
        print("Waiting for the RTX renderer to load and start drawing")
MAX_WAIT_FRAMES_INIT = 50000
wait_frames_left = MAX_WAIT_FRAMES_INIT
while True:
await app.next_update_async()
if self._render_counter > 0 or wait_frames_left <= 0:
break
wait_frames_left -= 1
print("Waited %d frames before renderer initialized" % (MAX_WAIT_FRAMES_INIT - wait_frames_left))
# Set render mode and settings
self._settings.set("/rtx/rendermode", "PathTracing")
self._settings.set("/rtx/pathtracing/totalSpp", 0)
self._settings.set("/rtx/pathtracing/spp", 256)
self._settings.set("/rtx/pathtracing/clampSpp", 256)
print("Starting render loop")
test_updates_before = self._update_counter
test_renders_before = self._render_counter
wait_frames_left = 10000
while True:
await app.next_update_async()
if self._render_counter - test_renders_before > 10 or wait_frames_left <= 0:
break
wait_frames_left -= 1
self.assertTrue(self._render_counter > 10)
test_updates_delta = self._update_counter - test_updates_before
test_renders_delta = self._render_counter - test_renders_before
updates_renders_ratio = test_updates_delta / test_renders_delta
updates_renders_difference = test_updates_delta - test_renders_delta
print("Updates: %d, renders: %d, difference %d, ratio %3.2f" % (test_updates_delta, test_renders_delta, updates_renders_difference, updates_renders_ratio))
# Verify results, this depends on the Async mode
if is_async:
            # When Async is On - we expect many more UI updates than render delegate renders
# R525+ drivers showing 3X+ UI draws than 10X+ in comparison to R470.
# OM-78715: lowering threshold even further since it occasionally fails on IPP agents.
self.assertTrue(updates_renders_ratio > 2.3)
else:
# When Async is Off - we expect UI updates come in a lockstep with render delegate renders
# Add some leeway for potential frame offsetting
self.assertTrue(updates_renders_ratio < 1.9)
NUM_FRAMES_IN_FLIGHT = 3
# The UI should be no more than NUM_FRAMES_IN_FLIGHT + 1 frames ahead of the renderer.
self.assertTrue(0 <= updates_renders_difference and updates_renders_difference <= NUM_FRAMES_IN_FLIGHT + 1)
self._drawable_changed_sub = None
self._hydra_texture = None
self._app_update_sub = None
self._stage_event_sub = None
| 6,284 | Python | 41.181208 | 163 | 0.635742 |
omniverse-code/kit/exts/omni.audioplayer/config/extension.toml | [package]
title = "Kit Audio Player"
category = "Audio"
feature = true
version = "0.2.0"
description = "An audio player API which is available from python and C++"
detailedDescription = """This is an audio player that's intended to be used for playing back audio assets.
The audio player only offers a few voices for the entire process, so this is
mainly suitable for tasks such as asset previewing.
For audio tasks that require more advanced audio functionality, carb.audio is
recommended.
This API is available in python and C++.
See omni.kit.window.audioplayer for an example use of this API.
"""
authors = ["NVIDIA"]
keywords = ["audio", "playback"]
[dependencies]
"carb.audio" = {}
"omni.usd.core" = {}
"omni.kit.audiodeviceenum" = {}
"omni.client" = {} # needed for carb.datasource-omniclient.plugin
[[native.plugin]]
path = "bin/*.plugin"
[[python.module]]
name = "omni.audioplayer"
[[test]]
dependencies = [
"omni.kit.test_helpers_gfx",
]
unreliable = true
| 975 | TOML | 26.11111 | 106 | 0.725128 |
omniverse-code/kit/exts/omni.audioplayer/omni/audioplayer/__init__.py | """
This module contains bindings to the C++ omni::audio::IAudioPlayer interface.
This provides functionality for playing sound assets. This is intended only
as a basic audio player interface for previewing assets. This is not intended
to play sounds in a USD stage or in the Kit UI. For sounds in the USD stage,
the omni.usd.audio interface should be used instead. For UI sounds, the
omni.kit.uiaudio interface should be used.
Sound files may be in RIFF/WAV, Ogg, or FLAC format.
Data in the sound files may use 8, 16, 24, or 32 bit integer samples,
or 32 bit floating point samples. Channel counts may be from 1 to 64.
If more channels of data are provided than the audio device can play,
some channels will be blended together automatically.
"""
from ._audioplayer import *
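# The sketch below is illustrative only and not part of the shipped module: it shows the
# intended calling pattern using the APIs documented above. The asset path is whatever
# RIFF/WAV, Ogg, or FLAC file the caller supplies.
def _example_usage(path):
    # Create a player, subscribe to its event stream, then start playback.
    player = create_audio_player()
    def _on_event(event):
        if event.type == int(CallbackType.ENDED):
            print("playback finished")
    subscription = player.get_event_stream().create_subscription_to_pop(_on_event)
    player.play_sound(path)
    # Events arrive asynchronously, so keep the subscription and player referenced while
    # the sound is in flight, and drop the subscription before releasing the player.
    return player, subscription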
| 872 | Python | 47.499997 | 86 | 0.694954 |
omniverse-code/kit/exts/omni.audioplayer/omni/audioplayer/_audio.pyi | """
This module contains bindings to the C++ omni::audio::IAudioPlayer interface.
This provides functionality for playing sound assets. This is intended only
as a basic audio player interface for previewing assets. This is not intended
to play sounds in a USD stage or in the Kit UI. For sounds in the USD stage,
the omni.usd.audio interface should be used instead. For UI sounds, the
omni.kit.uiaudio interface should be used.
Sound files may be in RIFF/WAV, Ogg, or FLAC format.
Data in the sound files may use 8, 16, 24, or 32 bit integer samples,
or 32 bit floating point samples. Channel counts may be from 1 to 64.
If more channels of data are provided than the audio device can play,
some channels will be blended together automatically.
"""
import omni.audioplayer._audio
import typing
import carb._carb
import carb.events._events
__all__ = [
"AudioPlayer",
"CallbackType",
"create_audio_player"
]
class AudioPlayer():
"""
An individual audio player instance.
This must be created in order to have something to play sounds from.
"""
def draw_waveform(self, arg0: int, arg1: int, arg2: carb._carb.Float4, arg3: carb._carb.Float4) -> typing.List[int]:
"""
        Render the waveform to an image buffer.
The functionality of writing to a file is a temporary workaround.
This will eventually be changed to output a memory buffer.
Args:
player The player whose image will be rendered.
width The width of the output image, in pixels.
height The height of the output image, in pixels.
fgColor The foreground color in normalized RGBA color.
bgColor The background color in normalized RGBA color.
Returns:
A list containing the raw RGBA8888 values for the image.
An empty array on failure.
"""
def draw_waveform_to_file(self, arg0: str, arg1: int, arg2: int, arg3: carb._carb.Float4, arg4: carb._carb.Float4) -> bool:
"""
        Render the waveform to an image file.
The functionality of writing to a file is a temporary workaround.
This will eventually be changed to output a memory buffer.
Args:
player The player whose image will be rendered.
filename The name for the output image file.
width The width of the output image, in pixels.
height The height of the output image, in pixels.
fgColor The foreground color in normalized RGBA color.
bgColor The background color in normalized RGBA color.
Returns:
True if the operation was successful.
False if the file could not be generated.
"""
def get_event_stream(self) -> carb.events._events.IEventStream: ...
def get_play_cursor(self) -> float:
"""
Get the play cursor position in the currently playing sound.
Args:
No arguments
Returns:
The play cursor position in the currently playing sound in seconds.
0.0 if there is no playing sound.
"""
def get_sound_length(self) -> float:
"""
Get the length of the currently playing sound.
Args:
No arguments
Returns:
The length of the currently playing sound in seconds.
0.0 if there is no playing sound.
"""
def load_sound(self, path: str) -> None: ...
def pause_sound(self) -> None:
"""
Pause playback of a sound on a specific audio player.
Each player has only one voice, so this call pauses that voice.
Args:
No arguments
Returns:
No return value.
"""
def play_sound(self, path: str, startTime: float = 0.0) -> None: ...
def set_play_cursor(self, startTime: float = 0.0) -> None: ...
def stop_sound(self) -> None:
"""
Immediately stops the playback on a specific audio player
Each player has only one voice, so this call stops that voice.
Args:
No arguments
Returns:
No return value.
"""
def unpause_sound(self) -> None:
"""
Unpause playback of a sound on a specific audio player.
Each player has only one voice, so this call unpauses that voice.
If no voice is paused, this does nothing.
Args:
No arguments
Returns:
No return value.
"""
pass
class CallbackType():
"""
Members:
LOADED
ENDED
"""
def __init__(self, arg0: int) -> None: ...
def __int__(self) -> int: ...
@property
def name(self) -> str:
"""
(self: handle) -> str
:type: str
"""
ENDED: omni.audioplayer._audio.CallbackType # value = CallbackType.ENDED
LOADED: omni.audioplayer._audio.CallbackType # value = CallbackType.LOADED
__members__: dict # value = {'LOADED': CallbackType.LOADED, 'ENDED': CallbackType.ENDED}
pass
def create_audio_player(*args, **kwargs) -> typing.Any:
pass
| 5,779 | unknown | 33.819277 | 128 | 0.549057 |
omniverse-code/kit/exts/omni.audioplayer/omni/audioplayer/_audioplayer.pyi | """
This module contains bindings to the C++ omni::audio::IAudioPlayer interface.
This provides functionality for playing sound assets. This is intended only
as a basic audio player interface for previewing assets. This is not intended
to play sounds in a USD stage or in the Kit UI. For sounds in the USD stage,
the omni.usd.audio interface should be used instead. For UI sounds, the
omni.kit.uiaudio interface should be used.
Sound files may be in RIFF/WAV, Ogg, or FLAC format.
Data in the sound files may use 8, 16, 24, or 32 bit integer samples,
or 32 bit floating point samples. Channel counts may be from 1 to 64.
If more channels of data are provided than the audio device can play,
some channels will be blended together automatically.
"""
from __future__ import annotations
import omni.audioplayer._audioplayer
import typing
import carb._carb
import carb.events._events
__all__ = [
"AudioPlayer",
"CallbackType",
"FLAG_FORCE_RELOAD",
"FLAG_RAW_DATA",
"RawPcmFormat",
"create_audio_player"
]
class AudioPlayer():
"""
An individual audio player instance.
This must be created in order to have something to play sounds from.
"""
def draw_waveform(self, width: int, height: int, fgColor: carb._carb.Float4, bgColor: carb._carb.Float4) -> typing.List[int]:
"""
        Render the waveform to an image buffer.
The functionality of writing to a file is a temporary workaround.
This will eventually be changed to output a memory buffer.
Args:
width The width of the output image, in pixels.
height The height of the output image, in pixels.
fgColor The foreground color in normalized RGBA color.
bgColor The background color in normalized RGBA color.
Returns:
A list containing the raw RGBA8888 values for the image.
An empty array on failure.
"""
def draw_waveform_to_file(self, filename: str, width: int, height: int, fgColor: carb._carb.Float4, bgColor: carb._carb.Float4) -> bool:
"""
        Render the waveform to an image file.
The functionality of writing to a file is a temporary workaround.
This will eventually be changed to output a memory buffer.
Args:
filename The name for the output image file.
width The width of the output image, in pixels.
height The height of the output image, in pixels.
fgColor The foreground color in normalized RGBA color.
bgColor The background color in normalized RGBA color.
Returns:
True if the operation was successful.
False if the file could not be generated.
"""
def get_event_stream(self) -> carb.events._events.IEventStream:
"""
Get a reference to the IEventStream for this audio player instance
The following event types will be sent:
CallbackType.LOADED when the requested audio has finished loading.
CallbackType.ENDED when the requested audio has finished playing.
        You should remove your event subscription before shutting down your
        audio player: events are sent asynchronously, so you could receive an
        event after your player has been destroyed.
Returns:
The IEventStream for the audio player.
"""
def get_play_cursor(self) -> float:
"""
Get the play cursor position in the currently playing sound.
Args:
No arguments
Returns:
The play cursor position in the currently playing sound in seconds.
0.0 if there is no playing sound.
"""
def get_sound_length(self) -> float:
"""
Get the length of the currently playing sound.
Args:
No arguments
Returns:
The length of the currently playing sound in seconds.
0.0 if there is no playing sound.
"""
def load_sound(self, path: str) -> None:
"""
Load a sound asset for future playback.
This will fetch an asset so that the next call to play_sound()
can begin playing the sound immediately.
This will also stop the currently playing sound, if any.
This function will also cause get_sound_length() to begin
returning the length of this sound.
This will send a CallbackType.LOADED when the asset has loaded.
Args:
path: The string path to the sound asset to play.
This must be an absolute file path to a sound asset.
Returns:
No return value.
"""
def load_sound_in_memory(self, name: str, bytes: str, frame_rate: int = 0, channels: int = 0, format: RawPcmFormat = RawPcmFormat.PCM_16, flags: int = 0) -> None:
"""
Loads a sound asset from a blob in memory for future playback.
The sound asset will not start playing when the asset has loaded.
It will however be cached so that a later request with the same
data and name can play the sound. The asset is given as a data
blob in memory as a `bytes` object. The data may either be a full
file loaded into memory or just raw PCM data. If raw PCM data is
given, additional parameters must be used to specify the sample
type, channel count, and frame rate so that the data can be
successfully decoded.
Args:
name: The name to give to this asset. This is only used
for caching purposes so the sound can be played
multiple times without having to reload it.
This must not be `None` or an empty string otherwise
the operation will just be ignored.
bytes: The `bytes` object that contains the data for the
sound. This is treated as a single binary blob of
data. This may not be `None`.
frame_rate: The frame rate to play the sound data at. If the
binary blob contains format information, this is
ignored. This is only needed if the `FLAG_RAW_DATA`
flag is specified in `flags`.
channels: The number of channels in each frame of the sound
                      data. If the binary blob contains format information,
this can be ignored. This is only needed if the
`FLAG_RAW_DATA` flag is specified in `flags`.
format: The format of each sample of data. This must be
one of the `RawPcmFormat` values. This is only used
if the blob does not already contain format information.
flags: Flags to control how the sound data is loaded and cached.
If the data blob does not contain format information and
is just raw PCM sample data, the `FLAG_RAW_DATA` flag must be
specified and the `frame_rate`, `channels`, and `format`
arguments must also be given. Without these, the sound
data cannot be properly loaded.
Returns:
No return value.
"""
def pause_sound(self) -> None:
"""
Pause playback of a sound on a specific audio player.
Each player has only one voice, so this call pauses that voice.
Args:
No arguments
Returns:
No return value.
"""
def play_sound(self, path: str, startTime: float = 0.0) -> None:
"""
Play a sound asset.
The sound asset will start playing asynchronously when the asset has
loaded.
Args:
path: The string path to the sound asset to play.
This must be an absolute file path to a sound asset.
startTime: The time offset, in seconds, to begin playing this
sound at.
Returns:
No return value.
"""
@staticmethod
def play_sound_in_memory(*args, **kwargs) -> typing.Any:
"""
Play a sound asset from data in memory.
The sound asset will start playing asynchronously when the asset has
loaded. The asset is given as a data blob in memory as a `bytes`
object. The data may either be a full file loaded into memory or
just raw PCM data. If raw PCM data is given, additional parameters
must be used to specify the sample type, channel count, and frame
rate so that the data can be successfully decoded.
Args:
name: The name to give to this asset. This is only used
for caching purposes so the sound can be played
multiple times without having to reload it or so
it can be preloaded with load_sound_in_memory().
Set this to `None` or an empty string to disable
caching the sound and just play it. Note that if
the `FLAG_FORCE_RELOAD` flag is used, the cache check
will be ignored and the sound will be loaded and
cached again.
bytes: The `bytes` object that contains the data for the
sound. This is treated as a single binary blob of
data. This may not be `None`.
frame_rate: The frame rate to play the sound data at. If the
binary blob contains format information, this is
ignored. This is only needed if the `FLAG_RAW_DATA`
flag is specified in `flags`.
channels: The number of channels in each frame of the sound
                      data. If the binary blob contains format information,
this can be ignored. This is only needed if the
`FLAG_RAW_DATA` flag is specified in `flags`.
format: The format of each sample of data. This must be
one of the `RawPcmFormat` values. This is only used
if the blob does not already contain format information.
startTime: The time offset, in seconds, to begin playing this
sound at.
flags: Flags to control how the sound data is loaded and cached.
If the data blob does not contain format information and
is just raw PCM sample data, the `FLAG_RAW_DATA` flag must be
specified and the `frame_rate`, `channels`, and `format`
arguments must also be given. Without these, the sound
data cannot be properly loaded.
Returns:
No return value.
"""
def set_play_cursor(self, startTime: float = 0.0) -> None:
"""
Set the play cursor position in the currently playing sound.
Args:
startTime: The time to set the cursor to in the sound.
Returns:
No return value.
"""
def stop_sound(self) -> None:
"""
Immediately stops the playback on a specific audio player
Each player has only one voice, so this call stops that voice.
Args:
No arguments
Returns:
No return value.
"""
def unpause_sound(self) -> None:
"""
Unpause playback of a sound on a specific audio player.
Each player has only one voice, so this call unpauses that voice.
If no voice is paused, this does nothing.
Args:
No arguments
Returns:
No return value.
"""
pass
class CallbackType():
"""
Members:
LOADED
ENDED
"""
def __eq__(self, other: object) -> bool: ...
def __getstate__(self) -> int: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> None: ...
@property
def name(self) -> str:
"""
:type: str
"""
@property
def value(self) -> int:
"""
:type: int
"""
ENDED: omni.audioplayer._audioplayer.CallbackType # value = <CallbackType.ENDED: 1>
LOADED: omni.audioplayer._audioplayer.CallbackType # value = <CallbackType.LOADED: 0>
__members__: dict # value = {'LOADED': <CallbackType.LOADED: 0>, 'ENDED': <CallbackType.ENDED: 1>}
pass
class RawPcmFormat():
"""
Members:
PCM_8 : Provided raw 8-bit unsigned PCM data.
PCM_16 : Provided raw 16-bit signed PCM data.
PCM_32 : Provided raw 32-bit signed PCM data.
PCM_FLOAT : Provided raw 32-bit floating point PCM data.
"""
def __eq__(self, other: object) -> bool: ...
def __getstate__(self) -> int: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> None: ...
@property
def name(self) -> str:
"""
:type: str
"""
@property
def value(self) -> int:
"""
:type: int
"""
PCM_16: omni.audioplayer._audioplayer.RawPcmFormat # value = <RawPcmFormat.PCM_16: 1>
PCM_32: omni.audioplayer._audioplayer.RawPcmFormat # value = <RawPcmFormat.PCM_32: 2>
PCM_8: omni.audioplayer._audioplayer.RawPcmFormat # value = <RawPcmFormat.PCM_8: 0>
PCM_FLOAT: omni.audioplayer._audioplayer.RawPcmFormat # value = <RawPcmFormat.PCM_FLOAT: 3>
__members__: dict # value = {'PCM_8': <RawPcmFormat.PCM_8: 0>, 'PCM_16': <RawPcmFormat.PCM_16: 1>, 'PCM_32': <RawPcmFormat.PCM_32: 2>, 'PCM_FLOAT': <RawPcmFormat.PCM_FLOAT: 3>}
pass
def create_audio_player(*args, **kwargs) -> typing.Any:
pass
FLAG_FORCE_RELOAD = 2
FLAG_RAW_DATA = 1
| 14,613 | unknown | 39.821229 | 180 | 0.581263 |
omniverse-code/kit/exts/omni.audioplayer/omni/audioplayer/tests/__init__.py | from .test_audio_player import * # pragma: no cover
| 54 | Python | 17.333328 | 52 | 0.703704 |
omniverse-code/kit/exts/omni.audioplayer/omni/audioplayer/tests/test_audio_player.py | import pathlib
import time
import pathlib
from PIL import Image
import carb.tokens
import omni.audioplayer
import os
import omni.kit.test
import omni.kit.test_helpers_gfx.compare_utils
OUTPUTS_DIR = pathlib.Path(omni.kit.test.get_test_output_path())
# This test needs to come first alphabetically: the bug it tests for didn't trigger
# if the plugin was already loaded, and carb plugins never unload during normal kit
# operations.
class Test0AudioPlayerHang(omni.kit.test.AsyncTestCase): # pragma: no cover
def setUp(self):
extension_path = carb.tokens.get_tokens_interface().resolve("${omni.audioplayer}")
self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
self._player = omni.audioplayer.create_audio_player()
self.assertIsNotNone(self._player)
def tearDown(self):
# if you don't do this, the player never gets destroyed until plugin unload
self._player = None
def test_unload_during_load(self):
self._player.load_sound(str(self._test_path.joinpath("long.oga")))
# Finish the test so the player will close and the plugin will unload.
# At one point, this caused a GIL deadlock in ~AudioPlayer().
# This test should verify that this deadlock no longer happens.
class TestAudioPlayer(omni.kit.test.AsyncTestCase): # pragma: no cover
def setUp(self):
self._player = omni.audioplayer.create_audio_player()
self.assertIsNotNone(self._player)
extension_path = carb.tokens.get_tokens_interface().resolve("${omni.audioplayer}")
self._test_path = pathlib.Path(extension_path).joinpath("data").joinpath("tests").absolute()
self._golden_path = self._test_path.joinpath("golden")
def tearDown(self):
self._player = None
def test_basic(self):
events = []
expected_load_result = True
def event_callback(event):
nonlocal events
nonlocal expected_load_result
events.append(event.type)
if event.type == int(omni.audioplayer.CallbackType.LOADED):
self.assertEqual(event.payload["success"], expected_load_result)
sub = self._player.get_event_stream().create_subscription_to_pop(event_callback)
# load the sound and verify that a loaded event shows up
last = len(events)
self._player.load_sound(str(self._test_path.joinpath("long.oga")))
i = 0
while len(events) == last:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# check that get_sound_length() works
self.assertLess(abs(self._player.get_sound_length() - 120.0), 0.001)
# play the sound and verify that a load event is sent
last = len(events)
self._player.play_sound(str(self._test_path.joinpath("long.oga")))
# it should load quickly since it's already been loaded once
time.sleep(1)
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# pause and unpause it a few times
last = len(events)
self._player.pause_sound()
time.sleep(1)
self._player.unpause_sound()
time.sleep(1)
self._player.pause_sound()
time.sleep(1)
# nothing should have been sent
self.assertEqual(len(events), last)
self.assertGreater(self._player.get_play_cursor(), 0.0)
# modify the play cursor, this should send an ended then loaded event
last = len(events)
self._player.set_play_cursor(60.0)
i = 0
while len(events) <= last + 1:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 2)
self.assertEqual(events[-2], int(omni.audioplayer.CallbackType.ENDED))
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
self.assertGreaterEqual(self._player.get_play_cursor(), 60.0)
# stop it and verify that the ended event is sent
last = len(events)
self._player.stop_sound()
i = 0
while len(events) == last:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.ENDED))
# play the same sound again and see if events are sent
last = len(events)
self._player.play_sound(str(self._test_path.joinpath("long.oga")))
i = 0
while len(events) == last:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# play again to see the ended and loaded event
last = len(events)
self._player.play_sound(str(self._test_path.joinpath("long.oga")))
i = 0
while len(events) <= last + 1:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 2)
self.assertEqual(events[-2], int(omni.audioplayer.CallbackType.ENDED))
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# Note: there is no test for callbacks with play_sound() 2 times back to back
# because whether callbacks for both calls are sent is dependent on timing.
# The less recent sound playback may just be cancelled.
def test_draw_waveform(self):
# toggle in case you want to regenerate waveforms
GENERATE_GOLDEN_IMAGES = False
W = 256
H = 256
loaded = False
def event_callback(event):
if event.type == int(omni.audioplayer.CallbackType.LOADED):
nonlocal loaded
loaded = True
sub = self._player.get_event_stream().create_subscription_to_pop(event_callback)
self._player.load_sound(str(self._test_path.joinpath("short-2ch.oga")))
i = 0
while not loaded:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self._player.draw_waveform_to_file(str(OUTPUTS_DIR.joinpath("waveform.png")), W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
if not GENERATE_GOLDEN_IMAGES:
self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
pathlib.Path(OUTPUTS_DIR).joinpath("waveform.png"),
self._golden_path.joinpath("waveform.png"),
pathlib.Path(OUTPUTS_DIR).joinpath("waveform.png.diff.png")),
0.1)
raw = self._player.draw_waveform(W, H, [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0])
self.assertEqual(len(raw), W * H * 4)
with Image.frombytes("RGBX", (W, H), bytes(raw), 'raw') as img:
img.convert("RGB").save(str(OUTPUTS_DIR.joinpath("waveform.raw.png")))
if not GENERATE_GOLDEN_IMAGES:
# this image is slightly different than the original and this compare
# helper function cannot handle even a slight difference, so we need to
# have a separate golden image for this
self.assertLess(omni.kit.test_helpers_gfx.compare_utils.compare(
pathlib.Path(OUTPUTS_DIR).joinpath("waveform.raw.png"),
self._golden_path.joinpath("waveform.raw.png"),
pathlib.Path(OUTPUTS_DIR).joinpath("waveform.raw.png.diff.png")),
0.1)
def _load_and_play_data_in_memory(self, filename, frame_rate, channels, format, flags = 0, expected_length = 2.0):
events = []
expected_load_result = True
def event_callback(event):
nonlocal events
nonlocal expected_load_result
events.append(event.type)
if event.type == int(omni.audioplayer.CallbackType.LOADED):
self.assertEqual(event.payload["success"], expected_load_result)
# subscribe to events from the player.
sub = self._player.get_event_stream().create_subscription_to_pop(event_callback)
# load the test file into a bytes object.
file_data = pathlib.Path(self._test_path.joinpath(filename)).read_bytes()
# load the sound and verify that a 'loaded' event shows up.
last = len(events)
self._player.load_sound_in_memory(filename, file_data, frame_rate, channels, format, flags)
i = 0
while len(events) == last:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# check that get_sound_length() works.
self.assertLess(abs(self._player.get_sound_length() - expected_length), 0.001)
# play the sound and verify that a load event is sent.
last = len(events)
self._player.play_sound_in_memory(filename, file_data, frame_rate, channels, format, 0.0, flags)
# it should load quickly since it's already been loaded once.
time.sleep(1)
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.LOADED))
# stop it and verify that the ended event is sent.
last = len(events)
self._player.stop_sound()
i = 0
while len(events) == last:
time.sleep(0.001)
if i > 30000:
raise Exception("hang while loading a test asset")
i += 1
self.assertEqual(len(events), last + 1)
self.assertEqual(events[-1], int(omni.audioplayer.CallbackType.ENDED))
# clean up.
sub = None
def test_data_in_memory(self):
self._load_and_play_data_in_memory("tone-440-8.wav", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_8)
self._load_and_play_data_in_memory("tone-440-16.wav", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_16)
self._load_and_play_data_in_memory("tone-440-32.wav", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_32)
self._load_and_play_data_in_memory("tone-440-float.wav", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_FLOAT)
def test_raw_data_in_memory(self):
self._load_and_play_data_in_memory("tone-440-8.raw", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_8, omni.audioplayer.FLAG_RAW_DATA)
self._load_and_play_data_in_memory("tone-440-16.raw", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_16, omni.audioplayer.FLAG_RAW_DATA)
self._load_and_play_data_in_memory("tone-440-32.raw", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_32, omni.audioplayer.FLAG_RAW_DATA)
self._load_and_play_data_in_memory("tone-440-float.raw", 44100, 1, omni.audioplayer.RawPcmFormat.PCM_FLOAT, omni.audioplayer.FLAG_RAW_DATA)
| 11,500 | Python | 39.354386 | 147 | 0.61913 |
omniverse-code/kit/exts/omni.rtx.shadercache.d3d12/omni/rtx/shadercache/d3d12/__init__.py | from .shadercache_d3d12 import ShaderCacheConfig
| 49 | Python | 23.999988 | 48 | 0.877551 |
omniverse-code/kit/exts/omni.kit.clipboard/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
category = "Internal"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
# The title and description fields are primarily for displaying extension info in the UI
title = "Clipboard Utilities"
description="Cross-platform clipboard utilities for copy & paste functionality."
# URL of the extension source repository.
repository = ""
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Keywords for the extension
keywords = ["kit", "clipboard", "copy", "paste"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# Main python module this extension provides, it will be publicly available as "import omni.kit.clipboard".
[[python.module]]
name = "omni.kit.clipboard"
[[test]]
dependencies = [
"omni.ui",
"omni.kit.ui_test",
]
| 1,168 | TOML | 28.974358 | 107 | 0.734589 |
omniverse-code/kit/exts/omni.kit.clipboard/omni/kit/clipboard/__init__.py |
def _clipboard_prep():
import omni.appwindow
import carb.windowing
_app_window_factory = omni.appwindow.acquire_app_window_factory_interface()
_app_window = _app_window_factory.get_default_window()
_carb_window = _app_window.get_window()
_windowing = carb.windowing.acquire_windowing_interface()
return _windowing, _carb_window
def copy(value_str):
"""
Platform-independent copy functionality.
Args:
value_str (str): String to put in the clipboard.
"""
_windowing, _carb_window = _clipboard_prep()
_windowing.set_clipboard(_carb_window, value_str)
def paste():
"""
Platform-independent paste functionality.
Returns:
str: Value pulled from the clipboard.
"""
_windowing, _carb_window = _clipboard_prep()
value_str = _windowing.get_clipboard(_carb_window)
return value_str
| 877 | Python | 23.388888 | 79 | 0.667047 |
omniverse-code/kit/exts/omni.kit.clipboard/omni/kit/clipboard/tests/__init__.py | from .test_clipboard import *
| 30 | Python | 14.499993 | 29 | 0.766667 |
omniverse-code/kit/exts/omni.kit.clipboard/omni/kit/clipboard/tests/test_clipboard.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from omni.ui.tests.test_base import OmniUiTest
from omni.kit import ui_test
from .. import copy, paste
class ClipboardTest(OmniUiTest):
async def test_clipboard(self):
await self.create_test_window(width=400, height=40)
human_delay_speed = 2
await ui_test.wait_n_updates(human_delay_speed)
str_to_copy = "Testing, testing, 1, 2, 3."
copy(str_to_copy)
pasted_str = paste()
self.assertEqual(str_to_copy, pasted_str)
| 917 | Python | 31.785713 | 77 | 0.720829 |
omniverse-code/kit/exts/omni.kit.clipboard/docs/README.md | # omni.kit.clipboard
## Introduction
This extension contains basic clipboard utilities like copy and paste, that are meant to be
cross-platform.
| 147 | Markdown | 20.142854 | 91 | 0.795918 |
omniverse-code/kit/exts/omni.kit.usd.collect/docs/index.rst | omni.kit.usd.collect
#######################
Python extension to collect all dependencies of a USD.
| 101 | reStructuredText | 19.399996 | 54 | 0.613861 |
omniverse-code/kit/exts/omni.activity.freeze_monitor/config/extension.toml | [package]
title = "Omni Activity Freeze Monitor"
category = "Telemetry"
version = "1.0.1"
description = "Freeze Window"
authors = ["NVIDIA"]
keywords = ["activity"]
changelog = "docs/CHANGELOG.md"
[[python.module]]
name = "omni.activity.freeze_monitor"
[dependencies]
"omni.activity.core" = {}
[[native.plugin]]
path = "bin/*.plugin"
[settings]
exts."omni.activity.freeze_monitor".threshold_ms = 1000
exts."omni.activity.freeze_monitor".after_ms = 1000
exts."omni.activity.freeze_monitor".force = false
exts."omni.activity.freeze_monitor".wheel_image = "${kit}/exts/omni.activity.freeze_monitor/icons/kit.png"
[[test]]
args = [
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--no-window"
]
dependencies = [
"omni.kit.mainwindow",
"omni.kit.renderer.capture",
"omni.ui",
"omni.usd",
]
| 848 | TOML | 21.342105 | 106 | 0.686321 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/style.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Dict
from omni.ui import color as cl
COLOR_X = 0xFF6060AA
COLOR_Y = 0xFF76A371
COLOR_Z = 0xFFA07D4F
COLOR_SCREEN = 0x8AFEF68E
COLOR_FREE = 0x40404040
COLOR_FOCAL = 0xE6CCCCCC
def get_default_style():
return {
"Translate.Axis::x": {"color": COLOR_X},
"Translate.Axis::y": {"color": COLOR_Y},
"Translate.Axis::z": {"color": COLOR_Z},
"Translate.Plane::x_y": {"color": COLOR_Z},
"Translate.Plane::y_z": {"color": COLOR_X},
"Translate.Plane::z_x": {"color": COLOR_Y},
"Translate.Point": {"color": COLOR_SCREEN, "type": "point"},
"Translate.Focal": {"color": COLOR_FOCAL, "visible": False},
"Rotate.Arc::x": {"color": COLOR_X},
"Rotate.Arc::y": {"color": COLOR_Y},
"Rotate.Arc::z": {"color": COLOR_Z},
"Rotate.Arc::screen": {"color": COLOR_SCREEN},
"Rotate.Arc::free": {"color": COLOR_FREE},
"Scale.Axis::x": {"color": COLOR_X},
"Scale.Axis::y": {"color": COLOR_Y},
"Scale.Axis::z": {"color": COLOR_Z},
"Scale.Plane::x_y": {"color": COLOR_Z},
"Scale.Plane::y_z": {"color": COLOR_X},
"Scale.Plane::z_x": {"color": COLOR_Y},
"Scale.Point": {"color": COLOR_SCREEN},
}
def get_default_toolbar_style():
return {
"CollapsableFrame": {
"background_color": 0x00,
"secondary_color": 0x00,
"border_color": 0x0,
"border_width": 0,
"padding": 0,
"margin_height": 5,
"margin_width": 0,
},
"CollapsableFrame:hovered": {"secondary_color": 0x00},
"CollapsableFrame:pressed": {"secondary_color": 0x00},
"Line": {"color": 0xFFA1A1A1, "border_width": 2},
"Rectangle": {"background_color": 0x8F000000},
}
def abgr_to_color(abgr: int) -> cl:
# cl in rgba order
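    # e.g. abgr_to_color(0xFF6060AA) -> cl(~0.667, ~0.376, ~0.376, 1.0) (worked example for illustration)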
return cl((abgr & 0xFF) / 255, (abgr >> 8 & 0xFF) / 255, (abgr >> 16 & 0xFF) / 255, (abgr >> 24 & 0xFF) / 255)
# style is a nested dict, so we can't simply call to_style.update(from_style)
def update_style(to_style: Dict, from_style: Dict):
if from_style:
for k, v in from_style.items():
if isinstance(v, dict):
to_style[k] = update_style(to_style.get(k, {}), v)
else:
to_style[k] = v
return to_style
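# A worked example for illustration (hypothetical style keys):
#   update_style({"Axis": {"color": 1}}, {"Axis": {"size": 2}, "Point": {"color": 3}})
#   returns {"Axis": {"color": 1, "size": 2}, "Point": {"color": 3}}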
| 2,774 | Python | 34.126582 | 114 | 0.583273 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/extension.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ext
from .style import get_default_style
SHOW_EXAMPLE = False
class TransformManipulatorExt(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
if SHOW_EXAMPLE:
from .example import SimpleManipulatorExample
self._example = SimpleManipulatorExample()
def on_shutdown(self):
if SHOW_EXAMPLE:
self._example.destroy()
self._example = None
| 1,015 | Python | 32.866666 | 119 | 0.733005 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/subscription.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Callable
class Subscription:
def __init__(self, unsubscribe_fn: Callable):
self._unsubscribe_fn = unsubscribe_fn
def __del__(self):
self.unsubscribe()
def unsubscribe(self):
if self._unsubscribe_fn:
self._unsubscribe_fn()
self._unsubscribe_fn = None
| 762 | Python | 30.791665 | 76 | 0.720472 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/settings_listener.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import weakref
from enum import Enum, auto
from typing import Callable
import carb.dictionary
import carb.settings
from .settings_constants import c
from .subscription import Subscription
class Listener:
def __init__(self):
self._callback_id = 0
self._callbacks = {}
def add_listener(self, callback: Callable) -> int:
self._callback_id += 1
self._callbacks[self._callback_id] = callback
return self._callback_id
def remove_listener(self, id: int):
self._callbacks.pop(id)
def subscribe_listener(self, callback: Callable) -> Subscription:
id = self.add_listener(callback)
return Subscription(
lambda listener=weakref.ref(self), id=id: listener().remove_listener(id) if listener() else None
)
def _invoke_callbacks(self, *args, **kwargs):
for cb in self._callbacks.values():
cb(*args, **kwargs)
class OpSettingsListener(Listener):
class CallbackType(Enum):
OP_CHANGED = auto()
TRANSLATION_MODE_CHANGED = auto()
ROTATION_MODE_CHANGED = auto()
def __init__(self) -> None:
super().__init__()
self._dict = carb.dictionary.get_dictionary()
self._settings = carb.settings.get_settings()
self._op_sub = self._settings.subscribe_to_node_change_events(c.TRANSFORM_OP_SETTING, self._on_op_changed)
self.selected_op = self._settings.get(c.TRANSFORM_OP_SETTING)
self._translation_mode_sub = self._settings.subscribe_to_node_change_events(
c.TRANSFORM_MOVE_MODE_SETTING, self._on_translate_mode_changed
)
self.translation_mode = self._settings.get(c.TRANSFORM_MOVE_MODE_SETTING)
self._rotation_mode_sub = self._settings.subscribe_to_node_change_events(
c.TRANSFORM_ROTATE_MODE_SETTING, self._on_rotation_mode_changed
)
self.rotation_mode = self._settings.get(c.TRANSFORM_ROTATE_MODE_SETTING)
def __del__(self):
self.destroy()
def destroy(self):
if self._op_sub:
self._settings.unsubscribe_to_change_events(self._op_sub)
self._op_sub = None
if self._translation_mode_sub:
self._settings.unsubscribe_to_change_events(self._translation_mode_sub)
self._translation_mode_sub = None
if self._rotation_mode_sub:
self._settings.unsubscribe_to_change_events(self._rotation_mode_sub)
self._rotation_mode_sub = None
def _on_op_changed(self, item, event_type):
selected_op = self._dict.get(item)
if selected_op != self.selected_op:
self.selected_op = selected_op
self._invoke_callbacks(OpSettingsListener.CallbackType.OP_CHANGED, self.selected_op)
def _on_translate_mode_changed(self, item, event_type):
translation_mode = self._dict.get(item)
if self.translation_mode != translation_mode:
self.translation_mode = translation_mode
self._invoke_callbacks(OpSettingsListener.CallbackType.TRANSLATION_MODE_CHANGED, self.translation_mode)
def _on_rotation_mode_changed(self, item, event_type):
rotation_mode = self._dict.get(item)
if self.rotation_mode != rotation_mode:
self.rotation_mode = rotation_mode
self._invoke_callbacks(OpSettingsListener.CallbackType.ROTATION_MODE_CHANGED, self.rotation_mode)
class SnapSettingsListener(Listener):
def __init__(
self,
enabled_setting_path: str = None,
move_x_setting_path: str = None,
move_y_setting_path: str = None,
move_z_setting_path: str = None,
rotate_setting_path: str = None,
scale_setting_path: str = None,
provider_setting_path: str = None,
) -> None:
super().__init__()
self._dict = carb.dictionary.get_dictionary()
self._settings = carb.settings.get_settings()
# keep around for backward compatibility
SNAP_ENABLED_SETTING = "/app/viewport/snapEnabled"
SNAP_MOVE_X_SETTING = "/persistent/app/viewport/stepMove/x"
SNAP_MOVE_Y_SETTING = "/persistent/app/viewport/stepMove/y"
SNAP_MOVE_Z_SETTING = "/persistent/app/viewport/stepMove/z"
SNAP_ROTATE_SETTING = "/persistent/app/viewport/stepRotate"
SNAP_SCALE_SETTING = "/persistent/app/viewport/stepScale"
SNAP_TO_SURFACE_SETTING = "/persistent/app/viewport/snapToSurface"
if not enabled_setting_path:
enabled_setting_path = SNAP_ENABLED_SETTING
if not move_x_setting_path:
move_x_setting_path = SNAP_MOVE_X_SETTING
if not move_y_setting_path:
move_y_setting_path = SNAP_MOVE_Y_SETTING
if not move_z_setting_path:
move_z_setting_path = SNAP_MOVE_Z_SETTING
if not rotate_setting_path:
rotate_setting_path = SNAP_ROTATE_SETTING
if not scale_setting_path:
scale_setting_path = SNAP_SCALE_SETTING
if not provider_setting_path:
provider_setting_path = SNAP_TO_SURFACE_SETTING
# subscribe to snap events
def subscribe_to_value_and_get_current(setting_val_name: str, setting_path: str):
def on_settings_changed(tree_item, changed_item, type):
# do not use `value = self._dict.get(tree_item)`, Dict does not work well with array setting
value = self._settings.get(setting_path)
setattr(self, setting_val_name, value)
self._invoke_callbacks(setting_val_name, value)
sub = self._settings.subscribe_to_tree_change_events(setting_path, on_settings_changed)
value = self._settings.get(setting_path)
setattr(self, setting_val_name, value)
return sub
self._snap_enabled_sub = subscribe_to_value_and_get_current("snap_enabled", enabled_setting_path)
self._snap_move_x_sub = subscribe_to_value_and_get_current("snap_move_x", move_x_setting_path)
self._snap_move_y_sub = subscribe_to_value_and_get_current("snap_move_y", move_y_setting_path)
self._snap_move_z_sub = subscribe_to_value_and_get_current("snap_move_z", move_z_setting_path)
self._snap_rotate_sub = subscribe_to_value_and_get_current("snap_rotate", rotate_setting_path)
self._snap_scale_sub = subscribe_to_value_and_get_current("snap_scale", scale_setting_path)
self._snap_to_surface_sub = subscribe_to_value_and_get_current("snap_to_surface", provider_setting_path)
self._snap_provider_sub = subscribe_to_value_and_get_current("snap_provider", provider_setting_path)
def __del__(self):
self.destroy()
def destroy(self):
if self._snap_enabled_sub:
self._settings.unsubscribe_to_change_events(self._snap_enabled_sub)
self._snap_enabled_sub = None
if self._snap_move_x_sub:
self._settings.unsubscribe_to_change_events(self._snap_move_x_sub)
self._snap_move_x_sub = None
if self._snap_move_y_sub:
self._settings.unsubscribe_to_change_events(self._snap_move_y_sub)
self._snap_move_y_sub = None
if self._snap_move_z_sub:
self._settings.unsubscribe_to_change_events(self._snap_move_z_sub)
self._snap_move_z_sub = None
if self._snap_rotate_sub:
self._settings.unsubscribe_to_change_events(self._snap_rotate_sub)
self._snap_rotate_sub = None
if self._snap_scale_sub:
self._settings.unsubscribe_to_change_events(self._snap_scale_sub)
self._snap_scale_sub = None
if self._snap_to_surface_sub:
self._settings.unsubscribe_to_change_events(self._snap_to_surface_sub)
self._snap_to_surface_sub = None
if self._snap_provider_sub:
self._settings.unsubscribe_to_change_events(self._snap_provider_sub)
self._snap_provider_sub = None
| 8,383 | Python | 40.92 | 115 | 0.646189 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/example.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.ui as ui
from omni.ui import scene as sc
from .manipulator import Axis, TransformManipulator
from .simple_transform_model import (
SimpleRotateChangedGesture,
SimpleScaleChangedGesture,
SimpleTranslateChangedGesture,
)
from .types import Operation
class Select(sc.ClickGesture):
def __init__(self, example):
super().__init__()
self._example = example
def on_ended(self):
self._example.on_point_clicked(self.sender)
class SimpleManipulatorExample:
def __init__(self):
projection = [0.011730205278592375, 0.0, 0.0, 0.0]
projection += [0.0, 0.02055498458376156, 0.0, 0.0]
projection += [0.0, 0.0, 2.00000020000002e-07, 0.0]
projection += [-0.0, -0.0, 1.00000020000002, 1.0]
view = [1.0, 0.0, 0.0, 0.0]
view += [0.0, 1.0, 0.0, 0.0]
view += [0.0, 0.0, 1.0, 0.0]
view += [-2.2368736267089844, 13.669827461242786, -5.0, 1.0]
self._selected_shape = None
self._window = ui.Window("Simple Manipulator Example")
with self._window.frame:
scene_view = sc.SceneView(projection=projection, view=view)
with scene_view.scene:
self._ma = TransformManipulator(
size=1,
axes=Axis.ALL & ~Axis.Z & ~Axis.SCREEN,
enabled=False,
gestures=[
SimpleTranslateChangedGesture(),
SimpleRotateChangedGesture(),
SimpleScaleChangedGesture(),
],
)
self._sub = self._ma.model.subscribe_item_changed_fn(self._on_item_changed)
sc.Points([[0, 0, 0]], colors=[ui.color.white], sizes=[10], gestures=[Select(self)])
sc.Points([[50, 0, 0]], colors=[ui.color.white], sizes=[10], gestures=[Select(self)])
sc.Points([[50, -50, 0]], colors=[ui.color.white], sizes=[10], gestures=[Select(self)])
sc.Points([[0, -50, 0]], colors=[ui.color.white], sizes=[10], gestures=[Select(self)])
def __del__(self):
self.destroy()
def destroy(self):
self._window = None
def on_point_clicked(self, shape):
self._selected_shape = shape
self._ma.enabled = True
model = self._ma.model
model.set_floats(
model.get_item("translate"),
[
self._selected_shape.positions[0][0],
self._selected_shape.positions[0][1],
self._selected_shape.positions[0][2],
],
)
def _on_item_changed(self, model, item):
if self._selected_shape is not None:
if item.operation == Operation.TRANSLATE:
self._selected_shape.positions = model.get_as_floats(item)
| 3,258 | Python | 35.211111 | 103 | 0.584408 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/settings_constants.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
class Constants:
TRANSFORM_MOVE_MODE_SETTING = "/app/transform/moveMode"
TRANSFORM_ROTATE_MODE_SETTING = "/app/transform/rotateMode"
TRANSFORM_MODE_GLOBAL = "global"
TRANSFORM_MODE_LOCAL = "local"
TRANSFORM_OP_SETTING = "/app/transform/operation"
TRANSFORM_OP_SELECT = "select"
TRANSFORM_OP_MOVE = "move"
TRANSFORM_OP_ROTATE = "rotate"
TRANSFORM_OP_SCALE = "scale"
MANIPULATOR_SCALE_SETTING = "/persistent/exts/omni.kit.manipulator.transform/manipulator/scaleMultiplier"
FREE_ROTATION_ENABLED_SETTING = "/persistent/exts/omni.kit.manipulator.transform/manipulator/freeRotationEnabled"
FREE_ROTATION_TYPE_SETTING = "/persistent/exts/omni.kit.manipulator.transform/manipulator/freeRotationType"
FREE_ROTATION_TYPE_CLAMPED = "Clamped"
FREE_ROTATION_TYPE_CONTINUOUS = "Continuous"
INTERSECTION_THICKNESS_SETTING = "/persistent/exts/omni.kit.manipulator.transform/manipulator/intersectionThickness"
TOOLS_DEFAULT_COLLAPSED_SETTING = "/persistent/exts/omni.kit.manipulator.transform/tools/defaultCollapsed"
# backward compatibility
c = Constants
| 1,542 | Python | 39.605262 | 120 | 0.772374 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/toolbar_registry.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
import weakref
from weakref import ReferenceType
from typing import Any, Callable, Dict, List, Tuple, Type
from .toolbar_tool import ToolbarTool
class ToolbarRegistry:
class Subscription:
        def __init__(self, registry: ReferenceType[ToolbarRegistry], id: int):
self._registry = registry
self._id = id
def __del__(self):
self.release()
def release(self):
registry = self._registry()
if self._id is not None and registry is not None:
                registry.unsubscribe_to_registry_change(self._id)
self._id = None
def __init__(self):
self._tools: Dict[str, Type[ToolbarTool]] = {}
self._change_subscribers: Dict[int, Callable] = {}
self._next_change_subscriber_id: int = 1
self._sorted_tools: List[Type[ToolbarTool]] = []
self._sort_key: Callable[[Tuple[str, Type[ToolbarTool]]], Any] = None
@property
def tools(self) -> List[Type[ToolbarTool]]:
"""Gets a sorted list of all tool classes."""
return self._sorted_tools
def register_tool(self, tool_class: Type[ToolbarTool], id: str):
"""
Registers a tool class to the registry.
Args:
tool_class (Type[ToolbarTool]): The class of the tool to be registered.
id (str): Unique id of the tool. It must not already exist.
"""
if id in self._tools:
raise ValueError(f"{id} already exist!")
self._tools[id] = tool_class
self._notify_registry_changed()
def unregister_tool(self, id: str):
"""
Unregisters a tool class using its id.
Args:
id (str): The id used in `register_tool`
"""
self._tools.pop(id, None)
self._notify_registry_changed()
def subscribe_to_registry_change(self, callback: Callable[[], None]) -> int:
"""
        Subscribes to the registry changed event. The callback will be called when tool classes are registered or unregistered.
        Args:
            callback (Callable[[], None]): the callback to be called. It is also invoked immediately before this function returns.
Return:
            A Subscription object. Call sub.release() to unsubscribe.
"""
id = self._next_change_subscriber_id
self._next_change_subscriber_id += 1
self._change_subscribers[id] = callback
self._notify_registry_changed(callback)
return ToolbarRegistry.Subscription(weakref.ref(self), id)
def unsubscribe_to_registry_change(self, id: int):
"""
        Called by Subscription.release to unsubscribe from the registry changed event. Do not call this function directly;
        users should use the Subscription object to unsubscribe.
Args:
id (int): id returned from subscribe_to_registry_change
"""
self._change_subscribers.pop(id, None)
def set_sort_key_function(self, key: Callable[[Tuple[str, Type[ToolbarTool]]], Any]):
"""
Set a custom key function to sort the registered tool classes.
Args:
key (Callable[[Tuple[str, Type[ToolbarTool]]], Any]): key function used for sorting.
"""
self._sort_key = key
def _notify_registry_changed(self, callback: Callable[[], None] = None):
self._sorted_tools = [value for key, value in sorted(self._tools.items(), key=self._sort_key)]
if not callback:
for sub in self._change_subscribers.values():
sub()
else:
callback()
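# The function below is an illustrative sketch only, not part of the registry API: it walks
# through the documented register/subscribe/unregister flow, registering the ToolbarTool
# base class purely as a stand-in for a real tool implementation.
def _example_registry_usage():
    registry = ToolbarRegistry()
    # The callback fires immediately on subscription and again on every (un)registration.
    subscription = registry.subscribe_to_registry_change(lambda: print([t.__name__ for t in registry.tools]))
    registry.register_tool(ToolbarTool, "example.tool")
    registry.unregister_tool("example.tool")
    subscription.release()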
| 4,027 | Python | 33.724138 | 119 | 0.625776 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/model.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
from typing import List
from omni.ui import scene as sc
from .types import Operation
class AbstractTransformManipulatorModel(sc.AbstractManipulatorModel):
class OperationItem(sc.AbstractManipulatorItem):
def __init__(self, op: Operation):
super().__init__()
self._op = op
@property
def operation(self):
return self._op
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._translate_item = AbstractTransformManipulatorModel.OperationItem(Operation.TRANSLATE)
self._rotate_item = AbstractTransformManipulatorModel.OperationItem(Operation.ROTATE)
self._scale_item = AbstractTransformManipulatorModel.OperationItem(Operation.SCALE)
self._transform_item = sc.AbstractManipulatorItem()
self._translate_delta_item = AbstractTransformManipulatorModel.OperationItem(Operation.TRANSLATE_DELTA)
self._rotate_delta_item = AbstractTransformManipulatorModel.OperationItem(Operation.ROTATE_DELTA)
self._scale_delta_item = AbstractTransformManipulatorModel.OperationItem(Operation.SCALE_DELTA)
self._items = {
"translate": self._translate_item,
"rotate": self._rotate_item,
"scale": self._scale_item,
"transform": self._transform_item,
"translate_delta": self._translate_delta_item,
"rotate_delta": self._rotate_delta_item,
"scale_delta": self._scale_delta_item,
}
def get_as_floats(self, item: sc.AbstractManipulatorItem) -> List[float]:
"""
Called by manipulator to fetch item values.
Returns a composed Matrix4x4 transform in world space as a list of float.
"""
...
    def get_as_ints(self, item: sc.AbstractManipulatorItem) -> List[int]:
"""
Called by manipulator to fetch item values.
Returns a composed Matrix4x4 transform in world space as a list of int.
"""
...
def get_item(self, name: str) -> sc.AbstractManipulatorItem:
"""
See AbstractManipulatorItem.get_item
"""
return self._items.get(name, None)
def widget_enabled(self):
"""
Called by hosting manipulator widget(s) when they're enabled.
It can be used to track if any hosting manipulator is active to skip background model update (i.e. running listener for changes).
"""
...
def widget_disabled(self):
"""
Called by hosting manipulator widget(s) when they're disabled.
It can be used to track if any hosting manipulator is active to skip background model update (i.e. running listener for changes).
"""
...
def set_floats(self, item: sc.AbstractManipulatorItem, value: List[float]):
"""
Called when the manipulator is being dragged and value changes, or set by external code to overwrite the value.
The model should update value to underlying data holder(s) (e.g. a USD prim(s)).
        Depending on the model implementation, item and value can be customized to the model's needs.
"""
...
def set_ints(self, item: sc.AbstractManipulatorItem, value: List[int]):
"""
Called when the manipulator is being dragged and value changes, or set by external code to overwrite the value.
The model should update value to underlying data holder(s) (e.g. a USD prim(s)).
        Depending on the model implementation, item and value can be customized to the model's needs.
"""
...
def get_operation(self) -> Operation:
"""
Called by the manipulator to determine which operation is active.
"""
...
def get_snap(self, item: sc.AbstractManipulatorItem):
"""
Called by the manipulator, returns the minimal increment step for each operation. None if no snap should be performed.
Different Operation requires different return values:
- TRANSLATE: Tuple[float, float, float]. One entry for X/Y/Z axis.
- ROTATE: float. Angle in degree.
- SCALE: float
"""
return None
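# A minimal concrete sketch for illustration only (not part of the shipped API): it keeps a
# single translation in memory and reports it through the item interface documented above.
class _ExampleTranslateOnlyModel(AbstractTransformManipulatorModel):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._translation = [0.0, 0.0, 0.0]
    def get_operation(self) -> Operation:
        return Operation.TRANSLATE
    def get_as_floats(self, item: sc.AbstractManipulatorItem) -> List[float]:
        if item == self._transform_item:
            # Identity rotation with the translation stored in entries 12-14, the usual flat Matrix4x4 layout.
            x, y, z = self._translation
            return [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1]
        if item == self._translate_item:
            return list(self._translation)
        return []
    def set_floats(self, item: sc.AbstractManipulatorItem, value: List[float]):
        if item == self._translate_item:
            self._translation = list(value)
            # A real model would notify subscribers here so attached manipulators refresh.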
| 4,631 | Python | 37.924369 | 137 | 0.658821 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/simple_transform_model.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
import math
from typing import List
import carb
from omni.kit.manipulator.transform.gestures import (
RotateChangedGesture,
RotateDragGesturePayload,
ScaleChangedGesture,
ScaleDragGesturePayload,
TransformDragGesturePayload,
TranslateChangedGesture,
TranslateDragGesturePayload,
)
from omni.ui import scene as sc
from .model import AbstractTransformManipulatorModel
from .types import Operation
class SimpleTranslateChangedGesture(TranslateChangedGesture):
def on_changed(self):
if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload):
return
model = self.sender.model
item = self.gesture_payload.changing_item
translated = self.gesture_payload.moved_delta
translation = [a + b for a, b in zip(translated, model.get_as_floats(item))]
model.set_floats(item, translation)
class SimpleRotateChangedGesture(RotateChangedGesture):
def on_began(self):
if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, RotateDragGesturePayload):
return
model = self.sender.model
item = self.gesture_payload.changing_item
self._begin_rotation = model.get_as_floats(item).copy()
self._begin_rotation_matrix = sc.Matrix44.get_rotation_matrix(
self._begin_rotation[0], self._begin_rotation[1], self._begin_rotation[2], True
)
def on_changed(self):
if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, RotateDragGesturePayload):
return
model = self.sender.model
item = self.gesture_payload.changing_item
axis = self.gesture_payload.axis
angle = self.gesture_payload.angle
# always reset begin_rotation in local rotation mode
if not model.global_mode:
self._begin_rotation = model.get_as_floats(item).copy()
self._begin_rotation_matrix = sc.Matrix44.get_rotation_matrix(
self._begin_rotation[0], self._begin_rotation[1], self._begin_rotation[2], True
)
# convert to radian
angle = angle / 180.0 * math.pi
        # calculate the rotation matrix around `axis` for `angle` in radians (Rodrigues' axis-angle formula; see the note after this class)
matrix = [
math.cos(angle) + axis[0] ** 2 * (1 - math.cos(angle)),
axis[0] * axis[1] * (1 - math.cos(angle)) + axis[2] * math.sin(angle),
axis[0] * axis[2] * (1 - math.cos(angle)) - axis[1] * math.sin(angle),
0,
]
matrix += [
axis[0] * axis[1] * (1 - math.cos(angle)) - axis[2] * math.sin(angle),
math.cos(angle) + axis[1] ** 2 * (1 - math.cos(angle)),
axis[1] * axis[2] * (1 - math.cos(angle)) + axis[0] * math.sin(angle),
0,
]
matrix += [
axis[0] * axis[2] * (1 - math.cos(angle)) + axis[1] * math.sin(angle),
axis[1] * axis[2] * (1 - math.cos(angle)) - axis[0] * math.sin(angle),
math.cos(angle) + axis[2] ** 2 * (1 - math.cos(angle)),
0,
]
matrix += [0, 0, 0, 1]
matrix = sc.Matrix44(*matrix) # each 4 elements in list is a COLUMN in a row-vector matrix!
rotate = self._begin_rotation_matrix * matrix
# decompose back to x y z euler angle
sy = math.sqrt(rotate[0] ** 2 + rotate[1] ** 2)
is_singular = sy < 10 ** -6
if not is_singular:
z = math.atan2(rotate[1], rotate[0])
y = math.atan2(-rotate[2], sy)
x = math.atan2(rotate[6], rotate[10])
else:
z = math.atan2(-rotate[9], rotate[5])
y = math.atan2(-rotate[2], sy)
x = 0
x = x / math.pi * 180.0
y = y / math.pi * 180.0
z = z / math.pi * 180.0
model.set_floats(item, [x, y, z])
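# For reference: the matrix assembled in SimpleRotateChangedGesture.on_changed above is the
# Rodrigues axis-angle rotation for a unit axis k = (kx, ky, kz) and angle a,
#     R = cos(a) * I + sin(a) * [k]_x + (1 - cos(a)) * k * k^T
# written out column by column in the row-vector convention noted in the code. It is composed with
# the rotation captured when the drag began and then decomposed back into XYZ Euler angles in
# degrees before being written to the model.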
class SimpleScaleChangedGesture(ScaleChangedGesture):
def on_began(self):
if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, ScaleDragGesturePayload):
return
model = self.sender.model
item = self.gesture_payload.changing_item
self._begin_scale = model.get_as_floats(item).copy()
def on_changed(self):
if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, ScaleDragGesturePayload):
return
model = self.sender.model
item = self.gesture_payload.changing_item
axis = self.gesture_payload.axis
scale = self.gesture_payload.scale
scale_delta = [scale * v for v in axis]
scale_vec = [0, 0, 0]
for i in range(3):
            scale_vec[i] = self._begin_scale[i] * scale_delta[i] if scale_delta[i] else self._begin_scale[i]  # keep the initial scale on axes that are not affected
model.set_floats(item, [scale_vec[0], scale_vec[1], scale_vec[2]])
class SimpleTransformModel(AbstractTransformManipulatorModel):
"""
    SimpleTransformModel is a model that provides basic S/R/T transform data processing. You can subscribe to
    item-changed callbacks to get the manipulated data. Rotation is stored in degrees and applied in XYZ order.
    A usage sketch is provided at the end of this module.
"""
def __init__(self):
super().__init__()
self._translation = [0, 0, 0]
self._rotation = [0, 0, 0]
self._scale = [1, 1, 1]
self._transform = sc.Matrix44(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
self._op = Operation.TRANSLATE
self._global_mode = True
self._dirty = True
@property
def global_mode(self) -> bool:
"""
The Global vs Local transform mode.
"""
return self._global_mode
@global_mode.setter
def global_mode(self, value: bool):
if self._global_mode != value:
self._global_mode = value
self._dirty = True # global vs local mode returns different Transform matrix
self._item_changed(self._transform_item)
def set_floats(self, item: AbstractTransformManipulatorModel.OperationItem, value: List[float]):
if item == self._translate_item:
self._translation[0] = value[0]
self._translation[1] = value[1]
self._translation[2] = value[2]
self._dirty = True
self._item_changed(self._translate_item)
elif item == self._rotate_item:
self._rotation[0] = value[0]
self._rotation[1] = value[1]
self._rotation[2] = value[2]
self._dirty = True
self._item_changed(self._rotate_item)
elif item == self._scale_item:
self._scale[0] = value[0]
self._scale[1] = value[1]
self._scale[2] = value[2]
self._dirty = True
self._item_changed(self._scale_item)
else:
carb.log_warn(f"Unsupported item {item}")
def get_as_floats(self, item: AbstractTransformManipulatorModel.OperationItem) -> List[float]:
if item == self._translate_item:
return self._translation
elif item == self._rotate_item:
return self._rotation
elif item == self._scale_item:
return self._scale
elif item is None or item == self._transform_item:
if self._dirty:
# Scale is not put into the Transform matrix because we don't want the TransformManipulator itself to scale
# For a "global" style rotation gizmo (where the gizmo doesn't rotate), we don't want to put the rotation into Transform either.
if self._global_mode:
self._transform = sc.Matrix44.get_translation_matrix(
self._translation[0], self._translation[1], self._translation[2]
)
else:
self._transform = sc.Matrix44.get_translation_matrix(
self._translation[0], self._translation[1], self._translation[2]
) * sc.Matrix44.get_rotation_matrix(self._rotation[0], self._rotation[1], self._rotation[2], True)
self._dirty = False
return self._transform
else:
carb.log_warn(f"Unsupported item {item}")
return None
def get_operation(self) -> Operation:
return self._op
def set_operation(self, op: Operation):
if self._op != op:
self._op = op
self._item_changed(self._transform_item)
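# Usage sketch (illustrative only; the manipulator must be created inside a SceneView scene scope,
# which is assumed to exist elsewhere):
#
#     from omni.kit.manipulator.transform.manipulator import TransformManipulator
#
#     model = SimpleTransformModel()
#     model.set_operation(Operation.TRANSLATE)
#     manipulator = TransformManipulator(model=model)
#     model.set_floats(model.get_item("translate"), [10.0, 0.0, 5.0])
#     model.get_as_floats(model.get_item("translate"))  # -> [10.0, 0.0, 5.0]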
| 8,900 | Python | 37.532467 | 144 | 0.598764 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/toolbar_tool.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
import asyncio
import weakref
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict
from weakref import ProxyType
import carb
import omni.kit.app
import omni.kit.context_menu
import omni.ui as ui
from .manipulator import TransformManipulator
from .types import Operation
ICON_FOLDER_PATH = Path(
f"{omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)}/data/icons"
)
class ToolbarTool(ABC):
def __init__(
self,
manipulator: ProxyType[TransformManipulator],
operation: Operation,
toolbar_height: int,
toolbar_payload: Dict[str, Any] = {},
tooltip_update_fn: Callable[[str, bool, float], None] = None,
):
self._manipulator = manipulator
self._operation = operation
self._model = manipulator.model
self._toolbar_height = toolbar_height
self._toolbar_payload = toolbar_payload
self._tooltip_update_fn = tooltip_update_fn
def destroy(self):
self._manipulator = None
self._model = None
self._toolbar_payload = None
self._tooltip_update_fn = None
def __del__(self):
self.destroy()
@classmethod
@abstractmethod
def can_build(cls, manipulator: TransformManipulator, operation: Operation) -> bool:
"""
Called right before a tool instance is to be instantiated to determine if this tool can be built on current toolbar.
Args:
manipulator (TransformManipulator): manipulator that hosts the toolbar
operation (Operation): The transform Operation the tool will be built for.
Return:
True if the tool can be built. Its constructor will be called.
False if not. The tool will be skipped and not placed on toolbar.
"""
        raise NotImplementedError("You must override `can_build` in your derived class!")
class DefaultMenuDelegate(ui.MenuDelegate):
def get_style(self):
from omni.kit.context_menu import style
return style.MENU_STYLE
class SimpleToolButton(ToolbarTool):
def __init__(self, menu_delegate: ui.MenuDelegate = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._show_menu_task = None
self._cancel_next_value_changed = False # workaround context menu triggering button state change
self._ignore_model_change = False
self._button = None
self._stack = None
self._menu_delegate = menu_delegate if menu_delegate else DefaultMenuDelegate()
def destroy(self):
self._menu_delegate = None
if self._show_menu_task is not None:
self._show_menu_task.cancel()
self._show_menu_task = None
if self._button:
self._button.set_mouse_hovered_fn(None)
self._sub = None
self._button = None
self._stack = None
super().destroy()
def _get_style(self) -> Dict:
return {
"Button": {"background_color": 0x0},
"Button:checked": {"background_color": 0x8FD1912E},
"Button:hovered": {"background_color": 0x0},
"Button:pressed": {"background_color": 0x0},
}
def _build_widget(
self,
button_name: str,
model: ui.AbstractValueModel,
enabled_img_url: str,
disabled_img_url: str = None,
menu_index: str = None,
menu_extension_id: str = None,
no_toggle: bool = False,
menu_on_left_click: bool = False,
menu_payload: Dict[str, Any] = {},
tooltip: str = "",
disabled_tooltip: str = "",
):
self._button_name = button_name
self._model = model
self._enabled_img_url = enabled_img_url
self._disabled_img_url = disabled_img_url
self._menu_index = menu_index
self._menu_extension_id = menu_extension_id
self._no_toggle = no_toggle
self._menu_on_left_click = menu_on_left_click
self._menu_payload = menu_payload
self._tooltip = tooltip
self._disabled_tooltip = disabled_tooltip
self._button_hovered = False
style = {}
if self._disabled_img_url:
style[f"Button.Image::{self._button_name}_enabled"] = {"image_url": f"{self._enabled_img_url}"}
style[f"Button.Image::{self._button_name}_disabled"] = {"image_url": f"{self._disabled_img_url}"}
else:
style[f"Button.Image::{self._button_name}"] = {"image_url": f"{self._enabled_img_url}"}
style.update(self._get_style())
self._stack = ui.ZStack(width=0, height=0, style=style)
with self._stack:
dimension = self._toolbar_height
self._button = ui.ToolButton(
name=self._button_name,
model=self._model,
image_width=dimension,
image_height=dimension,
)
self._button.set_mouse_hovered_fn(self._on_hovered)
if self._menu_index is not None and self._menu_extension_id is not None:
self._button.set_mouse_pressed_fn(lambda x, y, b, _: self._on_mouse_pressed(b, self._menu_index))
self._button.set_mouse_released_fn(lambda x, y, b, _: self._on_mouse_released(b))
self._build_flyout_indicator(dimension, dimension, self._menu_index, self._menu_extension_id)
# only update name if enabled and disabled imgs are different
if self._disabled_img_url or self._tooltip != self._disabled_tooltip:
self._update_name(self._button, self._button.model.as_bool)
self._sub = self._button.model.subscribe_value_changed_fn(self._on_model_changed)
def _on_hovered(self, state: bool):
self._button_hovered = state
if self._tooltip and self._tooltip_update_fn:
tooltip = self._disabled_tooltip if not self._button.model.as_bool and self._disabled_tooltip else self._tooltip
self._tooltip_update_fn(tooltip, state, self._button.screen_position_x)
def _on_model_changed(self, model):
if self._ignore_model_change:
return
if self._no_toggle:
self._ignore_model_change = True
model.set_value(not model.as_bool)
self._ignore_model_change = False
return
if self._cancel_next_value_changed:
self._cancel_next_value_changed = False
model.set_value(not model.as_bool)
if self._disabled_img_url or self._tooltip != self._disabled_tooltip:
self._update_name(self._button, model.as_bool)
def _update_name(self, button: ui.ToolButton, enabled: bool):
if self._disabled_img_url:
button.name = f"{self._button_name}_{'enabled' if enabled else 'disabled'}"
self._on_hovered(self._button_hovered)
self._manipulator.refresh_toolbar()
def _invoke_context_menu(self, button_id: str, right_click: bool, min_menu_entries: int = 1):
context_menu = omni.kit.context_menu.get_instance()
objects = {"widget_name": button_id, "manipulator": self._manipulator, "model": self._model}
objects.update(self._toolbar_payload)
objects.update(self._menu_payload)
menu_list = omni.kit.context_menu.get_menu_dict(self._menu_index, self._menu_extension_id)
context_menu.show_context_menu(
self._menu_index, objects, menu_list, min_menu_entries, delegate=self._menu_delegate
)
# if from long LMB hold
if not right_click:
self._cancel_next_value_changed = True
def _on_mouse_pressed(self, button, button_id: str, min_menu_entries: int = 1):
"""
Function to handle flyout menu. Either with LMB long press or RMB click.
Args:
button_id: button_id of the context menu to be invoked.
min_menu_entries: minimal number of menu entries required for menu to be visible (default 1).
"""
if button == 1 or button == 0 and self._menu_on_left_click: # show immediately
            # We cannot call self._invoke_context_menu directly inside of sc.Widget's context, otherwise it will be drawn
            # on the auxiliary window. Schedule an async task so the menu is still shown in the main window.
self._show_menu_task = asyncio.ensure_future(
self._schedule_show_menu(button_id, wait_seconds=0.0, min_menu_entries=min_menu_entries)
)
elif button == 0: # Schedule a task if hold LMB long enough
self._show_menu_task = asyncio.ensure_future(
self._schedule_show_menu(button_id, min_menu_entries=min_menu_entries)
)
def _on_mouse_released(self, button):
if button == 0:
if self._show_menu_task:
self._show_menu_task.cancel()
async def _schedule_show_menu(self, button_id: str, min_menu_entries: int = 1, wait_seconds: float = 0.3):
if wait_seconds > 0.0:
await asyncio.sleep(wait_seconds)
try:
self._invoke_context_menu(button_id, False, min_menu_entries)
except Exception:
import traceback
carb.log_error(traceback.format_exc())
self._show_menu_task = None
def _build_flyout_indicator(self, width, height, index: str, extension_id: str, padding=-8, min_menu_count=1):
indicator_size = 4
with ui.Placer(offset_x=width - indicator_size - padding, offset_y=height - indicator_size - padding):
indicator = ui.Image(
f"{ICON_FOLDER_PATH}/flyout_indicator_dark.svg",
width=indicator_size,
height=indicator_size,
)
def on_menu_changed(evt: carb.events.IEvent):
try:
menu_list = omni.kit.context_menu.get_menu_dict(index, extension_id)
# TODO check the actual menu entry visibility with show_fn
indicator.visible = len(menu_list) >= min_menu_count
except AttributeError as exc:
carb.log_warn(f"on_menu_changed error {exc}")
# Check initial state
on_menu_changed(None)
event_stream = omni.kit.context_menu.get_menu_event_stream()
return event_stream.create_subscription_to_pop(on_menu_changed)
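# Illustrative sketch (hypothetical tool, not shipped with this module): a minimal SimpleToolButton
# subclass that only appears on the TRANSLATE toolbar. The button name, icon and tooltip are
# placeholders; a real tool would also be registered with the ToolbarRegistry that is passed to the
# hosting TransformManipulator.
#
#     class ExampleToggleButton(SimpleToolButton):
#         @classmethod
#         def can_build(cls, manipulator, operation) -> bool:
#             return operation == Operation.TRANSLATE
#
#         def __init__(self, **kwargs):
#             super().__init__(**kwargs)
#             self._build_widget(
#                 button_name="example_toggle",
#                 model=ui.SimpleBoolModel(),
#                 enabled_img_url=f"{ICON_FOLDER_PATH}/flyout_indicator_dark.svg",
#                 tooltip="Example toggle",
#             )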
| 10,826 | Python | 38.370909 | 124 | 0.620081 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/manipulator.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
import weakref
from collections import defaultdict
from typing import TYPE_CHECKING
from weakref import ProxyType
if TYPE_CHECKING:
from .model import AbstractTransformManipulatorModel
from .toolbar_registry import ToolbarRegistry
from .toolbar_tool import ToolbarTool
from pathlib import Path
from typing import Any, DefaultDict, Dict, List, Type
import carb.dictionary
import carb.settings
import omni.kit.app
import omni.ui as ui
from omni.ui import color as cl
from omni.ui import scene as sc
from .gestures import (
DummyClickGesture,
DummyGesture,
HighlightControl,
HighlightGesture,
RotationGesture,
ScaleGesture,
TranslateGesture,
)
from .settings_constants import c
from .simple_transform_model import SimpleTransformModel
from .style import abgr_to_color, get_default_style, get_default_toolbar_style, update_style
from .types import Axis, Operation
ARROW_WIDTH = 4
ARROW_HEIGHT = 14
ARROW_P = [
[ARROW_WIDTH, ARROW_WIDTH, 0],
[-ARROW_WIDTH, ARROW_WIDTH, 0],
[0, 0, ARROW_HEIGHT],
#
[ARROW_WIDTH, -ARROW_WIDTH, 0],
[-ARROW_WIDTH, -ARROW_WIDTH, 0],
[0, 0, ARROW_HEIGHT],
#
[ARROW_WIDTH, ARROW_WIDTH, 0],
[ARROW_WIDTH, -ARROW_WIDTH, 0],
[0, 0, ARROW_HEIGHT],
#
[-ARROW_WIDTH, ARROW_WIDTH, 0],
[-ARROW_WIDTH, -ARROW_WIDTH, 0],
[0, 0, ARROW_HEIGHT],
#
[ARROW_WIDTH, ARROW_WIDTH, 0],
[-ARROW_WIDTH, ARROW_WIDTH, 0],
[-ARROW_WIDTH, -ARROW_WIDTH, 0],
[ARROW_WIDTH, -ARROW_WIDTH, 0],
]
ARROW_VC = [3, 3, 3, 3, 4]
ARROW_VI = [i for i in range(sum(ARROW_VC))]
LINE_THICKNESS = 2
ICON_FOLDER_PATH = Path(
f"{omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)}/data/icons"
)
TOOLBAR_WIDGET_HEIGHT = 114
class TransformManipulator(sc.Manipulator):
def __init__(
self,
size: float = 1.0,
enabled: bool = True,
axes: Axis = Axis.ALL,
model: AbstractTransformManipulatorModel = None,
style: Dict = {},
gestures: List[sc.ManipulatorGesture] = [],
tool_registry: ToolbarRegistry = None,
tool_button_additional_payload: Dict[str, Any] = {},
tools_default_collapsed: bool | None = None,
):
"""
Create a Transform Manipulator.
Args:
size: size of the TransformManipulator.
enabled: If false, Manipulator will be created but disabled (invisible).
            axes: which axes to enable for the Manipulator. You can use this to create a 2D or 1D manipulator.
            model: The model for the Manipulator. If None is provided, a default SimpleTransformModel will be created.
style: Use this to override the default style of the Manipulator.
tool_registry: Registry to hold the classes of ToolbarTool.
tool_button_additional_payload: Additional payload to be passed into toolbar's context menu.
tools_default_collapsed: Whether the toolbar should be collapsed by default. It overrides the carb setting so each manipulator may tweak this behavior differently.
"""
if model is None:
model = SimpleTransformModel()
super().__init__(model=model, gestures=gestures)
self._size: float = size
self._enabled: bool = enabled
self._transform = None
self._transform_screen = None
self._translate_gizmo = None
self._rotation_gizmo = None
self._scale_gizmo = None
self._toolbar_root = None
self._toolbar_height_offset_transform = None
self._toolbar_collapsable_frame = None
self._toolbars: Dict[Operation, ui.Frame] = {}
self._toolbar_widget: sc.Widget = None
self._header_line = None
self._tools_stacks: Dict[Operation, ui.HStack] = {}
self._tools: List[ToolbarTool] = []
self._operation = None
self._style = get_default_style()
self._style = update_style(self._style, style)
self._settings = carb.settings.get_settings()
self._dict = carb.dictionary.get_dictionary()
self._scale_sub = self._settings.subscribe_to_node_change_events(
c.MANIPULATOR_SCALE_SETTING, self._on_scale_changed
)
self._scale = self._settings.get(c.MANIPULATOR_SCALE_SETTING)
self._thickness_sub = self._settings.subscribe_to_node_change_events(
c.INTERSECTION_THICKNESS_SETTING, self._on_intersection_thickness_changed
)
self._free_rotation_enabled_sub = self._settings.subscribe_to_node_change_events(
c.FREE_ROTATION_ENABLED_SETTING, self._on_free_rotation_enabled_changed
)
self._axes = axes
self._tool_button_additional_payload = tool_button_additional_payload
self._toolbar_collapsed = (
tools_default_collapsed
if tools_default_collapsed is not None
else self._settings.get(c.TOOLS_DEFAULT_COLLAPSED_SETTING)
)
self._tool_registry = tool_registry
self._tool_registry_sub = None
if self._tool_registry:
self._tool_registry_sub = self._tool_registry.subscribe_to_registry_change(self._on_toolbar_changed)
if self.model:
if self._enabled:
self.model.widget_enabled()
def __del__(self):
self.destroy()
def destroy(self):
if self._scale_sub:
self._settings.unsubscribe_to_change_events(self._scale_sub)
self._scale_sub = None
if self._thickness_sub:
self._settings.unsubscribe_to_change_events(self._thickness_sub)
self._thickness_sub = None
if self._free_rotation_enabled_sub:
self._settings.unsubscribe_to_change_events(self._free_rotation_enabled_sub)
self._free_rotation_enabled_sub = None
if self._tool_registry and self._tool_registry_sub:
self._tool_registry_sub.release()
self._tool_registry_sub = None
for tool in self._tools:
tool.destroy()
self._tools.clear()
self._toolbar_widget = None
for key, stack in self._tools_stacks.items():
stack.set_computed_content_size_changed_fn(None)
self._tools_stacks.clear()
self.enabled = False
def on_build(self):
"""
        Called when the manipulator is being built by the hosting SceneView to create its scene items.
        It has to be within a SceneView.scene scope.
"""
self._transform = sc.Transform(visible=self.enabled)
with self._transform:
final_size = self._get_final_size()
self._transform_screen = sc.Transform(
transform=sc.Matrix44.get_scale_matrix(final_size, final_size, final_size), scale_to=sc.Space.SCREEN
)
with self._transform_screen:
self._create_translate_manipulator()
self._create_rotation_manipulator()
self._create_scale_manipulator()
with sc.Transform(scale_to=sc.Space.SCREEN):
self._create_toolbar()
self._translate_gizmo.visible = True
self._update_axes_visibility()
self._update_from_model()
@property
def enabled(self) -> bool:
return self._enabled
@enabled.setter
def enabled(self, value: bool):
if value != self._enabled:
if self._transform:
self._transform.visible = value
self._enabled = value
if self.model:
if self._enabled:
self.model.widget_enabled()
else:
self.model.widget_disabled()
@property
def size(self) -> float:
return self._size
@size.setter
def size(self, value: float):
if self._size != value:
self._size = value
self._update_manipulator_final_size()
@property
    def axes(self) -> Axis:
return self._axes
@axes.setter
    def axes(self, value: Axis):
if self._axes != value:
self._axes = value
self._update_axes_visibility()
@property
def style(self) -> Dict:
return self._style
@style.setter
def style(self, value: Dict):
default_style = get_default_style()
style = update_style(default_style, value)
if self._style != style:
if self._transform:
self._translate_line_x.color = abgr_to_color(style["Translate.Axis::x"]["color"])
self._translate_line_y.color = abgr_to_color(style["Translate.Axis::y"]["color"])
self._translate_line_z.color = abgr_to_color(style["Translate.Axis::z"]["color"])
vert_count = len(ARROW_VI)
self._translate_arrow_x.colors = [abgr_to_color(style["Translate.Axis::x"]["color"])] * vert_count
self._translate_arrow_y.colors = [abgr_to_color(style["Translate.Axis::y"]["color"])] * vert_count
self._translate_arrow_z.colors = [abgr_to_color(style["Translate.Axis::z"]["color"])] * vert_count
self._translate_plane_yz.color = abgr_to_color(style["Translate.Axis::x"]["color"])
self._translate_plane_zx.color = abgr_to_color(style["Translate.Axis::y"]["color"])
self._translate_plane_xy.color = abgr_to_color(style["Translate.Axis::z"]["color"])
self._translate_point.color = abgr_to_color(style["Translate.Point"]["color"])
self._translate_point_square.color = abgr_to_color(style["Translate.Point"]["color"])
use_point = style["Translate.Point"]["type"] == "point"
self._translate_point.visible = use_point
self._translate_point_square.visible = not use_point
self._translate_point_focal_transform.visible = style["Translate.Focal"]["visible"]
self._rotation_arc_x.color = abgr_to_color(style["Rotate.Arc::x"]["color"])
self._rotation_arc_y.color = abgr_to_color(style["Rotate.Arc::y"]["color"])
self._rotation_arc_z.color = abgr_to_color(style["Rotate.Arc::z"]["color"])
self._rotation_arc_screen.color = abgr_to_color(style["Rotate.Arc::screen"]["color"])
self._scale_line_x.color = abgr_to_color(style["Scale.Axis::x"]["color"])
self._scale_line_y.color = abgr_to_color(style["Scale.Axis::y"]["color"])
self._scale_line_z.color = abgr_to_color(style["Scale.Axis::z"]["color"])
self._scale_point_x.color = abgr_to_color(style["Scale.Axis::x"]["color"])
self._scale_point_y.color = abgr_to_color(style["Scale.Axis::y"]["color"])
self._scale_point_z.color = abgr_to_color(style["Scale.Axis::z"]["color"])
self._scale_plane_yz.color = abgr_to_color(style["Scale.Axis::x"]["color"])
self._scale_plane_zx.color = abgr_to_color(style["Scale.Axis::y"]["color"])
self._scale_plane_xy.color = abgr_to_color(style["Scale.Axis::z"]["color"])
self._scale_point.color = abgr_to_color(style["Scale.Point"]["color"])
self._style = style
@sc.Manipulator.model.setter
def model(self, model):
if self.model is not None and self.enabled:
self.model.widget_disabled()
sc.Manipulator.model.fset(self, model)
if self.model is not None and self.enabled:
self.model.widget_enabled()
@property
def tool_registry(self) -> ToolbarRegistry:
return self._tool_registry
@tool_registry.setter
def tool_registry(self, value: ToolbarRegistry):
if self._tool_registry != value:
if self._tool_registry_sub:
self._tool_registry_sub.release()
self._tool_registry_sub = None
self._tool_registry = value
if self._tool_registry:
self._tool_registry_sub = self._tool_registry.subscribe_to_registry_change(self._on_toolbar_changed)
@property
def toolbar_visible(self) -> bool:
if self._toolbar_root:
return self._toolbar_root.visible
return False
@toolbar_visible.setter
def toolbar_visible(self, value: bool):
if self._toolbar_root:
self._toolbar_root.visible = value
def refresh_toolbar(self):
if self._toolbar_widget:
self._toolbar_widget.invalidate()
#############################################
# Beginning of internal functions. Do not use.
#############################################
def _create_translate_manipulator(self):
self._translate_gizmo = sc.Transform()
self._translate_gizmo.visible = False
AXIS_LEN = 100
RECT_SIZE = 50
POINT_RADIUS = 7
FOCAL_SIZE = 16
intersection_thickness = self._settings.get(c.INTERSECTION_THICKNESS_SETTING)
with self._translate_gizmo:
# Arrows
def make_arrow(
rot,
translate,
color,
):
vert_count = len(ARROW_VI)
with sc.Transform(
transform=sc.Matrix44.get_translation_matrix(translate[0], translate[1], translate[2])
* sc.Matrix44.get_rotation_matrix(rot[0], rot[1], rot[2], True)
):
return sc.PolygonMesh(ARROW_P, [color] * vert_count, ARROW_VC, ARROW_VI)
self._translate_arrow_x = make_arrow(
(0, 90, 0), (AXIS_LEN - ARROW_HEIGHT / 2, 0, 0), abgr_to_color(self.style["Translate.Axis::x"]["color"])
)
self._translate_arrow_y = make_arrow(
(-90, 0, 0),
(0, AXIS_LEN - ARROW_HEIGHT / 2, 0),
abgr_to_color(self.style["Translate.Axis::y"]["color"]),
)
self._translate_arrow_z = make_arrow(
(0, 0, 0), (0, 0, AXIS_LEN - ARROW_HEIGHT / 2), abgr_to_color(self.style["Translate.Axis::z"]["color"])
)
# Lines
def make_line(axis, color, arrow):
line = sc.Line(
[v * POINT_RADIUS for v in axis],
[v * AXIS_LEN for v in axis],
color=color,
thickness=LINE_THICKNESS,
intersection_thickness=intersection_thickness,
)
highlight_ctrl = HighlightControl([line, arrow])
line.gestures = [
TranslateGesture(self, axis, highlight_ctrl),
HighlightGesture(highlight_ctrl),
]
return line
self._translate_line_x = make_line(
[1, 0, 0], abgr_to_color(self.style["Translate.Axis::x"]["color"]), self._translate_arrow_x
)
self._translate_line_y = make_line(
[0, 1, 0], abgr_to_color(self.style["Translate.Axis::y"]["color"]), self._translate_arrow_y
)
self._translate_line_z = make_line(
[0, 0, 1], abgr_to_color(self.style["Translate.Axis::z"]["color"]), self._translate_arrow_z
)
# Rectangles
def make_plane(axis, color, axis_vec):
translate = [v * (AXIS_LEN - RECT_SIZE * 0.5) for v in axis_vec]
with sc.Transform(
transform=sc.Matrix44.get_translation_matrix(translate[0], translate[1], translate[2])
):
highlight_ctrl = HighlightControl()
return sc.Rectangle(
axis=axis,
width=RECT_SIZE / 2,
height=RECT_SIZE / 2,
color=color,
gestures=[TranslateGesture(self, axis_vec, highlight_ctrl), HighlightGesture(highlight_ctrl)],
)
self._translate_plane_yz = make_plane(0, abgr_to_color(self.style["Translate.Axis::x"]["color"]), (0, 1, 1))
self._translate_plane_zx = make_plane(1, abgr_to_color(self.style["Translate.Axis::y"]["color"]), (1, 0, 1))
self._translate_plane_xy = make_plane(2, abgr_to_color(self.style["Translate.Axis::z"]["color"]), (1, 1, 0))
# Points
with sc.Transform(look_at=sc.Transform.LookAt.CAMERA):
use_point = self.style["Translate.Point"]["type"] == "point"
self._translate_point = sc.Arc(
POINT_RADIUS,
tesselation=16,
color=abgr_to_color(self.style["Translate.Point"]["color"]),
visible=use_point,
)
highlight_ctrl = HighlightControl(
[
self._translate_plane_yz,
self._translate_plane_zx,
self._translate_plane_xy,
self._translate_point,
]
)
self._translate_point.gestures = [
TranslateGesture(self, [1, 1, 1], highlight_ctrl, order=-1),
HighlightGesture(highlight_ctrl=highlight_ctrl, order=-1),
]
self._translate_point_square = sc.Rectangle(
width=FOCAL_SIZE,
height=FOCAL_SIZE,
color=abgr_to_color(self.style["Translate.Point"]["color"]),
visible=not use_point,
)
highlight_ctrl = HighlightControl(
[
self._translate_plane_yz,
self._translate_plane_zx,
self._translate_plane_xy,
self._translate_point_square,
]
)
self._translate_point_square.gestures = [
TranslateGesture(self, [1, 1, 1], highlight_ctrl, order=-1),
HighlightGesture(highlight_ctrl=highlight_ctrl, order=-1),
]
show_focal = self.style["Translate.Focal"]["visible"]
self._translate_point_focal_transform = sc.Transform(visible=show_focal)
with self._translate_point_focal_transform:
def create_corner_line(begin, end):
sc.Line(
begin,
end,
color=abgr_to_color(self.style["Translate.Focal"]["color"]),
thickness=2,
)
offset_outer = FOCAL_SIZE
offset_inner = FOCAL_SIZE * 0.4
v_buffer = [
[offset_outer, offset_inner, 0], # 0
[offset_outer, offset_outer, 0],
[offset_inner, offset_outer, 0],
[-offset_outer, offset_inner, 0], # 1
[-offset_outer, offset_outer, 0],
[-offset_inner, offset_outer, 0],
[offset_outer, -offset_inner, 0], # 2
[offset_outer, -offset_outer, 0],
[offset_inner, -offset_outer, 0],
[-offset_outer, -offset_inner, 0], # 3
[-offset_outer, -offset_outer, 0],
[-offset_inner, -offset_outer, 0],
]
i_buffer = [(0, 1), (1, 2), (3, 4), (4, 5), (6, 7), (7, 8), (9, 10), (10, 11)]
for i in i_buffer:
create_corner_line(v_buffer[i[0]], v_buffer[i[1]])
def _create_rotation_manipulator(self):
self._rotation_gizmo = sc.Transform()
self._rotation_gizmo.visible = False
ARC_RADIUS = 100
intersection_thickness = self._settings.get(c.INTERSECTION_THICKNESS_SETTING)
with self._rotation_gizmo:
def make_arc(
radius, axis_vec, color, culling: sc.Culling, wireframe: bool = True, highlight_color=None, **kwargs
):
viz_color = [1.0, 1.0, 0.0, 0.3]
viz_arc = sc.Arc(radius, tesselation=36 * 3, color=viz_color, visible=False, **kwargs)
highlight_ctrl = HighlightControl(color=highlight_color)
return sc.Arc(
radius,
wireframe=wireframe,
tesselation=36 * 3,
thickness=LINE_THICKNESS,
intersection_thickness=intersection_thickness,
culling=culling,
color=color,
gestures=[
RotationGesture(self, axis_vec, viz_arc, highlight_ctrl),
HighlightGesture(highlight_ctrl),
],
**kwargs,
)
self._rotation_arc_x = make_arc(
ARC_RADIUS, [1, 0, 0], abgr_to_color(self.style["Rotate.Arc::x"]["color"]), sc.Culling.BACK, axis=0
)
self._rotation_arc_y = make_arc(
ARC_RADIUS, [0, 1, 0], abgr_to_color(self.style["Rotate.Arc::y"]["color"]), sc.Culling.BACK, axis=1
)
self._rotation_arc_z = make_arc(
ARC_RADIUS, [0, 0, 1], abgr_to_color(self.style["Rotate.Arc::z"]["color"]), sc.Culling.BACK, axis=2
)
self._screen_space_rotation_transform = sc.Transform(look_at=sc.Transform.LookAt.CAMERA)
with self._screen_space_rotation_transform:
self._rotation_arc_screen = make_arc(
ARC_RADIUS + 20,
[0, 0, 0],
abgr_to_color(self.style["Rotate.Arc::screen"]["color"]),
sc.Culling.NONE,
)
self._rotation_arc_free = make_arc(
ARC_RADIUS - 5,
None,
abgr_to_color(self.style["Rotate.Arc::free"]["color"]),
sc.Culling.NONE,
False,
highlight_color=cl("#8F8F8F8F"),
)
self._rotation_arc_free.visible = self._settings.get(c.FREE_ROTATION_ENABLED_SETTING)
def _create_scale_manipulator(self):
AXIS_LEN = 100
RECT_SIZE = 50
POINT_RADIUS = 7
intersection_thickness = self._settings.get(c.INTERSECTION_THICKNESS_SETTING)
self._scale_gizmo = sc.Transform()
self._scale_gizmo.visible = False
with self._scale_gizmo:
# Point
def make_point(translate, color):
scale_point_tr = sc.Transform(
transform=sc.Matrix44.get_translation_matrix(translate[0], translate[1], translate[2])
)
with scale_point_tr:
with sc.Transform(look_at=sc.Transform.LookAt.CAMERA):
scale_point = sc.Arc(POINT_RADIUS, tesselation=16, color=color)
return (scale_point_tr, scale_point)
(scale_point_tr_x, self._scale_point_x) = make_point(
(AXIS_LEN, 0, 0), abgr_to_color(self.style["Scale.Axis::x"]["color"])
)
(scale_point_tr_y, self._scale_point_y) = make_point(
(0, AXIS_LEN, 0), abgr_to_color(self.style["Scale.Axis::y"]["color"])
)
(scale_point_tr_z, self._scale_point_z) = make_point(
(0, 0, AXIS_LEN), abgr_to_color(self.style["Scale.Axis::z"]["color"])
)
# Line
def make_line(axis_vec, color, point_tr, point):
line_begin = [v * POINT_RADIUS for v in axis_vec]
line_end = [v * AXIS_LEN for v in axis_vec]
viz_line = sc.Line(line_begin, line_end, color=[0.5, 0.5, 0.5, 0.5], thickness=2)
scale_line = sc.Line(
line_begin,
line_end,
color=color,
thickness=LINE_THICKNESS,
intersection_thickness=intersection_thickness,
)
highlight_ctrl = HighlightControl([scale_line, point])
scale_line.gestures = [
ScaleGesture(self, axis_vec, highlight_ctrl, [viz_line], [point_tr]),
HighlightGesture(highlight_ctrl),
]
return (viz_line, scale_line)
(line_x, self._scale_line_x) = make_line(
[1, 0, 0], abgr_to_color(self.style["Scale.Axis::x"]["color"]), scale_point_tr_x, self._scale_point_x
)
(line_y, self._scale_line_y) = make_line(
[0, 1, 0], abgr_to_color(self.style["Scale.Axis::y"]["color"]), scale_point_tr_y, self._scale_point_y
)
(line_z, self._scale_line_z) = make_line(
[0, 0, 1], abgr_to_color(self.style["Scale.Axis::z"]["color"]), scale_point_tr_z, self._scale_point_z
)
# Rectangles
def make_plane(axis, axis_vec, color, lines, points):
axis_vec_t = [v * (AXIS_LEN - RECT_SIZE * 0.5) for v in axis_vec]
with sc.Transform(
transform=sc.Matrix44.get_translation_matrix(axis_vec_t[0], axis_vec_t[1], axis_vec_t[2])
):
highlight_ctrl = HighlightControl()
return sc.Rectangle(
axis=axis,
width=RECT_SIZE * 0.5,
height=RECT_SIZE * 0.5,
color=color,
gestures=[
ScaleGesture(self, axis_vec, highlight_ctrl, lines, points),
HighlightGesture(highlight_ctrl),
],
)
self._scale_plane_yz = make_plane(
0,
(0, 1, 1),
abgr_to_color(self.style["Scale.Axis::x"]["color"]),
[line_y, line_z],
[scale_point_tr_y, scale_point_tr_z],
)
self._scale_plane_zx = make_plane(
1,
(1, 0, 1),
abgr_to_color(self.style["Scale.Axis::y"]["color"]),
[line_x, line_z],
[scale_point_tr_x, scale_point_tr_z],
)
self._scale_plane_xy = make_plane(
2,
(1, 1, 0),
abgr_to_color(self.style["Scale.Axis::z"]["color"]),
[line_x, line_y],
[scale_point_tr_x, scale_point_tr_y],
)
# Points
with sc.Transform(look_at=sc.Transform.LookAt.CAMERA):
highlight_ctrl = HighlightControl()
self._scale_point = sc.Arc(
POINT_RADIUS, tesselation=16, color=abgr_to_color(self.style["Scale.Point"]["color"])
)
highlight_ctrl = HighlightControl(
[
self._scale_plane_yz,
self._scale_plane_zx,
self._scale_plane_xy,
self._scale_point,
]
)
self._scale_point.gestures = [
ScaleGesture(
self,
[1, 1, 1],
highlight_ctrl,
[line_x, line_y, line_z],
[scale_point_tr_x, scale_point_tr_y, scale_point_tr_z],
order=-1,
),
HighlightGesture(highlight_ctrl, order=-1),
]
def _create_toolbar(self):
self._toolbar_root = sc.Transform(look_at=sc.Transform.LookAt.CAMERA)
self._build_tools_widgets(self._toolbar_root)
def _get_toolbar_height_offset(self):
final_size = self._get_final_size()
return -120 * final_size - TOOLBAR_WIDGET_HEIGHT
def _build_tools_widgets(self, root: sc.Transform):
# clear existing widgets under root
root.clear()
self._toolbar_widget = None
if self._tool_registry is None:
return None
OPERATIONS = [Operation.TRANSLATE, Operation.ROTATE, Operation.SCALE]
tools = self._tool_registry.tools
tools_can_build: DefaultDict[Operation, List[Type[ToolbarTool]]] = defaultdict(list)
for op in OPERATIONS:
for tool in tools:
if tool.can_build(self, op):
tools_can_build[op].append(tool)
# Don't build toolbar if there's no tool
if len(tools_can_build) == 0:
return
with root:
# values for omni.ui inside of sc.Widget
CARAT_SIZE = 44
CRATE_SPACING = 10
TOOLBAR_HEIGHT = 44
TOOLBAR_SPACING = 5
height_offset = self._get_toolbar_height_offset()
toolbar_to_content_scale = 1.4 # a guesstimate
toolbar_widget_width = (
max(max([len(t) for t in tools_can_build.values()]), 3)
* ((TOOLBAR_HEIGHT + TOOLBAR_SPACING) * toolbar_to_content_scale)
+ TOOLBAR_SPACING
)
self._toolbar_height_offset_transform = sc.Transform(
sc.Matrix44.get_translation_matrix(0, height_offset + TOOLBAR_WIDGET_HEIGHT / 2, 0)
)
with self._toolbar_height_offset_transform:
# create a sc.Rectangle behind the widget to stop the click into Viewport with DummyGesture
dummy_transform = sc.Transform(sc.Matrix44.get_translation_matrix(0, 0, -1))
with dummy_transform:
dummy_rect = sc.Rectangle(
width=toolbar_widget_width,
height=TOOLBAR_WIDGET_HEIGHT,
color=0x0,
gestures=[
DummyGesture(self), # needed for VP1 only
DummyClickGesture(mouse_button=0),
DummyClickGesture(mouse_button=1),
DummyClickGesture(mouse_button=2),
], # hijack left/right/middle mouse click on the toolbar so it doesn't click into viewport
)
                # Build a set of scene items to emulate the tooltip for the omni.ui items inside of sc.Widget.
                # This works around the chopped-off tooltip in sc.Widget (OM-49626).
tooltip_style = ui.style.default["Tooltip"]
tooltip_color = tooltip_style["color"]
tooltip_bg_color = tooltip_style["background_color"]
tooltip_border_width = tooltip_style["border_width"]
with sc.Transform(
transform=sc.Matrix44.get_translation_matrix(-toolbar_widget_width / 2, TOOLBAR_HEIGHT, 10)
):
tooltip_transform = sc.Transform(visible=False)
with tooltip_transform:
tooltip_label = sc.Label("Tooltip", alignment=ui.Alignment.CENTER, color=tooltip_color, size=16)
tooltip_bg_outline = sc.Rectangle(
color=tooltip_color,
height=0,
width=0,
wireframe=True,
)
tooltip_bg = sc.Rectangle(
color=tooltip_bg_color,
height=0,
width=0,
thickness=tooltip_border_width,
)
def update_tooltip(label: str, visible: bool, x_offset: float):
tooltip_transform.visible = visible
if visible:
tooltip_transform.transform = sc.Matrix44.get_translation_matrix(
x_offset * toolbar_to_content_scale, 0, 0
)
padding = 10
tooltip_label.text = label
                        # divide by 1.2 because there is no way to get the computed width of the string and not every character has the same width
tooltip_bg.width = (tooltip_label.size * len(tooltip_label.text) + padding * 2) / 1.2
tooltip_bg.height = tooltip_label.size + padding * 2
tooltip_bg_outline.width = tooltip_bg.width + tooltip_border_width * 2
tooltip_bg_outline.height = tooltip_bg.height + tooltip_border_width * 2
self._toolbar_widget = sc.Widget(
toolbar_widget_width, TOOLBAR_WIDGET_HEIGHT, update_policy=sc.Widget.UpdatePolicy.ON_MOUSE_HOVERED
)
with self._toolbar_widget.frame:
def build_frame_header(manip: ProxyType[TransformManipulator], collapsed: bool, text: str):
header_stack = ui.HStack(spacing=8)
with header_stack:
with ui.VStack(width=0):
ui.Spacer()
styles = [
{
"": {"image_url": f"{ICON_FOLDER_PATH}/carat_close.svg"},
":hovered": {"image_url": f"{ICON_FOLDER_PATH}/carat_close_hover.svg"},
":pressed": {"image_url": f"{ICON_FOLDER_PATH}/carat_close_hover.svg"},
},
{
"": {"image_url": f"{ICON_FOLDER_PATH}/carat_open.svg"},
":hovered": {"image_url": f"{ICON_FOLDER_PATH}/carat_open_hover.svg"},
":pressed": {"image_url": f"{ICON_FOLDER_PATH}/carat_open_hover.svg"},
},
]
ui.Image(
width=CARAT_SIZE, height=CARAT_SIZE, style=styles[0] if collapsed else styles[1]
)
ui.Spacer()
if not collapsed:
operation = self.model.get_operation()
tools_stack = self._tools_stacks.get(operation, None)
self._header_line = ui.Line(
width=ui.Pixel(tools_stack.computed_width - CARAT_SIZE - CRATE_SPACING)
if tools_stack
else ui.Percent(100)
)
dummy_rect.height = TOOLBAR_WIDGET_HEIGHT
dummy_transform.transform = sc.Matrix44.get_translation_matrix(0, 0, -1)
else:
self._header_line = None
ui.Spacer(width=ui.Percent(100))
dummy_rect.height = TOOLBAR_WIDGET_HEIGHT * 0.4
dummy_transform.transform = sc.Matrix44.get_translation_matrix(
0, TOOLBAR_WIDGET_HEIGHT / 4, -1
)
with ui.HStack(style=get_default_toolbar_style(), content_clipping=True):
self._toolbar_collapsable_frame = ui.CollapsableFrame(
build_header_fn=lambda collapsed, text, manip=weakref.proxy(self): build_frame_header(
manip, collapsed, text
),
collapsed=self._toolbar_collapsed,
)
with self._toolbar_collapsable_frame:
with ui.ZStack():
def build_toolbar_frame(operation: Operation, visible: bool) -> ui.Frame:
toolbar_frame = ui.Frame(visible=visible)
with toolbar_frame:
with ui.ZStack():
with ui.VStack():
ui.Spacer(height=2)
with ui.ZStack():
bg = ui.Rectangle()
self._tools_stacks[operation] = ui.HStack(
width=0, spacing=TOOLBAR_SPACING
)
with self._tools_stacks[operation]:
ui.Spacer()
for tool in tools_can_build[operation]:
t = tool(
manipulator=weakref.proxy(self),
operation=operation,
toolbar_height=TOOLBAR_HEIGHT,
toolbar_payload=self._tool_button_additional_payload,
tooltip_update_fn=update_tooltip,
)
self._tools.append(t)
ui.Spacer()
def update_bg_width():
bg.width = ui.Pixel(
self._tools_stacks[operation].computed_width
)
if self._header_line:
self._header_line.width = ui.Pixel(
self._tools_stacks[operation].computed_width
- CARAT_SIZE
- CRATE_SPACING
)
self._tools_stacks[operation].set_computed_content_size_changed_fn(
update_bg_width
)
ui.Spacer(height=2)
return toolbar_frame
for op in OPERATIONS:
self._toolbars[op] = build_toolbar_frame(op, self._operation == op)
def _on_toolbar_changed(self):
if not self._toolbar_root:
return
for tool in self._tools:
tool.destroy()
self._tools.clear()
self._toolbars.clear()
self._build_tools_widgets(self._toolbar_root)
def on_model_updated(self, item):
self._update_from_model()
def _update_from_model(self):
if not self._transform:
return
if not self.model:
return
self._transform.transform = self.model.get_as_floats(self.model.get_item("transform"))
operation = self.model.get_operation()
if operation != self._operation:
self._operation = operation
self._translate_gizmo.visible = False
self._rotation_gizmo.visible = False
self._scale_gizmo.visible = False
for op, toolbar_frame in self._toolbars.items():
toolbar_frame.visible = False
if operation == Operation.TRANSLATE:
self._translate_gizmo.visible = True
elif operation == Operation.ROTATE:
self._rotation_gizmo.visible = True
elif operation == Operation.SCALE:
self._scale_gizmo.visible = True
if operation == Operation.NONE:
self._toolbar_root.visible = False
else:
self._toolbar_root.visible = True
if operation in self._toolbars:
self._toolbars[operation].visible = True
# update line width
if self._toolbar_collapsable_frame:
self._toolbar_collapsable_frame.rebuild()
# toolbar render must be manually updated
self.refresh_toolbar()
def _on_scale_changed(self, item, event_type):
scale = self._dict.get(item)
if scale != self._scale:
self._scale = scale
self._update_manipulator_final_size()
def _get_final_size(self):
return self._size * self._scale
def _update_manipulator_final_size(self):
final_size = self._get_final_size()
if self._transform_screen:
self._transform_screen.transform = sc.Matrix44.get_scale_matrix(final_size, final_size, final_size)
if self._toolbar_height_offset_transform:
self._toolbar_height_offset_transform.transform = sc.Matrix44.get_translation_matrix(
0, self._get_toolbar_height_offset(), 0
)
def _on_intersection_thickness_changed(self, item, event_type):
if self._transform:
thickness = self._dict.get(item)
self._translate_line_x.intersection_thickness = thickness
self._translate_line_y.intersection_thickness = thickness
self._translate_line_z.intersection_thickness = thickness
self._rotation_arc_x.intersection_thickness = thickness
self._rotation_arc_y.intersection_thickness = thickness
self._rotation_arc_z.intersection_thickness = thickness
self._rotation_arc_screen.intersection_thickness = thickness
self._scale_line_x.intersection_thickness = thickness
self._scale_line_y.intersection_thickness = thickness
self._scale_line_z.intersection_thickness = thickness
def _on_free_rotation_enabled_changed(self, item, event_type):
if self._transform:
enabled = self._dict.get(item)
self._rotation_arc_free.visible = enabled
def _update_axes_visibility(self):
if self._transform:
enable_x = bool(self.axes & Axis.X)
enable_y = bool(self.axes & Axis.Y)
enable_z = bool(self.axes & Axis.Z)
enable_screen = bool(self.axes & Axis.SCREEN)
self._translate_line_x.visible = (
self._translate_arrow_x.visible
) = self._rotation_arc_x.visible = self._scale_point_x.visible = self._scale_line_x.visible = enable_x
self._translate_line_y.visible = (
self._translate_arrow_y.visible
) = self._rotation_arc_y.visible = self._scale_point_y.visible = self._scale_line_y.visible = enable_y
self._translate_line_z.visible = (
self._translate_arrow_z.visible
) = self._rotation_arc_z.visible = self._scale_point_z.visible = self._scale_line_z.visible = enable_z
self._translate_plane_yz.visible = self._scale_plane_yz.visible = enable_y & enable_z
self._translate_plane_zx.visible = self._scale_plane_zx.visible = enable_z & enable_x
self._translate_plane_xy.visible = self._scale_plane_xy.visible = enable_x & enable_y
self._translate_point.visible = (
self._rotation_arc_screen.visible
) = self._scale_point.visible = enable_screen
self._rotation_arc_free.visible = self._settings.get(c.FREE_ROTATION_ENABLED_SETTING) and enable_screen
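# Usage sketch (illustrative only; `my_scene_view` is a hypothetical sc.SceneView set up elsewhere):
# create the manipulator inside the scene scope, restrict it to the X/Y axes, and consume the drag
# gestures with the simple gesture classes from simple_transform_model.py.
#
#     from omni.kit.manipulator.transform.simple_transform_model import (
#         SimpleTranslateChangedGesture,
#         SimpleRotateChangedGesture,
#         SimpleScaleChangedGesture,
#     )
#
#     with my_scene_view.scene:
#         manipulator = TransformManipulator(
#             size=1.0,
#             axes=Axis.X | Axis.Y,
#             gestures=[
#                 SimpleTranslateChangedGesture(),
#                 SimpleRotateChangedGesture(),
#                 SimpleScaleChangedGesture(),
#             ],
#         )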
| 44,834 | Python | 42.826979 | 175 | 0.505286 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/types.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from enum import Enum, Flag, auto
class Axis(Flag):
X = auto()
Y = auto()
Z = auto()
SCREEN = auto()
ALL = X | Y | Z | SCREEN
class Operation(Enum):
TRANSLATE = auto()
ROTATE = auto()
SCALE = auto()
NONE = auto()
TRANSLATE_DELTA = auto()
ROTATE_DELTA = auto()
SCALE_DELTA = auto()
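# Example: Axis is a Flag, so axes can be combined to request a planar or single-axis manipulator,
# e.g. `Axis.X | Axis.Y` passed as the `axes` argument of TransformManipulator; setting the model's
# operation to Operation.NONE hides all gizmos.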
| 762 | Python | 25.310344 | 76 | 0.691601 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/gestures.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Sequence, Tuple
if TYPE_CHECKING:
from .manipulator import TransformManipulator
import copy
import math
from functools import lru_cache
import carb.input
import carb.settings
import numpy as np
from omni.ui import color as cl
from omni.ui import scene as sc
from .settings_constants import c as CONSTANTS
@lru_cache()
def __get_input() -> carb.input.IInput:
return carb.input.acquire_input_interface()
def _is_alt_down() -> bool:
input = __get_input()
return (
input.get_keyboard_value(None, carb.input.KeyboardInput.LEFT_ALT)
+ input.get_keyboard_value(None, carb.input.KeyboardInput.RIGHT_ALT)
> 0
)
class PreventOthers(sc.GestureManager):
"""
Manager makes TransformGesture the priority gesture
"""
def can_be_prevented(self, gesture):
# Never prevent in the middle or at the end of drag
return (
gesture.state != sc.GestureState.CHANGED
and gesture.state != sc.GestureState.ENDED
and gesture.state != sc.GestureState.CANCELED
)
def should_prevent(self, gesture, preventer):
if (
(isinstance(preventer, TransformGesture) or isinstance(preventer, DummyClickGesture))
and (preventer.state == sc.GestureState.BEGAN or preventer.state == sc.GestureState.CHANGED)
and not _is_alt_down() # don't prevent other gesture if alt (camera manip) is down
):
if isinstance(gesture, TransformGesture):
if gesture.order > preventer.order:
return True
elif gesture.order == preventer.order:
# Transform vs Transform depth test
return gesture.gesture_payload.ray_distance > preventer.gesture_payload.ray_distance
else:
# Transform is the priority when it's against any other gesture
return True
return super().should_prevent(gesture, preventer)
class HoverDepthTest(sc.GestureManager):
"""
Manager that is doing depth test for hover gestures
"""
def can_be_prevented(self, gesture):
return isinstance(gesture, HighlightGesture)
def should_prevent(self, gesture, preventer):
if (
isinstance(gesture, HighlightGesture) and not _is_alt_down()
): # don't prevent other gesture if alt (camera manip) is down
if isinstance(preventer, HighlightGesture):
if preventer.state == sc.GestureState.BEGAN or preventer.state == sc.GestureState.CHANGED:
if gesture.order > preventer.order:
return True
elif gesture.order == preventer.order:
# Hover vs Hover depth test
return gesture.gesture_payload.ray_distance > preventer.gesture_payload.ray_distance
return False
class TransformDragGesturePayload(sc.AbstractGesture.GesturePayload):
def __init__(self, base: sc.AbstractGesture.GesturePayload, changing_item: sc.AbstractManipulatorItem):
super().__init__(base.item_closest_point, base.ray_closest_point, base.ray_distance)
self.changing_item = changing_item
@dataclass(init=False)
class TranslateDragGesturePayload(TransformDragGesturePayload):
"""
    Fields carried by this payload:
    - axis (float3): The axis along which the translation happened.
    - moved_delta (float3): Moved delta since the last change, for each axis.
    - moved (float3): Total moved distance since the beginning of the drag.
"""
axis: Sequence[float]
moved_delta: Sequence[float]
moved: Sequence[float]
@dataclass(init=False)
class RotateDragGesturePayload(TransformDragGesturePayload):
"""
    Fields carried by this payload:
    - axis (float3): The axis around which the rotation happened.
    - angle_delta (float): Rotated delta since the last change.
    - angle (float): Total angle rotated since the beginning of the drag.
- screen_space (bool): If the rotation happened in screen space.
- free_rotation (bool): If the rotation is a free rotation (dragging on the center sphere).
"""
axis: Sequence[float]
angle_delta: float
angle: float
screen_space: bool
free_rotation: bool
@dataclass(init=False)
class ScaleDragGesturePayload(TransformDragGesturePayload):
"""
    Fields carried by this payload:
- axis (float3): The axis along which the scaling happened.
- scale (float3): Scaled value on each axis.
"""
axis: Sequence[float]
scale: Sequence[float]
class TransformChangedGesture(sc.ManipulatorGesture):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def process(self):
if not self.gesture_payload:
return
# Redirection to methods
if self.state == sc.GestureState.BEGAN:
self.on_began()
elif self.state == sc.GestureState.CHANGED:
self.on_changed()
elif self.state == sc.GestureState.ENDED:
self.on_ended()
elif self.state == sc.GestureState.CANCELED:
self.on_canceled()
# Public API:
def on_began(self):
...
def on_changed(self):
...
def on_ended(self):
...
def on_canceled(self):
...
class TranslateChangedGesture(TransformChangedGesture):
...
class RotateChangedGesture(TransformChangedGesture):
...
class ScaleChangedGesture(TransformChangedGesture):
...
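# Illustrative sketch (not a gesture shipped by this module): consumers subclass one of the public
# *ChangedGesture classes above, check the typed payload, and forward the values to their model,
# in the same way the Simple*ChangedGesture classes in simple_transform_model.py do.
#
#     class PrintTranslateGesture(TranslateChangedGesture):
#         def on_changed(self):
#             if not isinstance(self.gesture_payload, TranslateDragGesturePayload):
#                 return
#             payload = self.gesture_payload
#             print(payload.changing_item, payload.axis, payload.moved_delta, payload.moved)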
####################
# Internal gestures
####################
class HighlightControl:
# Use static member to track across ALL HighlightControl instances,
# because more than one HighlightControl can highlight the same widget (e.g. translate center and plane quads)
__status_tracker = defaultdict(int)
__original_colors = {}
def __init__(self, items=None, color=None):
self._items = items
# From rendering\source\plugins\carb.imguizmo\ImGuizmo.cpp
# static const ImU32 selectionColor = 0x8A1CE6E6;
self._selection_color = color if color else cl("#e6e61c8a")
def highlight(self, sender):
if self._items is None:
HighlightControl.__status_tracker[sender] += 1
if HighlightControl.__status_tracker[sender] == 1:
self._highlight(sender)
else:
for item in self._items:
HighlightControl.__status_tracker[item] += 1
if HighlightControl.__status_tracker[item] == 1:
self._highlight(item)
def dehighlight(self, sender):
if self._items is None:
HighlightControl.__status_tracker[sender] -= 1
if HighlightControl.__status_tracker[sender] <= 0:
self._dehighlight(sender)
HighlightControl.__status_tracker.pop(sender)
else:
for item in self._items:
HighlightControl.__status_tracker[item] -= 1
if HighlightControl.__status_tracker[item] <= 0:
self._dehighlight(item)
HighlightControl.__status_tracker.pop(item)
def _highlight(self, item):
if hasattr(item, "color"):
HighlightControl.__original_colors[item] = item.color
item.color = self._selection_color
if hasattr(item, "colors"):
HighlightControl.__original_colors[item] = item.colors.copy()
colors = item.colors.copy()
for i in range(len(colors)):
colors[i] = self._selection_color
item.colors = colors
def _dehighlight(self, item):
if hasattr(item, "color"):
item.color = HighlightControl.__original_colors[item]
if hasattr(item, "colors"):
item.colors = HighlightControl.__original_colors[item]
class HighlightGesture(sc.HoverGesture):
def __init__(self, highlight_ctrl: HighlightControl, order: int = 0):
super().__init__(manager=HoverDepthTest())
self._highlight_ctrl = highlight_ctrl
self._begun = False
self._order = order
@property
def order(self) -> int:
return self._order
def process(self):
if isinstance(self.sender, sc.Arc) and self.sender.gesture_payload.culled:
            # Turn down the gesture if it is on the wrong side of the arc
if self.state == sc.GestureState.BEGAN:
self.state = sc.GestureState.POSSIBLE
if self.state == sc.GestureState.CHANGED:
self.state = sc.GestureState.ENDED
if _is_alt_down():
# Turn down the gesture if manipulating camera
# If gesture already began, don't turn it down as ALT won't trigger camera manipulation at this point
if self.state == sc.GestureState.BEGAN:
self.state = sc.GestureState.POSSIBLE
if self.state == sc.GestureState.BEGAN:
self._on_began()
elif self.state == sc.GestureState.ENDED:
self._on_ended()
elif self.state == sc.GestureState.CANCELED:
self._on_canceled()
elif self.state == sc.GestureState.CHANGED:
self._on_changed()
elif self.state == sc.GestureState.PREVENTED:
self._on_prevented()
super().process()
def _on_began(self):
self._begun = True
self._highlight_ctrl.highlight(self.sender)
def _on_ended(self):
if self._begun:
self._begun = False
self._highlight_ctrl.dehighlight(self.sender)
def _on_changed(self):
...
def _on_canceled(self):
self._on_ended()
def _on_prevented(self):
self._on_ended()
class TransformGesture(sc.DragGesture):
def __init__(
self, manipulator: TransformManipulator, highlight_ctrl: HighlightControl, order: int = 0, *args, **kwargs
):
super().__init__(*args, **kwargs)
self._manipulator = manipulator
self._highlight_ctrl = highlight_ctrl
self._order = order
self._toolbar_was_visible = None
self._began = False
@property
def order(self) -> int:
return self._order
def process(self):
if _is_alt_down():
# Turn down the gesture if manipulating camera
# If gesture already began, don't turn it down as ALT won't trigger camera manipulation at this point
if self.state == sc.GestureState.BEGAN:
self.state = sc.GestureState.POSSIBLE
if self.state == sc.GestureState.BEGAN:
self._on_began()
elif self.state == sc.GestureState.ENDED:
self._on_ended()
elif self.state == sc.GestureState.CANCELED:
self._on_canceled()
elif self.state == sc.GestureState.CHANGED:
self._on_changed()
elif self.state == sc.GestureState.PREVENTED:
self._on_prevented()
super().process()
def _on_began(self):
self._began = True
self._toolbar_was_visible = self._manipulator.toolbar_visible
self._manipulator.toolbar_visible = False
def _on_ended(self):
if self._began and self._toolbar_was_visible is not None:
self._manipulator.toolbar_visible = self._toolbar_was_visible
self._toolbar_was_visible = None
self._began = False
def _on_changed(self):
...
def _on_canceled(self):
if self._began:
self._on_ended()
if self._toolbar_was_visible is not None:
self._manipulator.toolbar_visible = self._toolbar_was_visible
self._toolbar_was_visible = None
def _on_prevented(self):
if self._began:
self._on_ended()
class TranslateGesture(TransformGesture):
def __init__(self, manipulator: TransformManipulator, axis, highlight_ctrl: HighlightControl, order: int = 0):
super().__init__(manipulator=manipulator, highlight_ctrl=highlight_ctrl, order=order, manager=PreventOthers())
self._axis = axis
def _on_began(self):
super()._on_began()
self._moved = [0, 0, 0] # moved since begin edit, non-snapped
self._moved_snap = [0, 0, 0]
self._changing_item = self._manipulator.model.get_item("translate")
payload = TranslateDragGesturePayload(self.gesture_payload, self._changing_item)
payload.axis = self._axis
self._manipulator._process_gesture(TranslateChangedGesture, self.state, payload)
if self._highlight_ctrl:
self._highlight_ctrl.highlight(self.sender)
def _on_changed(self):
super()._on_changed()
moved_delta = self.sender.gesture_payload.moved
moved_delta = self._manipulator._transform.transform_space(
sc.Space.WORLD, sc.Space.OBJECT, [moved_delta[0], moved_delta[1], moved_delta[2], 0]
)
moved_delta = moved_delta[0:3]
moved_prev = self._moved_snap[:]
self._moved = [sum(x) for x in zip(self._moved, moved_delta)]
self._moved_snap = self._moved.copy()
snap = self._manipulator.model.get_snap(self._changing_item)
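        # Quantize the accumulated, non-snapped motion to whole snap increments per axis;
        # only the delta that crosses a snap boundary is forwarded in the gesture payload below.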
if snap is not None:
for i, snap_axis in enumerate(snap):
if snap_axis:
self._moved_snap[i] = math.copysign(abs(self._moved[i]) // snap_axis * snap_axis, self._moved[i])
moved_delta = [m - mp for m, mp in zip(self._moved_snap, moved_prev)]
if moved_delta != [0, 0, 0]:
new_gesture_payload = TranslateDragGesturePayload(self.gesture_payload, self._changing_item)
new_gesture_payload.axis = self._axis
new_gesture_payload.moved_delta = moved_delta
new_gesture_payload.moved = self._moved_snap
self._manipulator._process_gesture(TranslateChangedGesture, self.state, new_gesture_payload)
def _on_ended(self):
super()._on_ended()
payload = TranslateDragGesturePayload(self.gesture_payload, self._changing_item)
payload.axis = self._axis
self._manipulator._process_gesture(TranslateChangedGesture, self.state, payload)
if self._highlight_ctrl:
self._highlight_ctrl.dehighlight(self.sender)
def _on_canceled(self):
self._on_ended()
class RotationGesture(TransformGesture):
def __init__(
self, manipulator: TransformManipulator, axis, viz_arc, highlight_ctrl: HighlightControl, order: int = 0
):
super().__init__(manipulator=manipulator, highlight_ctrl=highlight_ctrl, order=order, manager=PreventOthers())
self._viz_arc = viz_arc
self._axis = axis
self._angle = 0
self._last_free_intersect = None
self._settings = carb.settings.get_settings()
def process(self):
if isinstance(self.sender, sc.Arc) and self.sender.gesture_payload.culled:
            # Turn down the gesture if it is on the wrong side of the arc
if self.state == sc.GestureState.BEGAN:
self.state = sc.GestureState.POSSIBLE
super().process()
def _on_began(self):
super()._on_began()
        self._original_thickness = self.sender.thickness
        self.sender.thickness = self._original_thickness + 1
self._begin_angle = self.sender.gesture_payload.angle
if self._viz_arc:
self._viz_arc.begin = self._begin_angle
if self._axis is None:
self._last_free_intersect, _ = self._sphere_intersect()
self._changing_item = self._manipulator.model.get_item("rotate")
self._manipulator._process_gesture(
RotateChangedGesture, self.state, RotateDragGesturePayload(self.gesture_payload, self._changing_item)
)
if self._highlight_ctrl:
self._highlight_ctrl.highlight(self.sender)
def _on_changed(self):
super()._on_changed()
prev_angle = self._angle
self._angle = self.sender.gesture_payload.angle - self._begin_angle
self._angle = self._angle / math.pi * 180 # convert to degree
snap = self._manipulator.model.get_snap(self._changing_item)
if snap: # Not None and not zero
self._angle = self._angle // snap * snap
angle_delta = self._angle - prev_angle
else:
angle_delta = self.sender.gesture_payload.moved_angle
angle_delta = angle_delta / math.pi * 180 # convert to degree
screen_space = False
free_rotation = False
# handle the free rotation mode when grabbing the center of rotation manipulator
if self._axis is None:
axis, angle_delta = self._handle_free_rotation()
if axis is not None and angle_delta is not None:
free_rotation = True
else:
# early return, no gesture is generated for invalid free rotation
return
elif self._axis == [0, 0, 0]:
axis = self.sender.transform_space(sc.Space.OBJECT, sc.Space.WORLD, [0, 0, 1, 0])
screen_space = True
else:
axis = self._axis
# normalize
axis_len = math.sqrt(axis[0] ** 2 + axis[1] ** 2 + axis[2] ** 2)
axis = [v / axis_len for v in axis]
if not free_rotation:
            # confine the angle to the [-180, 180) range so the overlay doesn't block the manipulated object too much
angle = (self._angle - 360.0 * math.floor(self._angle / 360.0 + 0.5)) / 180.0 * math.pi
self._viz_arc.end = angle + self._viz_arc.begin
self._viz_arc.visible = True
if angle_delta:
new_gesture_payload = RotateDragGesturePayload(self.gesture_payload, self._changing_item)
new_gesture_payload.axis = axis
new_gesture_payload.angle = self._angle
new_gesture_payload.angle_delta = angle_delta
new_gesture_payload.screen_space = screen_space
new_gesture_payload.free_rotation = free_rotation
self._manipulator._process_gesture(RotateChangedGesture, self.state, new_gesture_payload)
def _on_ended(self):
super()._on_ended()
        self.sender.thickness = self._original_thickness
if self._viz_arc:
self._viz_arc.visible = False
self._manipulator._process_gesture(
RotateChangedGesture, self.state, RotateDragGesturePayload(self.gesture_payload, self._changing_item)
)
if self._highlight_ctrl:
self._highlight_ctrl.dehighlight(self.sender)
if self._axis is None:
self._last_free_intersect = None
def _on_canceled(self):
self._on_ended()
def _sphere_intersect(self) -> Tuple[List[float], bool]:
intersect = self.sender.gesture_payload.ray_closest_point
ndc_location = self.sender.transform_space(sc.Space.WORLD, sc.Space.NDC, intersect)
ndc_ray_origin = copy.copy(ndc_location)
ndc_ray_origin[2] = 0.0
object_ray_origin = np.array(self.sender.transform_space(sc.Space.NDC, sc.Space.OBJECT, ndc_ray_origin))
object_ray_end = np.array(self.sender.transform_space(sc.Space.NDC, sc.Space.OBJECT, ndc_location))
dir = object_ray_end - object_ray_origin
dir = dir / np.linalg.norm(dir)
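        # Ray-sphere intersection: solve |origin + t * dir|^2 = radius^2, a quadratic
        # a*t^2 + b*t + c = 0 with the coefficients computed below.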
a = np.dot(dir, dir)
b = 2.0 * np.dot(object_ray_origin, dir)
c = np.dot(object_ray_origin, object_ray_origin) - self.sender.radius * self.sender.radius
discriminant = b * b - 4 * a * c
if discriminant >= 0.0:
sqrt_discriminant = math.sqrt(discriminant)
t = (-b - sqrt_discriminant) / (2.0 * a)
if t >= 0:
point = object_ray_origin + t * dir
world = self.sender.transform_space(sc.Space.OBJECT, sc.Space.WORLD, point.tolist())
return world, True
        # If no intersection, get the nearest point on the ray to the sphere
nearest_dist = np.dot(-object_ray_origin, dir)
point = object_ray_origin + dir * nearest_dist
world = self.sender.transform_space(sc.Space.OBJECT, sc.Space.WORLD, point.tolist())
return world, False
def _handle_free_rotation(self) -> Tuple[List[float], float]:
with np.errstate(all="raise"):
try:
axis = None
angle_delta = None
nearest_ray_point, intersected = self._sphere_intersect()
last_intersect = copy.copy(self._last_free_intersect)
self._last_free_intersect = nearest_ray_point
mode = self._settings.get(CONSTANTS.FREE_ROTATION_TYPE_SETTING)
if intersected or mode == CONSTANTS.FREE_ROTATION_TYPE_CLAMPED:
# Use clamped mode as long as ray intersects with sphere to maintain consistent behavior when dragging within the sphere
# clamped at the edge of free rotation sphere
if nearest_ray_point is not None and last_intersect is not None:
# calculate everything in world space
origin = np.array(self.sender.transform_space(sc.Space.OBJECT, sc.Space.WORLD, [0, 0, 0]))
vec1 = last_intersect - origin
vec1 = vec1 / np.linalg.norm(vec1)
vec2 = nearest_ray_point - origin
vec2 = vec2 / np.linalg.norm(vec2)
axis = np.cross(vec1, vec2)
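                        # The cross product of the two unit vectors gives the rotation axis;
                        # its magnitude is sin(angle), so it degenerates for (anti)parallel vectors.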
mag = np.linalg.norm(axis)
if mag > 0:
axis = axis / mag
dot = np.dot(vec1, vec2)
if dot >= -1 and dot <= 1:
angle_delta = np.arccos(dot) / math.pi * 180
if not math.isfinite(angle_delta):
raise FloatingPointError()
elif mode == CONSTANTS.FREE_ROTATION_TYPE_CONTINUOUS:
# Continuous mode. Only fallback to it if not intersected and continuous_mode is on.
# extend beyond sphere edge for free rotation based on moved distance
moved = np.array(self.sender.gesture_payload.moved)
forward = self.sender.transform_space(sc.Space.OBJECT, sc.Space.WORLD, [0, 0, 1, 0])
forward = np.array([forward[0], forward[1], forward[2]])
moved_len = math.sqrt(np.dot(moved, moved))
if moved_len > 0:
moved_dir = moved / moved_len
forward = forward / np.linalg.norm(forward)
axis = np.cross(forward, moved_dir).tolist()
angle_delta = moved_len
except Exception:
axis = None
angle_delta = None
finally:
return axis, angle_delta
class ScaleGesture(TransformGesture):
def __init__(
self,
manipulator: TransformManipulator,
axis,
highlight_ctrl: HighlightControl,
handle_lines=[],
handle_dots=[],
order: int = 0,
):
super().__init__(manipulator=manipulator, highlight_ctrl=highlight_ctrl, order=order, manager=PreventOthers())
self._axis = axis
self._handle_lines = handle_lines
self._handle_dots = handle_dots
def _get_dir_and_length_from_origin(self, point):
        # transform the caller-supplied point (item or ray closest point) from world to object space
        point = self.sender.transform_space(sc.Space.WORLD, sc.Space.OBJECT, point)
transform = self._manipulator.model.get_as_floats(self._manipulator.model.get_item("transform"))
origin_in_local = self.sender.transform_space(
sc.Space.WORLD, sc.Space.OBJECT, [transform[12], transform[13], transform[14]]
)
diff = [s - o for s, o in zip(point, origin_in_local)]
length = math.sqrt(diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2)
dir = [x / length for x in diff]
return point, dir, length
def _on_began(self):
super()._on_began()
self._original_ends = [line.end.copy() for line in self._handle_lines]
self._original_dot_transform = [
[dot.transform[12], dot.transform[13], dot.transform[14]] for dot in self._handle_dots
]
self._start_point, self._direction, self._start_length = self._get_dir_and_length_from_origin(
self.sender.gesture_payload.item_closest_point
)
self._accumulated_distance = self._start_length
self._changing_item = self._manipulator.model.get_item("scale")
self._scale_prev = None
self._manipulator._process_gesture(
ScaleChangedGesture, self.state, ScaleDragGesturePayload(self.gesture_payload, self._changing_item)
)
if self._highlight_ctrl:
self._highlight_ctrl.highlight(self.sender)
self._first_change = False
def _on_changed(self):
super()._on_changed()
if isinstance(self.sender, sc.Arc) and self._first_change is False:
            # handle the omni scale (center point) differently as the start point is very close to the origin,
            # so defer initialization until the cursor is far enough from the center
if self.sender.gesture_payload.distance_to_center > 10:
start_point, direction, length = self._get_dir_and_length_from_origin(
self.sender.gesture_payload.ray_closest_point
)
self._start_point = start_point
self._direction = direction
self._accumulated_distance = self._start_length = length * 5 # make the step smaller
self._first_change = True
else:
# early return. don't start change before we can determine those values
return
# use object space delta
moved = self.sender.gesture_payload.moved
moved = self.sender.transform_space(sc.Space.WORLD, sc.Space.OBJECT, [moved[0], moved[1], moved[2], 0])
distance = moved[0] * self._direction[0] + moved[1] * self._direction[1] + moved[2] * self._direction[2]
self._accumulated_distance += distance
if self._accumulated_distance == 0:
self._accumulated_distance = 0.001
scale = self._accumulated_distance / self._start_length
snap = self._manipulator.model.get_snap(self._changing_item)
if snap: # Not None and not zero
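            # Snap the scale factor to 1 + k * snap steps; when the factor's magnitude is below
            # one snap step it clamps to +/-1 (no scaling), preserving the drag direction's sign.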
if abs(scale) < snap:
scale = math.copysign(1, scale)
else:
snap_scale = (abs(scale) - 1) // snap * snap + 1
scale = math.copysign(snap_scale, scale)
for i, line in enumerate(self._handle_lines):
line.end = [
self._original_ends[i][0] * scale,
self._original_ends[i][1] * scale,
self._original_ends[i][2] * scale,
]
for i, dot in enumerate(self._handle_dots):
t = dot.transform
t[12] = self._original_dot_transform[i][0] * scale
t[13] = self._original_dot_transform[i][1] * scale
t[14] = self._original_dot_transform[i][2] * scale
dot.transform = t
if scale != self._scale_prev:
self._scale_prev = scale
new_gesture_payload = ScaleDragGesturePayload(self.gesture_payload, self._changing_item)
new_gesture_payload.axis = self._axis
new_gesture_payload.scale = scale
self._manipulator._process_gesture(ScaleChangedGesture, self.state, new_gesture_payload)
def _on_ended(self):
super()._on_ended()
for i, line in enumerate(self._handle_lines):
line.end = self._original_ends[i]
for i, dot in enumerate(self._handle_dots):
dot.transform[12] = self._original_dot_transform[i][0]
dot.transform[13] = self._original_dot_transform[i][1]
dot.transform[14] = self._original_dot_transform[i][2]
self._manipulator._process_gesture(
ScaleChangedGesture, self.state, ScaleDragGesturePayload(self.gesture_payload, self._changing_item)
)
if self._highlight_ctrl:
self._highlight_ctrl.dehighlight(self.sender)
def _on_canceled(self):
self._on_ended()
# The sole purpose of this dummy gesture is to prevent further viewport drag interaction by emitting a TransformChangedGesture,
# which:
# Turns off selection rect in VP1
# Prevents other gestures in VP2
class DummyGesture(TransformGesture):
def __init__(self, manipulator: TransformManipulator):
# toolbar always has smaller order value to take precedence over manipulator handles.
super().__init__(manipulator, None, order=-999, manager=PreventOthers())
def _on_began(self):
self._manipulator._process_gesture(
TransformChangedGesture, self.state, TransformDragGesturePayload(self.gesture_payload, None)
)
def _on_changed(self):
...
def _on_ended(self):
self._manipulator._process_gesture(
TransformChangedGesture, self.state, TransformDragGesturePayload(self.gesture_payload, None)
)
def _on_canceled(self):
self._on_ended()
# The sole purpose of this dummy gesture is to prevent further viewport click interaction which:
# Prevents other gestures in VP2 (e.g. right click on manipulator toolbar won't further trigger viewport context menu)
# (no VP1 support for this!)
class DummyClickGesture(sc.ClickGesture):
def __init__(self, mouse_button: int):
super().__init__(mouse_button=mouse_button, manager=PreventOthers())
@property
def order(self):
return -1000
| 30,371 | Python | 37.348485 | 140 | 0.60594 |
omniverse-code/kit/exts/omni.kit.manipulator.transform/omni/kit/manipulator/transform/tests/test_manipulator_transform.py | ## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from functools import lru_cache
from carb.input import MouseEventType
from omni.ui.tests.test_base import OmniUiTest
from pathlib import Path
from omni.ui import scene as sc
from omni.ui import color as cl
import carb
import omni.kit
import omni.kit.app
import omni.ui as ui
import omni.appwindow
import carb.windowing
from ..manipulator import TransformManipulator
from ..manipulator import Axis
from ..simple_transform_model import SimpleRotateChangedGesture
from ..simple_transform_model import SimpleScaleChangedGesture
from ..simple_transform_model import SimpleTranslateChangedGesture
from ..types import Operation
CURRENT_PATH = Path(carb.tokens.get_tokens_interface().resolve("${omni.kit.manipulator.transform}/data"))
# TODO: use ui_test when it's moved to kit-sdk
@lru_cache()
def _get_windowing() -> carb.windowing.IWindowing:
return carb.windowing.acquire_windowing_interface()
# TODO: use ui_test when it's moved to kit-sdk
@lru_cache()
def _get_input_provider() -> carb.input.InputProvider:
return carb.input.acquire_input_provider()
# TODO: use ui_test when it's moved to kit-sdk
def emulate_mouse(event_type: MouseEventType, pos):
app_window = omni.appwindow.get_default_app_window()
mouse = app_window.get_mouse()
window_width = ui.Workspace.get_main_window_width()
window_height = ui.Workspace.get_main_window_height()
_get_input_provider().buffer_mouse_event(
mouse, event_type, (pos[0] / window_width, pos[1] / window_height), 0, pos
)
if event_type == MouseEventType.MOVE:
_get_windowing().set_cursor_position(app_window.get_window(), (int(pos[0]), int(pos[1])))
class TestTransform(OmniUiTest):
# Before running each test
async def setUp(self):
await super().setUp()
self._golden_img_dir = CURRENT_PATH.absolute().resolve().joinpath("tests")
# After running each test
async def tearDown(self):
self._golden_img_dir = None
await super().tearDown()
async def test_transform(self):
window = await self.create_test_window(width=512, height=256)
with window.frame:
# Camera matrices
projection = [1e-2, 0, 0, 0]
projection += [0, 1e-2, 0, 0]
projection += [0, 0, -2e-7, 0]
projection += [0, 0, 1, 1]
view = sc.Matrix44.get_translation_matrix(0, 0, 5)
# Selected point
self._selected_item = None
def _on_point_clicked(shape):
"""Called when the user clicks the point"""
self._selected_item = shape
pos = self._selected_item.positions[0]
model = self._manipulator.model
model.set_floats(model.get_item("translate"), [pos[0], pos[1], pos[2]])
def _on_item_changed(model, item):
"""
Called when the user moves the manipulator. We need to move
the point here.
"""
if self._selected_item is not None:
if item.operation == Operation.TRANSLATE:
self._selected_item.positions = model.get_as_floats(item)
scene_view = sc.SceneView(sc.CameraModel(projection, view))
with scene_view.scene:
# The manipulator
self._manipulator = TransformManipulator(
size=1,
axes=Axis.ALL & ~Axis.Z & ~Axis.SCREEN,
gestures=[
SimpleTranslateChangedGesture(),
SimpleRotateChangedGesture(),
SimpleScaleChangedGesture(),
],
)
self._sub = \
self._manipulator.model.subscribe_item_changed_fn(_on_item_changed)
# 5 points
select = sc.ClickGesture(_on_point_clicked)
sc.Points([[-5, 5, 0]], colors=[ui.color.white], sizes=[10], gesture=select)
sc.Points([[5, 5, 0]], colors=[ui.color.white], sizes=[10], gesture=select)
sc.Points([[5, -5, 0]], colors=[ui.color.white], sizes=[10], gesture=select)
sc.Points([[-5, -5, 0]], colors=[ui.color.white], sizes=[10], gesture=select)
self._selected_item = sc.Points(
[[0, 0, 0]], colors=[ui.color.white], sizes=[10], gesture=select
)
for _ in range(30):
await omni.kit.app.get_app().next_update_async()
await self.finalize_test(golden_img_dir=self._golden_img_dir)
async def test_hovering(self):
app_window = omni.appwindow.get_default_app_window()
dpi_scale = ui.Workspace.get_dpi_scale()
window = await self.create_test_window()
with window.frame:
# Camera matrices
projection = [1e-2, 0, 0, 0]
projection += [0, 1e-2, 0, 0]
projection += [0, 0, 2e-7, 0]
projection += [0, 0, 1, 1]
view = sc.Matrix44.get_translation_matrix(0, 0, -5)
# Selected point
self._selected_item = None
scene_view = sc.SceneView(sc.CameraModel(projection, view))
with scene_view.scene:
# The manipulator
self._manipulator = TransformManipulator(
size=1,
axes=Axis.ALL & ~Axis.Z & ~Axis.SCREEN,
)
await omni.kit.app.get_app().next_update_async()
# Get NDC position of World-space point
transformed = scene_view.scene.transform_space(sc.Space.WORLD, sc.Space.NDC, (30, 0, 0))
# Convert it to the Screen space
transformed = [transformed[0] * 0.5 + 0.5, transformed[1] * 0.5 + 0.5]
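        # NDC x/y are in [-1, 1]; the mapping above converts them to [0, 1] normalized
        # window coordinates before scaling by the frame size below.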
# Unblock mouse
app_window.set_input_blocking_state(carb.input.DeviceType.MOUSE, False)
# Go to the computed position
x = dpi_scale * (window.frame.computed_width * transformed[0] + window.frame.screen_position_x)
y = dpi_scale * (window.frame.computed_height * transformed[1] + window.frame.screen_position_y)
emulate_mouse(MouseEventType.MOVE, (x, y))
for i in range(5):
await omni.kit.app.get_app().next_update_async()
await self.finalize_test(golden_img_dir=self._golden_img_dir)
| 6,769 | Python | 38.132948 | 105 | 0.603486 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/sync_strategy.py | import enum
import omni.timeline
from carb import log_error
from typing import List
from .global_time import get_global_time_s
from .timeline_event import TimelineEvent
class SyncStrategyType(enum.Enum):
STRICT = 0
"""
Moves the time only when a new time update is received.
No difference is allowed from the last known current time.
"""
DIFFERENCE_LIMITED = 1
"""
When the timeline is not playing, no difference is allowed from the last
known current time.
When the timeline is playing, the listener's current time may differ from
that of the presenter. The maximum difference in seconds can be set in the
"max_time_diff_sec" parameter of the SyncStrategyDescriptor.
If the allowed difference is zero, this strategy falls back to "STRICT" mode.
Otherwise,
- the listener may run freely even if no new time update is received
until it reaches the maximum allowed difference, then it pauses to wait
for the presenter,
    - when the listener is behind the presenter by more than the maximum allowed
    difference, it temporarily speeds up its playback to catch up
    with the presenter instead of jumping directly to the last known current time.
"""
LOOSE = 2
"""
Synchronizes current time only when the timeline is not playing.
    Useful when all we care about is whether the presenter started playing and what
    the current time was before that.
Time difference can be unlimited if played for long.
"""
class SyncStrategyDescriptor:
def __init__(
self,
strategy_type: SyncStrategyType,
max_time_diff_sec: float = 1
) -> None:
self.strategy_type = strategy_type
self.max_time_diff_sec = max_time_diff_sec
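# Illustrative example (not used by the extension itself): a listener that may drift
# up to half a second behind the presenter while playing could be configured with
#   SyncStrategyDescriptor(SyncStrategyType.DIFFERENCE_LIMITED, max_time_diff_sec=0.5)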
class TimeInterpolator:
# wc_: wall clock time, sim_: simulation time
def __init__(self, sim_start = 0.0, sim_target = 0.0, wc_duration = 1.0):
self.start = sim_start
self.target = sim_target
        # clamp to a small positive duration to avoid division by zero in get_interpolated
        self.wc_duration = max(min((sim_target - sim_start) / 4, wc_duration), 1e-3)
self.wc_start = get_global_time_s()
def get_interpolated(self, wc_time):
t = (wc_time - self.wc_start) / self.wc_duration # to [0,1]
t = min(max(0, t), 1)
t_sq = t * t
t = 3 * t_sq - 2 * t_sq * t # smoothstep
return (1 - t) * self.start + t * self.target # [0,1] to sim time
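# Illustrative example (not part of the runtime flow): interpolate simulation time from
# 1.0 s to 3.0 s over at most 0.5 s of wall-clock time:
#   interp = TimeInterpolator(sim_start=1.0, sim_target=3.0, wc_duration=0.5)
#   sim_time = interp.get_interpolated(get_global_time_s())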
# TODO: find a better name...
class TimelineSyncStrategyExecutor:
def __init__(self, desc: SyncStrategyDescriptor, timeline):
self._strategy_desc = desc
self._timeline = timeline
self._is_presenter_playing = False
self.reset()
@property
def strategy_desc(self) -> SyncStrategyDescriptor:
return self._strategy_desc
@strategy_desc.setter
def strategy_desc(self, desc: SyncStrategyDescriptor):
# one of them is zero, reset status of diff limited
# (otherwise we do not need to)
if desc.max_time_diff_sec * self._strategy_desc.max_time_diff_sec < 0.00001:
self.reset()
self._strategy_desc = desc
def process_events(self, events: List[TimelineEvent]) -> List[TimelineEvent]:
"""
Processes the incoming timeline update events and decides what to do.
Returns a new list of timeline events that may not contain any of the input events.
"""
if self._strategy_desc.strategy_type == SyncStrategyType.STRICT:
return self._process_strict(events)
elif self._strategy_desc.strategy_type == SyncStrategyType.LOOSE:
return self._process_loose(events)
elif self.strategy_desc.strategy_type == SyncStrategyType.DIFFERENCE_LIMITED:
if self.strategy_desc.max_time_diff_sec < 0.0001:
return self._process_strict(events)
else:
return self._process_diff_limited(events)
        else:
            log_error(f'Sync strategy not implemented: {self._strategy_desc.strategy_type}')
            return []
def reset(self):
self._is_suspended = False # True: extrapolated too much, waiting
self._interpolating = False
self._time_interpolator = TimeInterpolator()
self._last_known_sim_time = None
# event processing shared between all strategy types
def _process_event_common(self, event: TimelineEvent, out_events: List[TimelineEvent]):
if event.type == int(omni.timeline.TimelineEventType.PLAY):
self._is_presenter_playing = True
elif event.type == int(omni.timeline.TimelineEventType.PAUSE):
self._is_presenter_playing = False
self.reset()
elif event.type == int(omni.timeline.TimelineEventType.STOP):
self._is_presenter_playing = False
self.reset()
elif event.type == int(omni.timeline.TimelineEventType.LOOP_MODE_CHANGED):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.ZOOM_CHANGED):
out_events.append(event)
def _process_strict(self, events: List[TimelineEvent]) -> List[TimelineEvent]:
out_events = []
for event in events:
self._process_event_common(event, out_events)
if event.type == int(omni.timeline.TimelineEventType.PLAY):
# Play but then pause immediately
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PAUSE, {}))
elif event.type == int(omni.timeline.TimelineEventType.PAUSE):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.STOP):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED):
self._last_known_sim_time = event.payload['currentTime']
# Play if needed, Sync the time then pause
if not self._timeline.is_playing():
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
out_events.append(event)
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PAUSE, {}))
return out_events
def _process_loose(self, events: List[TimelineEvent]) -> List[TimelineEvent]:
out_events = []
for event in events:
self._process_event_common(event, out_events)
if event.type == int(omni.timeline.TimelineEventType.PLAY):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.PAUSE):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.STOP):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED):
self._last_known_sim_time = event.payload['currentTime']
                # Care about the time only when not playing
if not self._timeline.is_playing():
out_events.append(event)
return out_events
def _start_interpolation(self, t0: float, t: float):
dt_max = self.strategy_desc.max_time_diff_sec
if not self._interpolating and t0 + dt_max < t:
self._interpolating = True
self._time_interpolator = TimeInterpolator(t0, t)
def _handle_time_difference(self, current_time: float, out_events: List[TimelineEvent]):
end_wait_scale_factor = 0.2
end_interpolation_scale_factor = 0.2
# Rules. Note that their order of application differs from this enumeration.
# Notation:
# current_time (new update from the presenter) -> t
# end_wait_scale_factor -> s
# end_interpolation_scale_factor -> si
# self._timeline.get_current_time() -> t0
# self.strategy_desc.max_time_diff_sec -> dt_max
# self._timeline.get_end_time() -> end
#
# 0. Looping is on and close to the end. end - K < t0 => set time and pause
# See below for K.
#
# 1. Suspend. t < t0 - dt_max => ahead of the presenter, pause and wait
#
# 2. Restart after suspend. is_suspended and t0 - s*dt_max < t => play
# The role of scale factor s is to avoid alternating pause/play when the presenter
# is slow/lagging. Instead, we let the listener run for a bit, then pause for longer.
# Thus, "s" basically controls the length of wait and extrapolation periods when
# the presenter is lagging behind.
#
# 3.1. Slightly behind, do nothing. Not interpolating and t0 < t < t0 + dt_max => ok
# 3.2. Got close enough, stop interpolation. t < t0 + si
#
# 4. Start interpolation (faster playback). t0 + dt_max < t => increase playback speed.
t = current_time
dt_max = self.strategy_desc.max_time_diff_sec
s = end_wait_scale_factor
si = end_interpolation_scale_factor
t0 = self._timeline.get_current_time()
K = max(0.5, dt_max)
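        # K is the guard window (in seconds) before the end time in which rule 0 applies.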
end = self._timeline.get_end_time()
# 0. Close to the end: set and pause
if self._timeline.is_looping() and end - K < t0:
self._is_suspended = True
if t0 < t:
out_events.append(TimelineEvent(
omni.timeline.TimelineEventType.CURRENT_TIME_TICKED,
{'currentTime': t},
))
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PAUSE, {}))
self._interpolating = False
self._time_interpolator = TimeInterpolator()
return
# 1. time difference is too high, pause
if not self._is_suspended and t < t0 - dt_max:
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PAUSE, {}))
self._is_suspended = True
self._interpolating = False
return
# 2. end waiting
elif self._is_suspended and t0 - s * dt_max < t:
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
self._is_suspended = False
# 3.2
if self._interpolating and t < t0 + si:
self._interpolating = False
return
if self._interpolating:
self._time_interpolator.target = t
# 4. start interpolation
self._start_interpolation(t0, t)
def _process_diff_limited(self, events: List[TimelineEvent]) -> List[TimelineEvent]:
out_events = []
time_received = False
for event in events:
self._process_event_common(event, out_events)
if event.type == int(omni.timeline.TimelineEventType.PLAY):
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
elif event.type == int(omni.timeline.TimelineEventType.PAUSE):
out_events.append(event)
if 'currentTime' in event.payload.keys():
out_events.append(TimelineEvent(
omni.timeline.TimelineEventType.CURRENT_TIME_TICKED,
{'currentTime' : event.payload['currentTime']}
))
elif event.type == int(omni.timeline.TimelineEventType.STOP):
out_events.append(event)
elif event.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED):
time_received = True
presenter_rewind = False
                # the presenter's time restarted because of looping
if self._last_known_sim_time is not None and \
event.payload['currentTime'] < self._last_known_sim_time:
presenter_rewind = True
is_previous_time_known = self._last_known_sim_time is not None
self._last_known_sim_time = event.payload['currentTime']
                # is_previous_time_known is False right after the listener re-enables sync;
                # in that case skip rule 1 of _handle_time_difference and always jump to the new time.
if is_previous_time_known and not presenter_rewind and self._is_presenter_playing:
self._handle_time_difference(event.payload['currentTime'], out_events)
else:
# just set the new time, restart playback if necessary
out_events.append(event)
self._interpolating = False
if self._is_presenter_playing:
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
self._is_suspended = False
                        # Still apply rule 4 (interpolation). This happens when sync was re-enabled;
                        # if interpolation is not desired in that case, this call can be removed so the
                        # timeline jumps instantly to the new time instead.
self._start_interpolation(
self._timeline.get_current_time(),
self._last_known_sim_time
)
# no new information, use the last known time
if self._is_presenter_playing and not time_received and \
self._last_known_sim_time is not None:
self._handle_time_difference(self._last_known_sim_time, out_events)
# interpolation
if self._interpolating and self._time_interpolator is not None:
# avoid being stuck when a different strategy was set during interpolation
# and it pauses the timeline
if not self._timeline.is_playing():
out_events.append(TimelineEvent(omni.timeline.TimelineEventType.PLAY, {}))
t_interpolated = self._time_interpolator.get_interpolated(get_global_time_s())
out_events.append(TimelineEvent(
omni.timeline.TimelineEventType.CURRENT_TIME_TICKED,
{'currentTime' : t_interpolated}
))
return out_events | 14,270 | Python | 45.485342 | 98 | 0.605676 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/global_time.py | import time
# For now we use system time, later we may use something from Nucleus
def get_global_time_s() -> float:
return time.time() | 140 | Python | 22.499996 | 69 | 0.714286 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_serializer.py | import carb
import omni.kit.usd.layers as layers
import omni.timeline
from pxr import Sdf, Usd
from typing import List, Tuple
from .global_time import get_global_time_s
from .timeline_event import TimelineEvent
from .timeline_state import (
get_timeline_state,
get_timeline_next_events,
get_timeline_next_state,
PlayState,
TimelineState
)
TIMELINE_PRIM_PATH = '/__session_shared_data__/Omni_Timeline_Live_Sync'
TIME_ATTR_NAME = 'timeline:time'
CONTROL_ID_PREF = 'timeline:control:ID_'
LOOPING_ATTR_NAME = 'timeline:looping'
OWNER_ID_ATTR_NAME = 'timeline:owner:id'
PLAYSTATE_ATTR_NAME = 'timeline:playstate'
OWNER_NAME_ATTR_NAME = 'timeline:owner:name'
PRESENTER_ID_ATTR_NAME = 'timeline:presenter:id'
GLB_TIMESTAMP_ATTR_NAME = 'timeline:timestamp'
PRESENTER_NAME_ATTR_NAME = 'timeline:presenter:name'
ZOOM_RANGE_END_ATTR_NAME = 'timeline:zoom_end'
ZOOM_RANGE_START_ATTR_NAME = 'timeline:zoom_start'
def prop_path(prop_name: str) -> str:
return TIMELINE_PRIM_PATH + '.' + prop_name
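# e.g. prop_path(TIME_ATTR_NAME) == '/__session_shared_data__/Omni_Timeline_Live_Sync.timeline:time'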
class TimelineStateSerializer:
def __init__(self):
self._timeline = None
self._stage = None
def initialize(self, timeline, synced_stage: Usd.Stage, sending: bool):
self._timeline = timeline
self._stage = synced_stage
self._timeline_state = TimelineState(timeline)
def finalize(self):
pass
def sendTimelineUpdate(self, e: TimelineEvent):
pass
def receiveTimelineUpdate(self) -> List[TimelineEvent]:
return []
def receiveTimestamp(self) -> float:
return 0
def sendOwnerUpdate(self, user: layers.LiveSessionUser):
pass
def receiveOwnerUpdate(self) -> str:
"""
Returns user ID
"""
pass
def sendPresenterUpdate(self, user: layers.LiveSessionUser):
pass
def receivePresenterUpdate(self) -> str:
"""
Returns user ID
"""
pass
def sendControlRequest(self, user_id: str, want_control: bool, from_owner: bool):
pass
def receiveControlRequests(self) -> List[Tuple[str, bool]]:
pass
class TimelinePrimSerializer(TimelineStateSerializer):
"""
Communicates timeline messages via attributes of a single shared prim.
It transforms events to state and vice versa.
"""
def __init__(self):
super().__init__()
self._layer = None
def initialize(self, timeline, synced_stage: Usd.Stage, sending: bool):
super().initialize(timeline, synced_stage, sending)
self._layer = self._stage.GetRootLayer() if self._stage is not None else None
self._timeline_prim_path = TIMELINE_PRIM_PATH
if sending:
self._setup_timeline_prim()
else:
self._timeline_prim = self._stage.GetPrimAtPath(self._timeline_prim_path)
if not self._timeline_prim.IsValid():
carb.log_warn(f'{self.__class__}: Could not find timeline prim')
def sendTimelineUpdate(self, e: TimelineEvent):
super().sendTimelineUpdate(e)
if self._stage is None or self._layer is None:
return
if not self._timeline_prim.IsValid():
self._setup_timeline_prim()
attr_names = []
values = []
if e.type == int(omni.timeline.TimelineEventType.PLAY) or\
e.type == int(omni.timeline.TimelineEventType.PAUSE) or\
e.type == int(omni.timeline.TimelineEventType.STOP):
next_state = get_timeline_next_state(self._timeline_state.state, e.type)
if next_state != self._timeline_state.state:
# TODO: omni.timeline should send a time changed event instead when stopped
if e.type == int(omni.timeline.TimelineEventType.STOP):
self._timeline_state.current_time = self._timeline.get_start_time()
self._timeline_state.state = next_state
attr_names.append(PLAYSTATE_ATTR_NAME)
values.append(next_state.value)
attr_names.append(TIME_ATTR_NAME)
values.append(self._timeline_state.current_time)
elif e.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED):
self._timeline_state.current_time = e.payload['currentTime']
attr_names.append(TIME_ATTR_NAME)
values.append(self._timeline_state.current_time)
elif e.type == int(omni.timeline.TimelineEventType.LOOP_MODE_CHANGED):
self._timeline_state.looping = e.payload['looping']
attr_names.append(LOOPING_ATTR_NAME)
values.append(self._timeline_state.looping)
elif e.type == int(omni.timeline.TimelineEventType.ZOOM_CHANGED):
self._timeline_state.set_zoom_range(e.payload['startTime'], e.payload['endTime'])
attr_names.append(ZOOM_RANGE_START_ATTR_NAME)
values.append(self._timeline_state.zoom_range[0])
attr_names.append(ZOOM_RANGE_END_ATTR_NAME)
values.append(self._timeline_state.zoom_range[1])
with Sdf.ChangeBlock():
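            # Sdf.ChangeBlock batches the attribute edits so downstream listeners see a
            # single change notification instead of one per attribute.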
for attr_name, value in zip(attr_names, values):
self._layer.GetAttributeAtPath(prop_path(attr_name)).default = value
if len(attr_names) > 0 or \
e.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED_PERMANENT):
self._layer.GetAttributeAtPath(prop_path(GLB_TIMESTAMP_ATTR_NAME)).default = e.timestamp
def receiveTimelineUpdate(self) -> List[TimelineEvent]:
if self._stage is None or self._layer is None:
return []
if not self._timeline_prim.IsValid():
return []
events = []
play_state = self._layer.GetAttributeAtPath(prop_path(PLAYSTATE_ATTR_NAME)).default
try:
play_state = PlayState(play_state)
except:
carb.log_error(
                f'Invalid playstate in timeline event serialization: {play_state}')
play_state = self._timeline_state.state
current_time = self._layer.GetAttributeAtPath(prop_path(TIME_ATTR_NAME)).default
timestamp = self._layer.GetAttributeAtPath(prop_path(GLB_TIMESTAMP_ATTR_NAME)).default
looping = self._layer.GetAttributeAtPath(prop_path(LOOPING_ATTR_NAME)).default
zoom_start = self._layer.GetAttributeAtPath(prop_path(ZOOM_RANGE_START_ATTR_NAME)).default
zoom_end = self._layer.GetAttributeAtPath(prop_path(ZOOM_RANGE_END_ATTR_NAME)).default
if looping != self._timeline_state.looping:
self._timeline_state.looping = looping
event = TimelineEvent(
type=omni.timeline.TimelineEventType.LOOP_MODE_CHANGED,
payload={'looping' : looping},
timestamp=timestamp
)
events.append(event)
if play_state != self._timeline_state.state:
timeline_events = get_timeline_next_events(self._timeline_state.state, play_state)
self._timeline_state.state = play_state
for timeline_event in timeline_events:
event = TimelineEvent(
timeline_event,
payload={'currentTime' : current_time}, # for better sync strategies
timestamp=timestamp
)
events.append(event)
if current_time != self._timeline_state.current_time:
self._timeline_state.current_time = current_time
event = TimelineEvent(
type=omni.timeline.TimelineEventType.CURRENT_TIME_TICKED,
payload={'currentTime' : current_time},
timestamp=timestamp
)
events.append(event)
if zoom_start != self._timeline_state.zoom_range[0] or zoom_end != self._timeline_state.zoom_range[1]:
self._timeline_state.set_zoom_range(zoom_start, zoom_end)
event = TimelineEvent(
type=omni.timeline.TimelineEventType.ZOOM_CHANGED,
payload={'startTime' : zoom_start, 'endTime' : zoom_end},
timestamp=timestamp
)
events.append(event)
return events
def receiveTimestamp(self) -> float:
if self._stage is None or self._layer is None:
return 0
if not self._timeline_prim.IsValid():
return 0
return self._layer.GetAttributeAtPath(prop_path(GLB_TIMESTAMP_ATTR_NAME)).default
def sendOwnerUpdate(self, user: layers.LiveSessionUser):
super().sendOwnerUpdate(user)
if self._stage is None or self._layer is None:
return
if not self._timeline_prim.IsValid():
self._setup_timeline_prim()
self._layer.GetAttributeAtPath(prop_path(OWNER_ID_ATTR_NAME)).default = user.user_id
self._layer.GetAttributeAtPath(prop_path(OWNER_NAME_ATTR_NAME)).default = user.user_name
def receiveOwnerUpdate(self) -> str:
if self._stage is None or self._layer is None:
return None
if not self._timeline_prim.IsValid():
return None
return self._layer.GetAttributeAtPath(prop_path(OWNER_ID_ATTR_NAME)).default
def sendPresenterUpdate(self, user: layers.LiveSessionUser):
super().sendPresenterUpdate(user)
if self._stage is None or self._layer is None:
return
if not self._timeline_prim.IsValid():
self._setup_timeline_prim()
self._layer.GetAttributeAtPath(prop_path(PRESENTER_ID_ATTR_NAME)).default = user.user_id
self._layer.GetAttributeAtPath(prop_path(PRESENTER_NAME_ATTR_NAME)).default = user.user_name
def receivePresenterUpdate(self) -> str:
if self._stage is None or self._layer is None:
return None
if not self._timeline_prim.IsValid():
return None
return self._layer.GetAttributeAtPath(prop_path(PRESENTER_ID_ATTR_NAME)).default
def sendControlRequest(self, user_id: str, want_control: bool, from_owner: bool):
super().sendControlRequest(user_id, want_control, from_owner)
if self._stage is None or self._layer is None:
return
if not self._timeline_prim.IsValid():
if from_owner:
self._setup_timeline_prim()
else:
return
prim = self._timeline_prim
prefix = CONTROL_ID_PREF
attr_name = f'{prefix}{user_id}'
if want_control:
if not prim.HasAttribute(attr_name):
prim.CreateAttribute(attr_name, Sdf.ValueTypeNames.Int)
prim.GetAttribute(attr_name).Set(1)
elif prim.HasAttribute(attr_name): # Nothing to do if the attribute does not exist
if from_owner:
prim.RemoveProperty(attr_name)
else:
prim.GetAttribute(attr_name).Set(0)
def receiveControlRequests(self) -> List[Tuple[str, bool]]:
if self._stage is None or self._layer is None:
return []
if not self._timeline_prim.IsValid():
return []
requests = []
prim_spec = self._layer.GetPrimAtPath(self._timeline_prim_path)
for property in prim_spec.properties:
attr_name: str = property.name
prefix = CONTROL_ID_PREF
if attr_name.startswith(prefix):
user_id = attr_name.removeprefix(prefix)
want_control = property.default
requests.append([user_id, want_control != 0])
return requests
def _setup_timeline_prim(self):
self._stage.DefinePrim(self._timeline_prim_path)
self._timeline_prim = self._stage.GetPrimAtPath(self._timeline_prim_path)
prim = self._timeline_prim
prim.SetMetadata("hide_in_stage_window", True)
if not prim.IsValid():
carb.log_error("Coding error in TimelinePrimSerializer: trying to create attributes of an invalid prim")
return
if not prim.HasAttribute(PLAYSTATE_ATTR_NAME):
prim.CreateAttribute(PLAYSTATE_ATTR_NAME, Sdf.ValueTypeNames.Int)
if not prim.HasAttribute(TIME_ATTR_NAME):
prim.CreateAttribute(TIME_ATTR_NAME, Sdf.ValueTypeNames.Double)
if not prim.HasAttribute(LOOPING_ATTR_NAME):
prim.CreateAttribute(LOOPING_ATTR_NAME, Sdf.ValueTypeNames.Bool)
if not prim.HasAttribute(ZOOM_RANGE_START_ATTR_NAME):
prim.CreateAttribute(ZOOM_RANGE_START_ATTR_NAME, Sdf.ValueTypeNames.Double)
if not prim.HasAttribute(ZOOM_RANGE_END_ATTR_NAME):
prim.CreateAttribute(ZOOM_RANGE_END_ATTR_NAME, Sdf.ValueTypeNames.Double)
if not prim.HasAttribute(GLB_TIMESTAMP_ATTR_NAME):
prim.CreateAttribute(GLB_TIMESTAMP_ATTR_NAME, Sdf.ValueTypeNames.Double)
if not prim.HasAttribute(OWNER_ID_ATTR_NAME):
prim.CreateAttribute(OWNER_ID_ATTR_NAME, Sdf.ValueTypeNames.String)
if not prim.HasAttribute(OWNER_NAME_ATTR_NAME):
prim.CreateAttribute(OWNER_NAME_ATTR_NAME, Sdf.ValueTypeNames.String)
if not prim.HasAttribute(PRESENTER_ID_ATTR_NAME):
prim.CreateAttribute(PRESENTER_ID_ATTR_NAME, Sdf.ValueTypeNames.String)
if not prim.HasAttribute(PRESENTER_NAME_ATTR_NAME):
prim.CreateAttribute(PRESENTER_NAME_ATTR_NAME, Sdf.ValueTypeNames.String)
prim.GetAttribute(PLAYSTATE_ATTR_NAME).Set(get_timeline_state(self._timeline).value)
prim.GetAttribute(TIME_ATTR_NAME).Set(self._timeline.get_current_time())
prim.GetAttribute(LOOPING_ATTR_NAME).Set(self._timeline.is_looping())
prim.GetAttribute(ZOOM_RANGE_START_ATTR_NAME).Set(self._timeline.get_zoom_start_time())
prim.GetAttribute(ZOOM_RANGE_END_ATTR_NAME).Set(self._timeline.get_zoom_end_time())
prim.GetAttribute(GLB_TIMESTAMP_ATTR_NAME).Set(get_global_time_s())
| 13,867 | Python | 41.280488 | 116 | 0.635538 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_director.py | import omni.usd
import omni.timeline
import carb.events
from .timeline_event import TimelineEvent
from .timeline_serializer import TimelineStateSerializer
from .timeline_session_role import TimelineSessionRole
class TimelineDirector(TimelineSessionRole):
def __init__(self, usd_context_name: str, serializer: TimelineStateSerializer, session):
super().__init__(usd_context_name, serializer, session)
self._timeline = self._main_timeline
self._timeline_sub = self._timeline.get_timeline_event_stream().create_subscription_to_pop(
self._on_timeline_event
)
def start_session(self) -> bool:
if not super().start_session():
return False
self._serializer.initialize(
timeline=self._timeline,
synced_stage=self._synced_stage,
sending=True
)
return True
def enable_sync(self, enabled: bool):
if enabled == self._enable_sync:
return
super().enable_sync(enabled)
if enabled:
self._timeline_sub = self._timeline.get_timeline_event_stream().create_subscription_to_pop(
self._on_timeline_event
)
self._update_timeline_state()
else:
self._timeline_sub = self._timeline.get_timeline_event_stream().create_subscription_to_pop(
self._on_timeline_permanent_tick
)
def stop_session(self):
super().stop_session()
self._serializer.finalize()
self._timeline_sub = None
def _on_timeline_event(self, e: carb.events.IEvent):
self._serializer.sendTimelineUpdate(TimelineEvent.from_carb_event(e))
def _on_timeline_permanent_tick(self, e: carb.events.IEvent):
if e.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED_PERMANENT):
self._serializer.sendTimelineUpdate(TimelineEvent.from_carb_event(e))
def _update_timeline_state(self):
event = TimelineEvent(
omni.timeline.TimelineEventType.CURRENT_TIME_TICKED,
{'currentTime' : self._timeline.get_current_time()}
)
self._serializer.sendTimelineUpdate(event)
event = TimelineEvent(
omni.timeline.TimelineEventType.LOOP_MODE_CHANGED,
{'looping' : self._timeline.is_looping()}
)
self._serializer.sendTimelineUpdate(event)
event = TimelineEvent(
omni.timeline.TimelineEventType.ZOOM_CHANGED,
{
'startTime' : self._timeline.get_zoom_start_time(),
'endTime' : self._timeline.get_zoom_end_time(),
}
)
self._serializer.sendTimelineUpdate(event)
event = TimelineEvent(omni.timeline.TimelineEventType.STOP)
if self._timeline.is_playing():
event = TimelineEvent(omni.timeline.TimelineEventType.PLAY)
elif not self._timeline.is_stopped():
event = TimelineEvent(omni.timeline.TimelineEventType.PAUSE)
self._serializer.sendTimelineUpdate(event) | 3,054 | Python | 36.256097 | 103 | 0.636215 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_event.py | import carb
import omni.timeline
from typing import Union
from .global_time import get_global_time_s
class TimelineEvent:
def __init__(
self,
type: Union[int, omni.timeline.TimelineEventType],
payload: dict = None,
timestamp: float = None
):
if isinstance(type, omni.timeline.TimelineEventType):
type = int(type)
self._type = type
if payload is None:
payload = {}
self._payload = payload
if timestamp is None:
self._timestamp = get_global_time_s()
else:
self._timestamp = timestamp
@classmethod
    def from_carb_event(cls, event: carb.events.IEvent):
return TimelineEvent(event.type, event.payload)
@property
def type(self) -> int:
return self._type
@property
def payload(self) -> dict:
return self._payload
@property
def timestamp(self) -> float:
return self._timestamp | 989 | Python | 24.384615 | 61 | 0.589484 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/__init__.py | from .live_session_extension import (
get_session_state,
get_session_window,
get_timeline_session,
TimelineLiveSessionExtension
)
from .timeline_session_role import TimelineSessionRoleType
from .session_state import SessionState
from .timeline_session import TimelineSession | 290 | Python | 31.33333 | 58 | 0.803448 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/session_watcher.py | import omni.kit.usd.layers as layers
import omni.usd
from omni.kit.collaboration.presence_layer import LAYER_SUBSCRIPTION_ORDER
from .session_state import SessionState
from .timeline_session import TimelineSession
from .timeline_session_role import TimelineSessionRoleType
class SessionWatcher:
def __init__(self, usd_context_name: str = ""):
self._usd_context = omni.usd.get_context(usd_context_name)
self._live_syncing = layers.get_live_syncing(self._usd_context)
self._layers = layers.get_layers(self._usd_context)
def start(self):
order = LAYER_SUBSCRIPTION_ORDER + 1
self._layers_event_subscription = None
self._layers_event_subscription = self._layers.get_event_stream().create_subscription_to_pop(
self._on_layers_event, name="omni.timeline.live_session", order=order
)
self._session_state = SessionState()
def stop(self):
if self._session_state.timeline_session is not None:
self._session_state.timeline_session.stop_session()
self._session_state.timeline_session = None
self._layers_event_subscription = None
self._session_state = None
def get_timeline_session(self) -> TimelineSession:
return self._session_state.timeline_session
def get_session_state(self) -> SessionState:
return self._session_state
def _on_layers_event(self, event):
payload = layers.get_layer_event_payload(event)
if not payload:
return
# Only events from root layer session are handled.
if payload.event_type == layers.LayerEventType.LIVE_SESSION_STATE_CHANGED:
if not payload.is_layer_influenced(self._usd_context.get_stage_url()):
return
self._on_session_state_changed()
elif payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_JOINED:
if not payload.is_layer_influenced(self._usd_context.get_stage_url()):
return
self._on_user_joined(payload.user_id)
elif payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_LEFT:
if not payload.is_layer_influenced(self._usd_context.get_stage_url()):
return
self._on_user_left(payload.user_id)
def _on_session_state_changed(self):
if not self._live_syncing.is_in_live_session():
if self._session_state.timeline_session is not None:
self._session_state.timeline_session.stop_session()
self._session_state.clear_users()
from .live_session_extension import get_session_window
window = get_session_window()
if window:
window.hide()
else:
self._session = self._get_current_session()
role = TimelineSessionRoleType.LISTENER
if self._is_owner(self._session):
role = TimelineSessionRoleType.PRESENTER
timeline_session = TimelineSession(
usd_context_name=self._usd_context.get_name(),
session_user=self._session.logged_user,
session_state=self._session_state,
role=role
)
self._session_state.add_users([self._session.logged_user])
self._session_state.timeline_session = timeline_session
timeline_session.start_session()
# This needs to happen after we have a session
if self._is_owner(self._session):
timeline_session.owner = self._session.logged_user
timeline_session.presenter = self._session.logged_user
def _on_user_joined(self, user_id):
user = self._session.get_peer_user_info(user_id)
if user is not None:
self._session_state.add_users([user])
timeline_session = self._session_state.timeline_session
if timeline_session is not None:
if timeline_session.owner_id == user_id:
timeline_session.owner = user
def _on_user_left(self, user_id):
self._session_state.remove_user(user_id)
def _get_current_session(self): # -> LiveSession, TODO: export it in omni.kit.usd.layers
return self._live_syncing.get_current_live_session()
def _is_owner(self, session) -> bool:
return session.merge_permission | 4,360 | Python | 41.339805 | 101 | 0.630734 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_listener.py | import asyncio
import carb
import omni.kit.app
import omni.timeline
import omni.usd
from typing import List
from .global_time import get_global_time_s
from .timeline_serializer import TimelineStateSerializer
from .sync_strategy import SyncStrategyDescriptor, SyncStrategyType, TimelineSyncStrategyExecutor
from .timeline_event import TimelineEvent
from .timeline_session_role import TimelineSessionRole
TIMESTAMP_SYNC_EPS = 1e-3 # 1 ms
class TimelineListener(TimelineSessionRole):
def __init__(
self,
usd_context_name: str,
serializer: TimelineStateSerializer,
session,
sync_strategy: SyncStrategyDescriptor = None
):
super().__init__(usd_context_name, serializer, session)
self._app_sub = None
self._is_director_timeline_supported = hasattr(self._main_timeline, 'set_director')
self._timeline_name = 'timeline_director'
if not self._is_director_timeline_supported:
carb.log_warn("Timeline sync: use a newer Kit version to support all features.")
self._director_timeline = self._main_timeline
self._controlled_timeline = None
else:
self._controlled_timeline = self._main_timeline
self._initialize_director_timeline()
if sync_strategy is None:
sync_strategy = SyncStrategyDescriptor(SyncStrategyType.DIFFERENCE_LIMITED)
self._timeline_sync_executor = TimelineSyncStrategyExecutor(sync_strategy, self._director_timeline)
def start_session(self, catch_up: bool = True) -> bool:
if not super().start_session():
return False
self._initialize_director_timeline()
self._serializer.initialize(
timeline=self._director_timeline,
synced_stage=self._synced_stage,
sending=False
)
asyncio.ensure_future(self._start_session(catch_up))
return True
@property
def sync_strategy(self):
return self._timeline_sync_executor
async def _start_session(self, catch_up):
        # make sure the director is set before it receives the updates
await omni.kit.app.get_app().next_update_async()
# catch up with the current state
if catch_up:
self._check_time(None)
self._app_sub = self._app_stream.create_subscription_to_pop(self._check_time)
def stop_session(self):
super().stop_session()
self._serializer.finalize()
if self._controlled_timeline is not None:
self._controlled_timeline.set_director(None)
omni.timeline.destroy_timeline(self._timeline_name)
self._app_sub = None
def enable_sync(self, enabled: bool):
if enabled == self._enable_sync:
return
super().enable_sync(enabled)
if enabled:
if self._synced_stage is None:
carb.log_error(f"{self.__class__}: could not find presence layer")
return
self._initialize_director_timeline()
self._serializer.initialize(
timeline=self._director_timeline,
synced_stage=self._synced_stage,
sending=False
)
self._timeline_sync_executor.reset()
asyncio.ensure_future(self._start_session(True))
else:
if self._controlled_timeline:
self._controlled_timeline.set_director(None)
self._app_sub = None
def get_latency_estimate(self) -> float:
return max(get_global_time_s() - self._last_update_timestamp, 0)
def _warn_once(self, tag: str, msg: str):
attr_name = "__logged_" + tag
if not hasattr(self, attr_name):
setattr(self, attr_name, False)
if not getattr(self, attr_name):
carb.log_warn(msg)
setattr(self, attr_name, True)
def _save_timestamp(self, timestamp: float):
if get_global_time_s() + TIMESTAMP_SYNC_EPS < timestamp:
self._warn_once(
"global_time_sync",
"Future timestamp received. Global time is inaccurate, latency estimates are unreliable."
)
self._last_update_timestamp = max(self._last_update_timestamp, timestamp)
def _update_latency_timestamp(self, events: List[TimelineEvent]):
if len(events) == 0:
self._save_timestamp(self._serializer.receiveTimestamp())
for event in events:
self._save_timestamp(event.timestamp)
def _check_time(self, _):
events = self._serializer.receiveTimelineUpdate()
actions_to_execute = events
self._update_latency_timestamp(events)
if self._timeline_sync_executor is not None:
actions_to_execute = self._timeline_sync_executor.process_events(events)
for event in actions_to_execute:
self._apply_time_event(event)
def _apply_time_event(self, event: TimelineEvent):
type_id = int(event.type)
if type_id == int(omni.timeline.TimelineEventType.PLAY):
self._director_timeline.play()
elif type_id == int(omni.timeline.TimelineEventType.PAUSE):
self._director_timeline.pause()
elif type_id == int(omni.timeline.TimelineEventType.STOP):
self._director_timeline.stop()
elif type_id == int(omni.timeline.TimelineEventType.CURRENT_TIME_TICKED):
time = event.payload['currentTime']
self._director_timeline.set_current_time(time)
elif type_id == int(omni.timeline.TimelineEventType.LOOP_MODE_CHANGED):
looping = event.payload['looping']
self._director_timeline.set_looping(looping)
elif type_id == int(omni.timeline.TimelineEventType.ZOOM_CHANGED):
start_time, end_time = event.payload['startTime'], event.payload['endTime']
self._director_timeline.set_zoom_range(start_time, end_time)
def _initialize_director_timeline(self):
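        # Mirror the controlled timeline's range, rate, current time and play state onto the director timeline.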
if self._controlled_timeline is None:
return
time = self._controlled_timeline.get_current_time()
playing = self._controlled_timeline.is_playing()
stopped = self._controlled_timeline.is_stopped()
self._director_timeline = omni.timeline.get_timeline_interface(self._timeline_name)
self._director_timeline.set_end_time(self._controlled_timeline.get_end_time())
self._director_timeline.set_start_time(self._controlled_timeline.get_start_time())
self._director_timeline.set_time_codes_per_second(self._controlled_timeline.get_time_codes_per_seconds())
self._director_timeline.set_current_time(time)
if not playing:
if stopped:
self._director_timeline.stop()
else:
self._director_timeline.pause()
self._director_timeline.commit()
self._controlled_timeline.set_director(self._director_timeline)
| 6,934 | Python | 39.086705 | 113 | 0.636141 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_session_role.py | import carb
import enum
import omni.timeline
import omni.usd
import omni.kit.collaboration.presence_layer as pl
from typing import Callable
from .timeline_serializer import TimelineStateSerializer
class TimelineSessionRoleType(enum.Enum):
    LISTENER = 0
    PRESENTER = 1
class TimelineSessionRole:
def __init__(self, usd_context_name: str, serializer: TimelineStateSerializer, session):
self._main_timeline = omni.usd.get_context(usd_context_name).get_timeline()
        self._context = omni.usd.get_context(usd_context_name)
self._synced_stage = pl.get_presence_layer_interface(self._context).get_shared_data_stage()
self._stage = self._context.get_stage()
self._app_stream = omni.kit.app.get_app().get_update_event_stream()
self._app_sub_user = None
self._enable_sync = True
self._serializer = serializer
from .timeline_session import TimelineSession
self._session: TimelineSession = session
self._last_update_timestamp: float = 0
def start_session(self) -> bool:
if self._synced_stage is None:
carb.log_error(f"{self.__class__}: could not find presence layer")
return False
self._app_sub_user = self._app_stream.create_subscription_to_pop(self._check_user_updates)
return True
def stop_session(self):
self._app_sub_user = None
def enable_sync(self, enabled: bool):
self._enable_sync = enabled
self._session._on_enable_sync(enabled)
def is_sync_enabled(self) -> bool:
return self._enable_sync
def get_latency_estimate(self) -> float:
"""
Returns estimated latency in seconds.
The accuracy depends on the error of synchronized global time.
"""
return 0
# TODO: consider moving _check methods to TimelineSession
def _check_user_updates(self, _):
self._check_presenter()
self._check_control_requests()
def _check_presenter(self):
presenter_id = self._serializer.receivePresenterUpdate()
if (self._session.presenter is None and presenter_id is None) or\
(self._session.presenter is not None and self._session.presenter.user_id == presenter_id):
return
self._session._on_presenter_changed(presenter_id)
def _check_control_requests(self):
request_list = self._serializer.receiveControlRequests()
users_want_control = self._session.get_request_control_ids()
# current behavior of receiveControlRequests:
# - returns _all_ positive requests (want_control=True)
# - returns some of the negative requests but not necessarily all of them
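        # Reconcile the cached request list with the received updates and notify the session about any change.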
all_want_control = []
for request in request_list:
user_id = request[0]
want_control = request[1]
if want_control:
all_want_control.append(user_id)
if (want_control and user_id not in users_want_control) or\
(not want_control and user_id in users_want_control):
self._session._on_control_request_received(user_id, want_control)
for user_id in self._session.get_request_control_ids():
# Remove if not in the set of all users that want control
if user_id not in all_want_control:
self._session._on_control_request_received(user_id, False)
| 3,373 | Python | 38.232558 | 102 | 0.648088 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/ui_user_window.py | import omni.ui as ui
from .session_state import SessionState
from .sync_strategy import SyncStrategyType
from .timeline_listener import TimelineListener
from .timeline_session import TimelineSession
from .timeline_session_role import TimelineSessionRoleType
from functools import partial
scrollingframe_style = {
"background_color": 0xFF23211F,
"Label:selected": {"color": 0xFFFFFF00}
}
class UserWindow:
def __init__(self, session_state: SessionState):
self._session_state: SessionState = session_state
self._window = ui.Window(
"Timeline Session",
width=400,
height=500,
visible=False,
flags=0,
visibility_changed_fn=self.on_window_visibility_changed,
)
with self._window.frame:
self._ui_container = ui.Frame(build_fn=self._build_ui)
self._labels = []
self._selected_user = None
self._presenter_button = None
self._diff_slider = None
def show(self):
self._window.visible = True
self._ui_container.rebuild()
def hide(self):
self._window.visible = False
self._selected_user = None
def on_window_visibility_changed(self, visible):
if self._session_state is None:
return
if visible:
self._session_state.add_users_changed_fn(self._on_user_changed)
else:
self._session_state.remove_users_changed_fn(self._on_user_changed)
timeline_session = self._session_state.timeline_session
if timeline_session is not None:
if visible:
timeline_session.add_presenter_changed_fn(self._on_user_changed)
timeline_session.add_control_request_changed_fn(self._on_user_changed)
else:
timeline_session.remove_presenter_changed_fn(self._on_user_changed)
timeline_session.remove_control_request_changed_fn(self._on_user_changed)
def destroy(self):
self._presenter_button = None
self._diff_slider = None
self._ui_container.destroy()
self._ui_container = None
self._window.set_visibility_changed_fn(None)
self._window.destroy()
self._window = None
self._session_state = None
self._labels = []
self._selected_user = None
def _build_ui(self):
users = self._session_state.users
self._selected_user = None
timeline_session = self._session_state.timeline_session
if timeline_session is None:
with ui.HStack():
ui.Label("Timeline session is not available")
return
with ui.VStack():
ui.Label("Session Users", height=30)
self._labels = []
with ui.ScrollingFrame(width=390, height=200, style=scrollingframe_style):
with ui.VStack():
for user in users:
self._create_user_label(timeline_session, user)
ui.Label("Timeline Control Requests", height=30)
with ui.ScrollingFrame(width=390, height=100, style=scrollingframe_style):
with ui.VStack():
for user in timeline_session.get_request_controls():
self._create_user_label(timeline_session, user)
if timeline_session.am_i_owner():
with ui.HStack(height=50):
self._presenter_button = ui.Button("Set as Timeline Presenter",
clicked_fn=self._on_make_presenter_clicked,
visible=False)
elif not timeline_session.am_i_presenter():
with ui.HStack(height=50):
title = "Request Timeline Control"
if timeline_session.live_session_user in timeline_session.get_request_controls():
title = "Revoke Request"
self._presenter_button = ui.Button(title,
clicked_fn=self._on_request_control_clicked)
if not timeline_session.am_i_presenter():
with ui.VStack(height=50):
ui.Spacer()
with ui.HStack():
ui.Label('Allowed time difference from Presenter (sec): ')
self._diff_slider = ui.FloatSlider(name='tsync_maxdiff_slider', min=0, max=2)
if timeline_session.role is not None and\
hasattr(timeline_session.role, 'sync_strategy'):
listener: TimelineListener = timeline_session.role
strategy_desc = listener.sync_strategy.strategy_desc
self._diff_slider.model.set_value(strategy_desc.max_time_diff_sec)
self._diff_slider.model.add_value_changed_fn(self._on_diff_slider_changed)
ui.Spacer()
def _create_user_label(self, timeline_session: TimelineSession, user):
logged_str = f"[Current user]" if timeline_session.is_logged_user(user) else ""
owner_str = "[Owner]" if timeline_session.is_owner(user) else ""
presenter_str = "[Presenter]" if timeline_session.is_presenter(user) else ""
label = ui.Label(f'{user.user_name} ({user.from_app}) {owner_str}{presenter_str}{logged_str}',
height=0)
label.set_mouse_pressed_fn(partial(self._on_user_pressed, label, user))
self._labels.append(label)
    def _on_user_pressed(self, label, user, x, y, button, modifier):
for l in self._labels:
l.selected = False
label.selected = True
self._selected_user = user
timeline_session = self._session_state.timeline_session
if self._presenter_button is not None and timeline_session is not None:
if timeline_session.am_i_owner():
self._presenter_button.visible = not timeline_session.is_presenter(user)
def _on_make_presenter_clicked(self):
if self._selected_user is not None and self._session_state.timeline_session is not None:
self._session_state.timeline_session.presenter = self._selected_user
def _on_request_control_clicked(self):
timeline_session = self._session_state.timeline_session
if timeline_session is not None:
current_user = timeline_session.live_session_user
want_control = current_user not in timeline_session.get_request_controls()
timeline_session.request_control(want_control)
def _on_user_changed(self, *_):
self._ui_container.rebuild()
def _on_diff_slider_changed(self, value):
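        # Apply the slider value to the listener's sync strategy as the maximum allowed time difference from the presenter.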
timeline_session = self._session_state.timeline_session
if timeline_session is not None and timeline_session.role is not None and\
timeline_session.role_type == TimelineSessionRoleType.LISTENER and \
hasattr(timeline_session.role, 'sync_strategy'):
listener: TimelineListener = timeline_session.role
max_diff = value.as_float
strategy_desc = listener.sync_strategy.strategy_desc
strategy_desc.max_time_diff_sec = max_diff
strategy_desc.strategy_type = SyncStrategyType.DIFFERENCE_LIMITED
listener.sync_strategy.strategy_desc = strategy_desc
strategy_desc = listener.sync_strategy.strategy_desc
| 7,517 | Python | 44.289156 | 102 | 0.592657 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/live_session_extension.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .session_state import SessionState
from .session_watcher import SessionWatcher
from .timeline_session import TimelineSession
from .ui_user_window import UserWindow
import omni.ext
_session_watcher = None
_window = None
class TimelineLiveSessionExtension(omni.ext.IExt):
def on_startup(self, ext_id):
usd_context_name = ""
global _session_watcher
_session_watcher = SessionWatcher(usd_context_name)
_session_watcher.start()
global _window
_window = UserWindow(_session_watcher.get_session_state())
def on_shutdown(self):
global _session_watcher
if _session_watcher is not None:
_session_watcher.stop()
# TODO: _session_watcher.destroy()
_session_watcher = None
global _window
if _window is not None:
_window.hide()
_window.destroy()
_window = None
def get_timeline_session() -> TimelineSession:
global _session_watcher
if _session_watcher is None:
return None
return _session_watcher.get_timeline_session()
def get_session_state() -> SessionState:
global _session_watcher
if _session_watcher is None:
return None
return _session_watcher.get_session_state()
def get_session_window() -> UserWindow:
global _window
return _window
| 1,771 | Python | 28.533333 | 76 | 0.693958 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_state.py | import enum
import omni.timeline
from typing import List, Union
# Consider moving these to the omni.timeline API
class PlayState(enum.Enum):
PLAYING = 0
PAUSED = 1
STOPPED = 2
def get_timeline_state(timeline) -> PlayState:
if timeline is None:
return None
if timeline.is_playing():
return PlayState.PLAYING
elif timeline.is_stopped():
return PlayState.STOPPED
return PlayState.PAUSED
def get_timeline_next_state(
current_state: PlayState,
event: Union[int, omni.timeline.TimelineEventType]
) -> PlayState:
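    # Map a timeline event onto the resulting play state; events that do not apply to the current state leave it unchanged.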
if isinstance(event, omni.timeline.TimelineEventType):
event = int(event)
if current_state == PlayState.PLAYING:
if event == int(omni.timeline.TimelineEventType.STOP):
return PlayState.STOPPED
elif event == int(omni.timeline.TimelineEventType.PAUSE):
return PlayState.PAUSED
else:
return current_state
elif current_state == PlayState.PAUSED:
if event == int(omni.timeline.TimelineEventType.STOP):
return PlayState.STOPPED
elif event == int(omni.timeline.TimelineEventType.PLAY):
return PlayState.PLAYING
else:
return current_state
elif current_state == PlayState.STOPPED:
if event == int(omni.timeline.TimelineEventType.PLAY):
return PlayState.PLAYING
else:
return current_state
# Should not be called
return current_state
def get_timeline_next_events(
current_state: PlayState,
next_state: PlayState
) -> List[omni.timeline.TimelineEventType]:
if current_state == PlayState.PLAYING:
if next_state == PlayState.PAUSED:
return [omni.timeline.TimelineEventType.PAUSE]
elif next_state == PlayState.STOPPED:
return [omni.timeline.TimelineEventType.STOP]
elif current_state == PlayState.PAUSED:
if next_state == PlayState.PLAYING:
return [omni.timeline.TimelineEventType.PLAY]
elif next_state == PlayState.STOPPED:
return [omni.timeline.TimelineEventType.STOP]
elif current_state == PlayState.STOPPED:
if next_state == PlayState.PLAYING:
return [omni.timeline.TimelineEventType.PLAY]
elif next_state == PlayState.PAUSED:
return [omni.timeline.TimelineEventType.PLAY, omni.timeline.TimelineEventType.PAUSE]
return []
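# Illustrative sketch (not part of the module API): per the transitions above, going from
# STOPPED to PAUSED requires a PLAY event followed by a PAUSE event, i.e.
#   get_timeline_next_events(PlayState.STOPPED, PlayState.PAUSED)
#   == [omni.timeline.TimelineEventType.PLAY, omni.timeline.TimelineEventType.PAUSE]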
class TimelineState:
"""
All information that is required to cache timeline state.
Usually it is better to cache the state instead of using the timeline itself
because the timeline postpones state changes by one frame and we have no information
about its state change queue.
"""
def __init__(self, timeline = None):
state = PlayState.STOPPED
time = 0
looping = True
zoom_range = [0, 0]
if timeline is not None:
state = get_timeline_state(timeline)
time = timeline.get_current_time()
looping = timeline.is_looping()
zoom_range = [timeline.get_zoom_start_time(), timeline.get_zoom_end_time()]
self._state: PlayState = state
self._current_time: float = time
self._looping: bool = looping
self._zoom_range = zoom_range
@property
def state(self) -> PlayState:
return self._state
@state.setter
def state(self, state: PlayState):
self._state = state
@property
def current_time(self) -> float:
return self._current_time
@current_time.setter
def current_time(self, time: float):
self._current_time = time
@property
def looping(self) -> bool:
return self._looping
@looping.setter
def looping(self, value: bool):
self._looping = value
@property
def zoom_range(self):
return self._zoom_range
def set_zoom_range(self, start_time: float, end_time: float):
self._zoom_range = [start_time, end_time]
| 4,000 | Python | 29.541985 | 96 | 0.643 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/timeline_session.py | from carb import log_error
import omni.kit.usd.layers as layers
from typing import Callable, List
from .timeline_director import TimelineDirector
from .timeline_listener import TimelineListener
from .timeline_session_role import TimelineSessionRoleType, TimelineSessionRole
from .timeline_serializer import TimelinePrimSerializer
import omni.kit.notification_manager as nm
class TimelineSession:
def __init__(
self,
usd_context_name: str,
session_user: layers.LiveSessionUser,
session_state,
role: TimelineSessionRoleType = None
):
self._usd_context_name = usd_context_name
self._serializer = TimelinePrimSerializer()
self._session_user = session_user
self._is_running: bool = False
from .session_state import SessionState
self._session_state: SessionState = session_state
role_type = role
if role_type is None:
role_type = TimelineSessionRoleType.LISTENER
self._role: TimelineSessionRole = None
self._role_type: TimelineSessionRoleType = None
self._change_role(role_type)
self._owner: layers.LiveSessionUser = None
        # The owner ID may be available before the corresponding LiveSessionUser object is
self._owner_id: str = None
self._presenter: layers.LiveSessionUser = None
self._control_requests: List[str] = [] # List of user IDs
self._presenter_changed_callbacks = []
self._request_control_callbacks = []
self._enable_sync_callbacks = []
def __del__(self):
self.destroy()
def destroy(self):
# TODO: role, is_running, serializer, etc.
self._presenter_changed_callbacks = []
self._control_requests = []
self._request_control_callbacks = []
self._enable_sync_callbacks = []
@property
def live_session_user(self) -> layers.LiveSessionUser:
return self._session_user
@property
def role_type(self) -> TimelineSessionRoleType:
return self._role_type
@property
def role(self) -> TimelineSessionRole:
return self._role
def start_session(self):
if self._role is None:
return
self._is_running = True
success = self._role.start_session()
self._control_requests = []
if success:
self._owner_id = self._serializer.receiveOwnerUpdate()
def stop_session(self):
if self._role is None:
return
self._is_running = False
self._role.stop_session()
self._control_requests = []
def is_running(self) -> bool:
return self._is_running
def enable_sync(self, enabled: bool):
if self._role is not None:
self._role.enable_sync(enabled)
def is_sync_enabled(self) -> bool:
return self._role is not None and self._role.is_sync_enabled()
@property
def owner_id(self) -> str:
if self.owner is not None:
return self.owner.user_id
elif self._owner_id is not None:
return self._owner_id
return None
@property
def owner(self) -> layers.LiveSessionUser:
return self._owner
@owner.setter
def owner(self, user: layers.LiveSessionUser):
if not self._is_running:
log_error(f'Session must be running to set the owner')
return
if user is not None:
self._owner_id = user.user_id
if (self._owner is None and user is None) or\
self._owner is not None and user is not None and\
self._owner.user_id == user.user_id:
# No change
return
self._owner = user
self._serializer.sendOwnerUpdate(user)
# clear all requests
if self.am_i_owner():
requests = self._serializer.receiveControlRequests()
for request in requests:
self._serializer.sendControlRequest(
user_id=request[0],
want_control=False,
from_owner=True
)
@property
def presenter(self) -> layers.LiveSessionUser:
return self._presenter
@presenter.setter
def presenter(self, user: layers.LiveSessionUser):
if not self._is_running:
log_error(f'Session must be running to set the presenter')
return
if (self._presenter is None and user is None) or\
self._presenter is not None and user is not None and\
self._presenter.user_id == user.user_id:
# No change
return
if not self.am_i_owner():
log_error(f'Only the session owner is allowed to set the presenter')
return
# Remove user from control requests
        if user is not None and user.user_id in self._control_requests:
self._on_control_request_received(user.user_id, False)
self._serializer.sendControlRequest(user.user_id, False, self.am_i_owner())
if user is not None:
self._on_presenter_changed(user.user_id)
self._serializer.sendPresenterUpdate(user)
def request_control(self, want_control: bool = True):
if not self._is_running:
log_error(f'Session must be running to request timeline control')
return
user = self._session_user
if user is not None:
if (want_control and user.user_id not in self._control_requests) or \
(not want_control and user.user_id in self._control_requests):
self._on_control_request_received(user.user_id, want_control)
self._serializer.sendControlRequest(user.user_id, want_control, self.am_i_owner())
def get_request_controls(self) -> List[layers.LiveSessionUser]:
users = []
for user_id in self._control_requests:
user = self._session_state.find_user(user_id)
if user is not None:
users.append(user)
return users
def get_request_control_ids(self) -> List[str]:
return self._control_requests
def add_presenter_changed_fn(self, callback: Callable[[layers.LiveSessionUser], None]):
if callback not in self._presenter_changed_callbacks:
self._presenter_changed_callbacks.append(callback)
def remove_presenter_changed_fn(self, callback: Callable[[layers.LiveSessionUser], None]):
if callback in self._presenter_changed_callbacks:
self._presenter_changed_callbacks.remove(callback)
def add_control_request_changed_fn(self, callback: Callable[[layers.LiveSessionUser, bool], None]):
if callback not in self._request_control_callbacks:
self._request_control_callbacks.append(callback)
def remove_control_request_changed_fn(self, callback: Callable[[layers.LiveSessionUser, bool], None]):
if callback in self._request_control_callbacks:
self._request_control_callbacks.remove(callback)
def add_enable_sync_changed_fn(self, callback: Callable[[bool], None]):
if callback not in self._enable_sync_callbacks:
self._enable_sync_callbacks.append(callback)
def remove_enable_sync_changed_fn(self, callback: Callable[[bool], None]):
if callback in self._enable_sync_callbacks:
self._enable_sync_callbacks.remove(callback)
def is_owner(self, user: layers.LiveSessionUser) -> bool:
return user is not None and self._owner is not None and\
user.user_id == self._owner.user_id
def is_presenter(self, user: layers.LiveSessionUser) -> bool:
return user is not None and self._presenter is not None and\
user.user_id == self._presenter.user_id
def is_logged_user(self, user: layers.LiveSessionUser) -> bool:
return user is not None and self._session_user is not None and\
user.user_id == self._session_user.user_id
def am_i_owner(self) -> bool:
return self.is_owner(self._session_user)
def am_i_presenter(self) -> bool:
return self.is_presenter(self._session_user)
def _do_change_role(self, role_type: TimelineSessionRoleType):
# TODO: we don't really need the user change listener at the owner client
# (the owner sets it)
if self._role_type == TimelineSessionRoleType.LISTENER:
self._role = TimelineListener(self._usd_context_name, self._serializer, self)
elif self._role_type == TimelineSessionRoleType.PRESENTER:
self._role = TimelineDirector(self._usd_context_name, self._serializer, self)
def _change_role(self, role_type: TimelineSessionRoleType):
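        # Switch between listener and presenter roles; if a session is running, stop it for the old role and restart it for the new one.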
if self._role_type is not None and self._role_type == role_type:
return
self._role_type = role_type
if self._role is not None:
if self._is_running:
self._role.stop_session()
self._do_change_role(role_type)
if self._is_running:
# This also calls self._role.start_session()
self.start_session()
def _on_presenter_changed(self, user_id: str):
user = self._session_state.find_user(user_id)
if user is None:
# TODO: if None and we are Presenters, become listener?
return
was_current_user_presenter = self.am_i_presenter()
self._presenter = user
if user_id is not None and self._session_user is not None and\
user_id == self._session_user.user_id:
self._change_role(TimelineSessionRoleType.PRESENTER)
nm.post_notification(
"You are now the timeline presenter.",
status=nm.NotificationStatus.INFO
)
else:
if was_current_user_presenter:
nm.post_notification(
"You are no longer the timeline presenter.",
status=nm.NotificationStatus.INFO
)
self._change_role(TimelineSessionRoleType.LISTENER)
for callback in self._presenter_changed_callbacks:
callback(self._presenter)
def _on_control_request_received(self, user_id: str, want_control: bool):
user = self._session_state.find_user(user_id)
if user is None:
return
if self.am_i_owner() and want_control:
nm.post_notification(
f"User {user.user_name} requested to control the timeline.",
status=nm.NotificationStatus.INFO
)
# NOTE: checks are done on the caller side
if want_control:
self._control_requests.append(user_id)
else:
self._control_requests.remove(user_id)
for callback in self._request_control_callbacks:
callback(user, want_control)
def _on_enable_sync(self, is_sync_enabled: bool):
for callback in self._enable_sync_callbacks:
callback(is_sync_enabled)
| 10,930 | Python | 36.307167 | 106 | 0.617475 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/session_state.py | from carb import log_error
import omni.kit.usd.layers as layers
from .timeline_session import TimelineSession
from typing import List, Callable
User = layers.LiveSessionUser
class SessionState:
def __init__(self):
self._users: List[User] = []
self._timeline_session: TimelineSession = None
self._users_changed_callbacks = []
def add_users(self, users: List[User]):
self._users.extend(users)
self._notify_users_changed(users)
def remove_user(self, user_id: str):
self._users = list(filter(lambda user: user.user_id != user_id, self._users))
# TODO: pass the removed user
self._notify_users_changed([])
def destroy(self):
self.clear_users()
self._timeline_session = None
self._users_changed_callbacks = []
def clear_users(self):
users = self._users
self._users = []
self._notify_users_changed(users)
def find_user(self, user_id: str) -> User:
for user in self._users:
if user.user_id == user_id:
return user
return None
@property
def users(self) -> List[User]:
return self._users
@property
def timeline_session(self) -> TimelineSession:
return self._timeline_session
@timeline_session.setter
def timeline_session(self, session: TimelineSession):
self._timeline_session = session
def add_users_changed_fn(self, callback: Callable[[List[User]], None]):
if callback not in self._users_changed_callbacks:
self._users_changed_callbacks.append(callback)
def remove_users_changed_fn(self, callback: Callable[[List[User]], None]):
if callback in self._users_changed_callbacks:
self._users_changed_callbacks.remove(callback)
def _notify_users_changed(self, users: List[User]):
for callback in self._users_changed_callbacks:
callback(users)
| 1,953 | Python | 29.53125 | 85 | 0.631848 |
omniverse-code/kit/exts/omni.timeline.live_session/omni/timeline/live_session/tests/test_ui_window.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.kit.ui_test as ui_test
import omni.kit.usd.layers as layers
import omni.ui
import omni.usd
from omni.timeline.live_session.session_state import SessionState
from omni.timeline.live_session.timeline_session import TimelineSession
from omni.timeline.live_session.timeline_session_role import TimelineSessionRoleType
from omni.timeline.live_session.ui_user_window import UserWindow
from omni.ui.tests.test_base import OmniUiTest
# imported to start/stop the shared session watcher around each test
from ..live_session_extension import _session_watcher as g_session_watcher
class TestUiWindow(OmniUiTest):
fail_on_log_error = False
async def setUp(self):
await super().setUp()
self._window: UserWindow = None
self._usd_context = None
self._layers = None
self._live_syncing = None
g_session_watcher.start()
async def tearDown(self):
g_session_watcher.stop()
if self._window is not None:
self._window.hide()
self._window.destroy()
self._destroy_stage()
await super().tearDown()
async def _setup_stage(self):
self._usd_context = omni.usd.get_context()
self._layers = layers.get_layers(self._usd_context)
self._live_syncing = layers.get_live_syncing(self._usd_context)
await self._usd_context.new_stage_async()
def _destroy_stage(self):
self._usd_context = None
self._layers = None
self._live_syncing = None
async def test_show_hide_window(self):
self._window = UserWindow(SessionState())
self._window._window.title = "Timeline Session Test"
window_ref = ui_test.find("Timeline Session Test")
self.assertIsNotNone(window_ref)
self.assertFalse(window_ref.window.visible)
self._window.show()
self.assertTrue(window_ref.window.visible)
self._window.hide()
self.assertFalse(window_ref.window.visible)
async def test_no_session(self):
self._window = UserWindow(SessionState())
self._window._window.title = "Timeline Session Test"
window_ref = ui_test.find("Timeline Session Test")
label = window_ref.find_all("**/Label[*]")
self.assertEqual(len(label), 1)
self.assertTrue(label[0].widget.text == "Timeline session is not available")
async def test_display_user(self):
session_state = SessionState()
owner = layers.LiveSessionUser("owner", "owner_id", "test")
user1 = layers.LiveSessionUser("user1", "user1_id", "test")
user2 = layers.LiveSessionUser("user2", "user2_id", "test")
timeline_session = TimelineSession("", owner, session_state, TimelineSessionRoleType.PRESENTER)
session_state.timeline_session = timeline_session
self._window = UserWindow(session_state)
self._window._window.title = "Timeline Session Test"
# register ui building callback
self._window.show()
window_ref = ui_test.find("Timeline Session Test")
labels = window_ref.find_all("**/Label[*]")
# without users
field_texts = ["Session Users", "Timeline Control Requests", "Allowed time difference from Presenter (sec): "]
for index, label in enumerate(labels):
self.assertTrue(label.widget.text, field_texts[index])
# add users
session_state.add_users([owner, user1])
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertEqual(len(users), 2)
current_users = ["owner (test) [Current user]", "user1 (test)"]
for index, user in enumerate(users):
self.assertTrue(user.widget.text, current_users[index])
session_state.add_users([user2])
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertEqual(len(users), 3)
self.assertTrue(users[2].widget.text, "user2 (test)")
# remove user
session_state.remove_user("user2_id")
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertEqual(len(users), 2)
for index, user in enumerate(users):
self.assertTrue(user.widget.text, current_users[index])
    async def test_request_control_list(self):
session_state = SessionState()
owner = layers.LiveSessionUser("owner", "owner_id", "test")
user1 = layers.LiveSessionUser("user1", "user1_id", "test")
timeline_session = TimelineSession("", owner, session_state, TimelineSessionRoleType.PRESENTER)
session_state.timeline_session = timeline_session
self._window = UserWindow(session_state)
self._window._window.title = "Timeline Session Test"
# register ui building callback
self._window.show()
window_ref = ui_test.find("Timeline Session Test")
session_state.add_users([owner, user1])
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
# workaround to request timeline control for UI tests
timeline_session._is_running = True
request_control_btn = window_ref.find('**/Button[*].text == "Request Timeline Control"')
self.assertIsNotNone(request_control_btn)
await request_control_btn.click()
self.assertIsNotNone(
window_ref.find(f'**/ScrollingFrame[1]/VStack[0]/Label[0].text == "{users[0].widget.text}"')
)
revoke_btn = window_ref.find('**/Button[*].text == "Revoke Request"')
self.assertIsNotNone(revoke_btn)
await revoke_btn.click()
self.assertEqual(len(window_ref.find_all(f"**/ScrollingFrame[1]/VStack[0]/Label[*]")), 0)
# click user
await users[0].click()
self.assertIsNone(window_ref.find('**/Button[*].text == "Set as Timeline Presenter"'))
        ## click a user in the control request list
request_ids = ["owner_id", "user1_id"]
timeline_session._control_requests.extend(request_ids)
## force rebuild ui
self._window.show()
request_control_users = window_ref.find_all("**/ScrollingFrame[1]/VStack[0]/Label[*]")
await request_control_users[0].click()
self.assertIsNone(window_ref.find('**/Button[*].text == "Set as Timeline Presenter"'))
async def test_btns(self):
session_state = SessionState()
owner = layers.LiveSessionUser("owner", "owner_id", "test")
user1 = layers.LiveSessionUser("user1", "user1_id", "test")
timeline_session = TimelineSession("", owner, session_state, TimelineSessionRoleType.PRESENTER)
session_state.timeline_session = timeline_session
self._window = UserWindow(session_state)
self._window._window.title = "Timeline Session Test"
# register ui building callback
self._window.show()
window_ref = ui_test.find("Timeline Session Test")
session_state.add_users([owner, user1])
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
# non-owner buttons
self.assertIsNone(window_ref.find('**/Button[*].text == "Set as Timeline Presenter"'))
# set owner
# workaround to set owner for UI tests
timeline_session._is_running = True
timeline_session.owner = owner
## force rebuild ui
self._window.show()
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
owner = window_ref.find("**/ScrollingFrame[0]/VStack[0]/Label[0]")
self.assertIsNotNone(owner)
self.assertTrue(users[0].widget.text, "owner (test) [Owner][Current user]")
presenter_btn = window_ref.find('**/Button[*].text == "Set as Timeline Presenter"')
self.assertIsNotNone(presenter_btn)
self.assertFalse(presenter_btn.widget.visible)
self.assertIsNone(window_ref.find('**/Button[*].text == "Request Timeline Control"'))
        # click user as an owner
        ## make myself the presenter
        await users[0].click()
self.assertTrue(presenter_btn.widget.visible)
await presenter_btn.click()
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
owner = window_ref.find("**/ScrollingFrame[0]/VStack[0]/Label[0]")
self.assertIsNotNone(owner)
self.assertTrue(users[0].widget.text, "owner (test) [Owner][Presenter][Current user]")
self.assertIsNone(window_ref.find('**/Label[*].text == "Allowed time difference from Presenter (sec): "'))
presenter_btn = window_ref.find('**/Button[*].text == "Set as Timeline Presenter"')
self.assertFalse(presenter_btn.widget.visible)
await users[0].click()
self.assertFalse(presenter_btn.widget.visible)
        ## make user1 the presenter from the control request list
request_ids = ["owner_id", "user1_id"]
timeline_session._control_requests = request_ids
## force rebuild ui
self._window.show()
await window_ref.find('**/ScrollingFrame[1]/**/Label[*].text == "user1 (test) "').click()
presenter_btn = window_ref.find('**/Button[*].text == "Set as Timeline Presenter"')
self.assertTrue(presenter_btn.widget.visible)
# workaround for being a listener to test UI
timeline_session._role_type = TimelineSessionRoleType.LISTENER
await presenter_btn.click()
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertIsNotNone(
window_ref.find('**/ScrollingFrame[0]/VStack[0]/Label[*].text == "user1 (test) [Presenter]"')
)
self.assertIsNone(window_ref.find('**/ScrollingFrame[1]/VStack[0]/Label[*].text == "user1 (test) "'))
self.assertIsNone(window_ref.find('**/ScrollingFrame[1]/VStack[0]/Label[*].text == "user1 (test) [Presenter]"'))
        ## make myself the presenter from the control request list
request_control_users = window_ref.find_all("**/ScrollingFrame[1]/VStack[0]/Label[*]")
await request_control_users[0].click()
self.assertTrue(presenter_btn.widget.visible)
        # workaround for being a presenter to test UI
        timeline_session._role_type = TimelineSessionRoleType.PRESENTER
        await presenter_btn.click()
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertIsNotNone(
window_ref.find(
'**/ScrollingFrame[0]/VStack[0]/Label[*].text == "owner (test) [Owner][Presenter][Current user]"'
)
)
self.assertIsNone(
window_ref.find('**/ScrollingFrame[1]/VStack[0]/Label[*].text == "owner (test) [Owner][Current user]"')
)
self.assertIsNone(
window_ref.find(
'**/ScrollingFrame[1]/VStack[0]/Label[*].text == "owner (test) [Owner][Presenter][Current user]"'
)
)
        ## make user1 the presenter
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
await users[1].click()
self.assertTrue(presenter_btn.widget.visible)
        ### workaround to avoid error logs from timeline_session_role,
        ### since this test does not exercise its functionality
        timeline_session._role_type = TimelineSessionRoleType.LISTENER
        await presenter_btn.click()
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
self.assertIsNotNone(
window_ref.find('**/ScrollingFrame[0]/VStack[0]/Label[*].text == "user1 (test) [Presenter]"')
)
async def test_time_difference_slider(self):
session_state = SessionState()
owner = layers.LiveSessionUser("owner", "owner_id", "test")
user1 = layers.LiveSessionUser("user1", "user1_id", "test")
timeline_session = TimelineSession("", owner, session_state, TimelineSessionRoleType.LISTENER)
session_state.timeline_session = timeline_session
self._window = UserWindow(session_state)
self._window._window.title = "Timeline Session Test"
# register ui building callback
self._window.show()
window_ref = ui_test.find("Timeline Session Test")
session_state.add_users([owner, user1])
users = window_ref.find_all("**/ScrollingFrame[0]/VStack[0]/Label[*]")
slider = window_ref.find('**/FloatSlider[*].name == "tsync_maxdiff_slider"')
self.assertIsNotNone(slider)
await slider.input("0")
self.assertEqual(timeline_session.role.sync_strategy.strategy_desc.max_time_diff_sec, 0)
await slider.input("1")
self.assertEqual(timeline_session.role.sync_strategy.strategy_desc.max_time_diff_sec, 1.0)
await slider.input("1.7")
self.assertEqual(timeline_session.role.sync_strategy.strategy_desc.max_time_diff_sec, 1.7)
await slider.input("2")
self.assertEqual(timeline_session.role.sync_strategy.strategy_desc.max_time_diff_sec, 2.0)
| 13,361 | Python | 41.826923 | 120 | 0.642317 |