file_path
stringlengths 21
207
| content
stringlengths 5
1.02M
| size
int64 5
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_custom_pyeval_settrace_common.hpp | #ifndef _PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_
#define _PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_
#include "python.h"
#include "py_utils.hpp"
// Bundle of CPython API entry points resolved dynamically from the target
// interpreter at attach time. Filled in by InternalSetSysTraceFunc and passed
// to InternalTraceInit(); the trace trampolines call through these pointers.
struct InternalInitializeCustomPyEvalSetTrace {
    PyObject* pyNone;  // pointer to the target interpreter's Py_None (caller increfs before storing)
    PyTuple_New* pyTuple_New;
    _PyObject_FastCallDict* pyObject_FastCallDict;
    PyEval_CallObjectWithKeywords* pyEval_CallObjectWithKeywords;
    PyUnicode_InternFromString* pyUnicode_InternFromString; // Note: in Py2 will be PyString_InternFromString.
    PyTraceBack_Here* pyTraceBack_Here;
    PyEval_SetTrace* pyEval_SetTrace;
    bool isDebug;  // true when the target interpreter is a debug build (affects refcount offsets)
    PyUnicode_AsUTF8* pyUnicode_AsUTF8;  // optional; used only for debug printing (may be NULL)
    PyObject_Repr* pyObject_Repr;        // optional; used only for debug printing (may be NULL)
};
/**
* Helper information to access CPython internals.
*/
static InternalInitializeCustomPyEvalSetTrace *internalInitializeCustomPyEvalSetTrace = NULL;
/*
* Cached interned string objects used for calling the profile and
* trace functions. Initialized by InternalTraceInit().
*/
static PyObject *InternalWhatstrings_37[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
// Non-zero once InternalTraceInit() has stored the helper-struct pointer.
static int
InternalIsTraceInitialized()
{
    return (internalInitializeCustomPyEvalSetTrace == NULL) ? 0 : 1;
}
// Stores the helper struct into the file-level global and interns the 8
// trace-event name strings ("call", "line", ...) into InternalWhatstrings_37,
// indexed by the PyTrace_* "what" constants.
// Returns 0 on success, -1 if interning any string fails.
static int
InternalTraceInit(InternalInitializeCustomPyEvalSetTrace *p_internalInitializeSettrace_37)
{
    internalInitializeCustomPyEvalSetTrace = p_internalInitializeSettrace_37;
    // Event names in PyTrace_* constant order.
    static const char * const whatnames[8] = {
        "call", "exception", "line", "return",
        "c_call", "c_exception", "c_return",
        "opcode"
    };
    PyObject *name;
    int i;
    for (i = 0; i < 8; ++i) {
        // Idempotent: already-interned slots are kept on repeated calls.
        if (InternalWhatstrings_37[i] == NULL) {
            name = internalInitializeCustomPyEvalSetTrace->pyUnicode_InternFromString(whatnames[i]);
            if (name == NULL)
                return -1;
            InternalWhatstrings_37[i] = name;
        }
    }
    return 0;
}
#endif //_PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_ | 1,870 | C++ | 29.177419 | 111 | 0.707487 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_settrace.hpp | #ifndef _PY_SETTRACE_HPP_
#define _PY_SETTRACE_HPP_
#include "ref_utils.hpp"
#include "py_utils.hpp"
#include "python.h"
#include "py_custom_pyeval_settrace.hpp"
#include <unordered_set>
#ifdef _WIN32
typedef HMODULE MODULE_TYPE;
#else // LINUX -----------------------------------------------------------------
typedef void* MODULE_TYPE;
typedef ssize_t SSIZE_T;
typedef unsigned int DWORD;
#endif
// Extracts the OS thread id from a PyThreadState by casting to the struct
// layout that matches the given CPython version (2.5 through 3.11).
// Returns 0 when the version matches none of the known layouts.
DWORD GetPythonThreadId(PythonVersion version, PyThreadState* curThread) {
    DWORD threadId = 0;
    if (PyThreadState_25_27::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_25_27*)curThread)->thread_id;
    } else if (PyThreadState_30_33::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_30_33*)curThread)->thread_id;
    } else if (PyThreadState_34_36::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_34_36*)curThread)->thread_id;
    } else if (PyThreadState_37_38::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_37_38*)curThread)->thread_id;
    } else if (PyThreadState_39::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_39*)curThread)->thread_id;
    } else if (PyThreadState_310::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_310*)curThread)->thread_id;
    } else if (PyThreadState_311::IsFor(version)) {
        threadId = (DWORD)((PyThreadState_311*)curThread)->thread_id;
    }
    return threadId;
}
/**
 * This function may be called to set a tracing function to existing python threads.
 *
 * Resolves the needed CPython API symbols from `module`, acquires the GIL,
 * walks the interpreter's thread list for the thread whose id equals
 * `threadId`, initializes the shared trace helper struct on first use, and
 * installs the trace trampoline for that thread.
 *
 * Returns 0 on success, or a non-zero numeric code identifying the failing
 * step (e.g. 110 = interpreter not initialized, 501 = thread not found).
 */
int InternalSetSysTraceFunc(
    MODULE_TYPE module,
    bool isDebug,
    bool showDebugInfo,
    PyObjectHolder* traceFunc,
    PyObjectHolder* setTraceFunc,
    unsigned int threadId,
    PyObjectHolder* pyNone)
{
    if(showDebugInfo){
        PRINT("InternalSetSysTraceFunc started.");
    }

    DEFINE_PROC(isInit, Py_IsInitialized*, "Py_IsInitialized", 100);
    if (!isInit()) {
        PRINT("Py_IsInitialized returned false.");
        return 110;
    }

    auto version = GetPythonVersion(module);

    // found initialized Python runtime, gather and check the APIs we need.
    DEFINE_PROC(interpHead, PyInterpreterState_Head*, "PyInterpreterState_Head", 120);
    DEFINE_PROC(gilEnsure, PyGILState_Ensure*, "PyGILState_Ensure", 130);
    DEFINE_PROC(gilRelease, PyGILState_Release*, "PyGILState_Release", 140);
    DEFINE_PROC(threadHead, PyInterpreterState_ThreadHead*, "PyInterpreterState_ThreadHead", 150);
    DEFINE_PROC(threadNext, PyThreadState_Next*, "PyThreadState_Next", 160);
    DEFINE_PROC(threadSwap, PyThreadState_Swap*, "PyThreadState_Swap", 170);
    DEFINE_PROC(call, PyObject_CallFunctionObjArgs*, "PyObject_CallFunctionObjArgs", 180);

    // Py3 renamed PyInt_FromLong to PyLong_FromLong; same pointer type here.
    PyInt_FromLong* intFromLong;

    if (version >= PythonVersion_30) {
        DEFINE_PROC(intFromLongPy3, PyInt_FromLong*, "PyLong_FromLong", 190);
        intFromLong = intFromLongPy3;
    } else {
        DEFINE_PROC(intFromLongPy2, PyInt_FromLong*, "PyInt_FromLong", 200);
        intFromLong = intFromLongPy2;
    }

    DEFINE_PROC(pyGetAttr, PyObject_GetAttrString*, "PyObject_GetAttrString", 250);
    DEFINE_PROC(pyHasAttr, PyObject_HasAttrString*, "PyObject_HasAttrString", 260);
    DEFINE_PROC_NO_CHECK(PyCFrame_Type, PyTypeObject*, "PyCFrame_Type", 300);  // optional
    DEFINE_PROC_NO_CHECK(curPythonThread, PyThreadState**, "_PyThreadState_Current", 310);  // optional
    DEFINE_PROC_NO_CHECK(getPythonThread, _PyThreadState_UncheckedGet*, "_PyThreadState_UncheckedGet", 320);  // optional

    if (curPythonThread == nullptr && getPythonThread == nullptr) {
        // we're missing some APIs, we cannot attach.
        PRINT("Error, missing Python threading API!!");
        return 330;
    }

    auto head = interpHead();
    if (head == nullptr) {
        // this interpreter is loaded but not initialized.
        PRINT("Interpreter not initialized!");
        return 340;
    }

    GilHolder gilLock(gilEnsure, gilRelease);   // acquire and hold the GIL until done...

    int retVal = 0;

    // find what index is holding onto the thread state...
    auto curPyThread = getPythonThread ? getPythonThread() : *curPythonThread;

    if(curPyThread == nullptr){
        PRINT("Getting the current python thread returned nullptr.");
        return 345;
    }

    // We do what PyEval_SetTrace does, but for any target thread.
    PyUnicode_InternFromString* pyUnicode_InternFromString;
    if (version >= PythonVersion_30) {
        DEFINE_PROC(unicodeFromString, PyUnicode_InternFromString*, "PyUnicode_InternFromString", 520);
        pyUnicode_InternFromString = unicodeFromString;
    } else {
        DEFINE_PROC(stringFromString, PyUnicode_InternFromString*, "PyString_InternFromString", 525);
        pyUnicode_InternFromString = stringFromString;
    }

    DEFINE_PROC_NO_CHECK(pyObject_FastCallDict, _PyObject_FastCallDict*, "_PyObject_FastCallDict", 530);
    DEFINE_PROC(pyTuple_New, PyTuple_New*, "PyTuple_New", 531);
    DEFINE_PROC(pyEval_CallObjectWithKeywords, PyEval_CallObjectWithKeywords*, "PyEval_CallObjectWithKeywords", 532);

    if(pyObject_FastCallDict == nullptr) {
        // Newer CPython exposes this under the public name instead; try that.
        DEFINE_PROC_NO_CHECK(pyObject_FastCallDict, _PyObject_FastCallDict*, "PyObject_VectorcallDict", 533);
    }

    if(pyObject_FastCallDict == nullptr) {
        // we have to use PyObject_FastCallDictCustom for older versions of CPython (pre 3.7).
        pyObject_FastCallDict = reinterpret_cast<_PyObject_FastCallDict*>(&PyObject_FastCallDictCustom);
    }

    DEFINE_PROC(pyTraceBack_Here, PyTraceBack_Here*, "PyTraceBack_Here", 540);
    DEFINE_PROC(pyEval_SetTrace, PyEval_SetTrace*, "PyEval_SetTrace", 550);

    // These are defined mostly for printing info while debugging, so, if they're not there, don't bother reporting.
    DEFINE_PROC_NO_CHECK(pyObject_Repr, PyObject_Repr*, "PyObject_Repr", 551);
    DEFINE_PROC_NO_CHECK(pyUnicode_AsUTF8, PyUnicode_AsUTF8*, "PyUnicode_AsUTF8", 552);

    bool found = false;
    for (PyThreadState* curThread = threadHead(head); curThread != nullptr; curThread = threadNext(curThread)) {
        if (GetPythonThreadId(version, curThread) != threadId) {
            continue;
        }
        found = true;
        if(showDebugInfo){
            printf("setting trace for thread: %d\n", threadId);
        }

        if(!InternalIsTraceInitialized())
        {
            // Note: this local shadows the file-level global of the same name;
            // InternalTraceInit() stores this pointer into that global. The
            // allocation is never freed (it lives for the rest of the process).
            InternalInitializeCustomPyEvalSetTrace *internalInitializeCustomPyEvalSetTrace = new InternalInitializeCustomPyEvalSetTrace();

            IncRef(pyNone->ToPython());
            internalInitializeCustomPyEvalSetTrace->pyNone = pyNone->ToPython();
            internalInitializeCustomPyEvalSetTrace->pyUnicode_InternFromString = pyUnicode_InternFromString;
            internalInitializeCustomPyEvalSetTrace->pyObject_FastCallDict = pyObject_FastCallDict;
            internalInitializeCustomPyEvalSetTrace->isDebug = isDebug;
            internalInitializeCustomPyEvalSetTrace->pyTraceBack_Here = pyTraceBack_Here;
            internalInitializeCustomPyEvalSetTrace->pyEval_SetTrace = pyEval_SetTrace;
            internalInitializeCustomPyEvalSetTrace->pyTuple_New = pyTuple_New;
            internalInitializeCustomPyEvalSetTrace->pyEval_CallObjectWithKeywords = pyEval_CallObjectWithKeywords;
            internalInitializeCustomPyEvalSetTrace->pyObject_Repr = pyObject_Repr;
            internalInitializeCustomPyEvalSetTrace->pyUnicode_AsUTF8 = pyUnicode_AsUTF8;

            InternalTraceInit(internalInitializeCustomPyEvalSetTrace);
        }
        InternalPySetTrace(curThread, traceFunc, isDebug, version);
        break;
    }
    if(!found) {
        retVal = 501;
    }
    return retVal;
}
#endif // _PY_SETTRACE_HPP_
| 7,763 | C++ | 39.020618 | 138 | 0.680149 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_utils.hpp | #ifndef _PY_UTILS_HPP_
#define _PY_UTILS_HPP_
// Typedefs for the subset of the CPython C API that the attach machinery
// resolves dynamically (via GetProcAddress/dlsym) from the target process'
// Python runtime. Each typedef names the *function type*, so "Name*" is the
// matching function-pointer type used with DEFINE_PROC below.
typedef int (Py_IsInitialized)();
typedef PyInterpreterState* (PyInterpreterState_Head)();
typedef enum { PyGILState_LOCKED, PyGILState_UNLOCKED } PyGILState_STATE;
typedef PyGILState_STATE(PyGILState_Ensure)();
typedef void (PyGILState_Release)(PyGILState_STATE);
typedef int (PyRun_SimpleString)(const char *command);
typedef PyThreadState* (PyInterpreterState_ThreadHead)(PyInterpreterState* interp);
typedef PyThreadState* (PyThreadState_Next)(PyThreadState *tstate);
typedef PyThreadState* (PyThreadState_Swap)(PyThreadState *tstate);
typedef PyThreadState* (_PyThreadState_UncheckedGet)();
typedef PyObject* (PyObject_CallFunctionObjArgs)(PyObject *callable, ...); // call w/ varargs, last arg should be nullptr
typedef PyObject* (PyInt_FromLong)(long);
typedef PyObject* (PyErr_Occurred)();
typedef void (PyErr_Fetch)(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback);
typedef void (PyErr_Restore)(PyObject *type, PyObject *value, PyObject *traceback);
typedef PyObject* (PyImport_ImportModule) (const char *name);
typedef PyObject* (PyImport_ImportModuleNoBlock) (const char *name);
typedef PyObject* (PyObject_GetAttrString)(PyObject *o, const char *attr_name);
typedef PyObject* (PyObject_HasAttrString)(PyObject *o, const char *attr_name);
typedef void* (PyThread_get_key_value)(int);
typedef int (PyThread_set_key_value)(int, void*);
typedef void (PyThread_delete_key_value)(int);
typedef int (PyObject_Not) (PyObject *o);
typedef PyObject* (PyDict_New)();
typedef PyObject* (PyUnicode_InternFromString)(const char *u);
typedef PyObject * (_PyObject_FastCallDict)(
    PyObject *callable, PyObject *const *args, Py_ssize_t nargs, PyObject *kwargs);
typedef int (PyTraceBack_Here)(PyFrameObject *frame);
typedef PyObject* PyTuple_New(Py_ssize_t len);
typedef PyObject* PyEval_CallObjectWithKeywords(PyObject *callable, PyObject *args, PyObject *kwargs);
// NOTE(review): PyEval_SetTrace references Py_tracefunc one line before this
// file's own typedef of it — presumably "python.h" already declares
// Py_tracefunc; confirm, otherwise this would not compile.
typedef void (PyEval_SetTrace)(Py_tracefunc, PyObject *);
typedef int (*Py_tracefunc)(PyObject *, PyFrameObject *frame, int, PyObject *);
typedef int (_PyEval_SetTrace)(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
typedef PyObject* PyObject_Repr(PyObject *);
typedef const char* PyUnicode_AsUTF8(PyObject *unicode);
// holder to ensure we release the GIL even in error conditions
//
// RAII guard over the CPython GIL: the constructor calls the supplied
// PyGILState_Ensure and the destructor calls PyGILState_Release with the
// saved state, so early returns still release the GIL.
class GilHolder {
    PyGILState_STATE _gilState;    // token returned by Ensure, handed back to Release
    PyGILState_Release* _release;  // release function resolved from the target runtime

public:
    GilHolder(PyGILState_Ensure* acquire, PyGILState_Release* release) {
        _gilState = acquire();
        _release = release;
    }

    ~GilHolder() {
        _release(_gilState);
    }
};
#ifdef _WIN32

// Print a message plus newline, flushing immediately.
#define PRINT(msg) {std::cout << msg << std::endl << std::flush;}

// Resolve symbol funcNameStr from `module` (a variable expected to be in
// scope at the expansion site) into a new local named `func`. The _NO_CHECK
// variant tolerates a missing symbol: `func` is simply left as nullptr.
#define DEFINE_PROC_NO_CHECK(func, funcType, funcNameStr, errorCode) \
    funcType func=reinterpret_cast<funcType>(GetProcAddress(module, funcNameStr));

// Same, but returns `errorCode` from the enclosing function when not found.
#define DEFINE_PROC(func, funcType, funcNameStr, errorCode) \
    DEFINE_PROC_NO_CHECK(func, funcType, funcNameStr, errorCode); \
    if(func == nullptr){std::cout << funcNameStr << " not found." << std::endl << std::flush; return errorCode;};

#else // LINUX -----------------------------------------------------------------

#define PRINT(msg) {printf(msg); printf("\n");}

// Print `msg` and return `errorCode` when `ptr` is null.
#define CHECK_NULL(ptr, msg, errorCode) if(ptr == nullptr){printf(msg); return errorCode;}

// dlsym-based equivalents of the Windows macros above.
#define DEFINE_PROC_NO_CHECK(func, funcType, funcNameStr, errorCode) \
    funcType func; *(void**)(&func) = dlsym(module, funcNameStr);

#define DEFINE_PROC(func, funcType, funcNameStr, errorCode) \
    DEFINE_PROC_NO_CHECK(func, funcType, funcNameStr, errorCode); \
    if(func == nullptr){printf(funcNameStr); printf(" not found.\n"); return errorCode;};

#endif //_WIN32
#endif //_PY_UTILS_HPP_ | 3,811 | C++ | 44.380952 | 129 | 0.706639 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/ref_utils.hpp | #ifndef _REF_UTILS_HPP_
#define _REF_UTILS_HPP_
// Debug builds of CPython place two extra pointers at the front of every
// object; skip them to reach the regular PyObject layout. Release builds
// (and NULL) pass through unchanged.
PyObject* GetPyObjectPointerNoDebugInfo(bool isDebug, PyObject* object) {
    if (object == nullptr || !isDebug) {
        return object;
    }
    return reinterpret_cast<PyObject*>(reinterpret_cast<size_t*>(object) + 2);
}
// Decrements the object's reference count (adjusted for debug builds) and
// invokes tp_dealloc when it reaches zero. NULL is tolerated.
// Note: tp_dealloc receives the ORIGINAL (unadjusted) pointer.
void DecRef(PyObject* object, bool isDebug) {
    auto noDebug = GetPyObjectPointerNoDebugInfo(isDebug, object);

    if (noDebug != nullptr && --noDebug->ob_refcnt == 0) {
        ((PyTypeObject*)GetPyObjectPointerNoDebugInfo(isDebug, noDebug->ob_type))->tp_dealloc(object);
    }
}
// Increments ob_refcnt in place. Unlike DecRef, this does NOT adjust for
// debug builds — the caller must pass an already-adjusted pointer if needed.
// No NULL check: `object` must be non-null.
void IncRef(PyObject* object) {
    object->ob_refcnt++;
}
// Owning wrapper around a PyObject*: releases one reference in the
// destructor. `_isDebug` records whether the target interpreter is a debug
// build so refcount fields can be located (see GetPyObjectPointerNoDebugInfo).
class PyObjectHolder {
private:
    PyObject* _object;
public:
    bool _isDebug;

    // Empty holder; owns nothing.
    PyObjectHolder(bool isDebug) {
        _object = nullptr;
        _isDebug = isDebug;
    }

    // Takes ownership of `object` (consumes the caller's reference, since the
    // destructor will DecRef it).
    PyObjectHolder(bool isDebug, PyObject *object) {
        _object = object;
        _isDebug = isDebug;
    };

    // Holds `object`; when addRef is true an extra reference is taken, so the
    // caller keeps its own.
    PyObjectHolder(bool isDebug, PyObject *object, bool addRef) {
        _object = object;
        _isDebug = isDebug;

        if (_object != nullptr && addRef) {
            GetPyObjectPointerNoDebugInfo(_isDebug, _object)->ob_refcnt++;
        }
    };

    // Borrowed pointer to the held object (no refcount change, not adjusted
    // for debug builds).
    PyObject* ToPython() {
        return _object;
    }

    ~PyObjectHolder() {
        DecRef(_object, _isDebug);
    }

    // Dereference adjusted for debug builds.
    PyObject* operator* () {
        return GetPyObjectPointerNoDebugInfo(_isDebug, _object);
    }
};
#endif //_REF_UTILS_HPP_ | 1,475 | C++ | 22.428571 | 102 | 0.616271 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_custom_pyeval_settrace_311.hpp | #ifndef _PY_CUSTOM_PYEVAL_SETTRACE_311_HPP_
#define _PY_CUSTOM_PYEVAL_SETTRACE_311_HPP_
#include "python.h"
#include "py_utils.hpp"
// Mirrors CPython 3.11's call_trampoline: invokes `callback` with
// (frame, whatstring, arg-or-None) through the FastCallDict pointer, and
// records a traceback into the frame on failure. Returns a new reference,
// or NULL on error.
static PyObject *
InternalCallTrampoline311(PyObject* callback,
                          PyFrameObject311 *frame, int what, PyObject *arg)
{
    PyObject *result;
    PyObject *stack[3];

    // Note: this is commented out from CPython (we shouldn't need it and it adds a reasonable overhead).
    // if (PyFrame_FastToLocalsWithError(frame) < 0) {
    //     return NULL;
    // }
    //
    stack[0] = (PyObject *)frame;
    stack[1] = InternalWhatstrings_37[what];  // interned event name ("call", "line", ...)
    stack[2] = (arg != NULL) ? arg : internalInitializeCustomPyEvalSetTrace->pyNone;

    // Helper to print info.
    //printf("--- start\n");
    //printf("%s\n", internalInitializeCustomPyEvalSetTrace->pyUnicode_AsUTF8(internalInitializeCustomPyEvalSetTrace->pyObject_Repr((PyObject *)stack[0])));
    //printf("%s\n", internalInitializeCustomPyEvalSetTrace->pyUnicode_AsUTF8(internalInitializeCustomPyEvalSetTrace->pyObject_Repr((PyObject *)stack[1])));
    //printf("%s\n", internalInitializeCustomPyEvalSetTrace->pyUnicode_AsUTF8(internalInitializeCustomPyEvalSetTrace->pyObject_Repr((PyObject *)stack[2])));
    //printf("--- end\n");

    result = internalInitializeCustomPyEvalSetTrace->pyObject_FastCallDict(callback, stack, 3, NULL);

    // Note: this is commented out from CPython (we shouldn't need it and it adds a reasonable overhead).
    // PyFrame_LocalsToFast(frame, 1);

    if (result == NULL) {
        internalInitializeCustomPyEvalSetTrace->pyTraceBack_Here(frame);
    }

    return result;
}
// See: static int trace_trampoline(PyObject *self, PyFrameObject *frame, int what, PyObject *arg)
// in: https://github.com/python/cpython/blob/3.11/Python/sysmodule.c
//
// C-level trace hook installed via PyEval_SetTrace. Dispatches each trace
// event to the Python-level trace function and manages frame->f_trace:
// the value returned by the trace function becomes the frame's local trace
// function; on error, tracing is disabled entirely.
static int
InternalTraceTrampoline311(PyObject *self, PyFrameObject *frameParam,
                           int what, PyObject *arg)
{
    PyObject *callback;
    PyObject *result;

    PyFrameObject311 *frame = reinterpret_cast<PyFrameObject311*>(frameParam);

    if (what == PyTrace_CALL){
        // "call" events go to the global trace function (self).
        callback = self;
    } else {
        // All other events go to the frame's local trace function, if any.
        callback = frame->f_trace;
    }

    if (callback == NULL){
        return 0;
    }

    result = InternalCallTrampoline311(callback, frame, what, arg);
    if (result == NULL) {
        // Note: calling the original sys.settrace here.
        internalInitializeCustomPyEvalSetTrace->pyEval_SetTrace(NULL, NULL);
        PyObject *temp_f_trace = frame->f_trace;
        frame->f_trace = NULL;
        if(temp_f_trace != NULL){
            DecRef(temp_f_trace, internalInitializeCustomPyEvalSetTrace->isDebug);
        }
        return -1;
    }
    if (result != internalInitializeCustomPyEvalSetTrace->pyNone) {
        // A non-None result replaces the frame's local trace function.
        PyObject *tmp = frame->f_trace;
        frame->f_trace = result;
        DecRef(tmp, internalInitializeCustomPyEvalSetTrace->isDebug);
    }
    else {
        // None: keep the current local trace function; drop the result ref.
        DecRef(result, internalInitializeCustomPyEvalSetTrace->isDebug);
    }
    return 0;
}
// Based on ceval.c (PyEval_SetTrace(Py_tracefunc func, PyObject *arg))
// https://github.com/python/cpython/blob/3.11/Python/ceval.c
//
// Installs InternalTraceTrampoline311 as the C trace function on `tstate`,
// with `traceFunc` as its argument, mimicking PyEval_SetTrace for an
// arbitrary (templated) thread-state layout. The old trace object, if any,
// is released.
template<typename T>
void InternalPySetTrace_Template311(T tstate, PyObjectHolder* traceFunc, bool isDebug)
{
    PyObject *traceobj = tstate->c_traceobj;

    PyObject *arg = traceFunc->ToPython();
    IncRef(arg);
    // Clear the hook before dropping the old object, matching CPython's order.
    tstate->c_tracefunc = NULL;
    tstate->c_traceobj = NULL;
    // This is different (previously it was just: tstate->use_tracing, now
    // this flag is per-frame).
    int use_tracing = (tstate->c_profilefunc != NULL);
    // Note: before 3.11 this was just 1 or 0, now it needs to be 255 or 0.
    tstate->cframe->use_tracing = (use_tracing ? 255 : 0);

    if(traceobj != NULL){
        DecRef(traceobj, isDebug);
    }
    tstate->c_tracefunc = InternalTraceTrampoline311;
    tstate->c_traceobj = arg;
    /* Flag that tracing or profiling is turned on */
    use_tracing = ((InternalTraceTrampoline311 != NULL)
                   || (tstate->c_profilefunc != NULL));
    // Note: before 3.11 this was just 1 or 0, now it needs to be 255 or 0.
    tstate->cframe->use_tracing = (use_tracing ? 255 : 0);
};
#endif //_PY_CUSTOM_PYEVAL_SETTRACE_311_HPP_ | 4,269 | C++ | 34.583333 | 156 | 0.658702 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/winapi.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import ctypes
from ctypes.wintypes import BOOL, DWORD, HANDLE, LARGE_INTEGER, LPCSTR, UINT
from debugpy.common import log
JOBOBJECTCLASS = ctypes.c_int
LPDWORD = ctypes.POINTER(DWORD)
LPVOID = ctypes.c_void_p
SIZE_T = ctypes.c_size_t
ULONGLONG = ctypes.c_ulonglong
class IO_COUNTERS(ctypes.Structure):
    """ctypes mapping of the Win32 IO_COUNTERS structure (per-job I/O
    accounting counters, embedded in JOBOBJECT_EXTENDED_LIMIT_INFORMATION)."""

    _fields_ = [
        ("ReadOperationCount", ULONGLONG),
        ("WriteOperationCount", ULONGLONG),
        ("OtherOperationCount", ULONGLONG),
        ("ReadTransferCount", ULONGLONG),
        ("WriteTransferCount", ULONGLONG),
        ("OtherTransferCount", ULONGLONG),
    ]
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
    """ctypes mapping of the Win32 JOBOBJECT_BASIC_LIMIT_INFORMATION
    structure; LimitFlags takes JOB_OBJECT_LIMIT_* bits defined below."""

    _fields_ = [
        ("PerProcessUserTimeLimit", LARGE_INTEGER),
        ("PerJobUserTimeLimit", LARGE_INTEGER),
        ("LimitFlags", DWORD),
        ("MinimumWorkingSetSize", SIZE_T),
        ("MaximumWorkingSetSize", SIZE_T),
        ("ActiveProcessLimit", DWORD),
        ("Affinity", SIZE_T),
        ("PriorityClass", DWORD),
        ("SchedulingClass", DWORD),
    ]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
    """ctypes mapping of the Win32 JOBOBJECT_EXTENDED_LIMIT_INFORMATION
    structure, used with Query/SetInformationJobObject and the
    JobObjectExtendedLimitInformation information class."""

    _fields_ = [
        ("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
        ("IoInfo", IO_COUNTERS),
        ("ProcessMemoryLimit", SIZE_T),
        ("JobMemoryLimit", SIZE_T),
        ("PeakProcessMemoryUsed", SIZE_T),
        ("PeakJobMemoryUsed", SIZE_T),
    ]
# Information class for Query/SetInformationJobObject (value 9 corresponds to
# JobObjectExtendedLimitInformation in the Win32 headers).
JobObjectExtendedLimitInformation = JOBOBJECTCLASS(9)

# LimitFlags bits for JOBOBJECT_BASIC_LIMIT_INFORMATION.
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000

# Desired-access bits for OpenProcess.
PROCESS_TERMINATE = 0x0001
PROCESS_SET_QUOTA = 0x0100
def _errcheck(is_error_result=(lambda result: not result)):
def impl(result, func, args):
if is_error_result(result):
log.debug("{0} returned {1}", func.__name__, result)
raise ctypes.WinError()
else:
return result
return impl
kernel32 = ctypes.windll.kernel32

# Declare restype/argtypes for each kernel32 function we call, and attach an
# errcheck hook so failures raise OSError (via ctypes.WinError) automatically.

kernel32.AssignProcessToJobObject.errcheck = _errcheck()
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)

# HANDLE-returning functions signal failure with NULL (0), not FALSE.
kernel32.CreateJobObjectA.errcheck = _errcheck(lambda result: result == 0)
kernel32.CreateJobObjectA.restype = HANDLE
kernel32.CreateJobObjectA.argtypes = (LPVOID, LPCSTR)

kernel32.OpenProcess.errcheck = _errcheck(lambda result: result == 0)
kernel32.OpenProcess.restype = HANDLE
kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)

kernel32.QueryInformationJobObject.errcheck = _errcheck()
kernel32.QueryInformationJobObject.restype = BOOL
kernel32.QueryInformationJobObject.argtypes = (
    HANDLE,
    JOBOBJECTCLASS,
    LPVOID,
    DWORD,
    LPDWORD,
)

kernel32.SetInformationJobObject.errcheck = _errcheck()
kernel32.SetInformationJobObject.restype = BOOL
kernel32.SetInformationJobObject.argtypes = (HANDLE, JOBOBJECTCLASS, LPVOID, DWORD)

kernel32.TerminateJobObject.errcheck = _errcheck()
kernel32.TerminateJobObject.restype = BOOL
kernel32.TerminateJobObject.argtypes = (HANDLE, UINT)
| 3,129 | Python | 28.809524 | 83 | 0.711409 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/handlers.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import os
import sys
import debugpy
from debugpy import launcher
from debugpy.common import json
from debugpy.launcher import debuggee
def launch_request(request):
    """Handles the DAP "launch" request.

    Builds the debuggee command line, normalizes its environment, configures
    output redirection and wait-on-exit behavior, then spawns the process.
    Returns an empty body on success; raises via request.isnt_valid() for
    invalid configurations.
    """
    debug_options = set(request("debugOptions", json.array(str)))

    # Handling of properties that can also be specified as legacy "debugOptions" flags.
    # If property is explicitly set to false, but the flag is in "debugOptions", treat
    # it as an error. Returns None if the property wasn't explicitly set either way.
    def property_or_debug_option(prop_name, flag_name):
        assert prop_name[0].islower() and flag_name[0].isupper()

        value = request(prop_name, bool, optional=True)
        if value == ():
            value = None

        if flag_name in debug_options:
            if value is False:
                raise request.isnt_valid(
                    '{0}:false and "debugOptions":[{1}] are mutually exclusive',
                    json.repr(prop_name),
                    json.repr(flag_name),
                )
            value = True

        return value

    python = request("python", json.array(str, size=(1,)))
    cmdline = list(python)

    if not request("noDebug", json.default(False)):
        # see https://github.com/microsoft/debugpy/issues/861
        if sys.version_info[:2] >= (3, 11):
            cmdline += ["-X", "frozen_modules=off"]

        port = request("port", int)
        cmdline += [
            os.path.dirname(debugpy.__file__),
            "--connect",
            launcher.adapter_host + ":" + str(port),
        ]

        if not request("subProcess", True):
            cmdline += ["--configure-subProcess", "False"]

        qt_mode = request(
            "qt",
            json.enum(
                "none", "auto", "pyside", "pyside2", "pyqt4", "pyqt5", optional=True
            ),
        )
        cmdline += ["--configure-qt", qt_mode]

        adapter_access_token = request("adapterAccessToken", str, optional=True)
        if adapter_access_token != ():
            cmdline += ["--adapter-access-token", adapter_access_token]

        debugpy_args = request("debugpyArgs", json.array(str))
        cmdline += debugpy_args

    # Use the copy of arguments that was propagated via the command line rather than
    # "args" in the request itself, to allow for shell expansion.
    cmdline += sys.argv[1:]

    process_name = request("processName", sys.executable)

    env = os.environ.copy()
    env_changes = request("env", json.object((str, type(None))))
    if sys.platform == "win32":
        # Environment variables are case-insensitive on Win32, so we need to normalize
        # both dicts to make sure that env vars specified in the debug configuration
        # overwrite the global env vars correctly. If debug config has entries that
        # differ in case only, that's an error.
        env = {k.upper(): v for k, v in os.environ.items()}
        new_env_changes = {}
        for k, v in env_changes.items():
            k_upper = k.upper()
            if k_upper in new_env_changes:
                if new_env_changes[k_upper] == v:
                    continue
                else:
                    raise request.isnt_valid(
                        'Found duplicate in "env": {0}.'.format(k_upper)
                    )
            new_env_changes[k_upper] = v
        env_changes = new_env_changes

    if "DEBUGPY_TEST" in env:
        # If we're running as part of a debugpy test, make sure that codecov is not
        # applied to the debuggee, since it will conflict with pydevd.
        env.pop("COV_CORE_SOURCE", None)

    env.update(env_changes)
    # A value of None in env_changes means "remove this variable".
    env = {k: v for k, v in env.items() if v is not None}

    if request("gevent", False):
        env["GEVENT_SUPPORT"] = "True"

    console = request(
        "console",
        json.enum(
            "internalConsole", "integratedTerminal", "externalTerminal", optional=True
        ),
    )

    redirect_output = property_or_debug_option("redirectOutput", "RedirectOutput")
    if redirect_output is None:
        # If neither the property nor the option were specified explicitly, choose
        # the default depending on console type - "internalConsole" needs it to
        # provide any output at all, but it's unnecessary for the terminals.
        redirect_output = console == "internalConsole"

    if redirect_output:
        # sys.stdout buffering must be disabled - otherwise we won't see the output
        # at all until the buffer fills up.
        env["PYTHONUNBUFFERED"] = "1"
        # Force UTF-8 output to minimize data loss due to re-encoding.
        env["PYTHONIOENCODING"] = "utf-8"

    if property_or_debug_option("waitOnNormalExit", "WaitOnNormalExit"):
        if console == "internalConsole":
            raise request.isnt_valid(
                '"waitOnNormalExit" is not supported for "console":"internalConsole"'
            )
        debuggee.wait_on_exit_predicates.append(lambda code: code == 0)
    if property_or_debug_option("waitOnAbnormalExit", "WaitOnAbnormalExit"):
        if console == "internalConsole":
            raise request.isnt_valid(
                '"waitOnAbnormalExit" is not supported for "console":"internalConsole"'
            )
        debuggee.wait_on_exit_predicates.append(lambda code: code != 0)

    debuggee.spawn(process_name, cmdline, env, redirect_output)
    return {}
def terminate_request(request):
    """Handles the DAP "terminate" request.

    Drops all wait-on-exit predicates so the launcher won't pause, responds
    to the request first, and then kills the debuggee process.
    """
    debuggee.wait_on_exit_predicates.clear()
    request.respond({})
    debuggee.kill()
def disconnect():
    """Invoked when the adapter channel is lost: drops all wait-on-exit
    predicates and kills the debuggee immediately."""
    debuggee.wait_on_exit_predicates.clear()
    debuggee.kill()
| 5,728 | Python | 36.444444 | 87 | 0.606145 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
__all__ = []
adapter_host = None
"""The host on which adapter is running and listening for incoming connections
from the launcher and the servers."""
channel = None
"""DAP message channel to the adapter."""
def connect(host, port):
    """Connects to the adapter at (host, port) and starts the DAP message
    channel, setting the module-level ``channel`` and ``adapter_host``
    globals. Must be called at most once per process.
    """
    from debugpy.common import log, messaging, sockets
    from debugpy.launcher import handlers

    global channel, adapter_host
    assert channel is None
    assert adapter_host is None

    log.info("Connecting to adapter at {0}:{1}", host, port)

    sock = sockets.create_client()
    sock.connect((host, port))
    adapter_host = host

    stream = messaging.JsonIOStream.from_socket(sock, "Adapter")
    # Message handlers live in debugpy.launcher.handlers (launch_request etc.).
    channel = messaging.JsonMessageChannel(stream, handlers=handlers)
    channel.start()
| 890 | Python | 25.999999 | 78 | 0.719101 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/output.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import codecs
import os
import threading
from debugpy import launcher
from debugpy.common import log
class CaptureOutput(object):
    """Captures output from the specified file descriptor, and tees it into another
    file descriptor while generating DAP "output" events for it.
    """

    instances = {}
    """Keys are output categories, values are CaptureOutput instances."""

    def __init__(self, whose, category, fd, stream):
        # whose: human-readable description of whose output is captured.
        # category: DAP output category (unique per instance, e.g. "stdout").
        # fd: file descriptor to read the captured output from.
        # stream: text stream to tee output into; may be None (pythonw.exe).
        assert category not in self.instances
        self.instances[category] = self
        log.info("Capturing {0} of {1}.", category, whose)

        self.category = category
        self._whose = whose
        self._fd = fd
        # Incremental decoder so multi-byte UTF-8 sequences split across reads
        # decode correctly; surrogateescape preserves undecodable bytes.
        self._decoder = codecs.getincrementaldecoder("utf-8")(errors="surrogateescape")

        if stream is None:
            # Can happen if running under pythonw.exe.
            self._stream = None
        else:
            # Write raw bytes to the underlying binary buffer, encoded with the
            # stream's own encoding (falling back to UTF-8 when unknown/cp65001).
            self._stream = stream.buffer
            encoding = stream.encoding
            if encoding is None or encoding == "cp65001":
                encoding = "utf-8"
            try:
                self._encode = codecs.getencoder(encoding)
            except Exception:
                log.swallow_exception(
                    "Unsupported {0} encoding {1!r}; falling back to UTF-8.",
                    category,
                    encoding,
                    level="warning",
                )
                self._encode = codecs.getencoder("utf-8")
            else:
                log.info("Using encoding {0!r} for {1}", encoding, category)

        self._worker_thread = threading.Thread(target=self._worker, name=category)
        self._worker_thread.start()

    def __del__(self):
        # Best-effort close of the captured fd (it may already be closed).
        fd = self._fd
        if fd is not None:
            try:
                os.close(fd)
            except Exception:
                pass

    def _worker(self):
        # Reads chunks from the captured fd until EOF/error, forwarding each.
        while self._fd is not None:
            try:
                s = os.read(self._fd, 0x1000)
            except Exception:
                break
            if not len(s):
                break
            self._process_chunk(s)

        # Flush any remaining data in the incremental decoder.
        self._process_chunk(b"", final=True)

    def _process_chunk(self, s, final=False):
        # Decodes the chunk, emits a DAP "output" event, and tees the bytes
        # into the original stream (if any).
        s = self._decoder.decode(s, final=final)
        if len(s) == 0:
            return

        try:
            launcher.channel.send_event(
                "output", {"category": self.category, "output": s.replace("\r\n", "\n")}
            )
        except Exception:
            pass  # channel to adapter is already closed

        if self._stream is None:
            return

        try:
            s, _ = self._encode(s, "surrogateescape")
            size = len(s)
            i = 0
            # Partial writes are possible; keep writing until done or closed.
            while i < size:
                written = self._stream.write(s[i:])
                self._stream.flush()
                if written == 0:
                    # This means that the output stream was closed from the other end.
                    # Do the same to the debuggee, so that it knows as well.
                    os.close(self._fd)
                    self._fd = None
                    break
                i += written
        except Exception:
            log.swallow_exception("Error printing {0!r} to {1}", s, self.category)
def wait_for_remaining_output():
    """Waits for all remaining output to be captured and propagated."""
    for name, capture in CaptureOutput.instances.items():
        log.info("Waiting for remaining {0} of {1}.", name, capture._whose)
        capture._worker_thread.join()
| 3,748 | Python | 31.885965 | 88 | 0.540822 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/__main__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
__all__ = ["main"]
import locale
import signal
import sys
# WARNING: debugpy and submodules must not be imported on top level in this module,
# and should be imported locally inside main() instead.
def main():
    """Launcher entry point: parses argv (split on "--"), connects to the
    adapter, and exits with the debuggee's return code once it finishes."""
    from debugpy import launcher
    from debugpy.common import log
    from debugpy.launcher import debuggee

    log.to_file(prefix="debugpy.launcher")
    log.describe_environment("debugpy.launcher startup environment:")

    if sys.platform == "win32":
        # For windows, disable exceptions on Ctrl+C - we want to allow the debuggee
        # process to handle these, or not, as it sees fit. If the debuggee exits
        # on Ctrl+C, the launcher will also exit, so it doesn't need to observe
        # the signal directly.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Everything before "--" is command line arguments for the launcher itself,
    # and everything after "--" is command line arguments for the debuggee.
    log.info("sys.argv before parsing: {0}", sys.argv)
    sep = sys.argv.index("--")
    launcher_argv = sys.argv[1:sep]
    sys.argv[:] = [sys.argv[0]] + sys.argv[sep + 1 :]
    log.info("sys.argv after patching: {0}", sys.argv)

    # The first argument specifies the host/port on which the adapter is waiting
    # for launcher to connect. It's either host:port, or just port.
    adapter = launcher_argv[0]
    host, sep, port = adapter.partition(":")
    if not sep:
        host = "127.0.0.1"
        port = adapter
    port = int(port)

    launcher.connect(host, port)
    # Blocks until the adapter channel is closed.
    launcher.channel.wait()

    if debuggee.process is not None:
        # Propagate the debuggee's exit code as our own.
        sys.exit(debuggee.process.returncode)
if __name__ == "__main__":
    # debugpy can also be invoked directly rather than via -m. In this case, the first
    # entry on sys.path is the one added automatically by Python for the directory
    # containing this file. This means that import debugpy will not work, since we need
    # the parent directory of debugpy/ to be in sys.path, rather than debugpy/launcher/.
    #
    # The other issue is that many other absolute imports will break, because they
    # will be resolved relative to debugpy/launcher/ - e.g. `import state` will then try
    # to import debugpy/launcher/state.py.
    #
    # To fix both, we need to replace the automatically added entry such that it points
    # at parent directory of debugpy/ instead of debugpy/launcher, import debugpy with that
    # in sys.path, and then remove the first entry entry altogether, so that it doesn't
    # affect any further imports we might do. For example, suppose the user did:
    #
    #   python /foo/bar/debugpy/launcher ...
    #
    # At the beginning of this script, sys.path will contain "/foo/bar/debugpy/launcher"
    # as the first entry. What we want is to replace it with "/foo/bar', then import
    # debugpy with that in effect, and then remove the replaced entry before any more
    # code runs. The imported debugpy module will remain in sys.modules, and thus all
    # future imports of it or its submodules will resolve accordingly.
    if "debugpy" not in sys.modules:
        # Do not use dirname() to walk up - this can be a relative path, e.g. ".".
        sys.path[0] = sys.path[0] + "/../../"
        __import__("debugpy")
        del sys.path[0]

    # Apply OS-global and user-specific locale settings.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except Exception:
        # On POSIX, locale is set via environment variables, and this can fail if
        # those variables reference a non-existing locale. Ignore and continue using
        # the default "C" locale if so.
        pass

    main()
| 3,812 | Python | 40.445652 | 91 | 0.678122 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/launcher/debuggee.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import atexit
import ctypes
import os
import signal
import struct
import subprocess
import sys
import threading
from debugpy import launcher
from debugpy.common import log, messaging
from debugpy.launcher import output
if sys.platform == "win32":
from debugpy.launcher import winapi
process = None
"""subprocess.Popen instance for the debuggee process."""
job_handle = None
"""On Windows, the handle for the job object to which the debuggee is assigned."""
wait_on_exit_predicates = []
"""List of functions that determine whether to pause after debuggee process exits.
Every function is invoked with exit code as the argument. If any of the functions
returns True, the launcher pauses and waits for user input before exiting.
"""
def describe():
    """Returns a human-readable identifier for the debuggee process."""
    return "Debuggee[PID={0}]".format(process.pid)
def spawn(process_name, cmdline, env, redirect_output):
    """Spawns the debuggee process and sets the module-level `process` global.

    Also sets up process-tree cleanup (a POSIX process group or a Windows job
    object), registers kill() via atexit, reports the "process" event to the
    adapter, optionally captures stdout/stderr via pipes, and starts the
    wait_for_exit() watcher thread.
    """
    log.info(
        "Spawning debuggee process:\n\n"
        "Command line: {0!r}\n\n"
        "Environment variables: {1!r}\n\n",
        cmdline,
        env,
    )
    # All fds in this set are closed in the finally block below; capture fds
    # that get handed off to CaptureOutput are removed from it first.
    close_fds = set()
    try:
        if redirect_output:
            # subprocess.PIPE behavior can vary substantially depending on Python version
            # and platform; using our own pipes keeps it simple, predictable, and fast.
            stdout_r, stdout_w = os.pipe()
            stderr_r, stderr_w = os.pipe()
            close_fds |= {stdout_r, stdout_w, stderr_r, stderr_w}
            kwargs = dict(stdout=stdout_w, stderr=stderr_w)
        else:
            kwargs = {}
        if sys.platform != "win32":
            # Runs in the child between fork() and exec().
            def preexec_fn():
                try:
                    # Start the debuggee in a new process group, so that the launcher can
                    # kill the entire process tree later.
                    os.setpgrp()
                    # Make the new process group the foreground group in its session, so
                    # that it can interact with the terminal. The debuggee will receive
                    # SIGTTOU when tcsetpgrp() is called, and must ignore it.
                    old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
                    try:
                        tty = os.open("/dev/tty", os.O_RDWR)
                        try:
                            os.tcsetpgrp(tty, os.getpgrp())
                        finally:
                            os.close(tty)
                    finally:
                        signal.signal(signal.SIGTTOU, old_handler)
                except Exception:
                    # Not an error - /dev/tty doesn't work when there's no terminal.
                    log.swallow_exception(
                        "Failed to set up process group", level="info"
                    )
            kwargs.update(preexec_fn=preexec_fn)
        try:
            global process
            process = subprocess.Popen(cmdline, env=env, bufsize=0, **kwargs)
        except Exception as exc:
            raise messaging.MessageHandlingError(
                "Couldn't spawn debuggee: {0}\n\nCommand line:{1!r}".format(
                    exc, cmdline
                )
            )
        log.info("Spawned {0}.", describe())
        if sys.platform == "win32":
            # Assign the debuggee to a new job object, so that the launcher can kill
            # the entire process tree later.
            try:
                global job_handle
                job_handle = winapi.kernel32.CreateJobObjectA(None, None)
                job_info = winapi.JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
                job_info_size = winapi.DWORD(ctypes.sizeof(job_info))
                winapi.kernel32.QueryInformationJobObject(
                    job_handle,
                    winapi.JobObjectExtendedLimitInformation,
                    ctypes.pointer(job_info),
                    job_info_size,
                    ctypes.pointer(job_info_size),
                )
                job_info.BasicLimitInformation.LimitFlags |= (
                    # Ensure that the job will be terminated by the OS once the
                    # launcher exits, even if it doesn't terminate the job explicitly.
                    winapi.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
                    |
                    # Allow the debuggee to create its own jobs unrelated to ours.
                    winapi.JOB_OBJECT_LIMIT_BREAKAWAY_OK
                )
                winapi.kernel32.SetInformationJobObject(
                    job_handle,
                    winapi.JobObjectExtendedLimitInformation,
                    ctypes.pointer(job_info),
                    job_info_size,
                )
                process_handle = winapi.kernel32.OpenProcess(
                    winapi.PROCESS_TERMINATE | winapi.PROCESS_SET_QUOTA,
                    False,
                    process.pid,
                )
                winapi.kernel32.AssignProcessToJobObject(job_handle, process_handle)
            except Exception:
                log.swallow_exception("Failed to set up job object", level="warning")
        # Make sure the debuggee doesn't outlive the launcher even on abnormal exit.
        atexit.register(kill)
        launcher.channel.send_event(
            "process",
            {
                "startMethod": "launch",
                "isLocalProcess": True,
                "systemProcessId": process.pid,
                "name": process_name,
                "pointerSize": struct.calcsize("P") * 8,
            },
        )
        if redirect_output:
            # CaptureOutput takes ownership of the read ends of the pipes.
            for category, fd, tee in [
                ("stdout", stdout_r, sys.stdout),
                ("stderr", stderr_r, sys.stderr),
            ]:
                output.CaptureOutput(describe(), category, fd, tee)
                close_fds.remove(fd)
        # Watch for debuggee exit in the background and report it to the adapter.
        wait_thread = threading.Thread(target=wait_for_exit, name="wait_for_exit()")
        wait_thread.daemon = True
        wait_thread.start()
    finally:
        # Close any fds that weren't handed off (write ends are inherited by the
        # child, so the parent's copies must be closed to detect EOF).
        for fd in close_fds:
            try:
                os.close(fd)
            except Exception:
                log.swallow_exception(level="warning")
def kill():
    """Forcibly terminates the debuggee and its entire process tree, if it is
    still running. No-op if the debuggee was never spawned or has exited.
    """
    if process is None:
        return
    try:
        if process.poll() is not None:
            # Already exited - nothing to do.
            return
        log.info("Killing {0}", describe())
        # Clean up the process tree.
        if sys.platform == "win32":
            # The debuggee was assigned to a job object on spawn; terminating
            # the job takes down the whole tree.
            winapi.kernel32.TerminateJobObject(job_handle, 0)
        else:
            # The debuggee was started in its own process group; signal it.
            os.killpg(process.pid, signal.SIGKILL)
    except Exception:
        log.swallow_exception("Failed to kill {0}", describe())
def wait_for_exit():
    """Blocks until the debuggee exits, then reports "exited" and "terminated"
    events to the adapter, optionally pausing for user input in between.
    """
    try:
        exit_code = process.wait()
        if sys.platform != "win32" and exit_code < 0:
            # On POSIX, Popen reports death by signal as a negative returncode,
            # but the actual exit code of the process is always an unsigned
            # number - recover it from the lowest 8 bits.
            exit_code &= 0xFF
    except Exception:
        log.swallow_exception("Couldn't determine process exit code")
        exit_code = -1
    log.info("{0} exited with code {1}", describe(), exit_code)
    output.wait_for_remaining_output()
    # Evaluate the predicates before sending "exited", so that any follow-up
    # "terminate" requests don't affect them.
    should_wait = any(pred(exit_code) for pred in wait_on_exit_predicates)
    try:
        launcher.channel.send_event("exited", {"exitCode": exit_code})
    except Exception:
        pass
    if should_wait:
        _wait_for_user_input()
    try:
        launcher.channel.send_event("terminated")
    except Exception:
        pass
def _wait_for_user_input():
    """If stdin is a terminal, prompts the user and blocks until a key (or
    Enter, where getch() is unavailable) is pressed. Used to keep the console
    window open after the debuggee exits.
    """
    # Note: `log` is already imported at module level; the previous local
    # re-import of it from debugpy.common was redundant and has been removed.
    if sys.stdout and sys.stdin and sys.stdin.isatty():
        # msvcrt is only available on Windows; elsewhere fall back to read().
        try:
            import msvcrt
        except ImportError:
            can_getch = False
        else:
            can_getch = True
        if can_getch:
            log.debug("msvcrt available - waiting for user input via getch()")
            sys.stdout.write("Press any key to continue . . . ")
            sys.stdout.flush()
            msvcrt.getch()
        else:
            log.debug("msvcrt not available - waiting for user input via read()")
            sys.stdout.write("Press Enter to continue . . . ")
            sys.stdout.flush()
            sys.stdin.read(1)
| 8,574 | Python | 33.3 | 89 | 0.553651 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/clients.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import annotations
import atexit
import os
import sys
import debugpy
from debugpy import adapter, common, launcher
from debugpy.common import json, log, messaging, sockets
from debugpy.adapter import components, servers, sessions
class Client(components.Component):
"""Handles the client side of a debug session."""
message_handler = components.Component.message_handler
known_subprocesses: set[servers.Connection]
"""Server connections to subprocesses that this client has been made aware of.
"""
    class Capabilities(components.Capabilities):
        # Client capabilities read from the "initialize" request, with the
        # default value to assume when the client doesn't report them.
        PROPERTIES = {
            "supportsVariableType": False,
            "supportsVariablePaging": False,
            "supportsRunInTerminalRequest": False,
            "supportsMemoryReferences": False,
            "supportsArgsCanBeInterpretedByShell": False,
        }
    class Expectations(components.Capabilities):
        # Client expectations read from the "initialize" request; values this
        # adapter can satisfy (e.g. only "path" is accepted for "pathFormat").
        PROPERTIES = {
            "locale": "en-US",
            "linesStartAt1": True,
            "columnsStartAt1": True,
            "pathFormat": json.enum("path", optional=True),  # we don't support "uri"
        }
    def __init__(self, sock):
        """Sets up the DAP channel to the client over the given socket, or over
        stdio if sock == "stdio", registers this client with a new Session, and
        sends the initial telemetry events.
        """
        if sock == "stdio":
            log.info("Connecting to client over stdio...", self)
            stream = messaging.JsonIOStream.from_stdio()
            # Make sure that nothing else tries to interfere with the stdio streams
            # that are going to be used for DAP communication from now on.
            sys.stdin = stdin = open(os.devnull, "r")
            atexit.register(stdin.close)
            sys.stdout = stdout = open(os.devnull, "w")
            atexit.register(stdout.close)
        else:
            stream = messaging.JsonIOStream.from_socket(sock)
        with sessions.Session() as session:
            super().__init__(session, stream)
            self.client_id = None
            """ID of the connecting client. This can be 'test' while running tests."""
            self.has_started = False
            """Whether the "launch" or "attach" request was received from the client, and
            fully handled.
            """
            self.start_request = None
            """The "launch" or "attach" request as received from the client.
            """
            self._initialize_request = None
            """The "initialize" request as received from the client, to propagate to the
            server later."""
            self._deferred_events = []
            """Deferred events from the launcher and the server that must be propagated
            only if and when the "launch" or "attach" response is sent.
            """
            # Whether "terminate" requests should be forwarded to the server
            # for a graceful shutdown, rather than killing the debuggee here.
            self._forward_terminate_request = False
            self.known_subprocesses = set()
            session.client = self
            session.register()
            # For the transition period, send the telemetry events with both old and new
            # name. The old one should be removed once the new one lights up.
            self.channel.send_event(
                "output",
                {
                    "category": "telemetry",
                    "output": "ptvsd",
                    "data": {"packageVersion": debugpy.__version__},
                },
            )
            self.channel.send_event(
                "output",
                {
                    "category": "telemetry",
                    "output": "debugpy",
                    "data": {"packageVersion": debugpy.__version__},
                },
            )
def propagate_after_start(self, event):
# pydevd starts sending events as soon as we connect, but the client doesn't
# expect to see any until it receives the response to "launch" or "attach"
# request. If client is not ready yet, save the event instead of propagating
# it immediately.
if self._deferred_events is not None:
self._deferred_events.append(event)
log.debug("Propagation deferred.")
else:
self.client.channel.propagate(event)
def _propagate_deferred_events(self):
log.debug("Propagating deferred events to {0}...", self.client)
for event in self._deferred_events:
log.debug("Propagating deferred {0}", event.describe())
self.client.channel.propagate(event)
log.info("All deferred events propagated to {0}.", self.client)
self._deferred_events = None
# Generic event handler. There are no specific handlers for client events, because
# there are no events from the client in DAP - but we propagate them if we can, in
# case some events appear in future protocol versions.
@message_handler
def event(self, event):
if self.server:
self.server.channel.propagate(event)
    # Generic request handler, used if there's no specific handler below.
    @message_handler
    def request(self, request):
        # Delegate to the server and return its response verbatim.
        return self.server.channel.delegate(request)
    @message_handler
    def initialize_request(self, request):
        """Handles "initialize": records client capabilities/expectations, and
        returns the adapter's own capabilities. Must be the first request of a
        session; raising otherwise.
        """
        if self._initialize_request is not None:
            raise request.isnt_valid("Session is already initialized")
        self.client_id = request("clientID", "")
        self.capabilities = self.Capabilities(self, request)
        self.expectations = self.Expectations(self, request)
        # Saved so that it can be propagated to the server once one connects.
        self._initialize_request = request
        exception_breakpoint_filters = [
            {
                "filter": "raised",
                "label": "Raised Exceptions",
                "default": False,
                "description": "Break whenever any exception is raised.",
            },
            {
                "filter": "uncaught",
                "label": "Uncaught Exceptions",
                "default": True,
                "description": "Break when the process is exiting due to unhandled exception.",
            },
            {
                "filter": "userUnhandled",
                "label": "User Uncaught Exceptions",
                "default": False,
                "description": "Break when exception escapes into library code.",
            },
        ]
        return {
            "supportsCompletionsRequest": True,
            "supportsConditionalBreakpoints": True,
            "supportsConfigurationDoneRequest": True,
            "supportsDebuggerProperties": True,
            "supportsDelayedStackTraceLoading": True,
            "supportsEvaluateForHovers": True,
            "supportsExceptionInfoRequest": True,
            "supportsExceptionOptions": True,
            "supportsFunctionBreakpoints": True,
            "supportsHitConditionalBreakpoints": True,
            "supportsLogPoints": True,
            "supportsModulesRequest": True,
            "supportsSetExpression": True,
            "supportsSetVariable": True,
            "supportsValueFormattingOptions": True,
            "supportsTerminateRequest": True,
            "supportsGotoTargetsRequest": True,
            "supportsClipboardContext": True,
            "exceptionBreakpointFilters": exception_breakpoint_filters,
            "supportsStepInTargetsRequest": True,
        }
    # Common code for "launch" and "attach" request handlers.
    #
    # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522
    # for the sequence of request and events necessary to orchestrate the start.
    def _start_message_handler(f):
        """Decorator wrapping launch_request/attach_request with the shared
        validation, server handshake, and "initialized" event sequencing.
        """
        @components.Component.message_handler
        def handle(self, request):
            assert request.is_request("launch", "attach")
            if self._initialize_request is None:
                raise request.isnt_valid("Session is not initialized yet")
            if self.launcher or self.server:
                raise request.isnt_valid("Session is already started")
            self.session.no_debug = request("noDebug", json.default(False))
            if self.session.no_debug:
                servers.dont_wait_for_first_connection()
            self.session.debug_options = debug_options = set(
                request("debugOptions", json.array(str))
            )
            # Run the mode-specific handler; it may respond (e.g. on error).
            f(self, request)
            if request.response is not None:
                return
            if self.server:
                self.server.initialize(self._initialize_request)
                self._initialize_request = None
                arguments = request.arguments
                if self.launcher:
                    redirecting = arguments.get("console") == "internalConsole"
                    if "RedirectOutput" in debug_options:
                        # The launcher is doing output redirection, so we don't need the
                        # server to do it, as well.
                        arguments = dict(arguments)
                        arguments["debugOptions"] = list(
                            debug_options - {"RedirectOutput"}
                        )
                        redirecting = True
                    if arguments.get("redirectOutput"):
                        arguments = dict(arguments)
                        del arguments["redirectOutput"]
                        redirecting = True
                    arguments["isOutputRedirected"] = redirecting
                # pydevd doesn't send "initialized", and responds to the start request
                # immediately, without waiting for "configurationDone". If it changes
                # to conform to the DAP spec, we'll need to defer waiting for response.
                try:
                    self.server.channel.request(request.command, arguments)
                except messaging.NoMoreMessages:
                    # Server closed connection before we could receive the response to
                    # "attach" or "launch" - this can happen when debuggee exits shortly
                    # after starting. It's not an error, but we can't do anything useful
                    # here at this point, either, so just bail out.
                    request.respond({})
                    self.session.finalize(
                        "{0} disconnected before responding to {1}".format(
                            self.server,
                            json.repr(request.command),
                        )
                    )
                    return
                except messaging.MessageHandlingError as exc:
                    exc.propagate(request)
            if self.session.no_debug:
                self.start_request = request
                self.has_started = True
                request.respond({})
                self._propagate_deferred_events()
                return
            # Let the client know that it can begin configuring the adapter.
            self.channel.send_event("initialized")
            self.start_request = request
            return messaging.NO_RESPONSE  # will respond on "configurationDone"
        return handle
    @_start_message_handler
    def launch_request(self, request):
        """Handles "launch": validates the target ("program"/"module"/"code"),
        resolves the Python interpreter and console mode, then spawns the
        debuggee via the launcher helper process.
        """
        from debugpy.adapter import launchers
        if self.session.id != 1 or len(servers.connections()):
            raise request.cant_handle('"attach" expected')
        debug_options = set(request("debugOptions", json.array(str)))
        # Handling of properties that can also be specified as legacy "debugOptions" flags.
        # If property is explicitly set to false, but the flag is in "debugOptions", treat
        # it as an error. Returns None if the property wasn't explicitly set either way.
        def property_or_debug_option(prop_name, flag_name):
            assert prop_name[0].islower() and flag_name[0].isupper()
            value = request(prop_name, bool, optional=True)
            if value == ():
                value = None
            if flag_name in debug_options:
                if value is False:
                    raise request.isnt_valid(
                        '{0}:false and "debugOptions":[{1}] are mutually exclusive',
                        json.repr(prop_name),
                        json.repr(flag_name),
                    )
                value = True
            return value
        # "pythonPath" is a deprecated legacy spelling. If "python" is missing, then try
        # the alternative. But if both are missing, the error message should say "python".
        python_key = "python"
        if python_key in request:
            if "pythonPath" in request:
                raise request.isnt_valid(
                    '"pythonPath" is not valid if "python" is specified'
                )
        elif "pythonPath" in request:
            python_key = "pythonPath"
        python = request(python_key, json.array(str, vectorize=True, size=(0,)))
        if not len(python):
            python = [sys.executable]
        python += request("pythonArgs", json.array(str, size=(0,)))
        request.arguments["pythonArgs"] = python[1:]
        request.arguments["python"] = python
        launcher_python = request("debugLauncherPython", str, optional=True)
        if launcher_python == ():
            launcher_python = python[0]
        # () means "not specified" for each of the three possible targets.
        program = module = code = ()
        if "program" in request:
            program = request("program", str)
            args = [program]
            request.arguments["processName"] = program
        if "module" in request:
            module = request("module", str)
            args = ["-m", module]
            request.arguments["processName"] = module
        if "code" in request:
            code = request("code", json.array(str, vectorize=True, size=(1,)))
            args = ["-c", "\n".join(code)]
            request.arguments["processName"] = "-c"
        num_targets = len([x for x in (program, module, code) if x != ()])
        if num_targets == 0:
            raise request.isnt_valid(
                'either "program", "module", or "code" must be specified'
            )
        elif num_targets != 1:
            raise request.isnt_valid(
                '"program", "module", and "code" are mutually exclusive'
            )
        console = request(
            "console",
            json.enum(
                "internalConsole",
                "integratedTerminal",
                "externalTerminal",
                optional=True,
            ),
        )
        console_title = request("consoleTitle", json.default("Python Debug Console"))
        # Propagate "args" via CLI so that shell expansion can be applied if requested.
        target_args = request("args", json.array(str, vectorize=True))
        args += target_args
        # If "args" was a single string rather than an array, shell expansion must be applied.
        shell_expand_args = len(target_args) > 0 and isinstance(
            request.arguments["args"], str
        )
        if shell_expand_args:
            if not self.capabilities["supportsArgsCanBeInterpretedByShell"]:
                raise request.isnt_valid(
                    'Shell expansion in "args" is not supported by the client'
                )
            if console == "internalConsole":
                raise request.isnt_valid(
                    'Shell expansion in "args" is not available for "console":"internalConsole"'
                )
        cwd = request("cwd", str, optional=True)
        if cwd == ():
            # If it's not specified, but we're launching a file rather than a module,
            # and the specified path has a directory in it, use that.
            cwd = None if program == () else (os.path.dirname(program) or None)
        sudo = bool(property_or_debug_option("sudo", "Sudo"))
        if sudo and sys.platform == "win32":
            raise request.cant_handle('"sudo":true is not supported on Windows.')
        on_terminate = request("onTerminate", str, optional=True)
        if on_terminate:
            self._forward_terminate_request = on_terminate == "KeyboardInterrupt"
        launcher_path = request("debugLauncherPath", os.path.dirname(launcher.__file__))
        adapter_host = request("debugAdapterHost", "127.0.0.1")
        try:
            servers.serve(adapter_host)
        except Exception as exc:
            raise request.cant_handle(
                "{0} couldn't create listener socket for servers: {1}",
                self.session,
                exc,
            )
        launchers.spawn_debuggee(
            self.session,
            request,
            [launcher_python],
            launcher_path,
            adapter_host,
            args,
            shell_expand_args,
            cwd,
            console,
            console_title,
            sudo,
        )
@_start_message_handler
def attach_request(self, request):
if self.session.no_debug:
raise request.isnt_valid('"noDebug" is not supported for "attach"')
host = request("host", str, optional=True)
port = request("port", int, optional=True)
listen = request("listen", dict, optional=True)
connect = request("connect", dict, optional=True)
pid = request("processId", (int, str), optional=True)
sub_pid = request("subProcessId", int, optional=True)
on_terminate = request("onTerminate", bool, optional=True)
if on_terminate:
self._forward_terminate_request = on_terminate == "KeyboardInterrupt"
if host != () or port != ():
if listen != ():
raise request.isnt_valid(
'"listen" and "host"/"port" are mutually exclusive'
)
if connect != ():
raise request.isnt_valid(
'"connect" and "host"/"port" are mutually exclusive'
)
if listen != ():
if connect != ():
raise request.isnt_valid(
'"listen" and "connect" are mutually exclusive'
)
if pid != ():
raise request.isnt_valid(
'"listen" and "processId" are mutually exclusive'
)
if sub_pid != ():
raise request.isnt_valid(
'"listen" and "subProcessId" are mutually exclusive'
)
if pid != () and sub_pid != ():
raise request.isnt_valid(
'"processId" and "subProcessId" are mutually exclusive'
)
if listen != ():
if servers.is_serving():
raise request.isnt_valid(
'Multiple concurrent "listen" sessions are not supported'
)
host = listen("host", "127.0.0.1")
port = listen("port", int)
adapter.access_token = None
host, port = servers.serve(host, port)
else:
if not servers.is_serving():
servers.serve()
host, port = servers.listener.getsockname()
# There are four distinct possibilities here.
#
# If "processId" is specified, this is attach-by-PID. We need to inject the
# debug server into the designated process, and then wait until it connects
# back to us. Since the injected server can crash, there must be a timeout.
#
# If "subProcessId" is specified, this is attach to a known subprocess, likely
# in response to a "debugpyAttach" event. If so, the debug server should be
# connected already, and thus the wait timeout is zero.
#
# If "listen" is specified, this is attach-by-socket with the server expected
# to connect to the adapter via debugpy.connect(). There is no PID known in
# advance, so just wait until the first server connection indefinitely, with
# no timeout.
#
# If "connect" is specified, this is attach-by-socket in which the server has
# spawned the adapter via debugpy.listen(). There is no PID known to the client
# in advance, but the server connection should be either be there already, or
# the server should be connecting shortly, so there must be a timeout.
#
# In the last two cases, if there's more than one server connection already,
# this is a multiprocess re-attach. The client doesn't know the PID, so we just
# connect it to the oldest server connection that we have - in most cases, it
# will be the one for the root debuggee process, but if it has exited already,
# it will be some subprocess.
if pid != ():
if not isinstance(pid, int):
try:
pid = int(pid)
except Exception:
raise request.isnt_valid('"processId" must be parseable as int')
debugpy_args = request("debugpyArgs", json.array(str))
def on_output(category, output):
self.channel.send_event(
"output",
{
"category": category,
"output": output,
},
)
try:
servers.inject(pid, debugpy_args, on_output)
except Exception as e:
log.swallow_exception()
self.session.finalize(
"Error when trying to attach to PID:\n%s" % (str(e),)
)
return
timeout = common.PROCESS_SPAWN_TIMEOUT
pred = lambda conn: conn.pid == pid
else:
if sub_pid == ():
pred = lambda conn: True
timeout = common.PROCESS_SPAWN_TIMEOUT if listen == () else None
else:
pred = lambda conn: conn.pid == sub_pid
timeout = 0
self.channel.send_event("debugpyWaitingForServer", {"host": host, "port": port})
conn = servers.wait_for_connection(self.session, pred, timeout)
if conn is None:
if sub_pid != ():
# If we can't find a matching subprocess, it's not always an error -
# it might have already exited, or didn't even get a chance to connect.
# To prevent the client from complaining, pretend that the "attach"
# request was successful, but that the session terminated immediately.
request.respond({})
self.session.finalize(
'No known subprocess with "subProcessId":{0}'.format(sub_pid)
)
return
raise request.cant_handle(
(
"Timed out waiting for debug server to connect."
if timeout
else "There is no debug server connected to this adapter."
),
sub_pid,
)
try:
conn.attach_to_session(self.session)
except ValueError:
request.cant_handle("{0} is already being debugged.", conn)
    @message_handler
    def configurationDone_request(self, request):
        """Handles "configurationDone": delegates to the server, then responds
        to both it and the still-pending "launch"/"attach" start request, and
        flushes any deferred events.
        """
        if self.start_request is None or self.has_started:
            request.cant_handle(
                '"configurationDone" is only allowed during handling of a "launch" '
                'or an "attach" request'
            )
        try:
            self.has_started = True
            try:
                result = self.server.channel.delegate(request)
            except messaging.NoMoreMessages:
                # Server closed connection before we could receive the response to
                # "configurationDone" - this can happen when debuggee exits shortly
                # after starting. It's not an error, but we can't do anything useful
                # here at this point, either, so just bail out.
                request.respond({})
                self.start_request.respond({})
                self.session.finalize(
                    "{0} disconnected before responding to {1}".format(
                        self.server,
                        json.repr(request.command),
                    )
                )
                return
            else:
                request.respond(result)
        except messaging.MessageHandlingError as exc:
            self.start_request.cant_handle(str(exc))
        finally:
            # Whatever happened above, the start request must get some response.
            if self.start_request.response is None:
                self.start_request.respond({})
            self._propagate_deferred_events()
        # Notify the client of any child processes of the debuggee that aren't already
        # being debugged.
        for conn in servers.connections():
            if conn.server is None and conn.ppid == self.session.pid:
                self.notify_of_subprocess(conn)
@message_handler
def evaluate_request(self, request):
propagated_request = self.server.channel.propagate(request)
def handle_response(response):
request.respond(response.body)
propagated_request.on_response(handle_response)
return messaging.NO_RESPONSE
    @message_handler
    def pause_request(self, request):
        # Pause all threads, not just the one the client named - "*" is the
        # wildcard thread ID.
        request.arguments["threadId"] = "*"
        return self.server.channel.delegate(request)
    @message_handler
    def continue_request(self, request):
        # Resume all threads, mirroring pause_request.
        request.arguments["threadId"] = "*"
        try:
            return self.server.channel.delegate(request)
        except messaging.NoMoreMessages:
            # pydevd can sometimes allow the debuggee to exit before the queued
            # "continue" response gets sent. Thus, a failed "continue" response
            # indicating that the server disconnected should be treated as success.
            return {"allThreadsContinued": True}
@message_handler
def debugpySystemInfo_request(self, request):
result = {"debugpy": {"version": debugpy.__version__}}
if self.server:
try:
pydevd_info = self.server.channel.request("pydevdSystemInfo")
except Exception:
# If the server has already disconnected, or couldn't handle it,
# report what we've got.
pass
else:
result.update(pydevd_info)
return result
    @message_handler
    def terminate_request(self, request):
        if self._forward_terminate_request:
            # According to the spec, terminate should try to do a graceful shutdown.
            # We do this in the server by interrupting the main thread with a Ctrl+C.
            # To force the kill a subsequent request would do a disconnect.
            #
            # We only do this if the onTerminate option is set though (the default
            # is a hard-kill for the process and subprocesses).
            return self.server.channel.delegate(request)
        self.session.finalize('client requested "terminate"', terminate_debuggee=True)
        return {}
@message_handler
def disconnect_request(self, request):
terminate_debuggee = request("terminateDebuggee", bool, optional=True)
if terminate_debuggee == ():
terminate_debuggee = None
self.session.finalize('client requested "disconnect"', terminate_debuggee)
return {}
    def notify_of_subprocess(self, conn):
        """Sends a "debugpyAttach" event for the given subprocess connection,
        so that the client can start a child debug session for it. The event
        body is derived from the original "launch"/"attach" arguments, rewritten
        into an equivalent "connect"-style attach configuration.
        """
        log.info("{1} is a subprocess of {0}.", self, conn)
        with self.session:
            if self.start_request is None or conn in self.known_subprocesses:
                return
            if "processId" in self.start_request.arguments:
                log.warning(
                    "Not reporting subprocess for {0}, because the parent process "
                    'was attached to using "processId" rather than "port".',
                    self.session,
                )
                return
            log.info("Notifying {0} about {1}.", self, conn)
            body = dict(self.start_request.arguments)
            self.known_subprocesses.add(conn)
            self.session.notify_changed()
        # Strip properties that only make sense for the parent session.
        for key in "processId", "listen", "preLaunchTask", "postDebugTask":
            body.pop(key, None)
        body["name"] = "Subprocess {0}".format(conn.pid)
        body["request"] = "attach"
        body["subProcessId"] = conn.pid
        for key in "args", "processName", "pythonArgs":
            body.pop(key, None)
        # Rewrite flat "host"/"port" into a "connect" object, filling in the
        # defaults where the original arguments didn't specify them.
        host = body.pop("host", None)
        port = body.pop("port", None)
        if "connect" not in body:
            body["connect"] = {}
        if "host" not in body["connect"]:
            body["connect"]["host"] = host if host is not None else "127.0.0.1"
        if "port" not in body["connect"]:
            if port is None:
                _, port = listener.getsockname()
            body["connect"]["port"] = port
        self.channel.send_event("debugpyAttach", body)
def serve(host, port):
    """Starts listening for incoming client connections on (host, port), and
    returns the actual (host, port) the listener socket is bound to.
    """
    global listener
    listener = sockets.serve("Client", Client, host, port)
    return listener.getsockname()
def stop_serving():
    """Closes the client listener socket, logging (but not raising) failures."""
    try:
        listener.close()
    except Exception:
        log.swallow_exception(level="warning")
| 29,037 | Python | 38.997245 | 96 | 0.560526 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/launchers.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import os
import subprocess
import sys
from debugpy import adapter, common
from debugpy.common import log, messaging, sockets
from debugpy.adapter import components, servers
class Launcher(components.Component):
    """Handles the launcher side of a debug session."""
    message_handler = components.Component.message_handler
    def __init__(self, session, stream):
        # A session may only ever have one launcher.
        with session:
            assert not session.launcher
            super().__init__(session, stream)
            self.pid = None
            """Process ID of the debuggee process, as reported by the launcher."""
            self.exit_code = None
            """Exit code of the debuggee process."""
            session.launcher = self
    @message_handler
    def process_event(self, event):
        # Record the debuggee PID and forward the event to the client.
        self.pid = event("systemProcessId", int)
        self.client.propagate_after_start(event)
    @message_handler
    def output_event(self, event):
        # Debuggee stdout/stderr captured by the launcher - pass through.
        self.client.propagate_after_start(event)
    @message_handler
    def exited_event(self, event):
        self.exit_code = event("exitCode", int)
        # We don't want to tell the client about this just yet, because it will then
        # want to disconnect, and the launcher might still be waiting for keypress
        # (if wait-on-exit was enabled). Instead, we'll report the event when we
        # receive "terminated" from the launcher, right before it exits.
    @message_handler
    def terminated_event(self, event):
        # Now it is safe to report the exit code recorded by exited_event.
        try:
            self.client.channel.send_event("exited", {"exitCode": self.exit_code})
        except Exception:
            pass
        self.channel.close()
    def terminate_debuggee(self):
        # Ask the launcher to kill the debuggee, unless it already exited.
        with self.session:
            if self.exit_code is None:
                try:
                    self.channel.request("terminate")
                except Exception:
                    pass
def spawn_debuggee(
    session,
    start_request,
    python,
    launcher_path,
    adapter_host,
    args,
    shell_expand_args,
    cwd,
    console,
    console_title,
    sudo,
):
    """Spawns the launcher helper process (directly for "internalConsole", or
    via the client's "runInTerminal" facility otherwise), waits for it to
    connect back, forwards the start request to it, and finally waits for the
    debuggee's debug server connection unless running with noDebug.
    """
    # -E tells sudo to propagate environment variables to the target process - this
    # is necessary for launcher to get DEBUGPY_LAUNCHER_PORT and DEBUGPY_LOG_DIR.
    cmdline = ["sudo", "-E"] if sudo else []
    cmdline += python
    cmdline += [launcher_path]
    env = {}
    arguments = dict(start_request.arguments)
    if not session.no_debug:
        # Tell the launcher which port the server listener is on, and how to
        # authenticate with this adapter.
        _, arguments["port"] = servers.listener.getsockname()
        arguments["adapterAccessToken"] = adapter.access_token
    def on_launcher_connected(sock):
        # Only one launcher connection is expected; stop listening once it's in.
        listener.close()
        stream = messaging.JsonIOStream.from_socket(sock)
        Launcher(session, stream)
    try:
        listener = sockets.serve(
            "Launcher", on_launcher_connected, adapter_host, backlog=1
        )
    except Exception as exc:
        raise start_request.cant_handle(
            "{0} couldn't create listener socket for launcher: {1}", session, exc
        )
    try:
        launcher_host, launcher_port = listener.getsockname()
        launcher_addr = (
            launcher_port
            if launcher_host == "127.0.0.1"
            else f"{launcher_host}:{launcher_port}"
        )
        cmdline += [str(launcher_addr), "--"]
        cmdline += args
        if log.log_dir is not None:
            env[str("DEBUGPY_LOG_DIR")] = log.log_dir
        if log.stderr.levels != {"warning", "error"}:
            env[str("DEBUGPY_LOG_STDERR")] = str(" ".join(log.stderr.levels))
        if console == "internalConsole":
            log.info("{0} spawning launcher: {1!r}", session, cmdline)
            try:
                # If we are talking to the client over stdio, sys.stdin and sys.stdout
                # are redirected to avoid mangling the DAP message stream. Make sure
                # the launcher also respects that.
                subprocess.Popen(
                    cmdline,
                    cwd=cwd,
                    env=dict(list(os.environ.items()) + list(env.items())),
                    stdin=sys.stdin,
                    stdout=sys.stdout,
                    stderr=sys.stderr,
                )
            except Exception as exc:
                raise start_request.cant_handle("Failed to spawn launcher: {0}", exc)
        else:
            log.info('{0} spawning launcher via "runInTerminal" request.', session)
            session.client.capabilities.require("supportsRunInTerminalRequest")
            kinds = {"integratedTerminal": "integrated", "externalTerminal": "external"}
            request_args = {
                "kind": kinds[console],
                "title": console_title,
                "args": cmdline,
                "env": env,
            }
            if cwd is not None:
                request_args["cwd"] = cwd
            if shell_expand_args:
                request_args["argsCanBeInterpretedByShell"] = True
            try:
                # It is unspecified whether this request receives a response immediately, or only
                # after the spawned command has completed running, so do not block waiting for it.
                session.client.channel.send_request("runInTerminal", request_args)
            except messaging.MessageHandlingError as exc:
                exc.propagate(start_request)
        # If using sudo, it might prompt for password, and launcher won't start running
        # until the user enters it, so don't apply timeout in that case.
        if not session.wait_for(
            lambda: session.launcher,
            timeout=(None if sudo else common.PROCESS_SPAWN_TIMEOUT),
        ):
            raise start_request.cant_handle("Timed out waiting for launcher to connect")
        try:
            session.launcher.channel.request(start_request.command, arguments)
        except messaging.MessageHandlingError as exc:
            exc.propagate(start_request)
        if not session.wait_for(
            lambda: session.launcher.pid is not None,
            timeout=common.PROCESS_SPAWN_TIMEOUT,
        ):
            raise start_request.cant_handle(
                'Timed out waiting for "process" event from launcher'
            )
        if session.no_debug:
            return
        # Wait for the first incoming connection regardless of the PID - it won't
        # necessarily match due to the use of stubs like py.exe or "conda run".
        conn = servers.wait_for_connection(
            session, lambda conn: True, timeout=common.PROCESS_SPAWN_TIMEOUT
        )
        if conn is None:
            raise start_request.cant_handle("Timed out waiting for debuggee to spawn")
        conn.attach_to_session(session)
    finally:
        listener.close()
| 6,864 | Python | 34.755208 | 98 | 0.594988 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
    __all__: list[str]

# Nothing is re-exported; submodules are imported explicitly by consumers.
__all__ = []

# Set by the adapter entry point (see __main__.main) to a random hex string.
access_token = None
"""Access token used to authenticate with this adapter."""
| 346 | Python | 22.133332 | 65 | 0.725434 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/components.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import functools
from debugpy.common import json, log, messaging, util
ACCEPT_CONNECTIONS_TIMEOUT = 10
class ComponentNotAvailable(Exception):
    """Raised when a message handler needs a component (client, launcher, or
    server) that the session does not currently have.
    """

    def __init__(self, type):
        # The message names the missing component type, e.g.
        # "Launcher is not available".
        message = "{0} is not available".format(type.__name__)
        super().__init__(message)
class Component(util.Observable):
    """A component managed by a debug adapter: client, launcher, or debug server.

    Every component belongs to a Session, which is used for synchronization and
    shared data.

    Every component has its own message channel, and provides message handlers for
    that channel. All handlers should be decorated with @Component.message_handler,
    which ensures that Session is locked for the duration of the handler. Thus, only
    one handler is running at any given time across all components, unless the lock
    is released explicitly or via Session.wait_for().

    Components report changes to their attributes to Session, allowing one component
    to wait_for() a change caused by another component.
    """

    def __init__(self, session, stream=None, channel=None):
        # Exactly one of stream/channel must be provided: either we wrap a raw
        # stream in a new channel, or we adopt an existing channel.
        assert (stream is None) ^ (channel is None)

        try:
            # Non-blocking acquire on the session's RLock: this succeeds when the
            # lock is free or already held by this thread, so the assert only
            # fires if some OTHER thread holds the session during construction.
            lock_held = session.lock.acquire(blocking=False)
            assert lock_held, "__init__ of a Component subclass must lock its Session"
        finally:
            session.lock.release()

        super().__init__()

        self.session = session

        if channel is None:
            # Wrap the raw stream in a fresh channel and start pumping messages.
            stream.name = str(self)
            channel = messaging.JsonMessageChannel(stream, self)
            channel.start()
        else:
            # Adopt an existing channel; redirect its handlers to this component.
            channel.name = channel.stream.name = str(self)
            channel.handlers = self
        self.channel = channel
        self.is_connected = True

        # Do this last to avoid triggering useless notifications for assignments above.
        self.observers += [lambda *_: self.session.notify_changed()]

    def __str__(self):
        # e.g. "Client[1]" - component type plus owning session id.
        return f"{type(self).__name__}[{self.session.id}]"

    @property
    def client(self):
        return self.session.client

    @property
    def launcher(self):
        return self.session.launcher

    @property
    def server(self):
        return self.session.server

    def wait_for(self, *args, **kwargs):
        # Delegates to Session.wait_for(), releasing the session lock while waiting.
        return self.session.wait_for(*args, **kwargs)

    @staticmethod
    def message_handler(f):
        """Applied to a message handler to automatically lock and unlock the session
        for its duration, and to validate the session state.

        If the handler raises ComponentNotAvailable or JsonIOError, converts it to
        Message.cant_handle().
        """

        @functools.wraps(f)
        def lock_and_handle(self, message):
            try:
                with self.session:
                    return f(self, message)
            except ComponentNotAvailable as exc:
                raise message.cant_handle("{0}", exc, silent=True)
            except messaging.MessageHandlingError as exc:
                if exc.cause is message:
                    # Already attributed to this message; let it propagate as-is.
                    raise
                else:
                    # Re-attribute the failure to the message being handled now.
                    exc.propagate(message)
            except messaging.JsonIOError as exc:
                raise message.cant_handle(
                    "{0} disconnected unexpectedly", exc.stream.name, silent=True
                )

        return lock_and_handle

    def disconnect(self):
        # Marks this component as gone and finalizes the session under its lock.
        with self.session:
            self.is_connected = False
            self.session.finalize("{0} has disconnected".format(self))
def missing(session, type):
    """Returns a falsy stand-in for a component of the given type that the
    session does not have. Any attribute get/set on the stand-in raises
    ComponentNotAvailable (logged against the session).
    """

    def report():
        # Raise through the logger so the failure is recorded together with
        # the session it occurred in.
        try:
            raise ComponentNotAvailable(type)
        except Exception as exc:
            log.reraise_exception("{0} in {1}", exc, session)

    class Missing(object):
        """A dummy component that raises ComponentNotAvailable whenever some
        attribute is accessed on it.
        """

        def __getattr__(self, *_):
            return report()

        def __setattr__(self, *_):
            return report()

        def __bool__(self):
            return False

        # Python 2 truthiness alias, kept for parity with the original.
        __nonzero__ = __bool__

    return Missing()
class Capabilities(dict):
    """A collection of feature flags for a component. Corresponds to JSON properties
    in the DAP "initialize" request or response, other than those that identify the
    party.
    """

    PROPERTIES = {}
    """JSON property names and default values for the capabilities represented
    by instances of this class. Keys are names, and values are either default values
    or validators.

    If the value is callable, it must be a JSON validator; see debugpy.common.json for
    details. If the value is not callable, it is as if json.default(value) validator
    was used instead.
    """

    def __init__(self, component, message):
        """Parses an "initialize" request or response and extracts the feature flags.

        For every "X" in self.PROPERTIES, sets self["X"] to the corresponding value
        from message.payload if it's present there, or to the default value otherwise.
        """

        assert message.is_request("initialize") or message.is_response("initialize")

        self.component = component

        payload = message.payload
        for name, validate in self.PROPERTIES.items():
            # () is used as the "property absent" sentinel, since None and False
            # are both legitimate JSON values; validators must replace it with
            # their default.
            value = payload.get(name, ())
            if not callable(validate):
                validate = json.default(validate)

            try:
                value = validate(value)
            except Exception as exc:
                raise message.isnt_valid("{0} {1}", json.repr(name), exc)

            assert (
                value != ()
            ), f"{validate} must provide a default value for missing properties."
            self[name] = value

        log.debug("{0}", self)

    def __repr__(self):
        return f"{type(self).__name__}: {json.repr(dict(self))}"

    def require(self, *keys):
        # Raises unless every named capability is present and truthy.
        for key in keys:
            if not self[key]:
                raise messaging.MessageHandlingError(
                    f"{self.component} does not have capability {json.repr(key)}",
                )
| 6,081 | Python | 32.054348 | 87 | 0.617333 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/__main__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import argparse
import atexit
import codecs
import locale
import os
import sys
# WARNING: debugpy and submodules must not be imported on top level in this module,
# and should be imported locally inside main() instead.
def main(args):
    """Runs the debug adapter with the parsed command-line arguments.

    Depending on args, the adapter talks DAP over stdio (no --port), listens
    for client connections (--port), and/or is spawned on behalf of a debug
    server (--for-server). Blocks until all sessions end.
    """
    # If we're talking DAP over stdio, stderr is not guaranteed to be read from,
    # so disable it to avoid the pipe filling and locking up. This must be done
    # as early as possible, before the logging module starts writing to it.
    if args.port is None:
        sys.stderr = stderr = open(os.devnull, "w")
        atexit.register(stderr.close)

    # Imported here rather than at module level - see the module-level comment.
    from debugpy import adapter
    from debugpy.common import json, log, sockets
    from debugpy.adapter import clients, servers, sessions

    if args.for_server is not None:
        if os.name == "posix":
            # On POSIX, we need to leave the process group and its session, and then
            # daemonize properly by double-forking (first fork already happened when
            # this process was spawned).

            # NOTE: if process is already the session leader, then
            # setsid would fail with `operation not permitted`
            if os.getsid(os.getpid()) != os.getpid():
                os.setsid()
            if os.fork() != 0:
                sys.exit(0)

        for stdio in sys.stdin, sys.stdout, sys.stderr:
            if stdio is not None:
                stdio.close()

    if args.log_stderr:
        log.stderr.levels |= set(log.LEVELS)
    if args.log_dir is not None:
        log.log_dir = args.log_dir

    log.to_file(prefix="debugpy.adapter")
    log.describe_environment("debugpy.adapter startup environment:")

    servers.access_token = args.server_access_token
    if args.for_server is None:
        # Fresh random token that connecting servers must present.
        adapter.access_token = codecs.encode(os.urandom(32), "hex").decode("ascii")

    endpoints = {}
    try:
        client_host, client_port = clients.serve(args.host, args.port)
    except Exception as exc:
        if args.for_server is None:
            raise
        endpoints = {"error": "Can't listen for client connections: " + str(exc)}
    else:
        endpoints["client"] = {"host": client_host, "port": client_port}

    if args.for_server is not None:
        try:
            server_host, server_port = servers.serve()
        except Exception as exc:
            endpoints = {"error": "Can't listen for server connections: " + str(exc)}
        else:
            endpoints["server"] = {"host": server_host, "port": server_port}

        log.info(
            "Sending endpoints info to debug server at localhost:{0}:\n{1}",
            args.for_server,
            json.repr(endpoints),
        )

        try:
            sock = sockets.create_client()
            try:
                sock.settimeout(None)
                sock.connect(("127.0.0.1", args.for_server))
                sock_io = sock.makefile("wb", 0)
                try:
                    sock_io.write(json.dumps(endpoints).encode("utf-8"))
                finally:
                    sock_io.close()
            finally:
                sockets.close_socket(sock)
        except Exception:
            log.reraise_exception("Error sending endpoints info to debug server:")

        if "error" in endpoints:
            log.error("Couldn't set up endpoints; exiting.")
            sys.exit(1)

    listener_file = os.getenv("DEBUGPY_ADAPTER_ENDPOINTS")
    if listener_file is not None:
        log.info(
            "Writing endpoints info to {0!r}:\n{1}", listener_file, json.repr(endpoints)
        )

        def delete_listener_file():
            log.info("Listener ports closed; deleting {0!r}", listener_file)
            try:
                os.remove(listener_file)
            except Exception:
                log.swallow_exception(
                    "Failed to delete {0!r}", listener_file, level="warning"
                )

        try:
            with open(listener_file, "w") as f:
                atexit.register(delete_listener_file)
                print(json.dumps(endpoints), file=f)
        except Exception:
            log.reraise_exception("Error writing endpoints info to file:")

    if args.port is None:
        # DAP over stdio: the single client is attached to our own stdin/stdout.
        clients.Client("stdio")

    # These must be registered after the one above, to ensure that the listener sockets
    # are closed before the endpoint info file is deleted - this way, another process
    # can wait for the file to go away as a signal that the ports are no longer in use.
    atexit.register(servers.stop_serving)
    atexit.register(clients.stop_serving)

    servers.wait_until_disconnected()
    log.info("All debug servers disconnected; waiting for remaining sessions...")
    sessions.wait_until_ended()
    log.info("All debug sessions have ended; exiting.")
def _parse_argv(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"--for-server", type=int, metavar="PORT", help=argparse.SUPPRESS
)
parser.add_argument(
"--port",
type=int,
default=None,
metavar="PORT",
help="start the adapter in debugServer mode on the specified port",
)
parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
metavar="HOST",
help="start the adapter in debugServer mode on the specified host",
)
parser.add_argument(
"--access-token", type=str, help="access token expected from the server"
)
parser.add_argument(
"--server-access-token", type=str, help="access token expected by the server"
)
parser.add_argument(
"--log-dir",
type=str,
metavar="DIR",
help="enable logging and use DIR to save adapter logs",
)
parser.add_argument(
"--log-stderr", action="store_true", help="enable logging to stderr"
)
args = parser.parse_args(argv[1:])
if args.port is None:
if args.log_stderr:
parser.error("--log-stderr requires --port")
if args.for_server is not None:
parser.error("--for-server requires --port")
return args
if __name__ == "__main__":
    # debugpy can also be invoked directly rather than via -m. In this case, the first
    # entry on sys.path is the one added automatically by Python for the directory
    # containing this file. This means that import debugpy will not work, since we need
    # the parent directory of debugpy/ to be in sys.path, rather than debugpy/adapter/.
    #
    # The other issue is that many other absolute imports will break, because they
    # will be resolved relative to debugpy/adapter/ - e.g. `import state` will then try
    # to import debugpy/adapter/state.py.
    #
    # To fix both, we need to replace the automatically added entry such that it points
    # at parent directory of debugpy/ instead of debugpy/adapter, import debugpy with that
    # in sys.path, and then remove the first entry altogether, so that it doesn't
    # affect any further imports we might do. For example, suppose the user did:
    #
    #   python /foo/bar/debugpy/adapter ...
    #
    # At the beginning of this script, sys.path will contain "/foo/bar/debugpy/adapter"
    # as the first entry. What we want is to replace it with "/foo/bar', then import
    # debugpy with that in effect, and then remove the replaced entry before any more
    # code runs. The imported debugpy module will remain in sys.modules, and thus all
    # future imports of it or its submodules will resolve accordingly.
    if "debugpy" not in sys.modules:
        # Do not use dirname() to walk up - this can be a relative path, e.g. ".".
        sys.path[0] = sys.path[0] + "/../../"
        __import__("debugpy")
        del sys.path[0]

    # Apply OS-global and user-specific locale settings.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except Exception:
        # On POSIX, locale is set via environment variables, and this can fail if
        # those variables reference a non-existing locale. Ignore and continue using
        # the default "C" locale if so.
        pass

    main(_parse_argv(sys.argv))
| 8,257 | Python | 35.219298 | 90 | 0.619232 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/servers.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import annotations
import os
import subprocess
import sys
import threading
import time
import debugpy
from debugpy import adapter
from debugpy.common import json, log, messaging, sockets
from debugpy.adapter import components
import traceback
import io
access_token = None
"""Access token used to authenticate with the servers."""
listener = None
"""Listener socket that accepts server connections."""
_lock = threading.RLock()
_connections = []
"""All servers that are connected to this adapter, in order in which they connected.
"""
_connections_changed = threading.Event()
class Connection(object):
    """A debug server that is connected to the adapter.

    Servers that are not participating in a debug session are managed directly by the
    corresponding Connection instance.

    Servers that are participating in a debug session are managed by that sessions's
    Server component instance, but Connection object remains, and takes over again
    once the session ends.
    """

    disconnected: bool

    process_replaced: bool
    """Whether this is a connection to a process that is being replaced in situ
    by another process, e.g. via exec().
    """

    server: Server | None
    """The Server component, if this debug server belongs to Session.
    """

    pid: int | None
    ppid: int | None
    channel: messaging.JsonMessageChannel

    def __init__(self, sock):
        from debugpy.adapter import sessions

        self.disconnected = False
        self.process_replaced = False
        self.server = None
        self.pid = None

        stream = messaging.JsonIOStream.from_socket(sock, str(self))
        self.channel = messaging.JsonMessageChannel(stream, self)
        self.channel.start()

        try:
            self.authenticate()
            # Query the server for its PID/PPID so we can match it to a session.
            info = self.channel.request("pydevdSystemInfo")
            process_info = info("process", json.object())
            self.pid = process_info("pid", int)
            self.ppid = process_info("ppid", int, optional=True)
            if self.ppid == ():
                # () is the "absent" sentinel for optional properties.
                self.ppid = None
            self.channel.name = stream.name = str(self)

            with _lock:
                # The server can disconnect concurrently before we get here, e.g. if
                # it was force-killed. If the disconnect() handler has already run,
                # don't register this server or report it, since there's nothing to
                # deregister it.
                if self.disconnected:
                    return

                # An existing connection with the same PID and process_replaced == True
                # corresponds to the process that replaced itself with this one, so it's
                # not an error.
                if any(
                    conn.pid == self.pid and not conn.process_replaced
                    for conn in _connections
                ):
                    raise KeyError(f"{self} is already connected to this adapter")
                is_first_server = len(_connections) == 0
                _connections.append(self)
                _connections_changed.set()

        except Exception:
            log.swallow_exception("Failed to accept incoming server connection:")
            self.channel.close()

            # If this was the first server to connect, and the main thread is inside
            # wait_until_disconnected(), we want to unblock it and allow it to exit.
            dont_wait_for_first_connection()

            # If we couldn't retrieve all the necessary info from the debug server,
            # or there's a PID clash, we don't want to track this debuggee anymore,
            # but we want to continue accepting connections.
            return

        # Try the parent process first, then this process itself (the latter
        # covers in-situ process replacement).
        parent_session = sessions.get(self.ppid)
        if parent_session is None:
            parent_session = sessions.get(self.pid)
        if parent_session is None:
            log.info("No active debug session for parent process of {0}.", self)
        else:
            if self.pid == parent_session.pid:
                parent_server = parent_session.server
                if not (parent_server and parent_server.connection.process_replaced):
                    log.error("{0} is not expecting replacement.", parent_session)
                    self.channel.close()
                    return
            try:
                parent_session.client.notify_of_subprocess(self)
                return
            except Exception:
                # This might fail if the client concurrently disconnects from the parent
                # session. We still want to keep the connection around, in case the
                # client reconnects later. If the parent session was "launch", it'll take
                # care of closing the remaining server connections.
                log.swallow_exception(
                    "Failed to notify parent session about {0}:", self
                )

        # If we got to this point, the subprocess notification was either not sent,
        # or not delivered successfully. For the first server, this is expected, since
        # it corresponds to the root process, and there is no other debug session to
        # notify. But subsequent server connections represent subprocesses, and those
        # will not start running user code until the client tells them to. Since there
        # isn't going to be a client without the notification, such subprocesses have
        # to be unblocked.
        if is_first_server:
            return
        log.info("No clients to wait for - unblocking {0}.", self)
        try:
            # Minimal DAP handshake to let the orphaned subprocess run freely.
            self.channel.request("initialize", {"adapterID": "debugpy"})
            self.channel.request("attach", {"subProcessId": self.pid})
            self.channel.request("configurationDone")
            self.channel.request("disconnect")
        except Exception:
            log.swallow_exception("Failed to unblock orphaned subprocess:")
            self.channel.close()

    def __str__(self):
        return "Server" + ("[?]" if self.pid is None else f"[pid={self.pid}]")

    def authenticate(self):
        # No-op unless either side has an access token configured.
        if access_token is None and adapter.access_token is None:
            return
        auth = self.channel.request(
            "pydevdAuthorize", {"debugServerAccessToken": access_token}
        )
        if auth["clientAccessToken"] != adapter.access_token:
            self.channel.close()
            raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.')

    def request(self, request):
        # Servers not in a session may not send requests to the client.
        raise request.isnt_valid(
            "Requests from the debug server to the client are not allowed."
        )

    def event(self, event):
        # Events from servers not in a session are ignored.
        pass

    def terminated_event(self, event):
        self.channel.close()

    def disconnect(self):
        with _lock:
            self.disconnected = True
            if self.server is not None:
                # If the disconnect happened while Server was being instantiated,
                # we need to tell it, so that it can clean up via Session.finalize().
                # It will also take care of deregistering the connection in that case.
                self.server.disconnect()
            elif self in _connections:
                _connections.remove(self)
                _connections_changed.set()

    def attach_to_session(self, session):
        """Attaches this server to the specified Session as a Server component.

        Raises ValueError if the server already belongs to some session.
        """
        with _lock:
            if self.server is not None:
                raise ValueError
            log.info("Attaching {0} to {1}", self, session)
            self.server = Server(session, self)
class Server(components.Component):
    """Handles the debug server side of a debug session."""

    message_handler = components.Component.message_handler

    connection: Connection

    class Capabilities(components.Capabilities):
        # Conservative defaults: every capability is assumed absent unless the
        # server's "initialize" response says otherwise.
        PROPERTIES = {
            "supportsCompletionsRequest": False,
            "supportsConditionalBreakpoints": False,
            "supportsConfigurationDoneRequest": False,
            "supportsDataBreakpoints": False,
            "supportsDelayedStackTraceLoading": False,
            "supportsDisassembleRequest": False,
            "supportsEvaluateForHovers": False,
            "supportsExceptionInfoRequest": False,
            "supportsExceptionOptions": False,
            "supportsFunctionBreakpoints": False,
            "supportsGotoTargetsRequest": False,
            "supportsHitConditionalBreakpoints": False,
            "supportsLoadedSourcesRequest": False,
            "supportsLogPoints": False,
            "supportsModulesRequest": False,
            "supportsReadMemoryRequest": False,
            "supportsRestartFrame": False,
            "supportsRestartRequest": False,
            "supportsSetExpression": False,
            "supportsSetVariable": False,
            "supportsStepBack": False,
            "supportsStepInTargetsRequest": False,
            "supportsTerminateRequest": True,
            "supportsTerminateThreadsRequest": False,
            "supportsValueFormattingOptions": False,
            "exceptionBreakpointFilters": [],
            "additionalModuleColumns": [],
            "supportedChecksumAlgorithms": [],
        }

    def __init__(self, session, connection):
        assert connection.server is None
        with session:
            assert not session.server
            super().__init__(session, channel=connection.channel)

            self.connection = connection

            assert self.session.pid is None
            if self.session.launcher and self.session.launcher.pid != self.pid:
                # The launcher may report the PID of a stub (e.g. py.exe), so the
                # server's own report wins; just log the discrepancy.
                log.info(
                    "Launcher reported PID={0}, but server reported PID={1}",
                    self.session.launcher.pid,
                    self.pid,
                )
            self.session.pid = self.pid

            session.server = self

    @property
    def pid(self):
        """Process ID of the debuggee process, as reported by the server."""
        return self.connection.pid

    @property
    def ppid(self):
        """Parent process ID of the debuggee process, as reported by the server."""
        return self.connection.ppid

    def initialize(self, request):
        assert request.is_request("initialize")
        self.connection.authenticate()
        # Forward "initialize" to the server and capture its capabilities.
        request = self.channel.propagate(request)
        request.wait_for_response()
        self.capabilities = self.Capabilities(self, request.response)

    # Generic request handler, used if there's no specific handler below.
    @message_handler
    def request(self, request):
        # Do not delegate requests from the server by default. There is a security
        # boundary between the server and the adapter, and we cannot trust arbitrary
        # requests sent over that boundary, since they may contain arbitrary code
        # that the client will execute - e.g. "runInTerminal". The adapter must only
        # propagate requests that it knows are safe.
        raise request.isnt_valid(
            "Requests from the debug server to the client are not allowed."
        )

    # Generic event handler, used if there's no specific handler below.
    @message_handler
    def event(self, event):
        self.client.propagate_after_start(event)

    @message_handler
    def initialized_event(self, event):
        # pydevd doesn't send it, but the adapter will send its own in any case.
        pass

    @message_handler
    def process_event(self, event):
        # If there is a launcher, it's handling the process event.
        if not self.launcher:
            self.client.propagate_after_start(event)

    @message_handler
    def continued_event(self, event):
        # https://github.com/microsoft/ptvsd/issues/1530
        #
        # DAP specification says that a step request implies that only the thread on
        # which that step occurred is resumed for the duration of the step. However,
        # for VS compatibility, pydevd can operate in a mode that resumes all threads
        # instead. This is set according to the value of "steppingResumesAllThreads"
        # in "launch" or "attach" request, which defaults to true. If explicitly set
        # to false, pydevd will only resume the thread that was stepping.
        #
        # To ensure that the client is aware that other threads are getting resumed in
        # that mode, pydevd sends a "continued" event with "allThreadsResumed": true.
        # when responding to a step request. This ensures correct behavior in VSCode
        # and other DAP-conformant clients.
        #
        # On the other hand, VS does not follow the DAP specification in this regard.
        # When it requests a step, it assumes that all threads will be resumed, and
        # does not expect to see "continued" events explicitly reflecting that fact.
        # If such events are sent regardless, VS behaves erratically. Thus, we have
        # to suppress them specifically for VS.
        if self.client.client_id not in ("visualstudio", "vsformac"):
            self.client.propagate_after_start(event)

    @message_handler
    def exited_event(self, event: messaging.Event):
        if event("pydevdReason", str, optional=True) == "processReplaced":
            # The parent process used some API like exec() that replaced it with another
            # process in situ. The connection will shut down immediately afterwards, but
            # we need to keep the corresponding session alive long enough to report the
            # subprocess to it.
            self.connection.process_replaced = True
        else:
            # If there is a launcher, it's handling the exit code.
            if not self.launcher:
                self.client.propagate_after_start(event)

    @message_handler
    def terminated_event(self, event):
        # Do not propagate this, since we'll report our own.
        self.channel.close()

    def detach_from_session(self):
        # Hands the channel back to the bare Connection once the session ends.
        with _lock:
            self.is_connected = False
            self.channel.handlers = self.connection
            self.channel.name = self.channel.stream.name = str(self.connection)
            self.connection.server = None

    def disconnect(self):
        if self.connection.process_replaced:
            # Wait for the replacement server to connect to the adapter, and to report
            # itself to the client for this session if there is one.
            log.info("{0} is waiting for replacement subprocess.", self)
            session = self.session
            if not session.client or not session.client.is_connected:
                wait_for_connection(
                    session, lambda conn: conn.pid == self.pid, timeout=30
                )
            else:
                self.wait_for(
                    lambda: (
                        not session.client
                        or not session.client.is_connected
                        or any(
                            conn.pid == self.pid
                            for conn in session.client.known_subprocesses
                        )
                    ),
                    timeout=30,
                )
        with _lock:
            _connections.remove(self.connection)
            _connections_changed.set()
        super().disconnect()
def serve(host="127.0.0.1", port=0):
    """Starts listening for incoming debug server connections, and returns the
    actual (host, port) the listener socket is bound to.
    """
    global listener
    sock = sockets.serve("Server", Connection, host, port)
    listener = sock
    return sock.getsockname()
def is_serving():
    """Tells whether the listener for incoming server connections is active."""
    return listener is not None
def stop_serving():
    """Closes the server connection listener, if there is one. Errors are
    logged and swallowed rather than propagated.
    """
    global listener
    try:
        if listener is None:
            return
        # Close first; only clear the global once the close succeeded.
        listener.close()
        listener = None
    except Exception:
        log.swallow_exception(level="warning")
def connections():
    """Returns a snapshot of all currently registered server connections,
    in the order they connected.
    """
    with _lock:
        return _connections[:]
def wait_for_connection(session, predicate, timeout=None):
    """Waits until there is a server matching the specified predicate connected to
    this adapter, and returns the corresponding Connection.

    If there is more than one server connection already available, returns the oldest
    one.

    timeout semantics: None blocks indefinitely; 0 polls exactly once and
    returns immediately; any other value waits up to that many seconds and
    returns None on expiry.
    """

    def wait_for_timeout():
        time.sleep(timeout)
        # Function attribute doubles as a shared flag between this timer thread
        # and the waiting loop below.
        wait_for_timeout.timed_out = True
        with _lock:
            _connections_changed.set()

    # timeout == 0 means "already timed out": check once and bail.
    wait_for_timeout.timed_out = timeout == 0
    if timeout:
        thread = threading.Thread(
            target=wait_for_timeout, name="servers.wait_for_connection() timeout"
        )
        thread.daemon = True
        thread.start()

    if timeout != 0:
        log.info("{0} waiting for connection from debug server...", session)
    while True:
        with _lock:
            _connections_changed.clear()
            conns = (conn for conn in _connections if predicate(conn))
            conn = next(conns, None)
            if conn is not None or wait_for_timeout.timed_out:
                return conn
        _connections_changed.wait()
def wait_until_disconnected():
    """Blocks until all debug servers disconnect from the adapter.

    If there are no server connections, waits until at least one is established first,
    before waiting for it to disconnect.
    """
    while True:
        # Woken on every connect/disconnect; return once the list is empty.
        _connections_changed.wait()
        with _lock:
            _connections_changed.clear()
            if not len(_connections):
                return
def dont_wait_for_first_connection():
    """Unblocks any pending wait_until_disconnected() call that is waiting on the
    first server to connect.
    """
    with _lock:
        # Waking the waiter while the connection list is empty makes
        # wait_until_disconnected() observe that and return.
        _connections_changed.set()
def inject(pid, debugpy_args, on_output):
    """Injects the debug server into the process with the given PID.

    Spawns a helper debugpy process with `--pid`, which attaches to the target
    process and makes it connect back to this adapter's server listener.

    on_output(category, text) is called to surface the injector's progress and
    output; category is "stdout" or "stderr".

    Raises messaging.MessageHandlingError if the injector process cannot be
    spawned. Attach progress itself is reported asynchronously via on_output.
    """
    host, port = listener.getsockname()

    cmdline = [
        sys.executable,
        os.path.dirname(debugpy.__file__),
        "--connect",
        host + ":" + str(port),
    ]
    if adapter.access_token is not None:
        cmdline += ["--adapter-access-token", adapter.access_token]
    cmdline += debugpy_args
    cmdline += ["--pid", str(pid)]

    log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline)
    try:
        injector = subprocess.Popen(
            cmdline,
            bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    except Exception as exc:
        log.swallow_exception(
            "Failed to inject debug server into process with PID={0}", pid
        )
        raise messaging.MessageHandlingError(
            "Failed to inject debug server into process with PID={0}: {1}".format(
                pid, exc
            )
        )

    # We need to capture the output of the injector - needed so that it doesn't
    # get blocked on a write() syscall (besides showing it to the user if it
    # is taking longer than expected).

    output_collected = []
    output_collected.append("--- Starting attach to pid: {0} ---\n".format(pid))

    def capture(stream):
        # Drains the injector's combined stdout/stderr, buffering lines for
        # later display and mirroring them into the adapter log.
        nonlocal output_collected
        try:
            while True:
                line = stream.readline()
                if not line:
                    break
                line = line.decode("utf-8", "replace")
                output_collected.append(line)
                log.info("Injector[PID={0}] output: {1}", pid, line.rstrip())
            log.info("Injector[PID={0}] exited.", pid)
        except Exception:
            s = io.StringIO()
            traceback.print_exc(file=s)
            on_output("stderr", s.getvalue())

    threading.Thread(
        target=capture,
        name=f"Injector[PID={pid}] stdout",
        args=(injector.stdout,),
        daemon=True,
    ).start()

    def info_on_timeout():
        # Polls the injector once a second, reporting progress, and dumping
        # the collected output if the attach fails or takes too long.
        nonlocal output_collected
        taking_longer_than_expected = False
        initial_time = time.time()
        while True:
            time.sleep(1)
            returncode = injector.poll()
            if returncode is not None:
                if returncode != 0:
                    # Something didn't work out. Let's print more info to the user.
                    on_output(
                        "stderr",
                        "Attach to PID failed.\n\n",
                    )

                    old = output_collected
                    output_collected = []
                    contents = "".join(old)
                    # contents is already a single string; forward it as-is.
                    # (Previously this was "".join(contents), which re-joined the
                    # string character by character - a redundant copy.)
                    on_output("stderr", contents)
                break

            elapsed = time.time() - initial_time
            on_output(
                "stdout", "Attaching to PID: %s (elapsed: %.2fs).\n" % (pid, elapsed)
            )

            if not taking_longer_than_expected:
                if elapsed > 10:
                    taking_longer_than_expected = True
                    if sys.platform in ("linux", "linux2"):
                        on_output(
                            "stdout",
                            "\nThe attach to PID is taking longer than expected.\n",
                        )
                        on_output(
                            "stdout",
                            "On Linux it's possible to customize the value of\n",
                        )
                        on_output(
                            "stdout",
                            "`PYDEVD_GDB_SCAN_SHARED_LIBRARIES` so that fewer libraries.\n",
                        )
                        on_output(
                            "stdout",
                            "are scanned when searching for the needed symbols.\n\n",
                        )
                        on_output(
                            "stdout",
                            "i.e.: set in your environment variables (and restart your editor/client\n",
                        )
                        on_output(
                            "stdout",
                            "so that it picks up the updated environment variable value):\n\n",
                        )
                        on_output(
                            "stdout",
                            "PYDEVD_GDB_SCAN_SHARED_LIBRARIES=libdl, libltdl, libc, libfreebl3\n\n",
                        )
                        on_output(
                            "stdout",
                            "-- the actual library may be different (the gdb output typically\n",
                        )
                        on_output(
                            "stdout",
                            "-- writes the libraries that will be used, so, it should be possible\n",
                        )
                        on_output(
                            "stdout",
                            "-- to test other libraries if the above doesn't work).\n\n",
                        )

            if taking_longer_than_expected:
                # If taking longer than expected, start showing the actual output to the user.
                old = output_collected
                output_collected = []
                contents = "".join(old)
                if contents:
                    on_output("stderr", contents)

    threading.Thread(
        target=info_on_timeout, name=f"Injector[PID={pid}] info on timeout", daemon=True
    ).start()
| 23,348 | Python | 36.720517 | 104 | 0.575338 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/adapter/sessions.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import itertools
import os
import signal
import threading
import time
from debugpy import common
from debugpy.common import log, util
from debugpy.adapter import components, launchers, servers
_lock = threading.RLock()
_sessions = set()
_sessions_changed = threading.Event()
class Session(util.Observable):
    """A debug session involving a client, an adapter, a launcher, and a debug server.
    The client and the adapter are always present, and at least one of launcher and debug
    server is present, depending on the scenario.
    """
    # Monotonically increasing source of session IDs, shared by all sessions.
    _counter = itertools.count(1)
    def __init__(self):
        from debugpy.adapter import clients
        super().__init__()
        # Re-entrant lock guarding all session state; also backs the condition
        # variable used by wait_for().
        self.lock = threading.RLock()
        self.id = next(self._counter)
        self._changed_condition = threading.Condition(self.lock)
        self.client = components.missing(self, clients.Client)
        """The client component. Always present."""
        self.launcher = components.missing(self, launchers.Launcher)
        """The launcher component. Always present in "launch" sessions, and never
        present in "attach" sessions.
        """
        self.server = components.missing(self, servers.Server)
        """The debug server component. Always present, unless this is a "launch"
        session with "noDebug".
        """
        self.no_debug = None
        """Whether this is a "noDebug" session."""
        self.pid = None
        """Process ID of the debuggee process."""
        self.debug_options = {}
        """Debug options as specified by "launch" or "attach" request."""
        self.is_finalizing = False
        """Whether finalize() has been invoked."""
        # Any observed attribute change wakes up wait_for() calls via notify_changed().
        self.observers += [lambda *_: self.notify_changed()]
    def __str__(self):
        return f"Session[{self.id}]"
    def __enter__(self):
        """Lock the session for exclusive access."""
        self.lock.acquire()
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        """Unlock the session."""
        self.lock.release()
    def register(self):
        # Adds this session to the global registry, and signals any thread
        # blocked in wait_until_ended().
        with _lock:
            _sessions.add(self)
            _sessions_changed.set()
    def notify_changed(self):
        # Wakes up wait_for() calls, and removes the session from the global
        # registry once all of its components have disconnected.
        with self:
            self._changed_condition.notify_all()
        # A session is considered ended once all components disconnect, and there
        # are no further incoming messages from anything to handle.
        components = self.client, self.launcher, self.server
        if all(not com or not com.is_connected for com in components):
            with _lock:
                if self in _sessions:
                    log.info("{0} has ended.", self)
                    _sessions.remove(self)
                    _sessions_changed.set()
    def wait_for(self, predicate, timeout=None):
        """Waits until predicate() becomes true.
        The predicate is invoked with the session locked. If satisfied, the method
        returns immediately. Otherwise, the lock is released (even if it was held
        at entry), and the method blocks waiting for some attribute of either self,
        self.client, self.server, or self.launcher to change. On every change, session
        is re-locked and predicate is re-evaluated, until it is satisfied.
        While the session is unlocked, message handlers for components other than
        the one that is waiting can run, but message handlers for that one are still
        blocked.
        If timeout is not None, the method will unblock and return after that many
        seconds regardless of whether the predicate was satisfied. The method returns
        False if it timed out, and True otherwise.
        """
        def wait_for_timeout():
            # Runs on a daemon thread: after the timeout expires, set the flag
            # and nudge the condition variable so the wait loop re-checks.
            time.sleep(timeout)
            wait_for_timeout.timed_out = True
            self.notify_changed()
        wait_for_timeout.timed_out = False
        if timeout is not None:
            thread = threading.Thread(
                target=wait_for_timeout, name="Session.wait_for() timeout"
            )
            thread.daemon = True
            thread.start()
        with self:
            while not predicate():
                if wait_for_timeout.timed_out:
                    return False
                self._changed_condition.wait()
            return True
    def finalize(self, why, terminate_debuggee=None):
        """Finalizes the debug session.
        If the server is present, sends "disconnect" request with "terminateDebuggee"
        set as specified request to it; waits for it to disconnect, allowing any
        remaining messages from it to be handled; and closes the server channel.
        If the launcher is present, sends "terminate" request to it, regardless of the
        value of terminate; waits for it to disconnect, allowing any remaining messages
        from it to be handled; and closes the launcher channel.
        If the client is present, sends "terminated" event to it.
        If terminate_debuggee=None, it is treated as True if the session has a Launcher
        component, and False otherwise.
        """
        # Idempotent: only the first call does any work.
        if self.is_finalizing:
            return
        self.is_finalizing = True
        log.info("{0}; finalizing {1}.", why, self)
        if terminate_debuggee is None:
            terminate_debuggee = bool(self.launcher)
        try:
            self._finalize(why, terminate_debuggee)
        except Exception:
            # Finalization should never fail, and if it does, the session is in an
            # indeterminate and likely unrecoverable state, so just fail fast.
            log.swallow_exception("Fatal error while finalizing {0}", self)
            os._exit(1)
        log.info("{0} finalized.", self)
    def _finalize(self, why, terminate_debuggee):
        # Actual finalization logic; see finalize() for the contract.
        # If the client started a session, and then disconnected before issuing "launch"
        # or "attach", the main thread will be blocked waiting for the first server
        # connection to come in - unblock it, so that we can exit.
        servers.dont_wait_for_first_connection()
        if self.server:
            if self.server.is_connected:
                if terminate_debuggee and self.launcher and self.launcher.is_connected:
                    # If we were specifically asked to terminate the debuggee, and we
                    # can ask the launcher to kill it, do so instead of disconnecting
                    # from the server to prevent debuggee from running any more code.
                    self.launcher.terminate_debuggee()
                else:
                    # Otherwise, let the server handle it the best it can.
                    try:
                        self.server.channel.request(
                            "disconnect", {"terminateDebuggee": terminate_debuggee}
                        )
                    except Exception:
                        pass
            self.server.detach_from_session()
        if self.launcher and self.launcher.is_connected:
            # If there was a server, we just disconnected from it above, which should
            # cause the debuggee process to exit, unless it is being replaced in situ -
            # so let's wait for that first.
            if self.server and not self.server.connection.process_replaced:
                log.info('{0} waiting for "exited" event...', self)
                if not self.wait_for(
                    lambda: self.launcher.exit_code is not None,
                    timeout=common.PROCESS_EXIT_TIMEOUT,
                ):
                    log.warning('{0} timed out waiting for "exited" event.', self)
            # Terminate the debuggee process if it's still alive for any reason -
            # whether it's because there was no server to handle graceful shutdown,
            # or because the server couldn't handle it for some reason - unless the
            # process is being replaced in situ.
            if not (self.server and self.server.connection.process_replaced):
                self.launcher.terminate_debuggee()
            # Wait until the launcher message queue fully drains. There is no timeout
            # here, because the final "terminated" event will only come after reading
            # user input in wait-on-exit scenarios. In addition, if the process was
            # replaced in situ, the launcher might still have more output to capture
            # from its replacement.
            log.info("{0} waiting for {1} to disconnect...", self, self.launcher)
            self.wait_for(lambda: not self.launcher.is_connected)
            try:
                self.launcher.channel.close()
            except Exception:
                log.swallow_exception()
        if self.client:
            if self.client.is_connected:
                # Tell the client that debugging is over, but don't close the channel until it
                # tells us to, via the "disconnect" request.
                try:
                    self.client.channel.send_event("terminated")
                except Exception:
                    pass
            if (
                self.client.start_request is not None
                and self.client.start_request.command == "launch"
                and not (self.server and self.server.connection.process_replaced)
            ):
                servers.stop_serving()
                log.info(
                    '"launch" session ended - killing remaining debuggee processes.'
                )
                pids_killed = set()
                if self.launcher and self.launcher.pid is not None:
                    # Already killed above.
                    pids_killed.add(self.launcher.pid)
                while True:
                    conns = [
                        conn
                        for conn in servers.connections()
                        if conn.pid not in pids_killed
                    ]
                    if not len(conns):
                        break
                    for conn in conns:
                        log.info("Killing {0}", conn)
                        try:
                            os.kill(conn.pid, signal.SIGTERM)
                        except Exception:
                            log.swallow_exception("Failed to kill {0}", conn)
                        pids_killed.add(conn.pid)
def get(pid):
    """Returns the registered session whose debuggee process ID is pid, or None."""
    with _lock:
        for session in _sessions:
            if session.pid == pid:
                return session
        return None
def wait_until_ended():
    """Blocks until all sessions have ended.

    A session ends when all components that it manages disconnect from it.
    """
    while True:
        with _lock:
            if not _sessions:
                return
            # Arm the event under the lock so a concurrent set() isn't lost.
            _sessions_changed.clear()
        _sessions_changed.wait()
| 10,889 | Python | 37.617021 | 94 | 0.584994 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/util.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import inspect
import os
import sys
def evaluate(code, path=__file__, mode="eval"):
    """Compiles and evaluates code, returning the result.

    mode is passed through to compile() ("eval" by default; "exec" is also valid).
    Globals are empty; sys.modules is used as the locals namespace, so top-level
    module names (e.g. "os") resolve to the corresponding module objects.
    """
    # Setting file path here to avoid breaking here if users have set
    # "break on exception raised" setting. This code can potentially run
    # in user process and is indistinguishable if the path is not set.
    # We use the path internally to skip exception inside the debugger.
    #
    # Bug fix: the mode parameter was previously ignored ("eval" was hard-coded),
    # making mode="exec" silently fail on multi-statement code.
    expr = compile(code, path, mode)
    return eval(expr, {}, sys.modules)
class Observable(object):
    """An object that invokes registered callbacks whenever one of its
    attributes is assigned.
    """

    # Class-level fallback, so that __setattr__ works even when it runs
    # before __init__ has created the per-instance list.
    observers = ()

    def __init__(self):
        self.observers = []

    def __setattr__(self, name, value):
        try:
            return super().__setattr__(name, value)
        finally:
            # Notify after the attribute is actually set, even if it raised.
            for callback in self.observers:
                callback(self, name)
class Env(dict):
    """A dict for environment variables."""

    @staticmethod
    def snapshot():
        """Returns a snapshot of the current environment."""
        return Env(os.environ)

    def copy(self, updated_from=None):
        """Returns a copy of this environment, optionally updated from the
        given mapping.
        """
        result = Env(self)
        if updated_from is not None:
            result.update(updated_from)
        return result

    def prepend_to(self, key, entry):
        """Prepends a new entry to a PATH-style environment variable, creating
        it if it doesn't exist already.
        """
        if key in self:
            self[key] = entry + os.path.pathsep + self[key]
        else:
            self[key] = entry
def force_str(s, encoding, errors="strict"):
    """Converts s to str, using the provided encoding. If s is already str,
    it is returned as is.
    """
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    return str(s)
def force_bytes(s, encoding, errors="strict"):
    """Converts s to bytes, using the provided encoding. If s is already bytes,
    it is returned as is.

    If errors="strict" and s is bytes, its encoding is verified by decoding it;
    UnicodeError is raised if it cannot be decoded.
    """
    if isinstance(s, str):
        return s.encode(encoding, errors)
    result = bytes(s)
    if errors == "strict":
        # Return value ignored - decoding is performed solely to verify that
        # the bytes are valid in the given encoding.
        result.decode(encoding, errors)
    return result
def force_ascii(s, errors="strict"):
    """Same as force_bytes(s, "ascii", errors)"""
    # Thin convenience wrapper; all validation semantics come from force_bytes().
    return force_bytes(s, "ascii", errors)
def force_utf8(s, errors="strict"):
    """Same as force_bytes(s, "utf8", errors)"""
    # Thin convenience wrapper; all validation semantics come from force_bytes().
    return force_bytes(s, "utf8", errors)
def nameof(obj, quote=False):
    """Returns the most descriptive name of a Python module, class, or function,
    as a Unicode string

    If quote=True, name is quoted with repr().

    Best-effort, but guaranteed to not fail - always returns something.
    """
    try:
        name = obj.__qualname__
    except Exception:
        try:
            name = obj.__name__
        except Exception:
            # Fall back to raw repr(), which is already quoted as needed.
            try:
                name = repr(obj)
            except Exception:
                return "<unknown>"
            quote = False
    if quote:
        try:
            name = repr(name)
        except Exception:
            pass
    return force_str(name, "utf-8", "replace")
def srcnameof(obj):
    """Returns the most descriptive name of a Python module, class, or function,
    including source information (filename and linenumber), if available.
    Best-effort, but guaranteed to not fail - always returns something.
    """
    name = nameof(obj, quote=True)
    # Get the source information if possible.
    try:
        src_file = inspect.getsourcefile(obj)
    except Exception:
        # No source info available - return just the name.
        pass
    else:
        name += f" (file {src_file!r}"
        try:
            _, src_lineno = inspect.getsourcelines(obj)
        except Exception:
            # File known, but line number is not - still close the parenthesis.
            pass
        else:
            name += f", line {src_lineno}"
        name += ")"
    return name
def hide_debugpy_internals():
    """Returns True if the caller should hide something from debugpy.

    Controlled by the DEBUGPY_TRACE_DEBUGPY environment variable: when it is
    set (to any value), internals are NOT hidden.
    """
    return os.getenv("DEBUGPY_TRACE_DEBUGPY") is None
def hide_thread_from_debugger(thread):
    """Disables tracing for the given thread if DEBUGPY_TRACE_DEBUGPY is not set.

    DEBUGPY_TRACE_DEBUGPY is used to debug debugpy with debugpy
    """
    if not hide_debugpy_internals():
        return
    # These attributes are recognized by pydevd, which skips such threads.
    thread.pydev_do_not_trace = True
    thread.is_pydev_daemon_thread = True
| 4,646 | Python | 27.163636 | 81 | 0.610848 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/stacks.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""Provides facilities to dump all stacks of all threads in the process.
"""
import os
import sys
import time
import threading
import traceback
from debugpy.common import log
def dump():
    """Dump stacks of all threads in this process, except for the current thread."""
    tid = threading.current_thread().ident
    pid = os.getpid()
    log.info("Dumping stacks for process {0}...", pid)
    for t_ident, frame in sys._current_frames().items():
        if t_ident == tid:
            # Skip the current thread - its stack is just this function.
            continue
        # Find the threading.Thread object matching this frame's thread id,
        # to report its name and daemon status.
        # Bug fix: previously compared t.ident == tid (the current thread's id,
        # which was already excluded above), so the name and daemon status were
        # always reported as "<unknown>".
        for t in threading.enumerate():
            if t.ident == t_ident:
                t_name = t.name
                t_daemon = t.daemon
                break
        else:
            t_name = t_daemon = "<unknown>"
        stack = "".join(traceback.format_stack(frame))
        log.info(
            "Stack of thread {0} (tid={1}, pid={2}, daemon={3}):\n\n{4}",
            t_name,
            t_ident,
            pid,
            t_daemon,
            stack,
        )
    log.info("Finished dumping stacks for process {0}.", pid)
def dump_after(secs):
    """Invokes dump() on a background thread after waiting for the specified time."""

    def delayed_dump():
        time.sleep(secs)
        try:
            dump()
        except:
            log.swallow_exception()

    threading.Thread(target=delayed_dump, daemon=True).start()
| 1,526 | Python | 23.238095 | 85 | 0.574705 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/log.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import atexit
import contextlib
import functools
import inspect
import io
import os
import platform
import sys
import threading
import traceback
import debugpy
from debugpy.common import json, timestamp, util
LEVELS = ("debug", "info", "warning", "error")
"""Logging levels, lowest to highest importance.
"""
log_dir = os.getenv("DEBUGPY_LOG_DIR")
"""If not None, debugger logs its activity to a file named debugpy.*-<pid>.log
in the specified directory, where <pid> is the return value of os.getpid().
"""
timestamp_format = "09.3f"
"""Format spec used for timestamps. Can be changed to dial precision up or down.
"""
_lock = threading.RLock()
_tls = threading.local()
_files = {} # filename -> LogFile
_levels = set() # combined for all log files
def _update_levels():
    # Recomputes the union of enabled levels across every registered log file.
    global _levels
    combined = set()
    for file in _files.values():
        combined.update(file.levels)
    _levels = frozenset(combined)
class LogFile(object):
    """A single log destination with its own set of enabled levels.

    Instances register themselves in the module-global _files map on creation,
    and unregister on close().
    """
    def __init__(self, filename, file, levels=LEVELS, close_file=True):
        info("Also logging to {0}.", json.repr(filename))
        self.filename = filename
        self.file = file
        # Whether close() should also close the underlying file object.
        self.close_file = close_file
        self._levels = frozenset(levels)
        with _lock:
            # Register this file and refresh the combined level set before
            # writing the environment banner below (to this file only).
            _files[self.filename] = self
            _update_levels()
            info(
                "{0} {1}\n{2} {3} ({4}-bit)\ndebugpy {5}",
                platform.platform(),
                platform.machine(),
                platform.python_implementation(),
                platform.python_version(),
                64 if sys.maxsize > 2 ** 32 else 32,
                debugpy.__version__,
                _to_files=[self],
            )
    @property
    def levels(self):
        # The set of levels this file accepts; see the setter for updates.
        return self._levels
    @levels.setter
    def levels(self, value):
        with _lock:
            # `all` is accepted as shorthand for "every level".
            self._levels = frozenset(LEVELS if value is all else value)
            _update_levels()
    def write(self, level, output):
        if level in self.levels:
            try:
                self.file.write(output)
                self.file.flush()
            except Exception:
                # Logging must never raise into the caller.
                pass
    def close(self):
        with _lock:
            del _files[self.filename]
            _update_levels()
        info("Not logging to {0} anymore.", json.repr(self.filename))
        if self.close_file:
            try:
                self.file.close()
            except Exception:
                pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class NoLog(object):
    """A no-op stand-in for LogFile, used when file logging is disabled.

    Falsy, closeable, and usable as a context manager, just like LogFile.
    """

    file = filename = None

    def __bool__(self):
        return False

    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
# Used to inject a newline into stderr if logging there, to clean up the output
# when it's intermixed with regular prints from other sources.
def newline(level="info"):
    """Writes a bare newline at the given level, to visually separate log output
    from regular prints by other sources when logging to stderr.
    """
    with _lock:
        stderr.write(level, "\n")
def write(level, text, _to_files=all):
    """Writes an already-formatted message to the given log files (all registered
    files by default), and returns the text.

    Each message is prefixed with "L+timestamp: " (L being the capitalized first
    letter of the level), and continuation lines are indented to align under it.
    """
    assert level in LEVELS
    t = timestamp.current()
    format_string = "{0}+{1:" + timestamp_format + "}: "
    prefix = format_string.format(level[0].upper(), t)
    # Prepend the per-thread prefix installed by prefixed(), if any.
    text = getattr(_tls, "prefix", "") + text
    indent = "\n" + (" " * len(prefix))
    output = indent.join(text.split("\n"))
    output = prefix + output + "\n\n"
    with _lock:
        if _to_files is all:
            _to_files = _files.values()
        for file in _to_files:
            file.write(level, output)
    return text
def write_format(level, format_string, *args, **kwargs):
    """Formats the message and writes it at the given level; returns the
    formatted text, or None if no log file has that level enabled.
    """
    # Don't spend cycles doing expensive formatting if we don't have to. Errors are
    # always formatted, so that error() can return the text even if it's not logged.
    if level != "error" and level not in _levels:
        return
    try:
        text = format_string.format(*args, **kwargs)
    except Exception:
        reraise_exception()
    return write(level, text, kwargs.pop("_to_files", all))
debug = functools.partial(write_format, "debug")
info = functools.partial(write_format, "info")
warning = functools.partial(write_format, "warning")
def error(*args, **kwargs):
    """Logs an error.

    Returns the output wrapped in AssertionError. Thus, the following::

        raise log.error(s, ...)

    has the same effect as::

        log.error(...)
        assert False, (s.format(...))
    """
    # write_format() always formats "error"-level messages, so the returned
    # AssertionError carries the message even when no file logs errors.
    return AssertionError(write_format("error", *args, **kwargs))
def _exception(format_string="", *args, **kwargs):
    # Shared implementation for swallow_exception() and reraise_exception():
    # logs the exception's own traceback plus the stack of the logging site.
    level = kwargs.pop("level", "error")
    exc_info = kwargs.pop("exc_info", sys.exc_info())
    if format_string:
        format_string += "\n\n"
    format_string += "{exception}\nStack where logged:\n{stack}"
    exception = "".join(traceback.format_exception(*exc_info))
    f = inspect.currentframe()
    f = f.f_back if f else f  # don't log this frame
    try:
        stack = "".join(traceback.format_stack(f))
    finally:
        del f  # avoid cycles
    write_format(
        level, format_string, *args, exception=exception, stack=stack, **kwargs
    )
def swallow_exception(format_string="", *args, **kwargs):
    """Logs an exception with full traceback.

    If format_string is specified, it is formatted with format(*args, **kwargs), and
    prepended to the exception traceback on a separate line.

    If exc_info is specified, the exception it describes will be logged. Otherwise,
    sys.exc_info() - i.e. the exception being handled currently - will be logged.

    If level is specified, the exception will be logged as a message of that level.
    The default is "error".
    """
    # Unlike reraise_exception(), this does not re-raise - the exception is consumed.
    _exception(format_string, *args, **kwargs)
def reraise_exception(format_string="", *args, **kwargs):
    """Like swallow_exception(), but re-raises the current exception after logging it."""
    # exc_info is disallowed because the bare `raise` below can only re-raise
    # the exception currently being handled.
    assert "exc_info" not in kwargs
    _exception(format_string, *args, **kwargs)
    raise
def to_file(filename=None, prefix=None, levels=LEVELS):
    """Starts logging all messages at the specified levels to the designated file.

    Either filename or prefix must be specified, but not both.

    If filename is specified, it designates the log file directly.

    If prefix is specified, the log file is automatically created in options.log_dir,
    with filename computed as prefix + os.getpid(). If log_dir is None, no log file
    is created, and the function returns immediately.

    If the file with the specified or computed name is already being used as a log
    file, it is not overwritten, but its levels are updated as specified.

    The function returns an object with a close() method. When the object is closed,
    logs are not written into that file anymore. Alternatively, the returned object
    can be used in a with-statement:

        with log.to_file("some.log"):
            # now also logging to some.log
        # not logging to some.log anymore
    """
    assert (filename is not None) ^ (prefix is not None)
    if filename is None:
        if log_dir is None:
            return NoLog()
        try:
            os.makedirs(log_dir)
        except OSError:
            pass  # already exists, or not creatable - open() below will surface it
        filename = f"{log_dir}/{prefix}-{os.getpid()}.log"
    file = _files.get(filename)
    if file is None:
        file = LogFile(filename, io.open(filename, "w", encoding="utf-8"), levels)
    else:
        # Already logging to this file - just adjust its levels.
        file.levels = levels
    return file
@contextlib.contextmanager
def prefixed(format_string, *args, **kwargs):
    """Adds a prefix to all messages logged from the current thread for the duration
    of the context manager.
    """
    prefix = format_string.format(*args, **kwargs)
    old_prefix = getattr(_tls, "prefix", "")
    # Prefixes nest: the newest prefix goes in front of whatever was there.
    _tls.prefix = prefix + old_prefix
    try:
        yield
    finally:
        # Restore the previous prefix even if the body raises.
        _tls.prefix = old_prefix
def describe_environment(header):
    """Logs a report of Python environment paths (prefixes, site-packages,
    sysconfig paths, and key module locations), preceded by header.
    """
    import sysconfig
    import site  # noqa
    result = [header, "\n\n"]
    def report(s, *args, **kwargs):
        # Appends a formatted fragment to the report being accumulated.
        result.append(s.format(*args, **kwargs))
    def report_paths(get_paths, label=None):
        # Reports one path (or list of paths), obtained either by calling
        # get_paths() or by evaluating it as an expression string.
        prefix = f"    {label or get_paths}: "
        expr = None
        if not callable(get_paths):
            expr = get_paths
            get_paths = lambda: util.evaluate(expr)
        try:
            paths = get_paths()
        except AttributeError:
            # Attribute like sys.real_prefix may not exist in this interpreter.
            report("{0}<missing>\n", prefix)
            return
        except Exception:
            swallow_exception(
                "Error evaluating {0}",
                repr(expr) if expr else util.srcnameof(get_paths),
            )
            return
        if not isinstance(paths, (list, tuple)):
            paths = [paths]
        for p in sorted(paths):
            report("{0}{1}", prefix, p)
            if p is not None:
                rp = os.path.realpath(p)
                if p != rp:
                    # Show the resolved path when it differs (e.g. symlinks).
                    report("({0})", rp)
            report("\n")
            prefix = " " * len(prefix)
    report("System paths:\n")
    report_paths("sys.prefix")
    report_paths("sys.base_prefix")
    report_paths("sys.real_prefix")
    report_paths("site.getsitepackages()")
    report_paths("site.getusersitepackages()")
    site_packages = [
        p
        for p in sys.path
        if os.path.exists(p) and os.path.basename(p) == "site-packages"
    ]
    report_paths(lambda: site_packages, "sys.path (site-packages)")
    for name in sysconfig.get_path_names():
        expr = "sysconfig.get_path({0!r})".format(name)
        report_paths(expr)
    report_paths("os.__file__")
    report_paths("threading.__file__")
    report_paths("debugpy.__file__")
    result = "".join(result).rstrip("\n")
    info("{0}", result)
stderr = LogFile(
"<stderr>",
sys.stderr,
levels=os.getenv("DEBUGPY_LOG_STDERR", "warning error").split(),
close_file=False,
)
@atexit.register
def _close_files():
    # Flush and close every registered log file at interpreter shutdown.
    # Iterate over a copy, since file.close() mutates _files.
    for file in tuple(_files.values()):
        file.close()
# The following are helper shortcuts for printf debugging. They must never be used
# in production code.
def _repr(value): # pragma: no cover
    # Printf-debugging helper: logs repr(value) at warning level.
    warning("$REPR {0!r}", value)
def _vars(*names): # pragma: no cover
    # Printf-debugging helper: logs the caller's locals, optionally filtered
    # down to the given names.
    locals = inspect.currentframe().f_back.f_locals
    if names:
        locals = {name: locals[name] for name in names if name in locals}
    warning("$VARS {0!r}", locals)
def _stack(): # pragma: no cover
    # Printf-debugging helper: logs the current call stack.
    stack = "\n".join(traceback.format_stack())
    warning("$STACK:\n\n{0}", stack)
def _threads(): # pragma: no cover
    # Printf-debugging helper: logs all live threads.
    output = "\n".join([str(t) for t in threading.enumerate()])
    warning("$THREADS:\n\n{0}", output)
| 10,723 | Python | 26.782383 | 89 | 0.604775 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/timestamp.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""Provides monotonic timestamps with a resetable zero.
"""
import time
__all__ = ["current", "reset"]
def current():
    """Returns seconds elapsed since the zero point set by reset()."""
    return time.monotonic() - timestamp_zero
def reset():
    """Resets the zero point of current() to the present moment."""
    global timestamp_zero
    timestamp_zero = time.monotonic()
reset()
| 410 | Python | 16.869564 | 65 | 0.707317 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import annotations
import os
import typing
if typing.TYPE_CHECKING:
__all__: list[str]
__all__ = []
# The lower time bound for assuming that the process hasn't spawned successfully.
PROCESS_SPAWN_TIMEOUT = float(os.getenv("DEBUGPY_PROCESS_SPAWN_TIMEOUT", 15))
# The lower time bound for assuming that the process hasn't exited gracefully.
PROCESS_EXIT_TIMEOUT = float(os.getenv("DEBUGPY_PROCESS_EXIT_TIMEOUT", 5))
| 592 | Python | 30.210525 | 81 | 0.753378 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/sockets.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import socket
import sys
import threading
from debugpy.common import log
from debugpy.common.util import hide_thread_from_debugger
def create_server(host, port=0, backlog=socket.SOMAXCONN, timeout=None):
    """Return a local server socket listening on the given port.

    host defaults to "127.0.0.1" if None; port 0 (or None) lets the OS pick an
    ephemeral port. timeout, if given, is set on the listening socket.
    """
    assert backlog > 0
    if host is None:
        host = "127.0.0.1"
    if port is None:
        port = 0
    # Create the socket before the try block, so the error-handling path below
    # never references an unbound `server` if creation itself fails.
    server = _new_sock()
    try:
        if port != 0:
            # If binding to a specific port, make sure that the user doesn't have
            # to wait until the OS times out the socket to be able to use that port
            # again if the server or the adapter crash or are force-killed.
            if sys.platform == "win32":
                server.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                try:
                    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                except (AttributeError, OSError):
                    pass  # Not available everywhere
        server.bind((host, port))
        if timeout is not None:
            server.settimeout(timeout)
        server.listen(backlog)
    except Exception:
        server.close()
        raise
    return server
def create_client():
    """Return a client socket that may be connected to a remote address.

    TCP keepalive options are preconfigured by _new_sock().
    """
    return _new_sock()
def _new_sock():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
# Set TCP keepalive on an open socket.
# It activates after 1 second (TCP_KEEPIDLE,) of idleness,
# then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),
# and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 3)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
except (AttributeError, OSError):
pass # May not be available everywhere.
return sock
def shut_down(sock, how=socket.SHUT_RDWR):
    """Shut down the given socket.

    Propagates any OSError from the underlying shutdown() call (e.g. if the
    socket is not connected).
    """
    sock.shutdown(how)
def close_socket(sock):
    """Shuts down and closes the socket.

    Shutdown errors (e.g. on a socket that was never connected) are ignored;
    the socket is closed regardless.
    """
    try:
        sock.shutdown(socket.SHUT_RDWR)
    except Exception:
        pass
    sock.close()
def serve(name, handler, host, port=0, backlog=socket.SOMAXCONN, timeout=None):
    """Accepts TCP connections on the specified host and port, and invokes the
    provided handler function for every new connection.

    Accepting happens on a background daemon thread; this function returns the
    created server socket immediately. name is used only in log messages.
    """
    assert backlog > 0
    try:
        listener = create_server(host, port, backlog, timeout)
    except Exception:
        log.reraise_exception(
            "Error listening for incoming {0} connections on {1}:{2}:", name, host, port
        )
    # Re-read the actual bound address, in case an ephemeral port was assigned.
    host, port = listener.getsockname()
    log.info("Listening for incoming {0} connections on {1}:{2}...", name, host, port)
    def accept_worker():
        # Accept connections until the listener socket is closed, handing each
        # one to the handler.
        while True:
            try:
                sock, (other_host, other_port) = listener.accept()
            except (OSError, socket.error):
                # Listener socket has been closed.
                break
            log.info(
                "Accepted incoming {0} connection from {1}:{2}.",
                name,
                other_host,
                other_port,
            )
            handler(sock)
    thread = threading.Thread(target=accept_worker)
    thread.daemon = True
    # Keep the debugger from tracing its own accept loop.
    hide_thread_from_debugger(thread)
    thread.start()
    return listener
| 4,064 | Python | 30.269231 | 88 | 0.621801 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/json.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""Improved JSON serialization.
"""
import builtins
import json
import numbers
import operator
JsonDecoder = json.JSONDecoder
class JsonEncoder(json.JSONEncoder):
    """Customizable JSON encoder.

    If the object implements __getstate__, then that method is invoked, and its
    result is serialized instead of the object itself.
    """

    def default(self, value):
        get_state = getattr(value, "__getstate__", None)
        if get_state is not None:
            return get_state()
        return super().default(value)
class JsonObject(object):
    """A wrapped Python object that formats itself as JSON when asked for a string
    representation via str() or format().
    """
    json_encoder_factory = JsonEncoder
    """Used by __format__ when format_spec is not empty."""
    json_encoder = json_encoder_factory(indent=4)
    """The default encoder used by __format__ when format_spec is empty."""
    def __init__(self, value):
        # Don't double-wrap: value must be the raw object, not another JsonObject.
        assert not isinstance(value, JsonObject)
        self.value = value
    def __getstate__(self):
        # Deliberately unserializable - a JsonObject is a formatting wrapper,
        # not a value to be embedded in other JSON.
        raise NotImplementedError
    def __repr__(self):
        # builtins.repr is used explicitly because this module defines its own repr().
        return builtins.repr(self.value)
    def __str__(self):
        return format(self)
    def __format__(self, format_spec):
        """If format_spec is empty, uses self.json_encoder to serialize self.value
        as a string. Otherwise, format_spec is treated as an argument list to be
        passed to self.json_encoder_factory - which defaults to JSONEncoder - and
        then the resulting formatter is used to serialize self.value as a string.
        Example::
            format("{0} {0:indent=4,sort_keys=True}", json.repr(x))
        """
        if format_spec:
            # At this point, format_spec is a string that looks something like
            # "indent=4,sort_keys=True". What we want is to build a function call
            # from that which looks like:
            #
            #     json_encoder_factory(indent=4,sort_keys=True)
            #
            # which we can then eval() to create our encoder instance.
            make_encoder = "json_encoder_factory(" + format_spec + ")"
            encoder = eval(
                make_encoder, {"json_encoder_factory": self.json_encoder_factory}
            )
        else:
            encoder = self.json_encoder
        return encoder.encode(self.value)
# JSON property validators, for use with MessageDict.
#
# A validator is invoked with the actual value of the JSON property passed to it as
# the sole argument; or if the property is missing in JSON, then () is passed. Note
# that None represents an actual null in JSON, while () is a missing value.
#
# The validator must either raise TypeError or ValueError describing why the property
# value is invalid, or else return the value of the property, possibly after performing
# some substitutions - e.g. replacing () with some default value.
def _converter(value, classinfo):
    """Converts value (typically str) to the first numeric type in classinfo
    that accepts it; returns None if no conversion is possible.
    """
    for one_info in classinfo:
        if issubclass(one_info, numbers.Number):
            try:
                return one_info(value)
            except (TypeError, ValueError):
                # Not convertible to this type - try the next one.
                # (TypeError is caught too, so e.g. int(None) doesn't escape
                # with a confusing message; of_type() raises its own error.)
                pass


def of_type(*classinfo, **kwargs):
    """Returns a validator for a JSON property that requires it to have a value of
    the specified type. If optional=True, () is also allowed.

    The meaning of classinfo is the same as for isinstance().
    """
    assert len(classinfo)
    optional = kwargs.pop("optional", False)
    assert not len(kwargs)

    def validate(value):
        if (optional and value == ()) or isinstance(value, classinfo):
            return value
        else:
            converted_value = _converter(value, classinfo)
            # Compare against None explicitly: a successful conversion may
            # yield a falsy value (e.g. 0 or 0.0), which must still be accepted.
            if converted_value is not None:
                return converted_value
            if not optional and value == ():
                raise ValueError("must be specified")
            raise TypeError("must be " + " or ".join(t.__name__ for t in classinfo))

    return validate
def default(default):
    """Returns a validator for a JSON property with a default value.

    The validator will only allow property values that have the same type as the
    specified default value.
    """

    def validate(value):
        if value == ():
            # Property was missing in JSON - substitute the default.
            return default
        if isinstance(value, type(default)):
            return value
        raise TypeError("must be {0}".format(type(default).__name__))

    return validate
def enum(*values, **kwargs):
    """Returns a validator for a JSON enum.

    The validator will only allow the property to have one of the specified values.

    If optional=True, and the property is missing, the first value specified is used
    as the default.
    """
    assert len(values)
    optional = kwargs.pop("optional", False)
    assert not len(kwargs)

    def validate(value):
        if optional and value == ():
            # Property was missing - the first enum value is the default.
            return values[0]
        if value in values:
            return value
        raise ValueError("must be one of: {0!r}".format(list(values)))

    return validate
def array(validate_item=False, vectorize=False, size=None):
    """Returns a validator for a JSON array.

    If the property is missing, it is treated as if it were []. Otherwise, it must
    be a list.

    If validate_item=False, it's treated as if it were (lambda x: x) - i.e. any item
    is considered valid, and is unchanged. If validate_item is a type or a tuple,
    it's treated as if it were json.of_type(validate).

    Every item in the list is replaced with validate_item(item) in-place, propagating
    any exceptions raised by the latter. If validate_item is a type or a tuple, it is
    treated as if it were json.of_type(validate_item).

    If vectorize=True, and the value is neither a list nor a dict, it is treated as
    if it were a single-element list containing that single value - e.g. "foo" is
    then the same as ["foo"]; but {} is an error, and not [{}].

    If size is not None, it can be an int, a tuple of one int, a tuple of two ints,
    or a set. If it's an int, the array must have exactly that many elements. If it's
    a tuple of one int, it's the minimum length. If it's a tuple of two ints, they
    are the minimum and the maximum lengths. If it's a set, it's the set of sizes that
    are valid - e.g. for {2, 4}, the array can be either 2 or 4 elements long.
    """
    if not validate_item:
        validate_item = lambda x: x
    elif isinstance(validate_item, type) or isinstance(validate_item, tuple):
        validate_item = of_type(validate_item)
    if size is None:
        validate_size = lambda _: True
    elif isinstance(size, set):
        size = {operator.index(n) for n in size}
        validate_size = lambda value: (
            len(value) in size
            or "must have {0} elements".format(
                " or ".join(str(n) for n in sorted(size))
            )
        )
    elif isinstance(size, tuple):
        assert 1 <= len(size) <= 2
        size = tuple(operator.index(n) for n in size)
        min_len, max_len = (size + (None,))[0:2]
        # Bug fix: the maximum-length check previously used len(value) < max_len,
        # which never triggered for over-long arrays; it must be len(value) > max_len.
        validate_size = lambda value: (
            "must have at least {0} elements".format(min_len)
            if len(value) < min_len
            else "must have at most {0} elements".format(max_len)
            if max_len is not None and len(value) > max_len
            else True
        )
    else:
        size = operator.index(size)
        validate_size = lambda value: (
            len(value) == size or "must have {0} elements".format(size)
        )
    def validate(value):
        if value == ():
            value = []
        elif vectorize and not isinstance(value, (list, dict)):
            value = [value]
        of_type(list)(value)
        size_err = validate_size(value)  # True if valid, str if error
        if size_err is not True:
            raise ValueError(size_err)
        for i, item in enumerate(value):
            try:
                value[i] = validate_item(item)
            except (TypeError, ValueError) as exc:
                raise type(exc)(f"[{repr(i)}] {exc}")
        return value
    return validate
def object(validate_value=False):
    """Returns a validator for a JSON object.
    If the property is missing, it is treated as if it were {}. Otherwise, it must
    be a dict.
    If validate_value=False, it's treated as if it were (lambda x: x) - i.e. any
    value is considered valid, and is unchanged. If validate_value is a type or a
    tuple, it's treated as if it were json.of_type(validate_value).
    Every value in the dict is replaced with validate_value(value) in-place,
    propagating any exceptions raised by the latter. Keys are not affected.
    """
    if isinstance(validate_value, (type, tuple)):
        validate_value = of_type(validate_value)

    def validate(value):
        # () is the sentinel for a missing property - substitute the default.
        if value == ():
            return {}
        of_type(dict)(value)
        if not validate_value:
            return value
        for key in value:
            try:
                value[key] = validate_value(value[key])
            except (TypeError, ValueError) as exc:
                raise type(exc)(f"[{key!r}] {exc}")
        return value

    return validate
def repr(value):
    # Shadows the builtin repr() within this module: json.repr(value) wraps the
    # value in a JsonObject (defined elsewhere in this module) for formatting.
    return JsonObject(value)
# Convenience aliases, so callers can use this module as a facade for
# serialization as well (presumably `json` here is the stdlib module imported
# at the top of the file, outside this view - confirm).
dumps = json.dumps
loads = json.loads
| 9,674 | Python | 32.020478 | 88 | 0.620219 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/messaging.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
from __future__ import annotations
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from debugpy.common import json, log, util
from debugpy.common.util import hide_thread_from_debugger
class JsonIOError(IOError):
    """Indicates that a read or write operation on JsonIOStream has failed."""

    def __init__(self, *args, **kwargs):
        cause = kwargs.pop("cause", None)
        stream = kwargs.pop("stream")
        message_args = tuple(args)
        # With no explicit message, borrow one from the underlying exception.
        if cause is not None and not message_args:
            message_args = (str(cause),)
        super().__init__(*message_args, **kwargs)
        # The stream that couldn't be read or written. Set by
        # JsonIOStream.read_json() and JsonIOStream.write_json();
        # JsonMessageChannel relies on this value to decide whether a
        # NoMoreMessages that bubbles up to the message loop belongs to it.
        self.stream = stream
        # The underlying exception, if any.
        self.cause = cause
class NoMoreMessages(JsonIOError, EOFError):
    """Indicates that there are no more messages that can be read from or written
    to a stream.
    """

    def __init__(self, *args, **kwargs):
        # Supply a default message when the caller didn't provide one.
        if not args:
            args = ("No more messages",)
        super().__init__(*args, **kwargs)
class JsonIOStream(object):
    """Implements a JSON value stream over two byte streams (input and output).
    Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
    """
    # Upper bound (16 MiB - 1) on the Content-Length of a single message body.
    MAX_BODY_SIZE = 0xFFFFFF
    json_decoder_factory = json.JsonDecoder
    """Used by read_json() when decoder is None."""
    json_encoder_factory = json.JsonEncoder
    """Used by write_json() when encoder is None."""
    @classmethod
    def from_stdio(cls, name="stdio"):
        """Creates a new instance that receives messages from sys.stdin, and sends
        them to sys.stdout.
        """
        return cls(sys.stdin.buffer, sys.stdout.buffer, name)
    @classmethod
    def from_process(cls, process, name="stdio"):
        """Creates a new instance that receives messages from process.stdin, and sends
        them to process.stdout.
        """
        return cls(process.stdout, process.stdin, name)
    @classmethod
    def from_socket(cls, sock, name=None):
        """Creates a new instance that sends and receives messages over a socket."""
        sock.settimeout(None)  # make socket blocking
        if name is None:
            name = repr(sock)
        # TODO: investigate switching to buffered sockets; readline() on unbuffered
        # sockets is very slow! Although the implementation of readline() itself is
        # native code, it calls read(1) in a loop - and that then ultimately calls
        # SocketIO.readinto(), which is implemented in Python.
        socket_io = sock.makefile("rwb", 0)
        # SocketIO.close() doesn't close the underlying socket.
        def cleanup():
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except Exception:
                pass
            sock.close()
        return cls(socket_io, socket_io, name, cleanup)
    def __init__(self, reader, writer, name=None, cleanup=lambda: None):
        """Creates a new JsonIOStream.
        reader must be a BytesIO-like object, from which incoming messages will be
        read by read_json().
        writer must be a BytesIO-like object, into which outgoing messages will be
        written by write_json().
        cleanup must be a callable; it will be invoked without arguments when the
        stream is closed.
        reader.readline() must treat "\n" as the line terminator, and must leave "\r"
        as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
        """
        if name is None:
            name = f"reader={reader!r}, writer={writer!r}"
        self.name = name
        self._reader = reader
        self._writer = writer
        self._cleanup = cleanup
        self._closed = False
    def close(self):
        """Closes the stream, the reader, and the writer."""
        # Idempotent: a second close() is a no-op.
        if self._closed:
            return
        self._closed = True
        log.debug("Closing {0} message stream", self.name)
        try:
            try:
                # Close the writer first, so that the other end of the connection has
                # its message loop waiting on read() unblocked. If there is an exception
                # while closing the writer, we still want to try to close the reader -
                # only one exception can bubble up, so if both fail, it'll be the one
                # from reader.
                try:
                    self._writer.close()
                finally:
                    if self._reader is not self._writer:
                        self._reader.close()
            finally:
                self._cleanup()
        except Exception:
            log.reraise_exception("Error while closing {0} message stream", self.name)
    def _log_message(self, dir, data, logger=log.debug):
        # Uniform traffic logging for read/write sites; failure paths pass a
        # different logger (e.g. log.reraise_exception).
        return logger("{0} {1} {2}", self.name, dir, data)
    def _read_line(self, reader):
        # Reads one header line, accumulating chunks until the "\r\n" terminator
        # is seen; the terminator is stripped from the returned bytes.
        # Raises NoMoreMessages on EOF or on a read error.
        line = b""
        while True:
            try:
                line += reader.readline()
            except Exception as exc:
                raise NoMoreMessages(str(exc), stream=self)
            if not line:
                raise NoMoreMessages(stream=self)
            if line.endswith(b"\r\n"):
                line = line[0:-2]
                return line
    def read_json(self, decoder=None):
        """Read a single JSON value from reader.
        Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
        if there are no more values to be read.
        """
        decoder = decoder if decoder is not None else self.json_decoder_factory()
        reader = self._reader
        read_line = functools.partial(self._read_line, reader)
        # If any error occurs while reading and parsing the message, log the original
        # raw message data as is, so that it's possible to diagnose missing or invalid
        # headers, encoding issues, JSON syntax errors etc.
        def log_message_and_reraise_exception(format_string="", *args, **kwargs):
            if format_string:
                format_string += "\n\n"
            format_string += "{name} -->\n{raw_lines}"
            raw_lines = b"".join(raw_chunks).split(b"\n")
            raw_lines = "\n".join(repr(line) for line in raw_lines)
            log.reraise_exception(
                format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
            )
        raw_chunks = []
        headers = {}
        while True:
            try:
                line = read_line()
            except Exception:
                # Only log it if we have already read some headers, and are looking
                # for a blank line terminating them. If this is the very first read,
                # there's no message data to log in any case, and the caller might
                # be anticipating the error - e.g. NoMoreMessages on disconnect.
                if headers:
                    log_message_and_reraise_exception(
                        "Error while reading message headers:"
                    )
                else:
                    raise
            raw_chunks += [line, b"\n"]
            # A blank line terminates the header section (DAP base protocol).
            if line == b"":
                break
            key, _, value = line.partition(b":")
            headers[key] = value
        try:
            length = int(headers[b"Content-Length"])
            if not (0 <= length <= self.MAX_BODY_SIZE):
                raise ValueError
        except (KeyError, ValueError):
            try:
                raise IOError("Content-Length is missing or invalid:")
            except Exception:
                log_message_and_reraise_exception()
        body_start = len(raw_chunks)
        body_remaining = length
        # read() may return fewer bytes than requested; loop until the whole
        # body has been received.
        while body_remaining > 0:
            try:
                chunk = reader.read(body_remaining)
                if not chunk:
                    raise EOFError
            except Exception as exc:
                # Not logged due to https://github.com/microsoft/ptvsd/issues/1699
                raise NoMoreMessages(str(exc), stream=self)
            raw_chunks.append(chunk)
            body_remaining -= len(chunk)
        assert body_remaining == 0
        body = b"".join(raw_chunks[body_start:])
        try:
            body = body.decode("utf-8")
        except Exception:
            log_message_and_reraise_exception()
        try:
            body = decoder.decode(body)
        except Exception:
            log_message_and_reraise_exception()
        # If parsed successfully, log as JSON for readability.
        self._log_message("-->", body)
        return body
    def write_json(self, value, encoder=None):
        """Write a single JSON value into writer.
        Value is written as encoded by encoder.encode().
        """
        if self._closed:
            # Don't log this - it's a common pattern to write to a stream while
            # anticipating EOFError from it in case it got closed concurrently.
            raise NoMoreMessages(stream=self)
        encoder = encoder if encoder is not None else self.json_encoder_factory()
        writer = self._writer
        # Format the value as a message, and try to log any failures using as much
        # information as we already have at the point of the failure. For example,
        # if it fails after it is serialized to JSON, log that JSON.
        try:
            body = encoder.encode(value)
        except Exception:
            self._log_message("<--", repr(value), logger=log.reraise_exception)
        body = body.encode("utf-8")
        header = f"Content-Length: {len(body)}\r\n\r\n".encode("ascii")
        data = header + body
        data_written = 0
        try:
            # write() may accept only part of the buffer; keep writing until the
            # whole packet has been handed off, then flush.
            while data_written < len(data):
                written = writer.write(data[data_written:])
                data_written += written
            writer.flush()
        except Exception as exc:
            self._log_message("<--", value, logger=log.swallow_exception)
            raise JsonIOError(stream=self, cause=exc)
        self._log_message("<--", value)
    def __repr__(self):
        return f"{type(self).__name__}({self.name!r})"
class MessageDict(collections.OrderedDict):
    """A specialized dict that is used for JSON message payloads - Request.arguments,
    Response.body, and Event.body.
    For all members that normally throw KeyError when a requested key is missing, this
    dict raises InvalidMessageError instead. Thus, a message handler can skip checks
    for missing properties, and just work directly with the payload on the assumption
    that it is valid according to the protocol specification; if anything is missing,
    it will be reported automatically in the proper manner.
    If the value for the requested key is itself a dict, it is returned as is, and not
    automatically converted to MessageDict. Thus, to enable convenient chaining - e.g.
    d["a"]["b"]["c"] - the dict must consistently use MessageDict instances rather than
    vanilla dicts for all its values, recursively. This is guaranteed for the payload
    of all freshly received messages (unless and until it is mutated), but there is no
    such guarantee for outgoing messages.
    """
    def __init__(self, message, items=None):
        assert message is None or isinstance(message, Message)
        if items is None:
            super().__init__()
        else:
            super().__init__(items)
        self.message = message
        """The Message object that owns this dict.
        For any instance exposed via a Message object corresponding to some incoming
        message, it is guaranteed to reference that Message object. There is no similar
        guarantee for outgoing messages.
        """
    def __repr__(self):
        # Prefer a JSON rendering; fall back to the plain OrderedDict repr if
        # the payload isn't serializable.
        try:
            return format(json.repr(self))
        except Exception:
            return super().__repr__()
    def __call__(self, key, validate, optional=False):
        """Like get(), but with validation.
        The item is first retrieved as if with self.get(key, default=()) - the default
        value is () rather than None, so that JSON nulls are distinguishable from
        missing properties.
        If optional=True, and the value is (), it's returned as is. Otherwise, the
        item is validated by invoking validate(item) on it.
        If validate=False, it's treated as if it were (lambda x: x) - i.e. any value
        is considered valid, and is returned unchanged. If validate is a type or a
        tuple, it's treated as json.of_type(validate). Otherwise, if validate is not
        callable(), it's treated as json.default(validate).
        If validate() returns successfully, the item is substituted with the value
        it returns - thus, the validator can e.g. replace () with a suitable default
        value for the property.
        If validate() raises TypeError or ValueError, raises InvalidMessageError with
        the same text that applies_to(self.messages).
        See debugpy.common.json for reusable validators.
        """
        if not validate:
            validate = lambda x: x
        elif isinstance(validate, type) or isinstance(validate, tuple):
            validate = json.of_type(validate, optional=optional)
        elif not callable(validate):
            validate = json.default(validate)
        value = self.get(key, ())
        try:
            value = validate(value)
        except (TypeError, ValueError) as exc:
            # NOTE(review): when this dict has no owning message, this falls back
            # to the Message class itself, so isnt_valid is invoked unbound with
            # the format string as "self" - confirm this path is intended.
            message = Message if self.message is None else self.message
            err = str(exc)
            if not err.startswith("["):
                err = " " + err
            raise message.isnt_valid("{0}{1}", json.repr(key), err)
        return value
    # Decorator: converts KeyError from the wrapped dict operation into
    # InvalidMessageError attributed to the owning message.
    def _invalid_if_no_key(func):
        def wrap(self, key, *args, **kwargs):
            try:
                return func(self, key, *args, **kwargs)
            except KeyError:
                message = Message if self.message is None else self.message
                raise message.isnt_valid("missing property {0!r}", key)
        return wrap
    __getitem__ = _invalid_if_no_key(collections.OrderedDict.__getitem__)
    __delitem__ = _invalid_if_no_key(collections.OrderedDict.__delitem__)
    pop = _invalid_if_no_key(collections.OrderedDict.pop)
    # The decorator is only needed at class-creation time; don't leave it
    # behind as a public-looking method.
    del _invalid_if_no_key
def _payload(value):
"""JSON validator for message payload.
If that value is missing or null, it is treated as if it were {}.
"""
if value is not None and value != ():
if isinstance(value, dict): # can be int, str, list...
assert isinstance(value, MessageDict)
return value
# Missing payload. Construct a dummy MessageDict, and make it look like it was
# deserialized. See JsonMessageChannel._parse_incoming_message for why it needs
# to have associate_with().
def associate_with(message):
value.message = message
value = MessageDict(None)
value.associate_with = associate_with
return value
class Message(object):
    """Represents a fully parsed incoming or outgoing message.
    https://microsoft.github.io/debug-adapter-protocol/specification#protocolmessage
    """
    def __init__(self, channel, seq, json=None):
        # The JsonMessageChannel that this message belongs to.
        self.channel = channel
        self.seq = seq
        """Sequence number of the message in its channel.
        This can be None for synthesized Responses.
        """
        self.json = json
        """For incoming messages, the MessageDict containing raw JSON from which
        this message was originally parsed.
        """
    def __str__(self):
        # Synthesized messages have no raw JSON; fall back to repr() for those.
        return json.repr(self.json) if self.json is not None else repr(self)
    def describe(self):
        """A brief description of the message that is enough to identify it.
        Examples:
        '#1 request "launch" from IDE'
        '#2 response to #1 request "launch" from IDE'.
        """
        raise NotImplementedError
    @property
    def payload(self) -> MessageDict:
        """Payload of the message - self.body or self.arguments, depending on the
        message type.
        """
        raise NotImplementedError
    def __call__(self, *args, **kwargs):
        """Same as self.payload(...)."""
        return self.payload(*args, **kwargs)
    def __contains__(self, key):
        """Same as (key in self.payload)."""
        return key in self.payload
    def is_event(self, *event):
        """Returns True if this message is an Event of one of the specified types."""
        if not isinstance(self, Event):
            return False
        # With no types specified, any Event matches.
        return event == () or self.event in event
    def is_request(self, *command):
        """Returns True if this message is a Request of one of the specified types."""
        if not isinstance(self, Request):
            return False
        return command == () or self.command in command
    def is_response(self, *command):
        """Returns True if this message is a Response to a request of one of the
        specified types.
        """
        if not isinstance(self, Response):
            return False
        return command == () or self.request.command in command
    def error(self, exc_type, format_string, *args, **kwargs):
        """Returns a new exception of the specified type from the point at which it is
        invoked, with the specified formatted message as the reason.
        The resulting exception will have its cause set to the Message object on which
        error() was called. Additionally, if that message is a Request, a failure
        response is immediately sent.
        """
        assert issubclass(exc_type, MessageHandlingError)
        silent = kwargs.pop("silent", False)
        reason = format_string.format(*args, **kwargs)
        exc = exc_type(reason, self, silent)  # will log it
        if isinstance(self, Request):
            self.respond(exc)
        return exc
    def isnt_valid(self, *args, **kwargs):
        """Same as self.error(InvalidMessageError, ...)."""
        return self.error(InvalidMessageError, *args, **kwargs)
    def cant_handle(self, *args, **kwargs):
        """Same as self.error(MessageHandlingError, ...)."""
        return self.error(MessageHandlingError, *args, **kwargs)
class Event(Message):
    """Represents an incoming event.
    https://microsoft.github.io/debug-adapter-protocol/specification#event
    It is guaranteed that body is a MessageDict associated with this Event, and so
    are all the nested dicts in it. If "body" was missing or null in JSON, body is
    an empty dict.
    To handle the event, JsonMessageChannel tries to find a handler for this event in
    JsonMessageChannel.handlers. Given event="X", if handlers.X_event exists, then it
    is the specific handler for this event. Otherwise, handlers.event must exist, and
    it is the generic handler for this event. A missing handler is a fatal error.
    No further incoming messages are processed until the handler returns, except for
    responses to requests that have wait_for_response() invoked on them.
    To report failure to handle the event, the handler must raise an instance of
    MessageHandlingError that applies_to() the Event object it was handling. Any such
    failure is logged, after which the message loop moves on to the next message.
    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Event object.
    """
    def __init__(self, channel, seq, event, body, json=None):
        super().__init__(channel, seq, json)
        self.event = event
        # Freshly parsed payloads carry associate_with() (see _payload); bind
        # the body to this Event so its validation errors reference it.
        if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
            body.associate_with(self)
        self.body = body
    def describe(self):
        return f"#{self.seq} event {json.repr(self.event)} from {self.channel}"
    @property
    def payload(self):
        return self.body
    @staticmethod
    def _parse(channel, message_dict):
        # message_dict(...) validates each property, raising InvalidMessageError
        # on missing or mistyped values (see MessageDict.__call__).
        seq = message_dict("seq", int)
        event = message_dict("event", str)
        body = message_dict("body", _payload)
        message = Event(channel, seq, event, body, json=message_dict)
        channel._enqueue_handlers(message, message._handle)
    def _handle(self):
        channel = self.channel
        handler = channel._get_handler_for("event", self.event)
        try:
            try:
                result = handler(self)
                # Event handlers have nothing to respond to; returning a value
                # indicates a programming error in the handler.
                assert (
                    result is None
                ), f"Handler {util.srcnameof(handler)} tried to respond to {self.describe()}."
            except MessageHandlingError as exc:
                if not exc.applies_to(self):
                    raise
                log.error(
                    "Handler {0}\ncouldn't handle {1}:\n{2}",
                    util.srcnameof(handler),
                    self.describe(),
                    str(exc),
                )
        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle {1}:",
                util.srcnameof(handler),
                self.describe(),
            )
# Unique sentinel; compared by identity (result is NO_RESPONSE) in Request._handle.
NO_RESPONSE = object()
"""Can be returned from a request handler in lieu of the response body, to indicate
that no response is to be sent.
Request.respond() must be invoked explicitly at some later point to provide a response.
"""
class Request(Message):
    """Represents an incoming or an outgoing request.
    Incoming requests are represented directly by instances of this class.
    Outgoing requests are represented by instances of OutgoingRequest, which provides
    additional functionality to handle responses.
    For incoming requests, it is guaranteed that arguments is a MessageDict associated
    with this Request, and so are all the nested dicts in it. If "arguments" was missing
    or null in JSON, arguments is an empty dict.
    To handle the request, JsonMessageChannel tries to find a handler for this request
    in JsonMessageChannel.handlers. Given command="X", if handlers.X_request exists,
    then it is the specific handler for this request. Otherwise, handlers.request must
    exist, and it is the generic handler for this request. A missing handler is a fatal
    error.
    The handler is then invoked with the Request object as its sole argument.
    If the handler itself invokes respond() on the Request at any point, then it must
    not return any value.
    Otherwise, if the handler returns NO_RESPONSE, no response to the request is sent.
    It must be sent manually at some later point via respond().
    Otherwise, a response to the request is sent with the returned value as the body.
    To fail the request, the handler can return an instance of MessageHandlingError,
    or respond() with one, or raise one such that it applies_to() the Request object
    being handled.
    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Request object.
    """
    def __init__(self, channel, seq, command, arguments, json=None):
        super().__init__(channel, seq, json)
        self.command = command
        # Freshly parsed payloads carry associate_with() (see _payload); bind
        # the arguments to this Request so validation errors reference it.
        if isinstance(arguments, MessageDict) and hasattr(arguments, "associate_with"):
            arguments.associate_with(self)
        self.arguments = arguments
        self.response = None
        """Response to this request.
        For incoming requests, it is set as soon as the request handler returns.
        For outgoing requests, it is set as soon as the response is received, and
        before self._handle_response is invoked.
        """
    def describe(self):
        return f"#{self.seq} request {json.repr(self.command)} from {self.channel}"
    @property
    def payload(self):
        return self.arguments
    def respond(self, body):
        # A request can only be responded to once.
        assert self.response is None
        d = {"type": "response", "request_seq": self.seq, "command": self.command}
        if isinstance(body, Exception):
            d["success"] = False
            d["message"] = str(body)
        else:
            d["success"] = True
            if body is not None and body != {}:
                d["body"] = body
        # _send_message is a context manager yielding the seq of the sent message.
        with self.channel._send_message(d) as seq:
            pass
        self.response = Response(self.channel, seq, self, body)
    @staticmethod
    def _parse(channel, message_dict):
        # message_dict(...) validates each property, raising InvalidMessageError
        # on missing or mistyped values (see MessageDict.__call__).
        seq = message_dict("seq", int)
        command = message_dict("command", str)
        arguments = message_dict("arguments", _payload)
        message = Request(channel, seq, command, arguments, json=message_dict)
        channel._enqueue_handlers(message, message._handle)
    def _handle(self):
        channel = self.channel
        handler = channel._get_handler_for("request", self.command)
        try:
            try:
                result = handler(self)
            except MessageHandlingError as exc:
                if not exc.applies_to(self):
                    raise
                # A failed request is reported back via a failure response.
                result = exc
                log.error(
                    "Handler {0}\ncouldn't handle {1}:\n{2}",
                    util.srcnameof(handler),
                    self.describe(),
                    str(exc),
                )
            # See the class docstring for the contract enforced by these asserts.
            if result is NO_RESPONSE:
                assert self.response is None, (
                    "Handler {0} for {1} must not return NO_RESPONSE if it has already "
                    "invoked request.respond().".format(
                        util.srcnameof(handler), self.describe()
                    )
                )
            elif self.response is not None:
                assert result is None or result is self.response.body, (
                    "Handler {0} for {1} must not return a response body if it has "
                    "already invoked request.respond().".format(
                        util.srcnameof(handler), self.describe()
                    )
                )
            else:
                assert result is not None, (
                    "Handler {0} for {1} must either call request.respond() before it "
                    "returns, or return the response body, or return NO_RESPONSE.".format(
                        util.srcnameof(handler), self.describe()
                    )
                )
                try:
                    self.respond(result)
                except NoMoreMessages:
                    log.warning(
                        "Channel was closed before the response from handler {0} to {1} could be sent",
                        util.srcnameof(handler),
                        self.describe(),
                    )
        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle {1}:",
                util.srcnameof(handler),
                self.describe(),
            )
class OutgoingRequest(Request):
    """Represents an outgoing request, for which it is possible to wait for a
    response to be received, and register a response handler.
    """

    # Outgoing requests are never parsed from incoming JSON, nor handled by this
    # process - disable the machinery inherited from Request.
    _parse = _handle = None

    def __init__(self, channel, seq, command, arguments):
        super().__init__(channel, seq, command, arguments)
        # Handlers registered via on_response() that haven't been enqueued yet.
        self._response_handlers = []

    def describe(self):
        # Bug fix: the "#" prefix for the sequence number was missing, making
        # these descriptions inconsistent with Request.describe() and
        # Response.describe().
        return f"#{self.seq} request {json.repr(self.command)} to {self.channel}"

    def wait_for_response(self, raise_if_failed=True):
        """Waits until a response is received for this request, records the Response
        object for it in self.response, and returns response.body.
        If no response was received from the other party before the channel closed,
        self.response is a synthesized Response with body=NoMoreMessages().
        If raise_if_failed=True and response.success is False, raises response.body
        instead of returning.
        """
        with self.channel:
            while self.response is None:
                self.channel._handlers_enqueued.wait()
        if raise_if_failed and not self.response.success:
            raise self.response.body
        return self.response.body

    def on_response(self, response_handler):
        """Registers a handler to invoke when a response is received for this request.
        The handler is invoked with Response as its sole argument.
        If response has already been received, invokes the handler immediately.
        It is guaranteed that self.response is set before the handler is invoked.
        If no response was received from the other party before the channel closed,
        self.response is a dummy Response with body=NoMoreMessages().
        The handler is always invoked asynchronously on an unspecified background
        thread - thus, the caller of on_response() can never be blocked or deadlocked
        by the handler.
        No further incoming messages are processed until the handler returns, except for
        responses to requests that have wait_for_response() invoked on them.
        """
        with self.channel:
            self._response_handlers.append(response_handler)
            self._enqueue_response_handlers()

    def _enqueue_response_handlers(self):
        # Both call sites (on_response and Response._parse) hold the channel lock.
        response = self.response
        if response is None:
            # Response._parse() will submit the handlers when response is received.
            return

        def run_handlers():
            for handler in handlers:
                try:
                    try:
                        handler(response)
                    except MessageHandlingError as exc:
                        if not exc.applies_to(response):
                            raise
                        log.error(
                            "Handler {0}\ncouldn't handle {1}:\n{2}",
                            util.srcnameof(handler),
                            response.describe(),
                            str(exc),
                        )
                except Exception:
                    log.reraise_exception(
                        "Handler {0}\ncouldn't handle {1}:",
                        util.srcnameof(handler),
                        response.describe(),
                    )

        # Snapshot the registered handlers, hand them to the channel's handler
        # queue, and clear the list so each handler runs exactly once.
        handlers = self._response_handlers[:]
        self.channel._enqueue_handlers(response, run_handlers)
        del self._response_handlers[:]
class Response(Message):
    """Represents an incoming or an outgoing response to a Request.
    https://microsoft.github.io/debug-adapter-protocol/specification#response
    error_message corresponds to "message" in JSON, and is renamed for clarity.
    If success is False, body is None. Otherwise, it is a MessageDict associated
    with this Response, and so are all the nested dicts in it. If "body" was missing
    or null in JSON, body is an empty dict.
    If this is a response to an outgoing request, it will be handled by the handler
    registered via self.request.on_response(), if any.
    Regardless of whether there is such a handler, OutgoingRequest.wait_for_response()
    can also be used to retrieve and handle the response. If there is a handler, it is
    executed before wait_for_response() returns.
    No further incoming messages are processed until the handler returns, except for
    responses to requests that have wait_for_response() invoked on them.
    To report failure to handle the event, the handler must raise an instance of
    MessageHandlingError that applies_to() the Response object it was handling. Any
    such failure is logged, after which the message loop moves on to the next message.
    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Response object.
    """
    def __init__(self, channel, seq, request, body, json=None):
        super().__init__(channel, seq, json)
        self.request = request
        """The request to which this is the response."""
        # Freshly parsed payloads carry associate_with() (see _payload); bind
        # the body to this Response so validation errors reference it.
        if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
            body.associate_with(self)
        self.body = body
        """Body of the response if the request was successful, or an instance
        of some class derived from Exception it it was not.
        If a response was received from the other side, but request failed, it is an
        instance of MessageHandlingError containing the received error message. If the
        error message starts with InvalidMessageError.PREFIX, then it's an instance of
        the InvalidMessageError specifically, and that prefix is stripped.
        If no response was received from the other party before the channel closed,
        it is an instance of NoMoreMessages.
        """
    def describe(self):
        return f"#{self.seq} response to {self.request.describe()}"
    @property
    def payload(self):
        return self.body
    @property
    def success(self):
        """Whether the request succeeded or not."""
        return not isinstance(self.body, Exception)
    @property
    def result(self):
        """Result of the request. Returns the value of response.body, unless it
        is an exception, in which case it is raised instead.
        """
        if self.success:
            return self.body
        else:
            raise self.body
    @staticmethod
    def _parse(channel, message_dict, body=None):
        # A non-None body means this Response is being synthesized locally
        # (e.g. on disconnect), so it has no sequence number of its own.
        seq = message_dict("seq", int) if (body is None) else None
        request_seq = message_dict("request_seq", int)
        command = message_dict("command", str)
        success = message_dict("success", bool)
        if body is None:
            if success:
                body = message_dict("body", _payload)
            else:
                error_message = message_dict("message", str)
                exc_type = MessageHandlingError
                if error_message.startswith(InvalidMessageError.PREFIX):
                    error_message = error_message[len(InvalidMessageError.PREFIX) :]
                    exc_type = InvalidMessageError
                body = exc_type(error_message, silent=True)
        try:
            with channel:
                request = channel._sent_requests.pop(request_seq)
                known_request = True
        except KeyError:
            # Synthetic Request that only has seq and command as specified in response
            # JSON, for error reporting purposes.
            request = OutgoingRequest(channel, request_seq, command, "<unknown>")
            known_request = False
        if not success:
            body.cause = request
        response = Response(channel, seq, request, body, json=message_dict)
        with channel:
            request.response = response
            request._enqueue_response_handlers()
        if known_request:
            return response
        else:
            raise response.isnt_valid(
                "request_seq={0} does not match any known request", request_seq
            )
class Disconnect(Message):
    """A dummy message used to represent disconnect. It's always the last message
    received from any channel.
    """

    def __init__(self, channel):
        # Synthesized locally, so it carries no sequence number.
        super().__init__(channel, None)

    def describe(self):
        return "disconnect from {0}".format(self.channel)
class MessageHandlingError(Exception):
    """Indicates that a message couldn't be handled for some reason.
    If the reason is a contract violation - i.e. the message that was handled did not
    conform to the protocol specification - InvalidMessageError, which is a subclass,
    should be used instead.
    If any message handler raises an exception not derived from this class, it will
    escape the message loop unhandled, and terminate the process.
    If any message handler raises this exception, but applies_to(message) is False, it
    is treated as if it was a generic exception, as described above. Thus, if a request
    handler issues another request of its own, and that one fails, the failure is not
    silently propagated. However, a request that is delegated via Request.delegate()
    will also propagate failures back automatically. For manual propagation, catch the
    exception, and call exc.propagate().
    If any event handler raises this exception, and applies_to(event) is True, the
    exception is silently swallowed by the message loop.
    If any request handler raises this exception, and applies_to(request) is True, the
    exception is silently swallowed by the message loop, and a failure response is sent
    with "message" set to str(reason).
    Note that, while errors are not logged when they're swallowed by the message loop,
    by that time they have already been logged by their __init__ (when instantiated).
    """

    def __init__(self, reason, cause=None, silent=False):
        """Creates a new instance of this class, and immediately logs the exception.
        Message handling errors are logged immediately unless silent=True, so that the
        precise context in which they occured can be determined from the surrounding
        log entries.
        """
        self.reason = reason
        """Why it couldn't be handled. This can be any object, but usually it's either
        str or Exception.
        """
        assert cause is None or isinstance(cause, Message)
        self.cause = cause
        """The Message object for the message that couldn't be handled. For responses
        to unknown requests, this is a synthetic Request.
        """
        if not silent:
            # Raise and catch immediately so the log entry carries a traceback
            # pointing at the instantiation site.
            try:
                raise self
            except MessageHandlingError:
                log.swallow_exception()

    def __hash__(self):
        # Causes are compared by identity, so hash by id to stay consistent
        # with __eq__.
        return hash((self.reason, id(self.cause)))

    def __eq__(self, other):
        if not isinstance(other, MessageHandlingError):
            return NotImplemented
        if type(self) is not type(other):
            return NotImplemented
        if self.reason != other.reason:
            return False
        # Causes are only compared when both are present.
        if self.cause is not None and other.cause is not None:
            if self.cause.seq != other.cause.seq:
                return False
        return True

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return str(self.reason)

    def __repr__(self):
        # Bug fix: both branches were missing the opening parenthesis, producing
        # e.g. "MessageHandlingErrorreason='...')" instead of
        # "MessageHandlingError(reason='...')".
        s = type(self).__name__
        if self.cause is None:
            s += f"(reason={self.reason!r})"
        else:
            s += f"(channel={self.cause.channel.name!r}, cause={self.cause.seq!r}, reason={self.reason!r})"
        return s

    def applies_to(self, message):
        """Whether this MessageHandlingError can be treated as a reason why the
        handling of message failed.
        If self.cause is None, this is always true.
        If self.cause is not None, this is only true if cause is message.
        """
        return self.cause is None or self.cause is message

    def propagate(self, new_cause):
        """Propagates this error, raising a new instance of the same class with the
        same reason, but a different cause.
        """
        raise type(self)(self.reason, new_cause, silent=True)
class InvalidMessageError(MessageHandlingError):
    """Raised when an incoming message violates the protocol specification - e.g.
    it lacks required properties, or is not permitted in the current state.
    Also raised by MessageDict in lieu of KeyError for missing keys.
    """
    # Automatically prepended to the "message" property in JSON responses, when
    # the handler raises InvalidMessageError. If a failed response has a
    # "message" property starting with this prefix, it is reported as
    # InvalidMessageError rather than MessageHandlingError.
    PREFIX = "Invalid message: "
    def __str__(self):
        return "".join([InvalidMessageError.PREFIX, str(self.reason)])
class JsonMessageChannel(object):
    """Implements a JSON message channel on top of a raw JSON message stream, with
    support for DAP requests, responses, and events.
    The channel can be locked for exclusive use via the with-statement::
        with channel:
            channel.send_request(...)
            # No interleaving messages can be sent here from other threads.
            channel.send_event(...)
    """
    def __init__(self, stream, handlers=None, name=None):
        # stream: the raw JSON stream to read/write; handlers: an object whose
        # attributes ("<command>_request", "<event>_event", "disconnect", ...)
        # are looked up by _get_handler_for() and _handle_disconnect().
        self.stream = stream
        self.handlers = handlers
        self.name = name if name is not None else stream.name
        self.started = False
        # A single re-entrant lock guards all mutable channel state; the
        # Condition below shares it, so queue updates and notify are atomic.
        self._lock = threading.RLock()
        self._closed = False
        self._seq_iter = itertools.count(1)
        self._sent_requests = {} # {seq: Request}
        self._handler_queue = [] # [(what, handler)]
        self._handlers_enqueued = threading.Condition(self._lock)
        self._handler_thread = None
        self._parser_thread = None
    def __str__(self):
        return self.name
    def __repr__(self):
        return f"{type(self).__name__}({self.name!r})"
    def __enter__(self):
        # Locks the channel for exclusive use (see class docstring).
        self._lock.acquire()
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self._lock.release()
    def close(self):
        """Closes the underlying stream.
        This does not immediately terminate any handlers that are already executing,
        but they will be unable to respond. No new request or event handlers will
        execute after this method is called, even for messages that have already been
        received. However, response handlers will continue to executed for any request
        that is still pending, as will any handlers registered via on_response().
        """
        with self:
            if not self._closed:
                self._closed = True
                self.stream.close()
    def start(self):
        """Starts a message loop which parses incoming messages and invokes handlers
        for them on a background thread, until the channel is closed.
        Incoming messages, including responses to requests, will not be processed at
        all until this is invoked.
        """
        assert not self.started
        self.started = True
        self._parser_thread = threading.Thread(
            target=self._parse_incoming_messages, name=f"{self} message parser"
        )
        hide_thread_from_debugger(self._parser_thread)
        self._parser_thread.daemon = True
        self._parser_thread.start()
    def wait(self):
        """Waits for the message loop to terminate, and for all enqueued Response
        message handlers to finish executing.
        """
        parser_thread = self._parser_thread
        # join() can raise if the thread was never started or is the current
        # thread; treat that as "nothing to wait for".
        try:
            if parser_thread is not None:
                parser_thread.join()
        except AssertionError:
            log.debug("Handled error joining parser thread.")
        try:
            handler_thread = self._handler_thread
            if handler_thread is not None:
                handler_thread.join()
        except AssertionError:
            log.debug("Handled error joining handler thread.")
    # Order of keys for _prettify() - follows the order of properties in
    # https://microsoft.github.io/debug-adapter-protocol/specification
    _prettify_order = (
        "seq",
        "type",
        "request_seq",
        "success",
        "command",
        "event",
        "message",
        "arguments",
        "body",
        "error",
    )
    def _prettify(self, message_dict):
        """Reorders items in a MessageDict such that it is more readable."""
        # Relies on dicts preserving insertion order: delete-and-reinsert moves
        # each known key to the end, in canonical DAP order.
        for key in self._prettify_order:
            if key not in message_dict:
                continue
            value = message_dict[key]
            del message_dict[key]
            message_dict[key] = value
    @contextlib.contextmanager
    def _send_message(self, message):
        """Sends a new message to the other party.
        Generates a new sequence number for the message, and provides it to the
        caller before the message is sent, using the context manager protocol::
            with send_message(...) as seq:
                # The message hasn't been sent yet.
                ...
            # Now the message has been sent.
        Safe to call concurrently for the same channel from different threads.
        """
        assert "seq" not in message
        # First critical section: allocate the sequence number.
        with self:
            seq = next(self._seq_iter)
        message = MessageDict(None, message)
        message["seq"] = seq
        self._prettify(message)
        # Second critical section: let the caller run its pre-send code, then
        # write, all while holding the lock so no other message interleaves.
        with self:
            yield seq
            self.stream.write_json(message)
    def send_request(self, command, arguments=None, on_before_send=None):
        """Sends a new request, and returns the OutgoingRequest object for it.
        If arguments is None or {}, "arguments" will be omitted in JSON.
        If on_before_send is not None, invokes on_before_send() with the request
        object as the sole argument, before the request actually gets sent.
        Does not wait for response - use OutgoingRequest.wait_for_response().
        Safe to call concurrently for the same channel from different threads.
        """
        d = {"type": "request", "command": command}
        if arguments is not None and arguments != {}:
            d["arguments"] = arguments
        with self._send_message(d) as seq:
            # Registered before the write happens, so a fast response can
            # always find its request in _sent_requests.
            request = OutgoingRequest(self, seq, command, arguments)
            if on_before_send is not None:
                on_before_send(request)
            self._sent_requests[seq] = request
        return request
    def send_event(self, event, body=None):
        """Sends a new event.
        If body is None or {}, "body" will be omitted in JSON.
        Safe to call concurrently for the same channel from different threads.
        """
        d = {"type": "event", "event": event}
        if body is not None and body != {}:
            d["body"] = body
        with self._send_message(d):
            pass
    def request(self, *args, **kwargs):
        """Same as send_request(...).wait_for_response()"""
        return self.send_request(*args, **kwargs).wait_for_response()
    def propagate(self, message):
        """Sends a new message with the same type and payload.
        If it was a request, returns the new OutgoingRequest object for it.
        """
        assert message.is_request() or message.is_event()
        if message.is_request():
            return self.send_request(message.command, message.arguments)
        else:
            self.send_event(message.event, message.body)
    def delegate(self, message):
        """Like propagate(message).wait_for_response(), but will also propagate
        any resulting MessageHandlingError back.
        """
        try:
            result = self.propagate(message)
            if result.is_request():
                result = result.wait_for_response()
            return result
        except MessageHandlingError as exc:
            # Re-raise against the original message, so its handler sees the
            # failure as its own.
            exc.propagate(message)
    def _parse_incoming_messages(self):
        # Body of the parser thread: loops until the stream signals EOF/error
        # via NoMoreMessages, then fails all pending requests and disconnects.
        log.debug("Starting message loop for channel {0}", self)
        try:
            while True:
                self._parse_incoming_message()
        except NoMoreMessages as exc:
            log.debug("Exiting message loop for channel {0}: {1}", self, exc)
            with self:
                # Generate dummy responses for all outstanding requests.
                err_message = str(exc)
                # Response._parse() will remove items from _sent_requests, so
                # make a snapshot before iterating.
                sent_requests = list(self._sent_requests.values())
                for request in sent_requests:
                    response_json = MessageDict(
                        None,
                        {
                            "seq": -1,
                            "request_seq": request.seq,
                            "command": request.command,
                            "success": False,
                            "message": err_message,
                        },
                    )
                    Response._parse(self, response_json, body=exc)
                assert not len(self._sent_requests)
                self._enqueue_handlers(Disconnect(self), self._handle_disconnect)
                self.close()
    # Dispatch table from the "type" property of an incoming message to the
    # classmethod that parses it and enqueues its handler.
    _message_parsers = {
        "event": Event._parse,
        "request": Request._parse,
        "response": Response._parse,
    }
    def _parse_incoming_message(self):
        """Reads incoming messages, parses them, and puts handlers into the queue
        for _run_handlers() to invoke, until the channel is closed.
        """
        # Set up a dedicated decoder for this message, to create MessageDict instances
        # for all JSON objects, and track them so that they can be later wired up to
        # the Message they belong to, once it is instantiated.
        def object_hook(d):
            d = MessageDict(None, d)
            if "seq" in d:
                self._prettify(d)
            d.associate_with = associate_with
            message_dicts.append(d)
            return d
        # A hack to work around circular dependency between messages, and instances of
        # MessageDict in their payload. We need to set message for all of them, but it
        # cannot be done until the actual Message is created - which happens after the
        # dicts are created during deserialization.
        #
        # So, upon deserialization, every dict in the message payload gets a method
        # that can be called to set MessageDict.message for *all* dicts belonging to
        # that message. This method can then be invoked on the top-level dict by the
        # parser, after it has parsed enough of the dict to create the appropriate
        # instance of Event, Request, or Response for this message.
        def associate_with(message):
            for d in message_dicts:
                d.message = message
                del d.associate_with
        message_dicts = []
        decoder = self.stream.json_decoder_factory(object_hook=object_hook)
        message_dict = self.stream.read_json(decoder)
        assert isinstance(message_dict, MessageDict) # make sure stream used decoder
        msg_type = message_dict("type", json.enum("event", "request", "response"))
        parser = self._message_parsers[msg_type]
        try:
            parser(self, message_dict)
        except InvalidMessageError as exc:
            # Protocol violations are logged but do not kill the process.
            log.error(
                "Failed to parse message in channel {0}: {1} in:\n{2}",
                self,
                str(exc),
                json.repr(message_dict),
            )
        except Exception as exc:
            if isinstance(exc, NoMoreMessages) and exc.stream is self.stream:
                raise
            # Any other parser failure is unrecoverable: fail fast rather than
            # continue with a channel in an unknown state.
            log.swallow_exception(
                "Fatal error in channel {0} while parsing:\n{1}",
                self,
                json.repr(message_dict),
            )
            os._exit(1)
    def _enqueue_handlers(self, what, *handlers):
        """Enqueues handlers for _run_handlers() to run.
        `what` is the Message being handled, and is used for logging purposes.
        If the background thread with _run_handlers() isn't running yet, starts it.
        """
        with self:
            self._handler_queue.extend((what, handler) for handler in handlers)
            self._handlers_enqueued.notify_all()
            # If there is anything to handle, but there's no handler thread yet,
            # spin it up. This will normally happen only once, on the first call
            # to _enqueue_handlers(), and that thread will run all the handlers
            # for parsed messages. However, this can also happen is somebody calls
            # Request.on_response() - possibly concurrently from multiple threads -
            # after the channel has already been closed, and the initial handler
            # thread has exited. In this case, we spin up a new thread just to run
            # the enqueued response handlers, and it will exit as soon as it's out
            # of handlers to run.
            if len(self._handler_queue) and self._handler_thread is None:
                self._handler_thread = threading.Thread(
                    target=self._run_handlers,
                    name=f"{self} message handler",
                )
                hide_thread_from_debugger(self._handler_thread)
                self._handler_thread.start()
    def _run_handlers(self):
        """Runs enqueued handlers until the channel is closed, or until the handler
        queue is empty once the channel is closed.
        """
        while True:
            with self:
                closed = self._closed
            if closed:
                # Wait for the parser thread to wrap up and enqueue any remaining
                # handlers, if it is still running.
                self._parser_thread.join()
                # From this point on, _enqueue_handlers() can only get called
                # from Request.on_response().
            with self:
                if not closed and not len(self._handler_queue):
                    # Wait for something to process.
                    self._handlers_enqueued.wait()
                # Make a snapshot before releasing the lock.
                handlers = self._handler_queue[:]
                del self._handler_queue[:]
                if closed and not len(handlers):
                    # Nothing to process, channel is closed, and parser thread is
                    # not running anymore - time to quit! If Request.on_response()
                    # needs to call _enqueue_handlers() later, it will spin up
                    # a new handler thread.
                    self._handler_thread = None
                    return
            for what, handler in handlers:
                # If the channel is closed, we don't want to process any more events
                # or requests - only responses and the final disconnect handler. This
                # is to guarantee that if a handler calls close() on its own channel,
                # the corresponding request or event is the last thing to be processed.
                # NOTE(review): this membership test compares `handler` against the
                # plain functions Event._handle/Request._handle; confirm it matches
                # how handlers are enqueued (bound method vs. function) upstream.
                if closed and handler in (Event._handle, Request._handle):
                    continue
                with log.prefixed("/handling {0}/\n", what.describe()):
                    try:
                        handler()
                    except Exception:
                        # It's already logged by the handler, so just fail fast.
                        self.close()
                        os._exit(1)
    def _get_handler_for(self, type, name):
        """Returns the handler for a message of a given type."""
        # Prefer the specific handler ("<name>_<type>", e.g. "launch_request"),
        # falling back to the generic one ("request"/"event").
        with self:
            handlers = self.handlers
        for handler_name in (name + "_" + type, type):
            try:
                return getattr(handlers, handler_name)
            except AttributeError:
                continue
        raise AttributeError(
            "handler object {0} for channel {1} has no handler for {2} {3!r}".format(
                util.srcnameof(handlers),
                self,
                type,
                name,
            )
        )
    def _handle_disconnect(self):
        # Invokes the optional "disconnect" handler; absence of one is fine.
        handler = getattr(self.handlers, "disconnect", lambda: None)
        try:
            handler()
        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle disconnect from {1}:",
                util.srcnameof(handler),
                self,
            )
class MessageHandlers(object):
    """A trivial handlers object for JsonMessageChannel: every keyword argument
    passed to the constructor becomes an attribute of the same name and value.
    """
    def __init__(self, **kwargs):
        # Bulk attribute assignment; equivalent to setattr() per item since this
        # class defines no descriptors or custom __setattr__.
        self.__dict__.update(kwargs)
| 56,396 | Python | 36.448207 | 106 | 0.601337 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/common/singleton.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import functools
import threading
class Singleton(object):
    """A base class for a class of a singleton object.
    For any derived class T, the first invocation of T() will create the instance,
    and any future invocations of T() will return that instance.
    Concurrent invocations of T() from different threads are safe.
    """
    # A dual-lock scheme is necessary to be thread safe while avoiding deadlocks.
    # _lock_lock is shared by all singleton types, and is used to construct their
    # respective _lock instances when invoked for a new type. Then _lock is used
    # to synchronize all further access for that type, including __init__. This way,
    # __init__ for any given singleton can access another singleton, and not get
    # deadlocked if that other singleton is trying to access it.
    _lock_lock = threading.RLock()
    _lock = None
    # Specific subclasses will get their own _instance set in __new__.
    _instance = None
    _is_shared = None # True if shared, False if exclusive
    def __new__(cls, *args, **kwargs):
        # Allow arbitrary args and kwargs if shared=False, because that is guaranteed
        # to construct a new singleton if it succeeds. Otherwise, this call might end
        # up returning an existing instance, which might have been constructed with
        # different arguments, so allowing them is misleading.
        assert not kwargs.get("shared", False) or (len(args) + len(kwargs)) == 0, (
            "Cannot use constructor arguments when accessing a Singleton without "
            "specifying shared=False."
        )
        # Avoid locking as much as possible with repeated double-checks - the most
        # common path is when everything is already allocated.
        # NOTE(review): `if not cls._instance` relies on the instance being truthy;
        # a subclass defining __bool__/__len__ that returns falsy would cause
        # re-creation - confirm no subclass does that.
        if not cls._instance:
            # If there's no per-type lock, allocate it.
            if cls._lock is None:
                with cls._lock_lock:
                    if cls._lock is None:
                        cls._lock = threading.RLock()
            # Now that we have a per-type lock, we can synchronize construction.
            if not cls._instance:
                with cls._lock:
                    if not cls._instance:
                        cls._instance = object.__new__(cls)
                        # To prevent having __init__ invoked multiple times, call
                        # it here directly, and then replace it with a stub that
                        # does nothing - that stub will get auto-invoked on return,
                        # and on all future singleton accesses.
                        cls._instance.__init__()
                        cls.__init__ = lambda *args, **kwargs: None
        return cls._instance
    def __init__(self, *args, **kwargs):
        """Initializes the singleton instance. Guaranteed to only be invoked once for
        any given type derived from Singleton.
        If shared=False, the caller is requesting a singleton instance for their own
        exclusive use. This is only allowed if the singleton has not been created yet;
        if so, it is created and marked as being in exclusive use. While it is marked
        as such, all attempts to obtain an existing instance of it immediately raise
        an exception. The singleton can eventually be promoted to shared use by calling
        share() on it.
        """
        shared = kwargs.pop("shared", True)
        with self:
            if shared:
                assert (
                    type(self)._is_shared is not False
                ), "Cannot access a non-shared Singleton."
                type(self)._is_shared = True
            else:
                assert type(self)._is_shared is None, "Singleton is already created."
    def __enter__(self):
        """Lock this singleton to prevent concurrent access."""
        type(self)._lock.acquire()
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        """Unlock this singleton to allow concurrent access."""
        type(self)._lock.release()
    def share(self):
        """Share this singleton, if it was originally created with shared=False."""
        type(self)._is_shared = True
class ThreadSafeSingleton(Singleton):
    """A singleton that incorporates a lock for thread-safe access to its members.
    The lock can be acquired using the context manager protocol, and thus idiomatic
    use is in conjunction with a with-statement. For example, given derived class T::
        with T() as t:
            t.x = t.frob(t.y)
    All access to the singleton from the outside should follow this pattern for both
    attributes and method calls. Singleton members can assume that self is locked by
    the caller while they're executing, but recursive locking of the same singleton
    on the same thread is also permitted.
    """
    threadsafe_attrs = frozenset()
    """Names of attributes that are guaranteed to be used in a thread-safe manner.
    This is typically used in conjunction with share() to simplify synchronization.
    """
    readonly_attrs = frozenset()
    """Names of attributes that are readonly. These can be read without locking, but
    cannot be written at all.
    Every derived class gets its own separate set. Thus, for any given singleton type
    T, an attribute can be made readonly after setting it, with T.readonly_attrs.add().
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Make sure each derived class gets a separate copy.
        type(self).readonly_attrs = set(type(self).readonly_attrs)
    # Prevent callers from reading or writing attributes without locking, except for
    # reading attributes listed in threadsafe_attrs, and methods specifically marked
    # with @threadsafe_method. Such methods should perform the necessary locking to
    # ensure thread safety for the callers.
    @staticmethod
    def assert_locked(self):
        # NOTE(review): acquire(blocking=False) on an RLock also succeeds when the
        # lock is entirely free, so this only catches access while ANOTHER thread
        # holds the lock - confirm this limitation is acceptable.
        lock = type(self)._lock
        assert lock.acquire(blocking=False), (
            "ThreadSafeSingleton accessed without locking. Either use with-statement, "
            "or if it is a method or property, mark it as @threadsafe_method or with "
            "@autolocked_method, as appropriate."
        )
        lock.release()
    def __getattribute__(self, name):
        # Enforce the locking discipline on every attribute read, except for
        # names declared thread-safe/readonly and marked thread-safe methods.
        value = object.__getattribute__(self, name)
        if name not in (type(self).threadsafe_attrs | type(self).readonly_attrs):
            if not getattr(value, "is_threadsafe_method", False):
                ThreadSafeSingleton.assert_locked(self)
        return value
    def __setattr__(self, name, value):
        # Writes are never allowed on readonly attrs, and require the lock
        # unless the attr is declared thread-safe.
        assert name not in type(self).readonly_attrs, "This attribute is read-only."
        if name not in type(self).threadsafe_attrs:
            ThreadSafeSingleton.assert_locked(self)
        return object.__setattr__(self, name, value)
def threadsafe_method(func):
    """Marks a method of a ThreadSafeSingleton-derived class as inherently thread-safe.
    A method so marked must either not use any singleton state, or lock it appropriately.
    """
    # ThreadSafeSingleton.__getattribute__ checks for this marker attribute and
    # skips the locking assertion when it is present.
    setattr(func, "is_threadsafe_method", True)
    return func
def autolocked_method(func):
    """Automatically synchronizes all calls of a method of a ThreadSafeSingleton-derived
    class by locking the singleton for the duration of each call.
    """
    @functools.wraps(func)
    @threadsafe_method
    def synchronized_call(inst, *args, **kwargs):
        # Hold the singleton's lock across the entire call.
        with inst:
            return func(inst, *args, **kwargs)
    return synchronized_call
| 7,666 | Python | 40.22043 | 89 | 0.644665 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/server/attach_pid_injected.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""Script injected into the debuggee process during attach-to-PID."""
import os
# Normalize to an absolute path, in case this injected script was loaded with a
# relative __file__.
__file__ = os.path.abspath(__file__)
# Three dirname() calls walk up from .../debugpy/server/attach_pid_injected.py
# to the directory that contains the debugpy package itself.
_debugpy_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def attach(setup):
    """Entry point invoked in the debuggee after code injection.
    `setup` is a dict with keys "mode" ("listen" or "connect"), "address",
    "log_to", and "adapter_access_token"; configures and starts debugpy inside
    this process accordingly.
    """
    log = None
    try:
        import sys
        if "threading" not in sys.modules:
            # If threading hasn't been imported yet, the injected code may be
            # running on a thread that CPython doesn't consider the main thread;
            # attach_script.fix_main_thread_id() corrects that before anything
            # else imports threading.
            try:
                def on_warn(msg):
                    print(msg, file=sys.stderr)
                def on_exception(msg):
                    print(msg, file=sys.stderr)
                def on_critical(msg):
                    print(msg, file=sys.stderr)
                pydevd_attach_to_process_path = os.path.join(
                    _debugpy_dir,
                    "debugpy",
                    "_vendored",
                    "pydevd",
                    "pydevd_attach_to_process",
                )
                assert os.path.exists(pydevd_attach_to_process_path)
                sys.path.insert(0, pydevd_attach_to_process_path)
                # NOTE: that it's not a part of the pydevd PYTHONPATH
                import attach_script
                attach_script.fix_main_thread_id(
                    on_warn=on_warn, on_exception=on_exception, on_critical=on_critical
                )
                # NOTE: At this point it should be safe to remove this.
                sys.path.remove(pydevd_attach_to_process_path)
            except:
                import traceback
                traceback.print_exc()
                raise
        # Temporarily put the debugpy distribution first on sys.path, so these
        # imports resolve to the injected copy rather than any other install.
        sys.path.insert(0, _debugpy_dir)
        try:
            import debugpy
            import debugpy.server
            from debugpy.common import json, log
            import pydevd
        finally:
            assert sys.path[0] == _debugpy_dir
            del sys.path[0]
        # If pydevd was already active in this process, shut its threads down
        # before reconfiguring.
        py_db = pydevd.get_global_debugger()
        if py_db is not None:
            py_db.dispose_and_kill_all_pydevd_threads(wait=False)
        if setup["log_to"] is not None:
            debugpy.log_to(setup["log_to"])
        log.info("Configuring injected debugpy: {0}", json.repr(setup))
        if setup["mode"] == "listen":
            debugpy.listen(setup["address"])
        elif setup["mode"] == "connect":
            debugpy.connect(
                setup["address"], access_token=setup["adapter_access_token"]
            )
        else:
            raise AssertionError(repr(setup))
    except:
        # `log` may still be None if the failure happened before the debugpy
        # imports; in that case fall back to plain re-raise after printing.
        import traceback
        traceback.print_exc()
        if log is None:
            raise
        else:
            log.reraise_exception()
    log.info("debugpy injected successfully")
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/server/__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
# "force_pydevd" must be imported first to ensure (via side effects)
# that the debugpy-vendored copy of pydevd gets used.
import debugpy._vendored.force_pydevd # noqa
| 323 | Python | 39.499995 | 68 | 0.773994 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/server/api.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import codecs
import os
import pydevd
import socket
import sys
import threading
import debugpy
from debugpy import adapter
from debugpy.common import json, log, sockets
from _pydevd_bundle.pydevd_constants import get_global_debugger
from pydevd_file_utils import absolute_path
from debugpy.common.util import hide_debugpy_internals
# Per-thread storage; not referenced elsewhere in this chunk - presumably used
# by other parts of the module (TODO confirm).
_tls = threading.local()
# TODO: "gevent", if possible.
# Current debug configuration; mutated by configure() and read by listen().
_config = {
    "qt": "none",
    "subProcess": True,
    "python": sys.executable,
    "pythonEnv": {},
}
_config_valid_values = {
    # If property is not listed here, any value is considered valid, so long as
    # its type matches that of the default value in _config.
    "qt": ["auto", "none", "pyside", "pyside2", "pyqt4", "pyqt5"],
}
# This must be a global to prevent it from being garbage collected and triggering
# https://bugs.python.org/issue37380.
_adapter_process = None
def _settrace(*args, **kwargs):
    """Thin wrapper around pydevd.settrace() that records that it was invoked.

    The `_settrace.called` flag is what configure()/listen()/connect() check to
    refuse reconfiguration once a debug adapter is running.
    """
    log.debug("pydevd.settrace(*{0!r}, **{1!r})", args, kwargs)
    # The stdin in notification is not acted upon in debugpy, so, disable it.
    kwargs.setdefault("notify_stdin", False)
    result = pydevd.settrace(*args, **kwargs)
    # BUGFIX: this used to be in a try/except/else with `return` inside the
    # `try` - but `return` skips the `else` clause, so the flag was never set
    # and the "debug adapter is already running" guards could never fire. Set
    # the flag only after settrace() succeeded, then return.
    _settrace.called = True
    return result
_settrace.called = False
def ensure_logging():
    """Starts logging to log.log_dir, if it hasn't already been done."""
    if not ensure_logging.ensured:
        # Flip the flag first so re-entrant calls are no-ops.
        ensure_logging.ensured = True
        log.to_file(prefix="debugpy.server")
        log.describe_environment("Initial environment:")
        if log.log_dir is not None:
            pydevd.log_to(log.log_dir + "/debugpy.pydevd.log")
ensure_logging.ensured = False
def log_to(path):
    """Directs debugpy logging either to stderr (pass sys.stderr) or to a
    directory path. Must be called before logging has started.
    """
    if ensure_logging.ensured:
        raise RuntimeError("logging has already begun")
    log.debug("log_to{0!r}", (path,))
    if path is not sys.stderr:
        log.log_dir = path
    else:
        log.stderr.levels |= set(log.LEVELS)
def configure(properties=None, **kwargs):
    """Validates the given debug configuration properties against _config and
    applies them. Properties may be passed as a mapping, as keyword arguments,
    or both (keyword arguments win).
    """
    if _settrace.called:
        raise RuntimeError("debug adapter is already running")
    ensure_logging()
    log.debug("configure{0!r}", (properties, kwargs))
    merged = dict(properties or {})
    merged.update(kwargs)
    for k, v in merged.items():
        if k not in _config:
            raise ValueError("Unknown property {0!r}".format(k))
        expected_type = type(_config[k])
        if type(v) is not expected_type:
            raise ValueError("{0!r} must be a {1}".format(k, expected_type.__name__))
        valid_values = _config_valid_values.get(k)
        if valid_values is not None and v not in valid_values:
            raise ValueError("{0!r} must be one of: {1!r}".format(k, valid_values))
        _config[k] = v
def _starts_debugging(func):
    # Decorator for listen()/connect(): normalizes and validates `address`,
    # ensures logging, and builds the common pydevd.settrace() kwargs before
    # delegating to the wrapped function.
    def debug(address, **kwargs):
        if _settrace.called:
            raise RuntimeError("this process already has a debug adapter")
        # Accept either a bare port or a (host, port) pair. NOTE(review): any
        # 2-item iterable unpacks here (e.g. a 2-character string), and only
        # the subsequent __index__() check rejects it - confirm intended.
        try:
            _, port = address
        except Exception:
            port = address
            address = ("127.0.0.1", port)
        try:
            port.__index__() # ensure it's int-like
        except Exception:
            raise ValueError("expected port or (host, port)")
        if not (0 <= port < 2 ** 16):
            raise ValueError("invalid port number")
        ensure_logging()
        log.debug("{0}({1!r}, **{2!r})", func.__name__, address, kwargs)
        log.info("Initial debug configuration: {0}", json.repr(_config))
        qt_mode = _config.get("qt", "none")
        if qt_mode != "none":
            pydevd.enable_qt_support(qt_mode)
        settrace_kwargs = {
            "suspend": False,
            "patch_multiprocessing": _config.get("subProcess", True),
        }
        if hide_debugpy_internals():
            # Keep debugpy's own frames out of the debuggee's view.
            debugpy_path = os.path.dirname(absolute_path(debugpy.__file__))
            settrace_kwargs["dont_trace_start_patterns"] = (debugpy_path,)
            settrace_kwargs["dont_trace_end_patterns"] = (str("debugpy_launcher.py"),)
        try:
            return func(address, settrace_kwargs, **kwargs)
        except Exception:
            log.reraise_exception("{0}() failed:", func.__name__, level="info")
    return debug
@_starts_debugging
def listen(address, settrace_kwargs, in_process_debug_adapter=False):
    """Starts listening for an incoming debug client.
    By default this spawns a separate debug adapter process, exchanges endpoint
    information with it over a temporary socket, and connects pydevd to it;
    returns the (host, port) the adapter is accepting clients on. With
    in_process_debug_adapter=True, pydevd listens directly and nothing is
    returned.
    """
    # Errors below are logged with level="info", because the caller might be catching
    # and handling exceptions, and we don't want to spam their stderr unnecessarily.
    if in_process_debug_adapter:
        host, port = address
        log.info("Listening: pydevd without debugpy adapter: {0}:{1}", host, port)
        settrace_kwargs['patch_multiprocessing'] = False
        _settrace(
            host=host,
            port=port,
            wait_for_ready_to_run=False,
            block_until_connected=False,
            **settrace_kwargs
        )
        return
    import subprocess
    # Shared secret the adapter must present when connecting back to pydevd.
    server_access_token = codecs.encode(os.urandom(32), "hex").decode("ascii")
    try:
        endpoints_listener = sockets.create_server("127.0.0.1", 0, timeout=10)
    except Exception as exc:
        log.swallow_exception("Can't listen for adapter endpoints:")
        raise RuntimeError("can't listen for adapter endpoints: " + str(exc))
    try:
        endpoints_host, endpoints_port = endpoints_listener.getsockname()
        log.info(
            "Waiting for adapter endpoints on {0}:{1}...",
            endpoints_host,
            endpoints_port,
        )
        host, port = address
        adapter_args = [
            _config.get("python", sys.executable),
            os.path.dirname(adapter.__file__),
            "--for-server",
            str(endpoints_port),
            "--host",
            host,
            "--port",
            str(port),
            "--server-access-token",
            server_access_token,
        ]
        if log.log_dir is not None:
            adapter_args += ["--log-dir", log.log_dir]
        log.info("debugpy.listen() spawning adapter: {0}", json.repr(adapter_args))
        # On Windows, detach the adapter from our console, if any, so that it doesn't
        # receive Ctrl+C from it, and doesn't keep it open once we exit.
        creationflags = 0
        if sys.platform == "win32":
            creationflags |= 0x08000000 # CREATE_NO_WINDOW
            creationflags |= 0x00000200 # CREATE_NEW_PROCESS_GROUP
        # On embedded applications, environment variables might not contain
        # Python environment settings.
        python_env = _config.get("pythonEnv")
        if not bool(python_env):
            python_env = None
        # Adapter will outlive this process, so we shouldn't wait for it. However, we
        # need to ensure that the Popen instance for it doesn't get garbage-collected
        # by holding a reference to it in a non-local variable, to avoid triggering
        # https://bugs.python.org/issue37380.
        try:
            global _adapter_process
            _adapter_process = subprocess.Popen(
                adapter_args, close_fds=True, creationflags=creationflags, env=python_env
            )
            if os.name == "posix":
                # It's going to fork again to daemonize, so we need to wait on it to
                # clean it up properly.
                _adapter_process.wait()
            else:
                # Suppress misleading warning about child process still being alive when
                # this process exits (https://bugs.python.org/issue38890).
                _adapter_process.returncode = 0
                pydevd.add_dont_terminate_child_pid(_adapter_process.pid)
        except Exception as exc:
            log.swallow_exception("Error spawning debug adapter:", level="info")
            raise RuntimeError("error spawning debug adapter: " + str(exc))
        # The adapter reports its endpoints as a single JSON blob over the
        # temporary socket, then closes its end.
        try:
            sock, _ = endpoints_listener.accept()
            try:
                sock.settimeout(None)
                sock_io = sock.makefile("rb", 0)
                try:
                    endpoints = json.loads(sock_io.read().decode("utf-8"))
                finally:
                    sock_io.close()
            finally:
                sockets.close_socket(sock)
        except socket.timeout:
            log.swallow_exception(
                "Timed out waiting for adapter to connect:", level="info"
            )
            raise RuntimeError("timed out waiting for adapter to connect")
        except Exception as exc:
            log.swallow_exception("Error retrieving adapter endpoints:", level="info")
            raise RuntimeError("error retrieving adapter endpoints: " + str(exc))
    finally:
        endpoints_listener.close()
    log.info("Endpoints received from adapter: {0}", json.repr(endpoints))
    if "error" in endpoints:
        raise RuntimeError(str(endpoints["error"]))
    try:
        server_host = str(endpoints["server"]["host"])
        server_port = int(endpoints["server"]["port"])
        client_host = str(endpoints["client"]["host"])
        client_port = int(endpoints["client"]["port"])
    except Exception as exc:
        log.swallow_exception(
            "Error parsing adapter endpoints:\n{0}\n",
            json.repr(endpoints),
            level="info",
        )
        raise RuntimeError("error parsing adapter endpoints: " + str(exc))
    log.info(
        "Adapter is accepting incoming client connections on {0}:{1}",
        client_host,
        client_port,
    )
    # Connect pydevd to the adapter's "server" endpoint; the returned pair is
    # the "client" endpoint that IDEs should connect to.
    _settrace(
        host=server_host,
        port=server_port,
        wait_for_ready_to_run=False,
        block_until_connected=True,
        access_token=server_access_token,
        **settrace_kwargs
    )
    log.info("pydevd is connected to adapter at {0}:{1}", server_host, server_port)
    return client_host, client_port
@_starts_debugging
def connect(address, settrace_kwargs, access_token=None):
    """Connect this process's debug server to an already-listening client.

    address is a (host, port) pair.  settrace_kwargs are forwarded verbatim to
    pydevd's settrace; access_token, if given, is passed through as the
    client access token.  The _starts_debugging decorator is defined earlier
    in this module — presumably it validates state and supplies
    settrace_kwargs; confirm there.
    """
    host, port = address
    _settrace(host=host, port=port, client_access_token=access_token, **settrace_kwargs)
class wait_for_client:
    """Callable singleton: blocks until a client attaches (or is cancelled).

    The class is immediately replaced by an instance of itself below, so
    callers use it as a plain function: ``wait_for_client()``.
    """
    def __call__(self):
        ensure_logging()
        log.debug("wait_for_client()")
        pydb = get_global_debugger()
        if pydb is None:
            raise RuntimeError("listen() or connect() must be called first")
        cancel_event = threading.Event()
        # Rebind cancel() on the instance so a concurrent caller can abort
        # this particular wait by setting the event.
        self.cancel = cancel_event.set
        pydevd._wait_for_attach(cancel=cancel_event)
    @staticmethod
    def cancel():
        # Default until __call__ installs a real canceller above.
        raise RuntimeError("wait_for_client() must be called first")
wait_for_client = wait_for_client()
def is_client_connected():
    """Return True if a debug client is currently attached (per pydevd)."""
    return pydevd._is_attached()
def breakpoint():
    """Programmatic breakpoint: suspend the current thread in the debugger.

    No-op (with an info log) when no client is attached.
    """
    ensure_logging()
    if not is_client_connected():
        log.info("breakpoint() ignored - debugger not attached")
        return
    log.debug("breakpoint()")
    # Get the first frame in the stack that's not an internal frame.
    pydb = get_global_debugger()
    stop_at_frame = sys._getframe().f_back
    while (
        stop_at_frame is not None
        and pydb.get_file_type(stop_at_frame) == pydb.PYDEV_FILE
    ):
        stop_at_frame = stop_at_frame.f_back
    _settrace(
        suspend=True,
        trace_only_current_thread=True,
        patch_multiprocessing=False,
        stop_at_frame=stop_at_frame,
    )
    # Drop the frame reference; frame objects keep their locals alive.
    stop_at_frame = None
def debug_this_thread():
    """Enable debugger tracing on the current thread without suspending it."""
    ensure_logging()
    log.debug("debug_this_thread()")
    _settrace(suspend=False)
def trace_this_thread(should_trace):
    """Toggle pydevd tracing for the current thread.

    should_trace: True to enable tracing, False to disable it.
    """
    ensure_logging()
    log.debug("trace_this_thread({0!r})", should_trace)
    pydb = get_global_debugger()
    if should_trace:
        pydb.enable_tracing()
    else:
        pydb.disable_tracing()
| 11,789 | Python | 31.213115 | 89 | 0.603359 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/server/cli.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import json
import os
import re
import sys
from importlib.util import find_spec
# debugpy.__main__ should have preloaded pydevd properly before importing this module.
# Otherwise, some stdlib modules above might have had imported threading before pydevd
# could perform the necessary detours in it.
assert "pydevd" in sys.modules
import pydevd
# Note: use the one bundled from pydevd so that it's invisible for the user.
from _pydevd_bundle import pydevd_runpy as runpy
import debugpy
from debugpy.common import log
from debugpy.server import api
TARGET = "<filename> | -m <module> | -c <code> | --pid <pid>"
HELP = """debugpy {0}
See https://aka.ms/debugpy for documentation.
Usage: debugpy --listen | --connect
[<host>:]<port>
[--wait-for-client]
[--configure-<name> <value>]...
[--log-to <path>] [--log-to-stderr]
{1}
[<arg>]...
""".format(
debugpy.__version__, TARGET
)
class Options(object):
    """Parsed command-line state, filled in by the switch actions below."""
    mode = None                  # "listen" or "connect"
    address = None               # (host, port) tuple
    log_to = None                # log directory path, or None
    log_to_stderr = False        # mirror logs to stderr
    target = None                # filename, module name, code string, or pid
    target_kind = None           # "file" | "module" | "code" | "pid"
    wait_for_client = False      # block until a client attaches
    adapter_access_token = None  # token forwarded to the adapter (--connect only)
options = Options()
options.config = {"qt": "none", "subProcess": True}
def in_range(parser, start, stop):
    """Wrap ``parser`` with half-open range validation.

    Returns a callable that parses its string argument with ``parser`` and
    raises ValueError unless the result lies in [start, stop).  Either bound
    may be None to leave that side unchecked.
    """
    def checked(text):
        value = parser(text)
        if start is not None and value < start:
            raise ValueError("must be >= {0}".format(start))
        if stop is not None and value >= stop:
            raise ValueError("must be < {0}".format(stop))
        return value

    return checked
pid = in_range(int, 0, None)
def print_help_and_exit(switch, it):
    """Handle -h/-?/--help: print usage to stderr and exit successfully."""
    print(HELP, file=sys.stderr)
    sys.exit(0)
def print_version_and_exit(switch, it):
    """Handle -V/--version: print the debugpy version and exit."""
    print(debugpy.__version__)
    sys.exit(0)
def set_arg(varname, parser=(lambda x: x)):
    """Make a switch action that consumes one value and stores it on options.

    The value is taken from the argument iterator and run through ``parser``
    before being assigned to ``options.<varname>``.
    """
    def do(arg, it):
        value = parser(next(it))
        setattr(options, varname, value)
    return do
def set_const(varname, value):
    """Make a switch action that stores a fixed ``value`` on ``options``."""
    def do(arg, it):
        setattr(options, varname, value)
    return do
def set_address(mode):
    """Make the action for --listen/--connect: parse "[host:]port".

    Sets options.mode to ``mode`` and options.address to (host, port).
    Raises ValueError if an address was already set or the port is invalid.
    """
    def do(arg, it):
        if options.address is not None:
            raise ValueError("--listen and --connect are mutually exclusive")
        # It's either host:port, or just port.
        value = next(it)
        host, sep, port = value.partition(":")
        if not sep:
            host = "127.0.0.1"
            port = value
        try:
            port = int(port)
        except Exception:
            # Force the range check below to fail with a uniform message.
            port = -1
        if not (0 <= port < 2 ** 16):
            raise ValueError("invalid port number")
        options.mode = mode
        options.address = (host, port)
    return do
def set_config(arg, it):
    """Handle --configure-<name> <value>: update options.config[name].

    The value is coerced to the type of the existing default; booleans only
    accept "true"/"false" (case-insensitive) via the dict lookup below.
    """
    prefix = "--configure-"
    assert arg.startswith(prefix)
    name = arg[len(prefix) :]
    value = next(it)
    if name not in options.config:
        raise ValueError("unknown property {0!r}".format(name))
    expected_type = type(options.config[name])
    try:
        if expected_type is bool:
            value = {"true": True, "false": False}[value.lower()]
        else:
            value = expected_type(value)
    except Exception:
        raise ValueError("{0!r} must be a {1}".format(name, expected_type.__name__))
    options.config[name] = value
def set_target(kind, parser=(lambda x: x), positional=False):
    """Make the action that records the debug target and its kind.

    positional=True means the switch itself is the target (bare filename);
    otherwise the next argument is consumed.  Byte targets are decoded with a
    best-effort cascade of encodings.
    """
    def do(arg, it):
        options.target_kind = kind
        target = parser(arg if positional else next(it))
        if isinstance(target, bytes):
            # target may be the code, so, try some additional encodings...
            try:
                target = target.decode(sys.getfilesystemencoding())
            except UnicodeDecodeError:
                try:
                    target = target.decode("utf-8")
                except UnicodeDecodeError:
                    import locale
                    target = target.decode(locale.getpreferredencoding(False))
        options.target = target
    return do
# fmt: off
switches = [
# Switch Placeholder Action
# ====== =========== ======
# Switches that are documented for use by end users.
("-(\\?|h|-help)", None, print_help_and_exit),
("-(V|-version)", None, print_version_and_exit),
("--log-to" , "<path>", set_arg("log_to")),
("--log-to-stderr", None, set_const("log_to_stderr", True)),
("--listen", "<address>", set_address("listen")),
("--connect", "<address>", set_address("connect")),
("--wait-for-client", None, set_const("wait_for_client", True)),
("--configure-.+", "<value>", set_config),
# Switches that are used internally by the client or debugpy itself.
("--adapter-access-token", "<token>", set_arg("adapter_access_token")),
# Targets. The "" entry corresponds to positional command line arguments,
# i.e. the ones not preceded by any switch name.
("", "<filename>", set_target("file", positional=True)),
("-m", "<module>", set_target("module")),
("-c", "<code>", set_target("code")),
("--pid", "<pid>", set_target("pid", pid)),
]
# fmt: on
def consume_argv():
    """Yield sys.argv entries starting at index 1, removing each as it goes.

    Arguments consumed by the CLI parser are thereby hidden from the
    debugged target's own sys.argv.
    """
    while len(sys.argv) > 1:
        yield sys.argv.pop(1)
def parse_argv():
    """Parse sys.argv into the module-level ``options``.

    Consumes arguments (via consume_argv) up to and including the target;
    everything after the target is left in sys.argv for the debuggee.
    Raises ValueError on any usage error.
    """
    seen = set()
    it = consume_argv()
    while True:
        try:
            arg = next(it)
        except StopIteration:
            raise ValueError("missing target: " + TARGET)
        switch = arg
        if not switch.startswith("-"):
            # Positional argument: matched by the "" entry in switches.
            switch = ""
        # for/else: "else" runs only when no pattern matched.
        for pattern, placeholder, action in switches:
            if re.match("^(" + pattern + ")$", switch):
                break
        else:
            raise ValueError("unrecognized switch " + switch)
        if switch in seen:
            raise ValueError("duplicate switch " + switch)
        else:
            seen.add(switch)
        try:
            # Actions may pull their value(s) from the same iterator.
            action(arg, it)
        except StopIteration:
            assert placeholder is not None
            raise ValueError("{0}: missing {1}".format(switch, placeholder))
        except Exception as exc:
            raise ValueError("invalid {0} {1}: {2}".format(switch, placeholder, exc))
        if options.target is not None:
            # The target terminates parsing; the rest belongs to the debuggee.
            break
    if options.mode is None:
        raise ValueError("either --listen or --connect is required")
    if options.adapter_access_token is not None and options.mode != "connect":
        raise ValueError("--adapter-access-token requires --connect")
    if options.target_kind == "pid" and options.wait_for_client:
        raise ValueError("--pid does not support --wait-for-client")
    assert options.target is not None
    assert options.target_kind is not None
    assert options.address is not None
def start_debugging(argv_0):
    """Patch sys.argv[0] and start the debug session per ``options``.

    argv_0: the value the debuggee should see as sys.argv[0].
    """
    # We need to set up sys.argv[0] before invoking either listen() or connect(),
    # because they use it to report the "process" event. Thus, we can't rely on
    # run_path() and run_module() doing that, even though they will eventually.
    sys.argv[0] = argv_0
    log.debug("sys.argv after patching: {0!r}", sys.argv)
    debugpy.configure(options.config)
    if options.mode == "listen":
        debugpy.listen(options.address)
    elif options.mode == "connect":
        debugpy.connect(options.address, access_token=options.adapter_access_token)
    else:
        raise AssertionError(repr(options.mode))
    if options.wait_for_client:
        debugpy.wait_for_client()
def run_file():
    """Debug a script file or directory target, mimicking `python <file>`."""
    target = options.target
    start_debugging(target)
    # run_path has one difference with invoking Python from command-line:
    # if the target is a file (rather than a directory), it does not add its
    # parent directory to sys.path. Thus, importing other modules from the
    # same directory is broken unless sys.path is patched here.
    if os.path.isfile(target):
        dir = os.path.dirname(target)
        sys.path.insert(0, dir)
    else:
        log.debug("Not a file: {0!r}", target)
    log.describe_environment("Pre-launch environment:")
    log.info("Running file {0!r}", target)
    runpy.run_path(target, run_name="__main__")
def run_module():
    """Debug a module target, mimicking `python -m <module>`."""
    # Add current directory to path, like Python itself does for -m. This must
    # be in place before trying to use find_spec below to resolve submodules.
    sys.path.insert(0, str(""))
    # We want to do the same thing that run_module() would do here, without
    # actually invoking it.
    argv_0 = sys.argv[0]
    try:
        spec = find_spec(options.target)
        if spec is not None:
            argv_0 = spec.origin
    except Exception:
        log.swallow_exception("Error determining module path for sys.argv")
    start_debugging(argv_0)
    log.describe_environment("Pre-launch environment:")
    log.info("Running module {0!r}", options.target)
    # Docs say that runpy.run_module is equivalent to -m, but it's not actually
    # the case for packages - -m sets __name__ to "__main__", but run_module sets
    # it to "pkg.__main__". This breaks everything that uses the standard pattern
    # __name__ == "__main__" to detect being run as a CLI app. On the other hand,
    # runpy._run_module_as_main is a private function that actually implements -m.
    try:
        run_module_as_main = runpy._run_module_as_main
    except AttributeError:
        log.warning("runpy._run_module_as_main is missing, falling back to run_module.")
        runpy.run_module(options.target, alter_sys=True)
    else:
        run_module_as_main(options.target, alter_argv=True)
def run_code():
    """Debug a code-string target, mimicking `python -c <code>`."""
    # Add current directory to path, like Python itself does for -c.
    # (Redundant Python-2-era str() wrappers removed; literals are already str.)
    sys.path.insert(0, "")
    # Compile before attaching so syntax errors surface without a session.
    code = compile(options.target, "<string>", "exec")
    start_debugging("-c")
    log.describe_environment("Pre-launch environment:")
    log.info("Running code:\n\n{0}", options.target)
    eval(code, {})
def attach_to_pid():
    """Attach to a running process by injecting bootstrap code into it.

    Builds a single-line Python snippet (strings are smuggled as UTF-8 byte
    lists so the snippet itself needs no quote characters) and injects it via
    pydevd's add_code_to_python_process.
    """
    pid = options.target
    log.info("Attaching to process with PID={0}", pid)
    # Encode strings as lists of byte values so the injected code contains
    # no quote characters (see the pydevd restriction asserted below).
    encode = lambda s: list(bytearray(s.encode("utf-8"))) if s is not None else None
    script_dir = os.path.dirname(debugpy.server.__file__)
    assert os.path.exists(script_dir)
    script_dir = encode(script_dir)
    setup = {
        "mode": options.mode,
        "address": options.address,
        "wait_for_client": options.wait_for_client,
        "log_to": options.log_to,
        "adapter_access_token": options.adapter_access_token,
    }
    setup = encode(json.dumps(setup))
    python_code = """
import codecs;
import json;
import sys;
decode = lambda s: codecs.utf_8_decode(bytearray(s))[0] if s is not None else None;
script_dir = decode({script_dir});
setup = json.loads(decode({setup}));
sys.path.insert(0, script_dir);
import attach_pid_injected;
del sys.path[0];
attach_pid_injected.attach(setup);
"""
    # Collapse to one line: newlines are stripped, semicolons separate statements.
    python_code = (
        python_code.replace("\r", "")
        .replace("\n", "")
        .format(script_dir=script_dir, setup=setup)
    )
    log.info("Code to be injected: \n{0}", python_code.replace(";", ";\n"))
    # pydevd restriction on characters in injected code.
    assert not (
        {'"', "'", "\r", "\n"} & set(python_code)
    ), "Injected code should not contain any single quotes, double quotes, or newlines."
    pydevd_attach_to_process_path = os.path.join(
        os.path.dirname(pydevd.__file__), "pydevd_attach_to_process"
    )
    assert os.path.exists(pydevd_attach_to_process_path)
    sys.path.append(pydevd_attach_to_process_path)
    try:
        import add_code_to_python_process  # noqa
        log.info("Injecting code into process with PID={0} ...", pid)
        add_code_to_python_process.run_python_code(
            pid,
            python_code,
            connect_debugger_tracing=True,
            show_debug_info=int(os.getenv("DEBUGPY_ATTACH_BY_PID_DEBUG_INFO", "0")),
        )
    except Exception:
        log.reraise_exception("Code injection into PID={0} failed:", pid)
    log.info("Code injection into PID={0} completed.", pid)
def main():
    """CLI entry point: parse arguments, set up logging, and dispatch.

    Exits with status 2 on usage errors.  Redundant Python-2-era str()
    wrappers around literals were removed; behavior is unchanged.
    """
    original_argv = list(sys.argv)
    try:
        parse_argv()
    except Exception as exc:
        print("{0}\nError: {1}".format(HELP, exc), file=sys.stderr)
        sys.exit(2)
    if options.log_to is not None:
        debugpy.log_to(options.log_to)
    if options.log_to_stderr:
        debugpy.log_to(sys.stderr)
    api.ensure_logging()
    log.info(
        "sys.argv before parsing: {0!r}\n" "         after parsing: {1!r}",
        original_argv,
        sys.argv,
    )
    try:
        # Dispatch on the target kind recorded by parse_argv().
        run = {
            "file": run_file,
            "module": run_module,
            "code": run_code,
            "pid": attach_to_pid,
        }[options.target_kind]
        run()
    except SystemExit as exc:
        log.reraise_exception(
            "Debuggee exited via SystemExit: {0!r}", exc.code, level="debug"
        )
| 13,289 | Python | 29.551724 | 89 | 0.589059 |
omniverse-code/kit/exts/omni.kit.debug.python/PACKAGE-LICENSES/omni.kit.debug.python-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.debug.python/config/extension.toml | [package]
version = "0.2.0"
title = "A debugger for Python"
description="Uses debugpy which implements the Debug Adapter Protocol"
authors = ["NVIDIA"]
repository = ""
category = "Internal"
keywords = ["kit"]
readme = "docs/README.md"
changelog="docs/CHANGELOG.md"
[dependencies]
"omni.kit.pip_archive" = {}
[[python.module]]
name = "omni.kit.debug.python"
[settings.exts."omni.kit.debug.python"]
# Host and port to listen on for debugger connections
host = "127.0.0.1"
port = 3000
# Enable debugpy builtin logging, logs go into ${logs} folder
debugpyLogging = false
# Block until client (debugger) connected
waitForClient = false
# break immediately (also waits for client)
break = false
[[test]]
pyCoverageEnabled = false
waiver = "Developer tool. Brings 3rd party (debugpy) to another 3rd party (VSCode), doesn't have much to test and building an integration test is not worth it."
| 882 | TOML | 24.228571 | 160 | 0.732426 |
omniverse-code/kit/exts/omni.kit.debug.python/omni/kit/debug/python.py | import sys
import carb.tokens
import carb.settings
import omni.ext
import omni.kit.pipapi
# Try import to allow documentation build without debugpy
try:
import debugpy
except ModuleNotFoundError:
pass
def _print(msg):
print(f"[omni.kit.debug.python] {msg}")
def wait_for_client():
    """Block until a debugger client connects, logging that we are waiting."""
    _print("Waiting for debugger to connect...")
    debugpy.wait_for_client()
def breakpoint():
    """Wait for a client, then trigger a debugpy breakpoint.

    NOTE: shadows the built-in breakpoint() within this module.
    """
    wait_for_client()
    debugpy.breakpoint()
def is_attached() -> bool:
    """Return True if a debugger client is currently connected."""
    return debugpy.is_client_connected()
def enable_logging():
    """Direct debugpy's built-in logging into the app's ${logs} folder."""
    path = carb.tokens.get_tokens_interface().resolve("${logs}")
    _print(f"Enabled logging to '{path}'")
    debugpy.log_to(path)
# (host, port) returned by debugpy.listen(), or None if not listening yet.
_listen_address = None
def get_listen_address():
    """Return the address the debug server is listening on, or None."""
    return _listen_address
class Extension(omni.ext.IExt):
    """Kit extension that starts a debugpy server using carb settings.

    Reads host/port and behavior flags from /exts/omni.kit.debug.python/*.
    """
    def on_startup(self):
        settings = carb.settings.get_settings()
        host = settings.get("/exts/omni.kit.debug.python/host")
        port = settings.get("/exts/omni.kit.debug.python/port")
        debugpyLogging = settings.get("/exts/omni.kit.debug.python/debugpyLogging")
        waitForClient = settings.get("/exts/omni.kit.debug.python/waitForClient")
        break_ = settings.get("/exts/omni.kit.debug.python/break")
        if debugpyLogging:
            enable_logging()
        # Start debugging server
        # Point debugpy at the embedded interpreter so it can spawn the adapter.
        python_exe = "python.exe" if sys.platform == "win32" else "bin/python3"
        python_cmd = sys.prefix + "/" + python_exe
        debugpy.configure(python=python_cmd)
        global _listen_address
        try:
            _listen_address = debugpy.listen((host, port))
            _print(f"Running python debugger on: {_listen_address}")
        except Exception as e:
            # Failure to listen (e.g. port in use) is logged, not fatal.
            _print(f"Error running python debugger: {e}")
        if waitForClient:
            wait_for_client()
        if break_:
            breakpoint()
| 1,852 | Python | 23.706666 | 83 | 0.641469 |
omniverse-code/kit/exts/omni.kit.debug.python/docs/CHANGELOG.md | # CHANGELOG
## [0.2.0] - 2022-09-28
- Prebundle debugpy
## [0.1.0] - 2020-02-22
- Initial commit
| 101 | Markdown | 9.199999 | 23 | 0.594059 |
omniverse-code/kit/exts/omni.kit.debug.python/docs/README.md | # Debug Utils
## omni.kit.debug.python
Python debugger support. | 65 | Markdown | 12.199998 | 24 | 0.753846 |
omniverse-code/kit/exts/omni.kit.debug.python/docs/index.rst | omni.kit.debug.python
###########################
.. toctree::
:maxdepth: 1
CHANGELOG
Python API Reference
*********************
.. automodule:: omni.kit.debug.python
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:imported-members:
| 280 | reStructuredText | 13.789473 | 43 | 0.546429 |
omniverse-code/kit/exts/omni.kit.manipulator.tool.snap/docs/index.rst | omni.kit.manipulator.tool.snap
###################################
Extension that provides snap functionality to transform manipulator as well as a registry for external snap tools.
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: omni.kit.manipulator.tool.snap
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 448 | reStructuredText | 20.380951 | 114 | 0.662946 |
omniverse-code/kit/exts/omni.activity.core/PACKAGE-LICENSES/omni.activity.core-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.activity.core/config/extension.toml | [package]
title = "Omni Activity Core"
category = "Telemetry"
version = "1.0.1"
description = "The activity and the progress processor"
authors = ["NVIDIA"]
keywords = ["activity"]
[[python.module]]
name = "omni.activity.core"
[[native.plugin]]
path = "bin/*.plugin"
| 269 | TOML | 18.285713 | 55 | 0.69145 |
omniverse-code/kit/exts/omni.activity.core/omni/activity/core/_activity.pyi | from __future__ import annotations
import omni.activity.core._activity
import typing
import omni.core._core
__all__ = [
"EventType",
"IActivity",
"IEvent",
"INode",
"began",
"disable",
"enable",
"ended",
"get_instance",
"progress",
"updated"
]
class EventType():
"""
Members:
BEGAN : /< The activity is started
UPDATED : /< The activity is changed
ENDED : /< The activity is finished
"""
def __eq__(self, other: object) -> bool: ...
def __getstate__(self) -> int: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __init__(self, value: int) -> None: ...
def __int__(self) -> int: ...
def __ne__(self, other: object) -> bool: ...
def __repr__(self) -> str: ...
def __setstate__(self, state: int) -> None: ...
@property
def name(self) -> str:
"""
:type: str
"""
@property
def value(self) -> int:
"""
:type: int
"""
BEGAN: omni.activity.core._activity.EventType # value = <EventType.BEGAN: 0>
ENDED: omni.activity.core._activity.EventType # value = <EventType.ENDED: 2>
UPDATED: omni.activity.core._activity.EventType # value = <EventType.UPDATED: 1>
__members__: dict # value = {'BEGAN': <EventType.BEGAN: 0>, 'UPDATED': <EventType.UPDATED: 1>, 'ENDED': <EventType.ENDED: 2>}
pass
class IActivity(_IActivity, omni.core._core.IObject):
"""
@brief The activity and the progress processor.
"""
@typing.overload
def __init__(self, arg0: omni.core._core.IObject) -> None: ...
@typing.overload
def __init__(self) -> None: ...
def create_callback_to_pop(self, fn: typing.Callable[[INode], None]) -> int:
"""
Subscribes to event dispatching on the stream.
See :class:`.Subscription` for more information on subscribing mechanism.
Args:
fn: The callback to be called on event dispatch.
Returns:
The subscription holder.
"""
def pump(self) -> None:
"""
@brief Process the callback. Should be called once per frame.
"""
def push_event_dict(self, node_path: str, type: EventType, event_payload: capsule) -> bool:
"""
@brief Push event to the system.
TODO: eventPayload is a placeholder
TODO: eventPayload should be carb::dictionary
"""
def remove_callback(self, id: int) -> None:
"""
@brief Remove the callback.
"""
@property
def current_timestamp(self) -> int:
"""
:type: int
"""
@property
def enabled(self) -> None:
"""
:type: None
"""
@enabled.setter
def enabled(self, arg1: bool) -> None:
pass
pass
class IEvent(_IEvent, omni.core._core.IObject):
"""
@brief The event contains custom data. It can be speed, progress, etc.
"""
@typing.overload
def __init__(self, arg0: omni.core._core.IObject) -> None: ...
@typing.overload
def __init__(self) -> None: ...
@property
def event_timestamp(self) -> int:
"""
:type: int
"""
@property
def event_type(self) -> EventType:
"""
:type: EventType
"""
@property
def payload(self) -> carb.dictionary._dictionary.Item:
"""
:type: carb.dictionary._dictionary.Item
"""
pass
class INode(_INode, omni.core._core.IObject):
"""
@brief Node can contain other nodes and events.
"""
@typing.overload
def __init__(self, arg0: omni.core._core.IObject) -> None: ...
@typing.overload
def __init__(self) -> None: ...
def get_child(self, n: int) -> INode:
"""
@brief Get the child node
"""
def get_event(self, n: int) -> IEvent:
"""
@brief Get the activity
"""
@property
def child_count(self) -> int:
"""
:type: int
"""
@property
def event_count(self) -> int:
"""
:type: int
"""
@property
def name(self) -> str:
"""
:type: str
"""
pass
class _IActivity(omni.core._core.IObject):
pass
class _IEvent(omni.core._core.IObject):
pass
class _INode(omni.core._core.IObject):
pass
def began(arg0: str, **kwargs) -> None:
pass
def disable() -> None:
pass
def enable() -> None:
pass
def ended(arg0: str, **kwargs) -> None:
pass
def get_instance() -> IActivity:
pass
def progress(arg0: str, arg1: float, **kwargs) -> None:
pass
def updated(arg0: str, **kwargs) -> None:
pass
| 4,644 | unknown | 24.805555 | 129 | 0.543712 |
omniverse-code/kit/exts/omni.activity.core/omni/activity/core/__init__.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
# Required to be able to instantiate the object types
import omni.core
from ._activity import *
| 534 | Python | 37.214283 | 77 | 0.78839 |
omniverse-code/kit/exts/omni.activity.core/omni/activity/core/tests/__init__.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from .test_activity import TestActivity
| 477 | Python | 42.454542 | 77 | 0.796646 |
omniverse-code/kit/exts/omni.activity.core/omni/activity/core/tests/test_activity.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.activity.core as act
import omni.kit.app
import omni.kit.test
class TestActivity(omni.kit.test.AsyncTestCase):
    """End-to-end test of the activity event pipeline: push BEGAN/ENDED events
    for a nested node path and verify the callback observes the node tree."""

    async def test_general(self):
        called = []

        def callback(node: act.INode):
            # "Test|SubTest" produces root node "Test" with one child "SubTest".
            self.assertEqual(node.name, "Test")
            self.assertEqual(node.child_count, 1)
            child = node.get_child(0)
            self.assertEqual(child.name, "SubTest")
            self.assertEqual(child.event_count, 2)
            began = child.get_event(0)
            ended = child.get_event(1)
            self.assertEqual(began.event_type, act.EventType.BEGAN)
            self.assertEqual(ended.event_type, act.EventType.ENDED)
            self.assertEqual(began.payload["progress"], 0.0)
            self.assertEqual(ended.payload["progress"], 1.0)
            called.append(True)

        id = act.get_instance().create_callback_to_pop(callback)
        act.enable()
        act.began("Test|SubTest", progress=0.0)
        act.ended("Test|SubTest", progress=1.0)
        act.disable()
        # pump() dispatches queued events to registered callbacks.
        act.get_instance().pump()
        act.get_instance().remove_callback(id)
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(len(called), 1)
| 1,599 | Python | 31.653061 | 77 | 0.65666 |
omniverse-code/kit/exts/omni.hydra.index/omni/hydra/index/tests/__init__.py | from .nvindex_render_test import *
| 35 | Python | 16.999992 | 34 | 0.771429 |
omniverse-code/kit/exts/omni.hydra.index/omni/hydra/index/tests/nvindex_render_test.py | #!/usr/bin/env python3
import omni.kit.commands
import omni.kit.test
import omni.usd
from omni.rtx.tests import RtxTest, testSettings, postLoadTestSettings
from omni.rtx.tests.test_common import set_transform_helper, wait_for_update
from omni.kit.test_helpers_gfx.compare_utils import ComparisonMetric
from pxr import Sdf, Gf, UsdVol
from pathlib import Path
EXTENSION_DIR = Path(omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__))
TESTS_DIR = EXTENSION_DIR.joinpath('data', 'tests')
USD_DIR = TESTS_DIR.joinpath('usd')
GOLDEN_IMAGES_DIR = TESTS_DIR.joinpath('golden')
VOLUMES_DIR = TESTS_DIR.joinpath('volumes')
OUTPUTS_DIR = Path(omni.kit.test.get_test_output_path())
# This class is auto-discoverable by omni.kit.test
class IndexRenderTest(RtxTest):
    """Golden-image rendering tests for the NVIDIA IndeX volume renderer."""
    WINDOW_SIZE = (512, 512)
    # Default mean-squared-error threshold for golden image comparison.
    THRESHOLD = 1e-5
    async def setUp(self):
        await super().setUp()
        self.set_settings(testSettings)
        self.ctx.new_stage()
        self.add_dir_light()
        await omni.kit.app.get_app().next_update_async()
        self.set_settings(postLoadTestSettings)
    # Overridden with custom paths
    async def capture_and_compare(self, img_subdir: Path = "", golden_img_name=None, threshold=THRESHOLD,
                                  metric: ComparisonMetric = ComparisonMetric.MEAN_ERROR_SQUARED):
        golden_img_dir = GOLDEN_IMAGES_DIR.joinpath(img_subdir)
        output_img_dir = OUTPUTS_DIR.joinpath(img_subdir)
        if not golden_img_name:
            # NOTE(review): self.__test_name is name-mangled to
            # _IndexRenderTest__test_name; if the attribute is assigned in the
            # RtxTest base class it mangles differently there — confirm where
            # it is set.
            golden_img_name = f"{self.__test_name}.png"
        return await self._capture_and_compare(golden_img_name, threshold, output_img_dir, golden_img_dir, metric)
    # Overridden with custom paths
    def open_usd(self, usdSubpath: Path):
        path = USD_DIR.joinpath(usdSubpath)
        self.ctx.open_stage(str(path))
    def get_volumes_path(self, volumeSubPath: Path):
        return Sdf.AssetPath(str(VOLUMES_DIR.joinpath(volumeSubPath)))
    def change_reference(self, prim_path, reference):
        # Replace all references on the prim with a single new one.
        stage = self.ctx.get_stage()
        prim = stage.GetPrimAtPath(prim_path)
        ref = prim.GetReferences()
        ref.ClearReferences()
        ref.AddReference(Sdf.Reference(reference))
    # Optionally open a stage, run a mutation callback, wait for the render to
    # settle, then compare against the golden image.
    # NOTE(review): "imge" is a typo for "image"; renaming would touch all
    # call sites below.
    async def run_imge_test(self, usd_file: str, image_name: str, fn = None):
        if usd_file:
            self.open_usd(usd_file)
        if fn:
            fn()
        await wait_for_update()
        await self.capture_and_compare(golden_img_name=image_name)
    #
    # The tests
    #
    async def test_render_torus(self):
        await self.run_imge_test('torus.usda', 'nvindex-torus.png')
    async def test_render_torus_colormap(self):
        await self.run_imge_test('torus-colormap.usda', 'nvindex-torus-colormap-2.png')
        colormap_prim_path = '/World/Volume/mat/Colormap'
        # Switch to another colormap
        await self.run_imge_test(None, 'nvindex-torus-colormap-3.png',
                                 lambda: self.change_reference(colormap_prim_path, './colormap3.usda'))
        # Switch to colormap defined by RGBA control-points
        colormap = self.ctx.get_stage().GetPrimAtPath(colormap_prim_path)
        colormap.CreateAttribute("domain", Sdf.ValueTypeNames.Float2).Set((0.0, 1.0))
        colormap.CreateAttribute("colormapSource", Sdf.ValueTypeNames.Token).Set("rgbaPoints")
        colormap.CreateAttribute("xPoints", Sdf.ValueTypeNames.FloatArray).Set([0.5, 0.7, 1.0])
        colormap.CreateAttribute("rgbaPoints", Sdf.ValueTypeNames.Float4Array).Set([(0, 1, 0, 0.14), (1, 0, 0, 0.25), (1, 1, 0, 0.25)])
        await self.run_imge_test(None, 'nvindex-torus-colormap-rgba-points.png')
    async def test_render_torus_shader(self):
        await self.run_imge_test('torus-shader.usda', 'nvindex-torus-shader.png')
    async def test_render_torus_settings(self):
        await self.run_imge_test('torus-settings.usda', 'nvindex-torus-settings.png')
    async def test_render_torus_create(self):
        PRIM_PATH = "/World/volume"
        open_vdb_asset = UsdVol.OpenVDBAsset.Define(self.ctx.get_stage(), PRIM_PATH + "/OpenVDBAsset")
        open_vdb_asset.GetFilePathAttr().Set(self.get_volumes_path("torus.vdb"))
        # Use default field in the OpenVDB file, instead of setting it explicitly:
        # open_vdb_asset.GetFieldNameAttr().Set("torus_fog")
        volume = UsdVol.Volume.Define(self.ctx.get_stage(), PRIM_PATH)
        volume.CreateFieldRelationship("torus", open_vdb_asset.GetPath())
        set_transform_helper(volume.GetPath(), translate=Gf.Vec3d(0, 150, 0), scale=Gf.Vec3d(20.0, 20.0, 20.0))
        await wait_for_update()
        await self.run_imge_test(None, 'nvindex-torus-create.png')
| 4,689 | Python | 40.504424 | 135 | 0.670719 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/PACKAGE-LICENSES/omni.kit.compatibility_checker-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/config/extension.toml | [package]
version = "0.1.0"
title = "Kit Compatibility Checker"
category = "Internal"
[dependencies]
"omni.appwindow" = { }
"omni.gpu_foundation" = { }
"omni.kit.notification_manager" = { optional=true }
"omni.kit.window.viewport" = { optional=true }
"omni.kit.renderer.core" = { optional=true }
[[python.module]]
name = "omni.kit.compatibility_checker"
[settings]
[settings.exts."omni.kit.compatibility_checker"]
# supportedGpus = [
# "*GeForce RTX ????*",
# "*Quadro RTX ????*",
# "*RTX ?????*",
# "*TITAN RTX*"
# ]
"windows-x86_64".minOsVersion = 0 #19042
# "windows-x86_64".driverBlacklistRanges = [
# ["0.0", "456.39", "The minimum RTX requirement"],
# ["460.0", "461.92", "Driver crashes or image artifacts"],
# ]
"linux-x86_64".minOsVersion = 0
# "linux-x86_64".driverBlacklistRanges = [
# ["0.0", "450.57", "The minimum RTX requirement"],
# ["455.23", "455.24", "NVIDIA OptiX bug"],
# ["460.0", "460.62", "Driver crashes or image artifacts"],
# ]
"linux-aarch64".minOsVersion = 0
# "linux-aarch64".driverBlacklistRanges = [
# ["0.0", "450.57", "The minimum RTX requirement"]
# ]
[[test]]
args = []
dependencies = ["omni.kit.renderer.core"]
| 1,194 | TOML | 25.555555 | 63 | 0.623953 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/omni/kit/compatibility_checker/__init__.py | from .scripts.extension import *
| 33 | Python | 15.999992 | 32 | 0.787879 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/omni/kit/compatibility_checker/scripts/extension.py | import asyncio
import importlib
import fnmatch
import carb
import carb.settings
import omni.appwindow
import omni.ext
import omni.gpu_foundation_factory
import omni.kit.app
import omni.kit.renderer.bind
RENDERER_ENABLED_SETTINGS_PATH = "/renderer/enabled"
CHECKER_SETTINGS_PATH = "exts/omni.kit.compatibility_checker/"
def escape_for_fnmatch(s: str) -> str:
    """Escape '[' so fnmatch treats it literally instead of opening a set."""
    return "[[]".join(s.split("["))
def get_gpus_list_from_device(device):
    """Return GPU names for ``device``, dropping the first
    "Microsoft Basic Render Driver" entry if present."""
    gpus = omni.gpu_foundation_factory.get_gpus_list(device)
    if "Microsoft Basic Render Driver" in gpus:
        gpus.remove("Microsoft Basic Render Driver")
    return gpus
def get_driver_version_from_device(device):
    """Return the driver version reported by gpu_foundation for ``device``."""
    return omni.gpu_foundation_factory.get_driver_version(device)
def check_supported_gpu(gpus_list, supported_gpus):
    """Return True if any GPU name matches any supported-GPU wildcard pattern.

    Args:
        gpus_list: GPU name strings (e.g. from get_gpus_list_from_device()).
        supported_gpus: fnmatch-style patterns, e.g. "*GeForce RTX ????*".

    Returns:
        True as soon as one GPU matches one pattern, False otherwise.
    """
    for gpu in gpus_list:
        # Escape '[' so fnmatch treats it literally (same transform as
        # escape_for_fnmatch; inlined so this function is self-contained).
        candidate = gpu.replace("[", "[[]")
        for pattern in supported_gpus:
            if fnmatch.fnmatch(candidate, pattern):
                # Early return; the previous version kept scanning every
                # remaining GPU and pattern after a match was already found.
                return True
    return False
def check_driver_version(driver_version, driver_blacklist_ranges):
    """Check a driver version against blacklisted version ranges.

    Args:
        driver_version: sequence [major, minor] of the installed driver.
        driver_blacklist_ranges: list of ["from", "to", "reason"] entries;
            each range is half-open: from <= version < to.

    Returns:
        (True, []) if the driver is not blacklisted, otherwise
        (False, offending_range) for the first matching range.
    """
    # Compare (major, minor) tuples lexicographically. The previous
    # major * 100 + minor encoding collided for minors >= 100, which real
    # Linux driver versions have (e.g. 470.141 encoded equal to 471.41).
    # Tuple ordering is identical for minors < 100, so existing behavior
    # (and the tests in this extension) is preserved.
    version = tuple(int(part) for part in driver_version[:2])
    for blacklist_range in driver_blacklist_ranges:
        lower = tuple(int(part) for part in blacklist_range[0].split(".")[:2])
        upper = tuple(int(part) for part in blacklist_range[1].split(".")[:2])
        if lower <= version < upper:
            return False, blacklist_range
    return True, []
def check_os_version(min_os_version):
    """Compare the running OS build number against a required minimum.

    Returns:
        (is_supported, current_os_build_number) — is_supported is True when
        the current build number is at least ``min_os_version``.
    """
    os_version = omni.gpu_foundation_factory.get_os_build_number()
    return os_version >= min_os_version, os_version
class Extension(omni.ext.IExt):
    """Startup compatibility checker.

    A few frames after startup it checks whether the RTX renderer came up.
    If it did not, it logs — and, when the notification manager is available,
    pops up — the most likely cause: unsupported GPU, blacklisted driver
    version, or an OS build that is too old.
    """

    def __init__(self):
        super().__init__()

    def _check_rtx_renderer_state(self):
        """Return False when RTX is enabled but no "rtx" hydra engine is attached."""
        if self._module_viewport:
            enabled_renderers = self._settings.get(RENDERER_ENABLED_SETTINGS_PATH).lower().split(',')
            rtx_enabled = 'rtx' in enabled_renderers
            # Viewport 1.5 doesn't have methods to return attached context, but it also doesn't
            # quite work with multiple contexts out of the box, so we just assume default context.
            # In VP 2.0, the hope is to have better communication protocol with HydraEngine
            # so there will be no need for this whole compatibility checker anyway.
            # NOTE(review): omni.usd is not imported at the top of this module;
            # this relies on it being loaded elsewhere -- confirm.
            usd_context = omni.usd.get_context("")
            attached_hydra_engines = usd_context.get_attached_hydra_engine_names()
            if rtx_enabled and ("rtx" not in attached_hydra_engines):
                return False
        return True

    async def _compatibility_check(self):
        """Run the GPU / driver / OS checks and report any failures."""
        # Wait a couple of frames to make sure all subsystems are properly initialized
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        rtx_renderer_good = self._check_rtx_renderer_state()
        if rtx_renderer_good is True:
            # Early exit if renderer is OK, no need to check driver, GPU and OS version in this case
            return
        # Renderer wasn't initialized properly
        message = "RTX engine creation failed. RTX renderers in viewport will be disabled. Please make sure selected GPU is RTX-capable, and GPU drivers meet requirements."
        carb.log_error(message)
        if self._module_notification_manager is not None:
            def open_log():
                # Open the current log file with whatever the OS associates with it.
                log_file_path = self._settings.get("/log/file")
                import webbrowser
                webbrowser.open(log_file_path)
            dismiss_button = self._module_notification_manager.NotificationButtonInfo("Dismiss", None)
            show_log_file_button = self._module_notification_manager.NotificationButtonInfo("Open log", open_log)
            self._module_notification_manager.post_notification(message, hide_after_timeout=False, button_infos=[dismiss_button, show_log_file_button])
        # Per-platform requirements come from this extension's settings.
        platform_info = omni.kit.app.get_app().get_platform_info()
        platform_settings_path = CHECKER_SETTINGS_PATH + platform_info['platform'] + "/"
        min_os_version = self._settings.get(platform_settings_path + "minOsVersion")
        driver_blacklist_ranges = self._settings.get(platform_settings_path + "driverBlacklistRanges")
        supported_gpus = self._settings.get(CHECKER_SETTINGS_PATH + "supportedGpus")
        self._renderer = omni.kit.renderer.bind.acquire_renderer_interface()
        device = self._renderer.get_graphics_device(omni.appwindow.get_default_app_window())
        if supported_gpus is None:
            carb.log_warn("Supported GPUs list is empty, skipping GPU compatibility check!")
        else:
            gpus_list = get_gpus_list_from_device(device)
            gpu_good = check_supported_gpu(gpus_list, supported_gpus)
            if gpu_good is not True:
                message = "Available GPUs are not supported. List of available GPUs:\n%s" % (gpus_list)
                carb.log_error(message)
                if self._module_notification_manager is not None:
                    self._module_notification_manager.post_notification(message, hide_after_timeout=False)
        if driver_blacklist_ranges is None:
            carb.log_warn("Supported drivers list is empty, skipping driver compatibility check!")
        else:
            driver_version = get_driver_version_from_device(device)
            driver_good, driver_blacklist_range = check_driver_version(driver_version, driver_blacklist_ranges)
            if driver_good is not True:
                message = "Driver version %d.%d is not supported. Driver is in blacklisted range %s-%s. Reason for blacklisting: %s." % (
                    driver_version[0],
                    driver_version[1],
                    driver_blacklist_range[0],
                    driver_blacklist_range[1],
                    driver_blacklist_range[2]
                )
                carb.log_error(message)
                if self._module_notification_manager is not None:
                    self._module_notification_manager.post_notification(message, hide_after_timeout=False)
        if min_os_version is None:
            # Guard: an unrecognized platform has no "minOsVersion" setting,
            # and check_os_version() would fail comparing against None.
            carb.log_warn("Minimum OS version is not configured, skipping OS compatibility check!")
        else:
            os_good, os_version = check_os_version(min_os_version)
            if os_good is not True:
                message = "Please update the OS. Minimum required OS build number for %s is %d, current version is %d." % (platform_info['platform'], min_os_version, os_version)
                carb.log_error(message)
                if self._module_notification_manager is not None:
                    self._module_notification_manager.post_notification(message, hide_after_timeout=False)

    def on_startup(self):
        """Resolve optional dependencies and kick off the asynchronous check."""
        self._settings = carb.settings.get_settings()
        # Both modules are optional; absence only disables the related feature.
        try:
            self._module_viewport = importlib.import_module('omni.kit.viewport_legacy')
        except ImportError:
            self._module_viewport = None
        try:
            self._module_notification_manager = importlib.import_module('omni.kit.notification_manager')
        except ImportError:
            self._module_notification_manager = None
        asyncio.ensure_future(self._compatibility_check())

    def on_shutdown(self):
        self._settings = None
| 7,626 | Python | 43.086705 | 173 | 0.64385 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/omni/kit/compatibility_checker/tests/test_compatibility_checker.py | import inspect
import pathlib
import carb
import carb.settings
import carb.tokens
import omni.appwindow
import omni.kit.app
import omni.kit.compatibility_checker
import omni.kit.renderer.bind
import omni.kit.test
class CompatibilityCheckerTest(omni.kit.test.AsyncTestCase):
    """Exercises the helper functions exported by omni.kit.compatibility_checker."""

    async def setUp(self):
        self._settings = carb.settings.acquire_settings_interface()
        self._app_window_factory = omni.appwindow.acquire_app_window_factory_interface()
        self._renderer = omni.kit.renderer.bind.acquire_renderer_interface()

    def __test_name(self) -> str:
        return f"{self.__module__}.{self.__class__.__name__}.{inspect.stack()[2][3]}"

    async def tearDown(self):
        self._renderer = None
        self._app_window_factory = None
        self._settings = None

    async def test_1_check_gpu(self):
        supported_gpus = [
            "*GeForce RTX ????*",
            "*Quadro RTX ????*",
            "*RTX ?????*",
            "*TITAN RTX*"
        ]
        check = omni.kit.compatibility_checker.check_supported_gpu
        # Every one of these GPU lists must be accepted by the patterns above.
        accepted_lists = [
            ["NVIDIA GeForce RTX 2060 OC"],
            ["GeForce RTX 2080 Ti"],
            ["NVIDIA GeForce RTX 3080"],
            ["NVIDIA Quadro RTX A6000"],
            ["Quadro RTX 8000"],
            ["RTX A6000"],
            ["TITAN RTX"],
            # One supported GPU is enough, regardless of its position in the list.
            ["NVIDIA GeForce RTX 2060 OC", "Not NVIDIA not GeForce"],
            ["Not NVIDIA not GeForce", "NVIDIA GeForce RTX 2060 OC"],
        ]
        for gpus in accepted_lists:
            self.assertTrue(check(gpus, supported_gpus))
        # A GTX-only system must be rejected.
        self.assertTrue(check(["NVIDIA GeForce GTX 1060 OC"], supported_gpus) is not True)

    async def test_2_check_drivers_blacklist(self):
        driver_blacklist_ranges = [
            ["0.0", "100.0", "Minimum RTX req"],
            ["110.0", "111.0", "Bugs"],
            ["120.3", "120.5", "More bugs"],
        ]
        check = omni.kit.compatibility_checker.check_driver_version
        # (driver version, expected "driver is good" verdict); note the upper
        # bound of each blacklisted range is exclusive.
        cases = [
            ([95, 0], False),
            ([105, 0], True),
            ([110, 5], False),
            ([115, 0], True),
            ([120, 3], False),
            ([120, 4], False),
            ([120, 5], True),
            ([130, 5], True),
        ]
        for version, expected_good in cases:
            driver_good, _ = check(version, driver_blacklist_ranges)
            self.assertEqual(driver_good, expected_good)
| 4,128 | Python | 42.010416 | 143 | 0.67563 |
omniverse-code/kit/exts/omni.kit.compatibility_checker/omni/kit/compatibility_checker/tests/__init__.py | from .test_compatibility_checker import *
| 42 | Python | 20.49999 | 41 | 0.809524 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/PACKAGE-LICENSES/omni.kit.documentation.ui.style-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/config/extension.toml | [package]
version = "1.0.3"
authors = ["NVIDIA"]
title = "Omni.UI Style Documentation"
description="The interactive documentation for omni.ui style"
readme = "docs/README.md"
repository = ""
category = "Documentation"
keywords = ["ui", "style", "docs", "documentation"]
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.ui" = {}
"omni.kit.documentation.builder" = {}
[[python.module]]
name = "omni.kit.documentation.ui.style"
[documentation]
pages = [
"docs/overview.md",
"docs/styling.md",
"docs/shades.md",
"docs/fonts.md",
"docs/units.md",
"docs/shapes.md",
"docs/line.md",
"docs/buttons.md",
"docs/sliders.md",
"docs/widgets.md",
"docs/containers.md",
"docs/window.md",
"docs/CHANGELOG.md",
]
args = []
menu = "Help/API/Omni.UI Style"
title = "Omni.UI Style Documentation"
| 892 | TOML | 21.324999 | 61 | 0.653587 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/omni/kit/documentation/ui/style/ui_style_docs_window.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["UIStyleDocsWindow"]
from omni.kit.documentation.builder import DocumentationBuilderWindow
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
DOCS_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("docs")
PAGES = ["overview.md", "styling.md", "shades.md", "fonts.md", "units.md", "shapes.md", "line.md", "buttons.md",
"sliders.md", "widgets.md", "containers.md", "window.md"]
class UIStyleDocsWindow(DocumentationBuilderWindow):
    """Documentation window that shows the omni.ui style guide pages."""

    def __init__(self, title: str, **kwargs):
        # Resolve every page name to its absolute path under the docs folder.
        page_paths = []
        for page in PAGES:
            page_paths.append(str(DOCS_PATH.joinpath(page)))
        super().__init__(title, filenames=page_paths, **kwargs)
| 1,149 | Python | 43.230768 | 112 | 0.733681 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/omni/kit/documentation/ui/style/ui_style_docs_extension.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["UIStyleDocsExtension"]
import omni.ext
from .ui_style_docs_window import UIStyleDocsWindow
class UIStyleDocsExtension(omni.ext.IExt):
    """Extension entry point that owns the Omni.UI style documentation window."""

    def on_startup(self, ext_id):
        # Create the documentation window when the extension loads.
        self._window = UIStyleDocsWindow("Omni.UI Style Documentation")

    def on_shutdown(self):
        # Drop our reference, then destroy the window.
        window, self._window = self._window, None
        window.destroy()
| 775 | Python | 34.272726 | 76 | 0.756129 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/omni/kit/documentation/ui/style/__init__.py | # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .ui_style_docs_extension import *
| 472 | Python | 46.299995 | 76 | 0.805085 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/styling.md | # The Style Sheet Syntax
omni.ui Style Sheet rules are almost identical to those of HTML CSS. It applies to the style of all omni ui elements.
Style sheets consist of a sequence of style rules. A style rule is made up of a selector and a declaration. The selector specifies which widgets are affected by the rule. The declaration specifies which properties should be set on the widget. For example:
```execute 200
## Double comment means hide from shippet
from omni.ui import color as cl
##
with ui.VStack(width=0, style={"Button": {"background_color": cl("#097eff")}}):
ui.Button("Style Example")
```
In the above style rule, Button is the selector, and {"background_color": cl("#097eff")} is the declaration. The rule specifies that Button should use blue as its background color.
## Selector
There are three types of selectors, type Selector, name Selector and state Selector They are structured as:
Type Selector :: Name Selector : State Selector
e.g., `Button::okButton:hovered`
### Type Selector
where `Button` is the type selector, which matches the ui.Button's type.
### Name Selector
`okButton` is the name selector, which selects all Button instances whose object name is okButton. It is separated from the type selector with `::`.
### State Selector
`hovered` is the state selector, which by itself matches all Button instances whose state is hovered. It is separated from the other selectors with `:`.
When type, name and state selector are used together, it defines the style of all Button typed instances named as `okButton` and in hovered, while `Button:hovered` defines the style of all Button typed instances which are in hovered states.
These are the states recognized by omni.ui:
* hovered : the mouse in the widget area
* pressed : the mouse is pressing in the widget area
* selected : the widget is selected
* disabled : the widget is disabled
* checked : the widget is checked
* drop : the rectangle is accepting a drop. For example,
style = {"Rectangle:drop" : {"background_color": cl.blue}} meaning if the drop is acceptable, the rectangle is blue.
## Style Override
### Omit the selector
It's possible to omit the selector and override the property in all the widget types.
In this example, the style is set to VStack. The style will be propagated to all the widgets in VStack including VStack itself. Since only `background_color` is in the style, only the widgets which have `background_color` as the style will have the background color set. For VStack and Label which don't have `background_color`, the style is ignored. Button and FloatField get the blue background color.
```execute 200
from omni.ui import color as cl
with ui.VStack(width=400, style={"background_color": cl("#097eff")}, spacing=5):
ui.Button("One")
ui.Button("Two")
ui.FloatField()
ui.Label("Label doesn't have background_color style")
```
### Style overridden with name and state selector
In this example, we set the "Button" style for all the buttons, then override different buttons with name selector styles, e.g. "Button::one" and "Button::two". Furthermore, we also set different styles for Button::one when it is pressed or hovered, e.g. "Button::one:hovered" and "Button::one:pressed", which override the "Button::one" style when the button is pressed or hovered.
```execute 200
from omni.ui import color as cl
style1 = {
"Button": {"border_width": 0.5, "border_radius": 0.0, "margin": 5.0, "padding": 5.0},
"Button::one": {
"background_color": cl("#097eff"),
"background_gradient_color": cl("#6db2fa"),
"border_color": cl("#1d76fd"),
},
"Button.Label::one": {"color": cl.white},
"Button::one:hovered": {"background_color": cl("#006eff"), "background_gradient_color": cl("#5aaeff")},
"Button::one:pressed": {"background_color": cl("#6db2fa"), "background_gradient_color": cl("#097eff")},
"Button::two": {"background_color": cl.white, "border_color": cl("#B1B1B1")},
"Button.Label::two": {"color": cl("#272727")},
"Button::three:hovered": {
"background_color": cl("#006eff"),
"background_gradient_color": cl("#5aaeff"),
"border_color": cl("#1d76fd"),
},
"Button::four:pressed": {
"background_color": cl("#6db2fa"),
"background_gradient_color": cl("#097eff"),
"border_color": cl("#1d76fd"),
},
}
with ui.HStack(style=style1):
ui.Button("One", name="one")
ui.Button("Two", name="two")
ui.Button("Three", name="three")
ui.Button("Four", name="four")
ui.Button("Five", name="five")
```
### Style override to different levels of the widgets
It's possible to assign any style override to any level of the widgets. It can be assigned to both parents and children at the same time.
In this example, we have style_system which will be propagated to all buttons, but buttons with its own style will override the style_system.
```execute 200
from omni.ui import color as cl
style_system = {
"Button": {
"background_color": cl("#E1E1E1"),
"border_color": cl("#ADADAD"),
"border_width": 0.5,
"border_radius": 3.0,
"margin": 5.0,
"padding": 5.0,
},
"Button.Label": {
"color": cl.black,
},
"Button:hovered": {
"background_color": cl("#e5f1fb"),
"border_color": cl("#0078d7"),
},
"Button:pressed": {
"background_color": cl("#cce4f7"),
"border_color": cl("#005499"),
"border_width": 1.0
},
}
with ui.HStack(style=style_system):
ui.Button("One")
ui.Button("Two", style={"color": cl("#AAAAAA")})
ui.Button("Three", style={"background_color": cl("#097eff"), "background_gradient_color": cl("#6db2fa")})
ui.Button(
"Four", style={":hovered": {"background_color": cl("#006eff"), "background_gradient_color": cl("#5aaeff")}}
)
ui.Button(
"Five",
style={"Button:pressed": {"background_color": cl("#6db2fa"), "background_gradient_color": cl("#097eff")}},
)
```
### Customize the selector type using style_type_name_override
What if the user has a customized widget which is not a standard omni.ui one. How to define that Type Selector? In this case, We can use `style_type_name_override` to override the type name. `name` attribute is the Name Selector and State Selector can be added as usual.
Another use case is when we have a giant list of the same typed widgets, for example `Button`, but some of the Buttons are in the main window, and some of the Buttons are in the pop-up window, which we want to differentiate for easy look-up. Instead of calling all of them the same Type Selector as `Button` and only have different Name Selectors, we can override the type name for the main window buttons as `WindowButton` and the pop-up window buttons as `PopupButton`. This groups the style-sheet into categories and makes the change of the look or debug much easier.
Here is an example where we use `style_type_name_override` to override the style type name.
```execute 200
from omni.ui import color as cl
style={
"WindowButton::one": {"background_color": cl("#006eff")},
"WindowButton::one:hovered": {"background_color": cl("#006eff"), "background_gradient_color": cl("#FFAEFF")},
"PopupButton::two": {"background_color": cl("#6db2fa")},
"PopupButton::two:hovered": {"background_color": cl("#6db2fa"), "background_gradient_color": cl("#097eff")},
}
with ui.HStack(width=400, style=style, spacing=5):
ui.Button("Open", style_type_name_override="WindowButton", name="one")
ui.Button("Save", style_type_name_override="PopupButton", name="two")
```
### Default style override
From the above examples, we know that if we want to propagate the style to all children, we just need to set the style to the parent widget, but this rule doesn't apply to windows. The style set to the window will not propagate to its widgets. If we want to propagate the style to ui.Window and their widgets, we should set the default style with `ui.style.default`.
```python
from omni.ui import color as cl
ui.style.default = {
"background_color": cl.blue,
"border_radius": 10,
"border_width": 5,
"border_color": cl.red,
}
```
## Debug Color
All shapes or widgets can be styled to use a debug color that enables you to visualize their frame. It is very useful when debugging complicated ui layout with overlaps.
Here we use red as the debug_color to indicate the label widget:
```execute 200
from omni.ui import color as cl
style = {"background_color": cl("#DDDD00"), "color": cl.white, "debug_color":cl("#FF000055")}
ui.Label("Label with Debug", width=200, style=style)
```
If several widgets are adjacent, we can use the `debug_color` in the `hovered` state to differentiate the widget with others.
```execute 200
from omni.ui import color as cl
style = {
"Label": {"padding": 3, "background_color": cl("#DDDD00"),"color": cl.white},
"Label:hovered": {"debug_color": cl("#00FFFF55")},}
with ui.HStack(width=500, style=style):
ui.Label("Label 1", width=50)
ui.Label("Label 2")
ui.Label("Label 3", width=100, alignment=ui.Alignment.CENTER)
ui.Spacer()
ui.Label("Label 3", width=50)
```
## Visibility
This property holds whether the shape or widget is visible. Invisible shape or widget is not rendered, and it doesn't take part in the layout. The layout skips it.
In the following example, click the button from one to five to hide itself. The `Visible all` button brings them all back.
```execute 200
def invisible(button):
button.visible = False
def visible(buttons):
for button in buttons:
button.visible = True
buttons = []
with ui.HStack():
for n in ["One", "Two", "Three", "Four", "Five"]:
button = ui.Button(n, width=0)
button.set_clicked_fn(lambda b=button: invisible(b))
buttons.append(button)
ui.Spacer()
button = ui.Button("Visible all", width=0)
button.set_clicked_fn(lambda b=buttons: visible(b))
``` | 9,962 | Markdown | 43.878378 | 570 | 0.691929 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/widgets.md | # Widgets
## Label
Labels are used everywhere in omni.ui. They are text only objects.
Here is a list of styles you can customize on Label:
> color (color): the color of the text
> font_size (float): the size of the text
> margin (float): the distance between the label and the parent widget defined boundary
> margin_width (float): the width distance between the label and the parent widget defined boundary
> margin_height (float): the height distance between the label and the parent widget defined boundary
> alignment (enum): defines how the label is positioned in the parent defined space. There are 9 alignments supported which are quite self-explanatory.
* ui.Alignment.LEFT_CENTER
* ui.Alignment.LEFT_TOP
* ui.Alignment.LEFT_BOTTOM
* ui.Alignment.RIGHT_CENTER
* ui.Alignment.RIGHT_TOP
* ui.Alignment.RIGHT_BOTTOM
* ui.Alignment.CENTER
* ui.Alignment.CENTER_TOP
* ui.Alignment.CENTER_BOTTOM
Here are a few examples of labels:
```execute 200
from omni.ui import color as cl
ui.Label("this is a simple label", style={"color":cl.red, "margin": 5})
```
```execute 200
from omni.ui import color as cl
ui.Label("label with alignment", style={"color":cl.green, "margin": 5}, alignment=ui.Alignment.CENTER)
```
Notice that alignment could be either a property or a style.
```execute 200
from omni.ui import color as cl
label_style = {
"Label": {"font_size": 20, "color": cl.blue, "alignment":ui.Alignment.RIGHT, "margin_height": 20}
}
ui.Label("Label with style", style=label_style)
```
When the text of the Label is too long, it can be elided by `...`:
```execute 200
from omni.ui import color as cl
ui.Label(
"Label can be elided: Lorem ipsum dolor "
"sit amet, consectetur adipiscing elit, sed do "
"eiusmod tempor incididunt ut labore et dolore "
"magna aliqua. Ut enim ad minim veniam, quis "
"nostrud exercitation ullamco laboris nisi ut "
"aliquip ex ea commodo consequat. Duis aute irure "
"dolor in reprehenderit in voluptate velit esse "
"cillum dolore eu fugiat nulla pariatur. Excepteur "
"sint occaecat cupidatat non proident, sunt in "
"culpa qui officia deserunt mollit anim id est "
"laborum.",
style={"color":cl.white},
elided_text=True,
)
```
## CheckBox
A CheckBox is an option button that can be switched on (checked) or off (unchecked). Checkboxes are typically used to represent features in an application that can be enabled or disabled without affecting others.
The checkbox is implemented using the model-delegate-view pattern. The model is the central component of this system. It is the application's dynamic data structure independent of the widget. It directly manages the data, logic and rules of the checkbox. If the model is not specified, the simple one is created automatically when the object is constructed.
Here is a list of styles you can customize on CheckBox:
> color (color): the color of the tick
> background_color (color): the background color of the check box
> font_size: the size of the tick
> border_radius (float): the radius of the corner angle if the user wants to round the check box.
Default checkbox
```execute 200
with ui.HStack(width=0, spacing=5):
ui.CheckBox().model.set_value(True)
ui.CheckBox()
ui.Label("Default")
```
Disabled checkbox:
```execute 200
with ui.HStack(width=0, spacing=5):
ui.CheckBox(enabled=False).model.set_value(True)
ui.CheckBox(enabled=False)
ui.Label("Disabled")
```
In the following example, the models of two checkboxes are connected, and if one checkbox is changed, it makes another checkbox change as well.
```execute 200
from omni.ui import color as cl
with ui.HStack(width=0, spacing=5):
# Create two checkboxes
style = {"CheckBox":{
"color": cl.white, "border_radius": 0, "background_color": cl("#ff5555"), "font_size": 30}}
first = ui.CheckBox(style=style)
second = ui.CheckBox(style=style)
# Connect one to another
first.model.add_value_changed_fn(lambda a, b=second: b.model.set_value(not a.get_value_as_bool()))
second.model.add_value_changed_fn(lambda a, b=first: b.model.set_value(not a.get_value_as_bool()))
# Set the first one to True
first.model.set_value(True)
ui.Label("One of two")
```
In the following example, that is a bit more complicated, only one checkbox can be enabled.
```execute 200
from omni.ui import color as cl
style = {"CheckBox":{
"color": cl("#ff5555"), "border_radius": 5, "background_color": cl(0.35), "font_size": 20}}
with ui.HStack(width=0, spacing=5):
# Create two checkboxes
first = ui.CheckBox(style=style)
second = ui.CheckBox(style=style)
third = ui.CheckBox(style=style)
def like_radio(model, first, second):
"""Turn on the model and turn off two checkboxes"""
if model.get_value_as_bool():
model.set_value(True)
first.model.set_value(False)
second.model.set_value(False)
# Connect one to another
first.model.add_value_changed_fn(lambda a, b=second, c=third: like_radio(a, b, c))
second.model.add_value_changed_fn(lambda a, b=first, c=third: like_radio(a, b, c))
third.model.add_value_changed_fn(lambda a, b=first, c=second: like_radio(a, b, c))
# Set the first one to True
first.model.set_value(True)
ui.Label("Almost like radio box")
```
## ComboBox
The ComboBox widget is a combination of a button and a drop-down list. A ComboBox is a selection widget that displays the current item and can pop up a list of selectable items.
Here is a list of styles you can customize on ComboBox:
> color (color): the color of the combo box text and the arrow of the drop-down button
> background_color (color): the background color of the combo box
> secondary_color (color): the color of the drop-down button's background
> selected_color (color): the selected highlight color of option items
> secondary_selected_color (color): the color of the option item text
> font_size (float): the size of the text
> border_radius (float): the border radius if the user wants to round the ComboBox
> padding (float): the overall padding of the ComboBox. If padding is defined, padding_height and padding_width will have no effects.
> padding_height (float): the width padding of the drop-down list
> padding_width (float): the height padding of the drop-down list
> secondary_padding (float): the height padding between the ComboBox and options
Default ComboBox:
```execute 200
ui.ComboBox(1, "Option 1", "Option 2", "Option 3")
```
ComboBox with style
```execute 200
from omni.ui import color as cl
style={"ComboBox":{
"color": cl.red,
"background_color": cl(0.15),
"secondary_color": cl("#1111aa"),
"selected_color": cl.green,
"secondary_selected_color": cl.white,
"font_size": 15,
"border_radius": 20,
"padding_height": 2,
"padding_width": 20,
"secondary_padding": 30,
}}
with ui.VStack():
ui.ComboBox(1, "Option 1", "Option 2", "Option 3", style=style)
ui.Spacer(height=20)
```
The following example demonstrates how to add items to the ComboBox.
```execute 200
editable_combo = ui.ComboBox()
ui.Button(
"Add item to combo",
clicked_fn=lambda m=editable_combo.model: m.append_child_item(
None, ui.SimpleStringModel("Hello World")),
)
```
The minimal model implementation to have more flexibility of the data. It requires holding the value models and reimplementing two methods: `get_item_children` and `get_item_value_model`.
```execute 200
class MinimalItem(ui.AbstractItem):
def __init__(self, text):
super().__init__()
self.model = ui.SimpleStringModel(text)
class MinimalModel(ui.AbstractItemModel):
def __init__(self):
super().__init__()
self._current_index = ui.SimpleIntModel()
self._current_index.add_value_changed_fn(
lambda a: self._item_changed(None))
self._items = [
MinimalItem(text)
for text in ["Option 1", "Option 2"]
]
def get_item_children(self, item):
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self._current_index
return item.model
self._minimal_model = MinimalModel()
with ui.VStack():
ui.ComboBox(self._minimal_model, style={"font_size": 22})
ui.Spacer(height=10)
```
The example of communication between widgets. Type anything in the field and it will appear in the combo box.
```execute 200
editable_combo = None
class StringModel(ui.SimpleStringModel):
'''
String Model activated when editing is finished.
Adds item to combo box.
'''
def __init__(self):
super().__init__("")
def end_edit(self):
combo_model = editable_combo.model
        # Get all the options as a list of strings
all_options = [
combo_model.get_item_value_model(child).as_string
for child in combo_model.get_item_children()
]
# Get the current string of this model
fieldString = self.as_string
if fieldString:
if fieldString in all_options:
index = all_options.index(fieldString)
else:
# It's a new string in the combo box
combo_model.append_child_item(
None,
ui.SimpleStringModel(fieldString)
)
index = len(all_options)
combo_model.get_item_value_model().set_value(index)
self._field_model = StringModel()
def combo_changed(combo_model, item):
all_options = [
combo_model.get_item_value_model(child).as_string
for child in combo_model.get_item_children()
]
current_index = combo_model.get_item_value_model().as_int
self._field_model.as_string = all_options[current_index]
with ui.HStack():
ui.StringField(self._field_model)
editable_combo = ui.ComboBox(width=0, arrow_only=True)
editable_combo.model.add_item_changed_fn(combo_changed)
```
## TreeView
TreeView is a widget that presents a hierarchical view of information. Each item can have a number of subitems. An indentation often visualizes this in a list. An item can be expanded to reveal subitems, if any exist, and collapsed to hide subitems.
TreeView can be used in file manager applications, where it allows the user to navigate the file system directories. They are also used to present hierarchical data, such as the scene object hierarchy.
TreeView uses a model-delegate-view pattern to manage the relationship between data and the way it is presented. The separation of functionality gives developers greater flexibility to customize the presentation of items and provides a standard interface to allow a wide range of data sources to be used with other widgets.
Here is a list of styles you can customize on TreeView:
> background_color (color): the background color of the TreeView
> background_selected_color (color): the hover color of the TreeView selected item. The actual selected color of the TreeView selected item should be defined by the "background_color" of "TreeView:selected"
> secondary_color (color): the TreeView slider color
Here is a list of styles you can customize on TreeView.Item:
> margin (float): the margin between TreeView items. This will be overridden by the value of margin_width or margin_height
> margin_width (float): the margin width between TreeView items
> margin_height (float): the margin height between TreeView items
> color (color): the text color of the TreeView items
> font_size (float): the text size of the TreeView items
The following example demonstrates how to make a single level tree appear like a simple list.
```execute 200
from omni.ui import color as cl
style = {"TreeView": {
"background_color": cl("#E0FFE0"),
"background_selected_color": cl("#FF905C"),
"secondary_color": cl("#00FF00"),
},
"TreeView:selected": {"background_color": cl("#888888")},
"TreeView.Item": {
"margin": 4,
"margin_width": 20,
"color": cl("#555555"),
"font_size": 15,
},
"TreeView.Item:selected": {"color": cl("#DD2825")},
}
class CommandItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
class CommandModel(ui.AbstractItemModel):
"""
Represents the list of commands registered in Kit.
It is used to make a single level tree appear like a simple list.
"""
def __init__(self):
super().__init__()
self._commands = []
try:
import omni.kit.commands
except ModuleNotFoundError:
return
omni.kit.commands.subscribe_on_change(self._commands_changed)
self._commands_changed()
def _commands_changed(self):
"""Called by subscribe_on_change"""
self._commands = []
import omni.kit.commands
for cmd_list in omni.kit.commands.get_commands().values():
for k in cmd_list.values():
self._commands.append(CommandItem(k.__name__))
self._item_changed(None)
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
# Since we are doing a flat list, we return the children of root only.
# If it's not root we return.
return []
return self._commands
def get_item_value_model_count(self, item):
"""The number of columns"""
return 1
def get_item_value_model(self, item, column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel.
"""
if item and isinstance(item, CommandItem):
return item.name_model
with ui.ScrollingFrame(
height=400,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style_type_name_override="TreeView",
style=style
):
self._command_model = CommandModel()
tree_view = ui.TreeView(self._command_model, root_visible=False, header_visible=False)
```
The following example demonstrates reordering with drag and drop. You can drag one item of the TreeView and move to the position you want to insert the item to.
```execute 200
from omni.ui import color as cl
class ListItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
def __repr__(self):
return f'"{self.name_model.as_string}"'
class ListModel(ui.AbstractItemModel):
"""
Represents the model for lists. It's very easy to initialize it
with any string list:
string_list = ["Hello", "World"]
model = ListModel(*string_list)
ui.TreeView(model)
"""
def __init__(self, *args):
super().__init__()
self._children = [ListItem(t) for t in args]
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
# Since we are doing a flat list, we return the children of root only.
# If it's not root we return.
return []
return self._children
def get_item_value_model_count(self, item):
"""The number of columns"""
return 1
def get_item_value_model(self, item, column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel.
"""
return item.name_model
class ListModelWithReordering(ListModel):
"""
Represents the model for the list with the ability to reorder the
list with drag and drop.
"""
def __init__(self, *args):
super().__init__(*args)
def get_drag_mime_data(self, item):
"""Returns Multipurpose Internet Mail Extensions (MIME) data for be able to drop this item somewhere"""
# As we don't do Drag and Drop to the operating system, we return the string.
return item.name_model.as_string
def drop_accepted(self, target_item, source, drop_location=-1):
"""Reimplemented from AbstractItemModel. Called to highlight target when drag and drop."""
# If target_item is None, it's the drop to root. Since it's
# list model, we support reorganization of root only and we
# don't want to create a tree.
return not target_item and drop_location >= 0
def drop(self, target_item, source, drop_location=-1):
"""Reimplemented from AbstractItemModel. Called when dropping something to the item."""
try:
source_id = self._children.index(source)
except ValueError:
# Not in the list. This is the source from another model.
return
if source_id == drop_location:
# Nothing to do
return
self._children.remove(source)
if drop_location > len(self._children):
# Drop it to the end
self._children.append(source)
else:
if source_id < drop_location:
# Because when we removed source, the array became shorter
drop_location = drop_location - 1
self._children.insert(drop_location, source)
self._item_changed(None)
with ui.ScrollingFrame(
height=150,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style_type_name_override="TreeView",
):
self._list_model = ListModelWithReordering("Simplest", "List", "Model", "With", "Reordering")
tree_view = ui.TreeView(
self._list_model,
root_visible=False,
header_visible=False,
style={"TreeView.Item": {"margin": 4}},
drop_between_items=True,
)
```
The following example demonstrates the ability to edit TreeView items.
```execute 200
from omni.ui import color as cl
class FloatModel(ui.AbstractValueModel):
"""An example of custom float model that can be used for formatted string output"""
def __init__(self, value: float):
super().__init__()
self._value = value
def get_value_as_float(self):
"""Reimplemented get float"""
return self._value or 0.0
def get_value_as_string(self):
"""Reimplemented get string"""
# This string goes to the field.
if self._value is None:
return ""
# General format. This prints the number as a fixed-point
# number, unless the number is too large, in which case it
# switches to 'e' exponent notation.
return "{0:g}".format(self._value)
def set_value(self, value):
"""Reimplemented set"""
try:
value = float(value)
except ValueError:
value = None
if value != self._value:
# Tell the widget that the model is changed
self._value = value
self._value_changed()
class NameValueItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text, value):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
self.value_model = FloatModel(value)
def __repr__(self):
return f'"{self.name_model.as_string} {self.value_model.as_string}"'
class NameValueModel(ui.AbstractItemModel):
"""
Represents the model for name-value tables. It's very easy to initialize it
with any string-float list:
my_list = ["Hello", 1.0, "World", 2.0]
model = NameValueModel(*my_list)
ui.TreeView(model)
"""
def __init__(self, *args):
super().__init__()
# ["Hello", 1.0, "World", 2.0"] -> [("Hello", 1.0), ("World", 2.0)]
regrouped = zip(*(iter(args),) * 2)
self._children = [NameValueItem(*t) for t in regrouped]
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
# Since we are doing a flat list, we return the children of root only.
# If it's not root we return.
return []
return self._children
def get_item_value_model_count(self, item):
"""The number of columns"""
return 2
def get_item_value_model(self, item, column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel for the first column
        and our custom FloatModel for the second column.
"""
return item.value_model if column_id == 1 else item.name_model
class EditableDelegate(ui.AbstractItemDelegate):
"""
Delegate is the representation layer. TreeView calls the methods
of the delegate to create custom widgets for each item.
"""
def __init__(self):
super().__init__()
self.subscription = None
def build_branch(self, model, item, column_id, level, expanded):
"""Create a branch widget that opens or closes subtree"""
pass
def build_widget(self, model, item, column_id, level, expanded):
"""Create a widget per column per item"""
stack = ui.ZStack(height=20)
with stack:
value_model = model.get_item_value_model(item, column_id)
label = ui.Label(value_model.as_string)
if column_id == 1:
field = ui.FloatField(value_model, visible=False)
else:
field = ui.StringField(value_model, visible=False)
# Start editing when double clicked
stack.set_mouse_double_clicked_fn(lambda x, y, b, m, f=field, l=label: self.on_double_click(b, f, l))
def on_double_click(self, button, field, label):
"""Called when the user double-clicked the item in TreeView"""
if button != 0:
return
# Make Field visible when double clicked
field.visible = True
field.focus_keyboard()
        # When editing is finished (enter pressed or mouse clicked outside of the viewport)
self.subscription = field.model.subscribe_end_edit_fn(
lambda m, f=field, l=label: self.on_end_edit(m, f, l)
)
def on_end_edit(self, model, field, label):
"""Called when the user is editing the item and pressed Enter or clicked outside of the item"""
field.visible = False
label.text = model.as_string
self.subscription = None
with ui.ScrollingFrame(
height=100,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style_type_name_override="TreeView",
style={"Field": {"background_color": cl.black}},
):
self._name_value_model = NameValueModel("First", 0.2, "Second", 0.3, "Last", 0.4)
self._name_value_delegate = EditableDelegate()
tree_view = ui.TreeView(
self._name_value_model,
delegate=self._name_value_delegate,
root_visible=False,
header_visible=False,
style={"TreeView.Item": {"margin": 4}},
)
```
This is an example of async filling the TreeView model. It's collecting only as many as it's possible of USD prims for 0.016s and waits for the next frame, so the UI is not locked even if the USD Stage is extremely big.
To play with it, create several materials in the stage or open a stage which contains materials, click "Traverse All" or "Stop Traversing".
```execute 200
import asyncio
import time
from omni.ui import color as cl
class ListItem(ui.AbstractItem):
"""Single item of the model"""
def __init__(self, text):
super().__init__()
self.name_model = ui.SimpleStringModel(text)
def __repr__(self):
return f'"{self.name_model.as_string}"'
class ListModel(ui.AbstractItemModel):
"""
Represents the model for lists. It's very easy to initialize it
with any string list:
string_list = ["Hello", "World"]
model = ListModel(*string_list)
ui.TreeView(model)
"""
def __init__(self, *args):
super().__init__()
self._children = [ListItem(t) for t in args]
def get_item_children(self, item):
"""Returns all the children when the widget asks it."""
if item is not None:
# Since we are doing a flat list, we return the children of root only.
# If it's not root we return.
return []
return self._children
def get_item_value_model_count(self, item):
"""The number of columns"""
return 1
def get_item_value_model(self, item, column_id):
"""
Return value model.
It's the object that tracks the specific value.
In our case we use ui.SimpleStringModel.
"""
return item.name_model
class AsyncQueryModel(ListModel):
"""
This is an example of async filling the TreeView model. It's
collecting only as many as it's possible of USD prims for 0.016s
and waits for the next frame, so the UI is not locked even if the
USD Stage is extremely big.
"""
def __init__(self):
super().__init__()
self._stop_event = None
def destroy(self):
self.stop()
def stop(self):
"""Stop traversing the stage"""
if self._stop_event:
self._stop_event.set()
def reset(self):
"""Traverse the stage and keep materials"""
self.stop()
self._stop_event = asyncio.Event()
self._children.clear()
self._item_changed(None)
asyncio.ensure_future(self.__get_all(self._stop_event))
def __push_collected(self, collected):
"""Add given array to the model"""
for c in collected:
self._children.append(c)
self._item_changed(None)
async def __get_all(self, stop_event):
"""Traverse the stage portion at time, so it doesn't freeze"""
stop_event.clear()
start_time = time.time()
# The widget will be updated not faster than 60 times a second
update_every = 1.0 / 60.0
import omni.usd
from pxr import Usd
from pxr import UsdShade
context = omni.usd.get_context()
stage = context.get_stage()
if not stage:
return
# Buffer to keep the portion of the items before sending to the
# widget
collected = []
for p in stage.Traverse(
Usd.TraverseInstanceProxies(Usd.PrimIsActive and Usd.PrimIsDefined and Usd.PrimIsLoaded)
):
if stop_event.is_set():
break
if p.IsA(UsdShade.Material):
# Collect materials only
collected.append(ListItem(str(p.GetPath())))
elapsed_time = time.time()
# Loop some amount of time so fps will be about 60FPS
if elapsed_time - start_time > update_every:
start_time = elapsed_time
# Append the portion and update the widget
if collected:
self.__push_collected(collected)
collected = []
# Wait one frame to let other tasks go
await omni.kit.app.get_app().next_update_async()
self.__push_collected(collected)
try:
import omni.usd
from pxr import Usd
usd_available = True
except ModuleNotFoundError:
usd_available = False
if usd_available:
with ui.ScrollingFrame(
height=200,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style_type_name_override="TreeView",
style={"Field": {"background_color": cl.black}},
):
self._async_query_model = AsyncQueryModel()
ui.TreeView(
self._async_query_model,
root_visible=False,
header_visible=False,
style={"TreeView.Item": {"margin": 4}},
)
self._loaded_label = ui.Label("Press Button to Load Materials", name="text")
with ui.HStack():
ui.Button("Traverse All", clicked_fn=self._async_query_model.reset)
ui.Button("Stop Traversing", clicked_fn=self._async_query_model.stop)
def _item_changed(model, item):
if item is None:
count = len(model._children)
self._loaded_label.text = f"{count} Materials Traversed"
self._async_query_sub = self._async_query_model.subscribe_item_changed_fn(_item_changed)
``` | 28,899 | Markdown | 34.503685 | 357 | 0.641718 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/sliders.md | # Fields and Sliders
## Common Styling for Fields and Sliders
Here is a list of common style you can customize on Fields and Sliders:
> background_color (color): the background color of the field or slider
> border_color (color): the border color if the field or slider background has a border
> border_radius (float): the border radius if the user wants to round the field or slider
> border_width (float): the border width if the field or slider background has a border
> padding (float): the distance between the text and the border of the field or slider
> font_size (float): the size of the text in the field or slider
## Field
There are fields for string, float and int models.
Except the common style for Fields and Sliders, here is a list of styles you can customize on Field:
> color (color): the color of the text
> background_selected_color (color): the background color of the selected text
### StringField
The StringField widget is a one-line text editor. A field allows the user to enter and edit a single line of plain text. It's implemented using the model-delegate-view pattern and uses AbstractValueModel as the central component of the system.
The following example demonstrates how to connect a StringField and a Label. You can type anything into the StringField.
```execute 200
from omni.ui import color as cl
field_style = {
"Field": {
"background_color": cl(0.8),
"border_color": cl.blue,
"background_selected_color": cl.yellow,
"border_radius": 5,
"border_width": 1,
"color": cl.red,
"font_size": 20.0,
"padding": 5,
},
"Field:pressed": {"background_color": cl.white, "border_color": cl.green, "border_width": 2, "padding": 8},
}
def setText(label, text):
"""Sets text on the label"""
# This function exists because lambda cannot contain assignment
label.text = f"You wrote '{text}'"
with ui.HStack():
field = ui.StringField(style=field_style)
ui.Spacer(width=5)
label = ui.Label("", name="text")
field.model.add_value_changed_fn(lambda m, label=label: setText(label, m.get_value_as_string()))
ui.Spacer(width=10)
```
The following example demonstrates that the CheckBox's model decides the content of the Field. Clicking to edit and updating the string field value also updates the value of the CheckBox. The field can only have one of the two options, either 'True' or 'False', because the model only supports those two possibilities.
```execute 200
from omni.ui import color as cl
with ui.HStack():
field = ui.StringField(width=100, style={"background_color": cl.black})
checkbox = ui.CheckBox(width=0)
field.model = checkbox.model
```
In this example, the field can have anything because the model accepts any string. The model returns bool for checkbox, and the checkbox is unchecked when the string is empty or 'False'.
```execute 200
from omni.ui import color as cl
with ui.HStack():
field = ui.StringField(width=100, style={"background_color": cl.black})
checkbox = ui.CheckBox(width=0)
checkbox.model = field.model
```
The Field widget doesn't keep the data due to the model-delegate-view pattern. However, there are two ways to track the state of the widget. It's possible to re-implement the AbstractValueModel. The second way is using the callbacks of the model. Here is a minimal example of callbacks. When you start editing the field, you will see "Editing is started", and when you finish editing by pressing `enter`, you will see "Editing is finished".
```execute 200
def on_value(label):
label.text = "Value is changed"
def on_begin(label):
label.text = "Editing is started"
def on_end(label):
label.text = "Editing is finished"
label = ui.Label("Nothing happened", name="text")
model = ui.StringField().model
model.add_value_changed_fn(lambda m, l=label: on_value(l))
model.add_begin_edit_fn(lambda m, l=label: on_begin(l))
model.add_end_edit_fn(lambda m, l=label: on_end(l))
```
### Multiline StringField
Property `multiline` of `StringField` allows users to press enter and create a new line. It's possible to finish editing with Ctrl-Enter.
```execute 200
from omni.ui import color as cl
import inspect
field_style = {
"Field": {
"background_color": cl(0.8),
"color": cl.black,
},
"Field:pressed": {"background_color": cl(0.8)},
}
field_callbacks = lambda: field_callbacks()
with ui.Frame(style=field_style, height=200):
model = ui.SimpleStringModel("hello \nworld \n")
field = ui.StringField(model, multiline=True)
```
### FloatField and IntField
The following example shows how string field, float field and int field interact with each other. All three fields share the same default FloatModel:
```execute 200
with ui.HStack(spacing=5):
ui.Label("FloatField")
ui.Label("IntField")
ui.Label("StringField")
with ui.HStack(spacing=5):
left = ui.FloatField()
center = ui.IntField()
right = ui.StringField()
center.model = left.model
right.model = left.model
ui.Spacer(height=5)
```
## MultiField
MultiField widget groups the widgets that have multiple similar widgets to represent each item in the model. It's handy to use them for arrays and multi-component data like float3, matrix, and color.
MultiField is using `Field` as the Type Selector. Therefore, the list of styles we can customize on MultiField is the same as Field
### MultiIntField
Each of the field value could be changed by editing
```execute 200
from omni.ui import color as cl
field_style = {
"Field": {
"background_color": cl(0.8),
"border_color": cl.blue,
"border_radius": 5,
"border_width": 1,
"color": cl.red,
"font_size": 20.0,
"padding": 5,
},
"Field:pressed": {"background_color": cl.white, "border_color": cl.green, "border_width": 2, "padding": 8},
}
ui.MultiIntField(0, 0, 0, 0, style=field_style)
```
### MultiFloatField
Use MultiFloatField to construct a matrix field:
```execute 200
args = [1.0 if i % 5 == 0 else 0.0 for i in range(16)]
ui.MultiFloatField(*args, width=ui.Percent(50), h_spacing=5, v_spacing=2)
```
### MultiFloatDragField
Each of the field value could be changed by dragging
```execute 200
ui.MultiFloatDragField(0.0, 0.0, 0.0, 0.0)
```
## Sliders
The Sliders are more like a traditional slider that can be dragged and snapped where you click. The value of the slider can be shown on the slider or not, but can not be edited directly by clicking.
Except the common style for Fields and Sliders, here is a list of styles you can customize on Sliders:
> color (color): the color of the text
> secondary_color (color): the color of the handle in `ui.SliderDrawMode.HANDLE` draw_mode or the background color of the left portion of the slider in `ui.SliderDrawMode.DRAG` draw_mode
> secondary_selected_color (color): the color of the handle when selected, not useful when the draw_mode is FILLED since there is no handle drawn.
> draw_mode (enum): defines how the slider handle is drawn. There are three types of draw_mode.
* ui.SliderDrawMode.HANDLE: draw the handle as a knob at the slider position
* ui.SliderDrawMode.DRAG: the same as `ui.SliderDrawMode.HANDLE` for now
* ui.SliderDrawMode.FILLED: the handle is eventually the boundary between the `secondary_color` and `background_color`
Sliders with different draw_mode:
```execute 200
from omni.ui import color as cl
with ui.VStack(spacing=5):
ui.FloatSlider(style={"background_color": cl(0.8),
"secondary_color": cl(0.6),
"color": cl(0.1),
"draw_mode": ui.SliderDrawMode.HANDLE}
).model.set_value(0.5)
ui.FloatSlider(style={"background_color": cl(0.8),
"secondary_color": cl(0.6),
"color": cl(0.1),
"draw_mode": ui.SliderDrawMode.DRAG}
).model.set_value(0.5)
ui.FloatSlider(style={"background_color": cl(0.8),
"secondary_color": cl(0.6),
"color": cl(0.1),
"draw_mode": ui.SliderDrawMode.FILLED}
).model.set_value(0.5)
```
### FloatSlider
Default slider whose range is between 0 to 1:
```execute 200
ui.FloatSlider()
```
With defined Min/Max whose range is between min to max:
```execute 200
ui.FloatSlider(min=0, max=10)
```
With defined Min/Max from the model. Notice the model allows the value range between 0 to 100, but the FloatSlider has a more strict range between 0 to 10.
```execute 200
model = ui.SimpleFloatModel(1.0, min=0, max=100)
ui.FloatSlider(model, min=0, max=10)
```
With styles and rounded slider:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.FloatSlider(
min=-180,
max=180,
style={
"color": cl.blue,
"background_color": cl(0.8),
"draw_mode": ui.SliderDrawMode.HANDLE,
"secondary_color": cl.red,
"secondary_selected_color": cl.green,
"font_size": 20,
"border_width": 3,
"border_color": cl.black,
"border_radius": 10,
"padding": 10,
}
)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
Filled mode slider with style:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.FloatSlider(
min=-180,
max=180,
style={
"color": cl.blue,
"background_color": cl(0.8),
"draw_mode": ui.SliderDrawMode.FILLED,
"secondary_color": cl.red,
"font_size": 20,
"border_radius": 10,
"padding": 10,
}
)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
Transparent background:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.FloatSlider(
min=-180,
max=180,
style={
"draw_mode": ui.SliderDrawMode.HANDLE,
"background_color": cl.transparent,
"color": cl.red,
"border_width": 1,
"border_color": cl.white,
}
)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
Slider with transparent value. Notice the use of `step` attribute
```execute 200
from omni.ui import color as cl
with ui.HStack():
# a separate float field
field = ui.FloatField(height=15, width=50)
# a slider using field's model
ui.FloatSlider(
min=0,
max=20,
step=0.25,
model=field.model,
style={
"color":cl.transparent,
"background_color": cl(0.3),
"draw_mode": ui.SliderDrawMode.HANDLE}
)
# default value
field.model.set_value(12.0)
```
### IntSlider
Default slider whose range is between 0 to 100:
```execute 200
ui.IntSlider()
```
With defined Min/Max whose range is between min to max. Note that the handle width is much wider.
```execute 200
ui.IntSlider(min=0, max=20)
```
With style:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.IntSlider(
min=0,
max=20,
style={
"background_color": cl("#BBFFBB"),
"color": cl.purple,
"draw_mode": ui.SliderDrawMode.HANDLE,
"secondary_color": cl.green,
"secondary_selected_color": cl.red,
"font_size": 14.0,
"border_width": 3,
"border_color": cl.green,
"padding": 5,
}
).model.set_value(4)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
## Drags
The Drags are very similar to Sliders, but more like Field in the way that they behave. You can double click to edit the value, but they also provide a means to be dragged to increase or decrease the value.
Except the common style for Fields and Sliders, here is a list of styles you can customize on Drags:
> color (color): the color of the text
> secondary_color (color): the left portion of the slider in `ui.SliderDrawMode.DRAG` draw_mode
### FloatDrag
Default float drag whose range is -inf and +inf
```execute 200
ui.FloatDrag()
```
With defined Min/Max whose range is between min to max:
```execute 200
ui.FloatDrag(min=-10, max=10, step=0.1)
```
With styles and rounded shape:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.FloatDrag(
min=-180,
max=180,
style={
"color": cl.blue,
"background_color": cl(0.8),
"secondary_color": cl.red,
"font_size": 20,
"border_width": 3,
"border_color": cl.black,
"border_radius": 10,
"padding": 10,
}
)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
### IntDrag
Default int drag whose range is -inf and +inf
```execute 200
ui.IntDrag()
```
With defined Min/Max whose range is between min to max:
```execute 200
ui.IntDrag(min=-10, max=10)
```
With styles and rounded slider:
```execute 200
from omni.ui import color as cl
with ui.HStack(width=200):
ui.Spacer(width=20)
with ui.VStack():
ui.Spacer(height=5)
ui.IntDrag(
min=-180,
max=180,
style={
"color": cl.blue,
"background_color": cl(0.8),
"secondary_color": cl.purple,
"font_size": 20,
"border_width": 4,
"border_color": cl.black,
"border_radius": 20,
"padding": 5,
}
)
ui.Spacer(height=5)
ui.Spacer(width=20)
```
## ProgressBar
A ProgressBar is a widget that indicates the progress of an operation.
Except the common style for Fields and Sliders, here is a list of styles you can customize on ProgressBar:
> color (color): the color of the bar that indicates the progress value as a portion of the overall value
> secondary_color (color): the color of the text indicating the progress value
In the following example, it shows how to use ProgressBar and override the style of the overlay text.
```execute 200
from omni.ui import color as cl
class CustomProgressValueModel(ui.AbstractValueModel):
"""An example of custom float model that can be used for progress bar"""
def __init__(self, value: float):
super().__init__()
self._value = value
def set_value(self, value):
"""Reimplemented set"""
try:
value = float(value)
except ValueError:
value = None
if value != self._value:
# Tell the widget that the model is changed
self._value = value
self._value_changed()
def get_value_as_float(self):
return self._value
def get_value_as_string(self):
return "Custom Overlay"
with ui.VStack(spacing=5):
# Create ProgressBar
first = ui.ProgressBar()
# Range is [0.0, 1.0]
first.model.set_value(0.5)
second = ui.ProgressBar()
second.model.set_value(1.0)
# Overrides the overlay of ProgressBar
model = CustomProgressValueModel(0.8)
third = ui.ProgressBar(model)
third.model.set_value(0.1)
# Styling its color
fourth = ui.ProgressBar(style={"color": cl("#0000dd")})
fourth.model.set_value(0.3)
# Styling its border width
ui.ProgressBar(style={"border_width": 2, "border_color": cl("#dd0000"), "color": cl("#0000dd")}).model.set_value(0.7)
# Styling its border radius
ui.ProgressBar(style={"border_radius": 100, "color": cl("#0000dd")}).model.set_value(0.6)
# Styling its background color
ui.ProgressBar(style={"border_radius": 10, "background_color": cl("#0000dd")}).model.set_value(0.6)
# Styling the text color
ui.ProgressBar(style={"ProgressBar":{"border_radius": 30, "secondary_color": cl("#00dddd"), "font_size": 20}}).model.set_value(0.6)
# Two progress bars in a row with padding
with ui.HStack():
ui.ProgressBar(style={"color": cl("#0000dd"), "padding": 100}).model.set_value(1.0)
ui.ProgressBar().model.set_value(0.0)
```
## Tooltip
Any Widget can be augmented with a tooltip. It can take two forms: either a simple ui.Label, or a callback supplied via `tooltip_fn=` or `widget.set_tooltip_fn()`. You can create a tooltip for any widget.
Except the common style for Fields and Sliders, here is a list of styles you can customize on Tooltip:
> color (color): the color of the text of the tooltip.
> margin_width (float): the width distance between the tooltip content and the parent widget defined boundary
> margin_height (float): the height distance between the tooltip content and the parent widget defined boundary
Here is a simple label tooltip with style when you hover over a button:
```execute 200
from omni.ui import color as cl
tooltip_style = {
"Tooltip": {
"background_color": cl("#DDDD00"),
"color": cl(0.2),
"padding": 10,
"border_width": 3,
"border_color": cl.red,
"font_size": 20,
"border_radius": 10}}
ui.Button("Simple Label Tooltip", name="tooltip", width=200, tooltip="I am a text ToolTip", style=tooltip_style)
```
You can create a callback function as the tooltip, where you can create any type of widget you like in the tooltip and lay them out. Make the tooltip illustrative by including an Image, Field, Label, etc.
```execute 200
from omni.ui import color as cl
def create_tooltip():
with ui.VStack(width=200, style=tooltip_style):
with ui.HStack():
ui.Label("Fancy tooltip", width=150)
ui.IntField().model.set_value(12)
ui.Line(height=2, style={"color":cl.white})
with ui.HStack():
ui.Label("Anything is possible", width=150)
ui.StringField().model.set_value("you bet")
image_source = "resources/desktop-icons/omniverse_512.png"
ui.Image(
image_source,
width=200,
height=200,
alignment=ui.Alignment.CENTER,
style={"margin": 0},
)
tooltip_style = {
"Tooltip": {
"background_color": cl(0.2),
"border_width": 2,
"border_radius": 5,
"margin_width": 5,
"margin_height": 10
},
}
ui.Button("Callback function Tooltip", width=200, style=tooltip_style, tooltip_fn=create_tooltip)
```
You can define a fixed position for tooltip:
```execute 200
ui.Button("Fixed-position Tooltip", width=200, tooltip="Hello World", tooltip_offset_y=22)
```
You can also define a random position for tooltip:
```execute 200
import random
button = ui.Button("Random-position Tooltip", width=200, tooltip_offset_y=22)
def create_tooltip(button=button):
button.tooltip_offset_x = random.randint(0, 200)
ui.Label("Hello World")
button.set_tooltip_fn(create_tooltip)
``` | 20,428 | Markdown | 34.777583 | 437 | 0.609898 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/fonts.md | # Fonts
## Font style
It's possible to set different font types with the style. The style key 'font' should point to the font file, which allows packaging of the font to the extension. We support both TTF and OTF formats. All text-based widgets support custom fonts.
```execute 200
with ui.VStack():
ui.Label("Omniverse", style={"font":"${fonts}/OpenSans-SemiBold.ttf", "font_size": 40.0})
ui.Label("Omniverse", style={"font":"${fonts}/roboto_medium.ttf", "font_size": 40.0})
```
## Font size
It's possible to set the font size with the style.
Drag the following slider to change the size of the text.
```execute 200
def value_changed(label, value):
label.style = {"color": ui.color(0), "font_size": value.as_float}
slider = ui.FloatSlider(min=1.0, max=150.0)
slider.model.as_float = 10.0
label = ui.Label("Omniverse", style={"color": ui.color(0), "font_size": 7.0})
slider.model.add_value_changed_fn(partial(value_changed, label))
## Double comment means hide from shippet
ui.Spacer(height=30)
##
``` | 1,019 | Markdown | 35.42857 | 244 | 0.705594 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/containers.md | # Container widgets
Container widgets are used for grouping items. It's possible to add children to the container with Python's `with` statement. It's not possible to reparent items. Instead, it's necessary to remove the item and recreate a similar item under another parent.
## Stack
We have three main components: VStack, HStack, and ZStack.
Here is a list of styles you can customize on Stack:
> margin (float): the distance between the stack items and the parent widget defined boundary
> margin_width (float): the width distance between the stack items and the parent widget defined boundary
> margin_height (float): the height distance between the stack items and the parent widget defined boundary
It's possible to determine the direction of a stack with the property `direction`. Here is an example of a stack which is able to change its direction dynamically by clicking the button `Change`.
```execute 200
def rotate(dirs, stack, label):
dirs[0] = (dirs[0] + 1) % len(dirs[1])
stack.direction = dirs[1][dirs[0]]
label.text = str(stack.direction)
dirs = [
0,
[
ui.Direction.LEFT_TO_RIGHT,
ui.Direction.RIGHT_TO_LEFT,
ui.Direction.TOP_TO_BOTTOM,
ui.Direction.BOTTOM_TO_TOP,
],
]
stack = ui.Stack(ui.Direction.LEFT_TO_RIGHT, width=0, height=0, style={"margin_height": 5, "margin_width": 10})
with stack:
for name in ["One", "Two", "Three", "Four"]:
ui.Button(name)
ui.Spacer(height=100)
with ui.HStack():
ui.Label("Current direction is ", name="text", width=0)
label = ui.Label("", name="text")
button = ui.Button("Change")
button.set_clicked_fn(lambda d=dirs, s=stack, l=label: rotate(d, s, l))
rotate(dirs, stack, label)
```
### HStack
This class is used to construct horizontal layout objects.
The simplest use of the class is like this:
```execute 200
with ui.HStack(style={"margin": 10}):
ui.Button("One")
ui.Button("Two")
ui.Button("Three")
ui.Button("Four")
ui.Button("Five")
```
### VStack
The VStack class lines up widgets vertically.
```execute 200
with ui.VStack(width=100.0, style={"margin": 5}):
with ui.VStack():
ui.Button("One")
ui.Button("Two")
ui.Button("Three")
ui.Button("Four")
ui.Button("Five")
```
### ZStack
ZStack is a view that overlays its children, aligning them on top of each other. Children added later are drawn on top of the earlier ones.
```execute 200
with ui.VStack(width=100.0, style={"margin": 5}):
with ui.ZStack():
ui.Button("Very Long Text to See How Big it Can Be", height=0)
ui.Button("Another\nMultiline\nButton", width=0)
```
### Layout
Here is an example of using combined HStack and VStack:
```execute 200
with ui.VStack():
for i in range(2):
with ui.HStack():
ui.Spacer(width=50)
with ui.VStack(height=0):
ui.Button("Left {}".format(i), height=0)
ui.Button("Vertical {}".format(i), height=50)
with ui.HStack(width=ui.Fraction(2)):
ui.Button("Right {}".format(i))
ui.Button("Horizontal {}".format(i), width=ui.Fraction(2))
ui.Spacer(width=50)
```
### Spacing
Spacing is a property of Stack. It defines the non-stretchable space in pixels between child items of the layout.
Here is an example that you can change the HStack spacing by a slider
```execute 200
from omni.ui import color as cl
SPACING = 5
def set_spacing(stack, spacing):
stack.spacing = spacing
ui.Spacer(height=SPACING)
spacing_stack = ui.HStack(style={"margin": 0})
with spacing_stack:
for name in ["One", "Two", "Three", "Four"]:
ui.Button(name)
ui.Spacer(height=SPACING)
with ui.HStack(spacing=SPACING):
with ui.HStack(width=100):
ui.Spacer()
ui.Label("spacing", width=0, name="text")
with ui.HStack(width=ui.Percent(20)):
field = ui.FloatField(width=50)
slider = ui.FloatSlider(min=0, max=50, style={"color": cl.transparent})
# Link them together
slider.model = field.model
slider.model.add_value_changed_fn(
lambda m, s=spacing_stack: set_spacing(s, m.get_value_as_float()))
```
## Frame
Frame is a container that can keep only one child. Each child added to Frame overrides the previous one. This feature is used for creating dynamic layouts. The whole layout can be easily recreated with a simple callback.
Here is a list of styles you can customize on Frame:
> padding (float): the distance between the child widgets and the border of the frame
In the following example, you can drag the IntDrag to change the slider value. The buttons are recreated each time the slider changes.
```execute 200
self._recreate_ui = ui.Frame(height=40, style={"Frame":{"padding": 5}})
def changed(model, recreate_ui=self._recreate_ui):
with recreate_ui:
with ui.HStack():
for i in range(model.get_value_as_int()):
ui.Button(f"Button #{i}")
model = ui.IntDrag(min=0, max=10).model
self._sub_recreate = model.subscribe_value_changed_fn(changed)
```
Another feature of Frame is the ability to clip its child. When the content of Frame is bigger than Frame itself, the exceeding part is not drawn if the clipping is on. There are two clipping types: `horizontal_clipping` and `vertical_clipping`.
Here is an example of vertical clipping.
```execute 200
with ui.Frame(vertical_clipping=True, height=20):
ui.Label("This should be clipped vertically. " * 10, word_wrap=True)
```
## CanvasFrame
CanvasFrame is the widget that allows the user to pan and zoom its children with a mouse. It has a layout that can be infinitely moved in any direction.
Here is a list of styles you can customize on CanvasFrame:
> background_color (color): the main color of the rectangle
Here is an example of a CanvasFrame, you can scroll the middle mouse to zoom the canvas and middle mouse move to pan in it (press CTRL to avoid scrolling the docs).
```execute 200
from omni.ui import color as cl
TEXT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
)
IMAGE = "resources/icons/ov_logo_square.png"
with ui.CanvasFrame(height=256, style={"CanvasFrame":{"background_color": cl("#aa4444")}}):
with ui.VStack(height=0, spacing=10):
ui.Label(TEXT, name="text", word_wrap=True)
ui.Button("Button")
ui.Image(IMAGE, width=128, height=128)
```
## ScrollingFrame
The ScrollingFrame class provides the ability to scroll onto other widgets. ScrollingFrame is used to display the contents of children widgets within a frame. If the widget exceeds the size of the frame, the frame can provide scroll bars so that the entire area of the child widget can be viewed by scrolling.
Here is a list of styles you can customize on ScrollingFrame:
> scrollbar_size (float): the width of the scroll bar
> secondary_color (color): the color the scroll bar
> background_color (color): the background color the scroll frame
Here is an example of a ScrollingFrame, you can scroll the middle mouse to scroll the frame.
```execute 200
from omni.ui import color as cl
with ui.HStack():
left_frame = ui.ScrollingFrame(
height=250,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style={"ScrollingFrame":{
"scrollbar_size":10,
"secondary_color": cl.red,
"background_color": cl("#4444dd")}}
)
with left_frame:
with ui.VStack(height=0):
for i in range(20):
ui.Button(f"Button Left {i}")
right_frame = ui.ScrollingFrame(
height=250,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
style={"ScrollingFrame":{
"scrollbar_size":30,
"secondary_color": cl.blue,
"background_color": cl("#44dd44")}}
)
with right_frame:
with ui.VStack(height=0):
for i in range(20):
ui.Button(f"Button Right {i}")
# Synchronize the scroll position of two frames
def set_scroll_y(frame, y):
frame.scroll_y = y
left_frame.set_scroll_y_changed_fn(lambda y, frame=right_frame: set_scroll_y(frame, y))
right_frame.set_scroll_y_changed_fn(lambda y, frame=left_frame: set_scroll_y(frame, y))
```
## CollapsableFrame
CollapsableFrame is a frame widget that can hide or show its content. It has two states: expanded and collapsed. When it's collapsed, it looks like a button. If it's expanded, it looks like a button and a frame with the content. It's handy to group properties, and temporarily hide them to get more space for something else.
Here is a list of styles you can customize on CollapsableFrame:
> background_color (color): the background color of the CollapsableFrame widget
> secondary_color (color): the background color of the CollapsableFrame's header
> border_radius (float): the border radius if user wants to round the CollapsableFrame
> border_color (color): the border color if the CollapsableFrame has a border
> border_width (float): the border width if the CollapsableFrame has a border
> padding (float): the distance between the header or the content to the border of the CollapsableFrame
> margin (float): the distance between the CollapsableFrame and other widgets
Here is a default `CollapsableFrame` example:
```execute 200
with ui.CollapsableFrame("Header"):
with ui.VStack(height=0):
ui.Button("Hello World")
ui.Button("Hello World")
```
It's possible to use a custom header.
```execute 200
from omni.ui import color as cl
def custom_header(collapsed, title):
with ui.HStack():
with ui.ZStack(width=30):
ui.Circle(name="title")
with ui.HStack():
ui.Spacer()
align = ui.Alignment.V_CENTER
ui.Line(name="title", width=6, alignment=align)
ui.Spacer()
if collapsed:
with ui.VStack():
ui.Spacer()
align = ui.Alignment.H_CENTER
ui.Line(name="title", height=6, alignment=align)
ui.Spacer()
ui.Label(title, name="title")
style = {
"CollapsableFrame": {
"background_color": cl(0.5),
"secondary_color": cl("#CC211B"),
"border_radius": 10,
"border_color": cl.blue,
"border_width": 2,
},
"CollapsableFrame:hovered": {"secondary_color": cl("#FF4321")},
"CollapsableFrame:pressed": {"secondary_color": cl.red},
"Label::title": {"color": cl.white},
"Circle::title": {
"color": cl.yellow,
"background_color": cl.transparent,
"border_color": cl(0.9),
"border_width": 0.75,
},
"Line::title": {"color": cl(0.9), "border_width": 1},
}
ui.Spacer(height=5)
with ui.HStack():
ui.Spacer(width=5)
with ui.CollapsableFrame("Header", build_header_fn=custom_header, style=style):
with ui.VStack(height=0):
ui.Button("Hello World")
ui.Button("Hello World")
ui.Spacer(width=5)
ui.Spacer(height=5)
```
This example demonstrates how padding and margin work in the collapsable frame.
```execute 200
from omni.ui import color as cl
style = {
"CollapsableFrame": {
"border_color": cl("#005B96"),
"border_radius": 4,
"border_width": 2,
"padding": 0,
"margin": 0,
}
}
frame = ui.CollapsableFrame("Header", style=style)
with frame:
with ui.VStack(height=0):
ui.Button("Hello World")
ui.Button("Hello World")
def set_style(field, model, style=style, frame=frame):
frame_style = style["CollapsableFrame"]
frame_style[field] = model.get_value_as_float()
frame.set_style(style)
with ui.HStack():
ui.Label("Padding:", width=ui.Percent(10), name="text")
model = ui.FloatSlider(min=0, max=50).model
model.add_value_changed_fn(lambda m: set_style("padding", m))
with ui.HStack():
ui.Label("Margin:", width=ui.Percent(10), name="text")
model = ui.FloatSlider(min=0, max=50).model
model.add_value_changed_fn(lambda m: set_style("margin", m))
```
## Order in Stack and use of content_clipping
Due to Imgui, ScrollingFrame and CanvasFrame create a new window, meaning that if we have them in a ZStack, they don't respect the Stack order. To fix that we need to create a separate window: wrapping the widget in a `ui.Frame(separate_window=True)` fixes the order issue. If we also want mouse input in the new separate window, we use `ui.HStack(content_clipping=True)` for that.
In the following example, you won't see the red rectangle.
```execute 200
from omni.ui import color as cl
with ui.ZStack():
ui.Rectangle(width=200, height=200, style={'background_color':cl.green})
with ui.CanvasFrame(width=150, height=150):
ui.Rectangle(style={'background_color':cl.blue})
ui.Rectangle(width=100, height=100, style={'background_color':cl.red})
```
With the use of `separate_window=True` or `content_clipping=True`, you will see the red rectangle.
```execute 200
from omni.ui import color as cl
with ui.ZStack():
ui.Rectangle(width=200, height=200, style={'background_color':cl.green})
with ui.CanvasFrame(width=150, height=150):
ui.Rectangle(style={'background_color':cl.blue})
with ui.Frame(separate_window=True):
ui.Rectangle(width=100, height=100, style={'background_color':cl.red})
```
```execute 200
from omni.ui import color as cl
with ui.ZStack():
ui.Rectangle(width=200, height=200, style={'background_color':cl.green})
with ui.CanvasFrame(width=150, height=150):
ui.Rectangle(style={'background_color':cl.blue})
with ui.HStack(content_clipping=True):
ui.Rectangle(width=100, height=100, style={'background_color':cl.red})
```
In the following example, you will see the button click action is captured on Button 1.
```execute 200
from functools import partial
def clicked(name):
print(f'clicked {name}')
with ui.ZStack():
b1 = ui.Button('Button 1')
b1.set_clicked_fn(partial(clicked, b1.text))
b2 = ui.Button('Button 2')
b2.set_clicked_fn(partial(clicked, b2.text))
```
With the use of `content_clipping=True`, you will see the button click action is now fixed and captured on Button 2.
```execute 200
from functools import partial
def clicked(name):
print(f'clicked {name}')
with ui.ZStack():
b1 = ui.Button('Button 1')
b1.set_clicked_fn(partial(clicked, b1.text))
with ui.VStack(content_clipping=1):
b2 = ui.Button('Button 2')
b2.set_clicked_fn(partial(clicked, b2.text))
```
## Grid
Grid is a container that arranges its child views in a grid. Depending on the direction in which the grid grows as more children are created, we call it VGrid (grows in the vertical direction) or HGrid (grows in the horizontal direction).
There is currently no style you can customize on Grid.
### VGrid
VGrid has two modes for cell width:
- If the user sets column_count, the column width is computed from the grid width.
- If the user sets column_width, the column count is computed from the grid width.
VGrid also has two modes for height:
- If the user sets row_height, VGrid uses it to set the height for all the cells. It's the fast mode because it's considered that the cell height never changes. VGrid easily predicts which cells are visible.
- If the user sets nothing, VGrid computes the size of the children. This mode is slower than the previous one, but the advantage is that all the rows can be different custom sizes. VGrid still only draws visible items, but to predict it, it uses cache, which can be big if VGrid has hundreds of thousands of items.
Here is an example of VGrid:
```execute 200
from omni.ui import color as cl
with ui.ScrollingFrame(
height=250,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
):
with ui.VGrid(column_width=100, row_height=100):
for i in range(100):
with ui.ZStack():
ui.Rectangle(
style={
"border_color": cl.red,
"background_color": cl.white,
"border_width": 1,
"margin": 0,
}
)
ui.Label(f"{i}", style={"margin": 5})
```
### HGrid
HGrid works exactly like VGrid, but with swapped width and height.
```execute 200
from omni.ui import color as cl
with ui.ScrollingFrame(
height=250,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
):
with ui.HGrid(column_width=100, row_height=100):
for i in range(100):
with ui.ZStack():
ui.Rectangle(
style={
"border_color": cl.red,
"background_color": cl.white,
"border_width": 1,
"margin": 0,
}
)
ui.Label(f"{i}", style={"margin": 5})
```
## Placer
Placer enables you to place a widget precisely with offset. Placer's property `draggable` allows changing the position of the child widget by dragging it with the mouse.
There is currently no style you can customize on Placer.
Here is an example of 4 Placers. Two of them have fixed positions, each with a ui.Button as the child. You can see the buttons are moved to the exact place by the parent Placer, one at (100, 10) and the other at (200, 50). The third one is `draggable`, which has a Circle as the child, so that you can move the circle freely with mouse drag in the frame. The fourth one is also `draggable`, which has a ZStack as the child. The ZStack is composed of Rectangle and HStack and Label. This Placer is only draggable on the Y-axis, defined by `drag_axis=ui.Axis.Y`, so that you can only move the ZStack on the y-axis.
```execute 200
from omni.ui import color as cl
with ui.ScrollingFrame(
height=170,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
):
with ui.ZStack():
with ui.HStack():
for index in range(60):
ui.Line(width=10, style={"color": cl.black, "border_width": 0.5}, alignment=ui.Alignment.LEFT)
with ui.VStack():
ui.Line(
height=10,
width=600,
style={"color": cl.black, "border_width": 0.5},
alignment=ui.Alignment.TOP,
)
for index in range(15):
ui.Line(
height=10,
width=600,
style={"color": cl.black, "border_width": 0.5},
alignment=ui.Alignment.TOP,
)
ui.Line(
height=10,
width=600,
style={"color": cl.black, "border_width": 0.5},
alignment=ui.Alignment.TOP,
)
with ui.Placer(offset_x=100, offset_y=10):
ui.Button("moved 100px in X, and 10px in Y", width=0, height=20, name="placed")
with ui.Placer(offset_x=200, offset_y=50):
ui.Button("moved 200px X , and 50 Y", width=0, height=0)
def set_text(widget, text):
widget.text = text
with ui.Placer(draggable=True, offset_x=300, offset_y=100):
ui.Circle(radius=50, width=50, height=50, size_policy=ui.CircleSizePolicy.STRETCH, name="placed")
placer = ui.Placer(draggable=True, drag_axis=ui.Axis.Y, offset_x=400, offset_y=120)
with placer:
with ui.ZStack(width=180, height=40):
ui.Rectangle(name="placed")
with ui.HStack(spacing=5):
ui.Circle(
radius=3,
width=15,
size_policy=ui.CircleSizePolicy.FIXED,
style={"background_color": cl.white},
)
ui.Label("UP / Down", style={"color": cl.white, "font_size": 16.0})
offset_label = ui.Label("120", style={"color": cl.white})
placer.set_offset_y_changed_fn(lambda o: set_text(offset_label, str(o)))
```
The following example shows the way to interact between three Placers to create a resizable rectangle's body, left handle and right handle. The rectangle can be moved on X-axis and can be resized with small orange handles.
When multiple widgets fire the callbacks simultaneously, it's possible to collect the event data and process them one frame later using asyncio.
```execute 200
import asyncio
import omni.kit.app
from omni.ui import color as cl
def placer_track(self, id):
# Initial size
BEGIN = 50 + 100 * id
END = 120 + 100 * id
HANDLE_WIDTH = 10
class EditScope:
"""The class to avoid circular event calling"""
def __init__(self):
self.active = False
def __enter__(self):
self.active = True
def __exit__(self, type, value, traceback):
self.active = False
def __bool__(self):
return not self.active
class DoLater:
"""A helper to collect data and process it one frame later"""
def __init__(self):
self.__task = None
self.__data = []
def do(self, data):
# Collect data
self.__data.append(data)
# Update in the next frame. We need it because we want to accumulate the affected prims
if self.__task is None or self.__task.done():
self.__task = asyncio.ensure_future(self.__delayed_do())
async def __delayed_do(self):
# Wait one frame
await omni.kit.app.get_app().next_update_async()
print(f"In the previous frame the user clicked the rectangles: {self.__data}")
self.__data.clear()
self.edit = EditScope()
self.dolater = DoLater()
def start_moved(start, body, end):
if not self.edit:
# Something already edits it
return
with self.edit:
body.offset_x = start.offset_x
rect.width = ui.Pixel(end.offset_x - start.offset_x + HANDLE_WIDTH)
def body_moved(start, body, end, rect):
if not self.edit:
# Something already edits it
return
with self.edit:
start.offset_x = body.offset_x
end.offset_x = body.offset_x + rect.width.value - HANDLE_WIDTH
def end_moved(start, body, end, rect):
if not self.edit:
# Something already edits it
return
with self.edit:
body.offset_x = start.offset_x
rect.width = ui.Pixel(end.offset_x - start.offset_x + HANDLE_WIDTH)
with ui.ZStack(height=30):
# Body
body = ui.Placer(draggable=True, drag_axis=ui.Axis.X, offset_x=BEGIN)
with body:
rect = ui.Rectangle(width=END - BEGIN + HANDLE_WIDTH)
rect.set_mouse_pressed_fn(lambda x, y, b, m, id=id: self.dolater.do(id))
# Left handle
start = ui.Placer(draggable=True, drag_axis=ui.Axis.X, offset_x=BEGIN)
with start:
ui.Rectangle(width=HANDLE_WIDTH, style={"background_color": cl("#FF660099")})
# Right handle
end = ui.Placer(draggable=True, drag_axis=ui.Axis.X, offset_x=END)
with end:
ui.Rectangle(width=HANDLE_WIDTH, style={"background_color": cl("#FF660099")})
# Connect them together
start.set_offset_x_changed_fn(lambda _, s=start, b=body, e=end: start_moved(s, b, e))
body.set_offset_x_changed_fn(lambda _, s=start, b=body, e=end, r=rect: body_moved(s, b, e, r))
end.set_offset_x_changed_fn(lambda _, s=start, b=body, e=end, r=rect: end_moved(s, b, e, r))
ui.Spacer(height=5)
with ui.ZStack():
placer_track(self, 0)
placer_track(self, 1)
ui.Spacer(height=5)
```
It's possible to set `offset_x` and `offset_y` in percentages. This positions the children proportionally to the parent widget's size. If the parent size is changed, then the offset is updated accordingly.
```execute 200
from omni.ui import color as cl
# The size of the rectangle
SIZE = 20.0
with ui.ZStack(height=200):
# Background
ui.Rectangle(style={"background_color": cl(0.6)})
# Small rectangle
p = ui.Percent(50)
placer = ui.Placer(draggable=True, offset_x=p, offset_y=p)
with placer:
ui.Rectangle(width=SIZE, height=SIZE)
def clamp_x(offset):
if offset.value < 0:
placer.offset_x = ui.Percent(0)
max_per = 100.0 - SIZE / placer.computed_width * 100.0
if offset.value > max_per:
placer.offset_x = ui.Percent(max_per)
def clamp_y(offset):
if offset.value < 0:
placer.offset_y = ui.Percent(0)
max_per = 100.0 - SIZE / placer.computed_height * 100.0
if offset.value > max_per:
placer.offset_y = ui.Percent(max_per)
# Callbacks
placer.set_offset_x_changed_fn(clamp_x)
placer.set_offset_y_changed_fn(clamp_y)
```
| 25,689 | Markdown | 37.115727 | 612 | 0.647592 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/shapes.md | # Shapes
Shapes enable you to build custom widgets with specific looks. There are many shapes you can stylize: Rectangle, Circle, Ellipse, Triangle and FreeShapes of FreeRectangle, FreeCircle, FreeEllipse, FreeTriangle. In most cases those shapes will fit into the widget size which is defined by the parent widget they are in.
The FreeShapes are shapes that are independent of the layout. It is possible to stick a freeshape to the layout's widgets, and the freeshape will follow the changes of the layout automatically.
## Common Style of shapes
Here is a list of common styles you can customize on all the Shapes:
> background_color (color): the background color of the shape
> border_width (float): the border width if the shape has a border
> border_color (color): the border color if the shape has a border
## Rectangle
Rectangle is a shape with four sides and four corners. You can use Rectangle to draw rectangle shapes, or mix it with other controls e.g. using ZStack to create an advanced look.
Except the common style for shapes, here is a list of styles you can customize on Rectangle:
> background_gradient_color (color): the gradient color on the top part of the rectangle
> border_radius (float): by default a rectangle has right angles at its 4 corners; border_radius defines the radius of the corner rounding if the user wants rounded corners. We only support one border_radius across all the corners, but users can choose which corners to round.
> corner_flag (enum): defines which corner or corners to be rounded
Here is a list of the supported corner flags:
```execute 200
from omni.ui import color as cl
corner_flags = {
"ui.CornerFlag.NONE": ui.CornerFlag.NONE,
"ui.CornerFlag.TOP_LEFT": ui.CornerFlag.TOP_LEFT,
"ui.CornerFlag.TOP_RIGHT": ui.CornerFlag.TOP_RIGHT,
"ui.CornerFlag.BOTTOM_LEFT": ui.CornerFlag.BOTTOM_LEFT,
"ui.CornerFlag.BOTTOM_RIGHT": ui.CornerFlag.BOTTOM_RIGHT,
"ui.CornerFlag.TOP": ui.CornerFlag.TOP,
"ui.CornerFlag.BOTTOM": ui.CornerFlag.BOTTOM,
"ui.CornerFlag.LEFT": ui.CornerFlag.LEFT,
"ui.CornerFlag.RIGHT": ui.CornerFlag.RIGHT,
"ui.CornerFlag.ALL": ui.CornerFlag.ALL,
}
with ui.ScrollingFrame(
height=100,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style={"ScrollingFrame": {"background_color": cl.transparent}},
):
with ui.HStack():
for key, value in corner_flags.items():
with ui.ZStack():
ui.Rectangle(name="table")
with ui.VStack(style={"VStack": {"margin": 10}}):
ui.Rectangle(
style={"background_color": cl("#aa4444"), "border_radius": 20.0, "corner_flag": value}
)
ui.Spacer(height=10)
ui.Label(key, style={"color": cl.white, "font_size": 12}, alignment=ui.Alignment.CENTER)
```
Here are a few examples of Rectangle using different selections of styles:
Default rectangle which is scaled to fit:
```execute 200
with ui.Frame(height=20):
ui.Rectangle(name="default")
```
This rectangle uses its own style to control colors and shape. Notice how three colors "background_color", "border_color" and "background_gradient_color" are affecting the look of the rectangle:
```execute 200
from omni.ui import color as cl
with ui.Frame(height=40):
ui.Rectangle(style={"Rectangle":{
"background_color":cl("#aa4444"),
"border_color":cl("#22FF22"),
"background_gradient_color": cl("#4444aa"),
"border_width": 2.0,
"border_radius": 5.0}})
```
This rectangle uses fixed width and height. Notice the `border_color` is not doing anything if `border_width` is not defined.
```execute 200
from omni.ui import color as cl
with ui.Frame(height=20):
ui.Rectangle(width=40, height=10, style={"background_color":cl(0.6), "border_color":cl("#ff2222")})
```
Compose with ZStack for an advanced look
```execute 200
from omni.ui import color as cl
with ui.Frame(height=20):
with ui.ZStack(height=20):
ui.Rectangle(width=150,
style={"background_color":cl(0.6),
"border_color":cl(0.1),
"border_width": 1.0,
"border_radius": 8.0} )
with ui.HStack():
ui.Spacer(width=10)
ui.Image("resources/icons/Cloud.png", width=20, height=20 )
ui.Label( "Search Field", style={"color":cl(0.875)})
```
## FreeRectangle
FreeRectangle is a rectangle whose width and height will be determined by other widgets. The supported style list is the same as Rectangle.
Here is an example of a FreeRectangle with style following two draggable circles:
```execute 200
from omni.ui import color as cl
with ui.Frame(height=200):
with ui.ZStack():
# Four draggable rectangles that represent the control points
with ui.Placer(draggable=True, offset_x=0, offset_y=0):
control1 = ui.Circle(width=10, height=10)
with ui.Placer(draggable=True, offset_x=150, offset_y=150):
control2 = ui.Circle(width=10, height=10)
# The rectangle that fits to the control points
ui.FreeRectangle(control1, control2, style={
"background_color":cl(0.6),
"border_color":cl(0.1),
"border_width": 1.0,
"border_radius": 8.0})
```
## Circle
You can use Circle to draw a circular shape. Circle doesn't have any other style except the common style for shapes.
Here is some of the properties you can customize on Circle:
> size_policy (enum): there are two types of the size_policy, fixed and stretch.
* ui.CircleSizePolicy.FIXED: the size of the circle is defined by the radius and is fixed without being affected by the parent scaling.
* ui.CircleSizePolicy.STRETCH: the size of the circle is defined by the parent and will be stretched if the parent widget size changed.
> alignment (enum): the position of the circle in the parent defined space
> arc (enum): this property defines the way to draw a half or a quarter of the circle.
Here is a list of the supported Alignment and Arc value for the Circle:
```execute 200
from omni.ui import color as cl
alignments = {
"ui.Alignment.CENTER": ui.Alignment.CENTER,
"ui.Alignment.LEFT_TOP": ui.Alignment.LEFT_TOP,
"ui.Alignment.LEFT_CENTER": ui.Alignment.LEFT_CENTER,
"ui.Alignment.LEFT_BOTTOM": ui.Alignment.LEFT_BOTTOM,
"ui.Alignment.CENTER_TOP": ui.Alignment.CENTER_TOP,
"ui.Alignment.CENTER_BOTTOM": ui.Alignment.CENTER_BOTTOM,
"ui.Alignment.RIGHT_TOP": ui.Alignment.RIGHT_TOP,
"ui.Alignment.RIGHT_CENTER": ui.Alignment.RIGHT_CENTER,
"ui.Alignment.RIGHT_BOTTOM": ui.Alignment.RIGHT_BOTTOM,
}
ui.Label("Alignment: ")
with ui.ScrollingFrame(
height=150,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style={"ScrollingFrame": {"background_color": cl.transparent}},
):
with ui.HStack():
for key, value in alignments.items():
with ui.ZStack():
ui.Rectangle(name="table")
with ui.VStack(style={"VStack": {"margin": 10}}, spacing=10):
with ui.ZStack():
ui.Rectangle(name="table", style={"border_color":cl.white, "border_width": 1.0})
ui.Circle(
radius=10,
size_policy=ui.CircleSizePolicy.FIXED,
name="orientation",
alignment=value,
style={"background_color": cl("#aa4444")},
)
ui.Label(key, style={"color": cl.white, "font_size": 12}, alignment=ui.Alignment.CENTER)
ui.Spacer(height=10)
ui.Label("Arc: ")
with ui.ScrollingFrame(
height=150,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style={"ScrollingFrame": {"background_color": cl.transparent}},
):
with ui.HStack():
for key, value in alignments.items():
with ui.ZStack():
ui.Rectangle(name="table")
with ui.VStack(style={"VStack": {"margin": 10}}, spacing=10):
with ui.ZStack():
ui.Rectangle(name="table", style={"border_color":cl.white, "border_width": 1.0})
ui.Circle(
radius=10,
size_policy=ui.CircleSizePolicy.FIXED,
name="orientation",
arc=value,
style={
"background_color": cl("#aa4444"),
"border_color": cl.blue,
"border_width": 2,
},
)
ui.Label(key, style={"color": cl.white, "font_size": 12}, alignment=ui.Alignment.CENTER)
```
Default circle which is scaled to fit, the alignment is centered:
```execute 200
with ui.Frame(height=20):
ui.Circle(name="default")
```
This circle is scaled to fit with 100 height:
```execute 200
with ui.Frame(height=100):
ui.Circle(name="default")
```
This circle has a fixed radius of 20, the alignment is LEFT_CENTER:
```execute 200
from omni.ui import color as cl
style = {"Circle": {"background_color": cl("#1111ff"), "border_color": cl("#cc0000"), "border_width": 4}}
with ui.Frame(height=100, style=style):
with ui.HStack():
ui.Rectangle(width=40, style={"background_color": cl.white})
ui.Circle(radius=20, size_policy=ui.CircleSizePolicy.FIXED, alignment=ui.Alignment.LEFT_CENTER)
```
This circle has a fixed radius of 10, the alignment is RIGHT_CENTER
```execute 200
from omni.ui import color as cl
style = {"Circle": {"background_color": cl("#ff1111"), "border_color": cl.blue, "border_width": 2}}
with ui.Frame(height=100, width=200, style=style):
with ui.ZStack():
ui.Rectangle(style={"background_color": cl(0.4)})
ui.Circle(radius=10, size_policy=ui.CircleSizePolicy.FIXED, alignment=ui.Alignment.RIGHT_CENTER)
```
This circle has a fixed radius of 10, it has all the same style as the previous one, except its size_policy is `ui.CircleSizePolicy.STRETCH`
```execute 200
from omni.ui import color as cl
style = {"Circle": {"background_color": cl("#ff1111"), "border_color": cl.blue, "border_width": 2}}
with ui.Frame(height=100, width=200, style=style):
with ui.ZStack():
ui.Rectangle(style={"background_color": cl(0.4)})
ui.Circle(radius=10, size_policy=ui.CircleSizePolicy.STRETCH, alignment=ui.Alignment.RIGHT_CENTER)
```
## FreeCircle
FreeCircle is a circle whose radius will be determined by other widgets. The supported style list is the same as Circle.
Here is an example of a FreeCircle with style following two draggable rectangles:
```execute 200
from omni.ui import color as cl
with ui.Frame(height=200):
with ui.ZStack():
        # Two draggable rectangles that represent the control points
with ui.Placer(draggable=True, offset_x=0, offset_y=0):
control1 = ui.Rectangle(width=10, height=10)
with ui.Placer(draggable=True, offset_x=150, offset_y=150):
control2 = ui.Rectangle(width=10, height=10)
        # The circle that fits to the control points
ui.FreeCircle(control1, control2, style={
"background_color":cl.transparent,
"border_color":cl.red,
"border_width": 2.0})
```
## Ellipse
Ellipse is drawn in a rectangle bounding box, and it is always scaled to fit the rectangle's width and height. Ellipse doesn't have any other style except the common style for shapes.
Default ellipse is scaled to fit:
```execute 200
with ui.Frame(height=20, width=150):
ui.Ellipse(name="default")
```
Stylish ellipse with border and colors:
```execute 200
from omni.ui import color as cl
style = {"Ellipse": {"background_color": cl("#1111ff"), "border_color": cl("#cc0000"), "border_width": 4}}
with ui.Frame(height=100, width=50):
ui.Ellipse(style=style)
```
## FreeEllipse
FreeEllipse is an ellipse whose width and height will be determined by other widgets. The supported style list is the same as Ellipse.
Here is an example of a FreeEllipse with style following two draggable circles:
```execute 200
from omni.ui import color as cl
with ui.Frame(height=200):
with ui.ZStack():
        # Two draggable circles that represent the control points
with ui.Placer(draggable=True, offset_x=0, offset_y=0):
control1 = ui.Circle(width=10, height=10)
with ui.Placer(draggable=True, offset_x=150, offset_y=200):
control2 = ui.Circle(width=10, height=10)
        # The ellipse that fits to the control points
ui.FreeEllipse(control1, control2, style={
"background_color":cl.purple})
```
## Triangle
You can use Triangle to draw a triangle shape. Triangle doesn't have any other style except the common style for shapes.
Here are some of the properties you can customize on Triangle:
> alignment (enum): the alignment defines where the tip of the triangle is, base will be at the opposite side
Here is a list of the supported alignment value for the triangle:
```execute 200
from omni.ui import color as cl
alignments = {
"ui.Alignment.LEFT_TOP": ui.Alignment.LEFT_TOP,
"ui.Alignment.LEFT_CENTER": ui.Alignment.LEFT_CENTER,
"ui.Alignment.LEFT_BOTTOM": ui.Alignment.LEFT_BOTTOM,
"ui.Alignment.CENTER_TOP": ui.Alignment.CENTER_TOP,
"ui.Alignment.CENTER_BOTTOM": ui.Alignment.CENTER_BOTTOM,
"ui.Alignment.RIGHT_TOP": ui.Alignment.RIGHT_TOP,
"ui.Alignment.RIGHT_CENTER": ui.Alignment.RIGHT_CENTER,
"ui.Alignment.RIGHT_BOTTOM": ui.Alignment.RIGHT_BOTTOM,
}
colors = [cl.red, cl.yellow, cl.purple, cl("#ff0ff0"), cl.green, cl("#f00fff"), cl("#fff000"), cl("#aa3333")]
index = 0
with ui.ScrollingFrame(
height=160,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style={"ScrollingFrame": {"background_color": cl.transparent}},
):
with ui.HStack():
for key, value in alignments.items():
with ui.ZStack():
ui.Rectangle(name="table")
with ui.VStack(style={"VStack": {"margin": 10}}):
color = colors[index]
index = index + 1
ui.Triangle(alignment=value, style={"Triangle":{"background_color": color}})
ui.Label(key, style={"color": cl.white, "font_size": 12}, alignment=ui.Alignment.CENTER, height=20)
```
Here are a few examples of Triangle using different selections of styles:
The triangle is scaled to fit, base on the left and tip on the center right. Users can define the border_color and border_width but without background_color to make the triangle look like it's drawn in wireframe style.
```execute 200
from omni.ui import color as cl
style = {
"Triangle::default":
{
"background_color": cl.green,
"border_color": cl.white,
"border_width": 1
},
"Triangle::transparent":
{
"border_color": cl.purple,
"border_width": 4,
},
}
with ui.Frame(height=100, width=200, style=style):
with ui.HStack(spacing=10, style={"margin": 5}):
ui.Triangle(name="default")
ui.Triangle(name="transparent", alignment=ui.Alignment.CENTER_TOP)
```
## FreeTriangle
FreeTriangle is a triangle whose width and height will be determined by other widgets. The supported style list is the same as Triangle.
Here is an example of a FreeTriangle with style following two draggable rectangles. The default alignment is `ui.Alignment.RIGHT_CENTER`. We set the alignment to `ui.Alignment.CENTER_BOTTOM`.
```execute 200
from omni.ui import color as cl
with ui.Frame(height=200):
with ui.ZStack():
        # Two draggable rectangles that represent the control points
with ui.Placer(draggable=True, offset_x=0, offset_y=0):
control1 = ui.Rectangle(width=10, height=10)
with ui.Placer(draggable=True, offset_x=150, offset_y=200):
control2 = ui.Rectangle(width=10, height=10)
        # The triangle that fits to the control points
ui.FreeTriangle(control1, control2, alignment=ui.Alignment.CENTER_BOTTOM, style={
"background_color":cl.blue,
"border_color":cl.red,
"border_width": 2.0})
``` | 16,520 | Markdown | 43.292225 | 318 | 0.656416 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/buttons.md | # Buttons and Images
## Common Styling for Buttons and Images
Here is a list of common style you can customize on Buttons and Images:
> border_color (color): the border color if the button or image background has a border
> border_radius (float): the border radius if the user wants to round the button or image
> border_width (float): the border width if the button or image or image background has a border
> margin (float): the distance between the widget content and the parent widget defined boundary
> margin_width (float): the width distance between the widget content and the parent widget defined boundary
> margin_height (float): the height distance between the widget content and the parent widget defined boundary
## Button
The Button widget provides a command button. Click a button to execute a command. The command button is perhaps the most commonly used widget in any graphical user interface. It is rectangular and typically displays a text label or image describing its action.
Except the common style for Buttons and Images, here is a list of styles you can customize on Button:
> background_color (color): the background color of the button
> padding (float): the distance between the content widgets (e.g. Image or Label) and the border of the button
> stack_direction (enum): defines how the content widgets (e.g. Image or Label) on the button are placed.
There are 6 types of stack_directions supported
* ui.Direction.TOP_TO_BOTTOM : layout from top to bottom
* ui.Direction.BOTTOM_TO_TOP : layout from bottom to top
* ui.Direction.LEFT_TO_RIGHT : layout from left to right
* ui.Direction.RIGHT_TO_LEFT : layout from right to left
* ui.Direction.BACK_TO_FRONT : layout from back to front
* ui.Direction.FRONT_TO_BACK : layout from front to back
To control the style of the button content, you can customize `Button.Image` when image on button and `Button.Label` when text on button.
Here is an example showing a list of buttons with different types of the stack directions:
```execute 200
from omni.ui import color as cl
direction_flags = {
"ui.Direction.TOP_TO_BOTTOM": ui.Direction.TOP_TO_BOTTOM,
"ui.Direction.BOTTOM_TO_TOP": ui.Direction.BOTTOM_TO_TOP,
"ui.Direction.LEFT_TO_RIGHT": ui.Direction.LEFT_TO_RIGHT,
"ui.Direction.RIGHT_TO_LEFT": ui.Direction.RIGHT_TO_LEFT,
"ui.Direction.BACK_TO_FRONT": ui.Direction.BACK_TO_FRONT,
"ui.Direction.FRONT_TO_BACK": ui.Direction.FRONT_TO_BACK,
}
with ui.ScrollingFrame(
height=50,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style={"ScrollingFrame": {"background_color": cl.transparent}},
):
with ui.HStack():
for key, value in direction_flags.items():
button_style = {"Button": {"stack_direction": value}}
ui_button = ui.Button(
key,
image_url="resources/icons/Nav_Flymode.png",
image_width=24,
height=40,
style=button_style
)
```
Here is an example of two buttons. Pressing the second button makes the name of the first button longer, and pressing the first button makes its own name shorter:
```execute 200
from omni.ui import color as cl
style_system = {
"Button": {
"background_color": cl(0.85),
"border_color": cl.yellow,
"border_width": 2,
"border_radius": 5,
"padding": 5,
},
"Button.Label": {"color": cl.red, "font_size": 17},
"Button:hovered": {"background_color": cl("#E5F1FB"), "border_color": cl("#0078D7"), "border_width": 2.0},
"Button:pressed": {"background_color": cl("#CCE4F7"), "border_color": cl("#005499"), "border_width": 2.0},
}
def make_longer_text(button):
    """Prepend the word "Longer" to the button's label text."""
    current = button.text
    button.text = f"Longer {current}"
def make_shorter_text(button):
    """Drop the first space-separated word from the button's label.

    A single-word label is left unchanged.
    """
    head, sep, tail = button.text.partition(" ")
    # partition mirrors split(" ", 1): when no space exists, keep the whole text.
    button.text = tail if sep else head
with ui.HStack(style=style_system):
btn_with_text = ui.Button("Text", width=0)
ui.Button("Press me", width=0, clicked_fn=lambda b=btn_with_text: make_longer_text(b))
btn_with_text.set_clicked_fn(lambda b=btn_with_text: make_shorter_text(b))
```
Here is an example where you can tweak most of the Button's style and see the results:
```execute 200
from omni.ui import color as cl
style = {
"Button": {"stack_direction": ui.Direction.TOP_TO_BOTTOM},
"Button.Image": {
"color": cl("#99CCFF"),
"image_url": "resources/icons/Learn_128.png",
"alignment": ui.Alignment.CENTER,
},
"Button.Label": {"alignment": ui.Alignment.CENTER},
}
def direction(model, button, style=style):
    """Apply the combo-box selection as the button's stack_direction style."""
    index = model.get_item_value_model().get_value_as_int()
    choices = [
        ui.Direction.TOP_TO_BOTTOM,
        ui.Direction.BOTTOM_TO_TOP,
        ui.Direction.LEFT_TO_RIGHT,
        ui.Direction.RIGHT_TO_LEFT,
        ui.Direction.BACK_TO_FRONT,
        ui.Direction.FRONT_TO_BACK,
    ]
    style["Button"]["stack_direction"] = choices[index]
    button.set_style(style)
def align(model, button, image, style=style):
    """Apply the combo-box selection as the alignment of the button image or label.

    When `image` is truthy the alignment targets "Button.Image",
    otherwise "Button.Label".
    """
    index = model.get_item_value_model().get_value_as_int()
    choices = [
        ui.Alignment.LEFT_TOP,
        ui.Alignment.LEFT_CENTER,
        ui.Alignment.LEFT_BOTTOM,
        ui.Alignment.CENTER_TOP,
        ui.Alignment.CENTER,
        ui.Alignment.CENTER_BOTTOM,
        ui.Alignment.RIGHT_TOP,
        ui.Alignment.RIGHT_CENTER,
        ui.Alignment.RIGHT_BOTTOM,
    ]
    target = "Button.Image" if image else "Button.Label"
    style[target]["alignment"] = choices[index]
    button.set_style(style)
def layout(model, button, padding, style=style):
    """Write the slider's float value into one of the button's layout style keys.

    `padding` selects the key: 0 -> "padding", 1 -> "margin",
    2 -> "margin_width", anything else -> "margin_height".
    """
    keys = ("padding", "margin", "margin_width")
    key = keys[padding] if padding in (0, 1, 2) else "margin_height"
    style["Button"][key] = model.get_value_as_float()
    button.set_style(style)
def spacing(model, button):
    """Copy the slider's float value into the button's spacing property."""
    value = model.get_value_as_float()
    button.spacing = value
button = ui.Button("Label", style=style, width=64, height=64)
with ui.HStack(width=ui.Percent(50)):
ui.Label('"Button": {"stack_direction"}', name="text")
options = (
0,
"TOP_TO_BOTTOM",
"BOTTOM_TO_TOP",
"LEFT_TO_RIGHT",
"RIGHT_TO_LEFT",
"BACK_TO_FRONT",
"FRONT_TO_BACK",
)
model = ui.ComboBox(*options).model
model.add_item_changed_fn(lambda m, i, b=button: direction(m, b))
alignment = (
4,
"LEFT_TOP",
"LEFT_CENTER",
"LEFT_BOTTOM",
"CENTER_TOP",
"CENTER",
"CENTER_BOTTOM",
"RIGHT_TOP",
"RIGHT_CENTER",
"RIGHT_BOTTOM",
)
with ui.HStack(width=ui.Percent(50)):
ui.Label('"Button.Image": {"alignment"}', name="text")
model = ui.ComboBox(*alignment).model
model.add_item_changed_fn(lambda m, i, b=button: align(m, b, 1))
with ui.HStack(width=ui.Percent(50)):
ui.Label('"Button.Label": {"alignment"}', name="text")
model = ui.ComboBox(*alignment).model
model.add_item_changed_fn(lambda m, i, b=button: align(m, b, 0))
with ui.HStack(width=ui.Percent(50)):
ui.Label("padding", name="text")
model = ui.FloatSlider(min=0, max=500).model
model.add_value_changed_fn(lambda m, b=button: layout(m, b, 0))
with ui.HStack(width=ui.Percent(50)):
ui.Label("margin", name="text")
model = ui.FloatSlider(min=0, max=500).model
model.add_value_changed_fn(lambda m, b=button: layout(m, b, 1))
with ui.HStack(width=ui.Percent(50)):
ui.Label("margin_width", name="text")
model = ui.FloatSlider(min=0, max=500).model
model.add_value_changed_fn(lambda m, b=button: layout(m, b, 2))
with ui.HStack(width=ui.Percent(50)):
ui.Label("margin_height", name="text")
model = ui.FloatSlider(min=0, max=500).model
model.add_value_changed_fn(lambda m, b=button: layout(m, b, 3))
with ui.HStack(width=ui.Percent(50)):
ui.Label("Button.spacing", name="text")
model = ui.FloatSlider(min=0, max=50).model
model.add_value_changed_fn(lambda m, b=button: spacing(m, b))
```
## Radio Button
RadioButton is the widget that allows the user to choose only one from a predefined set of mutually exclusive options.
RadioButtons are arranged in collections of two or more buttons within a RadioCollection, which is the central component of the system and controls the behavior of all the RadioButtons in the collection.
Except the common style for Buttons and Images, here is a list of styles you can customize on RadioButton:
> background_color (color): the background color of the RadioButton
> padding (float): the distance between the RadioButton content widget (e.g. Image) and the RadioButton border
To control the style of the button image, you can customize `RadioButton.Image`. For example RadioButton.Image's image_url defines the image when it's not checked. You can define the image for checked status with `RadioButton.Image:checked` style.
Here is an example of RadioCollection which contains 5 RadioButtons with style. Also there is an IntSlider which shares the model with the RadioCollection, so that when RadioButton value or the IntSlider value changes, the other one will update too.
```execute 200
from omni.ui import color as cl
style = {
"RadioButton": {
"background_color": cl.cyan,
"margin_width": 2,
"padding": 1,
"border_radius": 0,
"border_color": cl.white,
"border_width": 1.0},
"RadioButton.Image": {
"image_url": f"../exts/omni.kit.documentation.ui.style/icons/radio_off.svg",
},
"RadioButton.Image:checked": {
"image_url": f"../exts/omni.kit.documentation.ui.style/icons/radio_on.svg"},
}
collection = ui.RadioCollection()
for i in range(5):
with ui.HStack(style=style):
ui.RadioButton(radio_collection=collection, width=30, height=30)
ui.Label(f"Option {i}", name="text")
ui.IntSlider(collection.model, min=0, max=4)
```
## ToolButton
ToolButton is functionally similar to Button, but provides a model that determines if the button is checked. This button toggles between checked (on) and unchecked (off) when the user clicks it.
Here is an example of a ToolButton:
```execute 200
def update_label(model, label):
    """Refresh the label text to report the ToolButton's current checked state."""
    state = model.get_value_as_bool()
    label.text = "The check status button is " + str(state)
with ui.VStack(spacing=5):
model = ui.ToolButton(text="click", name="toolbutton", width=100).model
checked = model.get_value_as_bool()
label = ui.Label(f"The check status button is {checked}")
model.add_value_changed_fn(lambda m, l=label: update_label(m, l))
```
## ColorWidget
The ColorWidget is a button that displays the color from the item model and can open a picker window. The color dialog's function is to allow users to choose color.
Except the common style for Buttons and Images, here is a list of styles you can customize on ColorWidget:
> background_color (color): the background color of the tooltip widget when hovering over the ColorWidget
> color (color): the text color of the tooltip widget when hovering over the ColorWidget
Here is an example of a ColorWidget with three FloatFields. The ColorWidget model is shared with the FloatFields so that users can click and edit the field value to change the ColorWidget's color, and the value change of the ColorWidget will also reflect in the value change of the FloatFields.
```execute 200
from omni.ui import color as cl
with ui.HStack(spacing=5):
color_model = ui.ColorWidget(width=0, height=0, style={"ColorWidget":{
"border_width": 2,
"border_color": cl.white,
"border_radius": 4,
"color": cl.pink,
"margin": 2
}}).model
for item in color_model.get_item_children():
component = color_model.get_item_value_model(item)
ui.FloatField(component)
```
Here is an example of a ColorWidget with three FloatDrags. The ColorWidget model is shared with the FloatDrags so that users can drag the field value to change the color, and the value change of the ColorWidget will also reflect in the value change of the FloatDrags.
```execute 200
from omni.ui import color as cl
with ui.HStack(spacing=5):
color_model = ui.ColorWidget(0.125, 0.25, 0.5, width=0, height=0, style={
"background_color": cl.pink
}).model
for item in color_model.get_item_children():
component = color_model.get_item_value_model(item)
ui.FloatDrag(component, min=0, max=1)
```
Here is an example of a ColorWidget with a ComboBox. The ColorWidget model is shared with the ComboBox. Only the value change of the ColorWidget will reflect in the value change of the ComboBox.
```execute 200
with ui.HStack(spacing=5):
color_model = ui.ColorWidget(width=0, height=0).model
ui.ComboBox(color_model)
```
Here is an interactive example with USD. You can create a Mesh in the Stage. Choose `Pixar Storm` as the renderer. Select the mesh and use this ColorWidget to change the color of the mesh. You can use `Ctrl+z` for undoing and `Ctrl+y` for redoing.
```execute 200
import omni.kit.commands
from omni.usd.commands import UsdStageHelper
from pxr import UsdGeom
from pxr import Gf
import omni.usd
class SetDisplayColorCommand(omni.kit.commands.Command, UsdStageHelper):
    """
    Undoable **Command** that changes a prim's display color.

    Unlike ChangePropertyCommand, it can also undo creation of the
    displayColor attribute (by clearing it when no previous value existed).

    Args:
        gprim (Gprim): Prim to change display color on.
        color: Value to change to.
        prev: Value to restore on undo; None means the attribute had no
            authored value before the command ran.
    """

    def __init__(self, gprim: UsdGeom.Gprim, color: Any, prev: Any):
        self._gprim = gprim
        self._color = color
        self._prev = prev

    def do(self):
        # CreateDisplayColorAttr returns the existing attribute if present.
        attr = self._gprim.CreateDisplayColorAttr()
        attr.Set([self._color])

    def undo(self):
        attr = self._gprim.GetDisplayColorAttr()
        if self._prev is None:
            # No authored value before: remove our opinion entirely.
            attr.Clear()
        else:
            attr.Set([self._prev])
omni.kit.commands.register(SetDisplayColorCommand)
class FloatModel(ui.SimpleFloatModel):
    """Per-component float model that forwards edit notifications to its owner."""

    def __init__(self, parent):
        super().__init__()
        # Weak reference: the parent item-model owns this submodel, so a
        # strong reference here would create a reference cycle.
        self._parent = weakref.ref(parent)

    def begin_edit(self):
        owner = self._parent()
        owner.begin_edit(None)

    def end_edit(self):
        owner = self._parent()
        owner.end_edit(None)
class USDColorItem(ui.AbstractItem):
    """Item wrapper pairing an AbstractItem with its per-component value model."""

    def __init__(self, model):
        super().__init__()
        self.model = model
class USDColorModel(ui.AbstractItemModel):
    """
    Item model binding a ColorWidget to the displayColor attribute of the
    currently selected USD prim. Widget edits are pushed to USD through an
    undoable command; USD changes are watched and pushed back to the widget.
    """

    def __init__(self):
        super().__init__()
        # Root model: changing it invalidates the whole item tree.
        self._root_model = ui.SimpleIntModel()
        self._root_model.add_value_changed_fn(lambda a: self._item_changed(None))
        # One submodel per color component (R, G, B).
        self._items = [USDColorItem(FloatModel(self)) for i in range(3)]
        for item in self._items:
            item.model.add_value_changed_fn(lambda a, item=item: self._on_value_changed(item))
        # Omniverse contexts and the stage-event subscription.
        self._usd_context = omni.usd.get_context()
        self._selection = self._usd_context.get_selection()
        self._events = self._usd_context.get_stage_event_stream()
        self._stage_event_sub = self._events.create_subscription_to_pop(
            self._on_stage_event, name="omni.example.ui ColorWidget stage update"
        )
        # Internal state.
        self._subscription = None
        self._gprim = None
        self._prev_color = None
        self._edit_mode_counter = 0

    def _on_stage_event(self, event):
        """Called with subscription to pop"""
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self._on_selection_changed()

    def _on_selection_changed(self):
        """Called when the user changes the selection"""
        selection = self._selection.get_selected_prim_paths()
        stage = self._usd_context.get_stage()
        self._subscription = None
        self._gprim = None
        # When TC runs tests, it's possible that stage is None
        if selection and stage:
            self._gprim = UsdGeom.Gprim.Get(stage, selection[0])
            if self._gprim:
                color_attr = self._gprim.GetDisplayColorAttr()
                usd_watcher = omni.usd.get_watcher()
                self._subscription = usd_watcher.subscribe_to_change_info_path(
                    color_attr.GetPath(), self._on_usd_changed
                )
        # Change the widget color
        self._on_usd_changed()

    def _on_value_changed(self, item):
        """Called when the submodel is changed"""
        if not self._gprim:
            return
        if self._edit_mode_counter > 0:
            # Change USD only if we are in edit mode.
            color_attr = self._gprim.CreateDisplayColorAttr()
            color = Gf.Vec3f(
                self._items[0].model.get_value_as_float(),
                self._items[1].model.get_value_as_float(),
                self._items[2].model.get_value_as_float(),
            )
            color_attr.Set([color])
        self._item_changed(item)

    def _on_usd_changed(self, path=None):
        """Called with UsdWatcher when something in USD is changed"""
        color = self._get_current_color() or Gf.Vec3f(0.0)
        for i in range(len(self._items)):
            self._items[i].model.set_value(color[i])

    def _get_current_color(self):
        """Returns color of the current object, or None if unavailable"""
        if self._gprim:
            color_attr = self._gprim.GetDisplayColorAttr()
            if color_attr:
                color_array = color_attr.Get()
                if color_array:
                    return color_array[0]

    def get_item_children(self, item):
        """Reimplemented from the base class"""
        return self._items

    def get_item_value_model(self, item, column_id):
        """Reimplemented from the base class"""
        if item is None:
            return self._root_model
        return item.model

    def begin_edit(self, item):
        """
        Reimplemented from the base class.
        Called when the user starts editing.
        """
        if self._edit_mode_counter == 0:
            # Remember the pre-edit color so the whole drag is one undo step.
            self._prev_color = self._get_current_color()
        self._edit_mode_counter += 1

    def end_edit(self, item):
        """
        Reimplemented from the base class.
        Called when the user finishes editing.
        """
        self._edit_mode_counter -= 1
        if not self._gprim or self._edit_mode_counter > 0:
            return
        color = Gf.Vec3f(
            self._items[0].model.get_value_as_float(),
            self._items[1].model.get_value_as_float(),
            self._items[2].model.get_value_as_float(),
        )
        omni.kit.commands.execute("SetDisplayColor", gprim=self._gprim, color=color, prev=self._prev_color)
with ui.HStack(spacing=5):
ui.ColorWidget(USDColorModel(), width=0)
ui.Label("Interactive ColorWidget with USD", name="text")
```
## Image
The Image type displays an image. The source of the image is specified as a URL using the source property. By default, specifying the width and height of the item makes the image scale to fit that size. This behavior can be changed by setting the `fill_policy` property, allowing the image to be stretched or scaled instead. The property alignment controls how the scaled image is aligned in the parent defined space.
Except the common style for Buttons and Images, here is a list of styles you can customize on Image:
> image_url (str): the url path of the image source
> color (color): the overlay color of the image
> corner_flag (enum): defines which corner or corners to be rounded. The supported corner flags are the same as Rectangle since Image is eventually an image on top of a rectangle under the hood.
> fill_policy (enum): defines how the Image fills the rectangle.
There are three types of fill_policy
* ui.FillPolicy.STRETCH: stretch the image to fill the entire rectangle.
* ui.FillPolicy.PRESERVE_ASPECT_FIT: uniformly to fit the image without stretching or cropping.
* ui.FillPolicy.PRESERVE_ASPECT_CROP: scaled uniformly to fill, cropping if necessary
> alignment (enum): defines how the image is positioned in the parent defined space. There are 9 alignments supported which are quite self-explanatory.
* ui.Alignment.LEFT_CENTER
* ui.Alignment.LEFT_TOP
* ui.Alignment.LEFT_BOTTOM
* ui.Alignment.RIGHT_CENTER
* ui.Alignment.RIGHT_TOP
* ui.Alignment.RIGHT_BOTTOM
* ui.Alignment.CENTER
* ui.Alignment.CENTER_TOP
* ui.Alignment.CENTER_BOTTOM
Default Image is scaled uniformly to fit without stretching or cropping (ui.FillPolicy.PRESERVE_ASPECT_FIT), and aligned to ui.Alignment.CENTER:
```execute 200
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(source)
```
The image is stretched to fit and aligned to the left
```execute 200
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(source, fill_policy=ui.FillPolicy.STRETCH, alignment=ui.Alignment.LEFT_CENTER)
```
The image is scaled uniformly to fill, cropping if necessary and aligned to the top
```execute 200
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(source, fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
alignment=ui.Alignment.CENTER_TOP)
```
The image is scaled uniformly to fit without cropping and aligned to the right. Notice the fill_policy and alignment are defined in style.
```execute 200
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(source, style={
"Image": {
"fill_policy": ui.FillPolicy.PRESERVE_ASPECT_FIT,
"alignment": ui.Alignment.RIGHT_CENTER,
"margin": 5}})
```
The image has rounded corners and an overlayed color. Note image_url is in the style dictionary.
```execute 200
from omni.ui import color as cl
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(style={"image_url": source, "border_radius": 10, "color": cl("#5eb3ff")})
```
The image is scaled uniformly to fill, cropping if necessary and aligned to the bottom, with a blue border.
```execute 200
from omni.ui import color as cl
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.Image(
source,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
alignment=ui.Alignment.CENTER_BOTTOM,
style={"Image":{
"border_width": 5,
"border_color": cl("#1ab3ff"),
"corner_flag": ui.CornerFlag.TOP,
"border_radius": 15}})
```
The image is arranged in a HStack with different margin styles defined. Note image_url is in the style dict.
```execute 200
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(height=100):
with ui.HStack(spacing =5, style={"Image":{'image_url': source}}):
ui.Image()
ui.Image(style={"Image":{"margin_height": 15}})
ui.Image()
ui.Image(style={"Image":{"margin_width": 20}})
ui.Image()
ui.Image(style={"Image":{"margin": 10}})
ui.Image()
```
It's possible to set a different image per style state. And switch them depending on the mouse hovering, selection state, etc.
```execute 200
styles = [
{
"": {"image_url": "resources/icons/Nav_Walkmode.png"},
":hovered": {"image_url": "resources/icons/Nav_Flymode.png"},
},
{
"": {"image_url": "resources/icons/Move_local_64.png"},
":hovered": {"image_url": "resources/icons/Move_64.png"},
},
{
"": {"image_url": "resources/icons/Rotate_local_64.png"},
":hovered": {"image_url": "resources/icons/Rotate_global.png"},
},
]
def set_image(model, image):
    """Apply the style matching the combo-box selection to the image widget."""
    selected = model.get_item_value_model().get_value_as_int()
    image.set_style(styles[selected])
with ui.Frame(height=80):
with ui.VStack():
image = ui.Image(width=64, height=64, style=styles[0])
with ui.HStack(width=ui.Percent(50)):
ui.Label("Select a texture to display", name="text")
model = ui.ComboBox(0, "Navigation", "Move", "Rotate").model
model.add_item_changed_fn(lambda m, i, im=image: set_image(m, im))
```
## ImageWithProvider
ImageWithProvider also displays an image just like Image. It is a much more advanced image widget. ImageWithProvider blocks until the image is loaded, Image doesn't block. Sometimes Image blinks because when the first frame is created, the image is not loaded. Users are recommended to use ImageWithProvider if the UI is updated pretty often. Because it doesn't blink when recreating.
It has almost the same style list as Image, except the fill_policy has different enum values.
> fill_policy (enum): defines how the Image fills the rectangle.
There are three types of fill_policy
* ui.IwpFillPolicy.IWP_STRETCH: stretch the image to fill the entire rectangle.
* ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT: uniformly to fit the image without stretching or cropping.
* ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_CROP: scaled uniformly to fill, cropping if necessary
The image source comes from `ImageProvider` which could be `ByteImageProvider`, `RasterImageProvider` or `VectorImageProvider`.
`RasterImageProvider` and `VectorImageProvider` are using image urls like Image. Here is an example taken from Image. Notice the fill_policy value difference.
```execute 200
from omni.ui import color as cl
source = "resources/desktop-icons/omniverse_512.png"
with ui.Frame(width=200, height=100):
ui.ImageWithProvider(
source,
style={
"ImageWithProvider": {
"border_width": 5,
"border_color": cl("#1ab3ff"),
"corner_flag": ui.CornerFlag.TOP,
"border_radius": 15,
"fill_policy": ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_CROP,
"alignment": ui.Alignment.CENTER_BOTTOM}})
```
`ByteImageProvider` is really useful to create gradient images. Here is an example:
```execute 200
self._byte_provider = ui.ByteImageProvider()
self._byte_provider.set_bytes_data([
255, 0, 0, 255, # red
255, 255, 0, 255, # yellow
0, 255, 0, 255, # green
0, 255, 255, 255, # cyan
0, 0, 255, 255], # blue
[5, 1]) # size
with ui.Frame(height=20):
ui.ImageWithProvider(self._byte_provider,fill_policy=ui.IwpFillPolicy.IWP_STRETCH)
```
## Plot
The Plot class displays a line or histogram image. The data of the image is specified as a data array or a provider function.
In addition to the common styles for Buttons and Images, here is a list of styles you can customize on Plot:
> color (color): the color of the plot, line color in the line typed plot or rectangle bar color in the histogram typed plot
> selected_color (color): the selected color of the plot, dot in the line typed plot and rectangle bar in the histogram typed plot
> background_color (color): the background color of the plot
> secondary_color (color): the color of the text and the border of the text box which shows the plot selection value
> background_selected_color (color): the background color of the text box which shows the plot selection value
Here are a couple of examples of Plots:
```execute 200
import math
from omni.ui import color as cl
data = []
for i in range(360):
data.append(math.cos(math.radians(i)))
def on_data_provider(index):
return math.sin(math.radians(index))
with ui.Frame(height=20):
with ui.HStack():
plot_1 = ui.Plot(ui.Type.LINE, -1.0, 1.0, *data, width=360, height=100,
style={"Plot":{
"color": cl.red,
"background_color": cl(0.08),
"secondary_color": cl("#aa1111"),
"selected_color": cl.green,
"background_selected_color": cl.white,
"border_width":5,
"border_color": cl.blue,
"border_radius": 20
}})
ui.Spacer(width = 20)
plot_2 = ui.Plot(ui.Type.HISTOGRAM, -1.0, 1.0, on_data_provider, 360, width=360, height=100,
style={"Plot":{
"color": cl.blue,
"background_color": cl("#551111"),
"secondary_color": cl("#11AA11"),
"selected_color": cl(0.67),
"margin_height": 10,
}})
plot_2.value_stride = 6
``` | 28,831 | Markdown | 39.211994 | 424 | 0.660296 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/CHANGELOG.md | # Changelog
The documentation for omni.ui style
## [1.0.3] - 2022-10-20
### Fixed
- Fixed font session crash
### Added
- The extension to the doc system
## [1.0.2] - 2022-07-20
### Added
- Order in Stack and use of content_clipping section
- ToolButton section
## [1.0.1] - 2022-07-20
### Changed
- Added help menu API doc entrance
- Clarified the window style
## [1.0.0] - 2022-06-15
### Added
- The initial documentation
| 428 | Markdown | 16.874999 | 52 | 0.675234 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/overview.md | # Overview
OmniUI style allows users to build customized widgets, make these widgets visually pleasant and functionally indicative with user interactions.
Each widget has its own style that can be tweaked based on its use cases and behaviors, while all widgets follow the same syntax rules. The container widgets provide a customized style for the widget layout, providing flexibility for the arrangement of elements.
Each omni.ui item has its own style that can be tweaked based on its use cases and behaviors, while all items follow the same syntax rules for the style definition.
Shades are used to have different themes for the entire ui, e.g. dark themed ui and light themed ui. Omni.ui also supports different font styles and sizes. Different length units allows users to define the widgets accurate to exact pixel or proportional to the parent widget or siblings.
Shapes are the most basic elements in the ui, which allows users to create stylish ui shapes, rectangles, circles, triangles, line and curve. Freeshapes are the extended shapes, which allows users to control some of the attributes dynamically through bounded widgets.
Widgets are mostly a combination of shapes, images or texts, which are created to be stepping stones for the entire ui window. Each of the widget has its own style to be characterized.
The container widgets provide a customized style for the widgets layout, providing flexibility for the arrangement of elements and possibility of creating more complicated and customized widgets.
| 1,528 | Markdown | 94.562494 | 287 | 0.816099 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/window.md | # Window Widgets
## MainWindow
The MainWindow represents the main window for an application. There should only be one MainWindow in each application.
Here is a list of styles you can customize on MainWindow:
> background_color (color): the background color of the main window.
> margin_height (float): the height distance between the window content and the window border.
> margin_width (float): the width distance between the window content and the window border.
Here is an example of a main window with style. Click the button to show the main window. Since the example is running within a MainWindow already, creating a new MainWindow will not run correctly in this example, but it demonstrates how to set the style of the `MainWindow`. And note the style of MainWindow is not propagated to other windows.
```execute 200
from omni.ui import color as cl
self._main_window = None
self._window1 = None
self._window2 = None
def create_main_window():
if not self._main_window:
self._main_window = ui.MainWindow()
self._main_window.main_frame.set_style({
"MainWindow": {
"background_color": cl.purple,
"margin_height": 20,
"margin_width": 10
}})
self._window1 = ui.Window("window 1", width=300, height=300)
self._window2 = ui.Window("window 2", width=300, height=300)
main_dockspace = ui.Workspace.get_window("DockSpace")
self._window1.dock_in(main_dockspace, ui.DockPosition.SAME)
self._window2.dock_in(main_dockspace, ui.DockPosition.SAME)
self._window2.focus()
self._window2.visible = True
ui.Button("click for Main Window", width=180, clicked_fn=create_main_window)
```
## Window
The window is a child window of the MainWindow. And it can be docked. You can have any type of widgets as the window content widgets.
Here is a list of styles you can customize on Window:
> background_color (color): the background color of the window.
> border_color (color): the border color if the window has a border.
> border_radius (float): the radius of the corner angle if the user wants to round the window.
> border_width (float): the border width if the window has a border.
Here is an example of a window with style. Click the button to show the window.
```execute 200
from omni.ui import color as cl
self._style_window_example = None
def create_styled_window():
if not self._style_window_example:
self._style_window_example = ui.Window("Styled Window Example", width=300, height=300)
self._style_window_example.frame.set_style({
"Window": {
"background_color": cl.blue,
"border_radius": 10,
"border_width": 5,
"border_color": cl.red,
}})
self._style_window_example.visible = True
ui.Button("click for Styled Window", width=180, clicked_fn=create_styled_window)
```
Note that a window's style is set from its frame since ui.Window itself is not a widget. We can't set style to it like other widgets. ui.Window's frame is a normal ui.Frame widget which itself doesn't have styles like `background_color` or `border_radius` (see `Container Widgets`->`Frame`). We specifically interpret the input ui.Window's frame style as the window style here. Therefore, the window style is not propagated to the content widget either just like the MainWindow.
If you want to set up a default style for the entire window, you should use `ui.style.default`. More details are in `The Style Sheet Syntax` -> `Style Override` -> `Default style override`.
## Menu
The Menu class provides a menu widget for use in menu bars, context menus, and other popup menus. It can be either a pull-down menu in a menu bar or a standalone context menu. Pull-down menus are shown by the menu bar when the user clicks on the respective item. Context menus are usually invoked by some special keyboard key or by right-clicking.
Here is a list of styles you can customize on Menu:
> color (color): the color of the menu text
> background_color (color): the background color of sub menu window
> background_selected_color (color): the background color when the current menu is selected
> border_color (color): the border color of the sub menu window if it has a border
> border_width (float): the border width of the sub menu window if it has a border
> border_radius (float): the border radius of the sub menu window if user wants to round the sub menu window
> padding (float): the padding size of the sub menu window
Here is a list of styles you can customize on MenuItem:
> color (color): the color of the menu Item text
> background_selected_color (color): the background color when the current menu is selected
Right click for the context menu with customized menu style:
```execute 200
from omni.ui import color as cl
self.context_menu = None
def show_context_menu(x, y, button, modifier, widget):
if button != 1:
return
self.context_menu = ui.Menu("Context menu",
style={
"Menu": {
"background_color": cl.blue,
"color": cl.pink,
"background_selected_color": cl.green,
"border_radius": 5,
"border_width": 2,
"border_color": cl.yellow,
"padding": 15
},
"MenuItem": {
"color": cl.white,
"background_selected_color": cl.cyan},
"Separator": {
"color": cl.red},
},)
with self.context_menu:
ui.MenuItem("Delete Shot")
ui.Separator()
ui.MenuItem("Attach Selected Camera")
with ui.Menu("Sub-menu"):
ui.MenuItem("One")
ui.MenuItem("Two")
ui.MenuItem("Three")
ui.Separator()
ui.MenuItem("Four")
with ui.Menu("Five"):
ui.MenuItem("Six")
ui.MenuItem("Seven")
self.context_menu.show()
with ui.VStack():
button = ui.Button("Right click to context menu", height=0, width=0)
button.set_mouse_pressed_fn(lambda x, y, b, m, widget=button: show_context_menu(x, y, b, m, widget))
```
Left click for the push button menu with default menu style:
```execute 200
self.pushed_menu = None
def show_pushed_menu(x, y, button, modifier, widget):
self.pushed_menu = ui.Menu("Pushed menu")
with self.pushed_menu:
ui.MenuItem("Camera 1")
ui.MenuItem("Camera 2")
ui.MenuItem("Camera 3")
ui.Separator()
with ui.Menu("More Cameras"):
ui.MenuItem("This Menu is Pushed")
ui.MenuItem("and Aligned with a widget")
self.pushed_menu.show_at(
(int)(widget.screen_position_x), (int)(widget.screen_position_y + widget.computed_content_height)
)
with ui.VStack():
button = ui.Button("Pushed Button Menu", height=0, width=0)
button.set_mouse_pressed_fn(lambda x, y, b, m, widget=button: show_pushed_menu(x, y, b, m, widget))
```
### Separator
Separator is a type of MenuItem which creates a separator line in the UI elements.
From the above example, you can see the use of Separator in Menu.
Here is a list of styles you can customize on Separator:
> color (color): the color of the Separator
## MenuBar
All the Windows in Omni.UI can have a MenuBar. To add a MenuBar to your window add this flag to your constructor: omni.ui.Window(flags=ui.WINDOW_FLAGS_MENU_BAR). The MenuBar object can then be accessed through the menu_bar read-only property on your window.
A MenuBar is a container so it is built like a Frame or Stack but only takes Menu objects as children. You can leverage the 'priority' property on the Menu to order them. They will automatically be sorted when they are added, but if you change the priority of an item then you need to explicitly call sort().
MenuBar has exactly the same style list you can customize as Menu.
Here is an example of MenuBar with style for the Window:
```execute 200
from omni.ui import color as cl
style={"MenuBar": {
"background_color": cl.blue,
"color": cl.pink,
"background_selected_color": cl.green,
"border_radius": 2,
"border_width": 1,
"border_color": cl.yellow,
"padding": 2}}
self._window_menu_example = None
def create_and_show_window_with_menu():
if not self._window_menu_example:
self._window_menu_example = ui.Window(
"Window Menu Example",
width=300,
height=300,
flags=ui.WINDOW_FLAGS_MENU_BAR | ui.WINDOW_FLAGS_NO_BACKGROUND,
)
menu_bar = self._window_menu_example.menu_bar
menu_bar.style = style
with menu_bar:
with ui.Menu("File"):
ui.MenuItem("Load")
ui.MenuItem("Save")
ui.MenuItem("Export")
with ui.Menu("Window"):
ui.MenuItem("Hide")
with self._window_menu_example.frame:
with ui.VStack():
ui.Button("This Window has a Menu")
def show_hide_menu(menubar):
menubar.visible = not menubar.visible
ui.Button("Click here to show/hide Menu", clicked_fn=lambda m=menu_bar: show_hide_menu(m))
def add_menu(menubar):
with menubar:
with ui.Menu("New Menu"):
ui.MenuItem("I don't do anything")
ui.Button("Add New Menu", clicked_fn=lambda m=menu_bar: add_menu(m))
self._window_menu_example.visible = True
with ui.HStack(width=0):
ui.Button("window with MenuBar Example", width=180, clicked_fn=create_and_show_window_with_menu)
ui.Label("this populates the menuBar", name="text", width=180, style={"margin_width": 10})
```
| 9,911 | Markdown | 43.25 | 478 | 0.649379 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/shades.md | # Shades
Shades are used to have multiple named color palettes with the ability for runtime switch. For example, one App could have several ui themes users can switch during using the App.
The shade can be defined with the following code:
```python
cl.shade(cl("#FF6600"), red=cl("#0000FF"), green=cl("#66FF00"))
```
It can be assigned to the color style. It's possible to switch the color with the following command globally:
```python
cl.set_shade("red")
```
## Example
```execute 200
from omni.ui import color as cl
from omni.ui import constant as fl
def set_color(color):
cl.example_color = color
def set_width(value):
fl.example_width = value
cl.example_color = cl.green
fl.example_width = 1.0
with ui.HStack(height=100, spacing=5):
with ui.ZStack():
ui.Rectangle(
style={
"background_color": cl.shade(
"aqua",
orange=cl.orange,
another=cl.example_color,
transparent=cl(0, 0, 0, 0),
black=cl.black,
),
"border_width": fl.shade(1, orange=4, another=8),
"border_radius": fl.one,
"border_color": cl.black,
},
)
ui.Label(
"ui.Rectangle(\n"
"\tstyle={\n"
'\t\t"background_color":\n'
"\t\t\tcl.shade(\n"
'\t\t\t\t"aqua",\n'
"\t\t\t\torange=cl(1, 0.5, 0),\n"
"\t\t\t\tanother=cl.example_color),\n"
'\t\t"border_width":\n'
"\t\t\tfl.shade(1, orange=4, another=8)})",
alignment=ui.Alignment.CENTER,
word_wrap=True,
style={"color": cl.black, "margin": 15},
)
with ui.ZStack():
ui.Rectangle(
style={
"background_color": cl.example_color,
"border_width": fl.example_width,
"border_radius": fl.one,
"border_color": cl.black,
}
)
ui.Label(
"ui.Rectangle(\n"
"\tstyle={\n"
'\t\t"background_color": cl.example_color,\n'
'\t\t"border_width": fl.example_width)})',
alignment=ui.Alignment.CENTER,
word_wrap=True,
style={"color": cl.black, "margin": 15},
)
with ui.VStack(style={"Button": {"background_color": cl("097EFF")}}):
ui.Label("Click the following buttons to change the shader of the left rectangle")
with ui.HStack():
ui.Button("cl.set_shade()", clicked_fn=partial(cl.set_shade, ""))
ui.Button('cl.set_shade("orange")', clicked_fn=partial(cl.set_shade, "orange"))
ui.Button('cl.set_shade("another")', clicked_fn=partial(cl.set_shade, "another"))
ui.Label("Click the following buttons to change the border width of the right rectangle")
with ui.HStack():
ui.Button("fl.example_width = 1", clicked_fn=partial(set_width, 1))
ui.Button("fl.example_width = 4", clicked_fn=partial(set_width, 4))
ui.Label("Click the following buttons to change the background color of both rectangles")
with ui.HStack():
ui.Button('cl.example_color = "green"', clicked_fn=partial(set_color, "green"))
ui.Button("cl.example_color = cl(0.8)", clicked_fn=partial(set_color, cl(0.8)))
## Double comment means hide from shippet
ui.Spacer(height=15)
##
```
## URL Shades Example
It's also possible to use shades for specifying shortcuts to the images and style-based paths.
```execute 200
from omni.ui import color as cl
from omni.ui.url_utils import url
def set_url(url_path: str):
url.example_url = url_path
walk = "resources/icons/Nav_Walkmode.png"
fly = "resources/icons/Nav_Flymode.png"
url.example_url = walk
with ui.HStack(height=100, spacing=5):
with ui.ZStack():
ui.Image(style={"image_url": url.example_url})
ui.Label(
'ui.Image(\n\tstyle={"image_url": cl.example_url})\n',
alignment=ui.Alignment.CENTER,
word_wrap=True,
style={"color": cl.black, "margin": 15},
)
with ui.ZStack():
ui.ImageWithProvider(
style={
"image_url": url.shade(
"resources/icons/Move_local_64.png",
another="resources/icons/Move_64.png",
orange="resources/icons/Rotate_local_64.png",
)
}
)
ui.Label(
"ui.ImageWithProvider(\n"
"\tstyle={\n"
'\t\t"image_url":\n'
"\t\t\tst.shade(\n"
'\t\t\t\t"Move_local_64.png",\n'
'\t\t\t\tanother="Move_64.png")})\n',
alignment=ui.Alignment.CENTER,
word_wrap=True,
style={"color": cl.black, "margin": 15},
)
with ui.HStack():
# buttons to change the url for the image
with ui.VStack():
ui.Button("url.example_url = Nav_Walkmode.png", clicked_fn=partial(set_url, walk))
ui.Button("url.example_url = Nav_Flymode.png", clicked_fn=partial(set_url, fly))
# buttons to switch between shades to different image
with ui.VStack():
ui.Button("ui.set_shade()", clicked_fn=partial(ui.set_shade, ""))
ui.Button('ui.set_shade("another")', clicked_fn=partial(ui.set_shade, "another"))
```
| 5,364 | Markdown | 33.612903 | 179 | 0.560962 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/line.md | # Lines and Curves
## Common Style of Lines and Curves
Here is a list of common style you can customize on all the Lines and Curves:
> color (color): the color of the line or curve
> border_width (float): the thickness of the line or curve
## Line
Line is the simplest shape that represents a straight line. It has two points, color and thickness. You can use Line to draw line shapes. Line doesn't have any other style except the common style for Lines and Curves.
Here is some of the properties you can customize on Line:
> alignment (enum): the Alignment defines where the line is in parent defined space. It is always scaled to fit.
Here is a list of the supported Alignment value for the line:
```execute 200
from omni.ui import color as cl
style ={
"Rectangle::table": {"background_color": cl.transparent, "border_color": cl(0.8), "border_width": 0.25},
"Line::demo": {"color": cl("#007777"), "border_width": 3},
"ScrollingFrame": {"background_color": cl.transparent},
}
alignments = {
"ui.Alignment.LEFT": ui.Alignment.LEFT,
"ui.Alignment.RIGHT": ui.Alignment.RIGHT,
"ui.Alignment.H_CENTER": ui.Alignment.H_CENTER,
"ui.Alignment.TOP": ui.Alignment.TOP,
"ui.Alignment.BOTTOM": ui.Alignment.BOTTOM,
"ui.Alignment.V_CENTER": ui.Alignment.V_CENTER,
}
with ui.ScrollingFrame(
height=100,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style=style,
):
with ui.HStack(height=100):
for key, value in alignments.items():
with ui.ZStack():
ui.Rectangle(name="table")
with ui.VStack(style={"VStack": {"margin": 10}}, spacing=10):
ui.Line(name="demo", alignment=value)
ui.Label(key, style={"color": cl.white, "font_size": 12}, alignment=ui.Alignment.CENTER)
```
By default, the line is scaled to fit.
```execute 200
from omni.ui import color as cl
style = {"Line::default": {"color": cl.red, "border_width": 1}}
with ui.Frame(height=50, style=style):
ui.Line(name="default")
```
Users can define the color and border_width to make customized lines.
```execute 200
from omni.ui import color as cl
with ui.Frame(height=50):
with ui.ZStack(width=200):
ui.Rectangle(style={"background_color": cl(0.4)})
ui.Line(alignment=ui.Alignment.H_CENTER, style={"border_width":5, "color": cl("#880088")})
```
## FreeLine
FreeLine is a line whose length will be determined by other widgets. The supported style list is the same as Line.
Here is an example of a FreeLine with style following two draggable circles. Notice the control widgets are not the start and end points of the line. By default, the alignment of the line is `ui.Alignment.V_CENTER`, and the line direction won't be changed by the control widgets.
```execute 200
from omni.ui import color as cl
with ui.Frame(height=200):
with ui.ZStack():
# Four draggable rectangles that represent the control points
with ui.Placer(draggable=True, offset_x=0, offset_y=0):
control1 = ui.Circle(width=10, height=10)
with ui.Placer(draggable=True, offset_x=150, offset_y=200):
control2 = ui.Circle(width=10, height=10)
# The rectangle that fits to the control points
ui.FreeLine(control1, control2, style={"color":cl.yellow})
```
## BezierCurve
BezierCurve is a shape drawn with multiple lines that has bends or turns in it. Bezier curves are used to model smooth curves that can be scaled indefinitely. BezierCurve doesn't have any other style except the common style for Lines and Curves.
Here is a BezierCurve with style:
```execute 200
from omni.ui import color as cl
style = {"BezierCurve": {"color": cl.red, "border_width": 2}}
ui.Spacer(height=2)
with ui.Frame(height=50, style=style):
ui.BezierCurve()
ui.Spacer(height=2)
```
## FreeBezierCurve
FreeBezierCurve uses two widgets to get the positions of the curve ends. This is super useful for building graph connections. The supported style list is the same as BezierCurve.
Here is an example of a FreeBezierCurve which is controlled by 4 control points.
```execute 200
from omni.ui import color as cl
with ui.ZStack(height=400):
# The Bezier tangents
tangents = [(50, 50), (-50, -50)]
# Four draggable rectangles that represent the control points
placer1 = ui.Placer(draggable=True, offset_x=0, offset_y=0)
with placer1:
rect1 = ui.Rectangle(width=20, height=20)
placer2 = ui.Placer(draggable=True, offset_x=50, offset_y=50)
with placer2:
rect2 = ui.Rectangle(width=20, height=20)
placer3 = ui.Placer(draggable=True, offset_x=100, offset_y=100)
with placer3:
rect3 = ui.Rectangle(width=20, height=20)
placer4 = ui.Placer(draggable=True, offset_x=150, offset_y=150)
with placer4:
rect4 = ui.Rectangle(width=20, height=20)
# The bezier curve
curve = ui.FreeBezierCurve(rect1, rect4, style={"color": cl.red, "border_width": 5})
curve.start_tangent_width = ui.Pixel(tangents[0][0])
curve.start_tangent_height = ui.Pixel(tangents[0][1])
curve.end_tangent_width = ui.Pixel(tangents[1][0])
curve.end_tangent_height = ui.Pixel(tangents[1][1])
# The logic of moving the control points
def left_moved(_):
x = placer1.offset_x
y = placer1.offset_y
tangent = tangents[0]
placer2.offset_x = x + tangent[0]
placer2.offset_y = y + tangent[1]
def right_moved(_):
x = placer4.offset_x
y = placer4.offset_y
tangent = tangents[1]
placer3.offset_x = x + tangent[0]
placer3.offset_y = y + tangent[1]
def left_tangent_moved(_):
x1 = placer1.offset_x
y1 = placer1.offset_y
x2 = placer2.offset_x
y2 = placer2.offset_y
tangent = (x2 - x1, y2 - y1)
tangents[0] = tangent
curve.start_tangent_width = ui.Pixel(tangent[0])
curve.start_tangent_height = ui.Pixel(tangent[1])
def right_tangent_moved(_):
x1 = placer4.offset_x
y1 = placer4.offset_y
x2 = placer3.offset_x
y2 = placer3.offset_y
tangent = (x2 - x1, y2 - y1)
tangents[1] = tangent
curve.end_tangent_width = ui.Pixel(tangent[0])
curve.end_tangent_height = ui.Pixel(tangent[1])
# Callback for moving the control points
placer1.set_offset_x_changed_fn(left_moved)
placer1.set_offset_y_changed_fn(left_moved)
placer2.set_offset_x_changed_fn(left_tangent_moved)
placer2.set_offset_y_changed_fn(left_tangent_moved)
placer3.set_offset_x_changed_fn(right_tangent_moved)
placer3.set_offset_y_changed_fn(right_tangent_moved)
placer4.set_offset_x_changed_fn(right_moved)
placer4.set_offset_y_changed_fn(right_moved)
``` | 6,767 | Markdown | 38.811764 | 279 | 0.67578 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/README.md | # The documentation for omni.ui style
The interactive documentation for omni.ui style | 86 | Markdown | 27.999991 | 47 | 0.825581 |
omniverse-code/kit/exts/omni.kit.documentation.ui.style/docs/units.md | # Length Units
The Framework UI offers several different units for expressing length: Pixel, Percent and Fraction. There is no restriction on where certain units should be used.
## Pixel
Pixel is the size in pixels, scaled with the HiDPI scale factor. Pixel is the default unit. If a number is not specified to be a certain unit, it is Pixel, e.g. `width=100` means `width=ui.Pixel(100)`.
```execute 200
with ui.HStack():
ui.Button("40px", width=ui.Pixel(40))
ui.Button("60px", width=ui.Pixel(60))
ui.Button("100px", width=100)
ui.Button("120px", width=120)
ui.Button("150px", width=150)
```
## Percent
Percent and Fraction units make it possible to specify sizes relative to the parent size. 1 Percent is 1/100 of the parent size.
```execute 200
with ui.HStack():
ui.Button("5%", width=ui.Percent(5))
ui.Button("10%", width=ui.Percent(10))
ui.Button("15%", width=ui.Percent(15))
ui.Button("20%", width=ui.Percent(20))
ui.Button("25%", width=ui.Percent(25))
```
## Fraction
Fraction length is made to take the available space of the parent widget and then divide it among all the child widgets with Fraction length in proportion to their Fraction factor.
```execute 200
with ui.HStack():
ui.Button("One", width=ui.Fraction(1))
ui.Button("Two", width=ui.Fraction(2))
ui.Button("Three", width=ui.Fraction(3))
ui.Button("Four", width=ui.Fraction(4))
ui.Button("Five", width=ui.Fraction(5))
```
| 1,462 | Markdown | 36.51282 | 206 | 0.697674 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial23.rst | .. _ogn_tutorial_cpu_gpu_extended:
Tutorial 23 - Extended Attributes On The GPU
============================================
Extended attributes are no different from other types of attributes with respect to where their memory will be located.
The difference is that there is a slightly different API for accessing their data, as illustrated by these examples.
This node also illustrates the new concept of having a node create an ABI function override that handles the runtime
type resolution of extended attribute types. In this case when any of the two input attributes or one output attribute
become resolved then the other two attributes are resolved to the same type, if possible.
OgnTutorialCpuGpuExtended.ogn
-----------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.CpuGpuExtended" with an input **'any'**
attribute on the CPU, an input **'any'** attribute on the GPU, and an output whose memory location is decided at runtime
by a boolean.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial23/OgnTutorialCpuGpuExtended.ogn
:linenos:
:language: json
OgnTutorialCpuGpuExtended.cpp
-----------------------------
The *cpp* file contains the implementation of the compute method. It sums two inputs on either the
CPU or GPU based on the input boolean. For simplicity only the **float[3][]** attribute type is processed, with
all others resulting in a compute failure.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial23/OgnTutorialCpuGpuExtended.cpp
:linenos:
:language: c++
OgnTutorialCpuGpuExtendedPy.py
------------------------------
The *py* file contains the same algorithm as the C++ node, with the node implementation language being different.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial23/OgnTutorialCpuGpuExtendedPy.py
:linenos:
:language: python
| 1,977 | reStructuredText | 48.449999 | 125 | 0.723824 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial3.rst | .. _ogn_tutorial_abi:
Tutorial 3 - ABI Override Node
==============================
Although the .ogn format creates an easy-to-use interface to the ABI of the OmniGraph node and the associated
data model, there may be cases where you want to override the ABI to perform special processing.
OgnTutorialABI.ogn
------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.Abi", in its first
version, with a simple description. The single attribute serves mostly to provide a framework for the ABI discussion.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial3/OgnTutorialABI.ogn
:linenos:
:language: json
OgnTutorialABI.cpp
------------------
The *cpp* file contains the implementation of the node class with every possible
ABI method replaced with customized processing. The node still functions the same as any other node, although it
is forced to write a lot of extra boilerplate code to do so.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial3/OgnTutorialABI.cpp
:linenos:
:language: c++
Node Type Metadata
------------------
This file introduces the *metadata* keyword, whose value is a dictionary of key/value pairs associated with the
node type that may be extracted using the ABI metadata functions. These are not persisted in any files and so must be
set either in the .ogn file or in an override of the **initializeType()** method in the node definition.
Exclusions
----------
Note the use of the **exclude** keyword in the .ogn file. This allows you to prevent generation of any of the default
files. In this case, since the ABI is handling everything the Python database will not be able to access the node's
information so it is excluded.
| 1,798 | reStructuredText | 45.128204 | 117 | 0.731924 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial18.rst | .. _ogn_tutorial_state:
Tutorial 18 - Node With Internal State
======================================
This node illustrates how you can use internal state information, so long as you inform OmniGraph that you are
doing so in order for it to make more intelligent execution scheduling decisions.
The advantage of using internal state data rather than state attributes is that the data can be in any structure
you choose, not just those supported by OmniGraph. The disadvantage is that being opaque, none of the generic UI
will be able to show information about that data.
OgnTutorialState.ogn
--------------------
The *.ogn* file containing the implementation of a node named "omni.graph.tutorials.State". Unlike Python nodes with
internal state, the C++ nodes do not require an empty `"state"` section, as the presence of state information is
inferred from the data members in the node implementation class (i.e. `mIncrementValue` in this node).
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial18/OgnTutorialState.ogn
:linenos:
:language: json
OgnTutorialState.cpp
--------------------
The *.cpp* file contains the compute method and the internal state information used to run the algorithm.
By adding non-static class members to your node OmniGraph will know to instantiate a unique instance of your
node for every evaluation context, letting you use those members as state data. The data in the node will be
invisible to OmniGraph as a whole and will be persistent between evaluations of the node.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial18/OgnTutorialState.cpp
:linenos:
:language: cpp
| 1,708 | reStructuredText | 49.264704 | 116 | 0.745902 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial17.rst | .. _ogn_tutorial_state_attributes_py:
Tutorial 17 - Python State Attributes Node
==========================================
This node illustrates how you can use state attributes. These are attributes that are not meant to be connected to
other nodes as they maintain a node's internal state, persistent from one evaluation to the next.
As they are persistent, care must be taken that they be initialized properly. This can take the form of a reset
flag, as seen on this node, state flag values with known defaults that describe the validity of the state attribute
data, or using a checksum on inputs, among other possibilities.
State attributes can be both read and written, like output attributes. The presence of state attributes will also
inform the evaluators on what type of parallel scheduling is appropriate.
These attributes provide a similar functionality to those found in :ref:`ogn_tutorial_state_py`, except that being
node attributes the structure is visible to the outside world, making it easier to construct UI and visualizers for
it.
OgnTutorialStateAttributesPy.ogn
--------------------------------
The *.ogn* file containing the implementation of a node named "omni.graph.tutorials.StateAttributesPy", with a couple of state
attributes that both read and write values during the compute.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial17/OgnTutorialStateAttributesPy.ogn
:linenos:
:language: json
OgnTutorialStateAttributesPy.py
-------------------------------
The *.py* file contains the compute method that uses the state attributes to run the algorithm.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial17/OgnTutorialStateAttributesPy.py
:linenos:
:language: python
Test Script
-----------
The .ogn test infrastructure currently only supports single evaluation, which will not be sufficient to test state
attribute manipulations. This test script runs multiple evaluations and verifies that the state information is
updated as expected after each evaluation.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/python/tests/test_tutorial_state_attributes_py.py
:linenos:
:language: python
| 2,258 | reStructuredText | 47.063829 | 127 | 0.748893 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial28.rst | .. _ogn_tutorial_simple_ogn_compute_vectorized_node:
Tutorial 28 - Node with simple OGN computeVectorized
====================================================
This tutorial demonstrates how to compose nodes that implements a very simple computeVectorized function. It shows how to access the data,
using the different available methods.
OgnTutorialVectorizedPassthrough.ogn
------------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.TutorialVectorizedPassThrough", which takes
a floating point value as input and simply copies it to its output.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial28/OgnTutorialVectorizedPassthrough.ogn
:linenos:
:language: json
OgnTutorialVectorizedPassthrough.cpp
------------------------------------
The *cpp* file contains the implementation of the node. It takes a floating point input and simply copies it to its output,
demonstrating how to handle a vectorized compute. It shows what would be the implementation for a regular `compute` function,
and the different way it could implement a `computeVectorized` function.
- method #1: by switching the entire database to the next instance, while performing the computation in a loop
- method #2: by directly indexing attributes for the right instance in a loop
- method #3: by retrieving the raw data, and working directly with it
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial28/OgnTutorialVectorizedPassthrough.cpp
:linenos:
:language: c++
| 1,593 | reStructuredText | 52.133332 | 138 | 0.723792 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial7.rst | .. _ogn_tutorial_roleData:
Tutorial 7 - Role-Based Data Node
=================================
The role-based data node creates one input attribute and one output attribute of each of the role-based type. A
role-based type is defined as data with an underlying simple data type, with an interpretation of that simple
data, called a "role".
Examples of roles are **color**, **quat**, and **timecode**. For consistency the tuple counts for each of the
roles are included in the declaration so that the "shape" of the underlying data is more obvious.
OgnTutorialRoleData.ogn
-----------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.RoleData", which has one
input and one output attribute of each Role type.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial7/OgnTutorialRoleData.ogn
:linenos:
:language: json
OgnTutorialRoleData.cpp
-----------------------
The *cpp* file contains the implementation of the compute method, which modifies
each of the inputs by adding 1.0 to all components to create outputs that have different, testable, values.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial7/OgnTutorialRoleData.cpp
:linenos:
:language: c++
Role-Based Attribute Access
---------------------------
Here is a subset of the generated role-based attributes from the database. It contains color attributes, a matrix
attribute, and a timecode attribute. Notice how the underlying data types of the attributes are provided, again
with the ability to cast to different interface classes with the same memory layout.
+----------------------+-------------------+
| Database Function | Returned Type |
+======================+===================+
| inputs.a_color3d() | const GfVec3d& |
+----------------------+-------------------+
| inputs.a_color4f() | const GfVec4f& |
+----------------------+-------------------+
| inputs.a_frame() | const GfMatrix4d& |
+----------------------+-------------------+
| inputs.a_timecode() | const double& |
+----------------------+-------------------+
| outputs.a_color3d() | GfVec3d& |
+----------------------+-------------------+
| outputs.a_color4f() | GfVec4f& |
+----------------------+-------------------+
| outputs.a_frame() | GfMatrix4d& |
+----------------------+-------------------+
| outputs.a_timecode() | double& |
+----------------------+-------------------+
The full set of corresponding data types can be found in :ref:`ogn_attribute_roles`.
This role information is available on all attribute interfaces through the ``role()`` method. For example you can
find that the first attribute is a color by making this check:
.. code-block:: c++
static bool compute(OgnTutorialRoleDataDatabase& db)
{
if (db.inputs.a_color3d.role() == eColor)
{
processValueAsAColor( db.inputs.a_color3d() );
}
}
| 3,004 | reStructuredText | 40.736111 | 117 | 0.582889 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial29.rst | .. _ogn_tutorial_simple_abi_compute_vectorized_node:
Tutorial 29 - Node with simple ABI computeVectorized
====================================================
This tutorial demonstrates how to compose nodes that implements a very simple computeVectorized function using directly ABIs.
It shows how to access the data, using the different available methods.
OgnTutorialVectorizedABIPassThrough.cpp
---------------------------------------
The *cpp* file contains the implementation of the node. It takes a floating point input and simply copies it to its output,
demonstrating how to handle a vectorized compute. It shows what would be the implementation for a regular `compute` function,
and the different way it could implement a `computeVectorized` function.
- method #1: by indexing attribute retrieval ABI function directly in a loop
- method #2: by mutating the attribute data handle in a loop
- method #3: by retrieving the raw data, and working directly with it
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial29/OgnTutorialVectorizedABIPassthrough.cpp
:linenos:
:language: c++
| 1,142 | reStructuredText | 53.428569 | 134 | 0.724168 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial8.rst | .. _ogn_tutorial_cudaData:
Tutorial 8 - GPU Data Node
==========================
The GPU data node creates various attributes for use in a CUDA-based GPU compute. Several representative
types are used, though the list of potential attribute types is not exhaustive. See :ref:`ogn_attribute_types`
for the full list.
This node also introduces the notion of attribute typedefs; a useful concept when passing data around in functions.
OgnTutorialCudaData.ogn
-----------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.CudaData", which has inputs and outputs of
various types to use in various computations. Three different CUDA methods are created to show how each of the
types is passed through to the GPU and used by CUDA.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial8/OgnTutorialCudaData.ogn
:linenos:
:language: json
OgnTutorialCudaData.cpp
-----------------------
The *cpp* file contains the implementation of the compute method, which in turn calls the three CUDA algorithms.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial8/OgnTutorialCudaData.cpp
:linenos:
:language: c++
OgnTutorialCudaData_CUDA.cu
---------------------------
The *cu* file contains the implementation of the algorithms on the GPU using CUDA.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial8/OgnTutorialCudaData_CUDA.cu
:linenos:
:language: c++
GPU Attribute Access
--------------------
Here is the set of generated attributes from the database. The attributes living on the GPU return pointers to
memory as the CPU side cannot dereference it into its actual type (e.g. a `float` value, which would be returned as
a `float&` on the CPU side is returned instead as a `float*` on the GPU side.)
In addition, when calling into CUDA code the data changes type as it crosses the GPU boundary. On the CUDA side it
uses the CUDA native data types when it can, which are bytewise compatible with their CPU counterparts. Note that
in the case of the CPU attribute *multiplier* the data is passed to the CUDA code by value, since it has to be
copied from CPU to GPU.
+---------------------+---------+--------------------+-----------------+
| Database Function | Is GPU? | CPU Type | CUDA Type |
+=====================+=========+====================+=================+
| inputs.a() | Yes | const float* | const float* |
+---------------------+---------+--------------------+-----------------+
| inputs.b() | Yes | const float* | const float* |
+---------------------+---------+--------------------+-----------------+
| outputs.sum() | Yes | float* | float* |
+---------------------+---------+--------------------+-----------------+
| inputs.half() | Yes | const pxr::GfHalf* | __half* |
+---------------------+---------+--------------------+-----------------+
| outputs.half() | Yes | pxr::GfHalf* | __half* |
+---------------------+---------+--------------------+-----------------+
| inputs.color() | Yes | const GfVec3d* | const double3* |
+---------------------+---------+--------------------+-----------------+
| outputs.color() | Yes | GfVec3d* | double3* |
+---------------------+---------+--------------------+-----------------+
| inputs.matrix() | Yes | const GfMatrix4d* | const Matrix4d* |
+---------------------+---------+--------------------+-----------------+
| outputs.matrix() | Yes | GfMatrix4d* | Matrix4d* |
+---------------------+---------+--------------------+-----------------+
| inputs.multiplier() | No | const GfVec3f& | const float3 |
+---------------------+---------+--------------------+-----------------+
| inputs.points() | Yes | const GfVec3f* | const float3** |
+---------------------+---------+--------------------+-----------------+
| outputs.points() | Yes | GfVec3f* | float3** |
+---------------------+---------+--------------------+-----------------+
The array attribute *points* does not have an array-like wrapper as the CUDA code would rather deal with
raw pointers. In order to provide the size information, when calling the CUDA code the value ``inputs.points.size()``
should also be passed in.
Notice the subtle difference in types on the CPU side for GPU-based data. Instead of references to data there are
pointers, necessary since the data lives in a different memory-space, and all pointers have an extra level of
indirection for the same reason.
There is also a section of this generated file dedicated to information relevant to the CUDA code. In this section
the CUDA attribute data types are defined.
It is protected with ``#ifdef __CUDACC__`` so that it is only processed when included through the CUDA
compiler (and vice versa, so none of the other setup code will be processed on the CUDA side).
.. code-block:: c++
#include <cuda_fp16.h>
#include <omni/graph/core/cuda/CUDAUtils.h>
#include <omni/graph/core/cuda/Matrix4d.h>
namespace OgnTutorialCudaDataCudaTypes
{
namespace inputs
{
using a_t = const float*;
using b_t = const float*;
using points_t = const float3**;
using multiplier_t = const float3*;
using half_t = const __half*;
using color_t = const double3*;
using matrix_t = const Matrix4d*;
}
namespace outputs
{
using sum_t = float*;
using points_t = float3**;
using half_t = __half*;
using color_t = double3*;
using matrix_t = Matrix4d*;
}
}
using namespace OgnTutorialCudaDataCudaTypes;
Notice the inclusion of the file *cuda_fp16.h*, needed due to the use of the CUDA type *__half*, and the files
*omni/graph/core/cuda/CUDAUtils.h* and *omni/graph/core/cuda/Matrix4d.h*, which provide support functions for CUDA
math.
The data types used by CUDA are compatible with their equivalents on the C++ side, so you can specify passing arguments
into CUDA from C++ by using a declaration such as this on the C++ side:
.. code-block:: c++
// In this code "inputs::points_t" is the type "GfVec3f*".
// The size of that array must be passed in as well since it is not implicit in the data type.
extern "C" void cudaCompute(
inputs::points_t*,
size_t pointSize,
inputs::multiplier_t*,
outputs::points_t*
);
which corresponds to this function defined in the .cu file:
.. code-block:: c++
// In this code "inputs::points_t" is the type "float3*"
extern "C" void cudaCompute(
inputs::points_t* inPoints,
size_t pointSize,
inputs::multiplier_t* multiplier,
outputs::points_t* outPoints
)
{...}
Pointers are used in the calls rather than being part of the type definitions in order to emphasize the fact that the
values passed through are pointers to the real data in the fabric, which in this case is data in GPU memory.
| 7,115 | reStructuredText | 45.815789 | 121 | 0.575404 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial21.rst | .. _ogn_tutorial_bundle_add_attributes:
Tutorial 21 - Adding Bundled Attributes
=======================================
Sometimes instead of simply copying data from an input or input bundle into an output bundle you might want to
construct a bundle from some other criteria. For example a bundle construction node could take in an array of names
and attribute types and output a bundle consisting of those attributes with some default values.
The bundle accessor provides a simple method that can accomplish this task. Adding a new attribute is as simple as
providing those two values to the bundle for every attribute you wish to add.
There is also a complementary function to remove named bundle attributes.
OgnTutorialBundleAddAttributes.ogn
----------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.BundleData", which has one input bundle and one
output bundle.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial21/OgnTutorialBundleAddAttributes.ogn
:linenos:
:language: json
OgnTutorialBundleAddAttributes.cpp
----------------------------------
The *cpp* file contains the implementation of the compute method. It accesses the attribute descriptions on the inputs
and creates a bundle with attributes matching those descriptions as its output.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial21/OgnTutorialBundleAddAttributes.cpp
:linenos:
:language: c++
OgnTutorialBundleAddAttributesPy.py
-----------------------------------
The *py* file contains the same algorithm as the C++ node, with only the implementation language being different.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial21/OgnTutorialBundleAddAttributesPy.py
:linenos:
:language: python
| 1,881 | reStructuredText | 46.049999 | 130 | 0.729931 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial26.rst | .. _ogn_tutorial_generic_math_node:
Tutorial 26 - Generic Math Node
================================
This tutorial demonstrates how to compose nodes that perform mathematical operations in python using numpy. Using numpy
has the advantage that it is api-compatible to cuNumeric. As demonstrated in the Extended Attributes tutorial, generic math nodes
use extended attributes to allow inputs and outputs of arbitrary numeric types, specified using the "numerics" keyword.
.. code-block:: json
"inputs": {
"myNumbericAttribute": {
"description": "Accepts an incoming connection from any type of numeric value",
"type": ["numerics"]
}
}
OgnTutorialGenericMathNode.ogn
--------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.GenericMathNode", which takes inputs
of any numeric types and performs a multiplication.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial26/OgnTutorialGenericMathNode.ogn
:linenos:
:language: json
OgnTutorialGenericMathNode.py
---------------------------------
The *py* file contains the implementation of the node. It takes two numeric inputs and performs a multiplication,
demonstrating how to handle cases where the inputs are both numeric types but vary in precision, format or
dimension.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial26/OgnTutorialGenericMathNode.py
:linenos:
:language: python
| 1,538 | reStructuredText | 40.594593 | 129 | 0.706112 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial2.rst | .. _ogn_tutorial_simpleData:
Tutorial 2 - Simple Data Node
=============================
The simple data node creates one input attribute and one output attribute of each of the simple types, where "simple"
refers to data types that have a single component and are not arrays. (e.g. "float" is simple, "float[3]" is not, nor is
"float[]"). See also :ref:`ogn_tutorial_simpleDataPy` for a similar example in Python.
OgnTutorialSimpleData.ogn
-------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.SimpleData", which has one
input and one output attribute of each simple type.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial2/OgnTutorialSimpleData.ogn
:linenos:
:language: json
OgnTutorialSimpleData.cpp
-------------------------
The *cpp* file contains the implementation of the compute method, which modifies
each of the inputs in a simple way to create outputs that have different values.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial2/OgnTutorialSimpleData.cpp
:linenos:
:language: c++
Note how the attribute values are available through the OgnTutorialSimpleDataDatabase class. The generated interface
creates access methods for every attribute, named for the attribute itself. Inputs will be returned as const
references, outputs will be returned as non-const references.
Attribute Data
--------------
Two types of attribute data are created, which help with ease of access and of use - the attribute name lookup
information, and the attribute type definition.
Attribute data is accessed via a name-based lookup. This is not particularly efficient, so to facilitate this process
the attribute name is translated into a fast access token. In addition, the information about the attribute's type and
default value is constant for all nodes of the same type so that is stored as well, in static data.
Normally you would use an *auto* declaration for attribute types. Sometimes you want to pass around attribute data so
it is helpful to have access to the attribute's data type. In the generated code a ``using namespace`` is set up to
provide a very simple syntax for accessing the attribute's metadata from within the node:
.. code-block:: c++
std::cout << "Attribute name is " << inputs::a_bool.m_name << std::endl;
std::cout << "Attribute type is " << inputs::a_bool.m_dataType << std::endl;
extern "C" void processAttribute(inputs::a_bool_t& value);
// Equivalent to extern "C" void processAttribute(bool& value);
Attribute Data Access
---------------------
The attributes are automatically namespaced with *inputs* and *outputs*. In the USD file the attribute names
will appear as *inputs:XXX* or *outputs:XXX*. In the C++ interface the colon is illegal so a contained struct
is used to make use of the period equivalent, as *inputs.XXX* or *outputs.XXX*.
The minimum information provided by these wrapper classes is a reference to the underlying data, accessed by
``operator()``. For this class, these are the types it provides:
+--------------------+--------------------+
| Database Function | Returned Type |
+====================+====================+
| inputs.a_bool() | const bool& |
+--------------------+--------------------+
| inputs.a_half() | const pxr::GfHalf& |
+--------------------+--------------------+
| inputs.a_int() | const int& |
+--------------------+--------------------+
| inputs.a_int64() | const int64_t& |
+--------------------+--------------------+
| inputs.a_float() | const float& |
+--------------------+--------------------+
| inputs.a_double() | const double& |
+--------------------+--------------------+
| inputs.a_path() | const std::string& |
+--------------------+--------------------+
| inputs.a_string() | const std::string& |
+--------------------+--------------------+
| inputs.a_token() | const NameToken& |
+--------------------+--------------------+
| outputs.a_bool() | bool& |
+--------------------+--------------------+
| outputs.a_half() | pxr::GfHalf& |
+--------------------+--------------------+
| outputs.a_int() | int& |
+--------------------+--------------------+
| outputs.a_int64() | int64_t& |
+--------------------+--------------------+
| outputs.a_float() | float& |
+--------------------+--------------------+
| outputs.a_double() | double& |
+--------------------+--------------------+
| outputs.a_string() | std::string& |
+--------------------+--------------------+
| outputs.a_token() | NameToken& |
+--------------------+--------------------+
The data returned are all references to the real data in the Fabric, our managed memory store, pointed to the
correct location at evaluation time.
Note how input attributes return *const* data while output attributes do not. This reinforces the restriction that
input data should never be written to, as it would cause graph synchronization problems.
The type *pxr::GfHalf* is an implementation of a 16-bit floating point value, though any other may also be used with
a runtime cast of the value. *omni::graph::core::NameToken* is a simple token through which a unique string can be
looked up at runtime.
Helpers
-------
A few helpers are provided in the database class definition to help make coding with it more natural.
initializeType
++++++++++++++
Function signature ``static void initializeType(const NodeTypeObj& nodeTypeObj)``
is an implementation of the :cpp:member:`ABI function<omni::graph::core::INodeType::initializeType>` that is called once for each node type,
initializing such things as its mandatory attributes and their default values.
validate
++++++++
Function signature ``bool validate()``. If any of the mandatory attributes do not have values then the generated code
will exit early with an error message and not actually call the node's compute method.
token
+++++
Function signature ``NameToken token(const char* tokenName)``.
Provides a simple conversion from a string to the unique token representing that string, for fast comparison of
strings and for use with the attributes whose data types are *token*.
Compute Status Logging
++++++++++++++++++++++
Two helper functions are providing in the database class to help provide more information when the compute method of
a node has failed. Two methods are provided, both taking printf-like variable sets of parameters.
:cpp:member:`void logError(Args...)<omni::graph::core::ogn::Database::logError>` is used when the compute has run into some
inconsistent or unexpected data, such as two
input arrays that are supposed to have the same size but do not, like the normals and vertexes on a mesh.
:cpp:member:`void logWarning(Args...)<omni::graph::core::ogn::Database::logWarning>` can be used when the compute has hit an
unusual case but can still provide a consistent
output for it, for example the deformation of an empty mesh would result in an empty mesh and a warning since that is
not a typical use for the node.
typedefs
++++++++
Although not part of the database class per se, a typedef alias is created for every attribute so that you can
use its type directly without knowing the detailed type; a midway point between exact types and *auto*. The main
use for such types might be passing attribute data between functions.
Here are the corresponding typedef names for each of the attributes:
+--------------------+--------------------+
| Typedef Alias | Actual Type |
+====================+====================+
| inputs.a_bool_t | const bool& |
+--------------------+--------------------+
| inputs.a_half_t | const pxr::GfHalf& |
+--------------------+--------------------+
| inputs.a_int_t | const int& |
+--------------------+--------------------+
| inputs.a_int64_t | const int64_t& |
+--------------------+--------------------+
| inputs.a_float_t | const float& |
+--------------------+--------------------+
| inputs.a_double_t | const double& |
+--------------------+--------------------+
| inputs.a_token_t | const NameToken& |
+--------------------+--------------------+
| outputs.a_bool_t | bool& |
+--------------------+--------------------+
| outputs.a_half_t | pxr::GfHalf& |
+--------------------+--------------------+
| outputs.a_int_t | int& |
+--------------------+--------------------+
| outputs.a_int64_t | int64_t& |
+--------------------+--------------------+
| outputs.a_float_t | float& |
+--------------------+--------------------+
| outputs.a_double_t | double& |
+--------------------+--------------------+
| outputs.a_token_t | NameToken& |
+--------------------+--------------------+
Notice the similarity between this table and the one above. The typedef name is formed by adding the extension *_t*
to the attribute accessor name, similar to C++ standard type naming conventions. The typedef should always correspond
to the return value of the attribute's ``operator()``.
Direct ABI Access
+++++++++++++++++
All of the generated database classes provide access to the underlying *INodeType* ABI for those rare situations
where you want to access the ABI directly. There are two methods provided, which correspond to the objects passed
in to the ABI compute method.
Context function signature :cpp:member:`const GraphContextObj& abi_context() const<omni::graph::core::ogn::Database::abi_context>`,
for accessing the underlying OmniGraph evaluation context and its interface.
Node function signature :cpp:member:`const NodeObj& abi_node() const<omni::graph::core::ogn::Database::abi_node>`,
for accessing the underlying OmniGraph node object and its interface.
In addition, the attribute ABI objects are extracted into a shared structure so that they can be accessed in a
manner similar to the attribute data. For example ``db.attributes.inputs.a_bool()`` returns the `AttributeObj` that
refers to the input attribute named `a_bool`. It can be used to directly call ABI functions when required, though
again it should be emphasized that this will be a rare occurrence - all of the common operations can be performed
more easily using the database interfaces.
Node Computation Tests
----------------------
The "tests" section of the .ogn file contains a list of tests consisting of a description and attribute values,
both inputs and outputs, that will be used for the test.
The test runs by setting all of the named input attributes to their values, running the compute, then comparing the
resulting output attribute values against those specified by the test.
For example to test the computation of the boolean attribute, whose output is the negation of the input, these
two test values could be specified:
.. code-block:: json
{
"tests": [
{
"description": "Check that true becomes false",
"inputs": {
"a_bool": true
},
"outputs": {
"a_bool": false
}
}
]
}
The "description" field is optional, though highly recommended to aid in debugging which tests are failing.
Any unspecified inputs take their default value, and any unspecified outputs do not get checked after the compute.
For simple attribute lists an abbreviated version of the syntax can be used, where the inputs and outputs get their
fully namespaced names so that there is no need for the "inputs" and "outputs" objects.
.. code-block:: json
{
"tests": [
{
"description": "Check that false becomes true",
"inputs:a_bool": false,
"outputs:a_bool": true
}
]
}
| 11,918 | reStructuredText | 45.925197 | 140 | 0.611596 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial11.rst | .. _ogn_tutorial_complexData_py:
Tutorial 11 - Complex Data Node in Python
=========================================
This node fills on the remainder of the (CPU for now) data types available through Python. It combines the
progressive introduction in C++ of :ref:`ogn_tutorial_tupleData`, :ref:`ogn_tutorial_arrayData`,
:ref:`ogn_tutorial_tupleArrays`, and :ref:`ogn_tutorial_roleData`.
Rather than providing an exhaustive set of attribute types there will be one chosen from each of the
aforementioned categories of types. See the section `Pythonic Complex Attribute Type Access`_ for details on
how to access the representative types.
OgnTutorialComplexDataPy.ogn
----------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.ComplexDataPy", which has one
input and one output attribute of each complex (arrays, tuples, roles) type.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial11/OgnTutorialComplexDataPy.ogn
:linenos:
:language: json
OgnTutorialComplexDataPy.py
---------------------------
The *py* file contains the implementation of the compute method, which modifies
each of the inputs in a simple way to create outputs that have different values.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial11/OgnTutorialComplexDataPy.py
:linenos:
:language: python
Note how the attribute values are available through the ``OgnTutorialComplexDataPyDatabase`` class. The generated
interface creates access methods for every attribute, named for the attribute itself. They are all implemented as Python
properties, where inputs only have get methods and outputs have both get and set methods.
Pythonic Complex Attribute Type Access
--------------------------------------
Complex data in Python takes advantage of the numpy library to handle arrays so you should always include this
line at the top of your node if you have array data:
.. code-block:: python
import numpy
+----------------------+---------------------+-------------------------+
| Database Property | Representative Type | Returned Type |
+======================+=====================+=========================+
| inputs.a_float3 | Tuple | [float, float, float] |
+----------------------+---------------------+-------------------------+
| inputs.a_floatArray | Array | numpy.ndarray[float, 1] |
+----------------------+---------------------+-------------------------+
| inputs.a_point3Array | Role-Based | numpy.ndarray[float, 3] |
+----------------------+---------------------+-------------------------+
As with simple data, the values returned are all references to the real data in the Fabric, our managed memory store,
pointing to the correct location at evaluation time.
Python Role Information
-----------------------
The attribute roles can be checked in Python similar to C++ by using the ``role()`` method on the generated database
class.
.. code-block:: python
def compute(db) -> bool:
"""Run my algorithm"""
if db.role(db.outputs.a_pointArray) == db.ROLE_POINT:
print("Hey, I did get the correct role")
This table shows the list of Python role names and the corresponding attribute types that match them:
+-----------------+---------------------------------+
| Python Role | Attribute Types |
+=================+=================================+
| ROLE_COLOR | colord, colorf, colorh |
+-----------------+---------------------------------+
| ROLE_FRAME | frame |
+-----------------+---------------------------------+
| ROLE_NORMAL | normald, normalf, normalh |
+-----------------+---------------------------------+
| ROLE_POSITION | positiond, positionf, positionh |
+-----------------+---------------------------------+
| ROLE_QUATERNION | quatd, quatf, quath |
+-----------------+---------------------------------+
| ROLE_TEXCOORD | texcoordd, texcoordf, texcoordh |
+-----------------+---------------------------------+
| ROLE_TIMECODE | timecode |
+-----------------+---------------------------------+
| ROLE_TRANSFORM | transform |
+-----------------+---------------------------------+
| ROLE_VECTOR | vectord, vectorf, vectorh |
+-----------------+---------------------------------+
| 4,478 | reStructuredText | 45.175257 | 123 | 0.531487 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial6.rst | .. _ogn_tutorial_tupleArrays:
Tutorial 6 - Array of Tuples
============================
Arrays and tuples can be combined to create common attribute types such as **float[3][]**, an array of
3 floats. This node takes two arrays of float[3]s and generates an output array consisting of the element-wise
dot products.
The *ogn* file shows the implementation of a node named
"omni.graph.tutorials.TupleArrays", which has two tuple-array inputs and a simple array output.
OgnTutorialTupleArrays.ogn
--------------------------
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial6/OgnTutorialTupleArrays.ogn
:linenos:
:language: json
OgnTutorialTupleArrays.cpp
--------------------------
The *cpp* file contains the implementation of the compute method, which computes the dot products.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial6/OgnTutorialTupleArrays.cpp
:linenos:
:language: c++
There are typedefs set up for USD-compatible types, e.g. for *float[3]* you get *GfVec3f*. Other types, for whom
there is no USD equivalent, are implemented as *ogn::tuple<TYPE, N>*. See the complete table
of data types in :ref:`ogn_attribute_types`.
+-------------------+----------------------------------+
| Database Function | Returned Type |
+===================+==================================+
| inputs.a() | const ogn::const_array<GfVec3f>& |
+-------------------+----------------------------------+
| inputs.b() | const ogn::const_array<GfVec3f>& |
+-------------------+----------------------------------+
| outputs.result() | ogn::array<GfVec3f>& |
+-------------------+----------------------------------+
Note that the tuple array access is identical to the simple data array access, except that the types are now
the compound tuple types.
| 1,893 | reStructuredText | 43.046511 | 120 | 0.58373 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial1.rst | .. _ogn_tutorial_empty:
Tutorial 1 - Trivial Node
=========================
The simplest possible node is one that implements only the mandatory fields in a node. These are the "version" and
"description" fields.
The existence of the file *OgnTutorialEmpty.svg* will automatically install this icon into the build directory and
add its path to the node type's metadata. The installed file will be named after the node type, not the class type,
so it will be installed at the path `$BUILD/exts/omni.graph.tutorials/ogn/icons/Empty.svg`.
OgnTutorialEmpty.ogn
--------------------
The *.ogn* file containing the implementation of a node named "omni.graph.tutorials.Empty", in its first
version, with a simple description.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial1/OgnTutorialEmpty.ogn
:linenos:
:language: json
OgnTutorialEmpty.cpp
--------------------
The *.cpp* file contains the minimum necessary implementation of the node class, which
contains only the empty compute method. It contains a detailed description of the necessary code components.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial1/OgnTutorialEmpty.cpp
:linenos:
:language: c++
| 1,261 | reStructuredText | 41.066665 | 115 | 0.727201 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial4.rst | .. _ogn_tutorial_tupleData:
Tutorial 4 - Tuple Data Node
============================
Tuple data, also referred to as fixed array data, consists of multiple elements of a simple type.
For example *float[3]* or *double[4]*. This node creates one input attribute and one output attribute
of each of the simple data types with an element count greater than 1.
OgnTutorialTupleData.ogn
------------------------
The *ogn* file shows the implementation of a node named
"omni.tutorials.TupleData", which has one input and one matching output attribute of each simple type with element counts
greater than one.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial4/OgnTutorialTupleData.ogn
:linenos:
:language: json
:emphasize-lines: 2,12
New Concept - Tags
------------------
Often it is helpful to group nodes with common functionality together in some way in the UI. To help with this you
can specific values for the **tags** keyword. The values can either be a comma-separated string, or a list, that will
be rendered into a comma-separated string when added to the metadata.
New Concept - Namespaced Node Type Name
---------------------------------------
The standard naming convention uses a simple ``CamelCase`` name, with the extension of origin prepended onto the name
to ensure uniqueness. Sometimes you may wish to manage your own namespace, e.g. when you anticipate moving nodes
between extensions so the extension name will not be consistent. All you have to do to override the default behaviour
is to specify a namespace for the node type name (i.e. include a `.` separator in it).
.. warning::
Once you have overridden the node type name with such an absolute value you are now responsible for ensuring
uniqueness so be sure you have some scheme that will help you with that. The prefix **omni.** is reserved for
NVIDIA nodes. Everything else is legal, so long as the entire name itself is legal.
OgnTutorialTupleData.cpp
------------------------
The *cpp* file contains the implementation of the compute method, which
modifies each of the inputs in a simple way to create outputs that have different values.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial4/OgnTutorialTupleData.cpp
:linenos:
:language: c++
Note how by default some of the attribute value types are USD types and some are generic *ogn::tuple* types.
See :ref:`ogn_attribute_types` for the full set of type definitions.
Tuple Attribute Access
----------------------
The attribute access is as described in :ref:`ogn_tutorial_simpleData` except that the exact return types
of the attributes are different in order to support tuple member access. In practice you would use
an *auto* declaration. The types are shown only for illustrative purposes.
The data types for tuples that correspond to existing USD types use the ``pxr::gf`` versions of those types,
so the database accessors in this node will return these types:
+---------------------+----------------+
| Database Function | Returned Type |
+=====================+================+
| inputs.a_double2() | const GfVec2d& |
+---------------------+----------------+
| inputs.a_float2() | const GfVec2f& |
+---------------------+----------------+
| inputs.a_half2() | const GfVec2h& |
+---------------------+----------------+
| inputs.a_int2() | const GfVec2i& |
+---------------------+----------------+
| inputs.a_float3() | const GfVec3f& |
+---------------------+----------------+
| outputs.a_double2() | GfVec2d& |
+---------------------+----------------+
| outputs.a_float2() | GfVec2f& |
+---------------------+----------------+
| outputs.a_half2() | GfVec2h& |
+---------------------+----------------+
| outputs.a_int2() | GfVec2i& |
+---------------------+----------------+
| outputs.a_float3() | GfVec3f& |
+---------------------+----------------+
Tuple Data Compute Validation
-----------------------------
As with simple data types the existence of the mandatory inputs is confirmed before proceeding to the compute
method.
Tuple Data Node Computation Tests
---------------------------------
In the *"tests"* section of the .ogn file there are some simple tests exercising the basic functionality of the
compute method. In practice it is a good idea to include more thorough tests which exercise different data values,
especially potential edge cases.
| 4,474 | reStructuredText | 46.105263 | 121 | 0.633438 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial22.rst | .. _ogn_tutorial_cpu_gpu_bundles:
Tutorial 22 - Bundles On The GPU
================================
Bundles are not exactly data themselves, they are a representation of a collection of attributes whose composition is
determined at runtime. As such, they will always live on the CPU. However the attributes they are encapsulating have
the same flexibility as other attributes to live on the CPU, GPU, or have their location decided at runtime.
For that reason it's convenient to use the same "cpu", "cuda", and "any" memory types for the bundle attributes, with
a slightly different interpretation.
- **cpu** all attributes in the bundle will be on the CPU
- **cuda** all attributes in the bundle will be on the GPU
- **any** either some attributes in the bundle are on the CPU and some are on the GPU, or that decision will be made at runtime
For example if you had a bundle of attributes consisting of a large array of points and a boolean that controls the
type of operation you will perform on them it makes sense to leave the boolean on the CPU and move the points to the
GPU for more efficient processing.
OgnTutorialCpuGpuBundles.ogn
----------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.CpuGpuBundles" with an input bundle on
the CPU, an input bundle on the GPU, and an output bundle whose memory location is decided at runtime by a boolean.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial22/OgnTutorialCpuGpuBundles.ogn
:linenos:
:language: json
OgnTutorialCpuGpuBundles.cpp
----------------------------------
The *cpp* file contains the implementation of the compute method. It creates a merged bundle in either the CPU or
GPU based on the input boolean and runs an algorithm on the output location.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial22/OgnTutorialCpuGpuBundles.cpp
:linenos:
:language: c++
OgnTutorialCpuGpuBundlesPy.py
-----------------------------------
The *py* file contains the same algorithm as the C++ node, with the node implementation language being different.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial22/OgnTutorialCpuGpuBundlesPy.py
:linenos:
:language: python
| 2,331 | reStructuredText | 48.61702 | 127 | 0.726298 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial9.rst | .. _ogn_tutorial_cpuGpuData:
Tutorial 9 - Runtime CPU/GPU Decision
=====================================
The CPU/GPU data node creates various attributes for use in a CUDA-based GPU compute or a CPU-based compute, where the
decision of which to use is made at runtime rather than compile time. A few representative
types are used, though the list of potential attribute types is not exhaustive. See :ref:`ogn_attribute_types`
for the full list.
OgnTutorialCpuGpuData.ogn
-------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.CpuGpuData", which has attributes whose memory
type is determined at runtime by the input named *isGPU*. The algorithm of the node is implemented in CUDA, but
in such a way that it can run on either the CPU or the GPU, depending on where the attribute data lives.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial9/OgnTutorialCpuGpuData.ogn
:linenos:
:language: json
OgnTutorialCpuGpuData.cpp
-------------------------
The *cpp* file contains the implementation of the compute method, which checks the value of the *isGPU* attribute
and then extracts the data of the specified type to pass to the algorithm in the .cu file.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial9/OgnTutorialCpuGpuData.cpp
:linenos:
:language: c++
OgnTutorialCpuGpuData_CUDA.cu
-----------------------------
The *cu* file contains the implementation of the deformation on the CPU and GPU using CUDA.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial9/OgnTutorialCpuGpuData_CUDA.cu
:linenos:
:language: c++
CPU/GPU Attribute Access
------------------------
Here is how the attribute values are returned from the database. Up until now the attribute name has sufficed as the
database member that accesses the value through its *operator()*. The addition of the runtime switch of memory
locations is facilitated by the addition of the *gpu()* and *cpu()* members.
+-------------------------+----------------+-------------------------+-----------------+----------------+
| CPU Function | CPU Type | GPU Function | GPU Type | CUDA Type |
+=========================+================+=========================+=================+================+
| inputs.a.cpu()          | const float&   | inputs.a.gpu()          | const float*    | const float*   |
+-------------------------+----------------+-------------------------+-----------------+----------------+
| inputs.b.cpu() | const float& | inputs.b.gpu() | const float* | const float* |
+-------------------------+----------------+-------------------------+-----------------+----------------+
| outputs.sum.cpu() | float& | outputs.sum.gpu() | float* | float* |
+-------------------------+----------------+-------------------------+-----------------+----------------+
| inputs.multiplier.cpu() | const GfVec3f& | inputs.multiplier.gpu() | const GfVec3f* | const float3 |
+-------------------------+----------------+-------------------------+-----------------+----------------+
| inputs.points.cpu() | const GfVec3f* | inputs.points.gpu() | const GfVec3f** | const float3** |
+-------------------------+----------------+-------------------------+-----------------+----------------+
| outputs.points.cpu() | GfVec3f* | outputs.points.gpu() | const GfVec3f** | float3** |
+-------------------------+----------------+-------------------------+-----------------+----------------+
Type Information
++++++++++++++++
As there are three different potential types for each attribute when it varies location at runtime (CPU, CPU being
passed to GPU, and GPU) there are extra types introduced in order to handle each of them. The CUDA types are handled
as before, but on the CPU side there are extra types for the data being passed from the CPU to the GPU.
+----------------------+----------------+--------------------------+-----------------+
| CPU Type Method | CPU Data Type | GPU Type Method | GPU Data Type |
+======================+================+==========================+=================+
| inputs::a_t | const float& | inputs::a_t_gpu | const float* |
+----------------------+----------------+--------------------------+-----------------+
| inputs::b_t | const float& | inputs::b_t_gpu | const float* |
+----------------------+----------------+--------------------------+-----------------+
| outputs::sum_t | float& | outputs::sum_t_gpu | float* |
+----------------------+----------------+--------------------------+-----------------+
| inputs::multiplier_t | const GfVec3f& | inputs::multiplier_t_gpu | const GfVec3f* |
+----------------------+----------------+--------------------------+-----------------+
| inputs::points_t | const GfVec3f* | inputs::points_t_gpu | const GfVec3f** |
+----------------------+----------------+--------------------------+-----------------+
| outputs::points_t | GfVec3f* | outputs::points_t_gpu | const GfVec3f** |
+----------------------+----------------+--------------------------+-----------------+
On the C++ side the functions defined in the CUDA file are declared as:
.. code-block:: c++
extern "C" void cpuGpuMultiplierCPU(outputs::points_t, inputs::multiplier_t, inputs::points_t, size_t);
extern "C" void cpuGpuMultiplierGPU(outputs::points_t_gpu, inputs::multiplier_t_gpu, inputs::points_t_gpu, size_t);
The addition of the **_gpu** suffix mostly adds an extra layer of indirection to the values, since they exist in the
GPU memory namespace. Care must be taken to call the correct version with the correctly extracted data:
.. code-block:: c++
if (db.inputs.is_gpu())
{
cpuGpuMultiplierGPU(
db.outputs.points.gpu(),
db.inputs.multiplier.gpu(),
db.inputs.points.gpu(),
numberOfPoints
);
}
else
{
// Note how array data is extracted in its raw form for passing to the function on the CUDA side.
// This would be unnecessary if the implementation were entirely on the CPU side.
cpuGpuMultiplierCPU(
db.outputs.points.cpu().data(),
db.inputs.multiplier.cpu(),
db.inputs.points.cpu().data(),
numberOfPoints
);
}
On the CUDA side the function definitions use the existing CUDA types, so their signatures are:
.. code-block:: c++
extern "C" void cpuGpuMultiplierCPU(outputs::points_t, inputs::multiplier_t, inputs::points_t, size_t);
extern "C" void cpuGpuMultiplierGPU(outputs::points_t, inputs::multiplier_t, inputs::points_t, size_t);
| 6,880 | reStructuredText | 54.943089 | 124 | 0.506105 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial13.rst | .. _ogn_tutorial_state_py:
Tutorial 13 - Python State Node
===============================
This node illustrates how you can use internal state information, so long as you inform OmniGraph that you are
doing so in order for it to make more intelligent execution scheduling decisions.
OgnTutorialStatePy.ogn
----------------------
The *.ogn* file containing the implementation of a node named "omni.graph.tutorials.StatePy", with an empty state set to
inform OmniGraph of its intention to compute using internal state information.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial13/OgnTutorialStatePy.ogn
:linenos:
:language: json
OgnTutorialStatePy.py
---------------------
The *.py* file contains the compute method and the internal state information used to run the algorithm.
By overriding the special method ``internal_state`` you can define an object that will contain per-node data that
you can manage yourself. It will not be visible to OmniGraph.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial13/OgnTutorialStatePy.py
:linenos:
:language: python
| 1,167 | reStructuredText | 40.714284 | 120 | 0.723222 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial30.rst | .. _ogn_tutorial_advanced_compute_vectorized_node:
Tutorial 30 - Node with more advanced computeVectorized
=======================================================
This tutorial demonstrates how to compose nodes that implements a computeVectorized function. It shows how to access the raw vectorized data,
and how it can be used to write a performant tight loop using SIMD instructions.
OgnTutorialSIMDAdd.ogn
--------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.TutorialSIMDFloatAdd", which takes inputs
of 2 floating point values, and performs a sum.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial30/OgnTutorialSIMDAdd.ogn
:linenos:
:language: json
OgnTutorialSIMDAdd.cpp
---------------------------------
The *cpp* file contains the implementation of the node. It takes two floating point inputs and performs a sum,
demonstrating how to handle a vectorized compute. It shows how to retrieve the vectorized array of inputs and output,
how to reason about the number of instances provided, and how to optimize the compute taking advantage of those vectorized inputs.
Since a SIMD instruction requires a given alignment for its arguments, the compute is divided in 3 sections:
- a first section that does a regular sum on the first few instances that don't have a proper alignment
- a second, the heart of the function, that does as many SIMD adds as it can, performing them 4 elements by 4 elements
- a last section that performs a regular sum on the few remaining items that did not fit in the SIMD register
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial30/OgnTutorialSIMDAdd.cpp
:linenos:
:language: c++
| 1,781 | reStructuredText | 56.483869 | 141 | 0.732173 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial12.rst | .. _ogn_tutorial_abi_py:
Tutorial 12 - Python ABI Override Node
======================================
Although the .ogn format creates an easy-to-use interface to the ABI of the OmniGraph node and the associated
data model, there may be cases where you want to override the ABI to perform special processing.
OgnTutorialABIPy.ogn
--------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.AbiPy", in its first
version, with a simple description.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial12/OgnTutorialABIPy.ogn
:linenos:
:language: json
OgnTutorialABIPy.py
-------------------
The *py* file contains the implementation of the node class with every possible
ABI method replaced with customized processing. The node still functions the same as any other node, although it
is forced to write a lot of extra boilerplate code to do so.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial12/OgnTutorialABIPy.py
:linenos:
:language: python
Metadata Attached To Attributes
-------------------------------
This file introduces the *metadata* keyword to attributes, whose value is a dictionary of key/value pairs associated
with the attribute in which it appears that may be extracted using the ABI metadata functions. These are not persisted
in any files and so must be set either in the .ogn file or in an override of the **initialize()** method in the node
definition.
| 1,518 | reStructuredText | 43.676469 | 118 | 0.718709 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial14.rst | .. _ogn_tutorial_defaults:
Tutorial 14 - Defaults
======================
While most inputs are required to have default values it's not strictly necessary to provide explicit values
for those defaults. If a default is required and not specified then it will get a default value equal to an
empty value. See the table at the bottom for what is considered an "empty" value for each type of attribute.
OgnTutorialDefaults.ogn
------------------------
The *ogn* file shows the implementation of a node named
"omni.graph.tutorials.Defaults", which has sample inputs of several types without default values and matching outputs.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial14/OgnTutorialDefaults.ogn
:linenos:
:language: json
OgnTutorialDefaults.cpp
------------------------
The *cpp* file contains the implementation of the compute method, which
copies the input values over to the corresponding outputs. All values should be the empty defaults.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial14/OgnTutorialDefaults.cpp
:linenos:
:language: c++
Empty Values For Attribute Types
--------------------------------
The empty values for each of the attribute types is defined below. Having no default specified in the .ogn file
for any of them is equivalent to defining a default of the given value.
+-----------+---------------+
| Type Name | Empty Default |
+===========+===============+
| bool | False |
+-----------+---------------+
| double | 0.0 |
+-----------+---------------+
| float | 0.0 |
+-----------+---------------+
| half | 0.0 |
+-----------+---------------+
| int | 0 |
+-----------+---------------+
| int64 | 0 |
+-----------+---------------+
| string | "" |
+-----------+---------------+
| token | "" |
+-----------+---------------+
| uchar | 0 |
+-----------+---------------+
| uint | 0 |
+-----------+---------------+
| uint64 | 0 |
+-----------+---------------+
.. note::
All attributes that are array types have empty defaults equal to the empty array []
.. note::
All tuple types have empty defaults equal to a tuple of the correct count, each member containing the empty
value for the base type. e.g. a float[2] will have empty default [0.0, 0.0], and a matrix[2] will have
empty default [[0.0, 0.0], [0.0, 0.0]]
| 2,531 | reStructuredText | 36.235294 | 118 | 0.535757 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial16.rst | .. _ogn_tutorial_bundle_data:
Tutorial 16 - Bundle Data
=========================
Attribute bundles are a construct that packages up groups of attributes into a single entity that can be passed
around the graph. These attributes have all of the same properties as a regular attribute, you just have to go
through an extra step to access their values. This node illustrates how to break open a bundle to access and
modify values in the bundled attributes.
OgnTutorialBundleData.ogn
-------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.BundleData", which has one input bundle and one
output bundle.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial16/OgnTutorialBundleData.ogn
:linenos:
:language: json
OgnTutorialBundleData.cpp
-------------------------
The *cpp* file contains the implementation of the compute method. It accesses any attributes in the bundle that
have integral base types and doubles the values of those attributes.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial16/OgnTutorialBundleData.cpp
:linenos:
:language: c++
Bundled Attribute Data Manipulation Methods
-------------------------------------------
These are the methods for accessing the data that the bundled attributes encapsulate.
In regular attributes the code generated from the .ogn file provides accessors with predetermined data types.
The data types of attributes within bundles are unknown until compute time so it is up to the node writer to
explicitly cast to the correct data type.
Extracting Bundled Attribute Data - Simple Types
++++++++++++++++++++++++++++++++++++++++++++++++
For reference, simple types, tuple types, array types, tuple array types, and role types are all described in
:ref:`ogn_attribute_types`. However, unlike normal attributes the bundled attributes are always accessed as their
raw native data types. For example instead of ``pxr::GfVec3f`` you will access with ``float[3]``, which can always
be cast to the explicit types if desired.
.. note::
One exception to the type casting is tokens. In normal attributes you retrieve tokens as ``NameToken``.
Due to certain compiler restrictions the bundled attributes will be retrieved as the helper type
``OgnToken``, which is castable to ``NameToken`` for subsequent use.
.. code-block:: cpp
:emphasize-lines: 5
// As the attribute data types are only known at runtime you must perform a type-specific cast
// to get the data out in its native form.
const auto& inputBundle = db.inputs.bundle();
// Note the "const" here, to ensure we are not inadvertently modifying the input data.
const auto weight = inputBundle.attributeByName(weightToken);
const float* weightValue = weight.value<float>();
// nullptr return means the data is not of the requested type
assert( nullptr == weight.value<int>() );
Extracting Bundled Attribute Data - Tuple Types
+++++++++++++++++++++++++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 4
// The tuple data types can be accessed in exactly the same way as simple data types, with the proper cast.
const auto& inputBundle = db.inputs.bundle();
const auto weight3 = inputBundle.attributeByName(weight3Token);
const auto weight3Value = weight3.value<float[3]>();
// type of weight3Value == const float[3]*
// If you have a preferred library for manipulating complex types you can cast to them if they are compatible.
static_assert( std::is_convertible<pxr::GfVec3f, float[3]>::value, "Types must be compatible" );
const pxr::GfVec3f* usdWeight = reinterpret_cast<const pxr::GfVec3f*>(weight3Value);
Extracting Bundled Attribute Data - Array Types
+++++++++++++++++++++++++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 4,11
// As with tuple types, the array types are extracted directly with the native array cast
const auto& inputBundle = db.inputs.bundle();
const auto weights = inputBundle.attributeByName(weightsToken);
const auto weightsValue = weights.value<float[]>();
// type == const float[]*
auto& outputBundle = db.outputs.bundle();
// As this is an output, the const is omitted so that the data can be modified
auto nWeights = outputBundle.attributeByName(nWeightsToken);
// As with regular attributes, bundled array outputs must be resized to allocate space before filling them.
// These array types also have the normal array capabilities, with a size() method and range-based for loops.
nWeights.resize( weights.size() );
size_t index = 0;
for (const auto& weightValue : *weightsValue)
{
nWeights[index++] = weightValue / 256.0f;
}
Extracting Bundled Attribute Data - Tuple Array Types
+++++++++++++++++++++++++++++++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 4
// Tuple-arrays behave as you would expect, using the native tuple-array as the cast type
const auto& inputBundle = db.inputs.bundle();
const auto weights3 = inputBundle.attributeByName(weights3Token);
const auto weights3Value = weights.value<float[][3]>();
// type == const float[][3]*
OgnTutorialBundleDataPy.py
--------------------------
This is a Python version of the above C++ node with exactly the same set of attributes and a similar algorithm.
The main difference is that for the Python version the type definitions are much more flexible so the algorithm
can be applied to every type of bundled attribute with minimal code. (The .ogn file is omitted for
brevity, being identical to the previous one save for the addition of a ``"language": "python"`` property.)
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial16/OgnTutorialBundleDataPy.py
:linenos:
:language: python
| 5,864 | reStructuredText | 47.07377 | 125 | 0.700887 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial10.rst | .. _ogn_tutorial_simpleDataPy:
Tutorial 10 - Simple Data Node in Python
========================================
The simple data node creates one input attribute and one output attribute of each of the simple types, where "simple"
refers to data types that have a single component and are not arrays. (e.g. "float" is simple, "float[3]" is not, nor is
"float[]"). See also :ref:`ogn_tutorial_simpleData` for a similar example in C++.
Automatic Python Node Registration
----------------------------------
By implementing the standard Carbonite extension interface in Python, OmniGraph will know to recursively scan your
Python import path, import all Python node files it finds, and register those nodes.
It will also deregister those nodes when the extension shuts down. Here is an example of the directory structure for
an extension with a single node in it. (For extensions that have a `premake5.lua` build script this will be in the
build directory. For standalone extensions it is in your source directory.)
.. code-block:: text
omni.my.extension/
omni/
my/
extension/
nodes/
OgnMyNode.ogn
OgnMyNode.py
OgnTutorialSimpleDataPy.ogn
---------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.SimpleDataPy", which has one
input and one output attribute of each simple type.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial10/OgnTutorialSimpleDataPy.ogn
:linenos:
:language: json
OgnTutorialSimpleDataPy.py
--------------------------
The *py* file contains the implementation of the compute method, which modifies
each of the inputs in a simple way to create outputs that have different values.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial10/OgnTutorialSimpleDataPy.py
:linenos:
:language: python
Note how the attribute values are available through the ``OgnTutorialSimpleDataPyDatabase`` class. The generated
interface creates access methods for every attribute, named for the attribute itself. They are all implemented as Python
properties, where inputs only have get methods and outputs have both get and set methods.
Pythonic Attribute Data
-----------------------
Three subsections are created in the generated database class. The main section implements the node type ABI methods
and uses introspection on your node class to call any versions of the ABI methods you have defined (see later
tutorials for examples of how this works).
The other two subsections are classes containing attribute access properties for inputs and outputs. For naming
consistency the class members are called *inputs* and *outputs*. For example, you can access the value of the input
attribute named *foo* by referencing ``db.inputs.foo``.
Pythonic Attribute Access
-------------------------
In the USD file the attribute names are automatically namespaced as *inputs:FOO* or *outputs:BAR*. In the Python
interface the colon is illegal so the contained classes above are used to make use of the dot-separated equivalent,
as *inputs.FOO* or *outputs.BAR*.
While the underlying data types are stored in their exact form there is conversion when they are passed back to Python
as Python has a more limited set of data types, though they all have compatible ranges. For this class, these are the
types the properties provide:
+-------------------+---------------+
| Database Property | Returned Type |
+===================+===============+
| inputs.a_bool | bool |
+-------------------+---------------+
| inputs.a_half | float |
+-------------------+---------------+
| inputs.a_int | int |
+-------------------+---------------+
| inputs.a_int64 | int |
+-------------------+---------------+
| inputs.a_float | float |
+-------------------+---------------+
| inputs.a_double | float |
+-------------------+---------------+
| inputs.a_token | str |
+-------------------+---------------+
| outputs.a_bool | bool |
+-------------------+---------------+
| outputs.a_half | float |
+-------------------+---------------+
| outputs.a_int | int |
+-------------------+---------------+
| outputs.a_int64 | int |
+-------------------+---------------+
| outputs.a_float | float |
+-------------------+---------------+
| outputs.a_double | float |
+-------------------+---------------+
| outputs.a_token | str |
+-------------------+---------------+
The data returned are all references to the real data in the Fabric, our managed memory store, pointed to the
correct location at evaluation time.
Python Helpers
--------------
A few helpers are provided in the database class definition to help make coding with it more natural.
Python logging
++++++++++++++
Two helper functions are provided in the database class to help provide more information when the compute method of
a node has failed. Both take a formatted string describing the problem.
:py:meth:`log_error(message)<omni.graph.core.Database.log_error>` is used when the compute has run into some
inconsistent or unexpected data, such as two input arrays that are supposed to have the same size but do not,
like the normals and vertexes on a mesh.
:py:meth:`log_warning(message)<omni.graph.core.Database.log_warning>` can be used when the compute has hit an unusual
case but can still provide a consistent output for it, for example the deformation of an empty mesh would result in an
empty mesh and a warning since that is not a typical use for the node.
Direct Pythonic ABI Access
++++++++++++++++++++++++++
All of the generated database classes provide access to the underlying *INodeType* ABI for those rare situations
where you want to access the ABI directly. There are two members provided, which correspond to the objects passed
in to the ABI compute method.
There is the graph evaluation context member, :py:attr:`db.abi_context<omni.graph.core.Database.abi_context>`,
for accessing the underlying OmniGraph evaluation context and its interface.
There is also the OmniGraph node member, :py:attr:`db.abi_node<omni.graph.core.Database.abi_node>`, for accessing
the underlying OmniGraph node object and its interface.
| 6,464 | reStructuredText | 45.847826 | 122 | 0.64604 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial27.rst | .. _ogn_tutorial_cudaDataOnCpu:
Tutorial 27 - GPU Data Node with CPU Array Pointers
===================================================
The GPU data node illustrates the alternative method of extracting array data from the GPU by returning a CPU pointer
to the GPU array. Normally the data returns a GPU pointer to an array of GPU pointers, optimized for future use in
parallel processing of GPU array data. By returning a CPU pointer to the array you can use host-side processing to
dereference the pointers.
OgnTutorialCudaDataCpu.ogn
--------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.CudaCpuArrays", which has an input and
an output of type `float[3][]`, along with the special keyword to indicate that the pointer to the CUDA arrays should
be in CPU space.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial27/OgnTutorialCudaDataCpu.ogn
:linenos:
:language: json
:emphasize-lines: 5
OgnTutorialCudaDataCpu.cpp
--------------------------
The *cpp* file contains the implementation of the compute method, which in turn calls the CUDA algorithm.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial27/OgnTutorialCudaDataCpu.cpp
:linenos:
:language: c++
:emphasize-lines: 31-35
OgnTutorialCudaDataCpu_CUDA.cu
------------------------------
The *cu* file contains the implementation of the algorithm on the GPU using CUDA.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial27/OgnTutorialCudaDataCpu_CUDA.cu
:linenos:
:language: c++
OgnTutorialCudaDataCpuPy.py
---------------------------
The *py* file contains the implementation of the compute method, which for this example doesn't actually compute as
extra extension support is required for Python to run on the GPU (e.g. a Python -> CUDA compiler).
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial27/OgnTutorialCudaDataCpuPy.py
:linenos:
:language: python
:emphasize-lines: 31-35
| 2,111 | reStructuredText | 42.10204 | 125 | 0.702037 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial19.rst | .. _ogn_tutorial_extended_types:
Tutorial 19 - Extended Attribute Types
======================================
Extended attribute types are so-named because they extend the types of data an attribute can accept from one type
to several types. Extended attributes come in two flavours. The _any_ type is the most flexible. It allows a connection
with any other attribute type:
.. code-block:: json
:emphasize-lines: 4
"inputs": {
"myAnyAttribute": {
"description": "Accepts an incoming connection from any type of attribute",
"type": "any",
}
}
The union type, represented as an array of type names, allows a connection from a limited subset of attribute types.
Here's one that can connect to attributes of type _float[3]_ and _double[3]_:
.. code-block:: json
:emphasize-lines: 4
"inputs": {
"myUnionAttribute": {
"description": "Accepts an incoming connection from attributes with a vector of a 3-tuple of numbers",
"type": ["float[3]", "double[3]"],
}
}
.. note::
"union" is not an actual type name, as the type names are specified by a list. It is just the nomenclature used
for the set of all attributes that can be specified in this way. More details about union types can be
found in :ref:`ogn_attribute_types`.
As you will see in the code examples, the value extracted from the database for such attributes has to be checked for
the actual resolved data type. Until an extended attribute is connected its data type will be unresolved and it will
not have a value. For this reason _"default"_ values are not allowed on extended attributes.
OgnTutorialExtendedTypes.ogn
----------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.ExtendedTypes", which has inputs and outputs
with the extended attribute types.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial19/OgnTutorialExtendedTypes.ogn
:linenos:
:language: json
OgnTutorialExtendedTypes.cpp
----------------------------
The *cpp* file contains the implementation of the compute method. It illustrates how to determine and set the data
types on extended attribute types.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial19/OgnTutorialExtendedTypes.cpp
:linenos:
:language: c++
Information on the raw types extracted from the extended type values can be seen in :ref:`ogn_tutorial_bundle_data`.
OgnTutorialExtendedTypesPy.py
-----------------------------
This is a Python version of the above C++ node with exactly the same set of attributes and the same algorithm. It
shows the parallels between manipulating extended attribute types in both languages. (The .ogn file is omitted for
brevity, being identical to the previous one save for the addition of a ``"language": "python"`` property.)
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial19/OgnTutorialExtendedTypesPy.py
:linenos:
:language: python
| 3,093 | reStructuredText | 41.383561 | 124 | 0.702554 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial25.rst | .. _ogn_tutorial_dynamic_attributes:
Tutorial 25 - Dynamic Attributes
================================
A dynamic attribute is like any other attribute on a node, except that it is added at runtime rather than being part
of the .ogn specification. These are added through the ABI function ``INode::createAttribute`` and removed from the
node through the ABI function ``INode::removeAttribute``.
Once a dynamic attribute is added it can be accessed through the same ABI and script functions as regular attributes.
.. warning::
While the Python node database is able to handle the dynamic attributes through the same interface as regular
attributes (e.g. ``db.inputs.dynAttr``), the C++ node database is not yet similarly flexible and access to
dynamic attribute values must be done directly through the ABI calls.
OgnTutorialDynamicAttributes.ogn
--------------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.DynamicAttributes", which has a simple
float input and output.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial25/OgnTutorialDynamicAttributes.ogn
:linenos:
:language: json
OgnTutorialDynamicAttributes.cpp
--------------------------------
The *cpp* file contains the implementation of the compute method. It passes the input directly to the output unless it
finds a dynamic attribute named "multiplier", in which case it multiplies by that amount instead.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial25/OgnTutorialDynamicAttributes.cpp
:linenos:
:language: c++
OgnTutorialDynamicAttributesPy.py
---------------------------------
The *py* file contains the same algorithm as the C++ node, with only the implementation language being different.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial25/OgnTutorialDynamicAttributesPy.py
:linenos:
:language: python
Adding And Removing Dynamic Attributes
--------------------------------------
In addition to the above ABI functions the Python ``og.Controller`` class provides the ability to add and remove
dynamic attributes from a script.
To create a dynamic attribute you would use this function:
.. literalinclude:: ../../../../../source/extensions/omni.graph/python/_impl/node_controller.py
:language: python
:start-after: begin-create-attribute-function
:end-before: end-create-attribute-function
For example this is the code to create a `float[3]` input and a bundle output on an existing node:
.. code-block:: py
import omni.graph.core as og
new_input = og.Controller.create_attribute("/World/MyNode", "newInput", "float[3]")
new_output = og.Controller.create_attribute("/World/MyNode", "newOutput", "bundle", og.AttributePortType.ATTRIBUTE_PORT_TYPE_OUTPUT)
# The proper namespace will be added to the attribute, though you can also be explicit about it
other_input = og.Controller.create_attribute("/World/MyNode", "inputs:otherInput", "float[3]")
When the node is deleted the dynamic attribute will also be deleted, and the attribute will be stored in the USD file.
If you want to remove the attribute from the node at any time you would use this function:
.. literalinclude:: ../../../../../source/extensions/omni.graph/python/_impl/node_controller.py
:language: python
:start-after: begin-remove-attribute-function
:end-before: end-remove-attribute-function
The second optional parameter is only needed when the attribute is passed as a string. When passing an `og.Attribute`
the node is already known, being part of the attribute.
.. code-block:: py
import omni.graph.core as og
new_attr = og.Controller.create_attribute("/World/MyNode", "newInput", "float[3]")
# When passing the attribute the node is not necessary
og.Controller.remove_attribute(new_attr)
# However if you don't have the attribute available you can still use the name, noting that the
# namespace must be present.
# og.Controller.remove_attribute("inputs:newInput", "/World/MyNode")
Adding More Information
+++++++++++++++++++++++
While the attribute name and type are sufficient to unambiguously create it there is other information you can add
that would normally be present in the .ogn file. It's a good idea to add some of the basic metadata for the UI.
.. code-block:: py
import omni.graph.core as og
new_attr = og.Controller.create_attribute("/World/MyNode", "newInput", "vectorf[3]")
new_attr.set_metadata(og.MetadataKeys.DESCRIPTION, "This is a new input with a vector in it")
new_attr.set_metadata(og.MetadataKeys.UI_NAME, "Input Vector")
While dynamic attributes don't have default values you can do the equivalent by setting a value as soon as you
create the attribute:
.. code-block:: py
import omni.graph.core as og
new_attr = og.Controller.create_attribute("/World/MyNode", "newInput", "vectorf[3]")
og.Controller.set(new_attr, [1.0, 2.0, 3.0])
This default value can also be changed at any time (even when the attribute is already connected):
.. code-block:: py
new_attr.set_default([1.0, 0.0, 0.0])
| 5,203 | reStructuredText | 44.252174 | 136 | 0.721507 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial5.rst | .. _ogn_tutorial_arrayData:
Tutorial 5 - Array Data Node
============================
Array data consists of multiple elements of a simple type whose count is only known at runtime.
For example *float[]* or *double[]*. This node takes an array of floats and a multiplier and generates an
output array consisting of the product of the two.
OgnTutorialArrayData.ogn
------------------------
The *ogn* file shows the implementation of a node named
"omni.graph.tutorials.ArrayData", which has a float value and float array input with one float array output.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial5/OgnTutorialArrayData.ogn
:linenos:
:language: json
OgnTutorialArrayData.cpp
------------------------
The *cpp* file contains the implementation of the compute method, which
multiplies the float value by each member of the float array.
Note how the attribute Array values can be accessed as though they were a simple *std::vector* type.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial5/OgnTutorialArrayData.cpp
:linenos:
:language: c++
Array Attribute Access
----------------------
The attribute access is as described in :ref:`ogn_tutorial_simpleData` except that the exact return types
of the attributes are different in order to support array member access.
Full definition of the array wrapper classes can be found in the interface file *omni/graph/core/ogn/array.h*,
though they do behave in a manner consistent with *std::array*, supporting iterators, standard algorithms, random access
through either ``operator[]`` or the ``at(index)`` function, the ``empty()`` and ``size()`` functions, and access to
the raw underlying data using the function ``data()``. The non-const version also supports assignment and the
``resize()`` function for modifying its contents.
+---------------------+--------------------------------+
| Database Function | Returned Type |
+=====================+================================+
| inputs.original() | const ogn::const_array<float>& |
+---------------------+--------------------------------+
| inputs.multiplier() | const float& |
+---------------------+--------------------------------+
| outputs.result() | ogn::array<float>& |
+---------------------+--------------------------------+
These wrapper classes are similar in concept to ``std::span``, which handles unmanaged pointer+size data. In this case
the data is being managed by the Fabric. Modifications to the wrapper class data will directly modify the underlying
data in the Fabric.
You can still use the *auto* declarations on these types, and the array attributes have an additional ``size()``
method added for convenience.
.. code-block:: c++
bool compute(OgnTutorialArrayDataDatabase& db)
{
const auto& multiplier = db.inputs.multiplier();
const auto& original = db.inputs.original();
size_t originalSize = db.inputs.original.size();
auto& result = db.outputs.result();
};
| 3,099 | reStructuredText | 45.268656 | 120 | 0.640207 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial15.rst | .. _ogn_tutorial_bundle_manipulation:
Tutorial 15 - Bundle Manipulation
=================================
Attribute bundles are a construct that packages up groups of attributes into a single entity that can be passed
around the graph. Some advantages of a bundle are that they greatly simplify graph connections, only requiring a single
connection between nodes rather than dozens or even hundreds, and they do not require static definition of the data
they contain so it can change as the evaluation of the nodes dictate. The only disadvantage is that the node writer is
responsible for analyzing the contents of the bundle and deciding what to do with them.
OgnTutorialBundles.ogn
----------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.BundleManipulation", which has some bundles as
inputs and outputs. It's called "manipulation" as the focus of this tutorial node is on operations applied directly
to the bundle itself, as opposed to on the data on the attributes contained within the bundles. See future tutorials
for information on how to deal with that.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial15/OgnTutorialBundles.ogn
:linenos:
:language: json
OgnTutorialBundles.cpp
----------------------
The *cpp* file contains the implementation of the compute method. It exercises each of the available bundle
manipulation functions.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial15/OgnTutorialBundles.cpp
:linenos:
:language: c++
OgnTutorialBundlesPy.py
-----------------------
The *py* file duplicates the functionality in the *cpp* file, except that it is implemented in Python.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial15/OgnTutorialBundlesPy.py
:linenos:
:language: python
Bundle Notes
------------
Bundles are implemented in USD as "virtual primitives". That is, while regular attributes appear in a USD file
as attributes on a primitive, a bundle appears as a nested primitive with no members.
Naming Convention
-----------------
Attributes can and do contain namespaces to make them easier to work with. For example, ``outputs:operations`` is
the namespaced name for the output attribute ``operations``. However as USD does not allow colons in the names of
the primitives used for implementing attribute bundles, they will be replaced by underscores, viz. ``outputs_operations``.
Bundled Attribute Manipulation Methods
--------------------------------------
There are a few methods for manipulating the bundle contents, independent of the actual data inside. The actual
implementation of these methods may change over time however the usage should remain the same.
The Bundle As A Whole
+++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 2,5,8,11
// The bundle attribute is extracted from the database in exactly the same way as any other attribute.
const auto& inputBundle = db.inputs.myBundle();
// Output and state bundles are the same, except not const
auto& outputBundle = db.outputs.myBundle();
// The size of a bundle is the number of attributes it contains
auto bundleAttributeCount = inputBundle.size();
// Full bundles can be copied using the assignment operator
outputBundle = inputBundle;
Accessing Attributes By Name
++++++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 2,8-9
// The attribute names should be cached somewhere as a token for fast access.
static const NameToken normalsName = db.stringToToken("normals");
// Then it's a call into the bundle to find an attribute with matching name.
// Names are unique so there is at most one match, and bundled attributes do not have the usual attribute
// namespace prefixes "inputs:", "outputs:", or "state:"
const auto& inputBundle = db.inputs.myBundle();
auto normals = inputBundle.attributeByName(normalsName);
if (normals.isValid())
{
// If the attribute is not found in the bundle then isValid() will return false.
}
Putting An Attribute Into A Bundle
++++++++++++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 8,10
// Once an attribute has been extracted from a bundle a copy of it can be added to a writable bundle.
const auto& inputBundle = db.inputs.myBundle();
auto& outputBundle = db.outputs.myBundle();
auto normals = inputBundle.attributeByName(normalsToken);
if (normals.isValid())
{
// Clear the contents of stale data first since it will not be reused here.
outputBundle.clear();
// The attribute wrapper knows how to insert a copy into a bundle
outputBundle.insertAttribute(normals);
}
Iterating Over Attributes
+++++++++++++++++++++++++
.. code-block:: cpp
:emphasize-lines: 3,7
// The range-based for loop provides a method for iterating over the bundle contents.
const auto& inputBundle = db.inputs.myBundle();
for (const auto& bundledAttribute : inputBundle)
{
// Type information is available from a bundled attribute, consisting of a structure defined in
// include/omni/graph/core/Type.h
auto type = bundledAttribute.type();
// The type has four pieces, the first is the basic data type...
assert( type.baseType == BaseDataType::eFloat );
// .. the second is the role, if any
assert( type.role == AttributeRole::eNormal );
// .. the third is the number of tuple components (e.g. 3 for float[3] types)
assert( type.componentCount == 3 );
// .. the last is the array depth, either 0 or 1
assert( type.arrayDepth == 0 );
}
| 5,757 | reStructuredText | 42.293233 | 124 | 0.697238 |
omniverse-code/kit/exts/omni.graph.docs/docs/tutorials/tutorial24.rst | .. _ogn_tutorial_overrideTypes:
Tutorial 24 - Overridden Types
==============================
By default the code generator will provide POD types for simple data, and USD types for tuple data (e.g. ``float`` and
``pxr::GfVec3f``). Sometimes you may have your own favourite math library and want to use its data types directly
rather than constantly using a _reinterpret_cast_ on the attribute values. To facilitate this, JSON data which
contains type overrides for one or more of the attribute types may be provided so that the generated code will use
those types directly.
OgnTutorialOverrideType.ogn
---------------------------
The *ogn* file shows the implementation of a node named "omni.graph.tutorials.OverrideType", which has one
input and one output attribute that use an overridden type for ``float[3]``.
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial24/OgnTutorialOverrideType.ogn
:linenos:
:language: json
OgnTutorialOverrideType.cpp
---------------------------
The *cpp* file contains the implementation of the compute method. The default type implementation would have a return
type of ``pxr::GfVec3f`` but this one uses the override type of ``carb::Float3``
.. literalinclude:: ../../../../../source/extensions/omni.graph.tutorials/tutorials/tutorial24/OgnTutorialOverrideType.cpp
:linenos:
:language: c++
| 1,390 | reStructuredText | 45.366665 | 122 | 0.718705 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.