file_path
stringlengths 21
202
| content
stringlengths 19
1.02M
| size
int64 19
1.02M
| lang
stringclasses 8
values | avg_line_length
float64 5.88
100
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
USwampertor/OmniverseJS/ov/python/pxr/Usdviewq/primLegendUI.py | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'primLegendUI.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_PrimLegend(object):
    """uic-generated builder for usdview's prim legend widget.

    Generated from 'primLegendUI.ui' by the Qt User Interface Compiler;
    any hand edits here are lost when the UI file is recompiled.
    """
    def setupUi(self, PrimLegend):
        # Build the widget hierarchy: a vertical container holding a grid of
        # (color swatch, label) pairs plus a column of explanatory labels.
        if not PrimLegend.objectName():
            PrimLegend.setObjectName(u"PrimLegend")
        PrimLegend.resize(438, 131)
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(PrimLegend.sizePolicy().hasHeightForWidth())
        PrimLegend.setSizePolicy(sizePolicy)
        self.primLegendLayoutContainer = QVBoxLayout(PrimLegend)
        self.primLegendLayoutContainer.setObjectName(u"primLegendLayoutContainer")
        # Grid row 0: one color swatch (a tiny QGraphicsView) and one label
        # per prim category (HasArcs, Instance, Master, Normal).
        self.primLegendLayout = QGridLayout()
        self.primLegendLayout.setObjectName(u"primLegendLayout")
        self.primLegendColorHasArcs = QGraphicsView(PrimLegend)
        self.primLegendColorHasArcs.setObjectName(u"primLegendColorHasArcs")
        self.primLegendColorHasArcs.setMaximumSize(QSize(20, 15))
        self.primLegendLayout.addWidget(self.primLegendColorHasArcs, 0, 0, 1, 1)
        self.primLegendLabelHasArcs = QLabel(PrimLegend)
        self.primLegendLabelHasArcs.setObjectName(u"primLegendLabelHasArcs")
        # Shared label font: explicitly normal weight and non-italic.
        font = QFont()
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.primLegendLabelHasArcs.setFont(font)
        self.primLegendLayout.addWidget(self.primLegendLabelHasArcs, 0, 1, 1, 1)
        self.primLegendColorInstance = QGraphicsView(PrimLegend)
        self.primLegendColorInstance.setObjectName(u"primLegendColorInstance")
        self.primLegendColorInstance.setMaximumSize(QSize(20, 15))
        self.primLegendLayout.addWidget(self.primLegendColorInstance, 0, 2, 1, 1)
        self.primLegendLabelInstance = QLabel(PrimLegend)
        self.primLegendLabelInstance.setObjectName(u"primLegendLabelInstance")
        self.primLegendLabelInstance.setFont(font)
        self.primLegendLayout.addWidget(self.primLegendLabelInstance, 0, 3, 1, 1)
        self.primLegendColorMaster = QGraphicsView(PrimLegend)
        self.primLegendColorMaster.setObjectName(u"primLegendColorMaster")
        self.primLegendColorMaster.setMaximumSize(QSize(20, 15))
        self.primLegendLayout.addWidget(self.primLegendColorMaster, 0, 4, 1, 1)
        self.primLegendLabelMaster = QLabel(PrimLegend)
        self.primLegendLabelMaster.setObjectName(u"primLegendLabelMaster")
        self.primLegendLabelMaster.setFont(font)
        self.primLegendLayout.addWidget(self.primLegendLabelMaster, 0, 5, 1, 1)
        self.primLegendColorNormal = QGraphicsView(PrimLegend)
        self.primLegendColorNormal.setObjectName(u"primLegendColorNormal")
        self.primLegendColorNormal.setMaximumSize(QSize(20, 15))
        self.primLegendLayout.addWidget(self.primLegendColorNormal, 0, 6, 1, 1)
        self.primLegendLabelNormal = QLabel(PrimLegend)
        self.primLegendLabelNormal.setObjectName(u"primLegendLabelNormal")
        self.primLegendLabelNormal.setFont(font)
        self.primLegendLayout.addWidget(self.primLegendLabelNormal, 0, 7, 1, 1)
        self.primLegendLayoutContainer.addLayout(self.primLegendLayout)
        # Column of free-text labels explaining dimming and font conventions.
        self.primLegendLabelContainer = QVBoxLayout()
        self.primLegendLabelContainer.setObjectName(u"primLegendLabelContainer")
        self.primLegendLabelDimmed = QLabel(PrimLegend)
        self.primLegendLabelDimmed.setObjectName(u"primLegendLabelDimmed")
        self.primLegendLabelContainer.addWidget(self.primLegendLabelDimmed)
        self.primLegendLabelFontsAbstract = QLabel(PrimLegend)
        self.primLegendLabelFontsAbstract.setObjectName(u"primLegendLabelFontsAbstract")
        self.primLegendLabelContainer.addWidget(self.primLegendLabelFontsAbstract)
        self.primLegendLabelFontsUndefined = QLabel(PrimLegend)
        self.primLegendLabelFontsUndefined.setObjectName(u"primLegendLabelFontsUndefined")
        self.primLegendLabelContainer.addWidget(self.primLegendLabelFontsUndefined)
        self.primLegendLabelFontsDefined = QLabel(PrimLegend)
        self.primLegendLabelFontsDefined.setObjectName(u"primLegendLabelFontsDefined")
        self.primLegendLabelContainer.addWidget(self.primLegendLabelFontsDefined)
        self.primLegendLayoutContainer.addLayout(self.primLegendLabelContainer)

        self.retranslateUi(PrimLegend)

        QMetaObject.connectSlotsByName(PrimLegend)
    # setupUi

    def retranslateUi(self, PrimLegend):
        # Install the source-language (translatable) strings on the widgets.
        PrimLegend.setProperty("comment", QCoreApplication.translate("PrimLegend", u"\n"
            " Copyright 2017 Pixar \n"
            " \n"
            " Licensed under the Apache License, Version 2.0 (the \"Apache License\") \n"
            " with the following modification; you may not use this file except in \n"
            " compliance with the Apache License and the following modification to it: \n"
            " Section 6. Trademarks. is deleted and replaced with: \n"
            " \n"
            " 6. Trademarks. This License does not grant permission to use the trade \n"
            " names, trademarks, service marks, or product names of the Licensor \n"
            " and its affiliates, except as required to comply with Section 4(c) of \n"
            " the License and to reproduce the content of the NOTI"
            "CE file. \n"
            " \n"
            " You may obtain a copy of the Apache License at \n"
            " \n"
            " http://www.apache.org/licenses/LICENSE-2.0 \n"
            " \n"
            " Unless required by applicable law or agreed to in writing, software \n"
            " distributed under the Apache License with the above modification is \n"
            " distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY \n"
            " KIND, either express or implied. See the Apache License for the specific \n"
            " language governing permissions and limitations under the Apache License. \n"
            " ", None))
        self.primLegendLabelHasArcs.setText(QCoreApplication.translate("PrimLegend", u"HasArcs", None))
        self.primLegendLabelInstance.setText(QCoreApplication.translate("PrimLegend", u"Instance", None))
        self.primLegendLabelMaster.setText(QCoreApplication.translate("PrimLegend", u"Master", None))
        self.primLegendLabelNormal.setText(QCoreApplication.translate("PrimLegend", u"Normal", None))
        self.primLegendLabelDimmed.setText(QCoreApplication.translate("PrimLegend", u"Dimmed colors denote inactive prims", None))
        self.primLegendLabelFontsAbstract.setText(QCoreApplication.translate("PrimLegend", u"Normal font indicates abstract prims(class and children)", None))
        self.primLegendLabelFontsUndefined.setText(QCoreApplication.translate("PrimLegend", u"Italic font indicates undefined prims(declared with over)", None))
        self.primLegendLabelFontsDefined.setText(QCoreApplication.translate("PrimLegend", u"Bold font indicates defined prims(declared with def)", None))
    # retranslateUi
| 8,092 | Python | 52.596026 | 160 | 0.652373 |
USwampertor/OmniverseJS/ov/python/pxr/Usdviewq/usdviewContextMenuItem.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
#
# A base class for all context menu items.
# This provides a simple behavior to ensure that a chosen
# context menu item is valid. This helps us avoid a situation
# in which a user right-clicks in an area with no item but still
# receives a context menu.
#
class UsdviewContextMenuItem():
    """Base class for all context menu items.

    Provides a simple validity check so a chosen context menu item can be
    verified, helping avoid presenting a context menu when the user
    right-clicks in an area with no item.
    """

    def isValid(self):
        '''Menu items which have an invalid internal item are considered
        invalid.  Header menus don't contain an internal _item attribute, so
        we return true in the case of the attribute being undefined.  We use
        this function to give this state a clearer name.
        '''
        absent = object()
        internalItem = getattr(self, "_item", absent)
        return internalItem is absent or internalItem is not None
| 1,748 | Python | 43.846153 | 82 | 0.736842 |
USwampertor/OmniverseJS/ov/python/pxr/Usdviewq/primContextMenu.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from .qt import QtWidgets
from .primContextMenuItems import _GetContextMenuItems
#
# Specialized context menu for prim selection.
#
# It uses the per-prim context menus referenced by the _GetContextMenuItems
# function in primContextMenuItems. To add a new context menu item,
# see comments in that file.
#
class PrimContextMenu(QtWidgets.QMenu):
    """Specialized context menu for prim selection.

    Uses the per-prim context menus referenced by the _GetContextMenuItems
    function in primContextMenuItems.  To add a new context menu item, see
    comments in that file.
    """

    def __init__(self, parent, item, appController):
        QtWidgets.QMenu.__init__(self, parent)
        self._menuItems = _GetContextMenuItems(appController, item)

        for menuItem in self._menuItems:
            # Separators carry no action or text of their own.
            if menuItem.IsSeparator():
                self.addSeparator()
                continue

            # Skip menu items whose internal state is invalid.
            if not menuItem.isValid():
                continue

            action = self.addAction(menuItem.GetText(), menuItem.RunCommand)
            if not menuItem.IsEnabled():
                action.setEnabled( False )
USwampertor/OmniverseJS/ov/python/pxr/Usdviewq/customAttributes.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Usd, UsdGeom, UsdShade
from .constantGroup import ConstantGroup
class ComputedPropertyNames(ConstantGroup):
    """Names of all available computed properties."""
    # Display names used to identify usdview's computed (non-authored)
    # properties in the UI and in ComputedPropertyFactory dispatch.
    WORLD_BBOX = "World Bounding Box"
    LOCAL_WORLD_XFORM = "Local to World Xform"
    RESOLVED_PREVIEW_MATERIAL = "Resolved Preview Material"
    RESOLVED_FULL_MATERIAL = "Resolved Full Material"
#
# Edit the following to alter the set of custom attributes.
#
# Every entry should be an object derived from CustomAttribute,
# defined below.
#
def _GetCustomAttributes(currentPrim, rootDataModel):
    """Return the list of computed (custom) properties for currentPrim.

    A prim participates in imageable computations when it is a
    UsdGeomImageable or when it is a typeless def; for such prims the
    bounding box, local-to-world xform, and resolved material properties
    are returned.  Otherwise an empty list is returned.
    """
    # Fix: the original computed this flag and then re-evaluated
    # currentPrim.IsA(UsdGeom.Imageable) instead of using it.
    currentPrimIsImageable = currentPrim.IsA(UsdGeom.Imageable)

    # If the currentPrim is imageable or if it is a typeless def, it
    # participates in imageable computations.
    currentPrimGetsImageableComputations = currentPrimIsImageable \
            or not currentPrim.GetTypeName()

    if currentPrimGetsImageableComputations:
        return [BoundingBoxAttribute(currentPrim, rootDataModel),
                LocalToWorldXformAttribute(currentPrim,
                                           rootDataModel),
                ResolvedPreviewMaterial(currentPrim, rootDataModel),
                ResolvedFullMaterial(currentPrim, rootDataModel)]

    return []
#
# The base class for per-prim custom attributes.
#
class CustomAttribute:
    """Base class for per-prim custom (computed) attributes.

    Instances mimic a small subset of the UsdAttribute API so the UI can
    treat computed properties and authored attributes uniformly.
    """

    def __init__(self, currentPrim, rootDataModel):
        self._rootDataModel = rootDataModel
        self._currentPrim = currentPrim

    def IsVisible(self):
        # Computed attributes are always shown.
        return True

    def GetName(self):
        """GetName function to match UsdAttribute API."""
        return ""

    def Get(self, frame):
        """Get function to match UsdAttribute API."""
        return ""

    def GetTypeName(self):
        """Convenience function to make this look more like a UsdAttribute."""
        return ""

    def GetPrimPath(self):
        """GetPrimPath function to match UsdAttribute API."""
        return self._currentPrim.GetPath()
#
# Displays the bounding box of a prim
#
class BoundingBoxAttribute(CustomAttribute):
    """Displays the world-space bounding box of a prim."""

    def __init__(self, currentPrim, rootDataModel):
        CustomAttribute.__init__(self, currentPrim, rootDataModel)

    def GetName(self):
        return ComputedPropertyNames.WORLD_BBOX

    def Get(self, frame):
        """Return the aligned world bound, or an error string on failure."""
        try:
            worldBound = self._rootDataModel.computeWorldBound(
                self._currentPrim)
            return worldBound.ComputeAlignedRange()
        except RuntimeError as err:
            return "Invalid: " + str(err)
#
# Displays the Local to world xform of a prim
#
class LocalToWorldXformAttribute(CustomAttribute):
    """Displays the local-to-world transform of a prim."""

    def __init__(self, currentPrim, rootDataModel):
        CustomAttribute.__init__(self, currentPrim, rootDataModel)

    def GetName(self):
        return ComputedPropertyNames.LOCAL_WORLD_XFORM

    def Get(self, frame):
        """Return the prim's world transform, or an error string on failure."""
        try:
            return self._rootDataModel.getLocalToWorldTransform(
                self._currentPrim)
        except RuntimeError as err:
            return "Invalid: " + str(err)
class ResolvedBoundMaterial(CustomAttribute):
    """Displays the material bound to a prim for a given binding purpose."""

    def __init__(self, currentPrim, rootDataModel, purpose):
        CustomAttribute.__init__(self, currentPrim, rootDataModel)
        self._purpose = purpose

    def GetName(self):
        # Only the 'full' and 'preview' purposes have display names.
        if self._purpose == UsdShade.Tokens.full:
            return ComputedPropertyNames.RESOLVED_FULL_MATERIAL
        if self._purpose == UsdShade.Tokens.preview:
            return ComputedPropertyNames.RESOLVED_PREVIEW_MATERIAL
        raise ValueError("Invalid purpose '{}'.".format(self._purpose))

    def Get(self, frame):
        """Return the bound material's prim path, '<unbound>' when no
        material is bound, or an error string on failure."""
        try:
            (boundMaterial, bindingRel) = \
                self._rootDataModel.computeBoundMaterial(
                    self._currentPrim, self._purpose)
            if boundMaterial:
                boundMatPath = boundMaterial.GetPrim().GetPath()
            else:
                boundMatPath = "<unbound>"
        except RuntimeError as err:
            boundMatPath = "Invalid: " + str(err)
        return boundMatPath
class ResolvedFullMaterial(ResolvedBoundMaterial):
    """ResolvedBoundMaterial specialized for the 'full' purpose."""

    def __init__(self, currentPrim, rootDataModel):
        super(ResolvedFullMaterial, self).__init__(
            currentPrim, rootDataModel, UsdShade.Tokens.full)
class ResolvedPreviewMaterial(ResolvedBoundMaterial):
    """ResolvedBoundMaterial specialized for the 'preview' purpose."""

    def __init__(self, currentPrim, rootDataModel):
        super(ResolvedPreviewMaterial, self).__init__(
            currentPrim, rootDataModel, UsdShade.Tokens.preview)
class ComputedPropertyFactory:
    """Creates computed properties."""

    def __init__(self, rootDataModel):
        self._rootDataModel = rootDataModel

    def getComputedProperty(self, prim, propName):
        """Create a new computed property from a prim and property name."""
        # Dispatch table: computed-property display name -> implementing class.
        propClasses = {
            ComputedPropertyNames.WORLD_BBOX: BoundingBoxAttribute,
            ComputedPropertyNames.LOCAL_WORLD_XFORM: LocalToWorldXformAttribute,
            ComputedPropertyNames.RESOLVED_FULL_MATERIAL: ResolvedFullMaterial,
            ComputedPropertyNames.RESOLVED_PREVIEW_MATERIAL:
                ResolvedPreviewMaterial,
        }
        if propName not in propClasses:
            raise ValueError("Cannot create computed property '{}'.".format(
                propName))
        return propClasses[propName](prim, self._rootDataModel)
| 6,465 | Python | 35.325842 | 81 | 0.685538 |
USwampertor/OmniverseJS/ov/python/pxr/Tf/__init__.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""
Tf -- Tools Foundation
"""
def PrepareModule(module, result):
    """PrepareModule(module, result) -- Prepare an extension module at import
    time.  Generally, this should only be called by the __init__.py script for
    a module upon loading a boost python module (generally '_LibName.so').

    Copies the public contents of module's namespace into the result
    namespace (typically the importing package's locals()), and rewrites each
    copied object's __module__ attribute so the objects appear to originate
    from the wrapping package rather than the binary module.
    """
    # Names that must not be clobbered in the destination namespace.
    ignore = frozenset(['__name__', '__builtins__',
                        '__doc__', '__file__', '__path__'])
    newModuleName = result.get('__name__')

    for key, value in list(module.__dict__.items()):
        # Fix: idiomatic membership test (was "not key in ignore").
        if key not in ignore:
            result[key] = value

            # Lie about the module from which value came.
            if newModuleName and hasattr(value, '__module__'):
                try:
                    setattr(value, '__module__', newModuleName)
                except AttributeError:
                    # The __module__ attribute of Boost.Python.function
                    # objects is not writable, so we get this exception
                    # a lot.  Just ignore it.  We're really only concerned
                    # about the data objects like enum values and such.
                    pass
def GetCodeLocation(framesUp):
    """Returns a tuple (moduleName, functionName, fileName, lineNo).

    To trace the current location of python execution, use GetCodeLocation().
    By default, the information is returned at the current stack-frame; thus

        info = GetCodeLocation()

    will return information about the line that GetCodeLocation() was called
    from. One can write:

        def genericDebugFacility():
            info = GetCodeLocation(1)
            # print out data

        def someCode():
            ...
            if bad:
                genericDebugFacility()

    and genericDebugFacility() will get information associated with its
    caller, i.e. the function someCode()."""
    import sys
    # Walk framesUp frames above this one, then one more to reach the caller
    # of interest.
    caller = sys._getframe(framesUp).f_back
    code = caller.f_code
    return (caller.f_globals['__name__'], code.co_name,
            code.co_filename, caller.f_lineno)
# for some strange reason, this errors out when we try to reload it,
# which is odd since _tf is a DSO and can't be reloaded anyway:
import sys
if "pxr.Tf._tf" not in sys.modules:
    from . import _tf
    # Splice the binary module's public names into this package namespace.
    PrepareModule(_tf, locals())
    del _tf
# Keep the package namespace clean.
del sys
# Need to provide an exception type that tf errors will show up as.
class ErrorException(RuntimeError):
    """Exception raised in Python for errors posted to the Tf error system."""

    def __init__(self, *args):
        RuntimeError.__init__(self, *args)
        # Marker attribute identifying Tf-originated exceptions.
        self.__TfException = True

    def __str__(self):
        formatted = [str(err) for err in self.args]
        return '\n\t' + '\n\t'.join(formatted)
# Install our exception class so errors raised from the Tf error system
# surface in Python as Tf.ErrorException.
__SetErrorExceptionClass(ErrorException)

try:
    # __DOC presumably carries generated docstrings for the wrapped module;
    # it may legitimately be absent in some builds, hence the broad except.
    from . import __DOC
    __DOC.Execute(locals())
    del __DOC
except Exception:
    pass
def Warn(msg, template=""):
    """Issue a warning via the TfDiagnostic system.

    At this time, template is ignored.
    """
    module, function, fileName, lineNo = GetCodeLocation(framesUp=1)
    _Warn(msg, module, function, fileName, lineNo)
def Status(msg, verbose=True):
    """Issues a status update to the Tf diagnostic system.

    If verbose is True (the default) then information about where in the code
    the status update was issued from is included.
    """
    if not verbose:
        _Status(msg, "", "", "", 0)
        return
    module, function, fileName, lineNo = GetCodeLocation(framesUp=1)
    _Status(msg, module, function, fileName, lineNo)
def RaiseCodingError(msg):
    """Raise a coding error to the Tf Diagnostic system."""
    module, function, fileName, lineNo = GetCodeLocation(framesUp=1)
    _RaiseCodingError(msg, module, function, fileName, lineNo)
def RaiseRuntimeError(msg):
    """Raise a runtime error to the Tf Diagnostic system."""
    module, function, fileName, lineNo = GetCodeLocation(framesUp=1)
    _RaiseRuntimeError(msg, module, function, fileName, lineNo)
def Fatal(msg):
    """Raise a fatal error to the Tf Diagnostic system."""
    module, function, fileName, lineNo = GetCodeLocation(framesUp=1)
    _Fatal(msg, module, function, fileName, lineNo)
class NamedTemporaryFile(object):
    """A named temporary file which keeps the internal file handle closed.

    Constructs a temporary file (that isn't open) on __enter__, provides its
    name as an attribute, and deletes it on __exit__.

    Note: The constructor args for this object match those of python's
    tempfile.mkstemp() function, and will have the same effect on the
    underlying file created.
    """

    def __init__(self, suffix='', prefix='', dir=None, text=False):
        # Defer creation until the enter block, so that users don't
        # unintentionally create a bunch of temp files that never get
        # cleaned up.
        self._args = (suffix, prefix, dir, text)

    def __enter__(self):
        import os
        import tempfile

        # Create the file, then immediately close the descriptor so only the
        # path remains.
        handle, tmpPath = tempfile.mkstemp(*self._args)
        os.close(handle)

        # XXX: We currently only expose the name attribute; more can be added
        # based on client needs in the future.
        self._name = tmpPath
        return self

    def __exit__(self, *args):
        from os import remove
        remove(self.name)

    @property
    def name(self):
        """The path for the temporary file created."""
        return self._name
| 6,386 | Python | 34.681564 | 80 | 0.644065 |
USwampertor/OmniverseJS/ov/python/pxr/Tf/testenv/testTfScriptModuleLoader_Unknown.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Tf

# Test scenario: a library ('Unknown') that names a dynamic dependency which
# only becomes registered at load time.

# Register this library with a new, previously unknown dependency. Note, we're
# just using 'sys' as the module to load here, which is a lie. It should really
# be this module, but it doesn't matter, and it's not trivial to come up with a
# robust, correct module name for this module.
Tf.ScriptModuleLoader()._RegisterLibrary('Unknown', 'sys',
                                         ['NewDynamicDependency'])

# Register the dependency. In this case we use 'sys' just because it saves us
# from creating a real module called NewDynamicDependency.
Tf.ScriptModuleLoader()._RegisterLibrary('NewDynamicDependency', 'sys', [])

# Load dependencies for this module, which includes the
# NewDynamicDependency. This is a reentrant load that will be handled
# immediately by TfScriptModuleLoader.
Tf.ScriptModuleLoader()._LoadModulesForLibrary('Unknown')
| 1,934 | Python | 43.999999 | 80 | 0.751293 |
USwampertor/OmniverseJS/ov/python/pxr/UsdUtils/complianceChecker.py | #
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from __future__ import print_function
from pxr import Ar
def _IsPackageOrPackagedLayer(layer):
    """Return True if layer is a package layer or lives inside a package."""
    if layer.GetFileFormat().IsPackage():
        return True
    return Ar.IsPackageRelativePath(layer.identifier)
class BaseRuleChecker(object):
    """This is Base class for all the rule-checkers.

    Subclasses override one or more of the Check* callbacks below and report
    problems through _AddFailedCheck and _AddError.
    """

    def __init__(self, verbose):
        self._verbose = verbose
        self._failedChecks = []
        self._errors = []

    def _AddFailedCheck(self, msg):
        # Record a rule violation found in the asset being checked.
        self._failedChecks.append(msg)

    def _AddError(self, msg):
        # Record an error encountered while running the checks themselves.
        self._errors.append(msg)

    def _Msg(self, msg):
        # Progress output, emitted only in verbose mode.
        if self._verbose:
            print(msg)

    def GetFailedChecks(self):
        return self._failedChecks

    def GetErrors(self):
        return self._errors

    # -------------------------------------------------------------------------
    # Virtual methods that any derived rule-checker may want to override.
    # Default implementations do nothing.
    #
    # A rule-checker may choose to override one or more of the virtual methods.
    # The callbacks are invoked in the order they are defined here (i.e.
    # CheckStage is invoked first, followed by CheckDiagnostics, followed by
    # CheckUnresolvedPaths and so on until CheckPrim). Some of the callbacks may
    # be invoked multiple times per-rule with different parameters, for example,
    # CheckLayer, CheckPrim and CheckZipFile.

    def CheckStage(self, usdStage):
        """ Check the given usdStage. """

    def CheckDiagnostics(self, diagnostics):
        """ Check the diagnostic messages that were generated when opening the
        USD stage. The diagnostic messages are collected using a
        UsdUtilsCoalescingDiagnosticDelegate.
        """

    def CheckUnresolvedPaths(self, unresolvedPaths):
        """ Check or process any unresolved asset paths that were found when
        analysing the dependencies.
        """

    def CheckDependencies(self, usdStage, layerDeps, assetDeps):
        """ Check usdStage's layer and asset dependencies that were gathered
        using UsdUtils.ComputeAllDependencies().
        """

    def CheckLayer(self, layer):
        """ Check the given SdfLayer. """

    def CheckZipFile(self, zipFile, packagePath):
        """ Check the zipFile object created by opening the package at path
        packagePath.
        """

    def CheckPrim(self, prim):
        """ Check the given prim, which may only exist is a specific combination
        of variant selections on the UsdStage.
        """
    # -------------------------------------------------------------------------
class ByteAlignmentChecker(BaseRuleChecker):
    """Verifies that every file inside a usdz package starts on a 64-byte
    boundary."""

    @staticmethod
    def GetDescription():
        return "Files within a usdz package must be laid out properly, "\
            "i.e. they should be aligned to 64 bytes."

    def __init__(self, verbose):
        super(ByteAlignmentChecker, self).__init__(verbose)

    def CheckZipFile(self, zipFile, packagePath):
        """Add a failed check for each file in zipFile whose data offset is
        not 64-byte aligned."""
        for fileName in zipFile.GetFileNames():
            # Fix: removed an unused file-extension lookup (dead Ar resolver
            # call per file).
            offset = zipFile.GetFileInfo(fileName).dataOffset
            if offset % 64 != 0:
                self._AddFailedCheck("File '%s' in package '%s' has an "
                                     "invalid offset %s." %
                                     (fileName, packagePath, offset))
class CompressionChecker(BaseRuleChecker):
    """Verifies that no file inside a usdz package is stored compressed."""

    @staticmethod
    def GetDescription():
        # Fix: corrected "withing" typo in the user-facing description.
        return "Files within a usdz package should not be compressed or "\
            "encrypted."

    def __init__(self, verbose):
        super(CompressionChecker, self).__init__(verbose)

    def CheckZipFile(self, zipFile, packagePath):
        """Add a failed check for each file in zipFile whose compression
        method is non-zero (i.e. stored compressed)."""
        for fileName in zipFile.GetFileNames():
            # Fix: removed an unused file-extension lookup (dead Ar resolver
            # call per file).
            fileInfo = zipFile.GetFileInfo(fileName)
            if fileInfo.compressionMethod != 0:
                self._AddFailedCheck("File '%s' in package '%s' has "
                    "compression. Compression method is '%s', actual size "
                    "is %s. Uncompressed size is %s." % (
                    fileName, packagePath, fileInfo.compressionMethod,
                    fileInfo.size, fileInfo.uncompressedSize))
class MissingReferenceChecker(BaseRuleChecker):
    """Flags unresolved asset dependencies found while composing the stage."""

    @staticmethod
    def GetDescription():
        return "The composed USD stage should not contain any unresolvable"\
            " asset dependencies (in every possible variation of the "\
            "asset), when using the default asset resolver. "

    def __init__(self, verbose):
        super(MissingReferenceChecker, self).__init__(verbose)

    def CheckDiagnostics(self, diagnostics):
        for diag in diagnostics:
            # "_ReportErrors" is the name of the function that issues
            # warnings about unresolved references, sublayers and other
            # composition arcs.
            isCompositionDiag = ('_ReportErrors' in diag.sourceFunction
                                 and 'usd/stage.cpp' in diag.sourceFileName)
            if isCompositionDiag:
                self._AddFailedCheck(diag.commentary)

    def CheckUnresolvedPaths(self, unresolvedPaths):
        for missingPath in unresolvedPaths:
            self._AddFailedCheck("Found unresolvable external dependency "
                                 "'%s'." % missingPath)
class TextureChecker(BaseRuleChecker):
    """Checks that all textures referenced by shaders use an allowed image
    format."""

    # Allow just png and jpg for now.
    _allowedImageFormats = ("jpg", "png")

    # Include a list of "unsupported" image formats to provide better error
    # messages when we find one of these.
    _unsupportedImageFormats = ["bmp", "tga", "hdr", "exr", "tif", "zfile",
                                "tx"]

    @staticmethod
    def GetDescription():
        return "Texture files should be .jpg or .png."

    def __init__(self, verbose):
        super(TextureChecker, self).__init__(verbose)

    def _CheckTexture(self, texAssetPath):
        """Add a failed check if texAssetPath's extension is not jpg/png."""
        self._Msg("Checking texture <%s>." % texAssetPath)
        texFileExt = Ar.GetResolver().GetExtension(texAssetPath).lower()
        if texFileExt in \
            TextureChecker._unsupportedImageFormats:
            self._AddFailedCheck("Found texture file '%s' with unsupported "
                    "file format." % texAssetPath)
        elif texFileExt not in \
            TextureChecker._allowedImageFormats:
            self._AddFailedCheck("Found texture file '%s' with unknown file "
                    "format." % texAssetPath)

    def CheckPrim(self, prim):
        # Right now, we find texture referenced by looking at the asset-valued
        # shader inputs. However, it is entirely legal to feed the "fileName"
        # input of a UsdUVTexture shader from a UsdPrimvarReader_string.
        # Hence, ideally we would also check "the right" primvars on
        # geometry prims here. However, identifying the right primvars is
        # non-trivial. We probably need to pre-analyze all the materials.
        # Not going to try to do this yet, but it raises an interesting
        # validation pattern -
        from pxr import Sdf, UsdShade

        # Check if the prim is a shader.
        if not prim.IsA(UsdShade.Shader):
            return

        shader = UsdShade.Shader(prim)
        shaderInputs = shader.GetInputs()
        for ip in shaderInputs:
            if ip.GetTypeName() == Sdf.ValueTypeNames.Asset:
                texFilePath = str(ip.Get()).strip('@')
                self._CheckTexture(texFilePath)
            elif ip.GetTypeName() == Sdf.ValueTypeNames.AssetArray:
                texPathArray = ip.Get()
                texPathArray = [str(i).strip('@') for i in texPathArray]
                for texPath in texPathArray:
                    # Bug fix: previously this checked texFilePath (the last
                    # scalar asset seen) instead of each array entry, so
                    # AssetArray inputs were never actually validated.
                    self._CheckTexture(texPath)
class ARKitPackageEncapsulationChecker(BaseRuleChecker):
    """Checks that a package-rooted stage is entirely self-contained."""
    @staticmethod
    def GetDescription():
        return "If the root layer is a package, then the composed stage "\
            "should not contain references to files outside the package. "\
            "In other words, the package should be entirely self-contained."

    def __init__(self, verbose):
        super(ARKitPackageEncapsulationChecker, self).__init__(verbose)

    def CheckDependencies(self, usdStage, layerDeps, assetDeps):
        # Nothing to verify unless the stage is rooted at (or inside) a
        # package.
        rootLayer = usdStage.GetRootLayer()
        if not _IsPackageOrPackagedLayer(rootLayer):
            return

        packagePath = usdStage.GetRootLayer().realPath
        if packagePath:
            # If the root is a packaged layer (e.g. foo.usdz[inner.usd]),
            # strip down to the outermost package path.
            if Ar.IsPackageRelativePath(packagePath):
                packagePath = Ar.SplitPackageRelativePathOuter(
                        packagePath)[0]
            for layer in layerDeps:
                # In-memory layers like session layers (which we must skip when
                # doing this check) won't have a real path.
                if layer.realPath:
                    if not layer.realPath.startswith(packagePath):
                        self._AddFailedCheck("Found loaded layer '%s' that "
                            "does not belong to the package '%s'." %
                            (layer.identifier, packagePath))
            for asset in assetDeps:
                if not asset.startswith(packagePath):
                    self._AddFailedCheck("Found asset reference '%s' that "
                        "does not belong to the package '%s'." %
                        (asset, packagePath))
class ARKitLayerChecker(BaseRuleChecker):
    """Checks that every layer participating in composition uses one of
    the core USD file formats."""

    # Only core USD file formats are allowed.
    _allowedLayerFormatIds = ('usd', 'usda', 'usdc', 'usdz')

    @staticmethod
    def GetDescription():
        return "All included layers that participate in composition should"\
            " have one of the core supported file formats."

    def __init__(self, verbose):
        # Forward the verbosity flag to the base class. (The previous
        # comment here was copy-pasted from a prim-type checker.)
        super(ARKitLayerChecker, self).__init__(verbose)

    def CheckLayer(self, layer):
        self._Msg("Checking layer <%s>." % layer.identifier)

        formatId = layer.GetFileFormat().formatId
        # Idiom fix: 'x not in y' instead of 'not x in y'.
        if formatId not in ARKitLayerChecker._allowedLayerFormatIds:
            self._AddFailedCheck("Layer '%s' has unsupported formatId "
                                 "'%s'." % (layer.identifier, formatId))
class ARKitPrimTypeChecker(BaseRuleChecker):
    """Restricts stage contents to a whitelisted set of core prim types."""

    # All core prim types other than UsdGeomPointInstancers, Curve types,
    # Nurbs, and the types in UsdLux are allowed.
    _allowedPrimTypeNames = ('', 'Scope', 'Xform', 'Camera',
                             'Shader', 'Material',
                             'Mesh', 'Sphere', 'Cube', 'Cylinder', 'Cone',
                             'Capsule', 'GeomSubset', 'Points',
                             'SkelRoot', 'Skeleton', 'SkelAnimation',
                             'BlendShape', 'SpatialAudio')

    @staticmethod
    def GetDescription():
        return "UsdGeomPointInstancers and custom schemas not provided by "\
            "core USD are not allowed."

    def __init__(self, verbose):
        super(ARKitPrimTypeChecker, self).__init__(verbose)

    def CheckPrim(self, prim):
        self._Msg("Checking prim <%s>." % prim.GetPath())
        # An empty type name is allowed (typeless prims / overs).
        if prim.GetTypeName() in ARKitPrimTypeChecker._allowedPrimTypeNames:
            return
        self._AddFailedCheck("Prim <%s> has unsupported type '%s'." %
                             (prim.GetPath(), prim.GetTypeName()))
class ARKitStageYupChecker(BaseRuleChecker):
    """Checks that the stage's up-axis is Y."""

    @staticmethod
    def GetDescription():
        # Bug fix: the original returned a 1-tuple because of a stray
        # trailing comma, and misspelled "of" as "fo".
        return "The stage and all of the assets referenced within it "\
            "should be Y-up."

    def __init__(self, verbose):
        super(ARKitStageYupChecker, self).__init__(verbose)

    def CheckStage(self, usdStage):
        from pxr import UsdGeom
        upAxis = UsdGeom.GetStageUpAxis(usdStage)
        if upAxis != UsdGeom.Tokens.y:
            self._AddFailedCheck("Stage has upAxis '%s'. upAxis should be "
                                 "'%s'." % (upAxis, UsdGeom.Tokens.y))
class ARKitShaderChecker(BaseRuleChecker):
    """Validates shader prims: id-based implementation source, core Usd*
    shader ids, and single valid connection sources on connected inputs."""

    @staticmethod
    def GetDescription():
        return "Shader nodes must have \"id\" as the implementationSource, " \
            "with id values that begin with \"Usd*\". Also, shader inputs "\
            "with connections must each have a single, valid connection " \
            "source."

    def __init__(self, verbose):
        super(ARKitShaderChecker, self).__init__(verbose)

    def CheckPrim(self, prim):
        from pxr import UsdShade

        if not prim.IsA(UsdShade.Shader):
            return

        shader = UsdShade.Shader(prim)
        if not shader:
            self._AddError("Invalid shader prim <%s>." % prim.GetPath())
            return

        self._Msg("Checking shader <%s>." % prim.GetPath())

        implSource = shader.GetImplementationSource()
        if implSource != UsdShade.Tokens.id:
            self._AddFailedCheck("Shader <%s> has non-id implementation "
                    "source '%s'." % (prim.GetPath(), implSource))

        shaderId = shader.GetShaderId()
        if not shaderId or \
           not (shaderId in ['UsdPreviewSurface', 'UsdUVTexture'] or
                shaderId.startswith('UsdPrimvarReader')) :
            self._AddFailedCheck("Shader <%s> has unsupported info:id '%s'."
                    % (prim.GetPath(), shaderId))

        # Check shader input connections
        shaderInputs = shader.GetInputs()
        for shdInput in shaderInputs:
            connections = shdInput.GetAttr().GetConnections()
            # If an input has one or more connections, ensure that the
            # connections are valid.
            if len(connections) > 0:
                if len(connections) > 1:
                    # Bug fix: the original read 'shdInput.GetAttr.GetPath()'
                    # (missing call parentheses), which raised
                    # AttributeError whenever this branch was hit.
                    self._AddFailedCheck("Shader input <%s> has %s connection "
                        "sources, but only one is allowed." %
                        (shdInput.GetAttr().GetPath(), len(connections)))

                connectedSource = shdInput.GetConnectedSource()
                if connectedSource is None:
                    self._AddFailedCheck("Connection source <%s> for shader "
                        "input <%s> is missing." % (connections[0],
                        shdInput.GetAttr().GetPath()))
                else:
                    # The source must be a valid shader or material prim.
                    source = connectedSource[0]
                    if not source.GetPrim().IsA(UsdShade.Shader) and \
                       not source.GetPrim().IsA(UsdShade.Material):
                        self._AddFailedCheck("Shader input <%s> has an invalid "
                            "connection source prim of type '%s'." %
                            (shdInput.GetAttr().GetPath(),
                             source.GetPrim().GetTypeName()))
class ARKitMaterialBindingChecker(BaseRuleChecker):
    """Validates the targets of material:binding relationships."""

    @staticmethod
    def GetDescription():
        return "All material binding relationships must have valid targets."

    def __init__(self, verbose):
        super(ARKitMaterialBindingChecker, self).__init__(verbose)

    def CheckPrim(self, prim):
        from pxr import UsdShade
        bindingRels = [
            rel for rel in prim.GetRelationships()
            if rel.GetName().startswith(UsdShade.Tokens.materialBinding)]

        for bindingRel in bindingRels:
            numTargets = len(bindingRel.GetTargets())
            if numTargets == 1:
                # A single target is a direct binding to a material prim.
                direct = UsdShade.MaterialBindingAPI.DirectBinding(bindingRel)
                if not direct.GetMaterial():
                    self._AddFailedCheck(
                        "Direct material binding <%s> targets an invalid "
                        "material <%s>." % (bindingRel.GetPath(),
                                            direct.GetMaterialPath()))
            elif numTargets == 2:
                # Two targets indicate a collection-based binding:
                # (collection, material).
                coll = UsdShade.MaterialBindingAPI.CollectionBinding(
                    bindingRel)
                if not coll.GetMaterial():
                    self._AddFailedCheck(
                        "Collection-based material binding <%s> targets an "
                        "invalid material <%s>." % (bindingRel.GetPath(),
                                                    coll.GetMaterialPath()))
                if not coll.GetCollection():
                    self._AddFailedCheck(
                        "Collection-based material binding <%s> targets an "
                        "invalid collection <%s>." % (bindingRel.GetPath(),
                                                      coll.GetCollectionPath()))
class ARKitFileExtensionChecker(BaseRuleChecker):
    """Restricts package contents to layer files and textures."""

    # Union of the allowed layer formats and allowed image formats.
    _allowedFileExtensions = \
        ARKitLayerChecker._allowedLayerFormatIds + \
        TextureChecker._allowedImageFormats

    @staticmethod
    def GetDescription():
        return "Only layer files and textures are allowed in a package."

    def __init__(self, verbose):
        super(ARKitFileExtensionChecker, self).__init__(verbose)

    def CheckZipFile(self, zipFile, packagePath):
        resolver = Ar.GetResolver()
        for fileName in zipFile.GetFileNames():
            fileExt = resolver.GetExtension(fileName)
            if fileExt in ARKitFileExtensionChecker._allowedFileExtensions:
                continue
            self._AddFailedCheck("File '%s' in package '%s' has an "
                                 "unknown or unsupported extension '%s'." %
                                 (fileName, packagePath, fileExt))
class ARKitRootLayerChecker(BaseRuleChecker):
    """Checks that the stage is backed by a single usdc layer, or by a
    usdz package whose first file is a usdc layer."""

    @staticmethod
    def GetDescription():
        return "The root layer of the package must be a usdc file and " \
            "must not include any external dependencies that participate in "\
            "stage composition."

    def __init__(self, verbose):
        super(ARKitRootLayerChecker, self).__init__(verbose=verbose)

    def CheckStage(self, usdStage):
        usedLayers = usdStage.GetUsedLayers()
        # This list excludes any session layers.
        usedLayersOnDisk = [i for i in usedLayers if i.realPath]
        if len(usedLayersOnDisk) > 1:
            self._AddFailedCheck("The stage uses %s layers. It should "
                "contain a single usdc layer to be compatible with ARKit's "
                "implementation of usdz." % len(usedLayersOnDisk))

        rootLayerRealPath = usdStage.GetRootLayer().realPath
        if rootLayerRealPath.endswith(".usdz"):
            # Check if the root layer in the package is a usdc.
            from pxr import Usd
            zipFile = Usd.ZipFile.Open(rootLayerRealPath)
            if not zipFile:
                # Bug fix: the original formatted this message with the
                # undefined name 'resolvedPath' (a NameError).
                self._AddError("Could not open package at path '%s'." %
                               rootLayerRealPath)
                return
            fileNames = zipFile.GetFileNames()
            if not fileNames[0].endswith(".usdc"):
                self._AddFailedCheck("First file (%s) in usdz package '%s' "
                    "does not have the .usdc extension." % (fileNames[0],
                    rootLayerRealPath))
        elif not rootLayerRealPath.endswith(".usdc"):
            self._AddFailedCheck("Root layer of the stage '%s' does not "
                "have the '.usdc' extension." % (rootLayerRealPath))
class ComplianceChecker(object):
    """ A utility class for checking compliance of a given USD asset or a USDZ
    package.

    Since usdz files are zip files, someone could use generic zip tools to
    create an archive and just change the extension, producing a .usdz file that
    does not honor the additional constraints that usdz files require. Even if
    someone does use our official archive creation tools, though, we
    intentionally allow creation of usdz files that can be very "permissive" in
    their contents for internal studio uses, where portability outside the
    studio is not a concern. For content meant to be delivered over the web
    (eg. ARKit assets), however, we must be much more restrictive.

    This class provides two levels of compliance checking:
    * "structural" validation that is represented by a set of base rules.
    * "ARKit" compatibility validation, which includes many more restrictions.

    Calling ComplianceChecker.DumpAllRules() will print an enumeration of the
    various rules in the two categories of compliance checking.
    """

    @staticmethod
    def GetBaseRules():
        return [ByteAlignmentChecker, CompressionChecker,
                MissingReferenceChecker, TextureChecker]

    @staticmethod
    def GetARKitRules(skipARKitRootLayerCheck=False):
        arkitRules = [ARKitLayerChecker, ARKitPrimTypeChecker,
                      ARKitStageYupChecker, ARKitShaderChecker,
                      ARKitMaterialBindingChecker,
                      ARKitFileExtensionChecker,
                      ARKitPackageEncapsulationChecker]
        if not skipARKitRootLayerCheck:
            arkitRules.append(ARKitRootLayerChecker)
        return arkitRules

    @staticmethod
    def GetRules(arkit=False, skipARKitRootLayerCheck=False):
        allRules = ComplianceChecker.GetBaseRules()
        if arkit:
            arkitRules = ComplianceChecker.GetARKitRules(
                    skipARKitRootLayerCheck=skipARKitRootLayerCheck)
            allRules += arkitRules
        return allRules

    @staticmethod
    def DumpAllRules():
        # Bug fix: the original called the unqualified name GetBaseRules()
        # (a NameError inside a staticmethod) and printed the base rules a
        # second time under the "ARKit rules" heading instead of the ARKit
        # rules.
        print('Base rules:')
        for ruleNum, rule in enumerate(ComplianceChecker.GetBaseRules()):
            print('[%s] %s' % (ruleNum + 1, rule.GetDescription()))
        print('-' * 30)
        print('ARKit rules: ')
        for ruleNum, rule in enumerate(ComplianceChecker.GetARKitRules()):
            print('[%s] %s' % (ruleNum + 1, rule.GetDescription()))
        print('-' * 30)

    def __init__(self, arkit=False, skipARKitRootLayerCheck=False,
                 rootPackageOnly=False, skipVariants=False, verbose=False):
        self._rootPackageOnly = rootPackageOnly
        self._doVariants = not skipVariants
        self._verbose = verbose
        self._errors = []

        # Once a package has been checked, it goes into this set.
        self._checkedPackages = set()

        # Instantiate an instance of every rule checker and store in a list.
        self._rules = [Rule(self._verbose) for Rule in
                       ComplianceChecker.GetRules(arkit,
                                                  skipARKitRootLayerCheck)]

    def _Msg(self, msg):
        if self._verbose:
            print(msg)

    def _AddError(self, errMsg):
        self._errors.append(errMsg)

    def GetErrors(self):
        # Bug fix: copy the list instead of aliasing self._errors; the
        # original appended the rules' errors into the instance's own list,
        # so repeated calls accumulated duplicates.
        errors = list(self._errors)
        for rule in self._rules:
            errs = rule.GetErrors()
            for err in errs:
                errors.append("Error checking rule '%s': %s" %
                              (type(rule).__name__, err))
        return errors

    def DumpRules(self):
        descriptions = [rule.GetDescription() for rule in self._rules]
        print('Checking rules: ')
        for ruleNum, rule in enumerate(descriptions):
            print('[%s] %s' % (ruleNum + 1, rule))
        print('-' * 30)

    def GetFailedChecks(self):
        failedChecks = []
        for rule in self._rules:
            fcs = rule.GetFailedChecks()
            for fc in fcs:
                failedChecks.append("%s (fails '%s')" % (fc,
                        type(rule).__name__))
        return failedChecks

    def CheckCompliance(self, inputFile):
        """Run all configured rules against the asset at inputFile."""
        from pxr import Sdf, Usd, UsdUtils
        if not Usd.Stage.IsSupportedFile(inputFile):
            # Bug fix: the original called the unqualified name _AddError
            # and formatted with the undefined 'args.inputFile'.
            self._AddError("Cannot open file '%s' on a USD stage." %
                           inputFile)
            return

        # Collect all warnings using a diagnostic delegate.
        delegate = UsdUtils.CoalescingDiagnosticDelegate()
        usdStage = Usd.Stage.Open(inputFile)
        stageOpenDiagnostics = delegate.TakeUncoalescedDiagnostics()

        for rule in self._rules:
            rule.CheckStage(usdStage)
            rule.CheckDiagnostics(stageOpenDiagnostics)

        with Ar.ResolverContextBinder(usdStage.GetPathResolverContext()):
            # This recursively computes all of inputFile's external
            # dependencies.
            (allLayers, allAssets, unresolvedPaths) = \
                    UsdUtils.ComputeAllDependencies(Sdf.AssetPath(inputFile))
            for rule in self._rules:
                rule.CheckUnresolvedPaths(unresolvedPaths)
                rule.CheckDependencies(usdStage, allLayers, allAssets)

            if self._rootPackageOnly:
                rootLayer = usdStage.GetRootLayer()
                if rootLayer.GetFileFormat().IsPackage():
                    packagePath = Ar.SplitPackageRelativePathInner(
                            rootLayer.identifier)[0]
                    self._CheckPackage(packagePath)
                else:
                    self._AddError("Root layer of the USD stage (%s) doesn't belong to "
                        "a package, but 'rootPackageOnly' is True!" %
                        Usd.Describe(usdStage))
            else:
                # Process every package just once by storing them all in a set.
                packages = set()
                for layer in allLayers:
                    if _IsPackageOrPackagedLayer(layer):
                        packagePath = Ar.SplitPackageRelativePathInner(
                                layer.identifier)[0]
                        packages.add(packagePath)
                    self._CheckLayer(layer)
                for package in packages:
                    self._CheckPackage(package)

                # Traverse the entire stage and check every prim.
                # (Usd was already imported at the top of this method; the
                # original re-imported it redundantly here.)
                # Author all variant switches in the session layer.
                usdStage.SetEditTarget(usdStage.GetSessionLayer())
                allPrimsIt = iter(Usd.PrimRange.Stage(usdStage,
                        Usd.TraverseInstanceProxies()))
                self._TraverseRange(allPrimsIt, isStageRoot=True)

    def _CheckPackage(self, packagePath):
        self._Msg("Checking package <%s>." % packagePath)

        # XXX: Should we open the package on a stage to ensure that it is valid
        # and entirely self-contained.

        from pxr import Usd
        pkgExt = Ar.GetResolver().GetExtension(packagePath)
        if pkgExt != "usdz":
            self._AddError("Package at path %s has an invalid extension."
                           % packagePath)
            return

        # Check the parent package first.
        if Ar.IsPackageRelativePath(packagePath):
            parentPackagePath = Ar.SplitPackageRelativePathInner(packagePath)[0]
            self._CheckPackage(parentPackagePath)

        # Avoid checking the same parent package multiple times.
        if packagePath in self._checkedPackages:
            return
        self._checkedPackages.add(packagePath)

        resolvedPath = Ar.GetResolver().Resolve(packagePath)
        if len(resolvedPath) == 0:
            self._AddError("Failed to resolve package path '%s'." % packagePath)
            return

        zipFile = Usd.ZipFile.Open(resolvedPath)
        if not zipFile:
            self._AddError("Could not open package at path '%s'." %
                           resolvedPath)
            return
        for rule in self._rules:
            rule.CheckZipFile(zipFile, packagePath)

    def _CheckLayer(self, layer):
        for rule in self._rules:
            rule.CheckLayer(layer)

    def _CheckPrim(self, prim):
        for rule in self._rules:
            rule.CheckPrim(prim)

    def _TraverseRange(self, primRangeIt, isStageRoot):
        primsWithVariants = []
        rootPrim = primRangeIt.GetCurrentPrim()
        for prim in primRangeIt:
            # Skip variant set check on the root prim if it is the stage root.
            if not self._doVariants or (not isStageRoot and prim == rootPrim):
                self._CheckPrim(prim)
                continue

            vSets = prim.GetVariantSets()
            vSetNames = vSets.GetNames()
            if len(vSetNames) == 0:
                self._CheckPrim(prim)
            else:
                primsWithVariants.append(prim)
                # Defer the subtree: it is re-traversed per variation in
                # _TraverseVariants below.
                primRangeIt.PruneChildren()

        for prim in primsWithVariants:
            self._TraverseVariants(prim)

    def _TraverseVariants(self, prim):
        from pxr import Usd
        if prim.IsInstanceProxy():
            return True

        vSets = prim.GetVariantSets()
        vSetNames = vSets.GetNames()
        allVariantNames = []
        for vSetName in vSetNames:
            vSet = vSets.GetVariantSet(vSetName)
            vNames = vSet.GetVariantNames()
            allVariantNames.append(vNames)

        import itertools
        # Cartesian product of every variant of every variant set.
        allVariations = itertools.product(*allVariantNames)

        for variation in allVariations:
            self._Msg("Testing variation %s of prim <%s>" %
                      (variation, prim.GetPath()))
            for (idx, sel) in enumerate(variation):
                vSets.SetSelection(vSetNames[idx], sel)
            primRangeIt = iter(Usd.PrimRange(prim,
                    Usd.TraverseInstanceProxies()))
            self._TraverseRange(primRangeIt, isStageRoot=False)
| 30,368 | Python | 40.715659 | 88 | 0.600797 |
USwampertor/OmniverseJS/ov/python/pxr/UsdMdl/__init__.py | #******************************************************************************
# * Copyright 2019 NVIDIA Corporation. All rights reserved.
# *****************************************************************************
#
# NVIDIA Material Definition Language (MDL) USD plugins.
#
# MDL Search Paths
# ================
# At startup (i.e. when the MDL SDK is loaded) MDL search path is set in this order:
# 1/ Dedicated environement variable
# If it is set, PXR_USDMDL_PLUGIN_SEARCH_PATHS overwrites any MDL search path.
# PXR_USDMDL_PLUGIN_SEARCH_PATHS can be set to a list of paths.
# 2/ System and User Path
# if PXR_USDMDL_PLUGIN_SEARCH_PATHS is not set:
# a/ If set, add MDL_SYSTEM_PATH to the MDL search path
# b/ If set, add MDL_USER_PATH to the MDL search path
#
# Discovery plugin
# ================
# MDL discovery plugin is derived from NdrDiscoveryPlugin interface.
# This plugin finds MDL functions and materials from all the modules found in the
# MDL search paths.
# This discovery plugin is executed as soon as the registry is instantiated,
# for example in Python:
#
# >>> from pxr import Sdr
# >>> reg = Sdr.Registry()
#
# MDL discovery plugin creates a discovery result (NdrNodeDiscoveryResult)
# for each material and each function that is found.
#
# Parser plugin
# ================
# MDL parser plugin is derived from NdrParserPlugin interface.
# This plugin is responsible to parse a given MDL function or material and
# create an NdrNode instance.
# The parser plugin which is run is decided based on the discovery result discoveryType.
# The parser plugin is invoked whenever a shader node is requested, for example in Python:
#
# >>> from pxr import Sdr
# >>> MDLQualifiedName = "::material_examples::architectural::architectural"
# >>> Sdr.Registry().GetShaderNodeByIdentifierAndType(MDLQualifiedName, "mdl")
#
# NdrNodes which is created contains a list of properties which are translated
# from MDL parameters.
#
from . import _usdMdl
from pxr import Tf
Tf.PrepareModule(_usdMdl, locals())
del Tf
try:
import __DOC
__DOC.Execute(locals())
del __DOC
except Exception:
try:
import __tmpDoc
__tmpDoc.Execute(locals())
del __tmpDoc
except:
pass
| 2,253 | Python | 34.777777 | 90 | 0.656458 |
USwampertor/OmniverseJS/ov/python/pxr/UsdAppUtils/rendererArgs.py | #
# Copyright 2019 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
class RendererPlugins(object):
    """
    An enum-like container of the available Hydra renderer plugins.
    """

    class _RendererPlugin(object):
        """
        Class which represents a Hydra renderer plugin. Each one has a plugin
        identifier and a display name.
        """

        def __init__(self, pluginId, displayName):
            self._pluginId = pluginId
            self._displayName = displayName

        def __repr__(self):
            return self.displayName

        @property
        def id(self):
            return self._pluginId

        @property
        def displayName(self):
            return self._displayName

    @classmethod
    def allPlugins(cls):
        """
        Get a tuple of all available renderer plugins.
        """
        # Lazily computed once and cached on the class.
        if not hasattr(cls, '_allPlugins'):
            from pxr import UsdImagingGL
            cls._allPlugins = tuple(
                cls._RendererPlugin(
                    pluginId,
                    UsdImagingGL.Engine.GetRendererDisplayName(pluginId))
                for pluginId in UsdImagingGL.Engine.GetRendererPlugins())
        return cls._allPlugins

    @classmethod
    def fromId(cls, pluginId):
        """
        Get a renderer plugin from its identifier.
        """
        for plugin in cls.allPlugins():
            if plugin.id == pluginId:
                return plugin
        raise ValueError("No renderer plugin with id '{}'".format(pluginId))

    @classmethod
    def fromDisplayName(cls, displayName):
        """
        Get a renderer plugin from its display name.
        """
        for plugin in cls.allPlugins():
            if plugin.displayName == displayName:
                return plugin
        raise ValueError(
            "No renderer plugin with display name '{}'".format(displayName))
def AddCmdlineArgs(argsParser, altHelpText=''):
    """
    Adds Hydra renderer-related command line arguments to argsParser.

    The resulting 'rendererPlugin' argument will be a _RendererPlugin instance
    representing one of the available Hydra renderer plugins.
    """
    # Fix: removed an unused 'from pxr import UsdImagingGL' here; nothing in
    # this function referenced it (RendererPlugins imports it lazily itself).
    helpText = altHelpText
    if not helpText:
        helpText = (
            'Hydra renderer plugin to use when generating images')

    argsParser.add_argument('--renderer', '-r', action='store',
        type=RendererPlugins.fromDisplayName,
        dest='rendererPlugin',
        choices=[p for p in RendererPlugins.allPlugins()],
        help=helpText)
| 3,564 | Python | 33.61165 | 94 | 0.660494 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/schemaClass.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "{{ libraryPath }}/{{ cls.GetHeaderFile() }}"
#include "pxr/usd/usd/schemaRegistry.h"
#include "pxr/usd/usd/typed.h"
{% if cls.isApi %}
#include "pxr/usd/usd/tokens.h"
{% endif %}
#include "pxr/usd/sdf/types.h"
#include "pxr/usd/sdf/assetPath.h"
{% if useExportAPI %}
{{ namespaceOpen }}
{% endif %}
// Register the schema with the TfType system.
TF_REGISTRY_FUNCTION(TfType)
{
TfType::Define<{{ cls.cppClassName }},
TfType::Bases< {{ cls.parentCppClassName }} > >();
{% if cls.isConcrete %}
// Register the usd prim typename as an alias under UsdSchemaBase. This
// enables one to call
// TfType::Find<UsdSchemaBase>().FindDerivedByName("{{ cls.usdPrimTypeName }}")
// to find TfType<{{ cls.cppClassName }}>, which is how IsA queries are
// answered.
TfType::AddAlias<UsdSchemaBase, {{ cls.cppClassName }}>("{{ cls.usdPrimTypeName }}");
{% endif %}
}
{% if cls.isApi %}
TF_DEFINE_PRIVATE_TOKENS(
_schemaTokens,
({{ cls.primName }})
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
({{ cls.propertyNamespacePrefix }})
{% endif %}
);
{% endif %}
/* virtual */
{{ cls.cppClassName }}::~{{ cls.cppClassName }}()
{
}
{% if not cls.isAPISchemaBase %}
/* static */
{{ cls.cppClassName }}
{{ cls.cppClassName }}::Get(const UsdStagePtr &stage, const SdfPath &path)
{
if (!stage) {
TF_CODING_ERROR("Invalid stage");
return {{ cls.cppClassName }}();
}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
TfToken name;
if (!Is{{ cls.usdPrimTypeName }}Path(path, &name)) {
TF_CODING_ERROR("Invalid {{ cls.propertyNamespacePrefix }} path <%s>.", path.GetText());
return {{ cls.cppClassName }}();
}
return {{ cls.cppClassName }}(stage->GetPrimAtPath(path.GetPrimPath()), name);
{% else %}
return {{ cls.cppClassName }}(stage->GetPrimAtPath(path));
{% endif %}
}
{% if cls.isMultipleApply %}
{{ cls.cppClassName }}
{{ cls.cppClassName }}::Get(const UsdPrim &prim, const TfToken &name)
{
return {{ cls.cppClassName }}(prim, name);
}
{% endif %}
{% endif %}
{% if cls.isConcrete %}
/* static */
{{ cls.cppClassName }}
{{ cls.cppClassName }}::Define(
const UsdStagePtr &stage, const SdfPath &path)
{
static TfToken usdPrimTypeName("{{ cls.usdPrimTypeName }}");
if (!stage) {
TF_CODING_ERROR("Invalid stage");
return {{ cls.cppClassName }}();
}
return {{ cls.cppClassName }}(
stage->DefinePrim(path, usdPrimTypeName));
}
{% endif %}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
/* static */
bool
{{ cls.cppClassName }}::IsSchemaPropertyBaseName(const TfToken &baseName)
{
static TfTokenVector attrsAndRels = {
{% for attrName in cls.attrOrder %}
{% set attr = cls.attrs[attrName] %}
{{ tokensPrefix }}Tokens->{{ attr.name }},
{% endfor %}
{% for relName in cls.relOrder %}
{% set rel = cls.rels[relName] %}
{{ tokensPrefix }}Tokens->{{ rel.name }},
{% endfor %}
};
return find(attrsAndRels.begin(), attrsAndRels.end(), baseName)
!= attrsAndRels.end();
}
/* static */
bool
{{ cls.cppClassName }}::Is{{ cls.usdPrimTypeName }}Path(
const SdfPath &path, TfToken *name)
{
if (!path.IsPropertyPath()) {
return false;
}
std::string propertyName = path.GetName();
TfTokenVector tokens = SdfPath::TokenizeIdentifierAsTokens(propertyName);
// The baseName of the {{ cls.usdPrimTypeName }} path can't be one of the
// schema properties. We should validate this in the creation (or apply)
// API.
TfToken baseName = *tokens.rbegin();
if (IsSchemaPropertyBaseName(baseName)) {
return false;
}
if (tokens.size() >= 2
&& tokens[0] == _schemaTokens->{{ cls.propertyNamespacePrefix }}) {
*name = TfToken(propertyName.substr(
_schemaTokens->{{ cls.propertyNamespacePrefix }}.GetString().size() + 1));
return true;
}
return false;
}
{% endif %}
/* virtual */
UsdSchemaType {{ cls.cppClassName }}::_GetSchemaType() const {
return {{ cls.cppClassName }}::schemaType;
}
{% if cls.isAppliedAPISchema %}
/* static */
{{ cls.cppClassName }}
{% if cls.isPrivateApply %}
{% if not cls.isMultipleApply %}
{{ cls.cppClassName }}::_Apply(const UsdPrim &prim)
{% else %}
{{ cls.cppClassName }}::_Apply(const UsdPrim &prim, const TfToken &name)
{% endif %}
{% else %}
{% if not cls.isMultipleApply %}
{{ cls.cppClassName }}::Apply(const UsdPrim &prim)
{% else %}
{{ cls.cppClassName }}::Apply(const UsdPrim &prim, const TfToken &name)
{% endif %}
{% endif %}
{
{% if cls.isMultipleApply %}
if (prim.ApplyAPI<{{ cls.cppClassName }}>(name)) {
return {{ cls.cppClassName }}(prim, name);
}
{% else %}
if (prim.ApplyAPI<{{ cls.cppClassName }}>()) {
return {{ cls.cppClassName }}(prim);
}
{% endif %}
return {{ cls.cppClassName }}();
}
{% endif %}
/* static */
const TfType &
{{ cls.cppClassName }}::_GetStaticTfType()
{
static TfType tfType = TfType::Find<{{ cls.cppClassName }}>();
return tfType;
}
/* static */
bool
{{ cls.cppClassName }}::_IsTypedSchema()
{
static bool isTyped = _GetStaticTfType().IsA<UsdTyped>();
return isTyped;
}
/* virtual */
const TfType &
{{ cls.cppClassName }}::_GetTfType() const
{
return _GetStaticTfType();
}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
/// Returns the property name prefixed with the correct namespace prefix, which
/// is composed of the the API's propertyNamespacePrefix metadata and the
/// instance name of the API.
static inline
TfToken
_GetNamespacedPropertyName(const TfToken instanceName, const TfToken propName)
{
TfTokenVector identifiers =
{_schemaTokens->{{ cls.propertyNamespacePrefix }}, instanceName, propName};
return TfToken(SdfPath::JoinIdentifier(identifiers));
}
{% endif %}
{% for attrName in cls.attrOrder %}
{% set attr = cls.attrs[attrName] %}
{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
{% if attr.apiName != '' %}
{% if attr.apiGet != "custom" %}
UsdAttribute
{{ cls.cppClassName }}::Get{{ Proper(attr.apiName) }}Attr() const
{
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
return GetPrim().GetAttribute(
_GetNamespacedPropertyName(
GetName(),
{{ tokensPrefix }}Tokens->{{ attr.name }}));
{% else %}
return GetPrim().GetAttribute({{ tokensPrefix }}Tokens->{{ attr.name }});
{% endif %}
}
{% endif %}
UsdAttribute
{{ cls.cppClassName }}::Create{{ Proper(attr.apiName) }}Attr(VtValue const &defaultValue, bool writeSparsely) const
{
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
return UsdSchemaBase::_CreateAttr(
_GetNamespacedPropertyName(
GetName(),
{{ tokensPrefix }}Tokens->{{ attr.name }}),
{% else %}
return UsdSchemaBase::_CreateAttr({{ tokensPrefix }}Tokens->{{ attr.name }},
{% endif %}
{{ attr.usdType }},
/* custom = */ {{ "true" if attr.custom else "false" }},
{{ attr.variability }},
defaultValue,
writeSparsely);
}
{% endif %}
{% endfor %}
{% for relName in cls.relOrder %}
{% set rel = cls.rels[relName] %}
{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
{% if rel.apiName != '' %}
{% if rel.apiGet != "custom" %}
UsdRelationship
{{ cls.cppClassName }}::Get{{ Proper(rel.apiName) }}Rel() const
{
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
return GetPrim().GetRelationship(
_GetNamespacedPropertyName(
GetName(),
{{ tokensPrefix }}Tokens->{{ rel.name }}));
{% else %}
return GetPrim().GetRelationship({{ tokensPrefix }}Tokens->{{ rel.name }});
{% endif %}
}
{% endif %}
UsdRelationship
{{ cls.cppClassName }}::Create{{ Proper(rel.apiName) }}Rel() const
{
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
return GetPrim().CreateRelationship(
_GetNamespacedPropertyName(
GetName(),
{{ tokensPrefix }}Tokens->{{ rel.name }}),
{% else %}
return GetPrim().CreateRelationship({{ tokensPrefix }}Tokens->{{rel.name}},
{% endif %}
/* custom = */ {{ "true" if rel.custom else "false" }});
}
{% endif %}
{% endfor %}
{% if cls.attrOrder|length > 0 %}
namespace {
static inline TfTokenVector
{% if cls.isMultipleApply %}
_ConcatenateAttributeNames(
const TfToken instanceName,
const TfTokenVector& left,
const TfTokenVector& right)
{% else %}
_ConcatenateAttributeNames(const TfTokenVector& left,const TfTokenVector& right)
{% endif %}
{
TfTokenVector result;
result.reserve(left.size() + right.size());
result.insert(result.end(), left.begin(), left.end());
{% if cls.isMultipleApply %}
for (const TfToken attrName : right) {
result.push_back(
_GetNamespacedPropertyName(instanceName, attrName));
}
{% endif %}
result.insert(result.end(), right.begin(), right.end());
return result;
}
}
{% endif %}
/*static*/
const TfTokenVector&
{% if cls.isMultipleApply %}
{{ cls.cppClassName }}::GetSchemaAttributeNames(
bool includeInherited, const TfToken instanceName)
{% else %}
{{ cls.cppClassName }}::GetSchemaAttributeNames(bool includeInherited)
{% endif %}
{
{% if cls.attrOrder|length > 0 %}
static TfTokenVector localNames = {
{% for attrName in cls.attrOrder %}
{% set attr = cls.attrs[attrName] %}
{{ tokensPrefix }}Tokens->{{ attr.name }},
{% endfor %}
};
{% if cls.isMultipleApply %}
static TfTokenVector allNames =
_ConcatenateAttributeNames(
instanceName,
{# The schema generator has already validated whether our parent is #}
{# a multiple apply schema or UsdSchemaBaseAPI, choose the correct function #}
{# depending on the situation #}
{% if cls.parentCppClassName == "UsdAPISchemaBase" %}
{{ cls.parentCppClassName }}::GetSchemaAttributeNames(true),
{% else %}
{{ cls.parentCppClassName }}::GetSchemaAttributeNames(true, instanceName),
{% endif %}
localNames);
{% else %}
static TfTokenVector allNames =
_ConcatenateAttributeNames(
{{ cls.parentCppClassName }}::GetSchemaAttributeNames(true),
localNames);
{% endif %}
{% else %}
static TfTokenVector localNames;
static TfTokenVector allNames =
{{ cls.parentCppClassName }}::GetSchemaAttributeNames(true);
{% endif %}
if (includeInherited)
return allNames;
else
return localNames;
}
{% if useExportAPI %}
{{ namespaceClose }}
{% endif %}
// ===================================================================== //
// Feel free to add custom code below this line. It will be preserved by
// the code generator.
{% if useExportAPI %}
//
// Just remember to wrap code in the appropriate delimiters:
// '{{ namespaceOpen }}', '{{ namespaceClose }}'.
{% endif %}
// ===================================================================== //
// --(BEGIN CUSTOM CODE)--
| 12,227 | C++ | 29.41791 | 115 | 0.63278 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/tokens.h | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#ifndef {{ Upper(tokensPrefix) }}_TOKENS_H
#define {{ Upper(tokensPrefix) }}_TOKENS_H
/// \file {{ libraryName }}/tokens.h
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//
// This is an automatically generated file (by usdGenSchema.py).
// Do not hand-edit!
//
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
{% if useExportAPI %}
#include "pxr/pxr.h"
#include "{{ libraryPath }}/api.h"
{% endif %}
#include "pxr/base/tf/staticData.h"
#include "pxr/base/tf/token.h"
#include <vector>
{% if useExportAPI %}
{{ namespaceOpen }}
{% endif %}
/// \class {{ tokensPrefix }}TokensType
///
/// \link {{ tokensPrefix }}Tokens \endlink provides static, efficient
/// \link TfToken TfTokens\endlink for use in all public USD API.
///
/// These tokens are auto-generated from the module's schema, representing
/// property names, for when you need to fetch an attribute or relationship
/// directly by name, e.g. UsdPrim::GetAttribute(), in the most efficient
/// manner, and allow the compiler to verify that you spelled the name
/// correctly.
///
/// {{ tokensPrefix }}Tokens also contains all of the \em allowedTokens values
/// declared for schema builtin attributes of 'token' scene description type.
{% if tokens %}
/// Use {{ tokensPrefix }}Tokens like so:
///
/// \code
/// gprim.GetMyTokenValuedAttr().Set({{ tokensPrefix }}Tokens->{{ tokens[0].id }});
/// \endcode
{% endif %}
struct {{ tokensPrefix }}TokensType {
{% if useExportAPI %}{{ Upper(libraryName) }}_API {% endif %}{{ tokensPrefix }}TokensType();
{% for token in tokens %}
/// \brief "{{ token.value }}"
///
/// {{ token.desc }}
const TfToken {{ token.id }};
{% endfor %}
/// A vector of all of the tokens listed above.
const std::vector<TfToken> allTokens;
};
/// \var {{ tokensPrefix }}Tokens
///
/// A global variable with static, efficient \link TfToken TfTokens\endlink
/// for use in all public USD API. \sa {{ tokensPrefix }}TokensType
extern{% if useExportAPI %} {{ Upper(libraryName) }}_API{% endif %} TfStaticData<{{ tokensPrefix }}TokensType> {{ tokensPrefix }}Tokens;
{% if useExportAPI %}
{{ namespaceClose }}
{% endif %}
#endif
| 3,255 | C | 34.391304 | 136 | 0.701382 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/wrapSchemaClass.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "{{ libraryPath }}/{{ cls.GetHeaderFile() }}"
#include "pxr/usd/usd/schemaBase.h"
#include "pxr/usd/sdf/primSpec.h"
#include "pxr/usd/usd/pyConversions.h"
#include "pxr/base/tf/pyContainerConversions.h"
#include "pxr/base/tf/pyResultConversions.h"
#include "pxr/base/tf/pyUtils.h"
#include "pxr/base/tf/wrapTypeHelpers.h"
#include <boost/python.hpp>
#include <string>
using namespace boost::python;
{% if useExportAPI %}
{{ namespaceUsing }}
namespace {
{% endif %}
#define WRAP_CUSTOM \
template <class Cls> static void _CustomWrapCode(Cls &_class)
// fwd decl.
WRAP_CUSTOM;
{% for attrName in cls.attrOrder -%}
{% set attr = cls.attrs[attrName] %}
{# Only emit Create/Get API if apiName is not empty string. #}
{% if attr.apiName != '' %}
static UsdAttribute
_Create{{ Proper(attr.apiName) }}Attr({{ cls.cppClassName }} &self,
object defaultVal, bool writeSparsely) {
return self.Create{{ Proper(attr.apiName) }}Attr(
UsdPythonToSdfType(defaultVal, {{ attr.usdType }}), writeSparsely);
}
{% endif %}
{% endfor %}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
static bool _WrapIs{{ cls.usdPrimTypeName }}Path(const SdfPath &path) {
TfToken collectionName;
return {{ cls.cppClassName }}::Is{{ cls.usdPrimTypeName }}Path(
path, &collectionName);
}
{% endif %}
{% if not cls.isAPISchemaBase %}
static std::string
_Repr(const {{ cls.cppClassName }} &self)
{
std::string primRepr = TfPyRepr(self.GetPrim());
{% if cls.isMultipleApply %}
std::string instanceName = self.GetName();
return TfStringPrintf(
"{{ libraryName[0]|upper }}{{ libraryName[1:] }}.{{ cls.className }}(%s, '%s')",
primRepr.c_str(), instanceName.c_str());
{% else %}
return TfStringPrintf(
"{{ libraryName[0]|upper }}{{ libraryName[1:] }}.{{ cls.className }}(%s)",
primRepr.c_str());
{% endif %}
}
{% endif %}
{% if useExportAPI %}
} // anonymous namespace
{% endif %}
void wrap{{ cls.cppClassName }}()
{
typedef {{ cls.cppClassName }} This;
{% if cls.isAPISchemaBase %}
class_< This , bases<{{ cls.parentCppClassName }}>, boost::noncopyable> cls ("APISchemaBase", "", no_init);
{% else %}
class_<This, bases<{{ cls.parentCppClassName }}> >
cls("{{ cls.className }}");
{% endif %}
cls
{% if not cls.isAPISchemaBase %}
{% if cls.isMultipleApply %}
.def(init<UsdPrim, TfToken>())
.def(init<UsdSchemaBase const&, TfToken>())
{% else %}
.def(init<UsdPrim>(arg("prim")))
.def(init<UsdSchemaBase const&>(arg("schemaObj")))
{% endif %}
{% endif %}
.def(TfTypePythonClass())
{% if not cls.isAPISchemaBase %}
{% if cls.isMultipleApply %}
.def("Get",
({{ cls.cppClassName }}(*)(const UsdStagePtr &stage,
const SdfPath &path))
&This::Get,
(arg("stage"), arg("path")))
.def("Get",
({{ cls.cppClassName }}(*)(const UsdPrim &prim,
const TfToken &name))
&This::Get,
(arg("prim"), arg("name")))
{% else %}
.def("Get", &This::Get, (arg("stage"), arg("path")))
{% endif %}
.staticmethod("Get")
{% endif %}
{% if cls.isConcrete %}
.def("Define", &This::Define, (arg("stage"), arg("path")))
.staticmethod("Define")
{% endif %}
{% if cls.isAppliedAPISchema and not cls.isMultipleApply and not cls.isPrivateApply %}
.def("Apply", &This::Apply, (arg("prim")))
.staticmethod("Apply")
{% endif %}
{% if cls.isAppliedAPISchema and cls.isMultipleApply and not cls.isPrivateApply %}
.def("Apply", &This::Apply, (arg("prim"), arg("name")))
.staticmethod("Apply")
{% endif %}
.def("GetSchemaAttributeNames",
&This::GetSchemaAttributeNames,
arg("includeInherited")=true,
{% if cls.isMultipleApply %}
arg("instanceName")=TfToken(),
{% endif %}
return_value_policy<TfPySequenceToList>())
.staticmethod("GetSchemaAttributeNames")
.def("_GetStaticTfType", (TfType const &(*)()) TfType::Find<This>,
return_value_policy<return_by_value>())
.staticmethod("_GetStaticTfType")
.def(!self)
{% for attrName in cls.attrOrder -%}
{% set attr = cls.attrs[attrName] %}
{# Only emit Create/Get API if apiName is not empty string. #}
{% if attr.apiName != '' %}
.def("Get{{ Proper(attr.apiName) }}Attr",
&This::Get{{ Proper(attr.apiName) }}Attr)
.def("Create{{ Proper(attr.apiName) }}Attr",
&_Create{{ Proper(attr.apiName) }}Attr,
(arg("defaultValue")=object(),
arg("writeSparsely")=false))
{% endif %}
{% endfor %}
{% for relName in cls.relOrder -%}
{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
{% set rel = cls.rels[relName] %}
{% if rel.apiName != '' %}
.def("Get{{ Proper(rel.apiName) }}Rel",
&This::Get{{ Proper(rel.apiName) }}Rel)
.def("Create{{ Proper(rel.apiName) }}Rel",
&This::Create{{ Proper(rel.apiName) }}Rel)
{% endif %}
{% endfor %}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
.def("Is{{ cls.usdPrimTypeName }}Path", _WrapIs{{ cls.usdPrimTypeName }}Path)
.staticmethod("Is{{ cls.usdPrimTypeName }}Path")
{% endif %}
{% if not cls.isAPISchemaBase %}
.def("__repr__", ::_Repr)
{% endif %}
;
_CustomWrapCode(cls);
}
// ===================================================================== //
// Feel free to add custom code below this line, it will be preserved by
// the code generator. The entry point for your custom code should look
// minimally like the following:
//
// WRAP_CUSTOM {
// _class
// .def("MyCustomMethod", ...)
// ;
// }
//
// Of course any other ancillary or support code may be provided.
{% if useExportAPI %}
//
// Just remember to wrap code in the appropriate delimiters:
// 'namespace {', '}'.
//
{% endif %}
// ===================================================================== //
// --(BEGIN CUSTOM CODE)--
| 7,316 | C++ | 31.376106 | 111 | 0.597184 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/wrapTokens.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
// GENERATED FILE. DO NOT EDIT.
#include <boost/python/class.hpp>
#include "{{ libraryPath }}/tokens.h"
{% if useExportAPI %}
{{ namespaceUsing }}
{% endif %}
namespace {
// Helper to return a static token as a string. We wrap tokens as Python
// strings and for some reason simply wrapping the token using def_readonly
// bypasses to-Python conversion, leading to the error that there's no
// Python type for the C++ TfToken type. So we wrap this functor instead.
class _WrapStaticToken {
public:
_WrapStaticToken(const TfToken* token) : _token(token) { }
std::string operator()() const
{
return _token->GetString();
}
private:
const TfToken* _token;
};
template <typename T>
void
_AddToken(T& cls, const char* name, const TfToken& token)
{
cls.add_static_property(name,
boost::python::make_function(
_WrapStaticToken(&token),
boost::python::return_value_policy<
boost::python::return_by_value>(),
boost::mpl::vector1<std::string>()));
}
} // anonymous
void wrap{{ tokensPrefix }}Tokens()
{
boost::python::class_<{{ tokensPrefix }}TokensType, boost::noncopyable>
cls("Tokens", boost::python::no_init);
{% for token in tokens %}
_AddToken(cls, "{{ token.id }}", {{ tokensPrefix }}Tokens->{{ token.id }});
{% endfor %}
}
| 2,521 | C++ | 33.547945 | 79 | 0.662436 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/schemaClass.h | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#ifndef {{ Upper(libraryName) }}_GENERATED_{{ Upper(cls.className) }}_H
#define {{ Upper(libraryName) }}_GENERATED_{{ Upper(cls.className) }}_H
/// \file {{ libraryName }}/{{ cls.GetHeaderFile() }}
{% if useExportAPI %}
#include "pxr/pxr.h"
#include "{{ libraryPath }}/api.h"
{% endif %}
#include "{{ cls.parentLibPath }}/{{ cls.GetParentHeaderFile() }}"
#include "pxr/usd/usd/prim.h"
#include "pxr/usd/usd/stage.h"
{% if cls.tokens -%}
#include "{{ libraryPath }}/tokens.h"
{% endif %}
{% if cls.extraIncludes -%}
{{ cls.extraIncludes }}
{% endif %}
#include "pxr/base/vt/value.h"
#include "pxr/base/gf/vec3d.h"
#include "pxr/base/gf/vec3f.h"
#include "pxr/base/gf/matrix4d.h"
#include "pxr/base/tf/token.h"
#include "pxr/base/tf/type.h"
{% if useExportAPI %}
{{ namespaceOpen }}
{% endif %}
class SdfAssetPath;
// -------------------------------------------------------------------------- //
// {{ Upper(cls.usdPrimTypeName) }}{{' ' * (74 - cls.usdPrimTypeName|count)}} //
// -------------------------------------------------------------------------- //
/// \class {{ cls.cppClassName }}
///
{% if cls.doc -%}
/// {{ cls.doc }}
{% endif %}
{% if cls.doc and hasTokenAttrs -%}
///
{%endif%}
{% if hasTokenAttrs -%}
/// For any described attribute \em Fallback \em Value or \em Allowed \em Values below
/// that are text/tokens, the actual token is published and defined in \ref {{ tokensPrefix }}Tokens.
/// So to set an attribute to the value "rightHanded", use {{ tokensPrefix }}Tokens->rightHanded
/// as the value.
{% endif %}
///
class {{ cls.cppClassName }} : public {{ cls.parentCppClassName }}
{
public:
/// Compile time constant representing what kind of schema this class is.
///
/// \sa UsdSchemaType
static const UsdSchemaType schemaType = {{cls.schemaType }};
{% if cls.isMultipleApply %}
/// Construct a {{ cls.cppClassName }} on UsdPrim \p prim with
/// name \p name . Equivalent to
/// {{ cls.cppClassName }}::Get(
/// prim.GetStage(),
/// prim.GetPath().AppendProperty(
/// "{{ cls.propertyNamespacePrefix }}:name"));
///
/// for a \em valid \p prim, but will not immediately throw an error for
/// an invalid \p prim
explicit {{ cls.cppClassName }}(
const UsdPrim& prim=UsdPrim(), const TfToken &name=TfToken())
: {{ cls.parentCppClassName }}(prim, /*instanceName*/ name)
{ }
/// Construct a {{ cls.cppClassName }} on the prim held by \p schemaObj with
/// name \p name. Should be preferred over
/// {{ cls.cppClassName }}(schemaObj.GetPrim(), name), as it preserves
/// SchemaBase state.
explicit {{ cls.cppClassName }}(
const UsdSchemaBase& schemaObj, const TfToken &name)
: {{ cls.parentCppClassName }}(schemaObj, /*instanceName*/ name)
{ }
{% else %}
/// Construct a {{ cls.cppClassName }} on UsdPrim \p prim .
/// Equivalent to {{ cls.cppClassName }}::Get(prim.GetStage(), prim.GetPath())
/// for a \em valid \p prim, but will not immediately throw an error for
/// an invalid \p prim
explicit {{ cls.cppClassName }}(const UsdPrim& prim=UsdPrim())
: {{ cls.parentCppClassName }}(prim)
{
}
/// Construct a {{ cls.cppClassName }} on the prim held by \p schemaObj .
/// Should be preferred over {{ cls.cppClassName }}(schemaObj.GetPrim()),
/// as it preserves SchemaBase state.
explicit {{ cls.cppClassName }}(const UsdSchemaBase& schemaObj)
: {{ cls.parentCppClassName }}(schemaObj)
{
}
{% endif %}
/// Destructor.
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
virtual ~{{ cls.cppClassName }}() {%- if cls.isAPISchemaBase %} = 0{% endif %};
{% if cls.isMultipleApply %}
/// Return a vector of names of all pre-declared attributes for this schema
/// class and all its ancestor classes for a given instance name. Does not
/// include attributes that may be authored by custom/extended methods of
/// the schemas involved. The names returned will have the proper namespace
/// prefix.
{% else %}
/// Return a vector of names of all pre-declared attributes for this schema
/// class and all its ancestor classes. Does not include attributes that
/// may be authored by custom/extended methods of the schemas involved.
{% endif %}
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static const TfTokenVector &
{% if cls.isMultipleApply %}
GetSchemaAttributeNames(
bool includeInherited=true, const TfToken instanceName=TfToken());
{% else %}
GetSchemaAttributeNames(bool includeInherited=true);
{% endif %}
{% if cls.isMultipleApply %}
/// Returns the name of this multiple-apply schema instance
TfToken GetName() const {
return _GetInstanceName();
}
{% endif %}
{% if not cls.isAPISchemaBase %}
/// Return a {{ cls.cppClassName }} holding the prim adhering to this
/// schema at \p path on \p stage. If no prim exists at \p path on
/// \p stage, or if the prim at that path does not adhere to this schema,
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
/// return an invalid schema object. \p path must be of the format
/// <path>.{{ cls.propertyNamespacePrefix }}:name .
///
/// This is shorthand for the following:
///
/// \code
/// TfToken name = SdfPath::StripNamespace(path.GetToken());
/// {{ cls.cppClassName }}(
/// stage->GetPrimAtPath(path.GetPrimPath()), name);
/// \endcode
{% else %}
/// return an invalid schema object. This is shorthand for the following:
///
/// \code
/// {{ cls.cppClassName }}(stage->GetPrimAtPath(path));
/// \endcode
{% endif %}
///
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static {{ cls.cppClassName }}
Get(const UsdStagePtr &stage, const SdfPath &path);
{% if cls.isMultipleApply %}
/// Return a {{ cls.cppClassName }} with name \p name holding the
/// prim \p prim. Shorthand for {{ cls.cppClassName }}(prim, name);
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static {{ cls.cppClassName }}
Get(const UsdPrim &prim, const TfToken &name);
{% endif %}
{% endif %}
{% if cls.isConcrete %}
/// Attempt to ensure a \a UsdPrim adhering to this schema at \p path
/// is defined (according to UsdPrim::IsDefined()) on this stage.
///
/// If a prim adhering to this schema at \p path is already defined on this
/// stage, return that prim. Otherwise author an \a SdfPrimSpec with
/// \a specifier == \a SdfSpecifierDef and this schema's prim type name for
/// the prim at \p path at the current EditTarget. Author \a SdfPrimSpec s
/// with \p specifier == \a SdfSpecifierDef and empty typeName at the
/// current EditTarget for any nonexistent, or existing but not \a Defined
/// ancestors.
///
/// The given \a path must be an absolute prim path that does not contain
/// any variant selections.
///
/// If it is impossible to author any of the necessary PrimSpecs, (for
/// example, in case \a path cannot map to the current UsdEditTarget's
/// namespace) issue an error and return an invalid \a UsdPrim.
///
/// Note that this method may return a defined prim whose typeName does not
/// specify this schema class, in case a stronger typeName opinion overrides
/// the opinion at the current EditTarget.
///
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static {{ cls.cppClassName }}
Define(const UsdStagePtr &stage, const SdfPath &path);
{% endif %}
{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
/// Checks if the given name \p baseName is the base name of a property
/// of {{ cls.usdPrimTypeName }}.
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static bool
IsSchemaPropertyBaseName(const TfToken &baseName);
/// Checks if the given path \p path is of an API schema of type
/// {{ cls.usdPrimTypeName }}. If so, it stores the instance name of
/// the schema in \p name and returns true. Otherwise, it returns false.
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static bool
Is{{ cls.usdPrimTypeName }}Path(const SdfPath &path, TfToken *name);
{% endif %}
{% if cls.isPrivateApply %}
private:
{% endif %}
{% if cls.isAppliedAPISchema and not cls.isMultipleApply %}
/// Applies this <b>single-apply</b> API schema to the given \p prim.
/// This information is stored by adding "{{ cls.primName }}" to the
/// token-valued, listOp metadata \em apiSchemas on the prim.
///
/// \return A valid {{ cls.cppClassName }} object is returned upon success.
/// An invalid (or empty) {{ cls.cppClassName }} object is returned upon
/// failure. See \ref UsdPrim::ApplyAPI() for conditions
/// resulting in failure.
///
/// \sa UsdPrim::GetAppliedSchemas()
/// \sa UsdPrim::HasAPI()
/// \sa UsdPrim::ApplyAPI()
/// \sa UsdPrim::RemoveAPI()
///
{% if useExportAPI and not cls.isPrivateApply -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static {{ cls.cppClassName }}
{% if cls.isPrivateApply %}
_Apply(const UsdPrim &prim);
{% else %}
Apply(const UsdPrim &prim);
{% endif %}
{% endif %}
{% if cls.isAppliedAPISchema and cls.isMultipleApply %}
/// Applies this <b>multiple-apply</b> API schema to the given \p prim
/// along with the given instance name, \p name.
///
/// This information is stored by adding "{{ cls.primName }}:<i>name</i>"
/// to the token-valued, listOp metadata \em apiSchemas on the prim.
/// For example, if \p name is 'instance1', the token
/// '{{ cls.primName }}:instance1' is added to 'apiSchemas'.
///
/// \return A valid {{ cls.cppClassName }} object is returned upon success.
/// An invalid (or empty) {{ cls.cppClassName }} object is returned upon
/// failure. See \ref UsdPrim::ApplyAPI() for
/// conditions resulting in failure.
///
/// \sa UsdPrim::GetAppliedSchemas()
/// \sa UsdPrim::HasAPI()
/// \sa UsdPrim::ApplyAPI()
/// \sa UsdPrim::RemoveAPI()
///
{% if useExportAPI and not cls.isPrivateApply -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static {{ cls.cppClassName }}
{% if cls.isPrivateApply %}
_Apply(const UsdPrim &prim, const TfToken &name);
{% else %}
Apply(const UsdPrim &prim, const TfToken &name);
{% endif %}
{% endif %}
protected:
/// Returns the type of schema this class belongs to.
///
/// \sa UsdSchemaType
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
UsdSchemaType _GetSchemaType() const override;
private:
// needs to invoke _GetStaticTfType.
friend class UsdSchemaRegistry;
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
static const TfType &_GetStaticTfType();
static bool _IsTypedSchema();
// override SchemaBase virtuals.
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
const TfType &_GetTfType() const override;
{% for attrName in cls.attrOrder %}
{% set attr = cls.attrs[attrName]%}
{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
{% if attr.apiName != '' %}
public:
// --------------------------------------------------------------------- //
// {{ Upper(attr.apiName) }}
// --------------------------------------------------------------------- //
/// {{ attr.doc }}
///
{% if attr.details %}
/// | ||
/// | -- | -- |
{% for detail in attr.details %}
/// | {{ detail[0] }} | {{ detail[1] }} |
{% endfor %}
{% endif %}
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
UsdAttribute Get{{ Proper(attr.apiName) }}Attr() const;
/// See Get{{ Proper(attr.apiName) }}Attr(), and also
/// \ref Usd_Create_Or_Get_Property for when to use Get vs Create.
/// If specified, author \p defaultValue as the attribute's default,
/// sparsely (when it makes sense to do so) if \p writeSparsely is \c true -
/// the default for \p writeSparsely is \c false.
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
UsdAttribute Create{{ Proper(attr.apiName) }}Attr(VtValue const &defaultValue = VtValue(), bool writeSparsely=false) const;
{% endif %}
{% endfor %}
{% for relName in cls.relOrder %}
{% set rel = cls.rels[relName]%}
{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
{% if rel.apiName != '' %}
public:
// --------------------------------------------------------------------- //
// {{ Upper(rel.apiName) }}
// --------------------------------------------------------------------- //
/// {{ rel.doc }}
///
{% for detail in rel.details %}
/// \n {{ detail[0] }}: {{ detail[1] }}
{% endfor %}
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
UsdRelationship Get{{ Proper(rel.apiName) }}Rel() const;
/// See Get{{ Proper(rel.apiName) }}Rel(), and also
/// \ref Usd_Create_Or_Get_Property for when to use Get vs Create
{% if useExportAPI -%}
{{ Upper(libraryName) }}_API
{% endif -%}
UsdRelationship Create{{ Proper(rel.apiName) }}Rel() const;
{% endif %}
{% endfor %}
public:
// ===================================================================== //
// Feel free to add custom code below this line, it will be preserved by
// the code generator.
//
// Just remember to:
// - Close the class declaration with };
{% if useExportAPI %}
// - Close the namespace with {{ namespaceClose }}
{% endif %}
// - Close the include guard with #endif
// ===================================================================== //
// --(BEGIN CUSTOM CODE)--
| 15,039 | C | 35.772616 | 127 | 0.605093 |
USwampertor/OmniverseJS/ov/usd/usd/resources/codegenTemplates/tokens.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "{{ libraryPath }}/tokens.h"
{% if useExportAPI %}
{{ namespaceOpen }}
{% endif %}
{{ tokensPrefix }}TokensType::{{ tokensPrefix }}TokensType() :
{% for token in tokens %}
{{ token.id }}("{{ token.value }}", TfToken::Immortal),
{% endfor %}
allTokens({
{% for token in tokens %}
{{ token.id }}{% if not loop.last %},{% endif %}
{% endfor %}
})
{
}
TfStaticData<{{ tokensPrefix }}TokensType> {{ tokensPrefix }}Tokens;
{% if useExportAPI %}
{{ namespaceClose }}
{% endif %}
| 1,588 | C++ | 32.104166 | 75 | 0.696474 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/config/extension.toml | [package]
version = "1.0.0"
authors = ["Guowei Zhou <[email protected]>"]
title = "Motionverse"
description = "#"
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/Motionverse/MV-omniverse-extension.git"
# One of categories for UI.
category = "Animation"
# Keywords for the extension
keywords = ["Ai", "Text","Audio","Animation"]
# Icon to show in the extension manager
icon = "data/logo.png"
# Preview to show in the extension manager
preview_image = "data/preview.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import motionverse.engine.coder".
[[python.module]]
name = "motionverse.engine.coder"
| 777 | TOML | 22.575757 | 113 | 0.727156 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/docs/CHANGELOG.md | # Changelog
Motionverse Extension changelog
===========================================
## [1.0.0] - 2022-07-15 | 113 | Markdown | 21.799996 | 43 | 0.433628 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/constants.py | # Copyright (c) 2022 Motionverse Inc. All rights reserved.
# UI constants
# All user-visible strings for the Motionverse window live here so the UI
# code stays free of literals.
# Title of the extension's main window.
WINDOW_NAME = "Motionverse"
# CS_* labels — presumably "connection settings" controls (host/port/start/
# stop streaming row); confirm against the UI builder.
CS_HOSTNAME_TEXT = "Host/IP"
CS_PORT_TEXT = "PORT"
CS_GOTO_BTN_TEXT = "Contact us"
CS_START_BTN_TEXT = "Start streaming"
CS_STOP_BTN_TEXT = "Stop streaming"
# Web page opened by the "Contact us" button.
CS_URL = "http://motionverse.io/omniverse"
# SKEL_* labels — skeleton-selection controls.
SKEL_SOURCE_EDIT_TEXT = "Target skeleton"
SKEL_SOURCE_BTN_TEXT = "Use highlighted skeleton"
SKEL_INVALID_TEXT = "No skeleton selected"
# RIG_* labels — rig-type dropdown.
RIG_DROPDOWN_TEXT = "Rig Type"
RIG_UNSUPPORTED_TEXT = "Unsupported rig"
# UI image filepaths
# Path is relative to the extension's install directory; joined with the
# extension path at runtime (see UIController.build_ui).
LOGO_FILEPATH = "/data/logo-white.png"
# UI widget spacing
# Horizontal spacing between widgets, in pixels.
CS_H_SPACING = 5
# general constants
DEFAULT_PORT = 4188 | 648 | Python | 27.21739 | 58 | 0.736111 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/extension.py | from email import message
from operator import le
import carb
import omni.ext
import omni.ui as ui
import omni.timeline
import omni.usd
import omni.kit.window.file
from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom
import struct
import asyncio
import pathlib
from typing import cast, Union, List
import traceback
import webbrowser
from .constants import *
import json
import glob
import numpy as np
def get_rig_index(model_joint_names, rig_mappings):
    """Find which rig mapping fits the model's joint set.

    A mapping matches when every joint name it requires (the keys of its
    ``"joint_mappings"`` dict) appears in *model_joint_names*.  When several
    mappings match, the index of the LAST matching one is returned, mirroring
    the original scan order.

    Args:
        model_joint_names: Collection of joint names present on the model.
        rig_mappings: Sequence of dicts, each with a ``"joint_mappings"`` dict.

    Returns:
        Index of the last matching mapping, or ``None`` if none match.
    """
    matched = None
    for idx, mapping in enumerate(rig_mappings):
        required_joints = mapping["joint_mappings"].keys()
        if all(joint in model_joint_names for joint in required_joints):
            matched = idx
    return matched
def get_all_descendents(prim: Usd.Prim,
                        result: Union[List[Usd.Prim], None] = None) -> List[Usd.Prim]:
    """Depth-first collect *prim* and every prim beneath it.

    Bug fix: the previous signature used ``result=[]``, a shared mutable
    default — successive default-argument calls kept appending into the same
    list, returning stale prims from earlier calls.  Passing an explicit list
    (as find_skeleton/find_blendShapes do) still works unchanged.

    Args:
        prim: Root prim of the traversal.
        result: Optional accumulator list to append into; a fresh list is
            created when omitted.

    Returns:
        The accumulator, containing ``prim`` followed by all descendants.
        (Returning it is new but backward-compatible; the original returned
        ``None`` and callers ignored the return value.)
    """
    if result is None:
        result = []
    # Seed the root prim exactly once — only at the top of the recursion,
    # where the accumulator is still empty (same guard as the original).
    if not result:
        result.append(prim)
    children = prim.GetChildren()
    result.extend(children)
    for child in children:
        get_all_descendents(child, result)
    return result
def find_skeleton(path):
    """Return the first UsdSkel.Skeleton prim found at or below *path*.

    Searches the current USD stage's subtree rooted at *path* (depth-first
    via get_all_descendents) for a prim of type UsdSkel.Skeleton.

    Args:
        path: Stage path of the prim subtree to search.

    Returns:
        UsdSkel.Skeleton: Schema wrapper around the first skeleton prim found.

    Raises:
        AssertionError: If no skeleton prim exists under *path*.  Kept as an
            ``assert`` so callers catching AssertionError keep working; note
            asserts are stripped under ``python -O``.
    """
    stage = omni.usd.get_context().get_stage()
    prim = stage.GetPrimAtPath(path)
    descendants = []
    get_all_descendents(prim, descendants)
    skeleton_prim = next(
        (p for p in descendants if p.IsA(UsdSkel.Skeleton)), None)
    assert skeleton_prim is not None, "Could not find skeleton"
    # Fixes: removed leftover debug print() of the schema object, and build
    # the UsdSkel.Skeleton wrapper once instead of twice.
    return UsdSkel.Skeleton(skeleton_prim)
def find_blendShapes(path):
    """Collect every UsdSkel.BlendShape prim at or below *path* on the
    current USD stage, wrapped in its schema class."""
    stage = omni.usd.get_context().get_stage()
    root_prim = stage.GetPrimAtPath(path)
    subtree = []
    get_all_descendents(root_prim, subtree)
    return [UsdSkel.BlendShape(p) for p in subtree if p.IsA(UsdSkel.BlendShape)]
def get_this_files_path():
    """Return this module's directory as a POSIX-style absolute path string."""
    module_dir = pathlib.Path(__file__).parent.absolute()
    return module_dir.as_posix()
#
# styles for UIController class
#
# omni.ui style tables for the streaming controls. Color ints appear to be
# packed 0xAABBGGRR/ABGR — the "red" status circle below uses 0xFF0000FF —
# TODO(review): confirm against omni.ui color documentation.
style_btn_enabled = {
    "Button": {"border_radius": 5.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFFF7E09,"border_color": 0xFFFD761D},
    "Button:hovered": {"background_color": 0xFFFF4F00},
    "Button:pressed": {"background_color": 0xFFFAE26F},
    "Button.Label": {"color": 0xFFFFFFFF},
}
# Greyed-out variant used while the corresponding action is unavailable.
style_btn_disabled = {
    "Button": {"border_radius": 3.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFC0E0C0,"border_color": 0xFFFD7F1D},
    "Button:hovered": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFFAE5A},
    "Button:pressed": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFAB26D},
    "Button.Label": {"color": 0xFF808080},
}
# Connection status light: green while streaming, red when idle.
style_status_circle_green = {"background_color": 0xFF00FF00, "border_width": 0}
style_status_circle_red = {"background_color": 0xFF0000FF, "border_width": 0}
# Borderless style for the "Contact us" link button.
style_btn_goto_motionverse = {"Button": {"border_width": 0.0, "border_radius": 3.0, "margin": 5.0, "padding": 10.0}}
#
# UIController class
#
class UIController:
    """Builds and drives the extension's omni.ui window.

    Owns the host/port input fields, the skeleton/rig readouts, the
    start/stop buttons and the status light. All actual streaming work is
    delegated to the owning extension object (*ext*).
    """

    def __init__(self, ext):
        # Back-reference to the MotionverseExtension that owns this UI.
        self.ext = ext
        self.extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext.ext_id)
        # Mirrors whether the network streaming task is currently running;
        # set from the extension's task lifecycle callbacks.
        self._streaming_active = False
        self._window = ui.Window(WINDOW_NAME,width=600, height=260)
        self.build_ui()

    def build_ui(self):
        """Construct the window: logo row, status light, host/port fields,
        skeleton & rig pickers, and the start/stop streaming buttons."""
        with self._window.frame:
            with ui.VStack(height=0):
                with ui.HStack():
                    #logo
                    logo_path = f"{self.extension_path}{LOGO_FILEPATH}"
                    ui.Image(logo_path, width=50,height=50,fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,alignment=ui.Alignment.CENTER)
                    ui.Spacer()
                    ui.Button(
                        CS_GOTO_BTN_TEXT,width=ui.Percent(10), style=style_btn_goto_motionverse,alignment=ui.Alignment.RIGHT_CENTER, clicked_fn=self.launch_motionverse_website)
                with ui.HStack():
                    # green/red status
                    with ui.VStack(width=50, alignment=ui.Alignment.TOP):
                        self._status_circle = ui.Circle(
                            radius = 8,size_policy=ui.CircleSizePolicy.FIXED, style=style_status_circle_red
                        )
                        ui.Spacer()
                    with ui.VStack():
                        # CaptureStream device selection drop-down
                        with ui.HStack():
                            ui.Label(
                                CS_HOSTNAME_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER
                            )
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(50)):
                                ui.Spacer()
                                # NOTE(review): default host/port are hard-coded
                                # here instead of coming from constants.py.
                                self.source_ip_field = ui.StringField(
                                    model=ui.SimpleStringModel("192.168.10.113"), height=0, visible=True
                                )
                                ui.Spacer()
                            ui.Label(
                                CS_PORT_TEXT, width=ui.Percent(10), alignment=ui.Alignment.RIGHT_CENTER
                            )
                            with ui.VStack(width=ui.Percent(10)):
                                ui.Spacer()
                                self.source_port_field = ui.StringField(
                                    model=ui.SimpleStringModel("4188"), height=0, visible=True
                                )
                                ui.Spacer()
                        # skeleton selection
                        with ui.HStack():
                            ui.Label(
                                SKEL_SOURCE_EDIT_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER
                            )
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(50)):
                                ui.Spacer()
                                # Read-only display; refreshed every frame in update_ui().
                                self._skeleton_to_drive_stringfield = ui.StringField(
                                    model=ui.SimpleStringModel(SKEL_INVALID_TEXT), height=0, enabled=False
                                )
                                ui.Spacer()
                            ui.Spacer(width=CS_H_SPACING)
                            self._skel_select_button = ui.Button(
                                SKEL_SOURCE_BTN_TEXT, width=0, clicked_fn=self.select_skeleton
                            )
                        # rig selection
                        with ui.HStack():
                            ui.Label(RIG_DROPDOWN_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER)
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(75)):
                                ui.Spacer()
                                self._selected_rig_label = ui.Label("")
                                ui.Spacer()
                        # start/stop stream buttons
                        with ui.HStack():
                            ui.Spacer(width=ui.Percent(20))
                            self._start_button = ui.Button(
                                CS_START_BTN_TEXT,
                                width=0,
                                clicked_fn=self.start_streaming,
                                enabled=not self.streaming_active,
                                style=style_btn_disabled if self.streaming_active else style_btn_enabled,
                            )
                            ui.Spacer(width=CS_H_SPACING)
                            self._stop_button = ui.Button(
                                CS_STOP_BTN_TEXT,
                                width=0,
                                clicked_fn=self.stop_streaming,
                                enabled=self.streaming_active,
                                style=style_btn_enabled if self.streaming_active else style_btn_disabled,
                            )
                ui.Spacer(height=5)

    def select_skeleton(self):
        """Adopt the first currently-selected stage prim as the streaming
        target by delegating to the extension's init_skeletons()."""
        paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if paths:
            path = paths[0]
            try:
                self.ext.init_skeletons(path)
            except Exception as ex:
                # NOTE(review): the failure reason (ex) is silently dropped;
                # the UI just resets to the "no skeleton" placeholder.
                self._skeleton_to_drive_stringfield.model.set_value(SKEL_INVALID_TEXT)
            self._selected_rig_label.text = self.ext.selected_rig_name or RIG_UNSUPPORTED_TEXT

    def launch_motionverse_website(self):
        """Open the Motionverse contact page in the default browser."""
        webbrowser.open_new_tab(CS_URL)

    def update_ui(self):
        """Per-frame refresh: button enablement/styles, status light color
        and the target-skeleton path readout."""
        if self.streaming_active:
            self._start_button.enabled = False
            self._start_button.set_style(style_btn_disabled)
            self._stop_button.enabled = True
            self._stop_button.set_style(style_btn_enabled)
        else:
            self._start_button.enabled = self.ext.ready_to_stream
            self._start_button.set_style(
                style_btn_enabled if self.ext.ready_to_stream else style_btn_disabled
            )
            self._stop_button.enabled = False
            self._stop_button.set_style(style_btn_disabled)
        if self.streaming_active:
            self._status_circle.set_style(style_status_circle_green)
        else:
            self._status_circle.set_style(style_status_circle_red)
        self._skeleton_to_drive_stringfield.model.set_value(self.ext.target_skeleton_path)

    def start_streaming(self):
        """Start button handler — hand off to the extension."""
        self.ext.connect()

    def stop_streaming(self):
        """Stop button handler — hand off to the extension."""
        self.ext.disconnect("User cancelled")

    @property
    def streaming_active(self):
        # True while the extension's network task is running.
        return self._streaming_active

    @streaming_active.setter
    def streaming_active(self, value):
        self._streaming_active = value
class MotionverseExtension(omni.ext.IExt):
    """Omniverse Kit extension that streams motion-capture frames over TCP
    and retargets them onto a user-selected UsdSkel skeleton.

    Two asyncio tasks cooperate through a bounded queue: a network reader
    (_do_net_io) and a skeleton updater (_update_skeleton_loop). Rig
    retargeting tables are loaded from xform_*.json files next to this
    module.
    """

    def __init__(self):
        # Handles for the two cooperating asyncio tasks (None while idle).
        self._net_io_task = None
        self._update_skeleton_task = None
        # UsdSkel.Skeleton currently being driven; set by init_skeletons().
        self.target_skeleton = None
        self.skel_cache = UsdSkel.Cache()

    def on_startup(self, ext_id):
        """Kit entry point: load rig mappings, subscribe to the per-frame
        update stream and build the UI window."""
        self.import_rig_mappings_from_json_files()
        self.ext_id = ext_id
        stream = omni.kit.app.get_app().get_update_event_stream()
        self.update_sub = stream.create_subscription_to_pop(self.update_ui, name="update frame")
        self.ui_controller = UIController(self)

    def connect(self):
        """Tear down any previous session, then start the reader/updater
        task pair against the host/port entered in the UI."""
        self.disconnect("Resetting connection")
        host = self.ui_controller.source_ip_field.model.as_string
        port = self.ui_controller.source_port_field.model.as_int
        loop = asyncio.get_event_loop()
        # NOTE(review): the explicit loop= argument was deprecated in
        # Python 3.8 and removed in 3.10 — verify the Kit runtime version.
        queue = asyncio.Queue(maxsize=10, loop=loop)
        # Give previously-cancelled tasks up to 1s each to unwind.
        if self._net_io_task:
            loop.run_until_complete(asyncio.wait({self._net_io_task}, timeout=1.0))
        if self._update_skeleton_task:
            loop.run_until_complete(asyncio.wait({self._update_skeleton_task}, timeout=1.0))
        self._net_io_task = loop.create_task(self._do_net_io(host, port, queue))
        self._update_skeleton_task = loop.create_task(self._update_skeleton_loop(queue))
        # Either task finishing cancels its sibling (see on_task_complete).
        self._net_io_task.add_done_callback(self.on_task_complete)
        self._update_skeleton_task.add_done_callback(self.on_task_complete)

    def on_task_complete(self, fut=None):
        """Done-callback shared by both tasks: cancel the sibling task and
        turn the UI status light off."""
        if fut is self._net_io_task:
            self._update_skeleton_task.cancel()
        elif fut is self._update_skeleton_task:
            self._net_io_task.cancel()
        self.ui_controller.streaming_active = False

    async def _do_net_io(self, host, port, queue):
        """Open the TCP connection, send a 2-byte client handshake and pump
        fixed-size frames into *queue* until cancelled or disconnected."""
        self.ui_controller.streaming_active = True
        writer = None
        try:
            reader, writer = await asyncio.open_connection(host, port)
            writer.write(b"ov")  # handshake identifying this Omniverse client
            await self._read_client(reader, queue)
        except asyncio.CancelledError:
            print("Network streaming cancelled")
        except:
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
            carb.log_error(traceback.format_exc())
        finally:
            if writer is not None:
                writer.close()
                await writer.wait_closed()
                print("TCP connection closed")
            print("Net I/O task stopped")

    async def _read_client(self, reader, queue):
        """Read fixed 1660-byte packets (415 float32s — see
        FrameDetections.ParseFromString) and enqueue them forever."""
        while True:
            message_data = await reader.readexactly(1660)
            await queue.put(message_data)

    async def _update_skeleton_loop(self, queue):
        """Consume raw packets from *queue*, decode each into a
        FrameDetections and apply it to the target skeleton."""
        try:
            while True:
                message = await queue.get()
                fd = FrameDetections()
                fd.ParseFromString(message)
                self.update_skeleton(fd)
        except asyncio.CancelledError:
            print("Skeleton update task cancelled")
        except:
            carb.log_error(traceback.format_exc())

    def disconnect(self, reason=str()):
        """Cancel the network task (which cascades to the updater task via
        on_task_complete). *reason* is currently unused."""
        # NOTE(review): this local assignment is dead code — presumably meant
        # to be self.ui_controller.streaming_active = False (the flag is
        # reset in on_task_complete anyway).
        streaming_active = False
        if self._net_io_task is not None:
            self._net_io_task.cancel()

    def import_rig_mappings_from_json_files(self):
        """Load every xform_*.json next to this module into
        self.rig_mappings (list of dicts)."""
        self.rig_mappings = []
        rig_filenames = glob.glob(get_this_files_path() + "/xform_*.json")
        if rig_filenames is not None:
            for filename in rig_filenames:
                # NOTE(review): file handles are never closed; a with-block
                # would be safer. open() also never returns None, so the
                # else branch is unreachable.
                rig_mapfile = open(filename, "r")
                if rig_mapfile is not None:
                    self.rig_mappings.append(json.load(rig_mapfile))
                else:
                    print("error - could not load file %s" % filename)

    def init_skeletons(self, skel_root_path):
        """Bind the skeleton found under *skel_root_path* as the streaming
        target: match it to a known rig mapping, precompute the axis
        alignment matrix, and ensure a rest pose exists.

        Raises:
            AssertionError: when no rig mapping matches the skeleton.
        """
        self.selected_rig_index = None
        self.motion_skel_anim = None
        self.selected_joints = None
        stage = omni.usd.get_context().get_stage()
        selected_skeleton = find_skeleton(skel_root_path)
        blendShapes = find_blendShapes(skel_root_path)
        # NOTE(review): leftover debug prints below.
        print("skel_cache =====",self.skel_cache)
        skel_query = self.skel_cache.GetSkelQuery(selected_skeleton)
        print("selected_skeleton ====",selected_skeleton)
        print("blendShapes[0] ====",blendShapes[0])
        # blendShape_query = UsdSkel.BlendShapeQuery(blendShapes[0])
        # print("blendShape_query",blendShape_query)
        joint_tokens = skel_query.GetJointOrder()
        jointPaths = [Sdf.Path(jointToken) for jointToken in joint_tokens]
        all_joint_names = [jointPath.name for jointPath in jointPaths]
        # all_blendshape_names = [blendShapePath.name for blendShapePath in blendShapePaths]
        self.selected_rig_index = get_rig_index(all_joint_names, self.rig_mappings)
        assert self.selected_rig_index is not None, "Unsupported rig"
        self.target_skeleton = selected_skeleton
        self.target_skel_root = UsdSkel.Root.Find(self.target_skeleton.GetPrim())
        # print("target_skeleton = ",self.target_skeleton.GetPrim())
        # skel_root_rotate_xyz is a set of rotations in XYZ order used to align the rest pose
        # with wrnch's axes (+Y up, +Z forward)
        skel_root_rotate_xyz = self.rig_mappings[self.selected_rig_index]["skel_root_rotate_xyz"]
        rot_x = Gf.Rotation(Gf.Vec3d(1, 0, 0), skel_root_rotate_xyz[0])
        rot_y = Gf.Rotation(Gf.Vec3d(0, 1, 0), skel_root_rotate_xyz[1])
        rot_z = Gf.Rotation(Gf.Vec3d(0, 0, 1), skel_root_rotate_xyz[2])
        self.rest_xform_adjust = Gf.Matrix4d()
        self.rest_xform_adjust.SetRotate(rot_x * rot_y * rot_z)
        self.rest_xform_adjust_inverse = self.rest_xform_adjust.GetInverse()
        # Skeletons without an authored rest pose get one computed from
        # their current local transforms.
        if not skel_query.HasRestPose():
            xforms = skel_query.ComputeJointLocalTransforms()
            self.target_skeleton.GetRestTransformsAttr().Set(xforms)
        self.skel_cache.Clear()

    def update_skeleton(self, fd):
        """Apply one decoded frame (*fd*, a FrameDetections) to the bound
        UsdSkel.Animation: lazily initialize on first frame, then write
        root translation and per-joint rotations at time 0."""
        if self.selected_joints is None:
            self._init_animation(fd.body_pose_names)
        num_joints = len(self.rest_xforms_anim_global)
        root_index = self.motion_to_anim_index["Hips"]
        motion_xforms_global = Vt.Matrix4dArray(num_joints)
        for i, pose in enumerate(fd.body_poses):
            name = fd.body_pose_names[i]
            if name in self.motion_to_anim_index:
                anim_index = self.motion_to_anim_index[name]
                # Incoming quaternion is stored x,y,z,w; Gf.Quatd wants w first.
                q = pose['rotation']
                t = pose['position']
                rot = Gf.Rotation(Gf.Quatd(q[3], q[0], q[1], q[2]))
                trans = Gf.Vec3d(t[0], t[1], t[2])
                xform = Gf.Matrix4d()
                xform.SetTransform(rot, trans)
                motion_xforms_global[anim_index] = xform
        # Concatenate each motion transform onto the aligned rest pose.
        target_pose_xforms_global = Vt.Matrix4dArray(
            [
                base_xform * motion_xform
                for motion_xform, base_xform in zip(motion_xforms_global, self.rest_xforms_anim_global)
            ]
        )
        root_xform = self.rest_xform_adjust_inverse
        target_xforms_local = UsdSkel.ComputeJointLocalTransforms(
            self.anim_topology, target_pose_xforms_global, root_xform
        )
        anim_rotations = Vt.QuatfArray([Gf.Quatf(xform.ExtractRotationQuat()) for xform in target_xforms_local])
        height_offset = 0
        # Apply root motion to the animation attr
        local_translations_attr = self.motion_skel_anim.GetTranslationsAttr()
        local_translations = local_translations_attr.Get(0)
        local_translations[root_index] = Gf.Vec3f(
            root_xform.Transform(
                Gf.Vec3d(0, 1, 0) * height_offset + motion_xforms_global[root_index].ExtractTranslation()
            )
        )
        local_translations_attr.Set(local_translations, 0)
        # Apply joint rotations to animation attr
        self.motion_skel_anim.GetRotationsAttr().Set(anim_rotations, 0)

    def _init_animation(self,selected_joints):
        """First-frame setup: build the joint-name mapping between the
        incoming stream (*selected_joints*) and the skeleton, author a
        UsdSkel.Animation, bind it, and cache aligned rest transforms."""
        stage = omni.usd.get_context().get_stage()
        rig_mapping = self.rig_mappings[self.selected_rig_index]["joint_mappings"]
        skel_query = self.skel_cache.GetSkelQuery(self.target_skeleton)
        joint_tokens = skel_query.GetJointOrder()
        # Map leaf joint name -> full joint token path.
        joint_names = {Sdf.Path(token).name: token for token in joint_tokens}
        print(joint_names)
        # Lookup index of joint by token
        joint_token_indices = {token: index for index, token in enumerate(joint_tokens)}
        motion_to_token = {
            value: joint_names[key] for key, value in rig_mapping.items() if value in selected_joints
        }
        anim_tokens = Vt.TokenArray(motion_to_token.values())
        assert len(anim_tokens) > 0
        anim_token_indices = {token: index for index, token in enumerate(anim_tokens)}
        active_token_indices = [joint_token_indices[token] for token in anim_tokens]
        self.motion_to_anim_index = {
            motion_name: anim_token_indices[token] for motion_name, token in motion_to_token.items()
        }
        self.anim_topology = UsdSkel.Topology([Sdf.Path(token) for token in anim_tokens])
        assert self.anim_topology.Validate()
        anim_path = self.target_skeleton.GetPath().AppendChild("SkelRoot")
        self.motion_skel_anim = UsdSkel.Animation.Define(stage, anim_path)
        print("anim_tokens=",anim_tokens)
        self.motion_skel_anim.GetJointsAttr().Set(anim_tokens)
        # NOTE(review): blendShapes set to joint tokens — looks wrong; the
        # scripts/ variant of this file comments this line out.
        self.motion_skel_anim.GetBlendShapesAttr().Set(anim_tokens)
        # Set our UsdSkelAnimation as the animationSource of the UsdSkelSkeleton
        binding = UsdSkel.BindingAPI.Apply(self.target_skeleton.GetPrim())
        binding.CreateAnimationSourceRel().SetTargets([self.motion_skel_anim.GetPrim().GetPath()])
        # Set initial the scale, translation, and rotation attributes for the UsdSkelAnimation.
        # Note that these attributes need to be in the UsdSkelSkeleton's Local Space.
        root_xform = Gf.Matrix4d()
        root_xform.SetIdentity()
        root_xform = self.rest_xform_adjust
        identity_xform = Gf.Matrix4d()
        identity_xform.SetIdentity()
        rest_xforms_local = self.target_skeleton.GetRestTransformsAttr().Get()
        assert rest_xforms_local, "Skeleton has no restTransforms"
        skel_topology = skel_query.GetTopology()
        # Collapse every ancestor of the first animated joint into that
        # joint, so non-animated parents contribute identity.
        anim_start_index = active_token_indices[0]
        xform_accum = Gf.Matrix4d()
        xform_accum.SetIdentity()
        index = skel_topology.GetParent(anim_start_index)
        while index >= 0:
            xform_accum = rest_xforms_local[index] * xform_accum
            rest_xforms_local[index] = identity_xform
            index = skel_topology.GetParent(index)
        rest_xforms_local[anim_start_index] = xform_accum * rest_xforms_local[anim_start_index]
        # Set the rest pose transforms
        self.target_skeleton.GetRestTransformsAttr().Set(rest_xforms_local)
        # Joint transforms in world coordinates such that the t-pose is aligned with wrnch's
        # base t-pose (+Y up, +Z forward)
        rest_xforms_global = UsdSkel.ConcatJointTransforms(skel_topology, rest_xforms_local, root_xform)
        # Get the subset of the rest transforms that correspond to our UsdSkelAnimation attrs.
        # We're going to concatenate these to the wrx transforms to get the desired
        # pose
        self.rest_xforms_anim_global = Vt.Matrix4dArray([rest_xforms_global[i] for i in active_token_indices])
        base_xforms_anim_local = UsdSkel.ComputeJointLocalTransforms(
            self.anim_topology, self.rest_xforms_anim_global, identity_xform
        )
        self.motion_skel_anim.SetTransforms(base_xforms_anim_local, 0)
        self.selected_joints = set(selected_joints)

    def update_ui(self, dt):
        """Per-frame subscription callback — refresh the UI, disconnecting
        first if the refresh itself fails."""
        try:
            self.ui_controller.update_ui()
        except:
            self.disconnect("Error updating UI")
            raise

    def on_shutdown(self):
        # NOTE(review): these attributes belong to UIController, not this
        # class — looks copy-pasted. The update subscription, UI window and
        # running asyncio tasks are never torn down here (compare the
        # scripts/ variant, which does).
        self.ext = None
        self._window = None

    @property
    def ready_to_stream(self):
        # Truthy once a valid target skeleton prim has been selected.
        has_skeleton_target = self.target_skeleton is not None and self.target_skeleton.GetPrim()
        return has_skeleton_target

    @property
    def target_skeleton_path(self):
        # Path string of the bound skeleton, or "" when none is bound.
        if not self.target_skeleton or not self.target_skeleton.GetPrim():
            return ""
        else:
            return str(self.target_skeleton.GetPath())

    @property
    def selected_rig_name(self):
        # Display name of the matched rig mapping, or None before a match.
        if self.selected_rig_index is not None:
            return self.rig_mappings[self.selected_rig_index]["display_name"]
        else:
            return None
class FrameDetections():
    """One decoded motion-capture packet: 51 face blend-shape weights
    followed by per-joint rotation (quaternion, x/y/z/w) + position
    triples, one 7-float row per named joint."""

    def __init__(self):
        # Both attributes are populated by ParseFromString().
        self.body_poses = None
        self.faces = None
        # Fixed wire order of joints; row i of the decoded body data
        # corresponds to body_pose_names[i].
        self.body_pose_names = ("Hips","LeftUpLeg","RightUpLeg","LeftLeg","RightLeg","LeftFoot","RightFoot","Spine","Spine1","Neck","Head","LeftShoulder","RightShoulder","LeftArm",
            "RightArm","LeftForeArm","RightForeArm","LeftHand","RightHand","LeftToeBase","RightToeBase","LeftHandThumb1","LeftHandThumb2","LeftHandThumb3",
            "LeftHandIndex1","LeftHandIndex2","LeftHandIndex3","LeftHandMiddle1","LeftHandMiddle2","LeftHandMiddle3","LeftHandRing1","LeftHandRing2","LeftHandRing3","LeftHandPinky1",
            "LeftHandPinky2","LeftHandPinky3","RightHandThumb1","RightHandThumb2","RightHandThumb3","RightHandIndex1","RightHandIndex2","RightHandIndex3","RightHandMiddle1",
            "RightHandMiddle2","RightHandMiddle3","RightHandRing1","RightHandRing2","RightHandRing3","RightHandPinky1","RightHandPinky2","RightHandPinky3")

    def ParseFromString(self, value):
        """Decode a 1660-byte packet of 415 native-endian float32s into
        self.faces (first 51 values) and self.body_poses (dicts with
        'rotation' — 4 floats — and 'position' — 3 floats — per joint)."""
        decoded = struct.unpack("415f", value)
        face_count = 51
        self.faces = decoded[:face_count]
        joint_rows = np.asarray(decoded[face_count:]).reshape(-1, 7)  # per joint: 4 quat + 3 pos
        self.body_poses = [
            {'rotation': joint_rows[row][:4], 'position': joint_rows[row][4:]}
            for row in range(len(self.body_pose_names))
        ]
| 23,616 | Python | 39.302048 | 180 | 0.588626 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/styles.py | #
# styles
#
# omni.ui style tables for the streaming controls. Color ints appear to be
# packed 0xAABBGGRR/ABGR — the "red" status circle below uses 0xFF0000FF —
# TODO(review): confirm against omni.ui color documentation.
style_btn_enabled = {
    "Button": {"border_radius": 5.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFFF7E09,"border_color": 0xFFFD761D},
    "Button:hovered": {"background_color": 0xFFFF4F00},
    "Button:pressed": {"background_color": 0xFFFAE26F},
    "Button.Label": {"color": 0xFFFFFFFF},
}
# Greyed-out variant used while the corresponding action is unavailable.
style_btn_disabled = {
    "Button": {"border_radius": 3.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFC0E0C0,"border_color": 0xFFFD7F1D},
    "Button:hovered": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFFAE5A},
    "Button:pressed": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFAB26D},
    "Button.Label": {"color": 0xFF808080},
}
# Connection status light: green while streaming, red when idle.
style_status_circle_green = {"background_color": 0xFF00FF00, "border_width": 0}
style_status_circle_red = {"background_color": 0xFF0000FF, "border_width": 0}
# Borderless style for the "Contact us" link button.
style_btn_goto_motionverse = {"Button": {"border_width": 0.0, "border_radius": 3.0, "margin": 5.0, "padding": 10.0}}
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/extension.py | import carb
import omni.ext
import omni.timeline
import omni.usd
import omni.kit.window.file
import traceback
import json
import glob
import asyncio
from email import message
from operator import le
from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom
from typing import cast, Union, List
from .constants import *
from .ui import *
from .styles import *
from .utils import *
class MotionverseExtension(omni.ext.IExt):
    """Refactored (scripts-package) variant of the Motionverse extension:
    streams mocap frames over TCP and retargets them onto a UsdSkel
    skeleton. Uses log_* helpers instead of print and tears itself down in
    on_shutdown, unlike the flat-module variant.
    """

    def __init__(self):
        # Handles for the two cooperating asyncio tasks (None while idle).
        self._net_io_task = None
        self._update_skeleton_task = None
        # Target skeleton and the root path it was found under.
        self.target_skeleton = None
        self.skel_root_path = None
        self.skel_cache = UsdSkel.Cache()

    def on_startup(self, ext_id):
        """Kit entry point: load rig mappings, subscribe to per-frame
        updates and build the UI window."""
        self.import_rig_mappings_from_json_files()
        self.ext_id = ext_id
        stream = omni.kit.app.get_app().get_update_event_stream()
        self.update_sub = stream.create_subscription_to_pop(self.update_ui, name="update frame")
        self.ui_controller = UIController(self)

    def connect(self):
        """Restart the reader/updater task pair against the host/port
        currently entered in the UI."""
        self.disconnect("Resetting connection")
        host = self.ui_controller.source_ip_field.model.as_string
        port = self.ui_controller.source_port_field.model.as_int
        loop = asyncio.get_event_loop()
        # NOTE(review): asyncio.Queue's loop= argument was removed in
        # Python 3.10 — verify the Kit runtime version.
        queue = asyncio.Queue(maxsize=10, loop=loop)
        # Give previously-cancelled tasks up to 1s each to unwind.
        if self._net_io_task:
            loop.run_until_complete(asyncio.wait({self._net_io_task}, timeout=1.0))
        if self._update_skeleton_task:
            loop.run_until_complete(asyncio.wait({self._update_skeleton_task}, timeout=1.0))
        self._net_io_task = loop.create_task(self._do_net_io(host, port, queue))
        self._update_skeleton_task = loop.create_task(self._update_skeleton_loop(queue))
        # Either task finishing cancels its sibling (see on_task_complete).
        self._net_io_task.add_done_callback(self.on_task_complete)
        self._update_skeleton_task.add_done_callback(self.on_task_complete)

    def on_task_complete(self, fut=None):
        """Done-callback for both tasks: cancel the sibling and clear the
        UI streaming flag."""
        if fut is self._net_io_task:
            self._update_skeleton_task.cancel()
        elif fut is self._update_skeleton_task:
            self._net_io_task.cancel()
        self.ui_controller.streaming_active = False

    async def _do_net_io(self, host, port, queue):
        """Open the TCP connection, send the 2-byte handshake and pump
        fixed-size frames into *queue* until cancelled/disconnected."""
        self.ui_controller.streaming_active = True
        writer = None
        try:
            reader, writer = await asyncio.open_connection(host, port)
            writer.write(b"ov")  # handshake identifying this client
            await self._read_client(reader, queue)
        except asyncio.CancelledError:
            log_info("Network streaming cancelled")
        except:
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
            carb.log_error(traceback.format_exc())
        finally:
            if writer is not None:
                writer.close()
                await writer.wait_closed()
                log_info("TCP connection closed")
            log_info("Net I/O task stopped")

    async def _read_client(self, reader, queue):
        """Read fixed 1660-byte packets (415 float32s — see
        FrameDetections.ParseFromString) and enqueue them forever."""
        while True:
            message_data = await reader.readexactly(1660)
            await queue.put(message_data)

    async def _update_skeleton_loop(self, queue):
        """Consume raw packets from *queue*, decode each into a
        FrameDetections and apply it to the target skeleton."""
        try:
            while True:
                message = await queue.get()
                fd = FrameDetections()
                fd.ParseFromString(message)
                self.update_skeleton(fd)
        except asyncio.CancelledError:
            log_info("Skeleton update task cancelled")
        except:
            carb.log_error(traceback.format_exc())

    def disconnect(self, reason=str()):
        """Cancel the network task (cascades to the updater task).
        *reason* is currently unused."""
        # NOTE(review): dead local — presumably meant
        # self.ui_controller.streaming_active = False.
        streaming_active = False
        if self._net_io_task is not None:
            self._net_io_task.cancel()

    def import_rig_mappings_from_json_files(self):
        """Load every xform_*.json next to this module into
        self.rig_mappings."""
        self.rig_mappings = []
        rig_filenames = glob.glob(get_this_files_path() + "/xform_*.json")
        if rig_filenames is not None:
            for filename in rig_filenames:
                # NOTE(review): handle never closed; prefer a with-block.
                rig_mapfile = open(filename, "r")
                if rig_mapfile is not None:
                    self.rig_mappings.append(json.load(rig_mapfile))
                else:
                    log_info("error - could not load file %s" % filename)

    def init_skeletons(self, skel_root_path):
        """Bind the skeleton found under *skel_root_path*: match a rig
        mapping, precompute the axis-alignment matrix, ensure a rest pose.

        Raises:
            AssertionError: when no rig mapping matches the skeleton.
        """
        self.selected_rig_index = None
        self.motion_skel_anim = None
        self.selected_joints = None
        self.skel_root_path = skel_root_path
        selected_skeleton = find_skeleton(skel_root_path)
        skel_query = self.skel_cache.GetSkelQuery(selected_skeleton)
        joint_tokens = skel_query.GetJointOrder()
        jointPaths = [Sdf.Path(jointToken) for jointToken in joint_tokens]
        all_joint_names = [jointPath.name for jointPath in jointPaths]
        self.selected_rig_index = get_rig_index(all_joint_names, self.rig_mappings)
        assert self.selected_rig_index is not None, "Unsupported rig"
        self.target_skeleton = selected_skeleton
        self.target_skel_root = UsdSkel.Root.Find(self.target_skeleton.GetPrim())
        # Rotations in XYZ order aligning the rest pose with the capture
        # stream's axes (+Y up, +Z forward).
        skel_root_rotate_xyz = self.rig_mappings[self.selected_rig_index]["skel_root_rotate_xyz"]
        rot_x = Gf.Rotation(Gf.Vec3d(1, 0, 0), skel_root_rotate_xyz[0])
        rot_y = Gf.Rotation(Gf.Vec3d(0, 1, 0), skel_root_rotate_xyz[1])
        rot_z = Gf.Rotation(Gf.Vec3d(0, 0, 1), skel_root_rotate_xyz[2])
        self.rest_xform_adjust = Gf.Matrix4d()
        self.rest_xform_adjust.SetRotate(rot_x * rot_y * rot_z)
        self.rest_xform_adjust_inverse = self.rest_xform_adjust.GetInverse()
        # Skeletons without an authored rest pose get one computed from
        # their current local transforms.
        if not skel_query.HasRestPose():
            xforms = skel_query.ComputeJointLocalTransforms()
            self.target_skeleton.GetRestTransformsAttr().Set(xforms)
        self.skel_cache.Clear()

    def update_skeleton(self, fd):
        """Apply one decoded frame (*fd*) to the bound UsdSkel.Animation:
        lazily initialize on first frame, then write root translation and
        per-joint rotations at time 0."""
        if self.selected_joints is None:
            self._init_animation(fd.body_pose_names)
        num_joints = len(self.rest_xforms_anim_global)
        root_index = self.motion_to_anim_index["Hips"]
        motion_xforms_global = Vt.Matrix4dArray(num_joints)
        for i, pose in enumerate(fd.body_poses):
            name = fd.body_pose_names[i]
            if name in self.motion_to_anim_index:
                anim_index = self.motion_to_anim_index[name]
                # Incoming quaternion is x,y,z,w; Gf.Quatd wants w first.
                q = pose['rotation']
                t = pose['position']
                rot = Gf.Rotation(Gf.Quatd(q[3], q[0], q[1], q[2]))
                trans = Gf.Vec3d(t[0], t[1], t[2])
                xform = Gf.Matrix4d()
                xform.SetTransform(rot, trans)
                motion_xforms_global[anim_index] = xform
        # Concatenate each motion transform onto the aligned rest pose.
        target_pose_xforms_global = Vt.Matrix4dArray(
            [
                base_xform * motion_xform
                for motion_xform, base_xform in zip(motion_xforms_global, self.rest_xforms_anim_global)
            ]
        )
        root_xform = self.rest_xform_adjust_inverse
        target_xforms_local = UsdSkel.ComputeJointLocalTransforms(
            self.anim_topology, target_pose_xforms_global, root_xform
        )
        anim_rotations = Vt.QuatfArray([Gf.Quatf(xform.ExtractRotationQuat()) for xform in target_xforms_local])
        height_offset = 0
        # Root motion: translate the hips joint in skeleton-local space.
        local_translations_attr = self.motion_skel_anim.GetTranslationsAttr()
        local_translations = local_translations_attr.Get(0)
        local_translations[root_index] = Gf.Vec3f(
            root_xform.Transform(
                Gf.Vec3d(0, 1, 0) * height_offset + motion_xforms_global[root_index].ExtractTranslation()
            )
        )
        local_translations_attr.Set(local_translations, 0)
        self.motion_skel_anim.GetRotationsAttr().Set(anim_rotations, 0)
        # self.motion_skel_anim.GetBlendShapeWeightsAttr().Set(fd.faces,0)

    def _init_animation(self,selected_joints):
        """First-frame setup: map incoming joint names to skeleton tokens,
        author and bind a UsdSkel.Animation, cache aligned rest
        transforms."""
        stage = omni.usd.get_context().get_stage()
        rig_mapping = self.rig_mappings[self.selected_rig_index]["joint_mappings"]
        skel_query = self.skel_cache.GetSkelQuery(self.target_skeleton)
        joint_tokens = skel_query.GetJointOrder()
        # Leaf joint name -> full joint token path.
        joint_names = {Sdf.Path(token).name: token for token in joint_tokens}
        joint_token_indices = {token: index for index, token in enumerate(joint_tokens)}
        motion_to_token = {
            value: joint_names[key] for key, value in rig_mapping.items() if value in selected_joints
        }
        anim_tokens = Vt.TokenArray(motion_to_token.values())
        assert len(anim_tokens) > 0
        anim_token_indices = {token: index for index, token in enumerate(anim_tokens)}
        active_token_indices = [joint_token_indices[token] for token in anim_tokens]
        self.motion_to_anim_index = {
            motion_name: anim_token_indices[token] for motion_name, token in motion_to_token.items()
        }
        self.anim_topology = UsdSkel.Topology([Sdf.Path(token) for token in anim_tokens])
        assert self.anim_topology.Validate()
        anim_path = self.target_skeleton.GetPath().AppendChild("SkelRoot")
        self.motion_skel_anim = UsdSkel.Animation.Define(stage, anim_path)
        self.motion_skel_anim.GetJointsAttr().Set(anim_tokens)
        # self.motion_skel_anim.GetBlendShapesAttr().Set(anim_tokens)
        # Bind the animation as the skeleton's animationSource.
        binding = UsdSkel.BindingAPI.Apply(self.target_skeleton.GetPrim())
        binding.CreateAnimationSourceRel().SetTargets([self.motion_skel_anim.GetPrim().GetPath()])
        root_xform = Gf.Matrix4d()
        root_xform.SetIdentity()
        root_xform = self.rest_xform_adjust
        identity_xform = Gf.Matrix4d()
        identity_xform.SetIdentity()
        rest_xforms_local = self.target_skeleton.GetRestTransformsAttr().Get()
        assert rest_xforms_local, "Skeleton has no restTransforms"
        skel_topology = skel_query.GetTopology()
        # Collapse every ancestor of the first animated joint into that
        # joint, so non-animated parents contribute identity.
        anim_start_index = active_token_indices[0]
        xform_accum = Gf.Matrix4d()
        xform_accum.SetIdentity()
        index = skel_topology.GetParent(anim_start_index)
        while index >= 0:
            xform_accum = rest_xforms_local[index] * xform_accum
            rest_xforms_local[index] = identity_xform
            index = skel_topology.GetParent(index)
        rest_xforms_local[anim_start_index] = xform_accum * rest_xforms_local[anim_start_index]
        self.target_skeleton.GetRestTransformsAttr().Set(rest_xforms_local)
        # World-space joint transforms with the t-pose aligned to the
        # stream's base pose (+Y up, +Z forward).
        rest_xforms_global = UsdSkel.ConcatJointTransforms(skel_topology, rest_xforms_local, root_xform)
        self.rest_xforms_anim_global = Vt.Matrix4dArray([rest_xforms_global[i] for i in active_token_indices])
        base_xforms_anim_local = UsdSkel.ComputeJointLocalTransforms(
            self.anim_topology, self.rest_xforms_anim_global, identity_xform
        )
        self.motion_skel_anim.SetTransforms(base_xforms_anim_local, 0)
        self.selected_joints = set(selected_joints)

    def update_ui(self, dt):
        """Per-frame subscription callback — refresh the UI, disconnecting
        first if the refresh itself fails."""
        try:
            self.ui_controller.update_ui()
        except:
            self.disconnect("Error updating UI")
            raise

    def on_shutdown(self):
        """Kit exit point: release the UI and cancel streaming tasks."""
        log_info("on_shutdown")
        self.ui_controller.shutdown()
        self.ui_controller = None
        self.disconnect("Extension is shutting down")

    @property
    def ready_to_stream(self):
        # Truthy once a valid target skeleton prim has been selected.
        has_skeleton_target = self.target_skeleton is not None and self.target_skeleton.GetPrim()
        return has_skeleton_target

    @property
    def target_skeleton_path(self):
        # Path string of the bound skeleton, or "" when none is bound.
        if not self.target_skeleton or not self.target_skeleton.GetPrim():
            return ""
        else:
            return str(self.target_skeleton.GetPath())
@property
def selected_rig_name(self):
if self.selected_rig_index is not None:
return self.rig_mappings[self.selected_rig_index]["display_name"]
else:
return None | 11,565 | Python | 39.725352 | 112 | 0.626891 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/utils.py | import pathlib
from typing import cast, Union, List
import carb
from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom
import omni.timeline
import omni.usd
import omni.kit.window.file
import struct
import numpy as np
def log_info(msg):
    """Log *msg* (coerced to its string form) through carb at info level."""
    carb.log_info(f"{msg}")
def log_warn(msg):
    """Log *msg* (coerced to its string form) through carb at warning level."""
    carb.log_warn(f"{msg}")
def log_error(msg):
    """Log *msg* (coerced to its string form) through carb at error level."""
    carb.log_error(f"{msg}")
def get_rig_index(model_joint_names, rig_mappings):
    """Return the index of the LAST mapping whose required joints all occur
    in *model_joint_names*; ``None`` when nothing matches."""
    found = None
    for i, mapping in enumerate(rig_mappings):
        needed = list(mapping["joint_mappings"])
        # No early break on purpose: later matches override earlier ones.
        if all(joint in model_joint_names for joint in needed):
            found = i
    return found
def get_all_descendents(prim, result=None):
    """Recursively collect *prim* and every prim beneath it (pre-order).

    Fixes the original mutable default argument (``result=[]``), which made
    repeated default-argument calls accumulate prims across calls.

    Args:
        prim: root prim; assumed to expose ``GetChildren()`` (a Usd.Prim —
            annotation dropped so the function no longer requires ``Usd``
            at definition time).
        result: optional list to append into; created fresh when omitted.
            The root itself is only appended when the list starts out empty,
            matching the original call pattern.

    Returns:
        The populated list (also mutated in place for callers that pass one).
    """
    if result is None:
        result = []
    if not result:
        result.append(prim)
    children = prim.GetChildren()
    result.extend(children)
    for child in children:
        get_all_descendents(child, result)
    return result
def find_skeleton(path):
    """Return the first UsdSkel.Skeleton prim at or below *path* on the
    current USD stage, wrapped in its schema class.

    Raises:
        AssertionError: when no skeleton prim exists under *path*.
    """
    stage = omni.usd.get_context().get_stage()
    root = stage.GetPrimAtPath(path)
    subtree = []
    get_all_descendents(root, subtree)
    skeleton = next((p for p in subtree if p.IsA(UsdSkel.Skeleton)), None)
    assert skeleton is not None, "Could not find skeleton"
    return UsdSkel.Skeleton(skeleton)
def find_blendShapes(path):
    """Return every UsdSkel.BlendShape prim at or below *path* on the
    current USD stage, each wrapped in its schema class."""
    stage = omni.usd.get_context().get_stage()
    root = stage.GetPrimAtPath(path)
    subtree = []
    get_all_descendents(root, subtree)
    shape_prims = [p for p in subtree if p.IsA(UsdSkel.BlendShape)]
    return [UsdSkel.BlendShape(p) for p in shape_prims]
def get_this_files_path():
    """Return the directory containing this module as a POSIX path string."""
    return pathlib.Path(__file__).absolute().parent.as_posix()
class FrameDetections():
    def __init__(self):
        """Empty frame container; attributes are filled by ParseFromString()."""
        # List of {'rotation': 4 floats, 'position': 3 floats} per named joint.
        self.body_poses = None
        # 51 face blend-shape weights.
        self.faces = None
        # Fixed wire order of joints; row i of the decoded body data
        # corresponds to body_pose_names[i].
        self.body_pose_names = ("Hips","LeftUpLeg","RightUpLeg","LeftLeg","RightLeg","LeftFoot","RightFoot","Spine","Spine1","Neck","Head","LeftShoulder","RightShoulder","LeftArm",
            "RightArm","LeftForeArm","RightForeArm","LeftHand","RightHand","LeftToeBase","RightToeBase","LeftHandThumb1","LeftHandThumb2","LeftHandThumb3",
            "LeftHandIndex1","LeftHandIndex2","LeftHandIndex3","LeftHandMiddle1","LeftHandMiddle2","LeftHandMiddle3","LeftHandRing1","LeftHandRing2","LeftHandRing3","LeftHandPinky1",
            "LeftHandPinky2","LeftHandPinky3","RightHandThumb1","RightHandThumb2","RightHandThumb3","RightHandIndex1","RightHandIndex2","RightHandIndex3","RightHandMiddle1",
            "RightHandMiddle2","RightHandMiddle3","RightHandRing1","RightHandRing2","RightHandRing3","RightHandPinky1","RightHandPinky2","RightHandPinky3")
def ParseFromString(self,value):
message_list=struct.unpack("415f",value)
self.faces = message_list[:51]
body_data = np.array(message_list[51:]).reshape(-1, 7) #joints num, 4+3
self.body_poses = [{'rotation': body_data[idx][:4], 'position': body_data[idx][4:]}
for idx in range(len(self.body_pose_names))] | 3,083 | Python | 42.436619 | 180 | 0.687642 |
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/ui.py | import omni.ui as ui
import omni.kit.ui
import omni.kit.app
import omni.kit.window.filepicker
import webbrowser
from .styles import *
from .constants import *
#
# UIController class
#
class UIController:
    def __init__(self, ext):
        """Create the extension window and build its widgets.

        Args:
            ext: the owning MotionverseExtension; must expose ``ext_id``
                and the streaming entry points the buttons call.
        """
        self.ext = ext
        self.extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext.ext_id)
        # Mirrors whether the network streaming task is currently running.
        self._streaming_active = False
        self._window = ui.Window(WINDOW_NAME,width=600, height=260)
        self.build_ui()
def build_ui(self):
with self._window.frame:
with ui.VStack(height=0):
with ui.HStack():
#logo
logo_path = f"{self.extension_path}{LOGO_FILEPATH}"
ui.Image(logo_path, width=50,height=50,fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,alignment=ui.Alignment.CENTER)
ui.Spacer()
ui.Button(
CS_GOTO_BTN_TEXT,width=ui.Percent(10), style=style_btn_goto_motionverse,alignment=ui.Alignment.RIGHT_CENTER, clicked_fn=self.launch_motionverse_website)
with ui.HStack():
# green/red status
with ui.VStack(width=50, alignment=ui.Alignment.TOP):
self._status_circle = ui.Circle(
radius = 8,size_policy=ui.CircleSizePolicy.FIXED, style=style_status_circle_red
)
ui.Spacer()
with ui.VStack():
# CaptureStream device selection drop-down
with ui.HStack():
ui.Label(
CS_HOSTNAME_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER
)
ui.Spacer(width=CS_H_SPACING)
with ui.VStack(width=ui.Percent(50)):
ui.Spacer()
self.source_ip_field = ui.StringField(
model=ui.SimpleStringModel(DEFAULT_IP), height=0, visible=True
)
ui.Spacer()
ui.Label(
CS_PORT_TEXT, width=ui.Percent(10), alignment=ui.Alignment.RIGHT_CENTER
)
with ui.VStack(width=ui.Percent(10)):
ui.Spacer()
self.source_port_field = ui.StringField(
model=ui.SimpleStringModel(DEFAULT_PORT), height=0, visible=True
)
ui.Spacer()
# skeleton selection
with ui.HStack():
ui.Label(
SKEL_SOURCE_EDIT_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER
)
ui.Spacer(width=CS_H_SPACING)
with ui.VStack(width=ui.Percent(50)):
ui.Spacer()
self._skeleton_to_drive_stringfield = ui.StringField(
model=ui.SimpleStringModel(SKEL_INVALID_TEXT), height=0, enabled=False
)
ui.Spacer()
ui.Spacer(width=CS_H_SPACING)
self._skel_select_button = ui.Button(
SKEL_SOURCE_BTN_TEXT, width=0, clicked_fn=self.select_skeleton
)
# rig selection
with ui.HStack():
ui.Label(RIG_DROPDOWN_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER)
ui.Spacer(width=CS_H_SPACING)
with ui.VStack(width=ui.Percent(75)):
ui.Spacer()
self._selected_rig_label = ui.Label("")
ui.Spacer()
# start/stop stream buttons
with ui.HStack():
ui.Spacer(width=ui.Percent(20))
self._start_button = ui.Button(
CS_START_BTN_TEXT,
width=0,
clicked_fn=self.start_streaming,
enabled=not self.streaming_active,
style=style_btn_disabled if self.streaming_active else style_btn_enabled,
)
ui.Spacer(width=CS_H_SPACING)
self._stop_button = ui.Button(
CS_STOP_BTN_TEXT,
width=0,
clicked_fn=self.stop_streaming,
enabled=self.streaming_active,
style=style_btn_enabled if self.streaming_active else style_btn_disabled,
)
ui.Spacer(height=5)
def shutdown(self):
self._window.frame.clear()
self._window = None
def select_skeleton(self):
paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
if paths:
path = paths[0]
try:
self.ext.init_skeletons(path)
except Exception as ex:
self._skeleton_to_drive_stringfield.model.set_value(SKEL_INVALID_TEXT)
self._selected_rig_label.text = self.ext.selected_rig_name or RIG_UNSUPPORTED_TEXT
def launch_motionverse_website(self):
webbrowser.open_new_tab(CS_URL)
def update_ui(self):
if self.streaming_active:
self._start_button.enabled = False
self._start_button.set_style(style_btn_disabled)
self._stop_button.enabled = True
self._stop_button.set_style(style_btn_enabled)
else:
self._start_button.enabled = self.ext.ready_to_stream
self._start_button.set_style(
style_btn_enabled if self.ext.ready_to_stream else style_btn_disabled
)
self._stop_button.enabled = False
self._stop_button.set_style(style_btn_disabled)
if self.streaming_active:
self._status_circle.set_style(style_status_circle_green)
else:
self._status_circle.set_style(style_status_circle_red)
self._skeleton_to_drive_stringfield.model.set_value(self.ext.target_skeleton_path)
def start_streaming(self):
self.ext.connect()
def stop_streaming(self):
self.ext.disconnect("User cancelled")
@property
def streaming_active(self):
return self._streaming_active
@streaming_active.setter
def streaming_active(self, value):
self._streaming_active = value | 7,135 | Python | 40.730994 | 177 | 0.476524 |
matiascodesal/omni-camera-reticle/README.md | # Camera Reticle Omniverse Extension

The Camera Reticle extension adds a new menu button to all viewports. From this menu, users can enable and configure:
1. Composition Guidelines
2. Safe Area Guidelines
3. Letterbox

I created this to learn and test the Kit SDK. To have a clear design target, I designed the UI as a look-alike of
the similar Unreal Engine Cinematic Viewport control. Even though this was just a test, feel free to use this
extension to help compose your shots or as an example to learn from.
## Adding This Extension
To add a this extension to your Omniverse app:
1. Go into: Extension Manager -> Gear Icon -> Extension Search Path
2. Add this as a search path: `git://github.com/matiascodesal/omni-camera-reticle.git?branch=main&dir=exts` | 910 | Markdown | 46.947366 | 117 | 0.785714 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/styles.py | from pathlib import Path
import omni.ui as ui
from omni.ui import color as cl
# Directory of this module; the icons folder lives at the extension root,
# three package levels up (maticodes/viewport/reticle -> ext root).
CURRENT_PATH = Path(__file__).parent.absolute()
ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("icons")

# Default overlay colors, registered on omni.ui's named color store so the
# style dicts and scene drawing code can reference them by name.
cl.action_safe_default = cl(1.0, 0.0, 0.0)   # red
cl.title_safe_default = cl(1.0, 1.0, 0.0)    # yellow
cl.custom_safe_default = cl(0.0, 1.0, 0.0)   # green
cl.letterbox_default = cl(0.0, 0.0, 0.0, 0.75)   # translucent black bars
cl.comp_lines_default = cl(1.0, 1.0, 1.0, 0.6)   # translucent white guide lines

# Style for the "Safe Areas" column of the reticle menu. Sliders and labels
# are dimmed when their row's checkbox is unchecked (":disabled" variants).
safe_areas_group_style = {
    "Label:disabled": {
        "color": cl(1.0, 1.0, 1.0, 0.2)
    },
    "FloatSlider:enabled": {
        "draw_mode": ui.SliderDrawMode.HANDLE,
        "background_color": cl(0.75, 0.75, 0.75, 1),
        "color": cl.black
    },
    "FloatSlider:disabled": {
        "draw_mode": ui.SliderDrawMode.HANDLE,
        "background_color": cl(0.75, 0.75, 0.75, 0.2),
        "color": cl(0.0, 0.0, 1.0, 0.2)
    },
    "CheckBox": {
        "background_color": cl(0.75, 0.75, 0.75, 1),
        "color": cl.black
    },
    # Color swatches shown under each safe-area slider.
    "Rectangle::ActionSwatch": {
        "background_color": cl.action_safe_default
    },
    "Rectangle::TitleSwatch": {
        "background_color": cl.title_safe_default
    },
    "Rectangle::CustomSwatch": {
        "background_color": cl.custom_safe_default
    }
}

# Style for the "Composition" button grid: one icon per guideline mode,
# with a highlight background on the checked (active) button.
comp_group_style = {
    "Button.Image::Off": {
        "image_url": str(ICON_PATH / "off.png")
    },
    "Button.Image::Thirds": {
        "image_url": str(ICON_PATH / "thirds.png")
    },
    "Button.Image::Quad": {
        "image_url": str(ICON_PATH / "quad.png")
    },
    "Button.Image::Crosshair": {
        "image_url": str(ICON_PATH / "crosshair.png")
    },
    "Button:checked": {
        "background_color": cl(1.0, 1.0, 1.0, 0.2)
    }
}
| 1,688 | Python | 26.688524 | 63 | 0.560427 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/constants.py | """Constants used by the CameraReticleExtension"""
import enum
class CompositionGuidelines(enum.IntEnum):
    """Enum representing all of the composition modes."""
    OFF = 0
    THIRDS = 1
    QUAD = 2
    CROSSHAIR = 3


# Safe-area defaults, as a percentage of the render target (divided by 100
# where the overlay rectangles are built).
DEFAULT_ACTION_SAFE_PERCENTAGE = 93
DEFAULT_TITLE_SAFE_PERCENTAGE = 90
DEFAULT_CUSTOM_SAFE_PERCENTAGE = 85
# Target aspect ratio used to size the letterbox bars.
DEFAULT_LETTERBOX_RATIO = 2.35
DEFAULT_COMPOSITION_MODE = CompositionGuidelines.OFF

# carb.settings paths for the renderer resolution and "Fill Viewport" option.
SETTING_RESOLUTION_WIDTH = "/app/renderer/resolution/width"
SETTING_RESOLUTION_HEIGHT = "/app/renderer/resolution/height"
SETTING_RESOLUTION_FILL = "/app/runLoops/rendering_0/fillResolution"
| 609 | Python | 26.727272 | 68 | 0.760263 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/extension.py | import carb
import omni.ext
from omni.kit.viewport.utility import get_active_viewport_window
from . import constants
from .models import ReticleModel
from .views import ReticleOverlay
class CameraReticleExtension(omni.ext.IExt):
    """Extension entry point: attaches a ReticleOverlay to the active viewport."""

    def on_startup(self, ext_id):
        """Executed when the extension is enabled.

        Args:
            ext_id (str): The extension id, forwarded to the overlay.
        """
        carb.log_info("[maticodes.viewport.reticle] CameraReticleExtension startup")
        # Reticle should ideally be used with "Fill Viewport" turned off.
        settings = carb.settings.get_settings()
        settings.set(constants.SETTING_RESOLUTION_FILL, False)
        # Initialize to None so on_shutdown is safe even when no viewport
        # window exists (previously this attribute was only assigned inside
        # the `if`, making on_shutdown raise AttributeError in that case).
        self.reticle = None
        viewport_window = get_active_viewport_window()
        if viewport_window is not None:
            reticle_model = ReticleModel()
            self.reticle = ReticleOverlay(reticle_model, viewport_window, ext_id)
            self.reticle.build_viewport_overlay()

    def on_shutdown(self):
        """ Executed when the extension is disabled."""
        carb.log_info("[maticodes.viewport.reticle] CameraReticleExtension shutdown")
        # Guard: on_startup may not have created an overlay (no active viewport).
        if getattr(self, "reticle", None) is not None:
            self.reticle.destroy()
            self.reticle = None
| 1,010 | Python | 33.862068 | 85 | 0.70495 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/__init__.py | from .extension import CameraReticleExtension
| 46 | Python | 22.499989 | 45 | 0.891304 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/models.py | """Models used by the CameraReticleExtension"""
import omni.ui as ui
from . import constants
class ReticleModel:
    """Shared data model for the ReticleOverlay and ReticleMenu.

    Both views hold a reference to the same ReticleModel instance, so edits
    made through the menu are immediately visible to the overlay. Any change
    to any submodel fires the callbacks registered via
    ``add_reticle_changed_fn``.
    """

    def __init__(self):
        self.composition_mode = ui.SimpleIntModel(constants.DEFAULT_COMPOSITION_MODE)
        self.action_safe_enabled = ui.SimpleBoolModel(False)
        self.action_safe_percentage = ui.SimpleFloatModel(constants.DEFAULT_ACTION_SAFE_PERCENTAGE, min=0, max=100)
        self.title_safe_enabled = ui.SimpleBoolModel(False)
        self.title_safe_percentage = ui.SimpleFloatModel(constants.DEFAULT_TITLE_SAFE_PERCENTAGE, min=0, max=100)
        self.custom_safe_enabled = ui.SimpleBoolModel(False)
        self.custom_safe_percentage = ui.SimpleFloatModel(constants.DEFAULT_CUSTOM_SAFE_PERCENTAGE, min=0, max=100)
        self.letterbox_enabled = ui.SimpleBoolModel(False)
        self.letterbox_ratio = ui.SimpleFloatModel(constants.DEFAULT_LETTERBOX_RATIO, min=0.001)
        self._register_submodel_callbacks()
        self._callbacks = []

    def _register_submodel_callbacks(self):
        """Listen for value changes on every submodel."""
        for submodel in (
            self.composition_mode,
            self.action_safe_enabled,
            self.action_safe_percentage,
            self.title_safe_enabled,
            self.title_safe_percentage,
            self.custom_safe_enabled,
            self.custom_safe_percentage,
            self.letterbox_enabled,
            self.letterbox_ratio,
        ):
            submodel.add_value_changed_fn(self._reticle_changed)

    def _reticle_changed(self, model):
        """Run every registered callback because some submodel changed.

        Args:
            model (Any): The submodel that has changed. [Unused]
        """
        for notify in self._callbacks:
            notify()

    def add_reticle_changed_fn(self, callback):
        """Register *callback* to run whenever any submodel value changes.

        Useful for rebuilding the overlay whenever any data changes.

        Args:
            callback (function): Called with no arguments on every change.
        """
        self._callbacks.append(callback)
| 2,712 | Python | 44.98305 | 115 | 0.703171 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/views.py | from functools import partial
import carb
import omni.ui as ui
from omni.ui import color as cl
from omni.ui import scene
from . import constants
from .constants import CompositionGuidelines
from .models import ReticleModel
from . import styles
class ReticleOverlay:
    """The reticle viewport overlay.

    Build the reticle graphics and ReticleMenu button on the given viewport window.
    """
    # Every ReticleOverlay created so far; instances are appended in __init__
    # and (note) never removed, even by destroy().
    _instances = []

    def __init__(self, model: ReticleModel, vp_win: ui.Window, ext_id: str):
        """ReticleOverlay constructor

        Args:
            model (ReticleModel): The reticle model
            vp_win (Window): The viewport window to build the overlay on.
            ext_id (str): The extension id.
        """
        self.model = model
        self.vp_win = vp_win
        self.ext_id = ext_id
        # Rebuild the overlay whenever the viewport window changes
        self.vp_win.set_height_changed_fn(self.on_window_changed)
        self.vp_win.set_width_changed_fn(self.on_window_changed)
        self._view_change_sub = None
        try:
            # VP2 resolution change sub
            self._view_change_sub = self.vp_win.viewport_api.subscribe_to_view_change(self.on_window_changed)
        except AttributeError:
            carb.log_info("Using Viewport Legacy: Reticle will not automatically update on resolution changes.")
        # Rebuild the overlay whenever the model changes
        self.model.add_reticle_changed_fn(self.build_viewport_overlay)
        ReticleOverlay._instances.append(self)
        resolution = self.vp_win.viewport_api.get_texture_resolution()
        self._aspect_ratio = resolution[0] / resolution[1]

    @classmethod
    def get_instances(cls):
        """Get all created instances of ReticleOverlay"""
        return cls._instances

    def __del__(self):
        # NOTE(review): destroy() is not idempotent (attributes are set to
        # None), so a second call via __del__ after an explicit destroy()
        # would raise -- confirm intended lifecycle.
        self.destroy()

    def destroy(self):
        """Release the view-change subscription, scene graph, and menu."""
        self._view_change_sub = None
        self.scene_view.scene.clear()
        self.scene_view = None
        self.reticle_menu.destroy()
        self.reticle_menu = None
        self.vp_win = None

    def on_window_changed(self, *args):
        """Update aspect ratio and rebuild overlay when viewport window changes."""
        if self.vp_win is None:
            return
        settings = carb.settings.get_settings()
        # Legacy viewport exposes "fill" via a carb setting; VP2 via the API.
        if type(self.vp_win).__name__ == "LegacyViewportWindow":
            fill = settings.get(constants.SETTING_RESOLUTION_FILL)
        else:
            fill = self.vp_win.viewport_api.fill_frame
        if fill:
            # +8 presumably compensates for window frame padding -- TODO confirm.
            width = self.vp_win.frame.computed_width + 8
            height = self.vp_win.height
        else:
            width, height = self.vp_win.viewport_api.resolution
        self._aspect_ratio = width / height
        self.build_viewport_overlay()

    def get_aspect_ratio_flip_threshold(self):
        """Get magic number for aspect ratio policy.

        Aspect ratio policy doesn't seem to swap exactly when window_aspect_ratio == window_texture_aspect_ratio.
        This is a hack that approximates where the policy changes.
        """
        return self.get_aspect_ratio() - self.get_aspect_ratio() * 0.05

    def build_viewport_overlay(self, *args):
        """Build all viewport graphics and ReticleMenu button."""
        if self.vp_win is not None:
            with self.vp_win.get_frame(self.ext_id):
                with ui.ZStack():
                    # Set the aspect ratio policy depending if the viewport is wider than it is taller or vice versa.
                    if self.vp_win.width / self.vp_win.height > self.get_aspect_ratio_flip_threshold():
                        self.scene_view = scene.SceneView(aspect_ratio_policy=scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL)
                    else:
                        self.scene_view = scene.SceneView(aspect_ratio_policy=scene.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL)
                    # Build all the scene view guidelines
                    with self.scene_view.scene:
                        if self.model.composition_mode.as_int == CompositionGuidelines.THIRDS:
                            self._build_thirds()
                        elif self.model.composition_mode.as_int == CompositionGuidelines.QUAD:
                            self._build_quad()
                        elif self.model.composition_mode.as_int == CompositionGuidelines.CROSSHAIR:
                            self._build_crosshair()
                        if self.model.action_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.action_safe_percentage.as_float / 100.0,
                                                  color=cl.action_safe_default)
                        if self.model.title_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.title_safe_percentage.as_float / 100.0,
                                                  color=cl.title_safe_default)
                        if self.model.custom_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.custom_safe_percentage.as_float / 100.0,
                                                  color=cl.custom_safe_default)
                        if self.model.letterbox_enabled.as_bool:
                            self._build_letterbox()
                    # Build ReticleMenu button
                    with ui.VStack():
                        ui.Spacer()
                        with ui.HStack(height=0):
                            ui.Spacer()
                            self.reticle_menu = ReticleMenu(self.model)

    def _build_thirds(self):
        """Build the scene ui graphics for the Thirds composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        inverse_ratio = 1 / aspect_ratio
        # The constrained axis spans [-1, 1]; the other axis is scaled by the
        # aspect ratio (or its inverse), so lines at +/-0.333 split the frame
        # into thirds in both directions.
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([-0.333 * aspect_ratio, -1, 0], [-0.333 * aspect_ratio, 1, 0], color=line_color)
            scene.Line([0.333 * aspect_ratio, -1, 0], [0.333 * aspect_ratio, 1, 0], color=line_color)
            scene.Line([-aspect_ratio, -0.333, 0], [aspect_ratio, -0.333, 0], color=line_color)
            scene.Line([-aspect_ratio, 0.333, 0], [aspect_ratio, 0.333, 0], color=line_color)
        else:
            scene.Line([-1, -0.333 * inverse_ratio, 0], [1, -0.333 * inverse_ratio, 0], color=line_color)
            scene.Line([-1, 0.333 * inverse_ratio, 0], [1, 0.333 * inverse_ratio, 0], color=line_color)
            scene.Line([-0.333, -inverse_ratio, 0], [-0.333, inverse_ratio, 0], color=line_color)
            scene.Line([0.333, -inverse_ratio, 0], [0.333, inverse_ratio, 0], color=line_color)

    def _build_quad(self):
        """Build the scene ui graphics for the Quad composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        inverse_ratio = 1 / aspect_ratio
        # One vertical and one horizontal line through the center.
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([0, -1, 0], [0, 1, 0], color=line_color)
            scene.Line([-aspect_ratio, 0, 0], [aspect_ratio, 0, 0], color=line_color)
        else:
            scene.Line([0, -inverse_ratio, 0], [0, inverse_ratio, 0], color=line_color)
            scene.Line([-1, 0, 0], [1, 0, 0], color=line_color)

    def _build_crosshair(self):
        """Build the scene ui graphics for the Crosshair composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        # Four short ticks around the center, leaving the exact center open.
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([0, 0.05 * aspect_ratio, 0], [0, 0.1 * aspect_ratio, 0], color=line_color)
            scene.Line([0, -0.05 * aspect_ratio, 0], [0, -0.1 * aspect_ratio, 0], color=line_color)
            scene.Line([0.05 * aspect_ratio, 0, 0], [0.1 * aspect_ratio, 0, 0], color=line_color)
            scene.Line([-0.05 * aspect_ratio, 0, 0], [-0.1 * aspect_ratio, 0, 0], color=line_color)
        else:
            scene.Line([0, 0.05 * 1, 0], [0, 0.1 * 1, 0], color=line_color)
            scene.Line([0, -0.05 * 1, 0], [0, -0.1 * 1, 0], color=line_color)
            scene.Line([0.05 * 1, 0, 0], [0.1 * 1, 0, 0], color=line_color)
            scene.Line([-0.05 * 1, 0, 0], [-0.1 * 1, 0, 0], color=line_color)
        # Center dot; the tiny x offset is presumably a rendering workaround -- TODO confirm.
        scene.Points([[0.00005, 0, 0]], sizes=[2], colors=[line_color])

    def _build_safe_rect(self, percentage, color):
        """Build the scene ui graphics for the safe area rectangle

        Args:
            percentage (float): The 0-1 percentage the render target that the rectangle should fill.
            color: The color to draw the rectangle wireframe with.
        """
        aspect_ratio = self.get_aspect_ratio()
        inverse_ratio = 1 / aspect_ratio
        # Full frame is 2 units across on the constrained axis, hence the *2.
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Rectangle(aspect_ratio*2*percentage, 1*2*percentage, thickness=1, wireframe=True, color=color)
        else:
            scene.Rectangle(1*2*percentage, inverse_ratio*2*percentage, thickness=1, wireframe=True, color=color)

    def _build_letterbox(self):
        """Build the scene ui graphics for the letterbox."""
        aspect_ratio = self.get_aspect_ratio()
        letterbox_color = cl.letterbox_default
        letterbox_ratio = self.model.letterbox_ratio.as_float

        def build_letterbox_helper(width, height, x_offset, y_offset):
            # Draws a pair of mirrored bars (top/bottom or left/right).
            move = scene.Matrix44.get_translation_matrix(x_offset, y_offset, 0)
            with scene.Transform(transform=move):
                scene.Rectangle(width * 2, height * 2, thickness=0, wireframe=False, color=letterbox_color)
            move = scene.Matrix44.get_translation_matrix(-x_offset, -y_offset, 0)
            with scene.Transform(transform=move):
                scene.Rectangle(width * 2, height * 2, thickness=0, wireframe=False, color=letterbox_color)

        # Wider target ratio -> horizontal bars (top/bottom); narrower ->
        # vertical bars (left/right). Each branch computes the bar size and
        # its center offset in the current scene view's coordinate system.
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            if letterbox_ratio >= aspect_ratio:
                height = 1 - aspect_ratio / letterbox_ratio
                rect_height = height / 2
                rect_offset = 1 - rect_height
                build_letterbox_helper(aspect_ratio, rect_height, 0, rect_offset)
            else:
                width = aspect_ratio - letterbox_ratio
                rect_width = width / 2
                rect_offset = aspect_ratio - rect_width
                build_letterbox_helper(rect_width, 1, rect_offset, 0)
        else:
            inverse_ratio = 1 / aspect_ratio
            if letterbox_ratio >= aspect_ratio:
                height = inverse_ratio - 1 / letterbox_ratio
                rect_height = height / 2
                rect_offset = inverse_ratio - rect_height
                build_letterbox_helper(1, rect_height, 0, rect_offset)
            else:
                width = (aspect_ratio - letterbox_ratio) * inverse_ratio
                rect_width = width / 2
                rect_offset = 1 - rect_width
                build_letterbox_helper(rect_width, inverse_ratio, rect_offset, 0)

    def get_aspect_ratio(self):
        """Get the aspect ratio of the viewport.

        Returns:
            float: The viewport aspect ratio.
        """
        return self._aspect_ratio
class ReticleMenu:
    """The popup reticle menu"""

    def __init__(self, model: ReticleModel):
        """ReticleMenu constructor

        Stores the model and builds the Reticle button.

        Args:
            model (ReticleModel): The reticle model
        """
        self.model = model
        self.button = ui.Button("Reticle", width=0, height=0, mouse_pressed_fn=self.show_reticle_menu,
                                style={"margin": 10, "padding": 5, "color": cl.white})
        self.reticle_menu = None

    def destroy(self):
        """Release the button and the popup menu."""
        self.button.destroy()
        self.button = None
        self.reticle_menu = None

    def on_group_check_changed(self, safe_area_group, model):
        """Enables/disables safe area groups

        When a safe area checkbox state changes, all the widgets of the respective
        group should be enabled/disabled.

        Args:
            safe_area_group (HStack): The safe area group to enable/disable
            model (SimpleBoolModel): The safe group checkbox model.
        """
        safe_area_group.enabled = model.as_bool

    def on_composition_mode_changed(self, guideline_type):
        """Sets the selected composition mode.

        When a composition button is clicked, it should be checked on and the other
        buttons should be checked off. Sets the composition mode on the ReticleModel too.

        Args:
            guideline_type (CompositionGuidelines): The composition mode that was clicked.
        """
        self.model.composition_mode.set_value(guideline_type)
        self.comp_off_button.checked = guideline_type == CompositionGuidelines.OFF
        self.comp_thirds_button.checked = guideline_type == CompositionGuidelines.THIRDS
        self.comp_quad_button.checked = guideline_type == CompositionGuidelines.QUAD
        self.comp_crosshair_button.checked = guideline_type == CompositionGuidelines.CROSSHAIR

    def show_reticle_menu(self, x, y, button, modifier):
        """Build and show the reticle menu popup.

        Invoked as the Reticle button's mouse_pressed_fn; the menu is rebuilt
        from the current model state on every press.

        Args:
            x (float): Mouse press x position.
            y (float): Mouse press y position.
            button: Mouse button pressed. [Unused]
            modifier: Keyboard modifier state. [Unused]
        """
        self.reticle_menu = ui.Menu("Reticle", width=400, height=200)
        self.reticle_menu.clear()

        with self.reticle_menu:
            with ui.Frame(width=0, height=100):
                with ui.HStack():
                    # Left column: composition guideline buttons (2x2 grid).
                    with ui.VStack():
                        ui.Label("Composition", alignment=ui.Alignment.LEFT, height=30)
                        with ui.VGrid(style=styles.comp_group_style, width=150, height=0,
                                      column_count=2, row_height=75):
                            current_comp_mode = self.model.composition_mode.as_int
                            with ui.HStack():
                                off_checked = current_comp_mode == CompositionGuidelines.OFF
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.OFF)
                                self.comp_off_button = ui.Button("Off", name="Off", checked=off_checked,
                                                                 width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                thirds_checked = current_comp_mode == CompositionGuidelines.THIRDS
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.THIRDS)
                                self.comp_thirds_button = ui.Button("Thirds", name="Thirds", checked=thirds_checked,
                                                                    width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                quad_checked = current_comp_mode == CompositionGuidelines.QUAD
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.QUAD)
                                self.comp_quad_button = ui.Button("Quad", name="Quad", checked=quad_checked,
                                                                  width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                crosshair_checked = current_comp_mode == CompositionGuidelines.CROSSHAIR
                                callback = partial(self.on_composition_mode_changed,
                                                   CompositionGuidelines.CROSSHAIR)
                                self.comp_crosshair_button = ui.Button("Crosshair", name="Crosshair",
                                                                       checked=crosshair_checked, width=70, height=70,
                                                                       clicked_fn=callback)
                    ui.Spacer(width=10)
                    # Right column: safe-area sliders and letterbox controls.
                    # Each row's widgets are grouped in an HStack whose enabled
                    # state follows its checkbox (see on_group_check_changed).
                    with ui.VStack(style=styles.safe_areas_group_style):
                        ui.Label("Safe Areas", alignment=ui.Alignment.LEFT, height=30)
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.action_safe_enabled)
                            action_safe_group = ui.HStack(enabled=self.model.action_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, action_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with action_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Action Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=14)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.action_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="ActionSwatch", height=5)
                                    ui.Spacer()
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.title_safe_enabled)
                            title_safe_group = ui.HStack(enabled=self.model.title_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, title_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with title_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Title Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=25)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.title_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="TitleSwatch", height=5)
                                    ui.Spacer()
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.custom_safe_enabled)
                            custom_safe_group = ui.HStack(enabled=self.model.custom_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, custom_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with custom_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Custom Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=5)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.custom_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="CustomSwatch", height=5)
                                    ui.Spacer()
                        ui.Label("Letterbox", alignment=ui.Alignment.LEFT, height=30)
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.letterbox_enabled)
                            letterbox_group = ui.HStack(enabled=self.model.letterbox_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, letterbox_group)
                            cb.model.add_value_changed_fn(callback)
                            with letterbox_group:
                                ui.Spacer(width=10)
                                ui.Label("Letterbox Ratio", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=5)
                                ui.FloatDrag(self.model.letterbox_ratio, width=35, min=0.001, step=0.01)
        # Anchor the popup so it opens above and to the left of the click point.
        self.reticle_menu.show_at(x - self.reticle_menu.width, y - self.reticle_menu.height)
| 20,348 | Python | 52.691293 | 129 | 0.543886 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/tests/reticle_tests.py | """ Tests Module
TODO:
* Write actual tests.
"""
from pathlib import Path
from typing import Optional
import carb
import omni.kit
import omni.ui as ui
from omni.ui.tests.test_base import OmniUiTest
from maticodes.viewport.reticle.extension import CameraReticleExtension
# The extension's data/ directory; golden images live in its tests/ subfolder.
CURRENT_PATH = Path(__file__).parent.joinpath("../../../../data")


class TestReticle(OmniUiTest):
    """Golden-image UI test case for the reticle extension."""

    # Before running each test
    async def setUp(self):
        """Create an isolated 256x256 viewport-like window for image comparison."""
        await super().setUp()
        self._golden_img_dir = CURRENT_PATH.absolute().resolve().joinpath("tests")
        self._all_widgets = []
        self._settings = carb.settings.get_settings()
        # Zero out the viewport display options for stable golden images
        # (presumably hides grid/axes overlays -- TODO confirm); the original
        # value is restored in tearDown.
        self._original_value = self._settings.get_as_int("/persistent/app/viewport/displayOptions")
        self._settings.set_int("/persistent/app/viewport/displayOptions", 0)
        # Create test area
        await self.create_test_area(256, 256)
        window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR | ui.WINDOW_FLAGS_NO_TITLE_BAR | ui.WINDOW_FLAGS_NO_RESIZE
        self._test_window = ui.Window(
            "Viewport",
            dockPreference=ui.DockPreference.DISABLED,
            flags=window_flags,
            width=256,
            height=256,
            position_x=0,
            position_y=0,
        )
        # Override default background
        self._test_window.frame.set_style({"Window": {"background_color": 0xFF000000, "border_color": 0x0, "border_radius": 0}})
        # Tick the app twice so the window is fully built before the test body runs.
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()

    # After running each test
    async def tearDown(self):
        """Restore settings and release references created in setUp."""
        self._golden_img_dir = None
        self._test_window = None
        self._settings.set_int("/persistent/app/viewport/displayOptions", self._original_value)
        await super().tearDown()

    async def test_reticle_menu_button(self):
        """Compare the rendered test window against the stored golden image."""
        await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_reticle_menu_button.png")
| 1,962 | Python | 32.844827 | 128 | 0.649847 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/tests/__init__.py | from maticodes.viewport.reticle.tests.reticle_tests import TestReticle | 70 | Python | 69.99993 | 70 | 0.885714 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/config/extension.toml | [package]
version = "1.4.0"
title = "Camera Reticle"
description="Adds a camera reticle featuring composition guidelines, safe area guidelines, and letterbox."
authors=["Matias Codesal <[email protected]>"]
readme = "docs/README.md"
changelog="docs/CHANGELOG.md"
repository = ""
category = "Rendering"
keywords = ["camera", "reticle", "viewport"]
preview_image = "data/preview.png"
icon = "icons/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.ui.scene" = {}
"omni.kit.viewport.utility" = {}
# Main python module this extension provides, it will be publicly available as "import maticodes.viewport.reticle".
[[python.module]]
name = "maticodes.viewport.reticle"
| 677 | TOML | 28.47826 | 106 | 0.734121 |
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/docs/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
## [Unreleased]
## [1.4.0] - 2022-09-09
### Added
- Fixed bad use of viewport window frame for VP Next
- Now use ViewportAPI.subscribe_to_view_change() on VP Next
## [1.3.0] - 2022-07-10
### Added
- `omni.kit.viewport.utility` dependency.
### Changed
- Refactored to use `omni.kit.viewport.utility`. (Only works with Kit 103.1.2+ now.)
- Renamed `reticle.py` to `views.py`
- Moved **Reticle** button to the bottom right of the viewport instead of bottom left.
## [1.2.0] - 2022-05-24
### Changed
- Refactored to use VP1 instead of hybrid VP1/2
- Renamed "draw" functions to "build"
- Moved color constants to omni.ui.color
## [1.1.0] - 2022-03-31
### Changed
- Improved the cleanup code when the extension is shutdown.
## [1.0.0] - 2022-03-28
### Added
- Added extension icon
- Applies overlay to all existing viewports on extension startup
- Added docstrings
- Refactored to use Model-View pattern
- Now supports when viewport is narrower than the render resolution aspect ratio.
## [0.1.0] - 2022-03-25
### Added
- Initial add of the Camera Reticle extension. | 1,150 | Markdown | 27.774999 | 86 | 0.712174 |
heavyai/omni-component/README.md | # HEAVY.AI | Omni.Ui Component System
This component system was first presented during a NVIDIA Partner Spotlight with HEAVY.AI and can be found [here on YouTube](https://youtu.be/QhBMgx2G86g?t=1640)

## Goals
1. Provide a means to encapsulate functions and `omni.ui` widgets to modularize UI code by its purpose to enforce boundaries using separation of concerns
2. Standardize the creation and implementation of UI components in a manner that enforces self-documentation and a homogenous UI codebase
3. Provide the utility of updating and re-rendering specific components without re-rendering components that have not changed
## Examples
### Hello World
```python
from heavyai.ui.component import Component
class SimpleLabel(Component):
def render(self):
ui.Label("Hello World")
# USAGE
SimpleLabel()
class CustomLabel(Component):
value: str = "Hello World"
def render(self):
ui.Label(self.value)
# USAGE
CustomLabel(value="Hello There")
```
### Internal State
```python
from heavyai.ui.component import Component
class ButtonCounter(Component):
value: int = 0
on_change: Optional[Callable[[int], None]]
def _handle_click(self):
self.value += 1
if self.on_change:
self.on_change(self.value)
self.update()
def render(self):
with self.get_root(ui.HStack):
ui.Label(str(self.value))
ui.Button("Increment", clicked_fn=self._handle_click()
# USAGE
ButtonCounter(on_change=lambda val: print(val))
```
### Subscribing Components
```python
from heavyai.ui.component import Component
class UsdSubscribingComponent(Component):
prim: pxr.Usd.Prim
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.subscription: carb.Subscription = (
usd.get_watcher().subscribe_to_change_info_path(
path=self.prim.GetPath(), on_change=self._handle_prim_change
)
)
def _handle_prim_change(self, prim_attr_path):
self.update()
def render(self):
with self.get_root(ui.VStack):
pass
def destroy(self):
self.subscription.unsubscribe()
class AbstractModelSubscribingComponent(Component):
model: sc.AbstractManipulatorModel
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.subscription: carb.Subscription = (
self.model.subscribe_item_changed_fn(
self._handle_model_item_changed
)
)
def _handle_model_item_changed(self, model, item):
self.update()
def render(self):
with self.get_root(ui.VStack):
pass
def destroy(self):
self.subscription.unsubscribe()
```
## Re-rendering “In Place”
Similar to the ability to update the `style` property of any `ui.Widget` element or the `text` property of `ui.Label` and `ui.Button` elements, `Components` can also be updated after they are created. Components can update themselves in response to subscribed events or user interactions, or components can be updated elsewhere via reference. This *drastically* increases the composability of UI elements.
### Using Setters
```python
class CustomLabel(Component):
value: str = "Hello World"
def set_value(self, value: str):
self.value = value
self.update()
def render(self):
ui.Label(self.value)
hello_world_label = CustomLabel()
hello_world_label.set_value(
"Goodbye, and see you later"
)
```
### Using a Reference
```python
hello_world_label = CustomLabel()
hello_world_label.value = "Goodbye, and see you later"
hello_world_label.update()
```
## Caveat: Destroying a Component
Components are not automatically deleted when cleared from a container (or should otherwise be destroyed), so if a `destroy()` method is provided, a reference to that component must be stored and destroyed by the component’s “parent” when appropriate, resulting in a chain of saved references and destroy calls. This is often the case when a component sets up a subscription; the subscription must be unsubscribed when the component is removed. See 'Subscribing Components' above for examples of components that require a `destroy` method.
```python
class ParentComponent(Component):
def render(self):
prim = # get prim reference
with self.get_root(ui.VStack):
self.child = UsdSubscribingComponent(prim=prim)
def destroy(self):
self.child.destroy()
```
| 4,626 | Markdown | 27.73913 | 539 | 0.689148 |
heavyai/omni-component/exts/heavyai.ui.component/heavyai/ui/component/extension.py | from __future__ import annotations
import asyncio
import contextlib
from typing import Dict, Optional, TYPE_CHECKING
import omni.kit.app
if TYPE_CHECKING:
import omni.ui as ui
class Component:
    """
    The base class that UI elements should be subclassed from.

    Subclasses declare their props as annotated class attributes and implement
    ``render()`` to build their ``omni.ui`` widget tree. Keyword arguments
    passed to the constructor must match a declared (annotated) prop.

    Attributes
    ----------
    name : Optional[str]
        The name of the root container.
    style : Optional[Dict]
        The local style of the root container.
    height : Optional[int]
        The height of the root container.
    width : Optional[int]
        The width of the root container
    style_type_name_override : Optional[str]
        By default, we use typeName to look up the style.
        But sometimes it's necessary to use a custom name.
        For example, when a widget as a part of another widget.(Label is a part of Button)
        This property can override the name to use in style.
    """

    style: Optional[Dict] = None
    height: Optional[int] = None
    width: Optional[int] = None
    name: Optional[str] = None
    style_type_name_override: Optional[str] = None

    def __init__(self, render_on_init=True, **kwargs):
        """
        Parameters
        ----------
        render_on_init : bool
            If the render method should be called upon component creation

        Raises
        ------
        AssertionError
            If a keyword argument does not match a declared (annotated) prop.
        """
        # ui.Container is ui.VStack/HStack/ZStack/etc
        self._root: ui.Container = None
        self._debounce_task: asyncio.Future = None

        props = self.get_props()  # grab declared component props
        for k, v in kwargs.items():
            # Explicit check instead of `assert`: assert statements are
            # stripped under `python -O`, which would silently accept
            # undeclared props. AssertionError is kept for compatibility
            # with callers that catch it.
            if k not in props:
                raise AssertionError(f"Prop '{k}' must be annotated") from None
            setattr(self, k, v)  # set props

        # in rare situations you may need to choose when the component initially renders
        if render_on_init:
            self.render()

    @classmethod
    def get_props(cls):
        """Collect annotated props from this class and all of its bases."""
        d = {}
        for c in cls.mro():
            try:
                d.update(**c.__annotations__)
            except AttributeError:
                # class in the MRO without its own __annotations__
                pass
        return d

    @property
    def visible(self):
        """Visibility of the root container; False if not yet rendered."""
        if self._root:
            return self._root.visible
        return False

    @visible.setter
    def visible(self, new_visible):
        if not self._root:
            raise Exception("Component has not been rendered") from None
        self._root.visible = new_visible

    @property
    def enabled(self):
        """Enabled state of the root container; None if not yet rendered."""
        if self._root:
            return self._root.enabled

    @enabled.setter
    def enabled(self, value):
        if self._root:
            self._root.enabled = value

    def get_root(self, Container: ui.Container, default_visible=True, **kwargs):
        """
        Creates and returns a new container upon initial call.
        Clears the container and returns reference upon subsequent calls.
        This allows a component to be re-rendered without losing its positioning
        """
        if self._root:
            self._root.clear()
        else:
            # forward declared layout/style props to the container constructor
            if self.height is not None:
                kwargs.update(height=self.height)
            if self.width is not None:
                kwargs.update(width=self.width)
            if self.style is not None:
                kwargs.update(style=self.style)
            if self.name is not None:
                kwargs.update(name=self.name)
            if self.style_type_name_override is not None:
                kwargs.update(style_type_name_override=self.style_type_name_override)
            self._root = Container(**kwargs)
            self._root.visible = default_visible
        return self._root

    async def render_async(self):
        """Waits for next frame before re-rendering"""
        await omni.kit.app.get_app().next_update_async()
        self.render()

    def update(self, loop=None):
        """Used to re-render the component.

        The event loop is resolved at call time; the previous signature
        evaluated ``asyncio.get_event_loop()`` as a default argument, which
        captured whatever loop existed at import time (evaluated-once default
        pitfall) and could schedule onto a stale/closed loop.
        """
        if loop is None:
            loop = asyncio.get_event_loop()
        asyncio.ensure_future(self.render_async(), loop=loop)

    def update_debounce(self, delay=0.2):
        """
        Queues re-render after a delay and resets the timer on subsequent calls if timer has not completed
        """
        async def run_after_delay():
            await asyncio.sleep(delay)
            await self.render_async()

        # cancel a still-pending timer (suppress: task may be None or done)
        with contextlib.suppress(Exception):
            self._debounce_task.cancel()
        self._debounce_task = asyncio.ensure_future(run_after_delay())

    def render(self):
        """Build the widget tree. Must be overridden by subclasses."""
        raise NotImplementedError()

    def __del__(self):
        """
        Note: `__del__` is not reliably called when parent component is destroyed or re-rendered
        If a component requires clean-up (such as subscriptions, windows, frames, or event listeners),
        the parent component/class must manually call destroy when appropriate.
        """
        self.destroy()

    def destroy(self):
        """
        If a component requires clean-up (such as subscriptions, windows, frames, or event listeners),
        the parent component/class must manually call destroy when appropriate.
        """
        pass
| 5,135 | Python | 31.506329 | 106 | 0.601558 |
heavyai/omni-component/exts/heavyai.ui.component/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["HEAVY.AI", "Brendan Turner", "Chris Matzenbach"]
# The title and description fields are primarily for displaying extension info in UI
title = "Heavy Omni.UI Component System"
description="Component class for UI organization and encapsulation"
# Path (relative to the root) or content of readme markdown file for UI.
readme = "./docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/heavyai/omni-component"
# One of categories for UI.
category = "other"
# Keywords for the extension
keywords = ["heavyai", "component", "react"]
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "resources/partner-spotlight-slide.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "resources/logo_black.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.ui" = {}
# Main python module this extension provides, it will be publicly available as "import heavyai.ui.component".
[[python.module]]
name = "heavyai.ui.component"
| 1,262 | TOML | 33.135134 | 118 | 0.751189 |
syntway/model_exploder/README.md | # Model Exploder Tool

Model Exploder separates a 3D model into its component parts for a better view of their relationship and how they fit together.
Model separation happens as if by a small controlled explosion, emanating from its center. This is often known as an [exploded-view](https://en.wikipedia.org/wiki/Exploded-view_drawing) of the model.
Exploded-views can be used to understand a model from its components and can also be used to create drawings for parts catalogs or assembly/maintenance/instruction information. Or in general, just to blow-up a 3D model.
## Quick Links
* [Features](#features)
* [Installation](#installation)
* [How to Use](#how-to-use)
* [Credits](#credits)
* [Examples](#examples)
## Features
- Easy to use: select a model, click the Use button and move the Distance slider.
- Includes several ways to explode the model around a central point, axis or plane.
- Interactive editing of the explosion center: just drag the "Center" manipulator in the viewport.
- Works with meshes, USD Shapes, references/payloads. Point instances and skeletons are moved as a whole.
- Adds Undo-Redo state when applying changes.
- Works with NVIDIA's Omniverse Create, Code 2022+ or any other Kit-based apps. Compatible with multiple viewports and with the legacy viewport of older Omniverse versions.
## Installation
This tool is a plain Omniverse extension that can be installed like any other.
1. Download the latest [release](https://github.com/syntway/model_exploder/releases) or clone the [repository](https://github.com/syntway/model_exploder) into your computer.
2. In the Omniverse App (Create, Code, etc.), open the Extensions window from: Window menu > Extensions.
3. In the Extensions window, open the Settings page, by clicking the small gear button at the top of the left bar.
4. In the Settings page that appears at the right, you'll see a list of Extension Search Paths. Add the path location of the exts/ subfolder. This subfolder is inside the location to where you installed in step 1.
5. In the search field at the top left bar, next to the gear button, type model exploder - the tool should appear listed - click it.
6. Information about the Model Exploder tool will appear at the right side: check the Enabled and the Autoload checkboxes. The tool is now installed in your Omniverse app.
## How to Use
Select Model Exploder from the Window menu - the Model Exploder tool window appears:

Select models to explode: at least two parts must be selected (meshes, USD Shapes, etc).
The top button changes to "Click to use...". Click it to explode the selected parts.

Move the Distance slider and the parts are exploded from their shared center:

The Center combo box chooses the type of explosion movement: from a central point, around an axis or expanding from a plane.

Move the center point/axis/plane by dragging the Center manipulator in the viewport. Click the Recenter button to bring it back to the default middle position:

The Options section allows customization of the tool's behavior:

The available options are:
- Acceleration from Center: exploded parts accelerate based on their initial distance from Center.
This setting controls how farthest parts accelerate more than nearest ones.
- Distance Multiplier: multiply the explosion distance selected in the above slider.
For smaller or larger explosion scales.
- Initial Bounds Visibility: the visibility of the initial bounding box for the used shapes, from transparent to fully visible.
- Unselect Parts on Use: when starting to use a group of selected parts, should they be unselected for simpler visuals?
Finally, click the Apply button to add an Undo-Redo state. Or Cancel to return to the initial parts positions.
### Tips
- Click the ( i ) button for help and more information (opens this page).
- On complex models, the first interaction with the Distance slider might take a few seconds - next ones are much faster.
- If model parts do not separate and remain joined to each other:
- Make sure model is divided in parts (meshes, USD shapes, etc), as this tools works by moving those parts.
- With the Distance slider away from its leftmost position, move the Center manipulator in the viewport into the middle of the parts group.
- Separate the group of "stuck" parts before separating the rest of the model.
- The initial bounds preview and center manipulator work in the active (last used) viewport. To change viewports, close the Model Exploder window and open again after using the new viewport.
## Credits
This tool is developed by [Syntway](https://www.syntway.com), the VR/Metaverse tools division of FaronStudio.
The tool uses SVG icons from [SVG Repo](https://www.svgrepo.com/).
3D models used in examples are from: [Bastien Genbrugge](https://sketchfab.com/3d-models/spherebot-17baf2bd295f460a924e62854ced1427), [mdesigns100](https://3dexport.com/free-3dmodel-residential-building-model-296192.htm), [dap](https://3dexport.com/free-3dmodel-ym-house-185521.htm), [KatSy](https://3dsky.org/3dmodels/show/chastnyi_dom_3) and Pixar's [Kitchen Set](https://graphics.pixar.com/usd/release/dl_downloads.html).
## Examples




| 5,622 | Markdown | 50.587155 | 424 | 0.777303 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/style.py | from pathlib import Path
import omni.ui as ui
from omni.ui import color as cl
from .libs.ui_utils import UiPal
# Filesystem anchors, resolved relative to this source file:
# <ext root>/data/icons holds the SVG assets referenced below.
THIS_FOLDER_PATH = Path(__file__).parent.absolute()
EXT_ROOT_FOLDER_PATH = THIS_FOLDER_PATH.parent.parent
ICONS_PATH = EXT_ROOT_FOLDER_PATH.joinpath("data").joinpath("icons")
# window frame cascade
# omni.ui style dict applied to the tool window's frame. Keys are omni.ui
# style selectors: "Type", "Type:state" (e.g. :disabled, :hovered) and
# "Type::name" for widgets given an explicit name.
WINDOW_FRAME = {
    "ComboBox": {
        "border_radius": 6,
        "margin": 0,
    },
    "ComboBox:disabled": {
        "color": UiPal.TEXT_DISABLED,
    },
    "Slider": {
        # handle-style slider; TRANSP_NOT_0 hides the numeric value text
        "draw_mode": ui.SliderDrawMode.HANDLE,
        "color": UiPal.TRANSP_NOT_0,
        "border_radius": 6,
    },
    "Slider:disabled": {
        "secondary_color": UiPal.TEXT_DISABLED,
    },
    "CheckBox:disabled": {
        "background_color": UiPal.TEXT_DISABLED,
    },
    "Button.Label:disabled": {
        "color": UiPal.TEXT_DISABLED,
    },
    # "ever_bright" buttons keep a white label even while disabled
    "Button.Label::ever_bright": {
        "color": cl.white,
    },
    "Button.Label::ever_bright:disabled": {
        "color": cl.white,
    },
    # info icon button: dims to the normal text color, white on hover
    "Image::info": {
        "image_url": str(ICONS_PATH / "info.svg"),
        "color": UiPal.TEXT,
    },
    "Image::info:hovered": {
        "image_url": str(ICONS_PATH / "info.svg"),
        "color": cl.white,
    },
    "Line": {
        "color": UiPal.TEXT_DISABLED
    },
    "CollapsableFrame": {
        "border_radius": 4,
    },
}
| 1,358 | Python | 18.985294 | 68 | 0.552283 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/extension.py | from functools import partial
import asyncio
import omni.ext
import omni.ui as ui
import omni.kit.commands
import carb.settings
from .libs.app_utils import call_on_parts_ready, call_after_update, get_setting_or
from .window import Window
from .engine import Engine
from . import const
class Extension(omni.ext.IExt):
    """Extension entry point: owns the Model Exploder window and keeps the
    Window-menu checkbox in sync with the window's visibility."""
    def on_startup(self, ext_id):
        """Register the window with the Workspace and the editor menu.

        The actual registration is deferred via call_on_parts_ready so the
        stage is available before the window can be shown.
        """
        # print("ext.on_startup", ext_id)
        self._window = None
        self._ext_id = ext_id
        def build():
            # let ui.Workspace show/hide the window by name
            ui.Workspace.set_show_window_fn(const.WINDOW_NAME, partial(self.show_window, None))
            # carb.settings.get_settings().set("persistent/exts/syntway.model_exploder/windowShowOnStartup", True)
            show = get_setting_or(const.SETTINGS_PATH + "windowShowOnStartup", False)
            ed_menu = omni.kit.ui.get_editor_menu()
            if ed_menu:
                # checkbox menu entry; toggling it calls show_window(menu, value)
                self._menu = ed_menu.add_item(const.MENU_PATH, self.show_window, toggle=True, value=show)
            if show:
                self.show_window(None, True) # ui.Workspace.show_window(WINDOW_NAME)
        call_on_parts_ready(build, 1) # stage ready
    def on_shutdown(self):
        """Unregister from the Workspace/menu and destroy any open window."""
        # print("ext.on_shutdown")
        ui.Workspace.set_show_window_fn(const.WINDOW_NAME, None)
        ed_menu = omni.kit.ui.get_editor_menu()
        if ed_menu and omni.kit.ui.editor_menu.EditorMenu.has_item(const.MENU_PATH):
            ed_menu.remove_item(const.MENU_PATH)
            self._menu = None
        if self._window:
            # destroy(True): immediate teardown (see Window.destroy)
            self._window.destroy(True)
            self._window = None
    def show_window(self, menu, value):
        """Menu/Workspace callback: show (value truthy) or hide the window.

        Hiding only sets visible=False; actual destruction happens later in
        _visibility_changed_fn so teardown runs outside this callback.
        """
        # print("ext.show_window", value, self._window)
        if value: # show
            if self._window is None:
                # lazily create the window on first show
                self._window = Window(const.WINDOW_NAME, self._ext_id)
                self._window.set_visibility_changed_fn(self._visibility_changed_fn)
            else:
                self._window.show()
        elif self._window:
            self._window.visible = False # will destroy in _visibility_changed_fn
    def _set_menu(self, value):
        # keep the Window-menu checkbox in sync with actual visibility
        # print("ext._set_menu", value)
        ed_menu = omni.kit.ui.get_editor_menu()
        if ed_menu:
            ed_menu.set_value(const.MENU_PATH, value)
    def _visibility_changed_fn(self, visible):
        """Window visibility callback: sync the menu and, on hide, destroy
        the window on the next app update (not re-entrantly)."""
        # print("ext._visibility_changed_fn", visible)
        self._set_menu(visible)
        if not visible: # destroy window
            def destroy_window():
                # print("ext.destroy_window", self._window)
                if self._window:
                    self._window.destroy(False)
                    self._window = None
            call_after_update(destroy_window)
class ExplodeEngineApplyCommand(omni.kit.commands.Command):
    """
    Undo/redoable command used by engine to apply final and initial position lists
    Don't use outside this extension.
    States are a tuple of (is_reset, change_list, time_code), as built by
    Engine._prepare_apply_state.
    """
    def __init__(self, initial_state, final_state, stage):
        """
        Parameters:
            initial_state: state restoring pre-explosion transforms (used on undo).
            final_state: state holding the exploded transforms (used on do/redo).
            stage: the stage whose prims the transforms are applied to.
        """
        super().__init__()
        self._initial_state = initial_state
        self._final_state = final_state
        self._stage = stage
    def do(self):
        # last arg None: apply without an Engine instance (no change suppression)
        Engine.apply_state(self._final_state, self._stage, None)
    def undo(self):
        # restore the transforms captured before the explosion
        Engine.apply_state(self._initial_state, self._stage, None)
omni.kit.commands.register_all_commands_in_module(__name__)
| 3,380 | Python | 26.266129 | 114 | 0.606805 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/engine.py | import asyncio, copy
import carb
import omni.ext
import omni.ui as ui
from omni.ui import scene as sc
from omni.ui import color as cl
import omni.kit.commands
import omni.usd
import omni.timeline
from pxr import Usd, UsdGeom, UsdSkel, Sdf, Tf
import pxr.Gf as Gf
import omni.kit.notification_manager as nm
from omni.usd.commands import TransformPrimCommand, TransformPrimSRTCommand
from .libs.usd_helper import UsdHelper
from .libs.usd_utils import (set_prim_translation, set_prim_translation_fast,
set_prim_transform, get_prim_transform,
get_prim_translation, create_edit_context)
from .libs.viewport_helper import ViewportHelper
from .libs.app_helper import AppHelper
from .libs.app_utils import get_setting_or, set_setting, call_after_update
from . import const
APPLY_ASYNC = True
class Engine():
def __init__(self):
self.meshes_base_aabb = Gf.Range3d()
self._meshes = []
self._dist = 0
self._center_mode = get_setting_or(const.SETTINGS_PATH + const.CENTER_MODE_SETTING, const.DEFAULT_CENTER_MODE)
self._dist_mult = get_setting_or(const.SETTINGS_PATH + const.DIST_MULT_SETTING, const.DEFAULT_DIST_MULT)
self._order_accel = get_setting_or(const.SETTINGS_PATH + const.ACCEL_SETTING, const.ACCEL_DEFAULT)
self._explo_center = Gf.Vec3d(0)
self._last_explo_center = Gf.Vec3d(0)
self._apply_needed = False
self._apply_task = None
self._recalc_changed_needed = set()
self._ignore_next_objects_changed = 0 # 0=no, 1=only next, 2:all until reset
self._dist_base_size = 100
self.usd = UsdHelper()
self._app = AppHelper()
self._app.add_update_event_fn(self._on_update)
stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event)
def destroy(self):
self._apply_cancel()
self._timeline_sub = None
self._recalc_changed_needed.clear()
if self.usd:
self.usd.remove_stage_objects_changed_fn(self._on_stage_objects_changed)
self.usd.detach()
self.usd = None
if self._app:
self._app.detach()
self._app = None
Engine._instance = None
def reset(self, set_to_initial):
self._apply_cancel()
if set_to_initial and self.dist > 0: # if dist is 0, nothing to do
self._apply(-2, self._explo_center, self._meshes) # returns prims to initial's
self._meshes.clear()
self._dist = 0
self.usd.remove_stage_objects_changed_fn(self._on_stage_objects_changed)
    def _on_update(self, _):
        """Per-frame tick: flush pending recalcs of changed prims, then run any
        pending apply (async task or synchronous, per APPLY_ASYNC)."""
        if self._recalc_changed_needed:
            self._recalc_changed(self._recalc_changed_needed)
            self._recalc_changed_needed.clear()
        if self._apply_needed:
            if APPLY_ASYNC:
                if not self._apply_task or self._apply_task.done():
                    self._apply_needed = False
                    # snapshot dist/center/meshes so the async task is not
                    # affected by changes made while it runs
                    dist = self._dist
                    explo_center = Gf.Vec3d(self._explo_center)
                    meshes = copy.copy(self._meshes)
                    self._apply_task = asyncio.ensure_future(self._async_apply(dist, explo_center, meshes))
                # else still applying last
            else:
                self._apply_needed = False
                self._apply(-1, self._explo_center, self._meshes) # -1: apply at current self._dist
def _on_stage_objects_changed(self, notice):
if self._ignore_next_objects_changed:
if self._ignore_next_objects_changed == 1:
self._ignore_next_objects_changed = 0
return
if not self._meshes: # should never happen?
return
# set filters out duplicate path property changes
changed_paths = set(Sdf.Path.GetAbsoluteRootOrPrimPath(i) for i in notice.GetChangedInfoOnlyPaths())
# print("_on_stage_objects_changed", changed_paths)
for n in changed_paths:
ch_path = n.GetPrimPath().pathString
# avoid camera changes
if ch_path.startswith("/OmniverseKit_") or ch_path.endswith("/animationData"):
continue
for p in self._meshes:
path = p["path"]
if path.startswith(ch_path):
self._recalc_changed_needed.add(path)
def _on_timeline_event(self, e):
# print("engine:_on_timeline_event", e.type)
if self.has_meshes:
if e.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_CHANGED):
self._ignore_next_objects_changed = 1
AVOID_CHILDREN_PRIM_TYPES = ["Camera"] # avoid recursion on these
@staticmethod
def _traverse_add_prim(list, prim):
"""Recursively traverse the hierarchy"""
if not prim.IsValid(): # might not exist anymore
return
prim_t = prim.GetTypeName()
if prim.HasAuthoredReferences(): # refs: check if any children
ref_list = []
children = prim.GetChildren()
for c in children:
Engine._traverse_add_prim(ref_list, c)
if ref_list: # add children but not itself
list += ref_list
else: # no children, add itself
list.append(prim)
return
if prim.IsA(UsdGeom.PointInstancer) or prim.IsA(UsdSkel.Root): # instance, SkelRoot: add but don't recurse inside
list.append(prim)
return
if prim.IsA(UsdGeom.Gprim):
list.append(prim)
if not prim_t in Engine.AVOID_CHILDREN_PRIM_TYPES:
children = prim.GetChildren()
for c in children:
Engine._traverse_add_prim(list, c)
def _sel_get_prim_paths_parent_first_order(self, paths):
stage = self.usd.stage
prims = []
for path in paths:
prim = stage.GetPrimAtPath(path)
prims.append(prim)
u_prims = []
for p in prims:
Engine._traverse_add_prim(u_prims, p)
return u_prims
def sel_capture(self, paths=None):
# print("sel_capture")
if paths is None:
paths = self.usd.get_selected_prim_paths()
# print("_sel_capture", paths)
u_prims = self._sel_get_prim_paths_parent_first_order(paths)
self._meshes = []
self._dist = 0
if len(u_prims) < 2:
return False
time_code = self.usd.timecode
xform_cache = UsdGeom.XformCache(time_code)
bbox_cache = UsdGeom.BBoxCache(time_code, [UsdGeom.Tokens.default_])
self._explo_center = Gf.Vec3d(0) # average of prim centroids
aa_bounds = Gf.Range3d()
# world positions
for prim in u_prims:
path = prim.GetPath().pathString
lbb = bbox_cache.ComputeLocalBound(prim)
lcent = lbb.ComputeCentroid()
ltrans = get_prim_translation(prim, time_code)
ldelta = ltrans - lcent # translation from centroid to the placing pos
wbb = bbox_cache.ComputeWorldBound(prim)
wbb_aa = wbb.ComputeAlignedRange()
aa_bounds.UnionWith(wbb_aa)
wtrans = wbb.ComputeCentroid()
lmat = get_prim_transform(prim, False, xform_cache, time_code)
# print(path, "local", lbb, lcent, ltrans, "world", wbb, wbb_aa, wtrans, lmat)
# prim, prim_path, untransformed/local mid, world_mid, initial_local_translation
entry = {"prim": prim, "path": path, "ini_wtrans": wtrans, "ldelta": ldelta, "ini_lmat": lmat}
self._meshes.append(entry)
# print(entry)
self._explo_center += wtrans
# centroid and base AA bounds
self._explo_center /= len(u_prims)
self._last_explo_center = self._explo_center
self.meshes_base_aabb = aa_bounds
# _dist_base_size size scale
size = aa_bounds.GetSize()
self._dist_base_size = max(size[0], size[1], size[2]) * 0.5
self._calc_dist_order()
# print(time_code, self._explo_center, self._dist_base_size)
self._ignore_next_objects_changed = 0
self.usd.add_stage_objects_changed_fn(self._on_stage_objects_changed)
# print("sel_capture end")
return True
def _recalc_changed(self, ch_paths):
time_code = self.usd.timecode
bbox_cache = UsdGeom.BBoxCache(time_code, [UsdGeom.Tokens.default_])
dist = self._dist
dist = self._calc_dist(dist)
for p in self._meshes:
path = p["path"]
if path in ch_paths: # only if changed
prim = p["prim"]
lbb = bbox_cache.ComputeLocalBound(prim)
lcent = lbb.ComputeCentroid()
ltrans = get_prim_translation(prim, time_code)
ldelta = ltrans - lcent
wbb = bbox_cache.ComputeWorldBound(prim)
new_wtrans = wbb.ComputeCentroid()
# calc dir
w_dir = new_wtrans - self._explo_center
w_dir = self._calc_normalized_dir(w_dir)
new_ini_wtrans = new_wtrans - w_dir * dist
p["ini_wtrans"] = new_ini_wtrans
p["ldelta"] = ldelta
# print("changed", path, new_wtrans, ldelta)
# not needed and conflicts with translate manipulator's dragging: self.apply_asap()
self._calc_dist_order()
def apply_asap(self):
self._apply_needed = True
def _apply_cancel(self):
if APPLY_ASYNC:
if self._apply_task:
if self._apply_task.done():
return
self._apply_task.cancel()
async def _async_apply(self, dist_value, explo_center, meshes):
self._apply(dist_value, explo_center, meshes)
self._apply_task = None
def _apply(self, dist, explo_center, meshes):
"""dist: -2: reset to stored initial pos, -1: use current self._dist, >=0: 0..1"""
if not meshes:
return
# print("_apply", dist)
time_code = self.usd.timecode
changes = self._prepare_apply_state(dist, explo_center, meshes, time_code, True)
is_reset = dist == -2
state = (is_reset, changes, time_code)
Engine.apply_state(state, self.usd.stage, self)
# print("_apply end")
def _prepare_apply_state(self, dist, explo_center, meshes, time_code, with_prims):
"""dist: -2: reset to stored initial pos, -1: use current self._dist, >=0: 0..1"""
if dist == -1:
dist = self._dist
# dist can now be [0..1] or -2 for reset to initial
if dist >= 0:
dist_factor = self._calc_dist(dist)
else:
dist_factor = dist
time_code = self.usd.timecode
xform_cache = UsdGeom.XformCache(time_code)
changes = []
for mp in meshes:
prim = mp["prim"]
if not prim.IsValid(): # avoid any invalidated prims, deleted for example
# print("skipping", prim)
continue
path = mp["path"]
ini_wtrans = mp["ini_wtrans"]
ldelta = mp["ldelta"]
prim = mp["prim"]
dist_order = mp["dist_order"]
if dist_factor >= 0:
# calc world pos
# calc dir
w_ini_vec = ini_wtrans - explo_center
w_ini_len = w_ini_vec.GetLength()
w_ini_len = max(w_ini_len, 1e-5)
w_dir = self._calc_normalized_dir(w_ini_vec)
order_factor = 1.0 + dist_order * self._order_accel
w_vec = w_dir * dist_factor * order_factor
dest_w_trans = ini_wtrans + w_vec
# get local->parent->world transforms
p2w = xform_cache.GetParentToWorldTransform(prim)
# transform back from world to local coords
w2p = p2w.GetInverse()
dest_ptrans = w2p.Transform(dest_w_trans)
# calc delta in mesh local/untransformed space
dest_ltrans = dest_ptrans + ldelta
# local trans, in parent space coords
ltrans = (dest_ltrans[0], dest_ltrans[1], dest_ltrans[2])
#print(prim, dest_w_trans, ltrans)
else:
ltrans = mp["ini_lmat"]
if with_prims:
changes.append((prim, path, ltrans))
else:
changes.append((None, path, ltrans))
return changes
@staticmethod
def apply_state(state, stage, instance):
# print("apply_state", state, instance)
is_reset, changes, time_code = state
if instance:
instance._ignore_next_objects_changed = 2
if not is_reset:
""" Slower alternative:
for ch in changes:
prim, path, ltrans = ch
# print(path,ltrans, type(ltrans))
cmd = TransformPrimSRTCommand(path=path,
new_translation=ltrans,
time_code=time_code)
cmd.do()
"""
stage = stage
sdf_change_block = 2
with Sdf.ChangeBlock():
for ch in changes:
prim, path, lmat = ch
if prim is None:
prim = stage.GetPrimAtPath(path)
# print(prim, ltrans)
with create_edit_context(path, stage):
set_prim_translation(prim, lmat, sdf_change_block=sdf_change_block, time_code=time_code)
#set_prim_translation_fast(prim, lmat, sdf_change_block=sdf_change_block, time_code=time_code)
else:
for ch in changes:
prim, path, ltrans = ch
# print(path,ltrans, type(ltrans))
cmd = TransformPrimCommand(path=path,
new_transform_matrix=ltrans,
time_code=time_code)
cmd.do()
if instance:
instance._ignore_next_objects_changed = 0
# print("apply_state end")
def commit(self):
time_code = self.usd.timecode
dist = -2
changes = self._prepare_apply_state(dist, self._explo_center, self._meshes, time_code, False)
is_reset = dist == -2
initial_state = (is_reset, changes, time_code)
dist = -1
changes = self._prepare_apply_state(dist, self._explo_center, self._meshes, time_code, False)
is_reset = dist == -2
final_state = (is_reset, changes, time_code)
self._ignore_next_objects_changed = 2
stage = self.usd.stage
omni.kit.commands.execute("ExplodeEngineApplyCommand",
initial_state=initial_state,
final_state=final_state,
stage=stage)
self._ignore_next_objects_changed = 0
self.reset(False)
"""
# compile transform list for undo
time_code = self.usd.timecode
xform_cache = UsdGeom.XformCache(time_code)
self._ignore_next_objects_changed = 2
xforms=[]
for mp in self._meshes:
p = mp["prim"]
path = mp["path"]
ini_mat = mp["ini_lmat"]
new_mat = get_prim_transform(p, False, xform_cache, time_code)
xforms.append((path, new_mat, ini_mat, time_code, False))
self.reset(False)
if xforms:
if True:
omni.kit.undo.begin_group()
for x in xforms:
omni.kit.commands.execute("TransformPrim",
path=x[0],
new_transform_matrix=x[1],
old_transform_matrix=x[2]
)
omni.kit.undo.end_group()
else:
omni.kit.commands.execute(
"TransformPrims", prims_to_transform=xforms
)
self._ignore_next_objects_changed = 0
"""
def _calc_dist(self, dist):
dist = dist ** const.DIST_EXP
dist = dist * self._dist_base_size * self._dist_mult
return dist
    def _calc_dir(self, dir):
        """Constrain *dir* in place for the current center mode.

        Mode 0 (point) leaves the vector untouched. Modes 1..3 (around an
        axis) zero the axis component. Modes 4..6 (from a plane) zero the two
        in-plane components, keeping only the component perpendicular to the
        plane. Mutates *dir*; returns None.
        """
        if self._center_mode >= 1 and self._center_mode <= 3: # around axis: zero axis displacement
            dir[self._center_mode - 1] = 0.
        elif self._center_mode >= 4: # from a plane
            i = self._center_mode - 4
            dir[i] = 0.
            dir[(i + 1) % 3] = 0.
    def _calc_normalized_dir(self, dir):
        """Constrain *dir* for the current center mode (see _calc_dir) and
        normalize it in place; near-zero vectors are left as-is to avoid
        dividing by ~0. Returns the (mutated) vector."""
        self._calc_dir(dir)
        if dir.GetLength() > 1e-6:
            dir.Normalize()
        return dir
def _calc_dist_order(self):
"""dist_order is the 0..1 position of the mesh with regard to _explo_center"""
min_len = float("inf")
max_len = -1
len_list = []
for mp in self._meshes:
vec = mp["ini_wtrans"] - self._explo_center
self._calc_dir(vec)
len = vec.GetLength()
len = max(len, 1e-5)
len_list.append(len)
min_len = min(len, min_len)
max_len = max(len, max_len)
max_min_range = max_len - min_len
max_min_range = max(max_min_range, 1e-5)
index = 0
for mp in self._meshes:
order = (len_list[index] - min_len) / max_min_range
mp["dist_order"] = order
index+=1
@property
def has_meshes(self):
return self.meshes_count >= 2
@property
def meshes_count(self):
return len(self._meshes)
@property
def stage_selection_meshes_count(self):
paths = self.usd.get_selected_prim_paths()
u_prims = self._sel_get_prim_paths_parent_first_order(paths)
return len(u_prims)
@property
def center(self):
return self._explo_center
@center.setter
def center(self, center):
self._explo_center = center
self._calc_dist_order()
self.apply_asap()
@property
def dist(self):
return self._dist
@dist.setter
def dist(self, d):
self._dist = d
self.apply_asap()
@property
def center_mode(self):
return self._center_mode
@center_mode.setter
def center_mode(self, c):
self._center_mode = c
set_setting(const.SETTINGS_PATH + const.CENTER_MODE_SETTING, self._center_mode)
self.apply_asap()
@property
def order_accel(self):
return self._order_accel
@order_accel.setter
def order_accel(self, v):
self._order_accel = v
set_setting(const.SETTINGS_PATH + const.ACCEL_SETTING, self._order_accel)
self.apply_asap()
@property
def dist_mult(self):
return self._dist_mult
@dist_mult.setter
def dist_mult(self, m):
self._dist_mult = m
set_setting(const.SETTINGS_PATH + const.DIST_MULT_SETTING, self._dist_mult)
self.apply_asap()
    def recenter(self):
        """Snap the explosion center back to the last computed centroid and re-apply."""
        self._explo_center = self._last_explo_center
        self.apply_asap()

    def is_centered(self):
        """True when the current center is (nearly) at the last computed centroid."""
        return Gf.IsClose(self._explo_center, self._last_explo_center, 1e-6)
| 19,891 | Python | 26.437241 | 126 | 0.539641 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/const.py | from omni.ui import color as cl
# Shared constants for the Model Exploder extension: window/UI strings,
# persisted-setting keys, engine defaults and tooltips.
DEV_MODE = 0
# extension/window
WINDOW_NAME = "Model Exploder"
MENU_PATH = f"Window/{WINDOW_NAME}"
SETTINGS_PATH = "persistent/exts/syntway.model_exploder/"
INFO_URL = "https://www.syntway.com/model_exploder/?info#how-to-use"
# ui
DISTANCE_LABEL = "Distance"
CENTER_LABEL = "Center"
SELECT_TO_EXPLODE_TEXT = "Start by selecting what to explode..."
SELECT_TO_USE_TEXT = "Click to use the {0} selected parts"
SELECTED_TEXT = "Exploding {0} parts"
DONE_TEXT = "Apply"
RESET_TEXT = "Cancel"
CENTER_TEXT = "Center"
RECENTER_TEXT = "Recenter"
OPTIONS_TITLE = "Options"
OPTIONS_DIST_MULT_LABEL = "Distance Multiplier"
# (label, multiplier) pairs for the Distance Multiplier combo
OPTIONS_DIST_MULT_COMBO_VALUES = [
    ("1x", 1.),
    ("5x ", 5.),
    ("10x", 10.),
    ("100x", 100.)
]
OPTIONS_ACCEL_LABEL = "Acceleration from Center"
OPTIONS_ACCEL_MAX = 5.
OPTIONS_BOUNDS_ALPHA_LABEL = "Initial Bounds Visibility"
OPTIONS_BOUNDS_ALPHA_SETTING = "boundsAlpha"
OPTIONS_BOUNDS_ALPHA_DEFAULT = 0.5
OPTIONS_UNSELECT_ON_USE_LABEL = "Unselect Parts on Use"
OPTIONS_UNSELECT_ON_USE_SETTING = "unselectOnUse"
OPTIONS_UNSELECT_ON_USE_DEFAULT = True
TIMELINE_RESET_TEXT = "Timeline has changed: resetting exploded meshes..."
# combo entry order matters: indexes are referenced by the *_FIRST constants below
CENTER_COMBO_LABELS = [
    "Point",
    "X Axis",
    "Y Axis",  # up
    "Z Axis",  # up
    "XY Plane",  # ground
    "YZ Plane",
    "ZX Plane"  # ground
]
# index of the first axis / first plane entry in CENTER_COMBO_LABELS
CENTER_COMBO_AXIS_FIRST = 1
CENTER_COMBO_AXIS_SUFFIX = " (Vertical)"
CENTER_COMBO_PLANE_FIRST = 4
CENTER_COMBO_PLANE_SUFFIX = " (Ground)"
# engine
CENTER_MANIP_LABEL_OFFSET = -11
CENTER_MANIP_LABEL_SIZE = 15
DEFAULT_CENTER_MODE = 0
CENTER_MODE_SETTING = "centerMode"
DEFAULT_DIST_MULT = 5.
DIST_MULT_SETTING = "distMult"
ACCEL_DEFAULT = 1.68
ACCEL_SETTING = "orderAccel"
DIST_EXP = 1.3
BOUNDS_BASE_AABB_COLOR = cl("#808080ff")  # rgba order
# tooltips
TOOLTIP_USE = "First select the models to explode, then click this button to use."
TOOLTIP_INFO = "Help and more info on this tool."
TOOLTIP_DIST = "Select the explosion distance. For larger distances, see Options - Distance Multiplier."
TOOLTIP_CENTER_MODE = """Select the explosion center type, which can be a point, an axis or a plane.
You can drag the Center manipulator directly in the viewport to change its position."""
TOOLTIP_RECENTER = "Toggle the Center manipulator in the viewport back to the centroid of the used shapes."
TOOLTIP_OPTIONS_ACCEL = """Exploded parts accelerate based on their initial distance from Center.
This setting controls how farthest parts accelerate more than nearest ones."""
TOOLTIP_OPTIONS_DIST = """Multiply the explosion distance selected in the above slider.
For smaller or larger explosion scales."""
TOOLTIP_OPTIONS_BOUNDS = """Visibility of the initial bounding box for the used shapes,
from transparent to fully visible."""
TOOLTIP_OPTIONS_UNSELECT = """When starting to use a group of selected parts,
should they be unselected for simpler visuals?"""
TOOLTIP_CANCEL = "Cancel the tool and leave parts in their initial positions."
TOOLTIP_APPLY = "Applies the current parts positions and adds an Undo-Redo state."
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/window.py | import asyncio, copy, webbrowser
import carb
import omni.ui as ui
from omni.ui import scene as sc
from omni.ui import color as cl
import omni.kit.commands
import omni.usd
import omni.timeline
from pxr import Usd, UsdGeom, UsdSkel, Sdf, Tf
import pxr.Gf as Gf
import omni.kit.notification_manager as nm
from .libs.viewport_helper import ViewportHelper
from .libs.app_utils import get_setting_or, set_setting, call_after_update
from .libs.ui_utils import create_reset_button, create_tooltip_fn, UiPal, UiPal_refresh
from .libs.manipulators import TranslateManipulator
from .engine import Engine
from . import const
from . import style
class Window(ui.Window):
    def __init__(self, title: str, ext_id: str, **kwargs):
        """Create the tool window, its Engine, and register the viewport scene.

        Args:
            title: window title.
            ext_id: owning extension id, used to register the manipulator scene.
        """
        # print("win.__init__")
        self._ext_id = ext_id
        self._engine = Engine()
        self._scene_reg = None
        self._center_manip = None
        self._center_label = None
        self._center_label_transform = None
        self._base_aabb_lines = []
        # persisted user options (bounds-box alpha, unselect-on-use)
        self._options_bounds_alpha = get_setting_or(const.SETTINGS_PATH + const.OPTIONS_BOUNDS_ALPHA_SETTING,
                                                    const.OPTIONS_BOUNDS_ALPHA_DEFAULT)
        self._options_unselect_on_use = get_setting_or(const.SETTINGS_PATH + const.OPTIONS_UNSELECT_ON_USE_SETTING,
                                                       const.OPTIONS_UNSELECT_ON_USE_DEFAULT)
        kwargs["auto_resize"] = True
        super().__init__(title, **kwargs)
        self.auto_resize = True
        self._ui_built = False
        # UI is built lazily, when the window first becomes visible
        self.frame.set_build_fn(self._build_fn)
        self._vp = ViewportHelper()
        # print(self._vp.info())
        # create manipulator scene
        self._scene_reg = self._vp.register_scene_proxy(self._scene_create, self._scene_destroy,
                                                        self._scene_get_visible, self._scene_set_visible,
                                                        self._ext_id)
        self._engine.usd.add_stage_event_fn(self._on_stage_event)
    def destroy(self, is_ext_shutdown):
        """Tear down widget refs, manipulator scene, viewport helper and engine.

        Args:
            is_ext_shutdown: True when the whole extension is unloading; in that
                case exploded parts are left as-is instead of being reset.
        """
        # print("win.destroy", is_ext_shutdown)
        # drop widget references so the UI objects can be released
        self._dist_slider = None
        self._use_button = None
        self._center_mode_combo = None
        self._recenter_button = None
        self._options = None
        self._options_dist_mult_combo = None
        self._options_accel_slider = None
        self._options_bounds_slider = None
        self._options_unselect_on_use_check = None
        self._done_button = None
        self._reset_button = None
        if self._center_manip:
            self._center_manip.destroy()
            self._center_manip = None
        self._center_label = None
        self._center_label_transform = None
        self._base_aabb_lines.clear()
        if self._scene_reg:
            self._vp.unregister_scene(self._scene_reg)
            self._scene_reg = None
        if self._vp:
            self._vp.detach()
            self._vp = None
        if self._engine:
            if self._engine.usd:
                self._engine.usd.remove_stage_event_fn(self._on_stage_event)
            if not is_ext_shutdown and self._engine.has_meshes and self._engine.dist != 0:
                self._engine.reset(True)  # cancel: move parts back to their initial positions
            self._engine.destroy()
            self._engine = None
        super().destroy()
    def _build_fn(self):
        """Called to build the UI once the window is visible."""
        # print(f"win._build_fn {self.visible}")
        UiPal_refresh()
        self.frame.style = style.WINDOW_FRAME
        with ui.VStack(width=386, style={"margin": 7}):  # spacing=9, style={"margin": 7}
            with ui.VStack(height=0, spacing=11, style={"margin": 0}):  # spacing=9, style={"margin": 7}
                # row: Use button + info icon
                with ui.HStack(skip_draw_when_clipped=True, spacing=5):
                    self._use_button = ui.Button(const.SELECT_TO_EXPLODE_TEXT,
                                                 name="ever_bright",
                                                 height=24,
                                                 clicked_fn=self._on_use_clicked,
                                                 tooltip_fn=create_tooltip_fn(const.TOOLTIP_USE))
                    ui.Image(name="info",
                             fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT, width=18, height=24,
                             mouse_pressed_fn=lambda *p: self._on_info(),
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_INFO))
                # row: Distance label + slider (clicking the label zeroes the slider)
                with ui.HStack(skip_draw_when_clipped=True, spacing=6):
                    ui.Label(const.DISTANCE_LABEL, width=50,
                             mouse_pressed_fn=lambda *p: self._on_dist_set_zero(),
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_DIST))
                    self._dist_slider = ui.FloatSlider(min=0, max=1,
                                                       # tooltip_fn=create_tooltip_fn(const.TOOLTIP_DIST)
                                                       )
                    self._dist_slider.model.add_value_changed_fn(self._on_dist_slider_changed)
                # row: Center mode combo + Recenter button
                with ui.HStack(skip_draw_when_clipped=True, spacing=6):
                    ui.Label(const.CENTER_LABEL, width=50,
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_CENTER_MODE))
                    self._center_mode_combo = ui.ComboBox(self._engine.center_mode,
                                                          *const.CENTER_COMBO_LABELS,
                                                          width=145,
                                                          tooltip_fn=create_tooltip_fn(const.TOOLTIP_CENTER_MODE))
                    self._center_mode_combo.model.add_item_changed_fn(self._on_center_mode_changed)
                    self._setup_center_combo_labels()
                    self._recenter_button = ui.Button(const.RECENTER_TEXT, width=60,
                                                      clicked_fn=self._on_recenter_clicked,
                                                      tooltip_fn=create_tooltip_fn(const.TOOLTIP_RECENTER))
                ui.Spacer(height=1)
                # collapsible Options section (expanded by default only in dev mode)
                self._options = ui.CollapsableFrame(const.OPTIONS_TITLE,
                                                    collapsed=not bool(const.DEV_MODE))
                with self._options:
                    with ui.VStack(spacing=0, style={"margin": 3}):
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_ACCEL_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_ACCEL))
                            with ui.HStack():
                                self._options_accel_slider = ui.FloatSlider(min=0, max=const.OPTIONS_ACCEL_MAX)
                                self._options_accel_slider.model.set_value(self._engine._order_accel)
                                self._options_accel_slider.model.add_value_changed_fn(self._on_options_accel_changed)
                                create_reset_button(const.ACCEL_DEFAULT,
                                                    self._options_accel_slider.model,
                                                    self._options_accel_slider.model.set_value,
                                                    self._options_accel_slider.model.add_value_changed_fn)
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_DIST_MULT_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_DIST))
                            with ui.HStack():
                                # locate dist_mult label index from self._engine.dist_mult
                                def get_dist_mult_index(dist_mult):
                                    index = 0
                                    for i in range(len(const.OPTIONS_DIST_MULT_COMBO_VALUES)):
                                        entry = const.OPTIONS_DIST_MULT_COMBO_VALUES[i]
                                        if dist_mult == entry[1]:
                                            index = i
                                            break
                                    return index
                                self._options_dist_mult_combo = ui.ComboBox(
                                    get_dist_mult_index(self._engine.dist_mult),
                                    *[a[0] for a in const.OPTIONS_DIST_MULT_COMBO_VALUES],
                                    tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_DIST)
                                )
                                self._options_dist_mult_combo.model.add_item_changed_fn(self._on_options_dist_mult_changed)
                                create_reset_button(get_dist_mult_index(const.DEFAULT_DIST_MULT),
                                                    self._options_dist_mult_combo.model.get_item_value_model(),
                                                    self._options_dist_mult_combo.model.get_item_value_model().set_value,
                                                    self._options_dist_mult_combo.model.add_item_changed_fn)
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_BOUNDS_ALPHA_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_BOUNDS))
                            with ui.HStack():
                                self._options_bounds_slider = ui.FloatSlider(min=0, max=1,
                                                                             # tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_BOUNDS)
                                                                             )
                                self._options_bounds_slider.model.set_value(self._options_bounds_alpha)
                                self._options_bounds_slider.model.add_value_changed_fn(self._on_options_bounds_changed)
                                create_reset_button(const.OPTIONS_BOUNDS_ALPHA_DEFAULT,
                                                    self._options_bounds_slider.model,
                                                    self._options_bounds_slider.model.set_value,
                                                    self._options_bounds_slider.model.add_value_changed_fn)
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_UNSELECT_ON_USE_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_UNSELECT))
                            with ui.HStack():
                                self._options_unselect_on_use_check = ui.CheckBox(width=12,
                                                                                  tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_UNSELECT))
                                self._options_unselect_on_use_check.model.set_value(self._options_unselect_on_use)
                                self._options_unselect_on_use_check.model.add_value_changed_fn(self._on_options_unselect_changed)
                                # ui.Spacer(width=1)
                                ui.Line()
                                create_reset_button(const.OPTIONS_UNSELECT_ON_USE_DEFAULT,
                                                    self._options_unselect_on_use_check.model,
                                                    self._options_unselect_on_use_check.model.set_value,
                                                    self._options_unselect_on_use_check.model.add_value_changed_fn)
                ui.Spacer(height=1)
                # bottom row: Cancel / Apply
                with ui.HStack(skip_draw_when_clipped=True, spacing=9):
                    self._reset_button = ui.Button(const.RESET_TEXT, clicked_fn=self._on_reset_clicked,
                                                   tooltip_fn=create_tooltip_fn(const.TOOLTIP_CANCEL))
                    ui.Spacer()
                    self._done_button = ui.Button(const.DONE_TEXT, clicked_fn=self._on_done_clicked,
                                                  tooltip_fn=create_tooltip_fn(const.TOOLTIP_APPLY))
        #ui.Button("Test", clicked_fn=self._on_test)
        self._ui_built = True
        self._refresh_ui()
    def _on_stage_event(self, ev: carb.events.IEvent):
        """React to stage lifecycle and selection events."""
        # print("Window._on_stage_event", ev.type)
        if not self._ui_built:  # a stage event can call us before _build_fn()
            return
        if ev.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            # only track selection while no capture is active
            if not self._engine.has_meshes:
                self._refresh_ui()
        elif ev.type == int(omni.usd.StageEventType.CLOSING):
            # print("Window.CLOSING")
            self._reset(False)  # calls engine.reset
            # self._engine.usd.detach()
        elif ev.type == int(omni.usd.StageEventType.OPENED):
            # print("Window.OPENED")
            # a new stage may use a different up axis: refresh combo labels
            self._setup_center_combo_labels()
def _refresh_ui(self):
if not self._engine.has_meshes: # nothing selected
self._dist_slider.enabled = False
self._center_mode_combo.enabled = False
self._recenter_button.enabled = False
self._done_button.enabled = False
self._reset_button.enabled = False
sel_mesh_count = self._engine.stage_selection_meshes_count
if sel_mesh_count >= 2:
self._use_button.text = const.SELECT_TO_USE_TEXT.format(sel_mesh_count)
self._use_button.enabled = True
else:
self._use_button.text = const.SELECT_TO_EXPLODE_TEXT
self._use_button.enabled = False
else:
mesh_count = self._engine.meshes_count
self._use_button.text = const.SELECTED_TEXT.format(mesh_count)
self._use_button.enabled = False
self._dist_slider.enabled = True
self._center_mode_combo.enabled = True
self._recenter_button.enabled = not self._engine.is_centered()
self._done_button.enabled = True
self._reset_button.enabled = True
def _setup_center_combo_labels(self):
model = self._center_mode_combo.model
ch = model.get_item_children()
up = self._engine.usd.stage_up_index
if up == 1: # y up
mark = [const.CENTER_COMBO_AXIS_FIRST + 1, const.CENTER_COMBO_PLANE_FIRST + 2]
else: # z up
mark = [const.CENTER_COMBO_AXIS_FIRST + 2, const.CENTER_COMBO_PLANE_FIRST + 0]
for l in range(len(const.CENTER_COMBO_LABELS)):
label = const.CENTER_COMBO_LABELS[l]
if l in mark:
if l < const.CENTER_COMBO_PLANE_FIRST:
label += const.CENTER_COMBO_AXIS_SUFFIX
else:
label += const.CENTER_COMBO_PLANE_SUFFIX
m = model.get_item_value_model(ch[l])
m.set_value(label)
    def _reset(self, set_to_initial):
        """Drop the current capture and return the UI to its idle state.

        Args:
            set_to_initial: when True, parts are moved back to their
                pre-explosion transforms.
        """
        self._engine.reset(set_to_initial)
        self._enable_center_controls(False)
        self._enable_base_aabb(False)
        self._dist_slider.model.set_value(0)
        self._refresh_ui()

    def _on_use_clicked(self):
        """Capture the current selection and enter 'exploding' mode."""
        if not self._engine.sel_capture():
            # capture failed: back to idle
            self._reset(False)
            return
        self._sync_base_aabb()
        self._enable_base_aabb(True)
        self._enable_center_controls(True)
        if self._center_manip:
            self._set_center_manip_point(self._engine.center)
        # optionally clear the stage selection for cleaner visuals
        if self._options_unselect_on_use:
            self._engine.usd.set_selected_prim_paths([])
        self._refresh_ui()
    def _on_dist_set_zero(self):
        # clicking the "Distance" label resets the slider to 0
        self._dist_slider.model.set_value(0)

    def _on_dist_slider_changed(self, model):
        self._engine.dist = model.as_float

    def _on_center_mode_changed(self, m, *args):
        self._engine.center_mode = m.get_item_value_model().get_value_as_int()

    def _on_recenter_clicked(self):
        # snap the center gizmo back to the parts' centroid
        self._engine.recenter()
        self._set_center_manip_point(self._engine.center)
        self._recenter_button.enabled = not self._engine.is_centered()

    def _on_done_clicked(self):
        # commit the exploded positions (engine adds the undo state), then idle
        self._engine.commit()
        self._reset(False)

    def _on_reset_clicked(self):
        self._reset(True)
    def _scene_create(self, vp_args):
        """Build the viewport scene: center gizmo, its label and the bbox lines."""
        vp_api = vp_args["viewport_api"]
        if not self._vp.same_api(vp_api):  # ensure scene is created in same viewport we're attached to
            return
        # print("_scene_create", vp_args, self._vp._api)
        self._center_manip = TranslateManipulator(viewport=self._vp,
                                                  enabled=False,
                                                  changed_fn=self._on_center_manip_changed
                                                  )
        self._center_label_transform = sc.Transform()  # before next _sync
        self._sync_scene_label()
        with self._center_label_transform:
            # billboard the label toward the camera at constant screen size
            with sc.Transform(look_at=sc.Transform.LookAt.CAMERA, scale_to=sc.Space.SCREEN):
                with sc.Transform(transform=sc.Matrix44.get_scale_matrix(2, 2, 1)):
                    # negative offset along the stage up axis places the label under the gizmo
                    wup = self._engine.usd.stage_up
                    wup *= const.CENTER_MANIP_LABEL_OFFSET
                    with sc.Transform(transform=sc.Matrix44.get_translation_matrix(*wup)):
                        self._center_label = sc.Label(const.CENTER_TEXT, alignment=ui.Alignment.CENTER,
                                                      size=const.CENTER_MANIP_LABEL_SIZE, visible=False)
        self._create_base_aabb()
    def _scene_destroy(self):
        if self._center_manip:
            self._center_manip.destroy()
            self._center_manip = None

    def _scene_get_visible(self):
        # the scene proxy itself is always considered visible
        return True

    def _scene_set_visible(self, value):
        # NOTE(review): assumes _center_manip is still alive here; it is set to
        # None in _scene_destroy — confirm the proxy never calls this afterwards.
        if self._center_manip.enabled:  # only set if manip is enabled
            self._center_manip.enabled = value
    def _on_center_manip_changed(self, action, manip):
        """Gizmo drag callback: move the explosion center to the manip's point."""
        # print("_on_center_manip_changed")
        assert self._engine.has_meshes
        self._sync_scene_label()
        self._engine.center = manip.point
        self._recenter_button.enabled = not self._engine.is_centered()

    def _enable_center_controls(self, ena):
        # show/hide the center gizmo and its "Center" label together
        if self._center_manip:
            self._center_manip.enabled = ena
        if self._center_label:
            self._center_label.visible = ena

    def _set_center_manip_point(self, wpt):
        self._center_manip.point = wpt
        self._sync_scene_label()

    def _sync_scene_label(self):
        # keep the label transform glued to the manipulator's world position
        wpt = Gf.Vec3d(self._center_manip.point)
        self._center_label_transform.transform = sc.Matrix44.get_translation_matrix(*wpt)
def prepare_base_aabb_color(self):
color = const.BOUNDS_BASE_AABB_COLOR
color = (color & 0x00ffffff) | (int(self._options_bounds_alpha * 255) << 24)
return color
def _create_base_aabb(self):
self._base_aabb_lines.clear()
color = self.prepare_base_aabb_color()
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
    def _sync_base_aabb(self):
        """
        Update the 12 edge lines from the engine's initial bounding box.

        points p#
        4 5
        6 7
        0 1
        2 3
        lines
        8| |9
        10| |11
        _4_
        5/ /6
        -7-
        _0_
        1/ /2
        -3-
        """
        if self._engine.meshes_base_aabb.IsEmpty():
            return
        mi, ma = self._engine.meshes_base_aabb.min, self._engine.meshes_base_aabb.max
        # the 8 box corners: p0..p3 on the min-Y face, p4..p7 on the max-Y face
        p0=[mi[0],mi[1],mi[2]]
        p1=[ma[0],mi[1],mi[2]]
        p2=[mi[0],mi[1],ma[2]]
        p3=[ma[0],mi[1],ma[2]]
        p4=[mi[0],ma[1],mi[2]]
        p5=[ma[0],ma[1],mi[2]]
        p6=[mi[0],ma[1],ma[2]]
        p7=[ma[0],ma[1],ma[2]]
        # lines 0..3: bottom face edges
        self._base_aabb_lines[0].start,self._base_aabb_lines[0].end, = p0,p1
        self._base_aabb_lines[1].start,self._base_aabb_lines[1].end, = p0,p2
        self._base_aabb_lines[2].start,self._base_aabb_lines[2].end, = p1,p3
        self._base_aabb_lines[3].start,self._base_aabb_lines[3].end, = p2,p3
        # lines 4..7: top face edges
        self._base_aabb_lines[4].start,self._base_aabb_lines[4].end, = p4,p5
        self._base_aabb_lines[5].start,self._base_aabb_lines[5].end, = p4,p6
        self._base_aabb_lines[6].start,self._base_aabb_lines[6].end, = p5,p7
        self._base_aabb_lines[7].start,self._base_aabb_lines[7].end, = p6,p7
        # lines 8..11: vertical edges
        self._base_aabb_lines[8].start,self._base_aabb_lines[8].end, = p0,p4
        self._base_aabb_lines[9].start,self._base_aabb_lines[9].end, = p1,p5
        self._base_aabb_lines[10].start,self._base_aabb_lines[10].end, = p2,p6
        self._base_aabb_lines[11].start,self._base_aabb_lines[11].end, = p3,p7

    def _enable_base_aabb(self, ena):
        """Show/hide the bbox lines; forced off when there is no valid box."""
        if self._engine.meshes_base_aabb.IsEmpty():
            ena = False
        for l in self._base_aabb_lines:
            l.visible = ena
    def _on_options_dist_mult_changed(self, m, *args):
        # map the combo index to its multiplier value
        index = m.get_item_value_model().get_value_as_int()
        mult = const.OPTIONS_DIST_MULT_COMBO_VALUES[index][1]
        self._engine.dist_mult = mult

    def _on_options_accel_changed(self, model):
        self._engine.order_accel = model.as_float

    def _on_options_bounds_changed(self, model):
        """Persist the bounds-alpha option and recolor the existing bbox lines."""
        self._options_bounds_alpha = model.as_float
        set_setting(const.SETTINGS_PATH + const.OPTIONS_BOUNDS_ALPHA_SETTING, self._options_bounds_alpha)
        color = self.prepare_base_aabb_color()
        for l in self._base_aabb_lines:
            l.color = color
def _on_options_unselect_changed(self, m):
self._options_unselect_on_use = m.as_float
set_setting(const.SETTINGS_PATH + const.OPTIONS_UNSELECT_ON_USE_SETTING, self._options_unselect_on_use)
def _on_info(self):
res = webbrowser.open(const.INFO_URL)
| 23,446 | Python | 37.063312 | 143 | 0.514416 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/app_utils.py | """"""
import asyncio, functools, sys
import os.path
import carb
import omni.kit
import omni.kit.viewport.utility as vut
import omni.ui as ui
import omni.usd
from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil
VERSION = 11
def call_after_update(fn, update_count=1):
    """Run *fn* after waiting `update_count` Kit app update ticks."""
    async def _wait_then_call():
        for _ in range(update_count):
            await omni.kit.app.get_app().next_update_async()
        fn()
    asyncio.ensure_future(_wait_then_call())
def call_on_ready(is_ready_fn, on_ready_fn, max_tries=sys.maxsize):
    """Invoke *on_ready_fn* as soon as *is_ready_fn* returns truthy.

    Checks synchronously first; otherwise polls once per Kit app update,
    giving up silently after *max_tries* failed polls.
    """
    if is_ready_fn():  # already ready: call back right away
        on_ready_fn()
        return

    async def _poll():
        tries_left = max_tries
        while tries_left:
            await omni.kit.app.get_app().next_update_async()
            if is_ready_fn():
                on_ready_fn()
                return
            tries_left -= 1

    asyncio.ensure_future(_poll())
def call_on_parts_ready(on_ready_fn, part_flags=1 | 2 | 4,
                        max_tries=sys.maxsize,
                        usd_context=None, usd_context_name='',
                        window_name: str = None,
                        ):
    """Call back when all parts in part_flags are ready:
    part_flags:
        Stage ready=1
        Stage camera ready=2 -> implies stage ready
        Viewport non-zero frame size=4
    """
    def are_parts_ready():
        # fix: initialize before the flag test — previously undefined (NameError)
        # at the final return when part_flags == 0
        ready_mask = 0
        if part_flags & (1 | 2 | 4):
            api, win = vut.get_active_viewport_and_window(usd_context_name=usd_context_name,
                                                          window_name=window_name)
            if part_flags & (1 | 2):
                # resolve the USD context: an explicit one wins over the default
                ctx = usd_context if usd_context is not None else omni.usd.get_context()
                if not ctx:
                    return False
                stage = ctx.get_stage()
                if not stage:
                    return False
                cam_prim = stage.GetPrimAtPath(api.camera_path)
                ready_mask = 1 | (2 if cam_prim.IsValid() else 0)
            if part_flags & 4:
                if not win:
                    return False
                ws_win = ui.Workspace.get_window(win.name)
                if not ws_win:
                    return False
                if not hasattr(ws_win, 'frame'):
                    return False
                ws_win_frame = ws_win.frame
                # viewport frame must have been laid out with a non-zero size
                if ws_win_frame.computed_width > 0 and ws_win_frame.computed_height > 0:
                    ready_mask |= 4
        return part_flags & ready_mask == part_flags

    call_on_ready(are_parts_ready, on_ready_fn, max_tries)
# convenience calls
def call_on_stage_ready(on_ready_fn, usd_context=None, max_tries=sys.maxsize):
    """Call on_ready_fn once the stage exists (part flag 1 only)."""
    call_on_parts_ready(on_ready_fn, 1, usd_context=usd_context, max_tries=max_tries)

def call_on_stage_camera_ready(on_ready_fn,
                               usd_context=None, usd_context_name='',
                               window_name: str = None,
                               max_tries=sys.maxsize):
    """Call on_ready_fn once both stage and its camera prim exist (flags 1|2)."""
    call_on_parts_ready(on_ready_fn, 1 | 2,
                        usd_context=usd_context, usd_context_name=usd_context_name,
                        window_name=window_name, max_tries=max_tries)
def get_setting_or(path, not_found_value):
    """Read the carb setting at *path*, returning *not_found_value* when unset."""
    found = carb.settings.get_settings().get(path)
    return not_found_value if found is None else found
def set_setting(path, value):
    """Write *value* to the carb setting at *path*."""
    carb.settings.get_settings().set(path, value)

def delete_setting(path):
    """Remove the carb setting item at *path*."""
    carb.settings.get_settings().destroy_item(path)
def get_extension_path(ext_id, sub_path=None):
    """Install path of extension *ext_id*, optionally joined with *sub_path*."""
    base = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext_id)
    if sub_path is None:
        return base
    return os.path.join(base, sub_path)
def matrix44_flatten(mat):
    """Get a omni.ui.scene.Matrix44 (an array[16]) from a pxr.Gf.Matrix4d or array[4][4].

    Flattens row-major: out[r * 4 + c] == mat[r][c].
    """
    return [mat[row][col] for row in range(4) for col in range(4)]
| 4,241 | Python | 25.185185 | 93 | 0.537373 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/manipulators.py | """
If you're getting Kit launch time errors related with omni.ui.scene,
add omni.ui.scene to your extension dependencies in extension.toml:
[dependencies]
"omni.ui.scene" = {}
"""
from typing import Dict
import carb
import omni.kit, omni.usd
from pxr import Gf, Sdf, Tf, Usd, UsdGeom
from omni.kit.manipulator.viewport import ManipulatorFactory
from omni.kit.manipulator.transform import AbstractTransformManipulatorModel, Operation
from omni.kit.manipulator.transform.manipulator import TransformManipulator, Axis
from omni.kit.manipulator.transform.simple_transform_model import SimpleTransformModel
from omni.kit.manipulator.transform.gestures import TranslateChangedGesture, TranslateDragGesturePayload
from .viewport_helper import ViewportHelper
class TranslateManipulator():
    """A translate-only gizmo for the active viewport.

    Wraps omni.kit's TransformManipulator with a SimpleTransformModel and a
    TranslateGesture, hiding the legacy (VP1) vs new (VP2) creation paths.
    """
    VERSION = 9

    def __init__(self, viewport: ViewportHelper,
                 point=None,
                 size=1.,
                 enabled=False,
                 axes: Axis = Axis.ALL,
                 style: Dict = None,
                 changed_fn=None):
        """ style: all colors in 0xAABBGGRR
        {
            "Translate.Axis::x": {"color": 0xAABBGGRR},
            "Translate.Axis::y": {"color": },
            "Translate.Axis::z": {"color": },
            "Translate.Plane::x_y": {"color": },
            "Translate.Plane::y_z": {"color": },
            "Translate.Plane::z_x": {"color": },
            "Translate.Point": {"color": 0xAABBGGRR, "type": "point"/"notpoint"},
        }
        """
        # fix: avoid mutable/shared default arguments
        # (defaults were Gf.Vec3d(0, 0, 0) and {}, evaluated once at def time)
        if point is None:
            point = Gf.Vec3d(0, 0, 0)
        if style is None:
            style = {}
        self._manip = None
        self._gesture = None
        self._changed_fn = None
        # if not viewport.is_attached:
        #     raise AssertionError("Viewport not attached")
        self._is_legacy = viewport.is_legacy
        model = SimpleTransformModel()
        model.set_operation(Operation.TRANSLATE)
        model.set_floats(model.get_item("translate"), point)
        self._changed_fn = changed_fn
        self._gesture = TranslateGesture(viewport=viewport, changed_fn=self._on_changed_fn)
        if self._is_legacy:
            # VP1: manipulators must go through the factory
            self._manip = ManipulatorFactory.create_manipulator(TransformManipulator,
                                                                model=model,
                                                                size=size,
                                                                enabled=enabled,
                                                                axes=axes,
                                                                style=style,
                                                                gestures=[self._gesture])
        else:
            self._manip = TransformManipulator(model=model,
                                               size=size,
                                               enabled=enabled,
                                               axes=axes,
                                               style=style,
                                               gestures=[self._gesture])

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Release the gesture and manipulator (safe to call more than once)."""
        if self._gesture:
            self._gesture.destroy()
            self._gesture = None
        if self._manip:
            if self._is_legacy:
                ManipulatorFactory.destroy_manipulator(self._manip)
            else:
                self._manip.destroy()
            self._manip = None
        if self._changed_fn:
            self._changed_fn = None

    @property
    def enabled(self):
        """Whether the gizmo is currently active/visible."""
        return self._manip.enabled

    @enabled.setter
    def enabled(self, ena):
        self._manip.enabled = ena

    @property
    def point(self):
        """Current translation as stored in the model (3 floats)."""
        return self._manip.model.get_as_floats(self._manip.model.get_item("translate"))

    @point.setter
    def point(self, point):
        self._manip.model.set_floats(self._manip.model.get_item("translate"),
                                     [point[0], point[1], point[2]])

    def set_changed_fn(self, fn):
        """ fn(action, manip)
        action: began=0,changed=1,ended=2,canceled=3
        """
        self._changed_fn = fn

    def _on_changed_fn(self, action, point):
        # forward gesture events, passing self so callers can read .point
        if self._changed_fn:
            self._changed_fn(action, self)
"""
class PointTranslateModel(SimpleTransformModel):
def __init__(self, point):
super().__init__()
self.set_operation(Operation.TRANSLATE)
self.set_floats(self.get_item("translate"), point)
"""
class TranslateGesture(TranslateChangedGesture):
    """Gesture wired into TransformManipulator that forwards translate-drag
    events to a changed_fn(action, point) callback.

    action codes: 0=began, 1=changed, 2=ended, 3=canceled.
    """

    def __init__(self, viewport, changed_fn=None, **kwargs):
        TranslateChangedGesture.__init__(self)
        self._vp = viewport
        self.changed_fn = changed_fn

    def destroy(self):
        self._vp = None
        self.changed_fn = None

    def __del__(self):
        self.destroy()

    def _valid_model(self):
        """Shared guard for all handlers (was duplicated in each one): return
        the sender's model when the payload is a valid translate-drag payload,
        else None."""
        if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload):
            return None
        return self.sender.model  # may itself be None

    def on_began(self):
        # print("TranslateGesture.on_began", self._vp.window_name)
        model = self._valid_model()
        if not model:
            return
        pt = model.get_as_floats(model.get_item("translate"))
        self._begin_point = Gf.Vec3d(*pt)
        if self._vp.is_legacy:
            # legacy viewport: keep the drag from changing the selection
            self._vp.temp_select_enabled(False)
        if self.changed_fn:
            self.changed_fn(0, self._begin_point)

    def on_ended(self):
        # print("TranslateGesture.on_ended")
        model = self._valid_model()
        if not model:
            return
        if self.changed_fn:
            pt = model.get_as_floats(model.get_item("translate"))
            self.changed_fn(2, Gf.Vec3d(*pt))

    def on_canceled(self):
        # print("TranslateGesture.on_canceled")
        model = self._valid_model()
        if not model:
            return
        if self.changed_fn:
            pt = model.get_as_floats(model.get_item("translate"))
            self.changed_fn(3, Gf.Vec3d(*pt))

    def on_changed(self):
        # print("TranslateGesture.on_changed")
        model = self._valid_model()
        if not model:
            return
        # apply the drag delta to the point captured at on_began
        translate = Gf.Vec3d(*self.gesture_payload.moved)
        point = self._begin_point + translate
        model.set_floats(model.get_item("translate"), [point[0], point[1], point[2]])
        if self.changed_fn:
            self.changed_fn(1, point)
| 6,989 | Python | 25.477273 | 124 | 0.546001 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/usd_utils.py | """
Notes:
"""
import omni.kit
import omni.usd
from pxr import Gf, Sdf, Usd, UsdGeom
VERSION = 15
XFORM_OP_TRANSLATE_TYPE_TOKEN = UsdGeom.XformOp.GetOpTypeToken(UsdGeom.XformOp.TypeTranslate)
XFORM_OP_TRANSLATE_ATTR_NAME = "xformOp:" + XFORM_OP_TRANSLATE_TYPE_TOKEN
def get_prim_transform(prim,
                       with_pivot,
                       xform_cache=None,
                       time_code=Usd.TimeCode.Default()):
    """Returns a prim's local transformation, converting mesh points into parent-space coords.
    with_pivot=True: returns GetLocalTransformation, where pivot and pivot^-1 are included into the translation.
    with_pivot=False will set translation to the actual translate XformOp.
    If no pivot is set, returns GetLocalTransformation()
    """
    if xform_cache is None:
        xform_cache = UsdGeom.XformCache(time_code)
    mat, _ = xform_cache.GetLocalTransformation(prim)
    if with_pivot:
        return mat
    # remove pivot from local transform
    attr_name = XFORM_OP_TRANSLATE_ATTR_NAME
    op_attr = prim.GetAttribute(attr_name + ":pivot")
    if not op_attr:  # no pivot, return mat
        return mat
    # a pivot exists: replace the matrix translation (which folds the pivot in)
    # with the raw xformOp:translate value
    op_attr = prim.GetAttribute(attr_name)
    if op_attr:
        op = UsdGeom.XformOp(op_attr)
        if op:
            trans = op.Get(time_code)
            if trans is not None:
                mat.SetTranslateOnly(make_vec3_for_matrix4(mat, trans))
                return mat
    # translation not found: set to identity translate
    mat.SetTranslateOnly(make_vec3_for_matrix4(mat, 0, 0, 0))
    return mat
def set_prim_transform(prim, mat,
                       sdf_change_block=1,
                       time_code=Usd.TimeCode.Default()):
    """Author a prim's local transform from matrix `mat`.

    If the prim already has a matrix (TypeTransform) op, the matrix is written
    into it directly. Otherwise `mat` is factored into scale/rotation/translation
    and written into individual translate/rotate/scale ops (created as needed),
    preserving any existing pivot translate and its inverse, then the xformOpOrder
    is rewritten.

    sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
    """
    # NOTE(review): change blocks are force-disabled here; the parameter is kept
    # for interface compatibility — confirm before re-enabling.
    sdf_change_block = 0
    stage = prim.GetStage()
    if sdf_change_block == 1:
        Sdf.BeginChangeBlock()
    xform = UsdGeom.Xformable(prim)
    ops = xform.GetOrderedXformOps()
    # fast path: a matrix op takes the whole transform as-is
    for op in ops:
        if op.GetOpType() == UsdGeom.XformOp.TypeTransform:
            _set_xform_op_time_code(op, mat, time_code, stage)
            if sdf_change_block == 1:
                Sdf.EndChangeBlock()
            return
    def get_or_add(op_type, prec):
        # Return the existing op for op_type or create it. Creation must happen
        # outside any active change block (AddXformOp needs change processing).
        type_token = UsdGeom.XformOp.GetOpTypeToken(op_type)
        attr_name = "xformOp:" + type_token
        op_attr = prim.GetAttribute(attr_name)
        if op_attr:
            op = UsdGeom.XformOp(op_attr)
            if op:
                return op
        if sdf_change_block >= 1:
            Sdf.EndChangeBlock()
        op = xform.AddXformOp(op_type, prec)
        if sdf_change_block >= 1:
            Sdf.BeginChangeBlock()
        return op
    # not a transform: decompose matrix and set various S,R,T as needed
    _, _, scale, rot_mat, trans, _ = mat.Factor()
    rot_mat.Orthonormalize(False)
    rot = rot_mat.ExtractRotation()
    new_ops = []
    # translation
    op = get_or_add(UsdGeom.XformOp.TypeTranslate, UsdGeom.XformOp.PrecisionDouble)
    if op:
        _set_xform_op_time_code(op, trans, time_code, stage)
        new_ops.append(op)
    # scale/rotate pivot (a translate): kept in place, value untouched
    pivot_op = None
    attr_name = XFORM_OP_TRANSLATE_ATTR_NAME + ":pivot"
    op_attr = prim.GetAttribute(attr_name)
    if op_attr:
        pivot_op = UsdGeom.XformOp(op_attr)
        if pivot_op:
            new_ops.append(pivot_op)
    # rotation: pick first rotation-like op type already present, else XYZ/float
    rot_type, rot_prec = UsdGeom.XformOp.TypeRotateXYZ, UsdGeom.XformOp.PrecisionFloat
    for op in ops:
        op_type = op.GetOpType()
        if op_type >= UsdGeom.XformOp.TypeRotateX and op_type <= UsdGeom.XformOp.TypeOrient:
            rot_type, rot_prec = op_type, op.GetPrecision()
            break
    def rot_get_or_add(rot_type,
                       axis_0, axis_1, axis_2,
                       x, y, z,
                       rot_prec
                       ):
        # Decompose rotation along the given axis order and write the angles.
        angles = rot.Decompose(axis_0, axis_1, axis_2)
        rot_vals = Gf.Vec3f(angles[x], angles[y], angles[z])  # unscramble to x,y,z order that op.Set() needs
        op = get_or_add(rot_type, rot_prec)
        if op:
            _set_xform_op_time_code(op, rot_vals, time_code, stage)
            new_ops.append(op)
    # single rotation? write all three single-axis ops
    if rot_type >= UsdGeom.XformOp.TypeRotateX and rot_type <= UsdGeom.XformOp.TypeRotateZ:
        angles = rot.Decompose(Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis())
        op = get_or_add(UsdGeom.XformOp.TypeRotateX, rot_prec)
        if op:
            _set_xform_op_time_code(op, angles[2], time_code, stage)
            new_ops.append(op)
        op = get_or_add(UsdGeom.XformOp.TypeRotateY, rot_prec)
        if op:
            _set_xform_op_time_code(op, angles[1], time_code, stage)
            new_ops.append(op)
        op = get_or_add(UsdGeom.XformOp.TypeRotateZ, rot_prec)
        if op:
            _set_xform_op_time_code(op, angles[0], time_code, stage)
            new_ops.append(op)
    # quaternion? only written when the orient attribute already exists
    elif rot_type == UsdGeom.XformOp.TypeOrient:
        type_token = UsdGeom.XformOp.GetOpTypeToken(rot_type)
        attr_name = "xformOp:" + type_token
        op_attr = prim.GetAttribute(attr_name)
        if op_attr:
            op = UsdGeom.XformOp(op_attr)
            if op:
                _set_xform_op_time_code(op, rot.GetQuat(), time_code, stage)
                new_ops.append(op)
    # triple rotation? axis order reversed vs the op name, indices unscramble back
    elif rot_type == UsdGeom.XformOp.TypeRotateXZY:
        rot_get_or_add(rot_type,
                       Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis(), Gf.Vec3d.XAxis(),
                       2, 0, 1,
                       rot_prec)
    elif rot_type == UsdGeom.XformOp.TypeRotateYXZ:
        rot_get_or_add(rot_type,
                       Gf.Vec3d.ZAxis(), Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(),
                       1, 2, 0,
                       rot_prec)
    elif rot_type == UsdGeom.XformOp.TypeRotateYZX:
        rot_get_or_add(rot_type,
                       Gf.Vec3d.XAxis(), Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(),
                       0, 2, 1,
                       rot_prec)
    elif rot_type == UsdGeom.XformOp.TypeRotateZXY:
        rot_get_or_add(rot_type,
                       Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis(), Gf.Vec3d.ZAxis(),
                       1, 0, 2,
                       rot_prec)
    elif rot_type == UsdGeom.XformOp.TypeRotateZYX:
        rot_get_or_add(rot_type,
                       Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis(),
                       0, 1, 2,
                       rot_prec)
    else:  # just assume TypeRotateXYZ for any other
        rot_get_or_add(rot_type,
                       Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis(),
                       2, 1, 0,
                       rot_prec)
    # scale
    op = get_or_add(UsdGeom.XformOp.TypeScale, UsdGeom.XformOp.PrecisionFloat)
    if op:
        _set_xform_op_time_code(op, scale, time_code, stage)
        new_ops.append(op)
    # pivot_op^-1: keep the existing inverse-pivot translate at the end
    if pivot_op is not None:
        for op in ops:
            if op.IsInverseOp() and \
               op.GetOpType() == UsdGeom.XformOp.TypeTranslate and \
               is_pivot_xform_op_name_suffix(op.GetOpName()):
                new_ops.append(op)
                break
    # and finally set new ops into xform
    xform.SetXformOpOrder(new_ops, xform.GetResetXformStack())
    if sdf_change_block == 1:
        Sdf.EndChangeBlock()
"""
Note: touch_prim_xform() doesn't work, probably because the value is equal and caches are not rebuilt.
But this does:
lmat = get_prim_transform(prim, False, xform_cache, time_code)
cmd = TransformPrimCommand(path=path, new_transform_matrix=lmat, time_code=time_code)
#slower: cmd = TransformPrimSRTCommand(path=path, time_code=time_code)
cmd.do()
--------------------
def touch_prim_xform(prim,
sdf_change_block=1,
time_code=Usd.TimeCode.Default()):
#sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
if sdf_change_block == 1:
Sdf.BeginChangeBlock()
xform = UsdGeom.Xformable(prim)
ops = xform.GetOrderedXformOps()
for op in ops:
if not op.IsInverseOp():
op.Set(op.Get(time_code), time_code)
break
if sdf_change_block == 1:
Sdf.EndChangeBlock()
"""
def get_prim_translation(prim,
                         time_code=Usd.TimeCode.Default()):
    """Return the value of the prim's translate xformOp at time_code as a
    Gf.Vec3d, or a zero vector when no translate op/value is authored."""
    attr = prim.GetAttribute(XFORM_OP_TRANSLATE_ATTR_NAME)
    if attr:
        xop = UsdGeom.XformOp(attr)
        if xop:
            value = xop.Get(time_code)
            if value is not None:
                return Gf.Vec3d(value)
    # no authored translation: identity
    return Gf.Vec3d(0.)
def set_prim_translation(prim, trans,
                         sdf_change_block=1,
                         time_code=Usd.TimeCode.Default()):
    """Set a prim's local translation.

    Writes into an existing matrix (TypeTransform) op if one is present,
    otherwise into the first non-pivot translate op, prepending one if needed.

    sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
    (currently forced to 0 below, disabling change blocks).
    """
    # print(prim.GetPath().pathString)
    sdf_change_block = 0
    mat_op = trans_op = None
    xform = UsdGeom.Xformable(prim)
    for op in xform.GetOrderedXformOps():
        op_type = op.GetOpType()
        if op_type == UsdGeom.XformOp.TypeTransform:
            mat_op = op
            break
        elif op_type == UsdGeom.XformOp.TypeTranslate and not is_pivot_xform_op_name_suffix(op.GetOpName()):  # op.SplitName()
            # simple translation, not pivot/invert
            trans_op = op
            break
    if mat_op:  # has matrix op
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        # NOTE(review): this overwrites the whole matrix with a pure translation,
        # whereas set_prim_translation_fast() preserves R/S via SetTranslateOnly —
        # confirm which behavior is intended.
        mat = Gf.Matrix4d()
        mat.SetTranslate(trans)
        stage = prim.GetStage()
        _set_xform_op_time_code(mat_op, mat, time_code, stage)
        if sdf_change_block == 1:
            # fix: this branch previously opened a change block without closing it
            Sdf.EndChangeBlock()
    else:  # set or add a translation xform op
        stage = prim.GetStage()
        # can't just set attr as order might not have been set
        if not trans_op:
            # op creation must happen outside an externally-owned change block
            if sdf_change_block == 2:
                Sdf.EndChangeBlock()
            trans_op = _prepend_xform_op(xform,
                                         UsdGeom.XformOp.TypeTranslate,
                                         get_xform_op_precision(trans),
                                         time_code, stage)
            if sdf_change_block == 2:
                Sdf.BeginChangeBlock()
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        _set_xform_op_time_code(trans_op, trans, time_code, stage)
        if sdf_change_block == 1:
            Sdf.EndChangeBlock()
def set_prim_translation_fast(prim, trans,
                              sdf_change_block=1,
                              time_code=Usd.TimeCode.Default()):
    """
    As set_translation() but won't copy time samples from weaker layers.
    sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
    see: https://graphics.pixar.com/usd/release/api/class_sdf_change_block.html
    """
    # change blocks force-disabled, as in set_prim_translation()
    sdf_change_block = 0
    # NOTE(review): the standard matrix-op attribute is "xformOp:transform"
    # (token of UsdGeom.XformOp.TypeTransform); "xformOp:mat" looks wrong and
    # would make this branch never trigger — confirm against authored stages.
    if prim.HasAttribute("xformOp:mat"):  # has matrix op
        if sdf_change_block == 1:
            # NOTE(review): no matching EndChangeBlock in this branch (dead while
            # sdf_change_block is forced to 0, but unbalanced if re-enabled)
            Sdf.BeginChangeBlock()
        at = prim.GetAttribute("xformOp:mat")
        if not at.GetNumTimeSamples():
            # no samples: author the default value instead
            time_code = Usd.TimeCode.Default()
        mat = at.Get(time_code)
        mat.SetTranslateOnly(trans)
        at.Set(mat, time_code)
    else:  # set or add a translation xform op
        # can't just set attr as order might not have been set
        attr = prim.GetAttribute("xformOp:translate")
        op = UsdGeom.XformOp(attr)
        if not op:
            if sdf_change_block == 2:
                Sdf.EndChangeBlock()
            stage = prim.GetStage()
            xform = UsdGeom.Xformable(prim)
            op = _prepend_xform_op(xform,
                                   UsdGeom.XformOp.TypeTranslate,
                                   get_xform_op_precision(trans),
                                   time_code, stage)
            if sdf_change_block == 2:
                Sdf.BeginChangeBlock()
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        if not op.GetNumTimeSamples():
            time_code = Usd.TimeCode.Default()
        op.Set(trans, time_code)  # Gf.Vec3d()
        if sdf_change_block == 1:
            Sdf.EndChangeBlock()
def _set_xform_op_time_code(xform_op, value, time_code, stage):
    """Set an xform op's value, preserving any existing value type and, when
    writing a time sample, first copying samples up from weaker layers."""
    prev = xform_op.Get(time_code)
    if not xform_op.GetNumTimeSamples():
        # no samples authored: write the default value instead
        time_code = Usd.TimeCode.Default()
    if prev is not None:
        # preserve the existing value type (e.g. Vec3f vs Vec3d)
        value = type(prev)(value)
    if not time_code.IsDefault():
        omni.usd.copy_timesamples_from_weaker_layer(stage, xform_op.GetAttr())
    xform_op.Set(value, time_code)
def _prepend_xform_op(xform, op_type, prec, time_code, stage):
    """Insert a new xform op of op_type/prec at the front of the prim's op order.

    Implementation: clears the op order, adds the new op, then re-adds every
    previous op with the same type/precision/suffix/inverse flag, copying values
    only for non-inverse ops. Returns the newly created op.
    """
    # print("pre", _get_xform_op_order(xform))
    prev_ops = xform.GetOrderedXformOps()
    xform.SetXformOpOrder([])
    # print("mid", _get_xform_op_order(xform))
    new_op = xform.AddXformOp(op_type, prec)
    for op in prev_ops:
        suffix = get_xform_op_name_suffix(op.GetOpName())
        inverse = op.IsInverseOp()
        new = xform.AddXformOp(op.GetOpType(), op.GetPrecision(),
                               suffix,
                               inverse)
        if not inverse:
            # values are only copied for non-inverse ops
            value = op.Get(time_code)
            if value is not None:
                _set_xform_op_time_code(new, value, time_code, stage)
    # print("post", _get_xform_op_order(xform))
    return new_op
def get_xform_op_precision(t):
    """Pick an xform-op precision from a value's Gf type: double-precision
    types (Matrix4d, Vec3d) map to PrecisionDouble, anything else to Float."""
    is_double = isinstance(t, (Gf.Matrix4d, Gf.Vec3d))
    return UsdGeom.XformOp.PrecisionDouble if is_double else UsdGeom.XformOp.PrecisionFloat
def get_vec3_type_for_matrix4(mat):
    """Vec3 type matching a matrix's precision: Matrix4d -> Vec3d, else Vec3f."""
    return Gf.Vec3d if isinstance(mat, Gf.Matrix4d) else Gf.Vec3f
def make_vec3_for_matrix4(mat, x, y=None, z=None):
    """Build a Vec3 with the precision matching mat, either from a 3-sequence
    passed in x (when y is None) or from separate x, y, z components."""
    vec_type = get_vec3_type_for_matrix4(mat)
    if y is None:
        return vec_type(x[0], x[1], x[2])
    return vec_type(x, y, z)
def _get_xform_op_order(xform):
    """Debug helper: comma-terminated concatenation of ordered xform op names."""
    return "".join(op.GetOpName() + "," for op in xform.GetOrderedXformOps())
# prefix USD uses in xformOpOrder entries that apply an op's inverse
XFORM_OP_INVERSE_PREFIX = "!invert!"
def is_xform_op_name_inverse(op_name):
    """True when op_name carries the inverse-op prefix."""
    return op_name.find(XFORM_OP_INVERSE_PREFIX) == 0
def get_xform_op_name_suffix(op_name):
    """Return the custom suffix of an "xformOp:type:suffix" attribute name,
    stripping any inverse prefix first; "" when there is no suffix."""
    if is_xform_op_name_inverse(op_name):
        op_name = op_name[len(XFORM_OP_INVERSE_PREFIX):]
    if not op_name.startswith("xformOp:"):
        return ""
    parts = op_name.split(":", 2)
    return parts[2] if len(parts) >= 3 else ""
def is_pivot_xform_op_name_suffix(op_name):
    """True when the op name's custom suffix is exactly "pivot".
    (Faster alternative: "xformOp:" in op_name and "pivot" in op_name.)"""
    return get_xform_op_name_suffix(op_name) == "pivot"
def create_edit_context(path, stage):
    """Return a Usd.EditContext targeting the session layer holding a def spec
    for path; otherwise an edit context for the stage's current target.
    Unsafe from threading? No issues so far:
    https://graphics.pixar.com/usd/release/api/class_usd_edit_context.html#details
    """
    layer, spec = omni.usd.find_spec_on_session_or_its_sublayers(stage, path)
    if layer and spec and spec.specifier == Sdf.SpecifierDef:
        return Usd.EditContext(stage, Usd.EditTarget(layer))
    return Usd.EditContext(stage)
| 15,617 | Python | 27.14054 | 126 | 0.570212 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/ui_utils.py | """
Utility UI functions.
"""
from enum import IntEnum
import omni.ui as ui
VERSION = 4
class UiPaletteDark(IntEnum):
    """Colors in 0xAABBGGRR format. All colors with ff alpha if possible"""
    BACK = 0xff23211f  # darker than WINDOW_BACK: general widget background, window title bar
    BACK_SELECTED = 0xff6e6e6e
    BACK_HOVERED = BACK_SELECTED  # alias: hovered shares the selected color
    TEXT = 0xffcccccc
    TEXT_SELECTED = 0xff8b8a8a
    TEXT_DISABLED = 0xff505050
    WINDOW_BACK = 0xff454545  # lighter than BACK: window base color where darker controls are placed
    TOOLTIP_TEXT = 0xff303030
    TOOLTIP_BACK = 0xffaadddd
    RESET = 0xffa07d4f  # field reset button
    TRANSP = 0x00000000  # fully transparent
    TRANSP_NOT_0 = 0x00ffffff  # some widgets collapse width if 0 is passed as a color
# active palette; UiPal_refresh() rebinds it (currently always to UiPaletteDark)
UiPal = UiPaletteDark
def UiPal_refresh():
    """Rebind the module-global UiPal palette (currently always UiPaletteDark)."""
    global UiPal
    UiPal = UiPaletteDark
def create_tooltip(text: str,
                   tooltip_style=None,
                   tooltip_text_style=None):
    """Build tooltip content: a styled rectangle with a label on top.
    None styles fall back to the UiPal tooltip defaults."""
    style = tooltip_style if tooltip_style is not None else {
        "color": UiPal.TOOLTIP_TEXT,
        "background_color": UiPal.TOOLTIP_BACK,
        "margin": -1,
        "border_width": 0,
    }
    text_style = tooltip_text_style if tooltip_text_style is not None else {"margin": 3}
    with ui.ZStack(style=style):
        ui.Rectangle()
        ui.Label(text, style=text_style)
def create_tooltip_fn(text: str,
                      tooltip_style=None,
                      tooltip_text_style=None):
    """Return a zero-argument callable that builds the tooltip when invoked,
    suitable for widget.set_tooltip_fn()."""
    def _build():
        return create_tooltip(text, tooltip_style, tooltip_text_style)
    return _build
def create_reset_button(reset_value,
                        widget_model,
                        widget_set_value_fn,
                        widget_add_value_changed_fn,
                        style_on=None,
                        style_off=None,
                        on_tooltip_text=True,  # True: use default, None: no tooltip
                        ) -> ui.Rectangle:
    """Build a small per-field reset-to-default button next to a widget.

    The button (a ui.Rectangle) is only visible while the widget's value differs
    from reset_value; clicking it calls widget_set_value_fn(reset_value).
    widget_add_value_changed_fn is used to track value changes.
    Returns the clickable rectangle.
    """
    if style_on is None:
        style_on = {
            "background_color": UiPal.RESET,
            "border_radius": 2,
            "color": 0xffffffff
        }
    if style_off is None:
        style_off = {"background_color": UiPal.TEXT_DISABLED}
    if on_tooltip_text is True:
        on_tooltip_text = "Click to reset to default value"
    def update_rect(new_value, *_):
        # value-changed handler: extract a plain value matching reset_value's type
        if type(new_value) is ui.AbstractItemModel:
            new_value = new_value.get_item_value_model()
        if type(reset_value) is bool:
            new_value = new_value.as_bool
        elif type(reset_value) is int:
            new_value = new_value.as_int
        elif type(reset_value) is float:
            new_value = new_value.as_float
        # value changed? display reset button
        rect.visible = new_value != reset_value
    SIZE = 12  # button square, in UI units
    OFF_LEFT_PAD = 3  # left padding of the small "disabled" marker
    OFF_SIZE = 5  # side of the small "disabled" marker
    with ui.VStack(width=0, style={"margin": 0}):
        ui.Spacer()
        with ui.ZStack(width=SIZE, height=SIZE):
            # disabled reset button
            with ui.HStack(width=SIZE, height=SIZE):
                ui.Spacer(width=OFF_LEFT_PAD)
                with ui.VStack(width=SIZE, height=SIZE):
                    ui.Spacer()
                    ui.Rectangle(width=OFF_SIZE, height=OFF_SIZE, name="reset_off",
                                 style=style_off)
                    ui.Spacer()
            # actionable reset button
            rect = ui.Rectangle(
                width=SIZE,
                height=SIZE,
                name="reset",
                alignment=ui.Alignment.V_CENTER,
                style=style_on,
                margin=0)
            if on_tooltip_text is not None:
                rect.set_tooltip_fn(create_tooltip_fn(on_tooltip_text))
            rect.set_mouse_pressed_fn(lambda x, y, b, m: widget_set_value_fn(reset_value))
            # initial rect visibility
            update_rect(widget_model)
        ui.Spacer()
    widget_add_value_changed_fn(update_rect)
    return rect
| 4,015 | Python | 25.077922 | 101 | 0.558655 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/viewport_helper.py | """
+ Coordinate spaces:
- 2D screen coordinate spaces
ui: whole frame area float UI units, only equal to px units when omni.ui.Workspace.get_dpi_scale() is 1. ui = px_units / dpi_scale.
Origin is left-top corner of the frame, 0..ui_size().
(app_ui = ui coordinates in Kit's app coordinates, window left-top is the origin.)
(px: whole frame area integer screen monitor pixels, 0..px_size(). Use ui coords instead for units to scale in high density displays)
01: float 0..1 coordinates covering whole frame area. Origin is left-top corner.
ndc: float -1..+1 Normalized Device Coordinates covering whole frame area. Origin is center, 1,1 is top-right corner of frame.
iscene: coordinates in a SceneView with view and projection transforms both set to identity matrices.
Origin is center, +x,+y is right-top corner. Can span -xy..+xy, where xy=iscene_half().
Fixed aspect ratio: a size displays at the same length in x and y.
render: area where rendering displays with size fitted to frame area, which can occupy whole or only a part.
NDC coords always extend -1..+1, origin is the center, 1,1 is top-right corner of frame.
- 3D world space
world: world space 3D coordinates
+ Coordinate/size conversions:
- 2D screen spaces
conv_iscene_from_render
conv_iscene_from_ndc
conv_iscene_from_01
conv_iscene_from_ui
size_iscene_from_ui
conv_render_from_ndc <-> conv_ndc_from_render
conv_01_from_ui <-> conv_ui_from_01
conv_ndc_from_ui <-> conv_ui_from_ndc
conv_01_from_app_ui, conv_ndc_from_app_ui, conv_ui_from_app_ui
conv_ndc_from_01 <-> conv_01_from_ndc
- 3D <-> 2D spaces
conv_render_from_world
conv_iscene_from_world
pick_ray_from_render
All conv_* methods accept points in Gf.Vec2*/Gf.Vec3* or tuple, but always return Gf.Vec2d/Gf.Vec3d points.
+ SceneView transformations
get_transform_iscene_from_ui
get_transform_iscene_from_render
+ Legacy Viewport:
Extension omni.kit.viewport_legacy (was omni.kit.window.viewport)
_win -> class omni.kit.viewport.utility.legacy_viewport_window.LegacyViewportWindow -> omni.ui.Window
_api -> class omni.kit.viewport.utility.legacy_viewport_api.LegacyViewportAPI
Use _win.legacy_window to get the actual IViewportWindow ->
class omni.kit.viewport_legacy._viewport_legacy.IViewportWindow
set_enabled_picking(), get_mouse_event_stream(), etc
+ Viewport Next
Partially supported. Extensions omni.kit.viewport.window, omni.kit.widget.viewport
+ Notes
- Don't store and always access ViewportHelper's frame or render area sizes as they may change due to user interactions,
even when changing Kit between display monitors.
"""
import asyncio, functools
import carb
import omni.kit
import omni.kit.viewport.utility as vut
import omni.ui as ui
""" Since omni.ui.scene may not not available on Kit's early launch,
if you're launch time errors related with omni.ui.scene, add omni.ui.scene
to your extension dependencies in extension.toml:
[dependencies]
"omni.ui.scene" = {} """
from omni.ui import scene as sc
from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil
# carb.settings keys used by ViewportHelper
SETTING_RENDER_WIDTH = "/app/renderer/resolution/width"
SETTING_RENDER_HEIGHT = "/app/renderer/resolution/height"
SETTING_CONFORM_POLICY = "/app/hydra/aperture/conform"
SETTING_RENDER_FILL_LEGACY = "/app/runLoops/rendering_0/fillResolution"
# template: must be .format(api_id=...) before use
SETTING_RENDER_FILL = "/persistent/app/viewport/{api_id}/fillViewport"
SETTING_DEFAULT_WINDOW_NAME = "/exts/omni.kit.viewport.window/startup/windowName"
class ViewportHelper():
LIB_VERSION = 45
    def __init__(self, window_name=None, attach: bool = True):
        """Create the helper; when attach is True, immediately attach to a
        viewport window (see attach() for window_name semantics).
        Raises AssertionError if the attach fails."""
        self._win = None
        self._api = None
        self._ws_win_frame = None
        # carb.settings subscriptions for render resolution/fill changes
        self._sub_render_width = None
        self._sub_render_height = None
        self._sub_render_fill = None
        self._is_legacy = True
        self._frame_mouse_fns = {}  # frame: set(fn,fn,...)
        self._frame_size_changed_fns = {}  # frame: set(fn,fn,...)
        self._render_changed_fns = set()  # set(fn,fn,...)
        self._stage_objects_changed = None  # [listener, set(fn,fn,...)]
        self._changed_fns = {}  # fn: sub_flags
        if attach:
            res = self.attach(window_name=window_name)
            if not res:
                raise AssertionError("Could not attach")
    def __del__(self):
        # release window references and subscriptions on garbage collection
        self.detach()
def attach(self, window_name=None, usd_context_name: str = '') -> bool:
""" window_name:
str: actual window name/title, like "Viewport"
None: current/last active viewport
int: index into ViewportHelper.get_window_names()
Window selection order: .get_active_viewport_and_window() vut tries to attach "Viewport Next" first,
then legacy "Viewport" windows."""
self.detach()
if window_name is not None:
if type(window_name) is int:
wn_list = ViewportHelper.get_window_names()
if window_name < len(wn_list):
window_name = wn_list[window_name]
else:
raise AssertionError("Non-existent window_name")
else:
raise AssertionError("Bad window_name index")
self._api,self._win = vut.get_active_viewport_and_window(usd_context_name=usd_context_name,
window_name=window_name)
if self._win is None or self._api is None:
self._win = None
self._api = None
self._ws_win = None
self._ws_win_frame = None
return False
if self.stage is None:
raise AssertionError("Stage not available")
self._is_legacy = hasattr(self._api, "legacy_window")
self._ws_win = ui.Workspace.get_window(self._win.name)
if self._ws_win is None:
raise AssertionError("Workspace window not available")
"""
if not self._ws_win.visible:
print("Viewport Window is not visible: can't attach")
self.detach()
return False
"""
if not hasattr(self._ws_win, 'frame'):
self._ws_win_frame = None
raise AssertionError("Workspace window frame not available")
self._ws_win_frame = self._ws_win.frame
return True
    def detach(self):
        """Release window/api references, settings subscriptions and all
        registered handlers. Safe to call when not attached."""
        settings = carb.settings.get_settings()
        if self._sub_render_width:
            settings.unsubscribe_to_change_events(self._sub_render_width)
            self._sub_render_width = None
        if self._sub_render_height:
            settings.unsubscribe_to_change_events(self._sub_render_height)
            self._sub_render_height = None
        if self._sub_render_fill:
            settings.unsubscribe_to_change_events(self._sub_render_fill)
            self._sub_render_fill = None
        if self._win is not None:
            if self._is_legacy:
                # only the legacy wrapper window is destroyed explicitly here
                self._win.destroy()
            self._win = None
            self._api = None
            self._ws_win = None
            self._ws_win_frame = None
        self._frame_mouse_fns.clear()
        self._frame_size_changed_fns.clear()
        self._render_changed_fns.clear()
        self._changed_fns.clear()
        if self._stage_objects_changed is not None:
            if len(self._stage_objects_changed):
                # revoke the Tf listener before dropping the entry
                self._stage_objects_changed[0].Revoke()
            self._stage_objects_changed = None
    @property
    def is_attached(self):
        """True when attach() succeeded and detach() hasn't been called since."""
        return self._win is not None
    @property
    def window_name(self) -> str:
        """Title of the attached viewport window. Only valid while attached."""
        return self._win.name
@staticmethod
def get_default_window_name():
return carb.settings.get_settings().get(SETTING_DEFAULT_WINDOW_NAME) or 'Viewport'
@staticmethod
def get_window_names():
try:
from omni.kit.viewport.window import get_viewport_window_instances
return [w.title for w in get_viewport_window_instances()]
except ImportError:
return [ViewportHelper.get_default_window_name()]
    @property
    def is_legacy(self):
        """True when attached to a legacy viewport (api exposes legacy_window)."""
        return self._is_legacy
    @property
    def camera_path(self) -> Sdf.Path:
        """Path of the active viewport camera prim."""
        return self._api.camera_path
    @camera_path.setter
    def camera_path(self, camera_path):
        self._api.camera_path = camera_path
def get_camera_view_proj(self):
frustum = self.get_conformed_frustum()
if frustum is None:
return None
return frustum.ComputeViewMatrix(), frustum.ComputeProjectionMatrix()
def same_api(self, api) -> bool:
return id(api) == id(self._api)
def get_gf_camera(self):
"""Returns None if no valid prim found."""
cam = self._api.camera_path
stage = self.stage
if stage is None:
raise AssertionError("Stage not available")
cam_prim = stage.GetPrimAtPath( self.camera_path )
if cam_prim and cam_prim.IsValid():
usd_cam = UsdGeom.Camera(cam_prim)
if usd_cam:
return usd_cam.GetCamera()
# fall over
return None
    @property
    def fps(self) -> float:
        """Viewport frames-per-second as reported by the viewport API."""
        return self._api.fps
    @property
    def usd_context_name(self) -> str:
        """Name of the USD context this viewport is bound to."""
        return self._api.usd_context_name
    @property
    def usd_context(self):
        """The omni.usd context object of this viewport."""
        return self._api.usd_context
    @property
    def stage(self):
        """The context's current Usd.Stage (may be None)."""
        return self.usd_context.get_stage()
    def get_frame(self, frame_id: str):
        """Get (or create) the named ui frame overlay of the viewport window."""
        return self._win.get_frame(frame_id)
@property
def ui_size(self):
""" Due to DPI pixel multiplier, can return fractional.
In DPI > 1 displays, this is UI units. Actual display pixels = UI units * omni.ui.Workspace.get_dpi_scale() """
if self._ws_win_frame is not None:
return self._ws_win_frame.computed_width, self._ws_win_frame.computed_height
else:
return 1.,1.
@property
def px_size(self):
""" Returns int size """
ui_size = self.ui_size
dpi_mult = ui.Workspace.get_dpi_scale()
return int(round(ui_size[0] * dpi_mult)), int(round(ui_size[1] * dpi_mult))
@property
def ui_size_ratio(self):
size = self.ui_size
return size[0] / size[1] if size[1] else 1.
@property
def render_size_px(self):
size = self._api.resolution
return (int(size[0]), int(size[1]))
@render_size_px.setter
def render_size_px(self, size):
self._api.resolution = (int(size[0]), int(size[1]))
# render_size width/height ratio
@property
def render_size_ratio(self):
size = self.render_size_px
return size[0] / size[1] if size[1] else 1.
""" ?Also render_rect_px, render_left_top_px """
"""
Kit-103.1.2/3: render_fill_frame get/set does not work coherently
Legacy Viewport: setting fill_frame makes viewport settings "Fill Viewport" disappear
Viewport 2: only works setting to True
Kit 104.0:
Viewport 2: api is not initialized to setting: so we use setting
@property
def render_fill_frame(self):
return self._api.fill_frame
@render_fill_frame.setter
def render_fill_frame(self, value: bool):
self._api.fill_frame = value
"""
@property
def render_fill_frame(self):
if self._is_legacy:
name = SETTING_RENDER_FILL_LEGACY
else:
name = SETTING_RENDER_FILL.format(api_id=self._api.id)
return bool(carb.settings.get_settings().get(name))
@render_fill_frame.setter
def render_fill_frame(self, value: bool):
if self._is_legacy:
name = SETTING_RENDER_FILL_LEGACY
else:
name = SETTING_RENDER_FILL.format(api_id=self._api.id)
carb.settings.get_settings().set(name, value)
def get_conformed_frustum(self):
cam = self.get_gf_camera()
if cam is None:
raise AssertionError("Camera not available")
frustum = cam.frustum
conform_policy = ViewportHelper.get_conform_policy()
CameraUtil.ConformWindow(frustum, conform_policy, self.render_size_ratio)
return frustum
@staticmethod
def get_conform_policy():
"""conform_policy: how is the render area fit into the frame area"""
policy = carb.settings.get_settings().get(SETTING_CONFORM_POLICY)
if policy is None or policy < 0 or policy > 5:
return CameraUtil.MatchHorizontally
else:
policies = [
CameraUtil.MatchVertically,
CameraUtil.MatchHorizontally,
CameraUtil.Fit,
CameraUtil.Crop,
CameraUtil.DontConform,
CameraUtil.DontConform,
]
return policies[policy]
def sync_scene_view(self, scene_view):
"""Must be called after viewport changes or before using a SceneView.
A SceneView's "screen_aspect_ratio" is the ratio of what we call the render space"""
frame_ratio = self.ui_size_ratio
render_ratio = self.render_size_ratio
if False and abs(frame_ratio - render_ratio) < 1e-6: # render equal to frame area: set to 0
ratio = 0
else:
ratio = render_ratio
if scene_view.screen_aspect_ratio != ratio:
scene_view.screen_aspect_ratio = ratio
# print("setup_scene_view asp_rat", scene_view.screen_aspect_ratio)
#====================================================================== coord space conversion
# generic NDC <-> 0..1 conversion
@staticmethod
def conv_ndc_from_01(coord):
return Gf.Vec2d( coord[0]*2. - 1., -(coord[1]*2. - 1.) )
@staticmethod
def conv_01_from_ndc(coord):
return Gf.Vec2d( (coord[0] + 1.) * 0.5, (-coord[1] + 1.) * 0.5)
def conv_01_from_ui(self, coord):
width,height = self.ui_size
return Gf.Vec2d(coord[0] / width, coord[1] / height)
def conv_ui_from_01(self, coord):
width,height = self.ui_size
return Gf.Vec2d(coord[0] * width, coord[1] * height)
def conv_ui_from_app_ui(self, coord):
frame = self._win.frame
return Gf.Vec2d(coord[0] - frame.screen_position_x, coord[1] - frame.screen_position_y)
def conv_01_from_app_ui(self, coord):
frame = self._win.frame
return self.conv_01_from_ui( (coord[0] - frame.screen_position_x, coord[1] - frame.screen_position_y) )
def conv_ndc_from_ui(self, coord):
xy = self.conv_01_from_ui(coord)
return ViewportHelper.conv_ndc_from_01(xy)
def conv_ui_from_ndc(self, coord):
xy = ViewportHelper.conv_01_from_ndc(xy)
return ViewportHelper.conv_ui_from_01(xy)
def conv_ndc_from_app_ui(self, coord):
xy = self.conv_01_from_app_ui(coord)
return ViewportHelper.conv_ndc_from_01(xy)
@property
def _render_from_size_ratios(self):
fr = self.ui_size
frame_ratio = fr[0] / fr[1] if fr[1] else 1.
render_ratio = self.render_size_ratio
if frame_ratio >= render_ratio: # tex vertical -1..+1
return (frame_ratio / render_ratio, 1.)
else: #
return (1., render_ratio / frame_ratio)
# coordinate conversion between frame-NDC and render(NDC) spaces
def conv_render_from_ndc(self, frame_ndc):
mx = frame_ndc[0]
my = frame_ndc[1]
ratios = self._render_from_size_ratios
mx *= ratios[0]
my *= ratios[1]
return Gf.Vec2d(mx, my)
def conv_ndc_from_render(self, render_ndc):
mx,my = self.conv_render_from_ndc(render_ndc)
return Gf.Vec2d(1./mx, 1./my)
def iscene_size(self, scene_view):
w,h = self.iscene_half(scene_view)
return w*2.,h*2.
    def iscene_half(self, scene_view):
        """Half extents (x, y) of the identity-SceneView coordinate space for
        the current frame/render sizes, per the SceneView's aspect_ratio_policy."""
        frame_ratio = self.ui_size_ratio
        render_ratio = self.render_size_ratio
        fills = abs(frame_ratio - render_ratio) < 1e-6  # render fills the frame?
        lands = frame_ratio >= render_ratio  # frame wider than render ("landscape" fit)
        asp_rat = scene_view.aspect_ratio_policy
        # print("fills,lands", fills, lands, frame_ratio, render_ratio)
        if asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT:
            if fills and frame_ratio < 1:
                mul = 1.,1./frame_ratio
            elif lands:
                mul = frame_ratio,1.
            else:
                mul = render_ratio,render_ratio/frame_ratio
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL:
            if lands:
                mul = frame_ratio/render_ratio,1./render_ratio
            else:
                mul = 1.,1./frame_ratio
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            if lands:
                mul = frame_ratio,1.
            else:
                mul = render_ratio,render_ratio/frame_ratio
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_CROP:
            if fills and frame_ratio < 1:
                mul=frame_ratio,1.
            elif lands:
                mul = frame_ratio/render_ratio,1./render_ratio
            elif frame_ratio >= 1:
                mul = 1.,1./frame_ratio
            else:
                # NOTE(review): identical to the branch above — possibly a leftover
                mul = 1,1./frame_ratio
        elif asp_rat == sc.AspectRatioPolicy.STRETCH:
            if frame_ratio >= 1:
                mul = frame_ratio,1.
            else:
                mul = 1,1./frame_ratio
        else:
            # unknown policy: unit half extents
            mul = 1.,1.
        return mul
    def iscene_render_half(self, scene_view):
        """Render half size expressed in iscene coords, per the SceneView's
        aspect_ratio_policy (compare with iscene_half, which covers the frame)."""
        frame_ratio = self.ui_size_ratio
        render_ratio = self.render_size_ratio
        fills = abs(frame_ratio - render_ratio) < 1e-6  # render fills the frame?
        lands = frame_ratio >= render_ratio  # frame wider than render
        asp_rat = scene_view.aspect_ratio_policy
        if asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT:
            if fills and frame_ratio < 1:
                mul = 1.,1./frame_ratio
            else:
                mul = render_ratio,1.
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL:
            mul = 1.,1./render_ratio
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            mul = render_ratio,1.
        elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_CROP:
            if fills and frame_ratio < 1:
                mul=frame_ratio,1.
            else:
                mul = 1.,1./render_ratio
        elif asp_rat == sc.AspectRatioPolicy.STRETCH:
            if fills and frame_ratio < 1:
                mul = 1.,1./render_ratio
            elif lands:
                mul = render_ratio,1.
            elif frame_ratio >= 1:
                mul = frame_ratio,frame_ratio/render_ratio
            else:
                mul = 1.,1./render_ratio
        else:
            # unknown policy: unit half extents
            mul = 1.,1.
        return mul
def conv_iscene_from_render(self, render_pt, scene_view):
mul = self.iscene_render_half(scene_view)
return Gf.Vec2d(render_pt[0] * mul[0], render_pt[1] * mul[1])
def conv_iscene_from_01(self, ui01, scene_view):
size = self.ui_size
pt = ViewportHelper.conv_ndc_from_01(ui01)
mul = self.iscene_half(scene_view)
return Gf.Vec2d(pt[0] * mul[0], pt[1] * mul[1])
def conv_iscene_from_ndc(self, ndc, scene_view):
mul = self.iscene_half(scene_view)
return Gf.Vec2d(ndc[0] * mul[0], ndc[1] * mul[1])
def conv_iscene_from_ui(self, ui_pt, scene_view):
size = self.ui_size
pt = ui_pt[0] / size[0] * 2. - 1., ui_pt[1] / size[1] * 2. - 1. # pt now in NDC
mul = self.iscene_half(scene_view)
return Gf.Vec2d(pt[0] * mul[0], pt[1] * mul[1])
def size_iscene_from_ui(self, ui_size, scene_view):
size = self.ui_size
ui_sz = 2. * ui_size / size[0]
mul = self.iscene_half(scene_view)
return ui_sz * mul[0]
    def get_transform_iscene_from_ui(self, scene_view):
        """Matrix mapping frame UI coords to identity-SceneView coords.
        Composed right-to-left: scale UI into 0..2 with y flipped, translate so
        the origin is the frame center, then scale by the iscene half extents."""
        size_ui = self.ui_size
        iscene_half = self.iscene_half(scene_view)
        return sc.Matrix44.get_scale_matrix(iscene_half[0], iscene_half[1], 1.) * \
            sc.Matrix44.get_translation_matrix(-1., +1., 0) * \
            sc.Matrix44.get_scale_matrix(2./size_ui[0], -2./size_ui[1], 1.)
def get_transform_iscene_from_render(self, scene_view):
iscene_render_half = self.iscene_render_half(scene_view)
return sc.Matrix44.get_scale_matrix(iscene_render_half[0], iscene_render_half[1], 1.)
#====================================================================== 3D world <-> 2D screen conversion
def pick_ray_from_render(self, render_ndc, frustum=None):
if frustum is None:
frustum = self.get_conformed_frustum()
pos = Gf.Vec2d(render_ndc[0],render_ndc[1])
return frustum.ComputePickRay(pos)
"""
From frame space NDC coords example:
x,y = self.conv_render_from_ndc(frame_ndc)
if x is None or x < -1.0 or x > 1.0 or y < -1.0 or y > 1.0:
return None
return get_pick_ray((x,y))
"""
def conv_render_from_world(self, wpt):
""" wpt can be Gd.Vec3*, (x,y,z), single value or list
returns Gf.Vec2d, single value or list NDC coords
"""
view,proj = self.get_camera_view_proj()
mat = view*proj
if isinstance(wpt, list):
wpt_list=wpt
else:
wpt_list=[wpt]
rpt = []
for pt in wpt_list:
r = mat.Transform( Gf.Vec3d(pt[0],pt[1],pt[2]) )
rpt.append(r)
if isinstance(wpt, list):
return rpt
else:
return rpt[0]
def conv_iscene_from_world(self, wpt, scene_view):
""" wpt can be Gd.Vec3*, (x,y,z) or list. Not single value.
returns Gf.Vec2d, single value or list NDC coords
"""
view,proj = self.get_camera_view_proj()
mat = view*proj
if isinstance(wpt, list):
wpt_list=wpt
else:
wpt_list=[wpt]
mul = self.iscene_render_half(scene_view)
spt = []
for pt in wpt_list:
r = mat.Transform( Gf.Vec3d(pt[0],pt[1],pt[2]) )
s = Gf.Vec2d(r[0] * mul[0], r[1] * mul[1])
spt.append(s)
if isinstance(wpt, list):
return spt
else:
return spt[0]
    def add_frame_mouse_fn(self, frame, fn, coord_space=0):
        """Register fn to receive mouse events from a viewport frame.

        Called function params:
        op:
            0=press
            1=move
            2=release
            3=double click
            4=mouse wheel
            5=mouse hovered (entered) frame
        x,y: coordinates inside frame, depending on coord_space:
            0=01 space
            1=ui space
            2=ndc space
            3=render space
        button:
            0=left
            1=right
            2=middle
        mod flags:
            1=shift
            2=ctrl
            4=alt
            (6=altGr = ctrl + alt)
            0x40000000=unknown during move and release
        """
        if not frame in self._frame_mouse_fns:
            self._frame_mouse_fns[frame] = set()
        fnlist = self._frame_mouse_fns[frame]
        if fn in fnlist:
            return
        fnlist.add(fn)
        # tracks which button started a drag so move events can report it
        last_button_pressed = None
        def dispatch(op, x,y, button, mod):
            # fan the event out to every registered handler for this frame
            for fn in fnlist:
                fn(op, x,y, button, mod)
        def to_space(x,y):
            # convert app-UI coords into the requested coord_space
            if coord_space <= 1:
                p01 = self.conv_01_from_app_ui((x,y))
                if coord_space == 0:
                    return p01
                else:
                    return self.conv_ui_from_01(p01)
            else:
                pndc = self.conv_ndc_from_app_ui((x,y))
                if coord_space == 2:
                    return pndc
                else:
                    return self.conv_render_from_ndc(pndc)
        def on_mouse_pressed(x,y, button, mod):
            nonlocal last_button_pressed
            x,y = to_space(x,y)
            dispatch(0, x,y, button, mod)
            last_button_pressed = button
        def on_mouse_moved(x,y, mod, unknown_always_true):  # on move: x,y can go outside 0,1
            x,y = to_space(x,y)
            dispatch(1, x,y, last_button_pressed, mod)
        def on_mouse_released(x,y, button, mod):
            nonlocal last_button_pressed
            x,y = to_space(x,y)
            dispatch(2, x,y, button, mod)
            last_button_pressed = None
        def on_mouse_double_clicked(x,y, button, mod):
            x,y = to_space(x,y)
            dispatch(3, x,y, button, mod)
        def on_mouse_wheel(x,y, mod):
            # wheel x,y are passed through without coordinate conversion
            dispatch(4, x,y, None, mod)
        def on_mouse_hovered(entered):  # x=entered info
            dispatch(5, entered, None, None, None)
        frame.set_mouse_pressed_fn(on_mouse_pressed)
        frame.set_mouse_moved_fn(on_mouse_moved)
        frame.set_mouse_released_fn(on_mouse_released)
        frame.set_mouse_double_clicked_fn(on_mouse_double_clicked)
        frame.set_mouse_wheel_fn(on_mouse_wheel)
        frame.set_mouse_hovered_fn(on_mouse_hovered)
def add_frame_size_changed_fn(self, frame, fn):
    """Register fn() to run whenever frame's computed content size changes.

    The first registration for a frame installs one dispatcher callback on
    the frame; subsequent registrations just join that frame's handler set.
    """
    handlers = self._frame_size_changed_fns.get(frame)
    if handlers is None:
        def _dispatch():
            # The frame may have been unregistered after the callback was installed.
            for handler in self._frame_size_changed_fns.get(frame, ()):
                handler()
        frame.set_computed_content_size_changed_fn(_dispatch)
        handlers = set()
        self._frame_size_changed_fns[frame] = handlers
    handlers.add(fn)
def remove_frame_size_changed_fn(self, frame, fn):
    """Unregister a handler added with add_frame_size_changed_fn().

    Unknown frames and unknown handlers are silently ignored.
    """
    handlers = self._frame_size_changed_fns.get(frame)
    if handlers is not None:
        handlers.discard(fn)
def add_render_changed_fn(self, fn):
    """Call fn handler on render resolution or fill mode changed"""
    # First caller installs the three carb.settings subscriptions (width,
    # height, fill); later callers just join the handler set.
    if self._sub_render_width is None:
        def on_render_changed(*args):
            """ will render resolution/frame_fill take a frame to reflect """
            # Defer the notification one app update so the new render values
            # are actually in effect when handlers query them.
            async def async_func():
                await omni.kit.app.get_app().next_update_async()
                for fn in self._render_changed_fns:
                    fn()
            asyncio.ensure_future( async_func() )
        settings = carb.settings.get_settings()
        self._sub_render_width = settings.subscribe_to_node_change_events(SETTING_RENDER_WIDTH, on_render_changed)
        self._sub_render_height = settings.subscribe_to_node_change_events(SETTING_RENDER_HEIGHT, on_render_changed)
        self._sub_render_fill = settings.subscribe_to_node_change_events(SETTING_RENDER_FILL, on_render_changed)
    self._render_changed_fns.add(fn)
def remove_render_changed_fn(self, fn):
    """Unregister a render-changed handler; a no-op if nothing was ever subscribed."""
    if self._sub_render_width is None:
        return
    self._render_changed_fns.discard(fn)
def add_camera_changed_fn(self, fn):
    """Call fn handler when USD camera changes"""
    # Lazily register a single Tf.Notice listener for the stage; all handlers
    # share it via the [listener, handler_set] pair.
    if self._stage_objects_changed is None:
        # handler needs to be a method as Register won't hold reference to a local function
        listener = Tf.Notice.Register( Usd.Notice.ObjectsChanged, self._on_stage_objects_changed, self.stage)
        self._stage_objects_changed = [listener, set()]
    val = self._stage_objects_changed
    val[1].add(fn)
def _on_stage_objects_changed(self, notice, stage):
    # Tf.Notice handler: filter ObjectsChanged notices down to changes on the
    # active camera prim, notifying subscribers at most once per notice.
    if stage != self.stage or self._stage_objects_changed is None:
        return
    # did active camera change?
    cam_path = self.camera_path
    for n in notice.GetChangedInfoOnlyPaths():
        if n.GetPrimPath() == cam_path: # found camera
            for fn in self._stage_objects_changed[1]:
                fn()
            return
def remove_camera_changed_fn(self, fn):
    """Unregister a USD-camera-changed handler; a no-op if never subscribed."""
    state = self._stage_objects_changed
    if state is not None:
        state[1].discard(fn)
def add_changed_fn(self, fn, sub_flags = 1|2|4, frame = None):
    """Call handler on frame, render or camera changes, depending on sub_flags mask.

    sub_flags: 1=frame size changed (requires frame param), 2=render changed,
    4=camera changed.
    fn(changed_flag)
    Raises AssertionError when flag 1 is requested without a frame.
    """
    self._changed_fns[fn] = sub_flags  # last registration for fn wins
    # Re-subscribing is harmless: the add_* helpers de-duplicate stable
    # callables (they would not de-duplicate fresh lambdas).
    wants_frame = bool(sub_flags & 1)
    if wants_frame and frame is None:
        raise AssertionError("Frame size changed: frame parameter cannot be None")
    if wants_frame:
        self.add_frame_size_changed_fn(frame, self._on_frame_changed)
    if sub_flags & 2:
        self.add_render_changed_fn(self._on_render_changed)
    if sub_flags & 4:
        self.add_camera_changed_fn(self._on_camera_changed)
def _on_frame_changed(self):
    # Shared internal handler: frame size change -> changed_flag 1.
    self._on_changed(1)
def _on_render_changed(self):
    # Shared internal handler: render settings change -> changed_flag 2.
    self._on_changed(2)
def _on_camera_changed(self):
    # Shared internal handler: USD camera change -> changed_flag 4.
    self._on_changed(4)
def _on_changed(self, changed_flag):
for fn, mask in self._changed_fns.items():
if mask & changed_flag:
fn(changed_flag)
def remove_changed_fn(self, fn, frame):
    """Unregister fn from change notifications.

    frame must be non-None when fn was subscribed to frame-size changes.
    When the last handler is removed, the shared internal subscriptions are
    torn down as well.
    """
    mask = self._changed_fns.get(fn)
    if mask is None:
        return
    if mask & 1 and frame is None:
        raise AssertionError("Frame size changed: frame parameter cannot be None")
    del self._changed_fns[fn]
    if self._changed_fns:
        return  # other handlers remain: keep the shared subscriptions alive
    if frame is not None:
        self.remove_frame_size_changed_fn(frame, self._on_frame_changed)
    self.remove_render_changed_fn(self._on_render_changed)
    self.remove_camera_changed_fn(self._on_camera_changed)
def add_scene_view_update(self, scene_view):
    # Have the viewport keep scene_view's camera/projection state in sync.
    self._api.add_scene_view(scene_view)
def remove_scene_view_update(self, scene_view):
    # Stop syncing scene_view with the viewport.
    self._api.remove_scene_view(scene_view)
def register_scene(self, scene_creator,
                   ext_id_or_name: str):
    """Registers a scene creator into:
    VP1: a viewport window, where scene is immediately created
    VP2: calls RegisterScene with omni.kit.viewport.registry, to create scene in
    current (full window) viewports and any new ones.

    scene_creator object created with: scene_creator_class(dict)
        VP1 dict = {viewport_api}
        VP2 dict = {viewport_api: omni.kit.viewport.window.ViewportAPI,
                    layer_provider: omni.kit.viewport.window.ViewportLayers,
                    usd_context_name: str}

    Returns a registration list for unregister_scene():
        VP1 -> [scene_view, scene_creator_instance]
        VP2 -> [registry_handle], or None if the registry is unavailable.
    """
    if self.is_legacy:
        with self._win.get_frame(ext_id_or_name):
            scene_view = sc.SceneView()
            with scene_view.scene:
                sce = scene_creator({"viewport_api": self._api})
            # have viewport update our SceneView
            self.add_scene_view_update(scene_view)
        return [scene_view, sce]
    else:
        try:
            from omni.kit.viewport.registry import RegisterScene
            scene_reg = RegisterScene(scene_creator, ext_id_or_name)
            return [scene_reg]
        except ImportError:
            # omni.kit.viewport.registry not present in this app
            return None
def register_scene_proxy(self, create_fn, destroy_fn, get_visible_fn, set_visible_fn,
                         ext_id_or_name: str):
    """Like register_scene(), but built from plain callbacks via SceneCreatorProxy."""
    lamb = ViewportHelper.SceneCreatorProxy.make_lambda(create_fn, destroy_fn, get_visible_fn, set_visible_fn)
    return self.register_scene(lamb, ext_id_or_name)
def unregister_scene(self, scene_reg):
    """Undo register_scene(); accepts and ignores None or empty registrations."""
    if not scene_reg:
        return
    if self.is_legacy:
        view = scene_reg[0]
        self.remove_scene_view_update(view)
        view.destroy()
    scene_reg.clear()
class SceneCreatorProxy:
    """Adapts plain create/destroy/visibility callbacks to the scene-creator
    object interface expected by the viewport scene registry.

    @ATTN: a scene may be created in multiple viewports. It's up to the
    create_fn() callee to make sure it's being called in the intended
    viewport by checking vp_args['viewport_api'].
    """
    @staticmethod
    def make_lambda(create_fn, destroy_fn, get_visible_fn, set_visible_fn):
        # Factory returning the callable the registry invokes once per viewport.
        def creator(vp_args):
            return ViewportHelper.SceneCreatorProxy(
                vp_args, create_fn, destroy_fn, get_visible_fn, set_visible_fn)
        return creator
    def __init__(self, vp_args: dict,
                 create_fn, destroy_fn, get_visible_fn, set_visible_fn):
        # vp_args keys (VP2): usd_context_name, layer_provider, viewport_api
        self._create_fn = create_fn
        self._destroy_fn = destroy_fn
        self._get_visible_fn = get_visible_fn
        self._set_visible_fn = set_visible_fn
        self._create_fn(vp_args)
    def destroy(self):
        # Safe to call repeatedly: all callbacks are dropped after the first call.
        if self._destroy_fn:
            self._destroy_fn()
        self._create_fn = self._destroy_fn = None
        self._get_visible_fn = self._set_visible_fn = None
    def __del__(self):
        self.destroy()
    # visible is queried/set by the viewport registry
    @property
    def visible(self):
        getter = self._get_visible_fn
        return getter() if getter else True
    @visible.setter
    def visible(self, value: bool):
        if self._set_visible_fn:
            return self._set_visible_fn(value)
@property
def picking_enabled(self):
    """Object picking and selection rect state.

    Legacy viewport: queried from the window. VP2 exposes no such query,
    so True is assumed.
    """
    if self._is_legacy:
        # FIX: the legacy query result was computed but never returned,
        # making the property always yield None on the legacy viewport.
        return self._win.legacy_window.is_enabled_picking()
    else:
        # print("picking_enabled only supported for legacy viewport")
        return True
@picking_enabled.setter
def picking_enabled(self, enabled):
    """Disables object picking and selection rect."""
    if self._is_legacy:
        self._win.legacy_window.set_enabled_picking(enabled)
    else:
        # print("picking_enabled only supported for legacy viewport")
        pass  # VP2 exposes no equivalent toggle: silently ignored
def temp_select_enabled(self, enable_picking):
    """Disable object picking and the selection rect until the next mouse up.

    enable_picking: keep picking enabled for surface snapping.
    Only effective on the legacy viewport; a silent no-op otherwise.
    """
    if not self._is_legacy:
        # temp_select_enabled only supported for legacy viewport
        return
    self._win.legacy_window.disable_selection_rect(enable_picking)
@property
def manipulating_camera(self):
    """True while the user is dragging the camera (legacy viewport only;
    always False on VP2, which offers no such query)."""
    if not self._is_legacy:
        return False
    return self._win.legacy_window.is_manipulating_camera()
def save_render(self, file_path: str, render_product_path: str = None):
    """Capture the viewport render to file_path.

    Doesn't save any overlaid SceneView drawing.
    render_product_path: optional explicit render product to capture
    (default: the viewport's current product).
    """
    vut.capture_viewport_to_file(self._api,
                                 file_path=file_path,
                                 is_hdr=False,
                                 render_product_path=render_product_path)
def info(self, scene_view=None):
    """Return a multi-line human-readable dump of window/render/camera state.

    scene_view: optional sc.SceneView to include per-view metrics for.
    """
    # FIX: the closing quote after {self.usd_context_name} was missing,
    # producing malformed "usd_context_name='name api_id=..." output.
    out = f"window_name='{self.window_name}' is_legacy={self.is_legacy} usd_context_name='{self.usd_context_name}' api_id='{self._api.id}'\n"
    out += f"ui_size={self.ui_size} dpi={omni.ui.Workspace.get_dpi_scale()} px_size={self.px_size} ui_size_ratio={self.ui_size_ratio}\n"
    out += f"render_size_px={self.render_size_px} render_fill_frame={self.render_fill_frame} render_ratio={self.render_size_ratio}\n"
    if scene_view is not None:
        out += f"iscene_half={self.iscene_half(scene_view)} iscene_size={self.iscene_size(scene_view)} iscene_render_half={self.iscene_render_half(scene_view)}\n"
    out += f"camera_path='{self.camera_path}'\n"
    out += f"camera frustrum={self.get_conformed_frustum()}\n"
    view, proj = self.get_camera_view_proj()
    out += f"camera matrixes: view={view} proj={proj}\n"
    out += f"conform_policy={self.get_conform_policy()}\n"
    if scene_view is not None:
        out += f"scene_view aspect_ratio={scene_view.aspect_ratio_policy}\n"
    out += f"fps={self.fps}\n"
    return out
"""Examples:
vp = ViewportHelper()
res = vp.attach() # "Viewport" "Viewport Next"
print(f"attach res={res}")
frame = vp.get_frame("id")
#frame.clear()
#with frame:
# with ui.VStack():
# ui.Spacer()
# ui.Label("LABEL", alignment=ui.Alignment.CENTER, style={"font_size": 72})
# ui.Button("TO")
# ui.Spacer()
print (vp.info())
#vp.camera_path = "OmniverseKit_Top" # OmniverseKit_Persp
vp.save_render("c:/tmp/vp.png")
""" | 37,345 | Python | 29.045052 | 166 | 0.578525 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/app_helper.py | """"""
import asyncio, functools, sys
import os.path
import carb
import omni.kit
class AppHelper():
    """Small facade over omni.kit app / window / settings / input services.

    Centralizes update-event subscriptions, settings-changed callbacks and
    global key-action bindings, with add/remove APIs that de-duplicate
    handler functions.
    """
    VERSION = 10
    SETTING_TRANSFORM_OP = "/app/transform/operation"
    def __init__(self, attach=True):
        """attach: immediately attach to the running app; raises AssertionError on failure."""
        self._app = None
        self._settings = None
        self._setting_changed = {} # {"setting_path": [subs, set(fn0,fn1,...)], }
        self._input = None
        self._update_event_sub = None
        self._update_event_fns = set()
        self._key_action_subs = {} # {"action_name": [sub, [(fn0,fn1), (fn1,fn2), ...]] }
        if attach:
            res = self.attach()
            if not res:
                raise AssertionError("Could not attach")
    def __del__(self):
        self.detach()
    def attach(self) -> bool:
        """Acquire the app, default app window and settings interfaces."""
        self.detach()
        self._app = omni.kit.app.get_app() # omni.kit.app
        self._app_win = omni.appwindow.get_default_app_window() # omni.appwindow
        self._settings = carb.settings.get_settings()
        return True
    def detach(self):
        """Drop all subscriptions and interface references (idempotent)."""
        self._update_event_sub = None
        self._update_event_fns.clear()
        for v in self._setting_changed.values():
            self._settings.unsubscribe_to_change_events(v[0])
        self._setting_changed = {}
        self._settings = None
        if self._input is not None:
            for v in self._key_action_subs.values():
                self._input.unsubscribe_to_action_events(v[0])
            self._key_action_subs = {}
            self._input = None
        if self._app is not None:
            self._app = None
    def add_update_event_fn(self, fn, order=0, subscription_name=None):
        """ 0=NEW_FRAME """
        # First caller creates the single shared subscription (order and
        # subscription_name only apply then); later callers join the set.
        if self._update_event_sub is None:
            def on_update(ev):
                for fn in self._update_event_fns:
                    fn(ev)
            self._update_event_sub = self._app.get_update_event_stream().create_subscription_to_pop(on_update,
                                                                                                    order=order,
                                                                                                    name=subscription_name)
            self._update_event_fns.clear()
        self._update_event_fns.add(fn)
    def remove_update_event_fn(self, fn, event_type=-1):
        # NOTE(review): event_type is accepted but unused here — presumably
        # kept for signature symmetry; verify before relying on it.
        if self._update_event_sub:
            self._update_event_fns.discard(fn)
    def add_setting_changed_fn(self, setting_path, fn):
        """ fn(value, event_type) """
        # One carb.settings subscription per path; handlers share it.
        if not setting_path in self._setting_changed:
            def on_changed(item, event_type):
                fns = self._setting_changed[setting_path][1]
                for fn in fns:
                    fn(str(item), event_type)
            self._setting_changed[setting_path] = [None, set()]
            self._setting_changed[setting_path][0] = self._settings.subscribe_to_node_change_events(setting_path, on_changed)
        s = self._setting_changed[setting_path][1]
        s.add(fn)
    def get_setting(self, setting_path):
        # Value is coerced to str regardless of its stored type.
        return str( self._settings.get(setting_path) )
    def set_setting(self, setting_path, value):
        self._settings.set(setting_path, value)
    def add_key_action_fn(self, action_name, key, key_modifiers, on_key_fn, is_key_enabled_fn=None):
        """ key_modifiers: 1=shift, 2=ctrl, alt=4"""
        # Already bound: just append the (handler, enabled-predicate) pair if new.
        if action_name in self._key_action_subs:
            sub = self._key_action_subs[action_name]
            if not (on_key_fn, is_key_enabled_fn) in sub[1]: # append only if fn pair not already there
                sub[1].append((on_key_fn, is_key_enabled_fn))
            return
        if self._input is None:
            self._input = carb.input.acquire_input_interface()
        set_path = self._app_win.get_action_mapping_set_path()
        set = self._input.get_action_mapping_set_by_path(set_path)
        string = carb.input.get_string_from_action_mapping_desc(key, key_modifiers)
        path = set_path + "/" + action_name + "/0"
        self._settings.set_default_string(path, string)
        def on_action(action_name, event, *_):
            if not event.flags & carb.input.BUTTON_FLAG_PRESSED:
                return
            if not action_name in self._key_action_subs:
                return
            try: # avoid keys pressed during camera manipulation
                import omni.kit.viewport_legacy
                vp = omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window()
                if vp.is_manipulating_camera():
                    return
            except Exception:
                pass
            sub = self._key_action_subs[action_name]
            for on_key_fn,is_key_enabled_fn in sub[1]:
                if is_key_enabled_fn is not None:
                    if not is_key_enabled_fn():
                        continue
                on_key_fn()
        sub = [self._input.subscribe_to_action_events(set, action_name, functools.partial(on_action, action_name)),
               [(on_key_fn, is_key_enabled_fn)]]
        self._key_action_subs[action_name] = sub
| 5,108 | Python | 26.320855 | 125 | 0.536022 |
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/usd_helper.py | """
Notes:
"""
import omni.kit
import omni.usd
from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil
from .app_utils import call_after_update
class UsdHelper():
VERSION = 17
STAGE_CHANGED_SUB_PREFIX = "UsdHelper-stage-changed-ev"
def __init__(self, attach=True, stage_opened_refresh=1 | 2):
"""
stage_opened_refresh: resubscribe events when a new stage finishes opening. A mask of:
1: resubscribe add_stage_event_fn handlers
2: resubscribe add_stage_objects_changed_fn handlers
"""
self._ctx = None
self._stage_changed = {} # event_type: [sub, set(fn,fn,...)]
self._stage_objects_changed = None # [listener, set(fn,fn,...)]
self._stage_opened_refresh = stage_opened_refresh
self._stage_opened_refresh_sub = None
if attach:
res = self.attach()
if not res:
raise AssertionError("Could not attach")
def __del__(self):
self.detach()
def attach(self, usd_ctx=None) -> bool:
"""usd_ctx can be a string for context name, or an existing UsdContext."""
self.detach()
if usd_ctx is None:
usd_ctx = ''
if isinstance(usd_ctx, str):
self._ctx = omni.usd.get_context(usd_ctx)
else:
self._ctx = usd_ctx
if self._stage_opened_refresh:
self.add_stage_event_fn(self._on_stage_opened_refresh,
omni.usd.StageEventType.OPENED)
return True
def detach(self):
if self._ctx is not None:
self._ctx = None
if self._stage_objects_changed is not None:
if len(self._stage_objects_changed):
self._stage_objects_changed[0].Revoke()
self._stage_objects_changed = None
self._stage_changed.clear()
self._stage_opened_refresh_sub = None
@property
def context(self):
return self._ctx
@property
def stage(self):
return self._ctx.get_stage()
@property
def stage_state(self) -> omni.usd.StageState:
return self._ctx.get_stage_state()
def is_stage_opened(self) -> bool:
return self.stage_state == omni.usd.StageState.OPENED
@property
def stage_up(self):
up = UsdGeom.GetStageUpAxis(self.stage)
if up == UsdGeom.Tokens.y:
return Gf.Vec3d(0, 1, 0)
elif up == UsdGeom.Tokens.z:
return Gf.Vec3d(0, 0, 1)
else: # UsdGeom.Tokens.x
return Gf.Vec3d(1, 0, 0)
@property
def stage_up_index(self):
up = UsdGeom.GetStageUpAxis(self.stage)
if up == UsdGeom.Tokens.y:
return 1
elif up == UsdGeom.Tokens.z:
return 2
else: # UsdGeom.Tokens.x: illegal
return 0
@property
def timecode(self) -> Usd.TimeCode:
stage = self.stage
"""
if stage.HasAuthoredTimeCodeRange(): -> wrong: a stage might not have timeCodes authored,
but its references may have.
Using Usd.TimeCode.Default() in xform_cache.GetLocalTransformation(prim) won't fetch correct matrices
for time_coded prims
"""
time = omni.timeline.get_timeline_interface().get_current_time()
ret = Usd.TimeCode(omni.usd.get_frame_time_code(time, stage.GetTimeCodesPerSecond()))
# or ret = Usd.TimeCode( time * stage.GetTimeCodesPerSecond() )
return ret
def add_stage_event_fn(self, fn, event_type=-1):
"""
Doesn't depend on open stage and remains after closing-opening.
Arg event_type = -1 to accept all, otherwise a single event of type omni.usd.StageEventType.*: (@Kit103)
0=SAVED
1=SAVE_FAILED
2=OPENING
3=OPENED
4=OPEN_FAILED
5=CLOSING
6=CLOSED
7=SELECTION_CHANGED
8=ASSETS_LOADED
9=ASSETS_LOAD_ABORTED
10=GIZMO_TRACKING_CHANGED
11=MDL_PARAM_LOADED
12=SETTINGS_LOADED
13=SETTINGS_SAVING
14=OMNIGRAPH_START_PLAY
15=OMNIGRAPH_STOP_PLAY
16=SIMULATION_START_PLAY
17=SIMULATION_STOP_PLAY
18=ANIMATION_START_PLAY
19=ANIMATION_STOP_PLAY
20=DIRTY_STATE_CHANGED
"""
event_type = int(event_type)
if event_type not in self._stage_changed:
sub = self._sub_stage_event(event_type)
self._stage_changed[event_type] = [sub, set()]
ch = self._stage_changed[event_type]
ch[1].add(fn)
def _sub_stage_event(self, event_type):
sub_name = UsdHelper.STAGE_CHANGED_SUB_PREFIX + str(event_type)
lamb = lambda ev: self._on_stage_event(ev, event_type)
if event_type == -1:
sub = self._ctx.get_stage_event_stream().create_subscription_to_pop(lamb,
name=sub_name)
else:
sub = self._ctx.get_stage_event_stream().create_subscription_to_pop_by_type(event_type,
lamb,
name=sub_name)
return sub
def _on_stage_event(self, ev, target_event_type):
# print("_on_stage_event", ev.type, target_event_type)
if target_event_type in self._stage_changed:
for fn in self._stage_changed[target_event_type][1]:
fn(ev)
def remove_stage_event_fn(self, fn, event_type=-1):
"""
Don't call from fn or will get:
RuntimeError: Set changed size during iteration
"""
if event_type in self._stage_changed:
ch = self._stage_changed[event_type]
ch[1].discard(fn)
def _on_stage_opened_refresh(self, ev):
# print("_on_stage_opened_refresh", ev.type)
def resub():
if self._stage_opened_refresh & 1:
# print("resub _stage_changed")
for event_type in self._stage_changed:
ch = self._stage_changed[event_type]
ch[0] = self._sub_stage_event(event_type)
if self._stage_opened_refresh & 2 and self._stage_objects_changed is not None:
# print("resub _stage_objects_changed")
self._stage_objects_changed[0] = self._sub_stage_objects_changed()
call_after_update(resub)
def add_stage_objects_changed_fn(self, fn):
# print("add_stage_objects_changed_fn")
"""
Depends on stage: if closed must call remove_stage_objects_changed_fn(), then on stage opened call add_stage_objects_changed_fn again.
From https://graphics.pixar.com/usd/dev/api/class_usd_notice_1_1_objects_changed.html:
Usd.Notice.ObjectsChanged: Object changes, either "resync" or "changed-info".
"Resyncs" are potentially structural changes that invalidate entire subtrees of UsdObjects (including prims and properties).
For example, if the path "/foo" is resynced, then all subpaths like "/foo/bar" and "/foo/bar.baz" may be arbitrarily changed.
When a prim is resynced, say "/foo/bar", it might have been created or destroyed. In that case "/foo"'s list of children will have changed, but we do not consider "/foo" to be resynced. If we did, it would mean clients would have to consider all of "/foo/bar"'s siblings (and their descendants) to be resynced which might be egregious overinvalidation.
In contrast, "changed-info" means that a nonstructural change has occurred, like an attribute value change or a value change to a metadata field not related to composition.
This notice provides API for two client use-cases. Clients interested in testing whether specific objects are affected by the changes should use the AffectedObject()
method (and the ResyncedObject() and ChangedInfoOnly() methods).
Clients that wish to reason about all changes as a whole should use the GetResyncedPaths() and GetChangedInfoOnlyPaths() methods.
fn(notice: Tf.notice) can call notice.GetChangedInfoOnlyPaths()
"""
if self._stage_objects_changed is None:
# handler needs to be a method as Register won't hold reference to a local function
listener = self._sub_stage_objects_changed()
self._stage_objects_changed = [listener, set()]
val = self._stage_objects_changed
val[1].add(fn)
# print("add")
def _sub_stage_objects_changed(self):
return Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_stage_objects_changed, self.stage)
def _on_stage_objects_changed(self, notice, stage):
if stage != self.stage or self._stage_objects_changed is None:
return
for fn in self._stage_objects_changed[1]:
fn(notice)
def remove_stage_objects_changed_fn(self, fn):
# print("remove_stage_objects_changed_fn")
if self._stage_objects_changed is not None:
val = self._stage_objects_changed
val[1].discard(fn)
# print("discard")
def get_selected_prim_paths(self):
sel = self.get_selection()
return sel.get_selected_prim_paths()
def set_selected_prim_paths(self, paths, expand_in_stage=False):
sel = self.get_selection()
sel.set_selected_prim_paths(paths, expand_in_stage)
def get_selection(self):
return self._ctx.get_selection()
def set_pickable(self, enabled, prim_path="/"):
    """If disabled, Kit will still display selection rects but nothing will be selected."""
    # Note the argument order swap: this wrapper takes (enabled, prim_path) but the
    # underlying context API takes (prim_path, enabled).
    self._ctx.set_pickable(prim_path, enabled)
"""
Timeline events
stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event)
0=PLAY
1=PAUSE
2=STOP
3=CURRENT_TIME_CHANGED
4=CURRENT_TIME_TICKED
5=LOOP_MODE_CHANGED
6=START_TIME_CHANGED
7=END_TIME_CHANGED
8=TIME_CODE_PER_SECOND_CHANGED
9=AUTO_UPDATE_CHANGED
10=PREROLLING_CHANGED
""" | 10,223 | Python | 28.80758 | 360 | 0.60002 |
syntway/model_exploder/exts/syntway.model_exploder/config/extension.toml | [package]
# Semantic versioning: https://semver.org/
version = "0.9.5"
title = "Model Exploder"
description="Separate model parts to view their relationship and how they fit together."
authors = ["Syntway"]
category = "Tools"
keywords = ["kit", "tool", "tools", "util", "utils", "explode model", "exploded view"]
# repository = "https://github.com/syntway/model_exploder"
icon = "data/icons/ext.png"
preview_image = "data/preview.png"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.viewport.utility" = {}
"omni.ui.scene" = {}
"omni.usd" = {}
[[python.module]]
name = "syntway.model_exploder"
| 659 | TOML | 20.999999 | 88 | 0.68437 |
syntway/model_exploder/exts/syntway.model_exploder/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.9.5] - 2024-04-12
### Changed
- Fix deprecated SDF.Begin/EndChangeBlock() reference.
- Bump version.
## [0.9.4] - 2022-12-07
### Changed
- Moved menu entry to the Window top-level menu, because some apps hide the Tools menu, like Code.
## [0.9.3] - 2022-12-04
### Changed
- New UiPal for simpler color handling. Changed styles and supporting code to use it.
- ReadMe doc updated.
## [0.9.2] - 2022-11-12
### Changed
- Compatible with the multiple viewports of Create 2022.3. The initial bounds and center manipulator work in the viewport which is active when the tool window opens.
- Initial Bounds Visibility displays in mid-grey to be visible in bright backgrounds.
- Info button link changed to Syntway's website.
## [0.9.1] - 2022-10-29
### Added
- First public release.
| 968 | Markdown | 32.413792 | 165 | 0.722107 |
syntway/model_exploder/exts/syntway.model_exploder/docs/README.md | # Model Exploder Tool
Model Exploder separates a 3D model into its parts for a better view of their relationship and how they fit together.
Model separation is done as if by a small controlled explosion emanating from its center. This is often known as an exploded-view of the model.
Exploded-views can be used to understand a model from its components and can also be used to create drawings for parts catalogs or assembly/maintenance/instruction information.
## Features
- Easy to use: select a model, click the Use button and move the Distance slider.
- Includes several ways to explode the model around a central point, axis or plane.
- Interactive editing of the explosion center: just drag the "Center" manipulator in the viewport.
- Works with meshes, USD Shapes, references/payloads. Point instances and skeletons are moved as a whole.
- Adds Undo-Redo state when applying changes.
- Works with NVIDIA's Omniverse Create, Code 2022+ or any other Kit-based apps. Compatible with multiple viewports and with the legacy viewport of older Omniverse versions.
### Tips
- Model Exploder is available in the Window menu.
- Click the ( i ) button for help and more information.
- On complex models, the first interaction with the Distance slider might take a few seconds - next ones are much faster.
- If model parts do not separate and remain joined to each other:
- Make sure model is divided in parts (meshes, USD shapes, etc), as this tools works by moving those parts.
- With the Distance slider away from its leftmost position, move the Center manipulator in the viewport into the middle of the parts group.
- Separate the group of "stuck" parts before separating the rest of the model.
- The initial bounds preview and center manipulator work in the active (last used) viewport. To change viewports, close the Model Exploder window and open again after using the new viewport.
## Credits
This tool is developed by Syntway, the VR/Metaverse tools division of FaronStudio: www.syntway.com
Uses icons from SVG Repo: www.svgrepo.com
3D model used in the preview snapshot is from mdesigns100: 3dexport.com/free-3dmodel-residential-building-model-296192.htm
| 2,175 | Markdown | 61.171427 | 190 | 0.788506 |
NVIDIA-Omniverse/OmniIsaacGymEnvs/README.md | # Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim
## About this repository
This repository contains Reinforcement Learning examples that can be run with the latest release of [Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html). RL examples are trained using PPO from [rl_games](https://github.com/Denys88/rl_games) library and examples are built on top of Isaac Sim's `omni.isaac.core` and `omni.isaac.gym` frameworks.
Please see [release notes](docs/release_notes.md) for the latest updates.
<img src="https://user-images.githubusercontent.com/34286328/171454189-6afafbff-bb61-4aac-b518-24646007cb9f.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184172037-cdad9ee8-f705-466f-bbde-3caa6c7dea37.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/171454182-0be1b830-bceb-4cfd-93fb-e1eb8871ec68.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454193-e027885d-1510-4ef4-b838-06b37f70c1c7.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184174894-03767aa0-936c-4bfe-bbe9-a6865f539bb4.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184168200-152567a8-3354-4947-9ae0-9443a56fee4c.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184176312-df7d2727-f043-46e3-b537-48a583d321b9.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/184178817-9c4b6b3c-c8a2-41fb-94be-cfc8ece51d5d.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/171454160-8cb6739d-162a-4c84-922d-cda04382633f.gif" width="300" height="150"/> <img src="https://user-images.githubusercontent.com/34286328/171454176-ce08f6d0-3087-4ecc-9273-7d30d8f73f6d.gif" width="300" height="150"/>
<img src="https://user-images.githubusercontent.com/34286328/184170040-3f76f761-e748-452e-b8c8-3cc1c7c8cb98.gif" width="614" height="307"/>
## System Requirements
It is recommended to have at least 32GB RAM and a GPU with at least 12GB VRAM. For detailed system requirements, please visit the [Isaac Sim System Requirements](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html#system-requirements) page. Please refer to the [Troubleshooting](docs/troubleshoot.md#memory-consumption) page for a detailed breakdown of memory consumption.
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release.
*Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2023.1.1, to ensure examples work as expected.*
Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim.
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path.
```
For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh
For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $*
For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh
```
Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`:
```bash
PYTHON_PATH -m pip install -e .
```
The following error may appear during the initial installation. This error is harmless and can be ignored.
```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
```
### Running the examples
*Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.*
To train your first policy, run:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```
An Isaac Sim app window should be launched. Once Isaac Sim initialization completes, the Cartpole scene will be constructed and simulation will start running automatically. The process will terminate once training finishes.
Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar.
To achieve maximum performance, launch training in `headless` mode as follows:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True
```
#### A Note on the Startup Time of the Simulation
Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually
be optimized in future releases.
### Extension Workflow
The extension workflow provides a simple user interface for creating and launching RL tasks. To launch Isaac Sim for the extension workflow, run:
```bash
./<isaac_sim_root>/isaac-sim.gym.sh --ext-folder </parent/directory/to/OIGE>
```
Note: `isaac_sim_root` should be located in the same directory as `python.sh`.
The UI window can be activated from `Isaac Examples > RL Examples` by navigating the top menu bar.
For more details on the extension workflow, please refer to the [documentation](docs/framework/extension_workflow.md).
### Loading trained models // Checkpoints
Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME`
defaults to the task name, but can also be overridden via the `experiment` argument.
To load a trained checkpoint and continue training, use the `checkpoint` argument:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth
```
To load a trained checkpoint and only perform inference (no training), pass `test=True`
as an argument, along with the checkpoint name. To avoid rendering overhead, you may
also want to run with fewer environments using `num_envs=64`:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=runs/Ant/nn/Ant.pth test=True num_envs=64
```
Note that if there are special characters such as `[` or `=` in the checkpoint names,
you will need to escape them and put quotes around the string. For example,
`checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`
We provide pre-trained checkpoints on the [Nucleus](https://docs.omniverse.nvidia.com/nucleus/latest/index.html) server under `Assets/Isaac/2023.1.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints`. Run the following command
to launch inference with pre-trained checkpoint:
Localhost (To set up localhost, please refer to the [Isaac Sim installation guide](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html)):
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```
Production server:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant checkpoint=http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/2023.1.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/ant.pth test=True num_envs=64
```
When running with a pre-trained checkpoint for the first time, we will automatically download the checkpoint file to `omniisaacgymenvs/checkpoints`. For subsequent runs, we will re-use the file that has already been downloaded, and will not overwrite existing checkpoints with the same name in the `checkpoints` folder.
## Running from Docker
Latest Isaac Sim Docker image can be found on [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim). A utility script is provided at `docker/run_docker.sh` to help initialize this repository and launch the Isaac Sim docker container. The script can be run with:
```bash
./docker/run_docker.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py headless=True task=Ant
```
To run the Isaac Sim docker with UI, use the following script:
```bash
./docker/run_docker_viewer.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant
```
To avoid re-installing OIGE each time a container is launched, we also provide a dockerfile that can be used to build an image with OIGE installed. To build the image, run:
```bash
docker build -t isaac-sim-oige -f docker/dockerfile .
```
Then, start a container with the built image:
```bash
./docker/run_dockerfile.sh
```
Then, training can be launched from the container with:
```bash
/isaac-sim/python.sh scripts/rlgames_train.py task=Ant headless=True
```
### Isaac Sim Automator
Cloud instances for AWS, Azure, or GCP can be setup using [IsaacSim Automator](https://github.com/NVIDIA-Omniverse/IsaacSim-Automator/tree/main#omniverse-isaac-gym).
## Livestream
OmniIsaacGymEnvs supports livestream through the [Omniverse Streaming Client](https://docs.omniverse.nvidia.com/app_streaming-client/app_streaming-client/overview.html). To enable this feature, add the commandline argument `enable_livestream=True`:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Ant headless=True enable_livestream=True
```
Connect from the Omniverse Streaming Client once the SimulationApp has been created. Note that enabling livestream is equivalent to training with the viewer enabled, thus the speed of training/inferencing will decrease compared to running in headless mode.
## Training Scripts
All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`.
To test out a task without RL in the loop, run the random policy script with:
```bash
PYTHON_PATH scripts/random_policy.py task=Cartpole
```
This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated.
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train.py task=Cartpole
```
This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations have been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI.
### Configuration and command line arguments
We use [Hydra](https://hydra.cc/docs/intro/) to manage the config.
Common arguments for the training scripts are:
* `task=TASK` - Selects which task to use. Any of `AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `CartpoleCamera`, `Crazyflie`, `FactoryTaskNutBoltPick`, `FactoryTaskNutBoltPlace`, `FactoryTaskNutBoltScrew`, `FrankaCabinet`, `FrankaDeformable`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM` (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task`)
* `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU.
* `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax.
* `multi_gpu=MULTI_GPU` - Whether to train using multiple GPUs. Defaults to `False`. Note that this option is only available with `rlgames_train.py`.
* `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `enable_livestream=ENABLE_LIVESTREAM` - Whether to enable Omniverse streaming.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
* `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation).
* `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used.
Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`.
#### Hydra Notes
Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file.
The way that the `task` and `train` portions of the config works are through the use of config groups.
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/)
The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`.
In some places in the config you will find other variables referenced (for example,
`num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy.
This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).
### Tensorboard
Tensorboard can be launched during training via the following command:
```bash
PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries
```
## WandB support
You can run [WandB](https://wandb.ai/) with OmniIsaacGymEnvs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating.
## Training with Multiple GPUs
To train with multiple GPUs, use the following command, where `--proc_per_node` represents the number of available GPUs:
```bash
PYTHON_PATH -m torch.distributed.run --nnodes=1 --nproc_per_node=2 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
## Multi-Node Training
To train across multiple nodes/machines, it is required to launch an individual process on each node.
For the master node, use the following command, where `--proc_per_node` represents the number of available GPUs, and `--nnodes` represents the number of nodes:
```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
Note that the port (`5555`) can be replaced with any other available port.
For non-master nodes, use the following command, replacing `--node_rank` with the index of each machine:
```bash
PYTHON_PATH -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 scripts/rlgames_train.py headless=True task=Ant multi_gpu=True
```
For more details on multi-node training with PyTorch, please visit [here](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, it is possible multi-node training will perform worse than running on a single node instance.
## Tasks
Source code for tasks can be found in `omniisaacgymenvs/tasks`.
Each task follows the frameworks provided in `omni.isaac.core` and `omni.isaac.gym` in Isaac Sim.
Refer to [docs/framework/framework.md](docs/framework/framework.md) for how to create your own tasks.
Full details on each of the tasks available can be found in the [RL examples documentation](docs/examples/rl_examples.md).
## Demo
We provide an interactive demo based on the `AnymalTerrain` RL example. In this demo, you can click on any of
the ANYmals in the scene to go into third-person mode and manually control the robot with your keyboard as follows:
- `Up Arrow`: Forward linear velocity command
- `Down Arrow`: Backward linear velocity command
- `Left Arrow`: Leftward linear velocity command
- `Right Arrow`: Rightward linear velocity command
- `Z`: Counterclockwise yaw angular velocity command
- `X`: Clockwise yaw angular velocity command
- `C`: Toggles camera view between third-person and scene view while maintaining manual control
- `ESC`: Unselect a selected ANYmal and yields manual control
Launch this demo with the following command. Note that this demo limits the maximum number of ANYmals in the scene to 128.
```
PYTHON_PATH scripts/rlgames_demo.py task=AnymalTerrain num_envs=64 checkpoint=omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.1/Isaac/Samples/OmniIsaacGymEnvs/Checkpoints/anymal_terrain.pth
```
<img src="https://user-images.githubusercontent.com/34286328/184688654-6e7899b2-5847-4184-8944-2a96b129b1ff.gif" width="600" height="300"/>
| 18,653 | Markdown | 55.871951 | 469 | 0.777408 |
NVIDIA-Omniverse/OmniIsaacGymEnvs/omniisaacgymenvs/cfg/config.yaml |
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: ''
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# evaluate checkpoint
evaluation: False
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 300
# enables viewport recording
enable_recording: False
# interval between video recordings (in steps)
recording_interval: 2000
# length of the recorded video (in steps)
recording_length: 100
# fps for writing recorded video
recording_fps: 30
# directory to save recordings in
recording_dir: ''
wandb_activate: False
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'omniisaacgymenvs'
# path to a kit app file
kit_app: ''
# Warp
warp: False
# set default task and default training config based on task
defaults:
- _self_
- task: Cartpole
- train: ${task}PPO
- override hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
| 2,007 | YAML | 22.348837 | 103 | 0.744893 |
NVIDIA-Omniverse/OmniIsaacGymEnvs/omniisaacgymenvs/scripts/random_policy.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gym
import hydra
from omegaconf import DictConfig
import os
import time
import numpy as np
import torch
import omniisaacgymenvs
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
    """Entry point: build the RL environment from the Hydra config, then drive it
    with uniformly random actions until the simulation app is closed.

    Args:
        cfg: Hydra-composed configuration (see ../cfg/config.yaml).
    """
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)

    headless = cfg.headless
    render = not headless
    enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras

    # select kit app file
    experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.enable_recording, cfg.kit_app)

    env = VecEnvRLGames(
        headless=headless,
        sim_device=cfg.device_id,
        enable_livestream=cfg.enable_livestream,
        # recording requires a viewport even in headless mode
        enable_viewport=enable_viewport or cfg.enable_recording,
        experience=experience
    )

    # parse experiment directory
    module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__)))
    experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name)

    # use gym RecordVideo wrapper for viewport recording
    if cfg.enable_recording:
        if cfg.recording_dir == '':
            videos_dir = os.path.join(experiment_dir, "videos")
        else:
            videos_dir = cfg.recording_dir
        video_interval = lambda step: step % cfg.recording_interval == 0
        video_length = cfg.recording_length
        env.is_vector_env = True
        # RecordVideo reads render_modes/render_fps from env.metadata
        if env.metadata is None:
            env.metadata = {"render_modes": ["rgb_array"], "render_fps": cfg.recording_fps}
        else:
            env.metadata["render_modes"] = ["rgb_array"]
            env.metadata["render_fps"] = cfg.recording_fps
        env = gym.wrappers.RecordVideo(
            env, video_folder=videos_dir, step_trigger=video_interval, video_length=video_length
        )

    # sets seed. if seed is -1 will pick a random one
    from omni.isaac.core.utils.torch.maths import set_seed
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
    cfg_dict["seed"] = cfg.seed

    task = initialize_task(cfg_dict, env)

    num_frames = 0
    first_frame = True
    prev_time = time.time()
    while env.simulation_app.is_running():
        if env.world.is_playing():
            if first_frame:
                # reset once, on the first frame after the timeline starts playing
                env.reset()
                prev_time = time.time()
                first_frame = False
            # get upper and lower bounds of action space, sample actions randomly on this interval
            action_high = env.action_space.high[0]
            action_low = env.action_space.low[0]
            # NOTE(review): this yields samples in [low, high) only when low == -high;
            # for an asymmetric action space "+ action_low" would be correct — confirm.
            actions = (action_high - action_low) * torch.rand(env.num_envs, env.action_space.shape[0], device=task.rl_device) - action_high
            # crude FPS counter: print and reset once per wall-clock second
            if time.time() - prev_time >= 1:
                print("FPS:", num_frames, "FPS * num_envs:", env.num_envs * num_frames)
                num_frames = 0
                prev_time = time.time()
            else:
                num_frames += 1
            env.step(actions)
        else:
            # timeline is stopped/paused: keep the app responsive without stepping the task
            env.world.step(render=render)

    env.simulation_app.close()
if __name__ == "__main__":
    # Hydra parses command-line overrides and invokes the decorated entry point.
    parse_hydra_configs()
| 5,069 | Python | 38.92126 | 139 | 0.688301 |
NVIDIA-Omniverse/OmniIsaacGymEnvs/docs/framework/limitations.md | ### API Limitations
#### omni.isaac.core Setter APIs
Setter APIs in omni.isaac.core for ArticulationView, RigidPrimView, and RigidContactView should only be called once per simulation step for
each view instance per API. This means that for use cases where multiple calls to the same setter API from the same view instance is required,
users will need to cache the states to be set for intermediate calls, and make only one call to the setter API prior to stepping physics with
the complete buffer containing all cached states.
If multiple calls to the same setter API from the same view object are made within the simulation step,
subsequent calls will override the states that have been set by prior calls to the same API,
voiding the previous calls to the API. The API can be called again once a simulation step is made.
For example, the below code will override states.
```python
my_view.set_world_poses(positions=[[0, 0, 1]], orientations=[[1, 0, 0, 0]], indices=[0])
# this call will void the previous call
my_view.set_world_poses(positions=[[0, 1, 1]], orientations=[[1, 0, 0, 0]], indices=[1])
my_world.step()
```
Instead, the below code should be used.
```python
my_view.set_world_poses(positions=[[0, 0, 1], [0, 1, 1]], orientations=[[1, 0, 0, 0], [1, 0, 0, 0]], indices=[0, 1])
my_world.step()
```
#### omni.isaac.core Getter APIs
Getter APIs for cloth simulation may return stale states when used with the GPU pipeline. This is because the physics simulation requires a simulation step
to occur in order to refresh the GPU buffers with new states. Therefore, when a getter API is called after a setter API before a
simulation step, the states returned from the getter API may not reflect the values that were set using the setter API.
For example:
```python
my_view.set_world_positions(positions=[[0, 0, 1]], indices=[0])
# Values may be stale when called before step
positions = my_view.get_world_positions() # positions may not match [[0, 0, 1]]
my_world.step()
# Values will be updated when called after step
positions = my_view.get_world_positions() # positions will reflect the new states
```
#### Performing Resets
When resetting the states of actors, impulses generated by previous target or effort controls
will continue to be carried over from the previous states in simulation.
Therefore, depending on the time step, the masses of the objects, and the magnitude of the impulses,
the difference between the desired reset state and the observed first state after reset can be large.
To eliminate this issue, users should also reset any position/velocity targets or effort controllers
to the reset state or zero state when resetting actor states. For setting joint positions and velocities
using the omni.isaac.core ArticulationView APIs, position targets and velocity targets will
automatically be set to the same states as joint positions and velocities.
#### Massless Links
It may be helpful in some scenarios to introduce dummy bodies into articulations for
retrieving transformations at certain locations of the articulation. Although it is possible
to introduce rigid bodies without Mass or Collision APIs and attach them to the articulation
with fixed joints, this can sometimes cause physics instabilities in simulation. To prevent
instabilities from occurring, it is recommended to add a dummy geometry to the rigid body
and include both Mass and Collision APIs. The mass of the geometry can be set to a very
small value, such as 0.0001, to avoid modifying physical behaviors of the articulation.
Similarly, we can also disable collision on the Collision API of the geometry to preserve
contact behavior of the articulation. | 3,685 | Markdown | 52.420289 | 155 | 0.775577 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/README.md | # DEPRECATED
This repo has been deprecated. Please see [NVIDIA-Omniverse/blender_omniverse_addons](https://github.com/NVIDIA-Omniverse/blender_omniverse_addons)
| 164 | Markdown | 31.999994 | 148 | 0.804878 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Blender add-on metadata, displayed in Preferences > Add-ons.
bl_info = {
    "name": "Omni Panel",
    "author": "NVIDIA Corporation",
    "version": (1, 0, 0),
    "blender": (3, 0, 0),
    "location": "View3D > Toolbar > Omniverse",
    "description": "Nvidia Omniverse bake materials for export to usd",
    "warning": "",
    "doc_url": "",
    "category": "Omniverse",
}
import bpy
#Import classes
from .material_bake.operators import (OBJECT_OT_omni_bake_mapbake,
    OBJECT_OT_omni_bake_bgbake_status, OBJECT_OT_omni_bake_bgbake_import, OBJECT_OT_omni_bake_bgbake_clear)
from .ui import (OBJECT_PT_omni_bake_panel, OmniBakePreferences)
from .particle_bake.operators import(MyProperties, PARTICLES_OT_omni_hair_bake)
#Classes list for register
#List of all classes that will be registered
# Every class listed here is (un)registered as a unit by register()/unregister().
classes = ([OBJECT_OT_omni_bake_mapbake, OBJECT_PT_omni_bake_panel, OmniBakePreferences, OBJECT_OT_omni_bake_bgbake_status,
    OBJECT_OT_omni_bake_bgbake_import, OBJECT_OT_omni_bake_bgbake_clear, MyProperties, PARTICLES_OT_omni_hair_bake])
def ShowMessageBox(message = "", title = "Message Box", icon = 'INFO'):
    """Pop up a simple message dialog through the window manager.

    The popup body is a single label containing *message*; *title* and
    *icon* are forwarded to ``popup_menu`` unchanged.
    """
    def _draw_popup(popup, _context):
        # The popup's layout only ever holds the message label.
        popup.layout.label(text=message)
    bpy.context.window_manager.popup_menu(_draw_popup, title=title, icon=icon)
#---------------------UPDATE FUNCTIONS--------------------------------------------
def prepmesh_update(self, context):
    """Keep "hide source objects" in lockstep with the "copy objects" toggle.

    Update callback for the ``prepmesh`` BoolProperty: hiding the source
    objects only makes sense when baked copies are being created, so
    ``hidesourceobjects`` simply mirrors ``prepmesh``.
    """
    scene = context.scene
    scene.hidesourceobjects = bool(scene.prepmesh)
def texture_res_update(self, context):
    """Sync the bake image size and render margin with the chosen resolution preset.

    Update callback for the ``texture_res`` EnumProperty: looks up the pixel
    edge length and bake margin for the selected preset and writes them to the
    scene's ``imgheight``/``imgwidth`` IntProperties and the render bake margin.

    Fix: the original computed sizes as ``1024/2`` etc., which is a float
    under Python 3 true division; ``imgheight``/``imgwidth`` are
    IntProperties, so integer sizes are used here.
    """
    # Preset name -> (edge length in pixels, bake margin in pixels).
    _PRESETS = {
        "0.5k": (512, 6),
        "1k": (1024, 10),
        "2k": (2048, 14),
        "4k": (4096, 20),
        "8k": (8192, 32),
    }
    preset = _PRESETS.get(context.scene.texture_res)
    if preset is None:
        # Unknown preset: leave current settings untouched (matches the
        # original if/elif chain, which had no final else).
        return
    size, margin = preset
    context.scene.imgheight = size
    context.scene.imgwidth = size
    context.scene.render.bake.margin = margin
def newUVoption_update(self, context):
    """Update callback for ``newUVoption``.

    Generating fresh UVs and reusing an existing 'OmniBake' UV map are
    mutually exclusive, so enabling the former clears the latter.
    """
    scene = bpy.context.scene
    if scene.newUVoption:
        scene.prefer_existing_sbmap = False
def all_maps_update(self,context):
    """Update callback for ``all_maps``: switch every individual bake-map toggle on."""
    scene = bpy.context.scene
    for prop_name in (
        "selected_col", "selected_metal", "selected_rough",
        "selected_normal", "selected_trans", "selected_transrough",
        "selected_emission", "selected_specular", "selected_alpha",
        "selected_sss", "selected_ssscol",
    ):
        setattr(scene, prop_name, True)
#-------------------END UPDATE FUNCTIONS----------------------------------------------
def register():
    """Register the add-on.

    Registers every class in ``classes`` and creates the Scene-level
    properties that drive the bake and particle-conversion UI. Called by
    Blender when the add-on is enabled.
    """
    #Register classes
    global classes
    for cls in classes:
        bpy.utils.register_class(cls)
    global bl_info
    # Build the panel's "<major>.<minor>.<patch>" label straight from the
    # version tuple. (The previous code flattened the tuple into a digit
    # string and then indexed individual characters, which silently breaks
    # for any multi-digit version component, e.g. (1, 10, 0) -> "1.1.0".)
    OBJECT_PT_omni_bake_panel.version = ".".join(str(part) for part in bl_info["version"])
    #Global variables
    des = "Texture Resolution"
    bpy.types.Scene.texture_res = bpy.props.EnumProperty(name="Texture Resolution", default="1k", description=des, items=[
        ("0.5k", "0.5k", f"Texture Resolution of {1024/2} x {1024/2}"),
        ("1k", "1k", f"Texture Resolution of 1024 x 1024"),
        ("2k", "2k", f"Texture Resolution of {1024*2} x {1024*2}"),
        ("4k", "4k", f"Texture Resolution of {1024*4} x {1024*4}"),
        ("8k", "8k", f"Texture Resolution of {1024*8} x {1024*8}")
        ], update = texture_res_update)
    des = "Distance to cast rays from target object to selected object(s)"
    bpy.types.Scene.ray_distance = bpy.props.FloatProperty(name="Ray Distance", default = 0.2, description=des)
    bpy.types.Scene.ray_warning_given = bpy.props.BoolProperty(default = False)
    #--- MAPS -----------------------
    des = "Bake all maps (Diffuse, Metal, SSS, SSS Col. Roughness, Normal, Transmission, Transmission Roughness, Emission, Specular, Alpha, Displacement)"
    bpy.types.Scene.all_maps = bpy.props.BoolProperty(name="Bake All Maps", default = True, description=des, update = all_maps_update)
    des = "Bake a PBR Colour map"
    bpy.types.Scene.selected_col = bpy.props.BoolProperty(name="Diffuse", default = True, description=des)
    des = "Bake a PBR Metalness map"
    bpy.types.Scene.selected_metal = bpy.props.BoolProperty(name="Metal", description=des, default= True)
    des = "Bake a PBR Roughness or Glossy map"
    bpy.types.Scene.selected_rough = bpy.props.BoolProperty(name="Roughness", description=des, default= True)
    des = "Bake a Normal map"
    bpy.types.Scene.selected_normal = bpy.props.BoolProperty(name="Normal", description=des, default= True)
    des = "Bake a PBR Transmission map"
    bpy.types.Scene.selected_trans = bpy.props.BoolProperty(name="Transmission", description=des, default= True)
    des = "Bake a PBR Transmission Roughness map"
    bpy.types.Scene.selected_transrough = bpy.props.BoolProperty(name="TR Rough", description=des, default= True)
    des = "Bake an Emission map"
    bpy.types.Scene.selected_emission = bpy.props.BoolProperty(name="Emission", description=des, default= True)
    des = "Bake a Subsurface map"
    bpy.types.Scene.selected_sss = bpy.props.BoolProperty(name="SSS", description=des, default= True)
    des = "Bake a Subsurface colour map"
    bpy.types.Scene.selected_ssscol = bpy.props.BoolProperty(name="SSS Col", description=des, default= True)
    des = "Bake a Specular/Reflection map"
    bpy.types.Scene.selected_specular = bpy.props.BoolProperty(name="Specular", description=des, default= True)
    des = "Bake a PBR Alpha map"
    bpy.types.Scene.selected_alpha = bpy.props.BoolProperty(name="Alpha", description=des, default= True)
    #------------------------------------------UVs-----------------------------------------
    des = "Use Smart UV Project to create a new UV map for your objects (or target object if baking to a target). See Blender Market FAQs for more details"
    bpy.types.Scene.newUVoption = bpy.props.BoolProperty(name="New UV(s)", description=des, update=newUVoption_update, default= False)
    des = "If one exists for the object being baked, use any existing UV maps called 'OmniBake' for baking (rather than the active UV map)"
    bpy.types.Scene.prefer_existing_sbmap = bpy.props.BoolProperty(name="Prefer existing UV maps called OmniBake", description=des)
    des = "New UV Method"
    bpy.types.Scene.newUVmethod = bpy.props.EnumProperty(name="New UV Method", default="SmartUVProject_Individual", description=des, items=[
        ("SmartUVProject_Individual", "Smart UV Project (Individual)", "Each object gets a new UV map using Smart UV Project")])
    des = "Margin between islands to use for Smart UV Project"
    bpy.types.Scene.unwrapmargin = bpy.props.FloatProperty(name="Margin", default=0.03, description=des)
    des = "Bake to normal UVs"
    bpy.types.Scene.uv_mode = bpy.props.EnumProperty(name="UV Mode", default="normal", description=des, items=[
        ("normal", "Normal", "Normal UV maps")])
    #--------------------------------Prep/CleanUp----------------------------------
    des = "Create a copy of your selected objects in Blender (or target object if baking to a target) and apply the baked textures to it. If you are baking in the background, this happens after you import"
    bpy.types.Scene.prepmesh = bpy.props.BoolProperty(name="Copy objects and apply bakes", default = True, description=des, update=prepmesh_update)
    des = "Hide the source object that you baked from in the viewport after baking. If you are baking in the background, this happens after you import"
    bpy.types.Scene.hidesourceobjects = bpy.props.BoolProperty(name="Hide source objects after bake", default = True, description=des)
    des = "Set the height of the baked image that will be produced"
    bpy.types.Scene.imgheight = bpy.props.IntProperty(name="Height", default=1024, description=des)
    des = "Set the width of the baked image that will be produced"
    bpy.types.Scene.imgwidth = bpy.props.IntProperty(name="Width", default=1024, description=des)
    des="Name to apply to these bakes (is incorporated into the bakes file name, provided you have included this in the image format string - see addon preferences). NOTE: To maintain compatibility, only MS Windows acceptable characters will be used"
    bpy.types.Scene.batchName = bpy.props.StringProperty(name="Batch name", description=des, default="Bake1", maxlen=20)
    #---------------------Where To Bake?-------------------------------------------
    bpy.types.Scene.bgbake = bpy.props.EnumProperty(name="Background Bake", default="fg", items=[
        ("fg", "Foreground", "Perform baking in the foreground. Blender will lock up until baking is complete"),
        ("bg", "Background", "Perform baking in the background, leaving you free to continue to work in Blender while the baking is being carried out")
        ])
    #---------------------Filehanding & Particles------------------------------------------
    bpy.types.Scene.particle_options = bpy.props.PointerProperty(type= MyProperties)
    #-------------------Additional Shaders-------------------------------------------
    des = "Allows for use of Add, Diffuse, Glossy, Glass, Refraction, Transparent, Anisotropic Shaders. May cause inconsistent results"
    bpy.types.Scene.more_shaders = bpy.props.BoolProperty(name="Use Additional Shader Types", default=False, description=des)
def unregister():
    """Tear down the add-on: unregister classes and remove Scene properties.

    Fix: the original deleted only a subset of the Scene properties created
    in :func:`register` (leaking ``all_maps``, the ``selected_*`` toggles,
    ``ray_distance``, ``uv_mode``, etc. across disable/enable cycles). This
    removes every registered property, guarded with ``hasattr`` so a
    partially-initialised add-on can still be disabled cleanly.
    """
    #User preferences
    global classes
    for cls in classes:
        bpy.utils.unregister_class(cls)
    # Every Scene property added in register().
    _scene_props = (
        "particle_options", "more_shaders", "newUVoption", "prepmesh",
        "unwrapmargin", "texture_res", "hidesourceobjects", "batchName",
        "bgbake", "imgheight", "imgwidth",
        "ray_distance", "ray_warning_given", "all_maps",
        "selected_col", "selected_metal", "selected_rough",
        "selected_normal", "selected_trans", "selected_transrough",
        "selected_emission", "selected_specular", "selected_alpha",
        "selected_sss", "selected_ssscol",
        "prefer_existing_sbmap", "newUVmethod", "uv_mode",
    )
    for prop in _scene_props:
        if hasattr(bpy.types.Scene, prop):
            delattr(bpy.types.Scene, prop)
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/ui.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from .particle_bake.operators import*
from .material_bake.background_bake import bgbake_ops
from os.path import join, dirname
import bpy.utils.previews
#---------------Custom ICONs----------------------
def get_icons_directory():
    """Return the path of the ``icons`` directory bundled next to this module."""
    return join(dirname(__file__), "icons")
#------------------------PANEL---------------------
class OBJECT_PT_omni_bake_panel(bpy.types.Panel):
    """Main "NVIDIA OMNIVERSE" panel in the 3D-viewport N-sidebar.

    Hosts USD import/export shortcuts, the particle-to-instance converter,
    the PBR material-bake settings, background-bake controls, and
    material-conversion buttons.
    """
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "NVIDIA OMNIVERSE"
    # Add-on version label; overwritten from bl_info in register().
    version = "0.0.0"
    #retrieve icons
    # Custom preview icons are loaded once at class-definition time and
    # referenced by key inside draw().
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNIBLEND", join(icons_directory, "BlenderOMNI.png"), 'IMAGE')
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
    icons.load("BAKE",join(icons_directory, "Oven.png"), 'IMAGE')
    #draw the panel
    def draw(self, context):
        """Build the panel layout; called by Blender on every UI redraw.

        Layout statements are strictly order-dependent: each section is
        appended to ``layout`` top-to-bottom.
        """
        layout = self.layout
        #--------File Handling-------------------
        layout.label(text="Omniverse", icon_value=self.icons["OMNI"].icon_id)
        impExpCol = self.layout.column(align=True)
        impExpCol.label(text= "File Handling",
            icon='FILEBROWSER')
        impExpCol.operator('wm.usd_import',
            text='Import USD',
            icon='IMPORT')
        impExpCol.operator('wm.usd_export',
            text='Export USD',
            icon='EXPORT')
        #--------Particle Collection Instancing-------------------
        layout.separator()
        particleOptions = context.scene.particle_options
        particleCol = self.layout.column(align=True)
        particleCol.label(text = "Omni Particles",
            icon='PARTICLES')
        box = particleCol.box()
        column= box.column(align= True)
        column.prop(particleOptions, "deletePSystemAfterBake")
        row = column.row()
        row.prop(particleOptions, "animateData")
        # Frame-range inputs only make sense when keyframed animation is requested.
        if particleOptions.animateData:
            row = column.row(align=True)
            row.prop(particleOptions, "selectedStartFrame")
            row.prop(particleOptions, "selectedEndFrame")
            row = column.row()
            # Disabled row = greyed-out warning label, not an interactive widget.
            row.enabled = False
            row.label(text="Increased Calculation Time", icon= 'ERROR')
        row = column.row()
        row.scale_y = 1.5
        row.operator('omni.hair_bake',
            text='Convert',
            icon='MOD_PARTICLE_INSTANCE')
        #Does not update while running. Set in "particle_bake.operators.py"
        # row = column.row()
        # row.scale_y = 1.2
        # row.prop(particleOptions, "progressBar")
        #--------PBR Bake Settings-------------------
        layout.separator()
        column = layout.column(align= True)
        header = column.row()
        header.label(text = "Material Bake", icon = 'UV_DATA')
        box = column.box()
        row = box.row()
        # Checkbox icon is chosen manually so the "all maps" toggle reflects
        # its current state.
        if context.scene.all_maps == True:
            row.prop(context.scene, "all_maps", icon = 'CHECKBOX_HLT')
        if context.scene.all_maps == False:
            row.prop(context.scene, "all_maps", icon = 'CHECKBOX_DEHLT')
        # Individual per-map toggles, two per row.
        column = box.column(align= True)
        row = column.row()
        row.prop(context.scene, "selected_col")
        row.prop(context.scene, "selected_metal")
        row = column.row()
        row.prop(context.scene, "selected_sss")
        row.prop(context.scene, "selected_ssscol")
        row = column.row()
        row.prop(context.scene, "selected_rough")
        row.prop(context.scene, "selected_normal")
        row = column.row()
        row.prop(context.scene, "selected_trans")
        row.prop(context.scene, "selected_transrough")
        row = column.row()
        row.prop(context.scene, "selected_emission")
        row.prop(context.scene, "selected_specular")
        row = column.row()
        row.prop(context.scene, "selected_alpha")
        row = column.row()
        colm = box.column(align=True)
        colm.prop(context.scene, "more_shaders")
        row = colm.row()
        row.enabled = False
        if context.scene.more_shaders:
            row.label(text="Inconsistent Results", icon= 'ERROR')
        #--------Texture Settings-------------------
        row = box.row()
        row.label(text="Texture Resolution:")
        row.scale_y = 0.5
        row = box.row()
        row.prop(context.scene, "texture_res", expand=True)
        row.scale_y = 1
        # Warn about long bake times at high resolutions.
        if context.scene.texture_res == "8k" or context.scene.texture_res == "4k":
            row = box.row()
            row.enabled = False
            row.label(text="Long Bake Times", icon= 'ERROR')
        #--------UV Settings-------------------
        column = box.column(align = True)
        row = column.row()
        row.prop(context.scene, "newUVoption")
        row.prop(context.scene, "unwrapmargin")
        #--------Other Settings-------------------
        column= box.column(align=True)
        row = column.row()
        # Labels differ because background bakes apply copies/hiding only
        # after the user imports the finished bake.
        if bpy.context.scene.bgbake == "fg":
            text = "Copy objects and apply bakes"
        else:
            text = "Copy objects and apply bakes (after import)"
        row.prop(context.scene, "prepmesh", text=text)
        if (context.scene.prepmesh == True):
            if bpy.context.scene.bgbake == "fg":
                text = "Hide source objects after bake"
            else:
                text = "Hide source objects after bake (after import)"
            row = column.row()
            row.prop(context.scene, "hidesourceobjects", text=text)
        #-------------Buttons-------------------------
        row = box.row()
        row.scale_y = 1.5
        row.operator("object.omni_bake_mapbake", icon_value=self.icons["BAKE"].icon_id)
        row = column.row()
        row.scale_y = 1
        row.prop(context.scene, "bgbake", expand=True)
        # Background-bake management row: status / import / clear buttons are
        # enabled based on the shared bgbake_ops job lists.
        if context.scene.bgbake == "bg":
            row = column.row(align= True)
            # - BG status button
            col = row.column()
            if len(bgbake_ops.bgops_list) == 0:
                enable = False
                icon = "TIME"
            else:
                enable = True
                icon = "TIME"
            col.operator("object.omni_bake_bgbake_status", text="", icon=icon)
            col.enabled = enable
            # - BG import button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "IMPORT"
            else:
                enable = False
                icon = "IMPORT"
            col.operator("object.omni_bake_bgbake_import", text="", icon=icon)
            col.enabled = enable
            #BG erase button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "TRASH"
            else:
                enable = False
                icon = "TRASH"
            col.operator("object.omni_bake_bgbake_clear", text="", icon=icon)
            col.enabled = enable
            row.alignment = 'CENTER'
            row.label(text=f"Running {len(bgbake_ops.bgops_list)} | Finished {len(bgbake_ops.bgops_list_finished)}")
        #-------------Other material options-------------------------
        # Material-conversion buttons only appear for an active, selected mesh.
        if len(bpy.context.selected_objects) != 0 and bpy.context.active_object != None:
            if bpy.context.active_object.select_get() and bpy.context.active_object.type == "MESH":
                layout.separator()
                column= layout.column(align= True)
                column.label(text= "Convert Material to:", icon= 'SHADING_RENDERED')
                box = column.box()
                materialCol = box.column(align=True)
                materialCol.operator('universalmaterialmap.create_template_omnipbr',
                    text='OmniPBR')
                materialCol.operator('universalmaterialmap.create_template_omniglass',
                    text='OmniGlass')
class OmniBakePreferences(bpy.types.AddonPreferences):
    """Add-on preferences: bake-image naming format and per-map name aliases.

    The aliases are substituted into the image file name via
    ``img_name_format`` placeholders.
    """
    # this must match the add-on name, use '__package__'
    # when defining this in a submodule of a python package.
    bl_idname = __package__
    img_name_format: bpy.props.StringProperty(name="Image format string",
        default="%OBJ%_%BATCH%_%BAKEMODE%_%BAKETYPE%")
    #Aliases
    diffuse_alias: bpy.props.StringProperty(name="Diffuse", default="diffuse")
    metal_alias: bpy.props.StringProperty(name="Metal", default="metalness")
    roughness_alias: bpy.props.StringProperty(name="Roughness", default="roughness")
    glossy_alias: bpy.props.StringProperty(name="Glossy", default="glossy")
    normal_alias: bpy.props.StringProperty(name="Normal", default="normal")
    transmission_alias: bpy.props.StringProperty(name="Transmission", default="transparency")
    transmissionrough_alias: bpy.props.StringProperty(name="Transmission Roughness", default="transparencyroughness")
    # Fix: label previously read "Clearcost" (typo).
    clearcoat_alias: bpy.props.StringProperty(name="Clearcoat", default="clearcoat")
    clearcoatrough_alias: bpy.props.StringProperty(name="Clearcoat Roughness", default="clearcoatroughness")
    emission_alias: bpy.props.StringProperty(name="Emission", default="emission")
    specular_alias: bpy.props.StringProperty(name="Specular", default="specular")
    alpha_alias: bpy.props.StringProperty(name="Alpha", default="alpha")
    sss_alias: bpy.props.StringProperty(name="SSS", default="sss")
    ssscol_alias: bpy.props.StringProperty(name="SSS Colour", default="ssscol")
    @classmethod
    def reset_img_string(cls):
        """Restore ``img_name_format`` to its default and persist user preferences."""
        prefs = bpy.context.preferences.addons[__package__].preferences
        prefs.property_unset("img_name_format")
        bpy.ops.wm.save_userpref()
| 10,922 | Python | 37.192308 | 117 | 0.568211 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/particle_bake/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. | 858 | Python | 44.210524 | 74 | 0.7331 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/particle_bake/operators.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import time
import bpy
import numpy as np
class MyProperties(bpy.types.PropertyGroup):
    """Scene-level settings for the particle -> instanced-collection converter.

    Registered as ``Scene.particle_options`` and read by the Omni Hair Bake
    operator and the Omniverse sidebar panel.
    """
    # Remove the source particle system once its instances have been converted.
    deletePSystemAfterBake: bpy.props.BoolProperty(
        name = "Delete PS after converting",
        description = "Delete selected particle system after conversion",
        default = False
    )
    # Status text for the conversion (not live-updated in the panel; see the
    # commented-out progressBar rows in ui.py).
    progressBar: bpy.props.StringProperty(
        name = "Progress",
        description = "Progress of Particle Conversion",
        default = "RUNNING"
    )
    # When enabled, a keyframe is written for every instance on every frame in range.
    animateData: bpy.props.BoolProperty(
        name = "Keyframe Animation",
        description = "Add a keyframe for each particle for each of the specified frames",
        default = False
    )
    # First frame to keyframe when animateData is enabled.
    selectedStartFrame: bpy.props.IntProperty(
        name = "Start",
        description = "Frame to begin keyframes",
        default = 1
    )
    # Last frame to keyframe when animateData is enabled.
    selectedEndFrame: bpy.props.IntProperty(
        name = "End",
        description = "Frame to stop keyframes",
        default = 3
    )
# def fixEndFrame():
# particleOptions = context.particle_options
# particleOptions.selectedEndFrame = particleOptions.selectedStartFrame
# Module-level scratch storage for particle-system modifier state: filled by
# getOriginalModifiers() and consumed by restoreOriginalModifiers().
particleSystemVisibility = []
particleSystemRender = []
def getOriginalModifiers(parent):
    """Snapshot viewport/render visibility of every particle-system modifier on *parent*."""
    del particleSystemVisibility[:]
    del particleSystemRender[:]
    ps_mods = [m for m in parent.modifiers if m.type == 'PARTICLE_SYSTEM']
    particleSystemVisibility.extend(m.show_viewport for m in ps_mods)
    particleSystemRender.extend(m.show_render for m in ps_mods)
def restoreOriginalModifiers(parent):
    """Re-apply the visibility snapshot taken by getOriginalModifiers()."""
    ps_mods = (m for m in parent.modifiers if m.type == 'PARTICLE_SYSTEM')
    for index, mod in enumerate(ps_mods):
        mod.show_viewport = particleSystemVisibility[index]
        mod.show_render = particleSystemRender[index]
def hideOtherModifiers(parent, countH):
    """Hide (viewport) every particle-system modifier on *parent* except the
    one at particle-system index *countH*. Non-particle modifiers are untouched.
    """
    ps_index = 0
    for mod in parent.modifiers:
        if mod.type != 'PARTICLE_SYSTEM':
            continue
        if ps_index != countH:
            mod.show_viewport = False
        ps_index += 1
def particleSystemVisible(parent, countP):
    """Return the viewport visibility of *parent*'s particle-system modifier at
    particle-system index *countP*, or None if no such modifier exists.
    """
    ps_index = 0
    for mod in parent.modifiers:
        if mod.type == 'PARTICLE_SYSTEM':
            if ps_index == countP:
                return mod.show_viewport
            ps_index += 1
    return None
# Omni Hair Bake
class PARTICLES_OT_omni_hair_bake(bpy.types.Operator):
"""Convert blender particles for Omni scene instancing"""
bl_idname = "omni.hair_bake"
bl_label = "Omni Hair Bake"
bl_options = {'REGISTER', 'UNDO'} # create undo state
def execute(self, context):
particleOptions = context.scene.particle_options
startTime= time.time()
print()
print("____BEGINING PARTICLE CONVERSION______")
#Deselect Non-meshes
for obj in bpy.context.selected_objects:
if obj.type != "MESH":
obj.select_set(False)
print("not mesh")
#Do we still have an active object?
if bpy.context.active_object == None:
#Pick arbitary
bpy.context.view_layer.objects.active = bpy.context.selected_objects[0]
for parentObj in bpy.context.selected_objects:
print()
print("--Staring " + parentObj.name + ":")
getOriginalModifiers(parentObj)
countH = 0
countP = 0
countPS = 0
showEmmiter = False
hasPS = False
for currentPS in parentObj.particle_systems:
hideOtherModifiers(parentObj, countH)
countH+=1
hasVisible = particleSystemVisible(parentObj, countP)
countP+=1
if currentPS != None and hasVisible:
hasPS = True
bpy.ops.object.select_all(action='DESELECT')
renderType = currentPS.settings.render_type
emmitOrHair = currentPS.settings.type
if parentObj.show_instancer_for_viewport == True:
showEmmiter = True
if renderType == 'OBJECT' or renderType == 'COLLECTION':
count = 0
listInst = []
listInstScale = []
# For Object Instances
if renderType == 'OBJECT':
instObj = currentPS.settings.instance_object
# Duplicate Instanced Object
dupInst = instObj.copy()
bpy.context.collection.objects.link(dupInst)
dupInst.select_set(True)
dupInst.location = (0,0,0)
bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="INST_"+str(dupInst.name))
dupInst.select_set(False)
count += 1
listInst.append(dupInst)
listInstScale.append(instObj.scale)
# For Collection Instances
if renderType == 'COLLECTION':
instCol = currentPS.settings.instance_collection.objects
countW = 0
weight = 1
for obj in instCol:
# Duplicate Instanced Object
dupInst = obj.copy()
bpy.context.collection.objects.link(dupInst)
dupInst.select_set(True)
dupInst.location = (0,0,0)
bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="INST_"+str(dupInst.name))
dupInst.select_set(False)
if parentObj.particle_systems.active.settings.use_collection_count:
weight = currentPS.settings.instance_weights[countW].count
print("Instance Count: " + str(weight))
for i in range(weight):
count += 1
listInst.append(dupInst)
listInstScale.append(obj.scale)
countW += 1
# For Path Instances *NOT SUPPORTED
if renderType == 'PATH':
print("path no good")
return {'FINISHED'}
if renderType == 'NONE':
print("no instances")
return {'FINISHED'}
#DOES NOTHING RIGHT NOW
#if overwriteExsisting:
#bpy.ops.outliner.delete(hierarchy=True)
# Variables
parentObj.select_set(True)
parentCollection = parentObj.users_collection[0]
nameP = parentObj.particle_systems[countPS].name # get name of object's particle system
# Create Empty as child
o = bpy.data.objects.new( "empty", None)
o.name = "EM_" + nameP
o.parent = parentObj
parentCollection.objects.link( o )
# FOR ANIMATED EMITTER DATA
if particleOptions.animateData and emmitOrHair == 'EMITTER':
print("--ANIMATED EMITTER--")
#Prep for Keyframing
collectionInstances = []
# Calculate Dependency Graph
degp = bpy.context.evaluated_depsgraph_get()
# Evaluate the depsgraph (Important step)
particle_systems = parentObj.evaluated_get(degp).particle_systems
# All particles of selected particle system
activePS = particle_systems[countPS]
particles = activePS.particles
# Total Particles
totalParticles = len(particles)
#Currently does NOT work
# if activePS.type == 'HAIR':
# hairLength = particles[0].hair_length
# print(hairLength)
# print(bpy.types.ParticleHairKey.co_object(parentObj,parentObj.modifiers[0], particles[0]))
# key = particles[0].hair_keys
# print(key)
# coo = key.co
# print(coo)
# print(particles[0].location)
#Beginings of supporting use random, requires more thought
# obInsttt = parentObj.evaluated_get(degp).object_instances
# for i in obInsttt:
# obj = i.object
# print(obj.name)
# for obj in degp.object_instances:
# print(obj.instance_object)
# print(obj.particle_system)
# Handle instances for construction of scene collections **Fast**
for i in range(totalParticles):
childObj = particles[i]
calculateChild = False
if childObj.birth_time <= particleOptions.selectedEndFrame and childObj.die_time > particleOptions.selectedStartFrame:
calculateChild = True
if calculateChild:
modInst = i % count
#Works for "use count" but not "pick random"
dupColName = str(listInst[modInst].users_collection[0].name)
#Create Collection Instance
source_collection = bpy.data.collections[dupColName]
instance_obj = bpy.data.objects.new(
name= "Inst_" + listInst[modInst].name + "." + str(i),
object_data=None
)
instance_obj.empty_display_type = 'SINGLE_ARROW'
instance_obj.empty_display_size = .1
instance_obj.instance_collection = source_collection
instance_obj.instance_type = 'COLLECTION'
parentCollection.objects.link(instance_obj)
instance_obj.parent = o
instance_obj.matrix_parent_inverse = o.matrix_world.inverted()
collectionInstances.append(instance_obj)
print("Using " + str(len(collectionInstances)))
print("Out of " + str(totalParticles) + " instances")
collectionCount = len(collectionInstances)
startFrame = particleOptions.selectedStartFrame
endFrame = particleOptions.selectedEndFrame
#Do we need to swap start and end frame?
if particleOptions.selectedStartFrame > particleOptions.selectedEndFrame:
endFrame = startFrame
startFrame = particleOptions.selectedEndFrame
for frame in range(startFrame, endFrame + 1):
print("frame = " + str(frame))
bpy.context.scene.frame_current = frame
# Calculate Dependency Graph for each frame
degp = bpy.context.evaluated_depsgraph_get()
particle_systems = parentObj.evaluated_get(degp).particle_systems
particles = particle_systems[countPS].particles
for i in range(collectionCount):
activeCol = collectionInstances[i]
activeDup = particles[i]
#Keyframe Visibility, Scale, Location, and Rotation
if activeDup.alive_state == 'UNBORN' or activeDup.alive_state == 'DEAD':
activeCol.scale = (0,0,0)
activeCol.keyframe_insert(data_path='scale')
activeCol.hide_viewport = True
activeCol.hide_render = True
activeCol.keyframe_insert("hide_viewport")
activeCol.keyframe_insert("hide_render")
else:
activeCol.hide_viewport = False
activeCol.hide_render = False
scale = activeDup.size
activeCol.location = activeDup.location
activeCol.rotation_mode = 'QUATERNION'
activeCol.rotation_quaternion = activeDup.rotation
activeCol.rotation_mode = 'XYZ'
activeCol.scale = (scale, scale, scale)
activeCol.keyframe_insert(data_path='location')
activeCol.keyframe_insert(data_path='rotation_euler')
activeCol.keyframe_insert(data_path='scale')
activeCol.keyframe_insert("hide_viewport")
activeCol.keyframe_insert("hide_render")
# FOR ANIMATED HAIR DATA
elif particleOptions.animateData and emmitOrHair == 'HAIR':
print("--ANIMATED HAIR--")
#Prep for Keyframing
bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles
dups = bpy.context.selected_objects
lengthDups = len(dups)
collectionInstances = []
# Handle instances for construction of scene collections **Fast**
for i in range(lengthDups):
childObj = dups.pop(0)
modInst = i % count
#Works for "use count" but not "pick random"
dupColName = str(listInst[modInst].users_collection[0].name)
#Create Collection Instance
source_collection = bpy.data.collections[dupColName]
instance_obj = bpy.data.objects.new(
name= "Inst_" + childObj.name,
object_data=None
)
instance_obj.empty_display_type = 'SINGLE_ARROW'
instance_obj.empty_display_size = .1
instance_obj.instance_collection = source_collection
instance_obj.instance_type = 'COLLECTION'
parentCollection.objects.link(instance_obj)
instance_obj.parent = o
bpy.data.objects.remove(childObj, do_unlink=True)
collectionInstances.append(instance_obj)
print(str(len(collectionInstances)) + " instances")
collectionCount = len(collectionInstances)
startFrame = particleOptions.selectedStartFrame
endFrame = particleOptions.selectedEndFrame
#Do we need to swap start and end frame?
if particleOptions.selectedStartFrame > particleOptions.selectedEndFrame:
endFrame = startFrame
startFrame = particleOptions.selectedEndFrame
for frame in range(startFrame, endFrame + 1):
print("frame = " + str(frame))
bpy.context.scene.frame_current = frame
# Calculate hairs for each frame
parentObj.select_set(True)
bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles
tempdups = bpy.context.selected_objects
for i in range(collectionCount):
activeDup = tempdups.pop(0)
activeCol = collectionInstances[i]
#Keyframe Scale, Location, and Rotation
activeCol.location = activeDup.location
activeCol.rotation_euler = activeDup.rotation_euler
activeCol.scale = activeDup.scale
activeCol.keyframe_insert(data_path='location')
activeCol.keyframe_insert(data_path='rotation_euler')
activeCol.keyframe_insert(data_path='scale')
bpy.data.objects.remove(activeDup, do_unlink=True)
# FOR SINGLE FRAME CONVERSION
else:
print("--SINGLE FRAME--")
bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles
dups = bpy.context.selected_objects
lengthDups = len(dups)
# Handle instances for construction of scene collections **Fast**
for i in range(lengthDups):
childObj = dups.pop(0)
modInst = i % count
dupColName = str(listInst[modInst].users_collection[0].name)
loc=childObj.location
rot=childObj.rotation_euler
newScale = np.divide(childObj.scale, listInstScale[modInst])
#Create Collection Instance
source_collection = bpy.data.collections[dupColName]
instance_obj = bpy.data.objects.new(
name= "Inst_" + childObj.name,
object_data=None
)
instance_obj.empty_display_type = 'SINGLE_ARROW'
instance_obj.empty_display_size = .1
instance_obj.instance_collection = source_collection
instance_obj.instance_type = 'COLLECTION'
instance_obj.location = loc
instance_obj.rotation_euler = rot
instance_obj.scale = newScale
parentCollection.objects.link(instance_obj)
instance_obj.parent = o
bpy.data.objects.remove(childObj, do_unlink=True)
for obj in listInst:
bpy.context.view_layer.layer_collection.children[obj.users_collection[0].name].exclude = True
#Make parent object active object again
parentObj.select_set(True)
bpy.context.view_layer.objects.active = parentObj
else:
print("Must be object or collection instance")
else:
print("Object has no active particle system")
restoreOriginalModifiers(parentObj)
countPS += 1
#Handle PS after converting
if particleOptions.deletePSystemAfterBake:
if showEmmiter == False and hasPS == True:
bpy.context.active_object.hide_render = True
bpy.context.active_object.hide_set(True)
countI = 0
for ps in range(len(parentObj.particle_systems)):
if particleSystemVisibility[ps] == True:
parentObj.particle_systems.active_index = countI
bpy.ops.object.particle_system_remove()
else:
countI+=1
else:
countI = 0
for mod in parentObj.modifiers:
if mod.type == 'PARTICLE_SYSTEM':
mod.show_viewport = False
if particleSystemVisibility[countI] == True:
mod.show_render = False
countI+=1
print ("My program took", time.time() - startTime, " seconds to run") # run time
return {'FINISHED'} | 23,439 | Python | 46.258064 | 150 | 0.462477 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/material_setup.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from . import functions
from .data import MasterOperation
def find_node_from_label(label, nodes):
    """Return the first node in *nodes* whose label equals *label*, or False."""
    matching = [candidate for candidate in nodes if candidate.label == label]
    return matching[0] if matching else False
def find_isocket_from_identifier(idname, node):
    """Return the input socket of *node* with identifier *idname*, or False."""
    return next((sock for sock in node.inputs if sock.identifier == idname), False)
def find_osocket_from_identifier(idname, node):
    """Return the output socket of *node* with identifier *idname*, or False."""
    return next((sock for sock in node.outputs if sock.identifier == idname), False)
def make_link(f_node_label, f_node_ident, to_node_label, to_node_ident, nodetree):
    """Create a link between two sockets, locating nodes by their labels.

    Silently returns False when either endpoint node is not present in the
    tree (callers rely on this to skip links for un-baked map types);
    returns True once the link has been created.
    """
    src_node = find_node_from_label(f_node_label, nodetree.nodes)
    if src_node == False:
        return False
    dst_node = find_node_from_label(to_node_label, nodetree.nodes)
    if dst_node == False:
        return False
    src_socket = find_osocket_from_identifier(f_node_ident, src_node)
    dst_socket = find_isocket_from_identifier(to_node_ident, dst_node)
    nodetree.links.new(src_socket, dst_socket)
    return True
def wipe_labels(nodes):
    """Blank out every node label (labels are only needed during linking)."""
    for each in nodes:
        each.label = ""
def get_image_from_tag(thisbake, objname):
    """Find the baked image tagged for this object / bake type / batch / mode.

    Images produced by the baker carry custom properties ("SB_*") identifying
    which object, batch, global mode and bake type they belong to. Returns the
    first image whose tags all match, or False (after logging an error) when
    none does.
    """
    bake_op = MasterOperation.current_bake_operation
    mode = bake_op.bake_mode
    objname = functions.untrunc_if_needed(objname)
    batch = bpy.context.scene.batchName

    def _tag_ok(img, key, value):
        # Custom properties behave like a mapping on the image datablock
        return key in img and img[key] == value

    for img in bpy.data.images:
        if (_tag_ok(img, "SB_objname", objname)
                and _tag_ok(img, "SB_batch", batch)
                and _tag_ok(img, "SB_globalmode", mode)
                and _tag_ok(img, "SB_thisbake", thisbake)):
            return img

    functions.printmsg(f"ERROR: No image with matching tag ({thisbake}) found for object {objname}")
    return False
def create_principled_setup(nodetree, obj):
    """Rebuild *obj*'s material as a clean Principled-BSDF setup from baked maps.

    Wipes the existing node tree, then recreates: a frame (Blender names the
    first NodeFrame "Frame"), a Principled BSDF, a Material Output, one image
    texture node per bake type the user selected, and — for normal maps — a
    Normal Map decode node. Node *labels* (not names) are used by make_link()
    to wire the sockets, and are wiped at the end.

    Args:
        nodetree: the material's node tree to rebuild.
        obj: the baked object; its name (minus the "_OmniBake" suffix) keys
            the lookup of baked images.
    """
    functions.printmsg("Creating principled material")
    nodes = nodetree.nodes
    obj_name = obj.name.replace("_OmniBake", "")
    obj.active_material.cycles.displacement_method = 'BOTH'

    # Start from an empty tree
    for node in nodes:
        nodes.remove(node)

    # Frame that visually groups the whole setup
    frame = nodes.new("NodeFrame")
    frame.location = (0, 0)
    frame.use_custom_color = True
    frame.color = (0.149763, 0.214035, 0.0590617)

    # Principled BSDF
    pnode = nodes.new("ShaderNodeBsdfPrincipled")
    pnode.location = (-25, 335)
    pnode.label = "pnode"
    pnode.use_custom_color = True
    pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467)
    pnode.parent = nodes["Frame"]

    # Material output
    node = nodes.new("ShaderNodeOutputMaterial")
    node.location = (500, 200)
    node.label = "monode"
    node.show_options = False
    node.parent = nodes["Frame"]

    # -----------------------------------------------------------------
    # One image-texture node per selected bake type:
    # (scene checkbox, node label, baked-image tag, y location)
    scene = bpy.context.scene
    texture_specs = [
        (scene.selected_col, "col_tex", "diffuse", 250),
        (scene.selected_sss, "sss_tex", "sss", 210),
        (scene.selected_ssscol, "ssscol_tex", "ssscol", 170),
        (scene.selected_metal, "metal_tex", "metalness", 130),
        (scene.selected_specular, "specular_tex", "specular", 90),
        (scene.selected_rough, "roughness_tex", "roughness", 50),
        (scene.selected_trans, "transmission_tex", "transparency", -90),
        (scene.selected_transrough, "transmissionrough_tex", "transparencyroughness", -130),
        (scene.selected_emission, "emission_tex", "emission", -170),
        (scene.selected_alpha, "alpha_tex", "alpha", -210),
        (scene.selected_normal, "normal_tex", "normal", -318.7),
    ]
    for enabled, label, bake_tag, y in texture_specs:
        if not enabled:
            continue
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, y)
        node.label = label
        node.image = get_image_from_tag(bake_tag, obj_name)
        node.parent = nodes["Frame"]

    # -----------------------------------------------------------------
    # Normal maps need a decode node between texture and the Normal socket
    if scene.selected_normal:
        node = nodes.new("ShaderNodeNormalMap")
        node.location = (-220, -240)
        node.label = "normalmap"
        node.show_options = False
        node.parent = nodes["Frame"]

    # -----------------------------------------------------------------
    # Wire everything up; make_link() quietly skips absent nodes, so links
    # for unselected bake types are simply not created.
    make_link("emission_tex", "Color", "pnode", "Emission", nodetree)
    make_link("col_tex", "Color", "pnode", "Base Color", nodetree)
    make_link("metal_tex", "Color", "pnode", "Metallic", nodetree)
    make_link("roughness_tex", "Color", "pnode", "Roughness", nodetree)
    make_link("transmission_tex", "Color", "pnode", "Transmission", nodetree)
    make_link("transmissionrough_tex", "Color", "pnode", "Transmission Roughness", nodetree)
    make_link("normal_tex", "Color", "normalmap", "Color", nodetree)
    make_link("normalmap", "Normal", "pnode", "Normal", nodetree)
    make_link("specular_tex", "Color", "pnode", "Specular", nodetree)
    make_link("alpha_tex", "Color", "pnode", "Alpha", nodetree)
    make_link("sss_tex", "Color", "pnode", "Subsurface", nodetree)
    make_link("ssscol_tex", "Color", "pnode", "Subsurface Color", nodetree)
    make_link("pnode", "BSDF", "monode", "Surface", nodetree)

    # ---------------------------------------------------
    # Labels were only needed for linking; clear them so the UI stays tidy
    wipe_labels(nodes)

    nodes["Frame"].label = "OMNI PBR"
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/data.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from .bake_operation import bakestolist
class MasterOperation:
    """Module-wide mutable state shared across one whole bake run."""

    current_bake_operation = None
    total_bake_operations = 0
    this_bake_operation_num = 0
    orig_UVs_dict = {}
    baked_textures = []
    prepared_mesh_objects = []
    batch_name = ""
    orig_objects = []
    orig_active_object = ""
    orig_sample_count = 0

    @staticmethod
    def clear():
        """Reset every shared attribute ahead of a fresh bake; returns True."""
        cls = MasterOperation
        # State tracked while the bake process runs
        cls.orig_UVs_dict = {}
        cls.total_bake_operations = 0
        cls.current_bake_operation = None
        cls.this_bake_operation_num = 0
        cls.prepared_mesh_objects = []
        cls.baked_textures = []
        cls.batch_name = ""
        # State needed to restore the scene to what it was before the bake
        cls.orig_objects = []
        cls.orig_active_object = ""
        cls.orig_sample_count = 0
        return True
class BakeOperation:
    """Settings for one bake pass (currently only PBR baking exists)."""

    # Bake-mode constant
    PBR = "pbr"

    def __init__(self):
        # Default to PBR so the example in the user prefs will work
        self.bake_mode = BakeOperation.PBR
        self.bake_objects = []
        self.active_object = None
        # UV handling mode
        self.uv_mode = "normal"
        # PBR map selection, populated by assemble_pbr_bake_list()
        self.pbr_selected_bake_types = []

    def assemble_pbr_bake_list(self):
        """Snapshot the PBR map types currently ticked in the UI."""
        self.pbr_selected_bake_types = bakestolist()
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/operators.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import sys
import subprocess
import os
from .bake_operation import BakeStatus, bakestolist
from .data import MasterOperation, BakeOperation
from . import functions
from . import bakefunctions
from .background_bake import bgbake_ops
from pathlib import Path
import tempfile
class OBJECT_OT_omni_bake_mapbake(bpy.types.Operator):
    """Start the baking process"""
    bl_idname = "object.omni_bake_mapbake"
    bl_label = "Bake"
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Run the PBR bake, either in-process or by spawning a background Blender.

        Returns {'FINISHED'} on success or {'CANCELLED'} when the pre-bake
        checks fail.
        """

        def commence_bake(needed_bake_modes):
            # Prepare the BakeStatus tracker for the progress bar
            num_of_objects = len(bpy.context.selected_objects)
            total_maps = 0
            for need in needed_bake_modes:
                if need == BakeOperation.PBR:
                    total_maps += (bakestolist(justcount=True) * num_of_objects)
            BakeStatus.total_maps = total_maps

            # Clear the MasterOperation stuff
            MasterOperation.clear()
            # Need to know the total operations
            MasterOperation.total_bake_operations = len(needed_bake_modes)

            # Build one BakeOperation per requested mode
            bops = []
            for need in needed_bake_modes:
                bop = BakeOperation()
                bop.bake_mode = need
                bops.append(bop)
                functions.printmsg(f"Created operation for {need}")

            # Run queued operations
            for bop in bops:
                MasterOperation.this_bake_operation_num += 1
                MasterOperation.current_bake_operation = bop
                if bop.bake_mode == BakeOperation.PBR:
                    functions.printmsg("Running PBR bake")
                    bakefunctions.doBake()
            return True

        # Only PBR baking is currently supported
        needed_bake_modes = [BakeOperation.PBR]

        # Clear the progress tracking
        BakeStatus.current_map = 0
        BakeStatus.total_maps = 0

        # If we have been called in background mode, just get on with it.
        # Checks should already have been done by the foreground session.
        if "--background" in sys.argv:
            if "OmniBake_Bakes" in bpy.data.collections:
                # Remove any prior baked objects
                bpy.data.collections.remove(bpy.data.collections["OmniBake_Bakes"])
            commence_bake(needed_bake_modes)
            self.report({"INFO"}, "Bake complete")
            return {'FINISHED'}

        functions.deselect_all_not_mesh()

        # We are in the foreground, so run the usual pre-bake checks
        result = True
        for need in needed_bake_modes:
            if not functions.startingChecks(bpy.context.selected_objects, need):
                result = False
        if not result:
            return {"CANCELLED"}

        # If the user requested background mode, fire that up now and exit
        if bpy.context.scene.bgbake == "bg":
            bpy.ops.wm.save_mainfile()
            filepath = bpy.data.filepath  # was `filepath = filepath = ...`
            # The spawned Blender saves a PID-named copy, then bakes into it
            process = subprocess.Popen(
                [bpy.app.binary_path, "--background", filepath, "--python-expr",
                 "import bpy;\
import os;\
from pathlib import Path;\
savepath=Path(bpy.data.filepath).parent / (str(os.getpid()) + \".blend\");\
bpy.ops.wm.save_as_mainfile(filepath=str(savepath), check_existing=False);\
bpy.ops.object.omni_bake_mapbake();"],
                shell=False)
            bgbake_ops.bgops_list.append([process, bpy.context.scene.prepmesh, bpy.context.scene.hidesourceobjects])
            self.report({"INFO"}, "Background bake process started")
            return {'FINISHED'}

        # Otherwise bake here and now
        commence_bake(needed_bake_modes)
        self.report({"INFO"}, "Bake complete")
        return {'FINISHED'}
#--------------------BACKGROUND BAKE----------------------------------
class OBJECT_OT_omni_bake_bgbake_status(bpy.types.Operator):
    bl_idname = "object.omni_bake_bgbake_status"
    bl_label = "Check on the status of bakes running in the background"

    def execute(self, context):
        """Show a message box listing the progress of each background bake."""
        msg_items = []

        # Display remaining
        if len(bgbake_ops.bgops_list) == 0:
            msg_items.append("No background bakes are currently running")
        else:
            msg_items.append("--------------------------")
            for p in bgbake_ops.bgops_list:
                # Each background process writes its progress percentage to a
                # temp file named after its PID
                progress_path = Path(tempfile.gettempdir()) / f"OmniBake_Bgbake_{str(p[0].pid)}"
                try:
                    with open(str(progress_path), "r") as progfile:
                        progress = progfile.readline()
                except OSError:
                    # No file yet, as no bake operation has completed yet. Holding message
                    progress = 0
                msg_items.append(f"RUNNING: Process ID: {str(p[0].pid)} - Progress {progress}%")
                msg_items.append("--------------------------")

        functions.ShowMessageBox(msg_items, "Background Bake Status(es)")
        return {'FINISHED'}
class OBJECT_OT_omni_bake_bgbake_import(bpy.types.Operator):
    bl_idname = "object.omni_bake_bgbake_import"
    bl_label = "Import baked objects previously baked in the background"
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Append the results of finished background bakes into this session."""
        if bpy.context.mode != "OBJECT":
            self.report({"ERROR"}, "You must be in object mode")
            return {'CANCELLED'}

        for p in bgbake_ops.bgops_list_finished:
            savepath = Path(bpy.data.filepath).parent
            pid_str = str(p[0].pid)
            path = savepath / (pid_str + ".blend")
            # Blender append syntax is "<file.blend><sep>Collection<sep>".
            # Use os.sep rather than a hard-coded backslash so this works on
            # every platform, not just Windows.
            path = str(path) + os.sep + "Collection" + os.sep

            # Record the objects and collections before append (as append
            # doesn't give us a reference to the new stuff)
            functions.spot_new_items(initialise=True, item_type="objects")
            functions.spot_new_items(initialise=True, item_type="collections")
            functions.spot_new_items(initialise=True, item_type="images")

            # Append
            bpy.ops.wm.append(filename="OmniBake_Bakes", directory=path, use_recursive=False, active_collection=False)

            # If we didn't actually want the objects, delete them
            if not p[1]:
                # Delete objects we just imported (leaving only textures)
                for obj_name in functions.spot_new_items(initialise=False, item_type="objects"):
                    bpy.data.objects.remove(bpy.data.objects[obj_name])
                for col_name in functions.spot_new_items(initialise=False, item_type="collections"):
                    bpy.data.collections.remove(bpy.data.collections[col_name])

            # If we have to hide the source objects, do it
            if p[2]:
                # Get the newly introduced objects:
                objects_before_names = functions.spot_new_items(initialise=False, item_type="objects")
                for obj_name in objects_before_names:
                    # Source object may be absent when the baked name was
                    # truncated (long object names) — skip those
                    try:
                        bpy.data.objects[obj_name.replace("_Baked", "")].hide_set(True)
                    except KeyError:
                        pass

            # Delete the temp blend file (and its .blend1 backup); ignore if
            # they are already gone
            try:
                os.remove(str(savepath / pid_str) + ".blend")
                os.remove(str(savepath / pid_str) + ".blend1")
            except OSError:
                pass

        # Clear list for next time
        bgbake_ops.bgops_list_finished = []

        # Confirm back to user
        self.report({"INFO"}, "Import complete")
        messagelist = []
        messagelist.append(f"{len(functions.spot_new_items(initialise=False, item_type='objects'))} objects imported")
        messagelist.append(f"{len(functions.spot_new_items(initialise=False, item_type='images'))} textures imported")
        functions.ShowMessageBox(messagelist, "Import complete", icon='INFO')

        # If we imported an image, and we already had an image with the same
        # name, get rid of the original in favour of the imported one
        new_images_names = functions.spot_new_items(initialise=False, item_type="images")
        # Find any .001s (Blender suffixes duplicates with a numeric extension)
        for imgname in new_images_names:
            try:
                int(imgname[-3:])
                # Delete the existing version
                bpy.data.images.remove(bpy.data.images[imgname[0:-4]])
                # Rename our version
                bpy.data.images[imgname].name = imgname[0:-4]
            except ValueError:
                pass

        return {'FINISHED'}
class OBJECT_OT_omni_bake_bgbake_clear(bpy.types.Operator):
    """Delete the background bakes because you don't want to import them into Blender. NOTE: If you chose to save bakes or FBX externally, these are safe and NOT deleted. This is just if you don't want to import into this Blender session"""
    bl_idname = "object.omni_bake_bgbake_clear"
    bl_label = ""
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Remove the temp .blend files of finished bakes and forget them."""
        savepath = Path(bpy.data.filepath).parent
        for p in bgbake_ops.bgops_list_finished:
            pid_str = str(p[0].pid)
            # Remove the PID-named temp file and its .blend1 backup; the
            # files may already be gone, which is fine
            try:
                os.remove(str(savepath / pid_str) + ".blend")
                os.remove(str(savepath / pid_str) + ".blend1")
            except OSError:
                pass
        bgbake_ops.bgops_list_finished = []
        return {'FINISHED'}
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/bake_operation.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
# Bake helper method
def bakestolist(justcount = False):
#Assemble properties into list
selectedbakes = []
selectedbakes.append("diffuse") if bpy.context.scene.selected_col else False
selectedbakes.append("metalness") if bpy.context.scene.selected_metal else False
selectedbakes.append("roughness") if bpy.context.scene.selected_rough else False
selectedbakes.append("normal") if bpy.context.scene.selected_normal else False
selectedbakes.append("transparency") if bpy.context.scene.selected_trans else False
selectedbakes.append("transparencyroughness") if bpy.context.scene.selected_transrough else False
selectedbakes.append("emission") if bpy.context.scene.selected_emission else False
selectedbakes.append("specular") if bpy.context.scene.selected_specular else False
selectedbakes.append("alpha") if bpy.context.scene.selected_alpha else False
selectedbakes.append("sss") if bpy.context.scene.selected_sss else False
selectedbakes.append("ssscol") if bpy.context.scene.selected_ssscol else False
if justcount:
return len(selectedbakes)
else:
return selectedbakes
class BakeStatus:
    """Progress counters used to drive the bake progress bar."""
    # Total maps scheduled across all bake operations
    total_maps = 0
    # Maps completed so far
    current_map = 0
| 2,095 | Python | 40.919999 | 101 | 0.741766 |
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/bakefunctions.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from . import functions
import sys
from .bake_operation import BakeStatus
from .data import MasterOperation, BakeOperation
def optimize():
    """Drop the Cycles sample count to 16 for faster baking.

    The original sample count is stashed on MasterOperation so that
    undo_optimize() can restore it once the bake finishes. Returns True.
    """
    MasterOperation.orig_sample_count = bpy.context.scene.cycles.samples
    functions.printmsg("Reducing sample count to 16 for more efficient baking")
    bpy.context.scene.cycles.samples = 16
    return True
def undo_optimize():
    """Restore the Cycles sample count saved by optimize()."""
    #Restore sample count
    bpy.context.scene.cycles.samples = MasterOperation.orig_sample_count
def common_bake_prep():
    """Shared scene setup run before every bake pass.

    On the first operation of a run it records the original selection, UV
    layers and render engine so they can be restored afterwards; it then
    forces Cycles, normalizes the bake settings, processes UVs, and lowers
    the sample count via optimize(). Returns True.
    """
    #--------------Set Bake Operation Variables----------------------------
    current_bake_op = MasterOperation.current_bake_operation
    functions.printmsg("================================")
    functions.printmsg("---------Beginning Bake---------")
    functions.printmsg(f"{current_bake_op.bake_mode}")
    functions.printmsg("================================")
    #Run information — first/last operations get extra setup/teardown work
    op_num = MasterOperation.this_bake_operation_num
    firstop = False
    lastop = False
    if op_num == 1: firstop = True
    if op_num == MasterOperation.total_bake_operations: lastop = True
    #If this is a pbr bake, gather the selected maps
    if current_bake_op.bake_mode in {BakeOperation.PBR}:
        current_bake_op.assemble_pbr_bake_list()
    #Record batch name
    MasterOperation.batch_name = bpy.context.scene.batchName
    #Set values based on viewport selection
    current_bake_op.orig_objects = bpy.context.selected_objects.copy()
    current_bake_op.orig_active_object = bpy.context.active_object
    current_bake_op.bake_objects = bpy.context.selected_objects.copy()
    current_bake_op.active_object = bpy.context.active_object
    current_bake_op.orig_engine = bpy.context.scene.render.engine
    #Record original UVs for everyone (first operation only)
    if firstop:
        for obj in current_bake_op.bake_objects:
            try:
                MasterOperation.orig_UVs_dict[obj.name] = obj.data.uv_layers.active.name
            except AttributeError:
                # Object has no UV layers; store False so restore can skip it
                MasterOperation.orig_UVs_dict[obj.name] = False
    #Record the rendering engine (restored in common_bake_finishing)
    if firstop:
        MasterOperation.orig_engine = bpy.context.scene.render.engine
    current_bake_op.uv_mode = "normal"
    #----------------------------------------------------------------------
    #Force it to cycles (baking is Cycles-only)
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.render.bake.use_selected_to_active = False
    functions.printmsg(f"Selected to active is now {bpy.context.scene.render.bake.use_selected_to_active}")
    #If the user doesn't have a GPU, but has still set the render device to GPU, set it to CPU
    if not bpy.context.preferences.addons["cycles"].preferences.has_active_device():
        bpy.context.scene.cycles.device = "CPU"
    #Clear the trunc num for this session (name-truncation bookkeeping)
    functions.trunc_num = 0
    functions.trunc_dict = {}
    #Turn off that dam use clear.
    bpy.context.scene.render.bake.use_clear = False
    #Do what we are doing with UVs (only if we are the primary op)
    if firstop:
        functions.processUVS()
    #Optimize (lower sample count for speed; restored by undo_optimize)
    optimize()
    #Make sure the normal y setting is at default
    bpy.context.scene.render.bake.normal_g = "POS_Y"
    return True
def common_bake_finishing():
    """Shared wind-down after a bake pass.

    Restores the render engine and sample count, optionally preps/exports
    the baked meshes, restores the original selection and UVs, hides source
    objects when requested, and saves the file when running in background
    mode.
    """
    #Run information — first/last operations get extra teardown work
    current_bake_op = MasterOperation.current_bake_operation
    op_num = MasterOperation.this_bake_operation_num
    firstop = False
    lastop = False
    if op_num == 1: firstop = True
    if op_num == MasterOperation.total_bake_operations: lastop = True
    #Restore the original rendering engine (only after the last operation)
    if lastop:
        bpy.context.scene.render.engine = MasterOperation.orig_engine
    undo_optimize()
    #If prep mesh, or save object is selected, or running in the background, then do it
    #We do this on primary run only
    if firstop:
        if(bpy.context.scene.prepmesh or "--background" in sys.argv):
            functions.prepObjects(current_bake_op.bake_objects, current_bake_op.bake_mode)
    #If the user wants it, restore the original active UV map so we don't confuse anyone
    functions.restore_Original_UVs()
    #Restore the original object selection so we don't confuse anyone
    bpy.ops.object.select_all(action="DESELECT")
    for obj in current_bake_op.orig_objects:
        obj.select_set(True)
    bpy.context.view_layer.objects.active = current_bake_op.orig_active_object
    #Hide all the original objects
    if bpy.context.scene.prepmesh and bpy.context.scene.hidesourceobjects and lastop:
        for obj in current_bake_op.bake_objects:
            obj.hide_set(True)
    #Delete placeholder material
    if lastop and "OmniBake_Placeholder" in bpy.data.materials:
        bpy.data.materials.remove(bpy.data.materials["OmniBake_Placeholder"])
    # In background mode, save so the parent session can append the results
    if "--background" in sys.argv:
        bpy.ops.wm.save_mainfile()
def doBake():
    """Run the full PBR bake over every selected bake type and object.

    For each (bake type, object) pair: creates the target image, temporarily
    rewires each of the object's materials so the desired channel feeds the
    bake, bakes into an image-texture node, then restores the original
    materials. Progress is tracked on BakeStatus.
    """
    current_bake_op = MasterOperation.current_bake_operation
    #Do the prep we need to do for all bake types
    common_bake_prep()
    #Loop over the bake modes we are using
    def doBake_actual():
        IMGNAME = ""
        for thisbake in current_bake_op.pbr_selected_bake_types:
            for obj in current_bake_op.bake_objects:
                #Reset the already processed list (materials can be shared
                #between slots and must only be prepared once)
                mats_done = []
                functions.printmsg(f"Baking object: {obj.name}")
                #Truncate if needed from this point forward
                OBJNAME = functions.trunc_if_needed(obj.name)
                #Create the image we need for this bake (Delete if exists)
                IMGNAME = functions.gen_image_name(obj.name, thisbake)
                functions.create_Images(IMGNAME, thisbake, obj.name)
                #Prep the materials one by one
                materials = obj.material_slots
                for matslot in materials:
                    mat = bpy.data.materials.get(matslot.name)
                    if mat.name in mats_done:
                        functions.printmsg(f"Skipping material {mat.name}, already processed")
                        #Skip this loop
                        #We don't want to process any materials more than once or bad things happen
                        continue
                    else:
                        mats_done.append(mat.name)
                    #Make sure we are using nodes
                    if not mat.use_nodes:
                        functions.printmsg(f"Material {mat.name} wasn't using nodes. Have enabled nodes")
                        mat.use_nodes = True
                    nodetree = mat.node_tree
                    nodes = nodetree.nodes
                    #Take a copy of material to restore at the end of the process
                    functions.backupMaterial(mat)
                    #Create the image node and set to the bake texutre we are using
                    imgnode = nodes.new("ShaderNodeTexImage")
                    imgnode.image = bpy.data.images[IMGNAME]
                    imgnode.label = "OmniBake"
                    #Remove all disconnected nodes so don't interfere with typing the material
                    functions.removeDisconnectedNodes(nodetree)
                    #Use additional shader types
                    functions.useAdditionalShaderTypes(nodetree, nodes)
                    #Normal and emission bakes require no further material prep. Just skip the rest
                    if(thisbake != "normal" and thisbake != "emission"):
                        #Work out what type of material we are dealing with here and take correct action
                        mat_type = functions.getMatType(nodetree)
                        if(mat_type == "MIX"):
                            functions.setup_mix_material(nodetree, thisbake)
                        elif(mat_type == "PURE_E"):
                            functions.setup_pure_e_material(nodetree, thisbake)
                        elif(mat_type == "PURE_P"):
                            functions.setup_pure_p_material(nodetree, thisbake)
                    #Last action before leaving this material, make the image node selected and active
                    #(Cycles bakes into the active image-texture node)
                    functions.deselectAllNodes(nodes)
                    imgnode.select = True
                    nodetree.nodes.active = imgnode
                #Select only this object
                functions.selectOnlyThis(obj)
                #We are done with this image, set colour space
                functions.set_image_internal_col_space(bpy.data.images[IMGNAME], thisbake)
                #Bake the object for this bake mode
                functions.bakeoperation(thisbake, bpy.data.images[IMGNAME])
                #Update tracking
                BakeStatus.current_map+=1
                functions.printmsg(f"Bake maps {BakeStatus.current_map} of {BakeStatus.total_maps} complete")
                functions.write_bake_progress(BakeStatus.current_map, BakeStatus.total_maps)
                #Restore the original materials
                functions.printmsg("Restoring original materials")
                functions.restoreAllMaterials()
                functions.printmsg("Restore complete")
                #Last thing we do with this image is scale it
                functions.sacle_image_if_needed(bpy.data.images[IMGNAME])
    #Do the bake at least once
    doBake_actual()
    #Finished baking. Perform wind down actions
    common_bake_finishing()
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/functions.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from pathlib import Path
from ..ui import OmniBakePreferences
import bpy
import os
import sys
import tempfile
from . import material_setup
from .data import MasterOperation
#Global variables
#Maps each bake-type keyword to the name of the matching input socket on
#the Principled BSDF node.
#NOTE(review): "Displacement" is not a Principled BSDF input socket;
#displacement baking is marked "not currently implemented" elsewhere in
#this file -- confirm before relying on that entry.
psocketname = {
    "diffuse": "Base Color",
    "metalness": "Metallic",
    "roughness": "Roughness",
    "normal": "Normal",
    "transparency": "Transmission",
    "transparencyroughness": "Transmission Roughness",
    "specular": "Specular",
    "alpha": "Alpha",
    "sss": "Subsurface",
    "ssscol": "Subsurface Color",
    "displacement": "Displacement"
    }
def printmsg(msg):
    """Write *msg* to stdout with the add-on's "BAKE: " prefix."""
    print("BAKE: {}".format(msg))
def gen_image_name(obj_name, baketype):
    """Build the bake image name for *obj_name*/*baketype* by expanding
    the %...% placeholder tokens of the preference format string."""
    current_bake_op = MasterOperation.current_bake_operation
    #Fetch the format string configured in the add-on preferences
    prefs = bpy.context.preferences.addons[OmniBakePreferences.bl_idname].preferences
    image_name = prefs.img_name_format
    #Straight substitutions first
    image_name = image_name.replace("%OBJ%", obj_name)
    image_name = image_name.replace("%BATCH%", bpy.context.scene.batchName)
    image_name = image_name.replace("%BAKEMODE%", current_bake_op.bake_mode)
    #%BAKETYPE% maps to a per-type alias from preferences; unknown bake
    #types fall back to the raw baketype string.
    #("displacement" is listed although displacement baking is not
    #currently implemented.)
    aliases = {
        "diffuse": prefs.diffuse_alias,
        "metalness": prefs.metal_alias,
        "roughness": prefs.roughness_alias,
        "normal": prefs.normal_alias,
        "transparency": prefs.transmission_alias,
        "transparencyroughness": prefs.transmissionrough_alias,
        "emission": prefs.emission_alias,
        "specular": prefs.specular_alias,
        "alpha": prefs.alpha_alias,
        "sss": prefs.sss_alias,
        "ssscol": prefs.ssscol_alias,
        "displacement": prefs.displacement_alias,
    }
    return image_name.replace("%BAKETYPE%", aliases.get(baketype, baketype))
def removeDisconnectedNodes(nodetree):
    """Delete shader nodes whose first output has no links, repeating
    until stable (removing one node can orphan another upstream).

    Covers Principled, Emission, Mix, Add and Displacement nodes
    (displacement baking itself is not currently implemented).
    """
    prune_types = ("BSDF_PRINCIPLED", "EMISSION", "MIX_SHADER",
                   "ADD_SHADER", "DISPLACEMENT")
    nodes = nodetree.nodes
    while True:
        #Collect every disconnected shader node in a single pass
        orphans = [n for n in nodes
                   if n.type in prune_types and len(n.outputs[0].links) == 0]
        if not orphans:
            break
        for orphan in orphans:
            nodes.remove(orphan)
def backupMaterial(mat):
    """Duplicate *mat* under "<name>_OmniBake" so the untouched copy can
    be restored after baking (see restoreAllMaterials)."""
    backup = mat.copy()
    backup.name = "{}_OmniBake".format(mat.name)
def restoreAllMaterials():
    """Swap every object's material slots back to their "_OmniBake"
    backup copies, delete the baked-over originals, then strip the
    suffix so the scene ends up with the original material names.
    """
    #Not efficient but, if we are going to do things this way, we need to loop over every object in the scene
    dellist = []
    for obj in bpy.data.objects:
        for slot in obj.material_slots:
            origname = slot.name
            #Try to set to the corresponding material that was the backup
            try:
                slot.material = bpy.data.materials[origname + "_OmniBake"]
                #If not already on our list, log the original material (that we messed with) for mass deletion
                if origname not in dellist:
                    dellist.append(origname)
            except KeyError:
                #Not been backed up yet. Must not have processed an object with that material yet
                pass
    #Delete the unused materials
    for matname in dellist:
        bpy.data.materials.remove(bpy.data.materials[matname])
    #Rename all materials to the original name, leaving us where we started
    for mat in bpy.data.materials:
        if "_OmniBake" in mat.name:
            mat.name = mat.name.replace("_OmniBake", "")
def create_Images(imgname, thisbake, objname):
    """Create (or recreate) the internal image the bake renders into,
    tag it with bake metadata, and register it on the master operation.

    thisbake is the bake subtype, e.g. "diffuse", "normal".
    """
    #thisbake is subtype e.g. diffuse, ao, etc.
    current_bake_op = MasterOperation.current_bake_operation
    global_mode = current_bake_op.bake_mode
    batch = MasterOperation.batch_name
    printmsg(f"Creating image {imgname}")
    #Get the image height and width from the interface
    IMGHEIGHT = bpy.context.scene.imgheight
    IMGWIDTH = bpy.context.scene.imgwidth
    #If it already exists, remove it.
    if(imgname in bpy.data.images):
        bpy.data.images.remove(bpy.data.images[imgname])
    #Create image 32 bit or not 32 bit -- normal maps need the extra float precision
    if thisbake == "normal" :
        image = bpy.data.images.new(imgname, IMGWIDTH, IMGHEIGHT, float_buffer=True)
    else:
        image = bpy.data.images.new(imgname, IMGWIDTH, IMGHEIGHT, float_buffer=False)
    #Set tags (custom ID properties used to identify bake products later)
    image["SB_objname"] = objname
    image["SB_batch"] = batch
    image["SB_globalmode"] = global_mode
    image["SB_thisbake"] = thisbake
    #Always mark new images fake user when generated in the background,
    #otherwise they would be garbage-collected when the file is saved
    if "--background" in sys.argv:
        image.use_fake_user = True
    #Store it at bake operation level
    MasterOperation.baked_textures.append(image)
def deselectAllNodes(nodes):
    """Clear the selection flag on every node in *nodes*."""
    for each_node in nodes:
        each_node.select = False
def findSocketConnectedtoP(pnode, thisbake):
    """Return the output socket feeding the Principled input that
    corresponds to *thisbake* (looked up via psocketname).

    Assumes the input has at least one link -- callers run
    createdummynodes first to guarantee that; raises IndexError otherwise.
    """
    #Get socket name for this bake mode
    socketname = psocketname[thisbake]
    #Get socket of the pnode
    socket = pnode.inputs[socketname]
    fromsocket = socket.links[0].from_socket
    #Return the socket connected to the pnode
    return fromsocket
def createdummynodes(nodetree, thisbake):
    """For every Principled BSDF in *nodetree*, if the input matching
    *thisbake* is unconnected, plug in an RGB or Value node holding the
    socket's current default so the channel can later be routed to an
    emission shader for baking.
    """
    #Loop through pnodes
    nodes = nodetree.nodes
    for node in nodes:
        if node.type == "BSDF_PRINCIPLED":
            pnode = node
            #Get socket name for this bake mode
            socketname = psocketname[thisbake]
            #Get socket of the pnode
            psocket = pnode.inputs[socketname]
            #If it has something plugged in, we can leave it here
            if(len(psocket.links) > 0):
                continue
            #Get value of the unconnected socket
            val = psocket.default_value
            #If this is base col or ssscol, add an RGB node and set its value to that of the socket
            if(socketname == "Base Color" or socketname == "Subsurface Color"):
                rgb = nodetree.nodes.new("ShaderNodeRGB")
                rgb.outputs[0].default_value = val
                rgb.label = "OmniBake"
                nodetree.links.new(rgb.outputs[0], psocket)
            #If this is anything else, use a value node
            else:
                vnode = nodetree.nodes.new("ShaderNodeValue")
                vnode.outputs[0].default_value = val
                vnode.label = "OmniBake"
                nodetree.links.new(vnode.outputs[0], psocket)
def bakeoperation(thisbake, img):
    """Invoke Blender's bake operator for *thisbake* and pack *img*.

    Everything except normal maps is baked through the EMIT pass (the
    material was rewired so the wanted channel feeds an emission
    shader); normals use the dedicated NORMAL pass.
    """
    printmsg(f"Beginning bake for {thisbake}")
    pass_type = "NORMAL" if thisbake == "normal" else "EMIT"
    bpy.ops.object.bake(type=pass_type, save_mode="INTERNAL", use_clear=True)
    #Always pack the image for now
    img.pack()
def startingChecks(objects, bakemode):
    """Validate the scene and *objects* before starting a bake.

    Every problem found is collected into a message list; if any exist
    they are shown to the user in a popup and False is returned,
    otherwise True.  *bakemode* is currently unused in this body.

    Fixes user-facing message typos: "view lawyer" -> "view layer",
    "Princpled" -> "Principled", "Errors occured" -> "Errors occurred".
    """
    messages = []
    if len(objects) == 0:
        messages.append("ERROR: Nothing selected for bake")
    #Are any of our objects hidden?
    for obj in objects:
        if (obj.hide_viewport == True) or (obj.hide_get(view_layer=bpy.context.view_layer) == True):
            messages.append(f"ERROR: Object '{obj.name}' is hidden in viewport (eye icon in outliner) or in the current view layer (computer screen icon in outliner)")
    #What about hidden from rendering?
    for obj in objects:
        if obj.hide_render:
            messages.append(f"ERROR: Object '{obj.name}' is hidden for rendering (camera icon in outliner)")
    #None of the objects can have zero faces
    for obj in objects:
        if len(obj.data.polygons) < 1:
            messages.append(f"ERROR: Object '{obj.name}' has no faces")
    if(bpy.context.mode != "OBJECT"):
        messages.append("ERROR: Not in object mode")
    #PBR Bake Checks
    for obj in objects:
        #Is it mesh?
        if obj.type != "MESH":
            messages.append(f"ERROR: Object {obj.name} is not mesh")
            #Must continue here - other checks will throw exceptions
            continue
        #Are UVs OK?
        if bpy.context.scene.newUVoption == False and len(obj.data.uv_layers) == 0:
            messages.append(f"ERROR: Object {obj.name} has no UVs, and you aren't generating new ones")
            continue
        #Are materials OK? Fix if not
        if not checkObjectValidMaterialConfig(obj):
            fix_invalid_material_config(obj)
        #Do all materials have valid PBR config?
        if bpy.context.scene.more_shaders == False:
            for slot in obj.material_slots:
                mat = slot.material
                result = checkMatsValidforPBR(mat)
                if len(result) > 0:
                    for node_name in result:
                        messages.append(f"ERROR: Node '{node_name}' in material '{mat.name}' on object '{obj.name}' is not valid for PBR bake. In order to use more than just Principled, Emission, and Mix Shaders, turn on 'Use additional Shader Types'!")
        else:
            for slot in obj.material_slots:
                mat = slot.material
                result = checkExtraMatsValidforPBR(mat)
                if len(result) > 0:
                    for node_name in result:
                        messages.append(f"ERROR: Node '{node_name}' in material '{mat.name}' on object '{obj.name}' is not supported")
    #Let's report back
    if len(messages) != 0:
        ShowMessageBox(messages, "Errors occurred", "ERROR")
        return False
    else:
        #If we get here then everything looks good
        return True
#------------------------------------------
def processUVS():
    """Prepare UV maps before baking.

    Either Smart-UV-unwraps every bake object into a fresh "OmniBake"
    layer, or works with existing maps (activating ones already named
    "OmniBake" when the user prefers them).  Restores the user's
    original selection/active object before returning True.
    """
    current_bake_op = MasterOperation.current_bake_operation
    #------------------NEW UVS ------------------------------------------------------------
    if bpy.context.scene.newUVoption:
        printmsg("We are generating new UVs")
        printmsg("We are unwrapping each object individually with Smart UV Project")
        objs = current_bake_op.bake_objects
        for obj in objs:
            if("OmniBake" in obj.data.uv_layers):
                obj.data.uv_layers.remove(obj.data.uv_layers["OmniBake"])
            obj.data.uv_layers.new(name="OmniBake")
            obj.data.uv_layers["OmniBake"].active = True
            #Will set active object
            selectOnlyThis(obj)
            #Blender 2.91 breaks Smart UV Project in object mode, so switch to edit mode first
            bpy.ops.object.mode_set(mode="EDIT", toggle=False)
            #Unhide any geo that's hidden in edit mode or it'll cause issues.
            bpy.ops.mesh.reveal()
            bpy.ops.mesh.select_all(action="SELECT")
            bpy.ops.mesh.reveal()
            bpy.ops.uv.smart_project(island_margin=bpy.context.scene.unwrapmargin)
            bpy.ops.object.mode_set(mode="OBJECT", toggle=False)
    #------------------END NEW UVS ------------------------------------------------------------
    else: #i.e. New UV Option was not selected
        printmsg("We are working with the existing UVs")
        if bpy.context.scene.prefer_existing_sbmap:
            printmsg("We are preferring existing UV maps called OmniBake. Setting them to active")
            for obj in current_bake_op.bake_objects:
                if("OmniBake" in obj.data.uv_layers):
                    obj.data.uv_layers["OmniBake"].active = True
    #Before we finish, restore the original selected and active objects
    bpy.ops.object.select_all(action="DESELECT")
    for obj in current_bake_op.orig_objects:
        obj.select_set(True)
    bpy.context.view_layer.objects.active = current_bake_op.orig_active_object
    #Done
    return True
def restore_Original_UVs():
    """Re-activate each bake object's original UV layer, as recorded in
    MasterOperation.orig_UVs_dict before baking.

    A None entry means the object had no original UV layer to restore.
    (Also fixes the non-idiomatic ``!= None`` comparison and a stray
    space in ``obj. name``.)
    """
    current_bake_op = MasterOperation.current_bake_operation
    #First the bake objects
    for obj in current_bake_op.bake_objects:
        original_uv = MasterOperation.orig_UVs_dict[obj.name]
        if original_uv is not None:
            obj.data.uv_layers.active = obj.data.uv_layers[original_uv]
def setupEmissionRunThrough(nodetree, m_output_node, thisbake, ismix=False):
    """Wire the channel selected by *thisbake* through a new emission
    shader into the material output so it can be baked via the EMIT pass.

    When *ismix* is True the material has a Mix Shader; a parallel mix
    node is created so the original mix factor is preserved.

    Bug fix: createdummynodes() takes (nodetree, thisbake) -- the old
    code here passed the Principled node as an extra middle argument,
    which raised TypeError whenever this function ran.
    """
    nodes = nodetree.nodes
    pnode = find_pnode(nodetree)
    #Create emission shader
    emissnode = nodes.new("ShaderNodeEmission")
    emissnode.label = "OmniBake"
    #Connect to output
    if(ismix):
        #Find the existing mix node before we create a new one
        existing_m_node = find_mnode(nodetree)
        #Add a mix shader node and label it
        mnode = nodes.new("ShaderNodeMixShader")
        mnode.label = "OmniBake"
        #Connect new mix node to the output
        fromsocket = mnode.outputs[0]
        tosocket = m_output_node.inputs[0]
        nodetree.links.new(fromsocket, tosocket)
        #Connect new emission node to the first mix slot (leaving second empty)
        fromsocket = emissnode.outputs[0]
        tosocket = mnode.inputs[1]
        nodetree.links.new(fromsocket, tosocket)
        #If there is one, plug the factor from the original mix node into our new mix node
        if(len(existing_m_node.inputs[0].links) > 0):
            fromsocket = existing_m_node.inputs[0].links[0].from_socket
            tosocket = mnode.inputs[0]
            nodetree.links.new(fromsocket, tosocket)
        #If no input, add a value node set to same as the mnode factor
        else:
            val = existing_m_node.inputs[0].default_value
            vnode = nodes.new("ShaderNodeValue")
            vnode.label = "OmniBake"
            vnode.outputs[0].default_value = val
            fromsocket = vnode.outputs[0]
            tosocket = mnode.inputs[0]
            nodetree.links.new(fromsocket, tosocket)
    else:
        #Just connect our new emission to the output
        fromsocket = emissnode.outputs[0]
        tosocket = m_output_node.inputs[0]
        nodetree.links.new(fromsocket, tosocket)
    #Create dummy nodes for the socket for this bake if needed
    #(fixed call: was createdummynodes(nodetree, pnode, thisbake))
    createdummynodes(nodetree, thisbake)
    #Connect whatever is in Principled Shader for this bakemode to the emission
    fromsocket = findSocketConnectedtoP(pnode, thisbake)
    tosocket = emissnode.inputs[0]
    nodetree.links.new(fromsocket, tosocket)
#---------------------Node Finders---------------------------
def find_pnode(nodetree):
    """Return the first Principled BSDF node in *nodetree*, or False."""
    return next((n for n in nodetree.nodes if n.type == "BSDF_PRINCIPLED"), False)
def find_enode(nodetree):
    """Return the first Emission node in *nodetree*, or False."""
    return next((n for n in nodetree.nodes if n.type == "EMISSION"), False)
def find_mnode(nodetree):
    """Return the first Mix Shader node in *nodetree*, or False."""
    return next((n for n in nodetree.nodes if n.type == "MIX_SHADER"), False)
def find_onode(nodetree):
    """Return the first Material Output node in *nodetree*, or False."""
    return next((n for n in nodetree.nodes if n.type == "OUTPUT_MATERIAL"), False)
def checkObjectValidMaterialConfig(obj):
    """Return True when *obj* has at least one material slot, every slot
    holds a material, and every material uses nodes."""
    slots = obj.material_slots
    #No slots at all is invalid
    if len(slots) == 0:
        return False
    #Any empty slot is invalid
    if any(slot.material is None for slot in slots):
        return False
    #Every material must have nodes enabled
    return all(slot.material.use_nodes for slot in slots)
def getMatType(nodetree):
    """Classify a material node tree for the bake prep step.

    MIX    -> has both a Principled and a Mix Shader node
    PURE_P -> has a Principled node only
    PURE_E -> has an Emission node (no Principled)
    INVALID otherwise
    """
    has_principled = find_pnode(nodetree)
    if has_principled and find_mnode(nodetree):
        return "MIX"
    if has_principled:
        return "PURE_P"
    if find_enode(nodetree):
        return "PURE_E"
    return "INVALID"
def prepObjects(objs, baketype):
    """Duplicate each baked object into the "OmniBake_Bakes" collection,
    reduce it to a single "OmniBake" UV layer, and give it one new
    material wired up to the baked textures.

    NOTE(review): the *baketype* parameter is never read in this body.
    """
    current_bake_op = MasterOperation.current_bake_operation
    printmsg("Creating prepared object")
    #First we prepare objects
    export_objects = []
    for obj in objs:
        #-------------Create the prepared mesh----------------------------------------
        #Object might have a truncated name. Should use this if it's there
        objname = trunc_if_needed(obj.name)
        new_obj = obj.copy()
        new_obj.data = obj.data.copy()
        new_obj["SB_createdfrom"] = obj.name
        #clear all materials
        new_obj.data.materials.clear()
        new_obj.name = objname + "_OmniBake"
        #Create a collection for our baked objects if it doesn't exist
        if "OmniBake_Bakes" not in bpy.data.collections:
            c = bpy.data.collections.new("OmniBake_Bakes")
            bpy.context.scene.collection.children.link(c)
        #Make sure it's visible and enabled for current view layer or it screws things up
        bpy.context.view_layer.layer_collection.children["OmniBake_Bakes"].exclude = False
        bpy.context.view_layer.layer_collection.children["OmniBake_Bakes"].hide_viewport = False
        c = bpy.data.collections["OmniBake_Bakes"]
        #Link object to our new collection
        c.objects.link(new_obj)
        #Append this object to the export list
        export_objects.append(new_obj)
        #---------------------------------UVS--------------------------------------
        uvlayers = new_obj.data.uv_layers
        #If we generated new UVs, it will be called "OmniBake" and we are using that. End of.
        #Same if we are being called for Sketchfab upload, and last bake used new UVs
        if bpy.context.scene.newUVoption:
            pass
        #If there is an existing map called OmniBake, and we are preferring it, use that
        elif ("OmniBake" in uvlayers) and bpy.context.scene.prefer_existing_sbmap:
            pass
        #Even if we are not preferring it, if there is just one map called OmniBake, we are using that
        elif ("OmniBake" in uvlayers) and len(uvlayers) <2:
            pass
        #If there is an existing map called OmniBake, and we are not preferring it, it has to go
        #Active map becomes OmniBake
        elif ("OmniBake" in uvlayers) and not bpy.context.scene.prefer_existing_sbmap:
            uvlayers.remove(uvlayers["OmniBake"])
            active_layer = uvlayers.active
            active_layer.name = "OmniBake"
        #Finally, if none of the above apply, we are just using the active map
        #Active map becomes OmniBake
        else:
            active_layer = uvlayers.active
            active_layer.name = "OmniBake"
        #In all cases, we can now delete everything other than OmniBake
        deletelist = []
        for uvlayer in uvlayers:
            if (uvlayer.name != "OmniBake"):
                deletelist.append(uvlayer.name)
        for uvname in deletelist:
            uvlayers.remove(uvlayers[uvname])
        #---------------------------------END UVS--------------------------------------
        #Create a new material
        #call it same as object + batchname + baked
        mat = bpy.data.materials.get(objname + "_" + bpy.context.scene.batchName + "_baked")
        if mat is None:
            mat = bpy.data.materials.new(name=objname + "_" + bpy.context.scene.batchName +"_baked")
        # Assign it to object
        mat.use_nodes = True
        new_obj.data.materials.append(mat)
    #Set up the materials for each object
    for obj in export_objects:
        #Should only have one material
        mat = obj.material_slots[0].material
        nodetree = mat.node_tree
        material_setup.create_principled_setup(nodetree, obj)
        #Change object name to avoid collisions
        obj.name = obj.name.replace("_OmniBake", "_Baked")
    bpy.ops.object.select_all(action="DESELECT")
    for obj in export_objects:
        obj.select_set(state=True)
    if (not bpy.context.scene.prepmesh) and (not "--background" in sys.argv):
        #Delete duplicated objects (user did not ask to keep the prepared mesh)
        for obj in export_objects:
            bpy.data.objects.remove(obj)
    #Add the created objects to the bake operation list to keep track of them
    else:
        for obj in export_objects:
            MasterOperation.prepared_mesh_objects.append(obj)
def selectOnlyThis(obj):
    """Deselect everything, then select *obj* and make it the active
    object."""
    bpy.ops.object.select_all(action="DESELECT")
    obj.select_set(state=True)
    bpy.context.view_layer.objects.active = obj
def setup_pure_p_material(nodetree, thisbake):
    """Prep a Principled-only material: route the *thisbake* channel
    through a new emission shader into the output so the EMIT bake pass
    captures it."""
    #Create dummy nodes as needed
    createdummynodes(nodetree, thisbake)
    #Create emission shader
    nodes = nodetree.nodes
    m_output_node = find_onode(nodetree)
    loc = m_output_node.location
    #Create an emission shader, placed just above the output node
    emissnode = nodes.new("ShaderNodeEmission")
    emissnode.label = "OmniBake"
    emissnode.location = loc
    emissnode.location.y = emissnode.location.y + 200
    #Connect our new emission to the output
    fromsocket = emissnode.outputs[0]
    tosocket = m_output_node.inputs[0]
    nodetree.links.new(fromsocket, tosocket)
    #Connect whatever is in Principled Shader for this bakemode to the emission
    fromsocket = findSocketConnectedtoP(find_pnode(nodetree), thisbake)
    tosocket = emissnode.inputs[0]
    nodetree.links.new(fromsocket, tosocket)
def setup_pure_e_material(nodetree, thisbake):
    """Prep an emission-only material: when baking anything other than
    emission, mute the emission nodes so their light does not
    contaminate the bake.

    Bug fix: bake types are lowercase throughout this add-on (see the
    bake loop and psocketname keys), but this compared against
    "Emission", so emission nodes were muted even during an emission
    bake.  Compare case-insensitively instead.
    """
    if thisbake.lower() != "emission":
        nodes = nodetree.nodes
        for node in nodes:
            if node.type == "EMISSION":
                node.mute = True
                node.label = "OmniBakeMuted"
def setup_mix_material(nodetree, thisbake):
    """Prep a material with Mix Shaders: mirror the shader mix tree with
    MixRGB nodes carrying the *thisbake* channel of each branch, then
    feed the top-level MixRGB through an emission shader into the output
    so the EMIT bake pass captures the correctly-blended channel.
    """
    #No need to mute emission nodes. They are automuted by setting the RGBMix to black
    nodes = nodetree.nodes
    #Create dummy nodes as needed
    createdummynodes(nodetree, thisbake)
    #For every mix shader, create a mixrgb above it
    #Also connect the factor input to the same thing
    created_mix_nodes = {}
    for node in nodes:
        if node.type == "MIX_SHADER":
            loc = node.location
            rgbmix = nodetree.nodes.new("ShaderNodeMixRGB")
            rgbmix.label = "OmniBake"
            rgbmix.location = loc
            rgbmix.location.y = rgbmix.location.y + 200
            #If there is one, plug the factor from the original mix node into our new mix node
            if(len(node.inputs[0].links) > 0):
                fromsocket = node.inputs[0].links[0].from_socket
                tosocket = rgbmix.inputs["Fac"]
                nodetree.links.new(fromsocket, tosocket)
            #If no input, add a value node set to same as the mnode factor
            else:
                val = node.inputs[0].default_value
                vnode = nodes.new("ShaderNodeValue")
                vnode.label = "OmniBake"
                vnode.outputs[0].default_value = val
                fromsocket = vnode.outputs[0]
                tosocket = rgbmix.inputs[0]
                nodetree.links.new(fromsocket, tosocket)
            #Keep a dictionary pairing each shader mix node with its RGB twin
            created_mix_nodes[node.name] = rgbmix.name
    #Loop over the RGBMix nodes that we created, wiring each input to the
    #bake channel of whatever fed the matching Mix Shader slot
    for node in created_mix_nodes:
        mshader = nodes[node]
        rgb = nodes[created_mix_nodes[node]]
        #Mshader - Socket 1
        #First, check if there is anything plugged in at all
        if len(mshader.inputs[1].links) > 0:
            fromnode = mshader.inputs[1].links[0].from_node
            if fromnode.type == "BSDF_PRINCIPLED":
                #Get the socket we are looking for, and plug it into RGB socket 1
                fromsocket = findSocketConnectedtoP(fromnode, thisbake)
                nodetree.links.new(fromsocket, rgb.inputs[1])
            elif fromnode.type == "MIX_SHADER":
                #If it's a mix shader on the other end, connect the equivalent RGB node
                #Get the RGB node for that mshader
                fromrgb = nodes[created_mix_nodes[fromnode.name]]
                fromsocket = fromrgb.outputs[0]
                nodetree.links.new(fromsocket, rgb.inputs[1])
            elif fromnode.type == "EMISSION":
                #Set this input to black (effectively mutes the emission branch)
                rgb.inputs[1].default_value = (0.0, 0.0, 0.0, 1)
            else:
                printmsg("Error, invalid node config")
        else:
            rgb.inputs[1].default_value = (0.0, 0.0, 0.0, 1)
        #Mshader - Socket 2
        if len(mshader.inputs[2].links) > 0:
            fromnode = mshader.inputs[2].links[0].from_node
            if fromnode.type == "BSDF_PRINCIPLED":
                #Get the socket we are looking for, and plug it into RGB socket 2
                fromsocket = findSocketConnectedtoP(fromnode, thisbake)
                nodetree.links.new(fromsocket, rgb.inputs[2])
            elif fromnode.type == "MIX_SHADER":
                #If it's a mix shader on the other end, connect the equivalent RGB node
                #Get the RGB node for that mshader
                fromrgb = nodes[created_mix_nodes[fromnode.name]]
                fromsocket = fromrgb.outputs[0]
                nodetree.links.new(fromsocket, rgb.inputs[2])
            elif fromnode.type == "EMISSION":
                #Set this input to black (effectively mutes the emission branch)
                rgb.inputs[2].default_value = (0.0, 0.0, 0.0, 1)
            else:
                printmsg("Error, invalid node config")
        else:
            rgb.inputs[2].default_value = (0.0, 0.0, 0.0, 1)
    #Find the output node with location
    m_output_node = find_onode(nodetree)
    loc = m_output_node.location
    #Create an emission shader just above the output node
    emissnode = nodes.new("ShaderNodeEmission")
    emissnode.label = "OmniBake"
    emissnode.location = loc
    emissnode.location.y = emissnode.location.y + 200
    #Get the original mix node that was connected to the output node
    socket = m_output_node.inputs["Surface"]
    fromnode = socket.links[0].from_node
    #Find our created mix node that is paired with it
    rgbmix = nodes[created_mix_nodes[fromnode.name]]
    #Plug rgbmix into emission
    nodetree.links.new(rgbmix.outputs[0], emissnode.inputs[0])
    #Plug emission into output
    nodetree.links.new(emissnode.outputs[0], m_output_node.inputs[0])
#------------Long Name Truncation-----------------------
#Module-level state shared by trunc_if_needed/untrunc_if_needed:
#trunc_num is a running counter making truncated names unique;
#trunc_dict maps original name -> truncated name.
trunc_num = 0
trunc_dict = {}
def trunc_if_needed(objectname):
    """Return *objectname*, truncated to a unique short form when it is
    38 characters or longer; repeated calls reuse the cached result."""
    global trunc_num
    global trunc_dict
    #Previously truncated names come straight from the cache
    if objectname in trunc_dict:
        printmsg(f"Object name {objectname} was previously truncated. Returning that.")
        return trunc_dict[objectname]
    #Short enough: hand it back untouched
    if len(objectname) < 38:
        return objectname
    #Too long: build "<first 34 chars>~<counter>" and remember it
    printmsg(f"Object name {objectname} is too long and will be truncated")
    trunc_num += 1
    shortened = objectname[0:34] + "~" + str(trunc_num)
    trunc_dict[objectname] = shortened
    return shortened
def untrunc_if_needed(objectname):
    """Reverse trunc_if_needed: map a truncated name back to its
    original; names that were never truncated come back unchanged."""
    global trunc_num
    global trunc_dict
    for original, truncated in trunc_dict.items():
        if truncated == objectname:
            printmsg(f"Returning untruncated value {original}")
            return original
    return objectname
def ShowMessageBox(messageitems_list, title, icon = 'INFO'):
    """Show *messageitems_list* (one label row per entry) in a Blender
    popup menu with the given *title* and *icon*."""
    def draw(self, context):
        for m in messageitems_list:
            self.layout.label(text=m)
    bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
#---------------Bake Progress--------------------------------------------
def write_bake_progress(current_operation, total_operations):
    """Write the bake's percent-complete (an int, 0-100) to a
    per-process temp file so a foreground Blender instance can monitor
    a background bake."""
    percent = int((current_operation / total_operations) * 100)
    progress_file = Path(tempfile.gettempdir()) / f"OmniBake_Bgbake_{os.getpid()}"
    progress_file.write_text(str(percent))
#---------------End Bake Progress--------------------------------------------
#Snapshot store for spot_new_items: maps item type ("images", "objects",
#"collections") to the list of datablock names seen at initialise time.
past_items_dict = {}
def spot_new_items(initialise=True, item_type="images"):
    """Track creation of new datablocks of one type.

    With initialise=True, snapshot the names of all current datablocks
    of *item_type* ("images", "objects" or "collections") and return
    True.  With initialise=False, return the list of names that have
    appeared since the snapshot.

    Raises ValueError for an unknown *item_type* (previously this fell
    through and crashed later with an unrelated NameError on `source`).
    """
    global past_items_dict
    if item_type == "images":
        source = bpy.data.images
    elif item_type == "objects":
        source = bpy.data.objects
    elif item_type == "collections":
        source = bpy.data.collections
    else:
        raise ValueError(f"Unknown item_type: {item_type!r}")
    #First run
    if initialise:
        #Snapshot the current names for this item type
        past_items_dict[item_type] = [source_item.name for source_item in source]
        return True
    else:
        #Return names present now but absent from the snapshot
        past_items_list = past_items_dict[item_type]
        return [source_item.name for source_item in source
                if source_item.name not in past_items_list]
#---------------Validation Checks-------------------------------------------
def checkMatsValidforPBR(mat):
    """Return the names of connected shader-output nodes in *mat* that
    are not Principled, Mix or Emission (i.e. unsupported for the basic
    PBR bake).  An empty list means the material is valid.

    (Removed the unused local `valid` flag.)
    """
    invalid_node_names = []
    for node in mat.node_tree.nodes:
        if len(node.outputs) > 0:
            if node.outputs[0].type == "SHADER" and node.bl_idname not in (
                    "ShaderNodeBsdfPrincipled", "ShaderNodeMixShader", "ShaderNodeEmission"):
                #Only flag it if it is actually connected to something
                if len(node.outputs[0].links) > 0:
                    invalid_node_names.append(node.name)
    return invalid_node_names
def checkExtraMatsValidforPBR(mat):
    """Like checkMatsValidforPBR but with the extended shader set allowed
    (add, glossy, glass, refraction, diffuse, anisotropic, transparent).
    Returns the names of connected, unsupported shader nodes.

    (Removed the unused local `valid` flag and a leftover debug print
    that spammed the console on every validation pass.)
    """
    allowed = (
        "ShaderNodeBsdfPrincipled",
        "ShaderNodeMixShader",
        "ShaderNodeAddShader",
        "ShaderNodeEmission",
        "ShaderNodeBsdfGlossy",
        "ShaderNodeBsdfGlass",
        "ShaderNodeBsdfRefraction",
        "ShaderNodeBsdfDiffuse",
        "ShaderNodeBsdfAnisotropic",
        "ShaderNodeBsdfTransparent",
    )
    invalid_node_names = []
    for node in mat.node_tree.nodes:
        if len(node.outputs) > 0:
            if node.outputs[0].type == "SHADER" and node.bl_idname not in allowed:
                #Only flag it if it is actually connected to something
                if len(node.outputs[0].links) > 0:
                    invalid_node_names.append(node.name)
    return invalid_node_names
def deselect_all_not_mesh():
    """Deselect every non-mesh object; if no object is active afterwards,
    make one of the remaining selected objects active.

    Bug fix: previously, if the selection contained no meshes at all,
    indexing selected_objects[0] raised IndexError; now the active
    object is only reassigned when something is still selected.
    """
    import bpy
    for obj in bpy.context.selected_objects:
        if obj.type != "MESH":
            obj.select_set(False)
    #Do we still have an active object?
    remaining = bpy.context.selected_objects
    if bpy.context.active_object is None and remaining:
        #Pick arbitrary
        bpy.context.view_layer.objects.active = remaining[0]
def fix_invalid_material_config(obj):
    """Make *obj*'s material setup bakeable: fill empty material slots
    (or a missing slot list) with a shared "OmniBake_Placeholder"
    material and switch every material to nodes.  Always returns True."""
    if "OmniBake_Placeholder" in bpy.data.materials:
        mat = bpy.data.materials["OmniBake_Placeholder"]
    else:
        mat = bpy.data.materials.new("OmniBake_Placeholder")
        bpy.data.materials["OmniBake_Placeholder"].use_nodes = True
    # Assign it to object
    if len(obj.material_slots) > 0:
        #Assign it to every empty slot
        for slot in obj.material_slots:
            if slot.material == None:
                slot.material = mat
    else:
        # no slots
        obj.data.materials.append(mat)
    #All materials must use nodes
    for slot in obj.material_slots:
        mat = slot.material
        if mat.use_nodes == False:
            mat.use_nodes = True
    return True
def sacle_image_if_needed(img):
    """Scale *img* to the square resolution chosen in the scene's
    texture_res setting, if it differs.

    (Function name typo kept for compatibility with existing callers.)

    Bug fix: previously an unrecognised texture_res token left the
    proposed size at 0, scaling the image to 0x0 and destroying it; an
    unknown token now leaves the image untouched.
    """
    printmsg("Scaling images if needed")
    context = bpy.context
    #Map the UI resolution token to its pixel dimension
    sizes = {
        "0.5k": 512,
        "1k": 1024,
        "2k": 1024 * 2,
        "4k": 1024 * 4,
        "8k": 1024 * 8,
    }
    target = sizes.get(context.scene.texture_res)
    if target is None:
        #Unknown resolution token: leave the image as-is
        return
    width = img.size[0]
    height = img.size[1]
    if width != target or height != target:
        img.scale(target, target)
def set_image_internal_col_space(image, thisbake):
    """Mark every non-diffuse bake image as Non-Color data so Blender
    does not apply a colour transform to it."""
    if thisbake == "diffuse":
        return
    image.colorspace_settings.name = "Non-Color"
#------------------------Allow Additional Shaders----------------------------
def findProperInput(OName, pnode):
    """Return the input socket on *pnode* whose identifier matches
    *OName*, translating the few socket names that differ between the
    legacy BSDF nodes and the Principled BSDF.  Returns None when no
    socket matches.

    Fix: the rename chain used to run (and mutate the parameter) on
    every loop iteration; it is now applied once before scanning.
    """
    renames = {
        "Anisotropy": "Anisotropic",
        "Rotation": "Anisotropic Rotation",
        "Color": "Base Color",
    }
    OName = renames.get(OName, OName)
    for input in pnode.inputs:
        if input.identifier == OName:
            return input
def useAdditionalShaderTypes(nodetree, nodes):
    """Replace legacy shader nodes (glossy, glass, refraction, diffuse,
    anisotropic, transparent) with Principled BSDF nodes, and Add
    Shaders with Mix Shaders, re-wiring all links and copying socket
    defaults so the material can be baked through the PBR path.
    """
    count = 0
    for node in nodes:
        if (node.type == "BSDF_GLOSSY" or
            node.type == "BSDF_GLASS" or
            node.type == "BSDF_REFRACTION" or
            node.type == "BSDF_DIFFUSE" or
            node.type == "BSDF_ANISOTROPIC" or
            node.type == "BSDF_TRANSPARENT" or
            node.type == "ADD_SHADER"):
            #Add Shader becomes a Mix Shader; everything else becomes Principled
            if node.type == "ADD_SHADER":
                pnode = nodes.new("ShaderNodeMixShader")
                pnode.label = "mixNew" + str(count)
            else:
                pnode = nodes.new("ShaderNodeBsdfPrincipled")
                pnode.label = "BsdfNew" + str(count)
            pnode.location = node.location
            #Colour the replacement green so it is easy to spot
            pnode.use_custom_color = True
            pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467)
            #Re-create every incoming link on the replacement node, or copy
            #the socket default when nothing was connected
            for input in node.inputs:
                if len(input.links) != 0:
                    fromNode = input.links[0].from_node
                    for output in fromNode.outputs:
                        if len(output.links) != 0:
                            for linkOut in output.links:
                                if linkOut.to_node == node:
                                    inSocket = findProperInput(input.identifier, pnode)
                                    nodetree.links.new(output, inSocket)
                else:
                    inSocket = findProperInput(input.identifier, pnode)
                    if inSocket.name != "Shader":
                        inSocket.default_value = input.default_value
            #Re-create every outgoing link from the replacement node
            if len(node.outputs[0].links) != 0:
                for link in node.outputs[0].links:
                    toNode = link.to_node
                    for input in toNode.inputs:
                        if len(input.links) != 0:
                            if input.links[0].from_node == node:
                                nodetree.links.new(pnode.outputs[0], input)
            #Force Principled defaults that mimic the replaced shader.
            #NOTE(review): indices are positional Principled inputs -- on the
            #targeted Blender versions 4=Metallic, 5=Specular, 7=Roughness,
            #14=IOR, 15=Transmission; confirm against the Blender version in use.
            if node.type == "BSDF_REFRACTION" or node.type == "BSDF_GLASS":
                pnode.inputs[15].default_value = 1
            if node.type == "BSDF_DIFFUSE":
                pnode.inputs[5].default_value = 0
            if node.type == "BSDF_ANISOTROPIC" or node.type == "BSDF_GLOSSY":
                pnode.inputs[4].default_value = 1
                pnode.inputs[5].default_value = 0
            if node.type == "BSDF_TRANSPARENT":
                pnode.inputs[7].default_value = 0
                pnode.inputs[15].default_value = 1
                pnode.inputs[14].default_value = 1
            pnode.hide = True
            pnode.select = False
            #Remove the node we just replaced
            nodetree.nodes.remove(node)
            count += 1
NVIDIA-Omniverse/blender_omniverse_addons/README.md | # blender_omniverse_addons
This repository contains the source code for NVIDIA Omniverse Add-ons for Blender, including:
* [Omni Panel](https://docs.omniverse.nvidia.com/con_connect/con_connect/blender/omni-panel.html) - Utility functions for particles and material conversions.
* [Audio2Face Panel](https://docs.omniverse.nvidia.com/con_connect/con_connect/blender/audio2face.html) - A tool that helps get characters into Audio2Face, as well as assisting in the import of shape keys and animation clips onto rigged Blender characters.
* [Scene Optimizer](https://docs.omniverse.nvidia.com/con_connect/con_connect/blender/scene-optimizer.html) - A tool for quickly optimizing meshes, correcting bad geometry, creating automated UVs, and generating collision/proxy geometry. The user can pick any of six optional tools to run, and run them all on either selected meshes or all meshes in a given scene.
* UMM - The Universal Material Mapper Add-on for Blender.
## Usage
For information on usage, including video tutorials, please see the [Blender Omniverse documentation](https://docs.omniverse.nvidia.com/con_connect/con_connect/blender.html).
| 1,150 | Markdown | 70.937496 | 364 | 0.797391 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#Blender add-on registration metadata; read by Blender's add-on manager.
bl_info = {
    "name": "Audio2Face Tools",
    "author": "NVIDIA Corporation",
    "version": (1, 0, 1),
    "blender": (3, 4, 0),  # minimum supported Blender version
    "location": "View3D > Toolbar > Omniverse",
    "description": "NVIDIA Omniverse tools for working with Audio2Face",
    "warning": "",
    "doc_url": "",
    "category": "Omniverse",
}
## ======================================================================
import sys
from importlib import reload
import bpy
from bpy.props import (BoolProperty, CollectionProperty, EnumProperty, FloatProperty,
IntProperty, PointerProperty, StringProperty)
from omni_audio2face import (operators, ui)
for module in (operators, ui):
reload(module)
from omni_audio2face.ui import OBJECT_PT_Audio2FacePanel
from omni_audio2face.operators import (
OMNI_OT_PrepareScene,
OMNI_OT_MarkExportMesh,
OMNI_OT_ExportPreparedScene,
OMNI_OT_ChooseUSDFile,
OMNI_OT_ChooseAnimCache,
OMNI_OT_ImportRigFile,
OMNI_OT_TransferShapeData,
OMNI_OT_ImportAnimation,
)
## ======================================================================
class Audio2FaceToolsSettings(bpy.types.PropertyGroup):
	"""Add-on settings; register() attaches this as ``Scene.audio2face``."""
	## shapes stuff
	# When True, only the selected faces are split off and exported.
	use_face_selection: BoolProperty(description="Use Face Selection")
	# When True, also write a companion *_project.usda next to the exported head.
	export_project: BoolProperty(description="Export Project File", default=True)
	export_filepath: StringProperty(description="Export Path")
	import_filepath: StringProperty(description="Shapes Import Path")
	## anim import settings
	import_anim_path: StringProperty(description="Anim Cache Path")
	anim_start_type: EnumProperty(
		items=[("CURRENT", "At Play Head", "Load Clip at the playhead"),
				("CUSTOM", "Custom", "Choose a custom start frame")],
		default="CURRENT")
	anim_start_frame: IntProperty(default=0)
	anim_frame_rate: FloatProperty(default=60.0, min=1.0)
	anim_apply_scale: BoolProperty(default=True)
	anim_set_range: BoolProperty(default=False)
	anim_load_to: EnumProperty(
		items=[("CURRENT", "Current Action", "Load curves onto current Action"),
				("CLIP", "Clip", "Load curves as a new Action for NLE use")],
		default="CURRENT")
	anim_overwrite: BoolProperty(default=False, name="Overwrite Existing Clips")
	## Store pointers to all the meshes for the full setup.
	mesh_skin: PointerProperty(type=bpy.types.Object)
	mesh_tongue: PointerProperty(type=bpy.types.Object)
	mesh_eye_left: PointerProperty(type=bpy.types.Object)
	mesh_eye_right: PointerProperty(type=bpy.types.Object)
	mesh_gums_lower: PointerProperty(type=bpy.types.Object)
	transfer_apply_fix: BoolProperty(name="Apply Fix",
							description="Apply Basis to points not part of the head during transfer",
							default=False)
## ======================================================================
## All classes registered/unregistered by this add-on, in registration order.
classes = (
	Audio2FaceToolsSettings,
	OBJECT_PT_Audio2FacePanel,
	OMNI_OT_PrepareScene,
	OMNI_OT_MarkExportMesh,
	OMNI_OT_ExportPreparedScene,
	OMNI_OT_ChooseUSDFile,
	OMNI_OT_ChooseAnimCache,
	OMNI_OT_ImportRigFile,
	OMNI_OT_TransferShapeData,
	OMNI_OT_ImportAnimation,
)
def register():
	"""Register all add-on classes and the custom pointer properties.

	Calls :func:`unregister` first so that re-running registration (e.g. on an
	add-on reload) does not raise on already-registered classes.
	"""
	unregister()
	for item in classes:
		bpy.utils.register_class(item)
	bpy.types.Scene.audio2face = bpy.props.PointerProperty(type=Audio2FaceToolsSettings)
	bpy.types.Object.a2f_original = bpy.props.PointerProperty(type=bpy.types.Object)
	## Bugfix: the previous code flattened the version tuple into a digit string
	## and re-indexed its characters, which mangles any multi-digit component
	## (e.g. (1, 10, 0) became "1.1.0"). Join the tuple elements directly.
	OBJECT_PT_Audio2FacePanel.version = ".".join(str(x) for x in bl_info["version"])
## ======================================================================
def unregister():
	"""Tear down everything register() created; safe to call repeatedly."""
	# Classes may not be registered yet (e.g. the defensive call at the top of
	# register()), so individual failures are ignored rather than aborting.
	for cls in classes:
		try:
			bpy.utils.unregister_class(cls)
		except:
			continue
	# Remove the custom pointer properties if they are present.
	for owner, prop_name in ((bpy.types.Scene, "audio2face"),
							 (bpy.types.Object, "a2f_original")):
		if hasattr(owner, prop_name):
			delattr(owner, prop_name)
| 3,862 | Python | 30.153226 | 93 | 0.682289 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/operators.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import json
import os
import re
import sys
from typing import *
import numpy as np
import bpy
import bmesh
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty)
from bpy.types import (Collection, Context, Event, Mesh, Object, Scene)
from mathutils import *
## ======================================================================
def _get_filepath(scene:Scene, as_import:bool=False) -> str:
	"""Return the stripped USD path stored on the scene's Audio2Face settings.

	:param scene: Scene whose ``audio2face`` settings are read.
	:param as_import: When True return the import path, otherwise the export path.
	"""
	settings = scene.audio2face
	path = settings.import_filepath if as_import else settings.export_filepath
	return path.strip()
## ======================================================================
def _get_or_create_collection(collection:Collection, name:str) -> Collection:
	"""Find a child collection of the specified collection, or create it if it does not exist."""
	result = collection.children.get(name, None)
	if not result:
		result = bpy.data.collections.new(name)
		collection.children.link(result)
	## Make sure this is visible or things'll break in other ways down the line
	# An evaluated (depsgraph) copy cannot be edited; switch to the original datablock.
	if result.is_evaluated:
		result = result.original
	result.hide_render = result.hide_viewport = result.hide_select = False
	# Also un-exclude/un-hide the matching layer collection in the active view layer.
	# NOTE(review): only the view layer's *top-level* children are searched here,
	# so a collection nested deeper only triggers the warning below -- confirm.
	result_lc = [x for x in bpy.context.view_layer.layer_collection.children if x.collection is result]
	if len(result_lc):
		result_lc = result_lc[0]
		result_lc.exclude = False
		result_lc.hide_viewport = False
	else:
		print(f"-- Warning: No layer collection found for {result.name}")
	return result
## ======================================================================
def ensure_scene_collections(scene:Scene) -> Tuple[bpy.types.Collection]:
	"""Create (if missing) and return the Audio2Face collection hierarchy.

	:returns: ("Audio2Face", "A2F Export", "A2F Export Static", "A2F Export Dynamic").
	"""
	root = _get_or_create_collection(scene.collection, "Audio2Face")
	export = _get_or_create_collection(root, "A2F Export")
	static = _get_or_create_collection(export, "A2F Export Static")
	dynamic = _get_or_create_collection(export, "A2F Export Dynamic")
	return root, export, static, dynamic
## ======================================================================
def _get_base_collection() -> Collection:
	"""Return the root "Audio2Face" collection, or None when absent."""
	return bpy.data.collections.get("Audio2Face")

def _get_import_collection() -> Collection:
	"""Return the "A2F Import" collection, or None when absent."""
	return bpy.data.collections.get("A2F Import")

def _get_export_collection() -> Collection:
	"""Return the "A2F Export" collection, or None when absent."""
	return bpy.data.collections.get("A2F Export")
## ======================================================================
class OMNI_OT_PrepareScene(bpy.types.Operator):
	"""Prepares the active scene for interaction with Audio2Face"""
	bl_idname = "audio2face.prepare_scene"
	bl_label = "Prepare Scene for Audio2Face"
	bl_options = {"REGISTER", "UNDO"}
	@classmethod
	def poll(cls, context:Context) -> bool:
		# Any valid scene will do.
		return bool(context.scene)
	def execute(self, context:Context) -> Set[str]:
		# Creates the Audio2Face collection hierarchy if it is missing.
		scene = context.scene
		ensure_scene_collections(scene)
		self.report({"INFO"}, "A2F: Scene is prepped.")
		return {'FINISHED'}
## ======================================================================
def selected_mesh_objects(context:Context) -> List[Object]:
	"""Return selected mesh objects not already under the "Audio2Face" collection.

	:param context: Active Blender context (its ``selected_objects`` are scanned).
	:returns: Selected objects with Mesh data, excluding previously-marked exports.
	"""
	a2f_collection = bpy.data.collections.get("Audio2Face", None)
	## Bugfix: the falsy fallback was an empty dict; use an empty set for membership.
	export_objects = {x.name for x in a2f_collection.all_objects} if a2f_collection else set()
	## Cleanup: the mesh-data predicate was previously applied twice (once in a list
	## comprehension and again in a filter); a single pass is equivalent.
	return [x for x in context.selected_objects
			if x.name not in export_objects and x.data and isinstance(x.data, bpy.types.Mesh)]
## ======================================================================
def export_mesh_poll(context:Context) -> bool:
	"""
	Poll helper for mesh-export operators.

	With face selection on: require Edit Mode with the face select mode active,
	at least one eligible mesh, and at least one selected face. Otherwise:
	require Object Mode with at least one eligible mesh selected.
	"""
	has_meshes = bool(selected_mesh_objects(context))
	if not context.scene.audio2face.use_face_selection:
		return context.mode == "OBJECT" and has_meshes
	if (context.mode != "EDIT_MESH"
			or not context.tool_settings.mesh_select_mode[2]
			or not has_meshes):
		return False
	counts = context.active_object.data.count_selected_items()
	return bool(len(counts) and counts[2])
## ======================================================================
def make_valid_name(name:str) -> str:
	"""Sanitize a Blender datablock name for USD by mapping '-', ' ', and '.' to '_'."""
	return name.translate(str.maketrans("- .", "___"))
## ======================================================================
def process_export_mesh(orig:Object, target_collection:Collection, is_dynamic:bool, split:bool):
	"""
	Processes the selected mesh for export, adding original vertex
	indices and copying it over into the target collection.

	:param orig: Source mesh object to duplicate.
	:param target_collection: Collection the duplicate is linked into.
	:param is_dynamic: NOTE(review): currently unused in this body; the caller
		selects ``target_collection`` from it -- confirm before removing.
	:param split: When True, delete all faces that are unselected on ``orig``.
	:returns: The newly created "__Audio2Face_EX" duplicate object.
	"""
	assert isinstance(orig.data, bpy.types.Mesh)
	# Replace any duplicate left over from a previous run.
	obj_dupe_name = make_valid_name(orig.name) + "__Audio2Face_EX"
	if obj_dupe_name in bpy.data.objects:
		bpy.data.objects.remove(bpy.data.objects[obj_dupe_name])
	mesh_dupe = orig.data.copy()
	mesh_dupe.name = make_valid_name(orig.data.name) + "__Audio2Face_EX"
	obj_dupe = bpy.data.objects.new(obj_dupe_name, mesh_dupe)
	target_collection.objects.link(obj_dupe)
	# Remember the source object so shapes can be transferred back later.
	obj_dupe.a2f_original = orig
	bpy.ops.object.mode_set(mode="OBJECT")
	orig.select_set(False)
	obj_dupe.select_set(True)
	## Clean out all extraneous data.
	for item in obj_dupe.modifiers, obj_dupe.vertex_groups:
		item.clear()
	obj_dupe.shape_key_clear()
	## Add a custom data layer to remember the original point indices.
	# NOTE(review): .get's default is evaluated eagerly, so attributes.new() runs
	# even when "index_orig" already exists on this fresh copy -- confirm harmless.
	attr = obj_dupe.data.attributes.get("index_orig",
			obj_dupe.data.attributes.new("index_orig", "INT", "POINT"))
	vertex_count = len(obj_dupe.data.vertices)
	attr.data.foreach_set("value", np.arange(vertex_count))
	bpy.ops.object.mode_set(mode="OBJECT")
	if split:
		## Delete all unselected faces.
		deps = bpy.context.evaluated_depsgraph_get()
		# Face indices on the ORIGINAL mesh that are not selected.
		indices = [x.index for x in orig.data.polygons if not x.select]
		bm = bmesh.new()
		bm.from_object(obj_dupe, deps)
		bm.faces.ensure_lookup_table()
		## Must convert to list; delete does not accept map objects
		# NOTE(review): despite the name, "selected" holds the UNselected faces
		# gathered above (the ones being deleted).
		selected = list(map(lambda x: bm.faces[x], indices))
		bpy.ops.object.mode_set(mode="EDIT")
		bmesh.ops.delete(bm, geom=selected, context="FACES")
		bpy.ops.object.mode_set(mode="OBJECT")
		bm.to_mesh(obj_dupe.data)
	## Make sure to snap the object into place.
	obj_dupe.matrix_world = orig.matrix_world.copy()
	return obj_dupe
## ======================================================================
class OMNI_OT_MarkExportMesh(bpy.types.Operator):
	"""Tags the selected mesh for Audio2Face export, as either static or dynamic."""
	bl_idname = "audio2face.mark_export_mesh"
	bl_label = "Mark Mesh for Export"
	bl_options = {"REGISTER", "UNDO"}
	# True: mesh goes into "A2F Export Dynamic"; False: "A2F Export Static".
	is_dynamic: BoolProperty(description="Mesh is Dynamic", default=False)
	@classmethod
	def poll(cls, context:Context) -> bool:
		return export_mesh_poll(context)
	def execute(self, context:Context) -> Set[str]:
		"""Duplicate each selected mesh into the appropriate export collection."""
		a2f_collection, a2f_export, a2f_export_static, a2f_export_dynamic = ensure_scene_collections(context.scene)
		target_collection = a2f_export_dynamic if self.is_dynamic else a2f_export_static
		split = context.scene.audio2face.use_face_selection
		processed_meshes = []
		for mesh in selected_mesh_objects(context):
			context.view_layer.objects.active = mesh
			result = process_export_mesh(mesh, target_collection, self.is_dynamic, split)
			processed_meshes.append(result)
		## Bugfix: guard against an empty result so [-1] cannot raise IndexError;
		## poll() checks the selection, but state can change before execute() runs.
		if not processed_meshes:
			return {'CANCELLED'}
		context.view_layer.objects.active = processed_meshes[-1]
		return {'FINISHED'}
## ======================================================================
class OMNI_OT_ChooseUSDFile(bpy.types.Operator):
	"""File chooser with proper extensions."""
	bl_idname = "collections.usd_choose_file"
	bl_label = "Choose USD File"
	bl_options = {"REGISTER"}
	## Required for specifying extensions.
	filepath: StringProperty(subtype="FILE_PATH")
	operation: EnumProperty(items=[("IMPORT", "Import", ""),("EXPORT", "Export", "")], default="IMPORT", options={"HIDDEN"})
	filter_glob: StringProperty(default="*.usd;*.usda;*.usdc", options={"HIDDEN"})
	check_existing: BoolProperty(default=True, options={"HIDDEN"})
	def execute(self, context:Context):
		"""Normalize the chosen path and store it on the scene's settings."""
		chosen = os.path.abspath(bpy.path.abspath(self.filepath)).replace("\\", "/")
		settings = context.scene.audio2face
		if self.operation == "EXPORT":
			settings.export_filepath = chosen
		else:
			settings.import_filepath = chosen
		return {"FINISHED"}
	def invoke(self, context:Context, event:Event) -> Set[str]:
		"""Open the file browser, defaulting to 'untitled.usdc' when no path is set."""
		if not self.filepath.strip():
			self.filepath = "untitled.usdc"
		context.window_manager.fileselect_add(self)
		return {"RUNNING_MODAL"}
## ======================================================================
class OMNI_OT_ChooseAnimCache(bpy.types.Operator):
	"""File chooser with proper extensions."""
	bl_idname = "collections.usd_choose_anim_cache"
	bl_label = "Choose Animation Cache"
	bl_options = {"REGISTER"}
	## Required for specifying extensions.
	filepath: StringProperty(subtype="FILE_PATH")
	filter_glob: StringProperty(default="*.usd;*.usda;*.usdc;*.json", options={"HIDDEN"})
	check_existing: BoolProperty(default=True, options={"HIDDEN"})
	def execute(self, context:Context):
		"""Normalize the chosen path and store it as the animation import path."""
		chosen = os.path.abspath(bpy.path.abspath(self.filepath)).replace("\\", "/")
		context.scene.audio2face.import_anim_path = chosen
		return {"FINISHED"}
	def invoke(self, context:Context, event:Event) -> Set[str]:
		"""Open the file browser on the current path."""
		context.window_manager.fileselect_add(self)
		return {"RUNNING_MODAL"}
## ======================================================================
class OMNI_OT_ExportPreparedScene(bpy.types.Operator):
	"""Exports prepared scene as USD for Audio2Face."""
	bl_idname = "audio2face.export_prepared_scene"
	bl_label = "Export Prepared Scene"
	bl_options = {"REGISTER"}
	@classmethod
	def poll(cls, context:Context) -> bool:
		# Requires a populated "A2F Export" collection and a chosen export path.
		a2f_export = _get_export_collection()
		child_count = len(a2f_export.all_objects) if a2f_export else 0
		path = _get_filepath(context.scene)
		return a2f_export and child_count and len(path)
	def execute(self, context:Context) -> Set[str]:
		# Phase 1: link the export collection into a temporary scene and write it
		# out as USD. Phase 2 (optional): write a companion *_project.usda built
		# from the bundled template, describing the head meshes for Audio2Face.
		## Grab filepath before the scene switches
		scene = context.scene
		filepath = _get_filepath(scene)
		# NOTE(review): .get's default is evaluated eagerly, so scenes.new()
		# runs even when "a2f_export" already exists, creating a stray
		# "a2f_export.001" in that case -- confirm and clean up if needed.
		export_scene = bpy.data.scenes.get("a2f_export",
							bpy.data.scenes.new("a2f_export"))
		for child_collection in list(export_scene.collection.children):
			export_scene.collection.children.remove(child_collection)
		export_collection = _get_export_collection()
		export_scene.collection.children.link(export_collection)
		context.window.scene = export_scene
		# Single-frame, geometry-only export rooted at /World.
		args = {
			"filepath": filepath,
			"start": scene.frame_current,
			"end": scene.frame_current,
			"convert_to_cm": False,
			"export_lights": False,
			"export_cameras": False,
			"export_materials": False,
			"export_textures": False,
			"default_prim_path": "/World",
			"root_prim_path": "/World",
		}
		result = bpy.ops.wm.usd_export(**args)
		# Restore the original scene and drop the temporary one.
		context.window.scene = scene
		bpy.data.scenes.remove(export_scene)
		export_scene = None
		## generate the project file
		if scene.audio2face.export_project:
			project_filename = os.path.basename(filepath)
			skin = scene.audio2face.mesh_skin
			tongue = scene.audio2face.mesh_tongue
			eye_left = scene.audio2face.mesh_eye_left
			eye_right= scene.audio2face.mesh_eye_right
			gums = scene.audio2face.mesh_gums_lower
			a2f_export_static = bpy.data.collections.get("A2F Export Static", None)
			static_objects = list(a2f_export_static.objects) if a2f_export_static else []
			a2f_export_dynamic = bpy.data.collections.get("A2F Export Dynamic", None)
			dynamic_objects = list(a2f_export_dynamic.objects) if a2f_export_dynamic else []
			# The special-role meshes get their own template entries below, so
			# remove them from the generic static/dynamic lists.
			for mesh in skin, tongue:
				if mesh in dynamic_objects:
					dynamic_objects.pop(dynamic_objects.index(mesh))
			for mesh in eye_left, eye_right, gums:
				if mesh in static_objects:
					static_objects.pop(static_objects.index(mesh))
			# Build the USDA attribute lines pointing Audio2Face at each mesh prim.
			transfer_data = ""
			if skin:
				transfer_data += '\t\tstring mm:skin = "/World/character_root/{}/{}"\n'.format(make_valid_name(skin.name),
												make_valid_name(skin.data.name))
			if tongue:
				transfer_data += '\t\tstring mm:tongue = "/World/character_root/{}/{}"\n'.format(make_valid_name(tongue.name),
												make_valid_name(tongue.data.name))
			if eye_left:
				transfer_data += '\t\tstring[] mm:l_eye = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(eye_left.name),
												make_valid_name(eye_left.data.name))
			if eye_right:
				transfer_data += '\t\tstring[] mm:r_eye = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(eye_right.name),
												make_valid_name(eye_right.data.name))
			if gums:
				transfer_data += '\t\tstring[] mm:gums = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(gums.name),
												make_valid_name(gums.data.name))
			if len(static_objects):
				transfer_data += '\t\tstring[] mm:extra_static = [{}]\n'.format(
					', '.join(['"/World/character_root/{}/{}"'.format(make_valid_name(x.name), make_valid_name(x.data.name))
								for x in static_objects])
				)
			if len(dynamic_objects):
				transfer_data += '\t\tstring[] mm:extra_dynamic = [{}]\n'.format(
					', '.join(['"/World/character_root/{}/{}"'.format(make_valid_name(x.name), make_valid_name(x.data.name))
								for x in dynamic_objects])
				)
			# Fill the template shipped alongside this add-on.
			template = ""
			template_path = os.sep.join([os.path.dirname(os.path.abspath(__file__)), "templates", "project_template.usda"])
			with open(template_path, "r") as fp:
				template = fp.read()
			template = template.replace("%filepath%", project_filename)
			template = template.replace("%transfer_data%", transfer_data)
			project_usd_filepath = filepath.rpartition(".")[0] + "_project.usda"
			with open(project_usd_filepath, "w") as fp:
				fp.write(template)
			self.report({"INFO"}, f"Exported project to: '{project_usd_filepath}'")
		else:
			self.report({"INFO"}, f"Exported head to: '{filepath}'")
		return result
## ======================================================================
def _abs_path(file_path:str) -> str:
if not len(file_path) > 2:
return file_path
if file_path[0] == '/' and file_path[1] == '/':
file_path = bpy.path.abspath(file_path)
return os.path.abspath(file_path)
## ======================================================================
class OMNI_OT_ImportRigFile(bpy.types.Operator):
	"""Imports a rigged USD file from Audio2Face"""
	bl_idname = "audio2face.import_rig"
	bl_label = "Import Rig File"
	bl_options = {"REGISTER", "UNDO"}
	@classmethod
	def poll(cls, context:Context) -> bool:
		# Only requires that an import path has been chosen.
		return len(_get_filepath(context.scene, as_import=True))
	def execute(self, context:Context) -> Set[str]:
		"""Import the rigged head USD into a clean "A2F Import" collection.

		Steps: activate the import layer collection, clear any previous import,
		run the USD importer, reset root scales, and strip childless empties.
		"""
		filepath = _get_filepath(context.scene, as_import=True)
		args = {
			"filepath": filepath,
			"import_skeletons": False,
			"import_materials": False,
		}
		scene = context.scene
		## Switching the active collection requires this odd code.
		base = _get_or_create_collection(scene.collection, "Audio2Face")
		import_col = _get_or_create_collection(base, "A2F Import")
		base_lc = [x for x in context.view_layer.layer_collection.children if x.collection is base][0]
		import_lc = [x for x in base_lc.children if x.collection is import_col][0]
		context.view_layer.active_layer_collection = import_lc
		if not context.mode == 'OBJECT':
			try:
				bpy.ops.object.mode_set(mode="OBJECT")
			except RuntimeError:
				# mode_set can fail when there is no active object; safe to ignore.
				pass
		if len(import_col.all_objects):
			bpy.ops.object.select_all(action="DESELECT")
			## Let's clean out the import collection on each go to keep things simple
			bpy.ops.object.select_same_collection(collection=import_col.name)
			bpy.ops.object.delete()
		## Deselect everything; the importer links new objects into the active
		## (import) layer collection activated above.
		bpy.ops.object.select_all(action='DESELECT')
		## Cleanup: removed an unused context-override dict and an unused result
		## binding -- neither was passed to the import call.
		bpy.ops.wm.usd_import(**args)
		roots = [x for x in import_col.objects if not x.parent]
		for root in roots:
			## bugfix: don't reset rotation, since there may have been a rotation
			## carried over from the blender scene and we want to line up visibly
			## even though it has no bearing on the shape transfer.
			root.scale = [1.0, 1.0, 1.0]
		## Strip out any childless empties, like joint1.
		empties = [x for x in import_col.objects if not len(x.children) and x.type == "EMPTY"]
		for empty in empties:
			bpy.data.objects.remove(empty)
		self.report({"INFO"}, f"Imported Rig from: {filepath}")
		return {"FINISHED"}
## ======================================================================
class AnimData:
	"""Normalized animation payload shared by the JSON and USD(A) cache loaders."""
	def __init__(self, clip_name:str, shapes:List[str], key_data:List[List[float]], start_frame:int=0, frame_rate:float=60.0):
		self.clip_name = clip_name
		self.shapes = shapes
		self.num_frames = len(key_data)
		# Stored transposed: one list of per-frame values for each shape.
		self.key_data = self._swizzle_data(key_data)
		self.start_frame = start_frame
		self.frame_rate = frame_rate
	def curves(self):
		"""Yield (fcurve data path, per-frame values) pairs, one per blend shape."""
		for shape_name, values in zip(self.shapes, self.key_data):
			yield f'key_blocks["{shape_name}"].value', values
	def _swizzle_data(self, data:List[List[float]]) -> List[List[float]]:
		"""Transpose per-frame rows into per-shape columns for direct fcurve writes."""
		return [[row[column] for row in data] for column, _ in enumerate(self.shapes)]
class OMNI_OT_ImportAnimation(bpy.types.Operator):
	"""Imports a shape key animation from an Audio2Face USDA file or JSON"""
	bl_idname = "audio2face.import_animation"
	bl_label = "Import Animation"
	bl_options = {"REGISTER", "UNDO"}
	start_type: EnumProperty(
		name="Start Type",
		items=[("CURRENT", "Current Action", "Load Clip at the playhead"),
				("CUSTOM", "Custom", "Choose a custom start frame")],
		default="CURRENT")
	start_frame: IntProperty(default=1, name="Start Frame", description="Align start of animation to this frame")
	frame_rate: FloatProperty(default=60.0, min=1.0, name="Frame Rate", description="Frame Rate of file you're importing")
	set_range: BoolProperty(default=False, name="Set Range", description="If checked, set the scene animation frame range to the imported file's range")
	apply_scale: BoolProperty(default=False, name="Apply Clip Scale",
							description="If checked and the clip framerate differs from the scene, scale the keys to match")
	load_to: EnumProperty(
		name="Load To",
		description="Load animation to current Action, or to a new Action Clip",
		items=[("CURRENT", "Current Action", "Load curves onto current Action"),
				("CLIP", "Clip", "Load curves as a new Action Clip (for NLE use)")],
		default="CURRENT")
	overwrite: BoolProperty(default=False, name="Overwrite Existing Clips")
	@classmethod
	def poll(cls, context:Context) -> bool:
		# Need a cache path plus an active, selected mesh, in Object Mode.
		have_file = len(context.scene.audio2face.import_anim_path)
		have_mesh = context.active_object and context.active_object.type == "MESH"
		have_selection = context.active_object in context.selected_objects
		is_object_mode = context.mode == "OBJECT"
		return all([have_file, have_mesh, have_selection, is_object_mode])
	def apply_animation(self, animation:AnimData, ob:Object) -> bool:
		"""
		Write the animation's curves onto ob's shape key action.

		:param animation: Parsed animation data to apply.
		:param ob: Mesh object whose shape keys (matching ``animation.shapes``)
			receive the fcurves.
		:returns: True on success, False when an existing clip blocked the load.
		"""
		shapes = ob.data.shape_keys
		action = None
		start_frame = bpy.context.scene.frame_current if self.start_type == "CURRENT" else self.start_frame
		if shapes.animation_data is None:
			shapes.animation_data_create()
		nla_tracks = shapes.animation_data.nla_tracks
		if self.load_to == "CLIP":
			def _predicate(track):
				# True when the track already holds a strip using this clip's action.
				for strip in track.strips:
					if strip.action and strip.action.name == animation.clip_name:
						return True
				return False
			if len(nla_tracks):
				existing_tracks = list(filter(_predicate, nla_tracks))
				if len(existing_tracks) and not self.overwrite:
					self.report({"ERROR"}, f"Clip named {animation.clip_name} already exists; aborting.")
					return False
				else:
					## remove the track(s) specified for overwrites
					for track in existing_tracks:
						self.report({"INFO"}, f"Removing old track {track.name}")
						nla_tracks.remove(track)
			if not animation.clip_name in bpy.data.actions:
				bpy.data.actions.new(animation.clip_name)
			action = bpy.data.actions[animation.clip_name]
			offset = 0
		else:
			if not shapes.animation_data.action:
				bpy.data.actions.new(animation.clip_name)
				action = shapes.animation_data.action = bpy.data.actions[animation.clip_name]
			else:
				action = shapes.animation_data.action
			offset = start_frame
		## clean out old curves
		to_clean = []
		for curve in action.fcurves:
			for name in animation.shapes:
				if f'["{name}"]' in curve.data_path:
					to_clean.append(curve)
		for curve in to_clean:
			action.fcurves.remove(curve)
		scene_framerate = bpy.context.scene.render.fps
		clip_scale = 1.0
		clip_to_scene_scale = scene_framerate / animation.frame_rate
		# Only rescale key timings here for CURRENT; CLIP strips use strip.scale below.
		if self.apply_scale and self.load_to == "CURRENT" and not (int(animation.frame_rate) == int(scene_framerate)):
			clip_scale = clip_to_scene_scale
		for data_path, values in animation.curves():
			curve = action.fcurves.new(data_path)
			curve.keyframe_points.add(len(values))
			for index, value in enumerate(values):
				curve.keyframe_points[index].co = (float(index) * clip_scale + offset, value)
		if self.load_to == "CLIP":
			## I'm really not sure if this is the correct idea, but when loading as clip
			## we push a new NLA_Track and add the action as a strip, then offset it using
			## the strip frame start.
			track = nla_tracks.new()
			track.name = animation.clip_name + "_NLE"
			strip = track.strips.new(animation.clip_name, start_frame, action)
			if self.apply_scale:
				strip.scale = clip_to_scene_scale
			for item in [x for x in nla_tracks if not x == track]:
				item.select = False
			track.select = True
		## Bugfix: report success so load_animation() can propagate the result.
		return True
	def load_animation_usda(self, clip_name:str, file_path:str) -> AnimData:
		"""
		Do a quick parse of the input USDA file in plain text, as we can't use the USD Python API yet.
		!TODO: When the USD Python API is available, switch to it instead.
		"""
		with open(file_path, "r") as fp:
			source = fp.read().strip()
		## quick sanity checks; not robust!
		if not all([
				source.startswith("#usda"),
				"framesPerSecond = " in source,
				"uniform token[] blendShapes = [" in source,
				"float[] blendShapeWeights.timeSamples = {" in source,
				"token[] custom:mh_curveNames = [" in source,
				"float[] custom:mh_curveValues.timeSamples = {" in source]):
			self.report({"ERROR"}, f"USDA not a weights animation cache: {file_path}")
			return None
		shape_names = source.partition("uniform token[] blendShapes = [")[-1].partition("]")[0]
		shape_names = shape_names.replace('"','').replace(' ', '').split(',')
		## strip to timeSamples, split lines, then split off the index and parse out the arrays into floats
		samples = source.partition("float[] blendShapeWeights.timeSamples = {")[-1].partition("}")[0].strip().split('\n')
		weights = [list(map(float, x.partition(": [")[-1].rpartition("]")[0].replace(" ", "").split(","))) for x in samples]
		## capture frame rate
		## Cleanup: framesPerSecond was previously parsed twice (int then float),
		## and unused endTimeCode/startTimeCode int parses could raise on
		## fractional values; parse once as float.
		frame_rate = float(source.partition("framesPerSecond = ")[-1].partition("\n")[0])
		return AnimData(clip_name=clip_name, shapes=shape_names, key_data=weights, frame_rate=frame_rate)
	def load_animation_json(self, clip_name:str, file_path:str) -> AnimData:
		"""Load an exported Audio2Face JSON weights cache (facsNames / weightMat)."""
		assert file_path.lower().endswith(".json")
		file_path = _abs_path(file_path)
		data = None
		with open(file_path, "r") as fp:
			try:
				data = json.load(fp)
			## Bugfix: catch only JSON parse errors (JSONDecodeError is a
			## ValueError) instead of a bare except that hid every failure.
			except ValueError:
				return None
		if not "facsNames" in data or not "weightMat" in data or not "numFrames" in data:
			self.report({"ERROR"}, f"Malformed JSON file (missing data): {file_path}")
			return None
		if not data["numFrames"] == len(data["weightMat"]):
			self.report({"ERROR"}, f"Malformed JSON: malformed file. Expected {data['numFrames']} frames, found {len(data['weightMat'])} -- {file_path}")
			return None
		return AnimData(clip_name=clip_name, shapes=data["facsNames"], key_data=data["weightMat"],
						frame_rate=self.frame_rate)
	def load_animation(self, file_path:str, ob:Object) -> bool:
		"""Dispatch to the JSON or USDA loader and apply the result to ob."""
		assert ob and isinstance(ob, (bpy.types.Object))
		if not file_path.endswith((".usda", ".json")):
			## Bugfix: report types must be the uppercase enum value "ERROR";
			## {"Error"} raises a TypeError inside Blender.
			self.report({"ERROR"}, f"Path should point to a USDA or JSON file: {file_path}")
			return False
		clip_name = os.path.basename(file_path).partition(".")[0]
		self.report({"INFO"}, f"Loading anim: {file_path}")
		if file_path.endswith(".json"):
			data = self.load_animation_json(clip_name, file_path)
		else:
			data = self.load_animation_usda(clip_name, file_path)
		if data is None:
			self.report({"ERROR"}, f"Unable to load data from file {file_path}")
			return False
		## Bugfix: propagate apply_animation's result instead of always succeeding,
		## so execute() can return CANCELLED when an existing clip blocks the load.
		return self.apply_animation(data, ob)
	def execute(self, context:Context) -> Set[str]:
		scene = context.scene
		ob = context.active_object
		if not self.load_animation(scene.audio2face.import_anim_path, ob):
			return {"CANCELLED"}
		return {"FINISHED"}
## ======================================================================
class OMNI_OT_TransferShapeData(bpy.types.Operator):
"""Transfers shape data from imported rig heads to the original meshes."""
bl_idname = "audio2face.transfer_shape_data"
bl_label = "Transfer Shape Data"
bl_options = {"REGISTER", "UNDO"}
apply_fix: BoolProperty(name="Apply Fix",
description="Propate Basis shape to all parts of the mesh not covered by the head, to prevent vertex vomit.",
default=False)
@classmethod
def poll(cls, context:Context) -> bool:
collection = _get_import_collection()
if collection is None:
return False
meshes = [x.name for x in collection.objects if x.type == "MESH"]
return bool(len(meshes))
def _get_collection_meshes(self, collection:Collection) -> List["bpy.data.Mesh"]:
result = [x for x in collection.all_objects if x.type == "MESH"]
return result
def _build_mapping_table(self, import_meshes:Collection, export_meshes:Collection) -> Dict:
result = {}
for imported in import_meshes:
## Intentionally doing the exported data name but the import object name
## because of how the imports work on both sides.
token = imported.name.rpartition("__Audio2Face_EX")[0]
for exported in export_meshes:
exported_token = exported.data.name.rpartition("__Audio2Face_EX")[0]
if exported_token == token:
result[imported] = exported
return result
def _transfer_shapes(self, context:Context, source:Object, target:Object, mapping_object:Object) -> int:
"""
Transfers shapes from the source mesh to the target.
:returns: The number of shapes transferred.
"""
assert source.data and source.data.shape_keys, "Source object has no shape key data."
wm = context.window_manager
result = 0
## Run these to make sure they're all visible, checked, and in the view layer
a2f_collection, _, _, _ = ensure_scene_collections(context.scene)
_get_or_create_collection(a2f_collection, "A2F Import")
blocks = source.data.shape_keys.key_blocks
total_shapes = len(blocks)
if not context.mode == "OBJECT" and context.active_object:
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
source.select_set(True)
target.select_set(True)
context.view_layer.objects.active = target
basis = target.data.shape_keys.key_blocks["Basis"]
wm.progress_begin(0, total_shapes)
start_index = len(target.data.shape_keys.key_blocks)
## Grab the mapping array using the new Attributes API.
mapping_indices = np.zeros(len(source.data.vertices), dtype=np.int32)
attr = mapping_object.data.attributes['index_orig']
attr.data.foreach_get("value", mapping_indices)
for index, block in enumerate(blocks):
if block.name == "Basis":
continue
target.shape_key_add(name=block.name, from_mix=False)
target_key_block = target.data.shape_keys.key_blocks[block.name]
target_key_block.relative_key = basis
for index, target_index in enumerate(mapping_indices):
target_key_block.data[target_index].co = block.data[index].co
self.report({"INFO"}, f"Transferred shape {block.name} from {source.name} to {target.name}")
result += 1
wm.progress_update(index)
wm.progress_end()
if self.apply_fix:
self._select_verts_inverse(target, mapping_indices)
bpy.ops.object.mode_set(mode="EDIT")
wm.progress_begin(0, total_shapes)
for index in range(start_index, start_index+total_shapes-1):
shape = target.data.shape_keys.key_blocks[index]
self.report({"INFO"}, f"Fixing shape: {shape.name}")
target.active_shape_key_index = index
bpy.ops.mesh.blend_from_shape(shape='Basis', blend=1.0, add=False)
wm.progress_update(index)
bpy.ops.object.mode_set(mode="OBJECT")
wm.progress_end()
return result
def _select_verts_inverse(self, ob:Object, mapping_indices:Iterable[int]) -> int:
"""
Set the vertex selection of the target object to the inverse of
what's in mapping_indices through the bmesh API.
:returns: The number of vertices selected.
"""
result = 0
bm = bmesh.new()
bm.from_mesh(ob.data)
for v in bm.verts:
should_set = not (v.index in mapping_indices)
v.select_set(should_set)
result += int(should_set)
bm.to_mesh(ob.data)
def _clean_shapes(self, ob:Object, shapes_list:List[str]) -> int:
"""
For each named shape, remove it from ob's shape keys.
:returns: The number of shapes removed
"""
self.report({"INFO"}, f"Cleaning {', '.join(shapes_list)}")
if ob.data.shape_keys is None:
return 0
result = 0
for shape in shapes_list:
key = ob.data.shape_keys.key_blocks.get(shape)
if key:
ob.shape_key_remove(key)
result +=1
return result
def execute(self, context:Context) -> Set[str]:
    """
    Transfer every imported Audio2Face shape key onto the matching
    original export mesh, cleaning stale shapes first.
    :param context: Active Blender context.
    :returns: ``{"FINISHED"}`` always.
    """
    ## Transfer shape data over automatically
    scene = context.scene  # NOTE(review): unused local — kept as-is
    export_meshes = self._get_collection_meshes(_get_export_collection())
    import_meshes = self._get_collection_meshes(_get_import_collection())
    total = 0
    mapping_table = self._build_mapping_table(import_meshes, export_meshes).items()
    self.report({"INFO"}, f"{mapping_table}")
    for source, mapping_object in mapping_table:
        ## hop to the true original mesh
        target = mapping_object.a2f_original
        ## every incoming shape except the Basis key
        source_shapes = [x.name for x in source.data.shape_keys.key_blocks if not x.name == "Basis"]
        count = self._clean_shapes(target, source_shapes)
        self.report({"INFO"}, f"Cleaned {count} shape{'' if count == 1 else 's'} from {target.name}")
        ## regrab the target object now that it's been modified and we're
        ## holding onto an old pointer
        target = mapping_object.a2f_original
        ## bugfix: add a Basis target if none exists
        if target.data.shape_keys is None or not "Basis" in target.data.shape_keys.key_blocks:
            target.shape_key_add(name="Basis", from_mix=False)
        result = self._transfer_shapes(context, source, target, mapping_object)
        self.report({"INFO"}, f"Transferred {result} shape{'' if result == 1 else 's'} from {source.name} to {target.name}")
        total += result
    self.report({"INFO"}, f"Transferred {total} total shape{'' if total == 1 else 's'}")
    return {"FINISHED"}
| 31,692 | Python | 35.220571 | 151 | 0.669033 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/ui.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
from typing import *
import bpy
from bpy.utils import previews
from omni_audio2face.operators import (
OMNI_OT_PrepareScene,
OMNI_OT_MarkExportMesh,
OMNI_OT_ChooseUSDFile,
OMNI_OT_ChooseAnimCache,
OMNI_OT_ExportPreparedScene,
OMNI_OT_ImportRigFile,
OMNI_OT_TransferShapeData,
OMNI_OT_ImportAnimation,
)
## ======================================================================
def preload_icons() -> previews.ImagePreviewCollection:
    """Load this add-on's icon files into a new preview collection."""
    base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons")
    ## preview-collection key -> file name on disk
    icon_files = {
        "AUDIO2FACE": "omni_audio2face.png",
    }
    collection = previews.new()
    for icon_name, file_name in icon_files.items():
        collection.load(icon_name, os.path.join(base_path, file_name), "IMAGE")
    return collection
## ======================================================================
class OBJECT_PT_Audio2FacePanel(bpy.types.Panel):
    """N-panel tab driving the Audio2Face export / import round trip."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Audio2Face"
    bl_options = {"DEFAULT_CLOSED"}
    version = "0.0.0"

    ## loaded once at class-creation time; shared by all panel instances
    icons = preload_icons()

    def draw_header(self, context):
        """Show the Audio2Face icon in the panel header."""
        self.layout.label(text="", icon_value=self.icons["AUDIO2FACE"].icon_id)

    # draw the panel
    def draw(self, context):
        """Draw export prep, shape import, and animation import sections."""
        use_face_selection = context.scene.audio2face.use_face_selection
        ## face-selection mode only makes sense in polygon edit mode
        is_poly_edit_mode = context.tool_settings.mesh_select_mode[2] and context.mode == "EDIT_MESH"
        a2f_export_static = bpy.data.collections.get("A2F Export Static", None)
        a2f_export_dynamic = bpy.data.collections.get("A2F Export Dynamic", None)
        layout = self.layout
        layout.label(text="Face Prep and Export", icon="EXPORT")
        row = layout.row(align=True)
        op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Static")
        op.is_dynamic = False
        op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Dynamic")
        op.is_dynamic = True
        row = layout.row(align=True)
        row.prop(context.scene.audio2face, "use_face_selection", text="")
        if use_face_selection and not is_poly_edit_mode:
            row.label(text="Use Faces: Must be in Polygon Edit Mode!", icon="ERROR")
        else:
            row.label(text="Use Face Selection?")
        ## mesh selections
        col = layout.column(align=True)
        if a2f_export_dynamic:
            col.prop_search(context.scene.audio2face, "mesh_skin", a2f_export_dynamic, "objects", text="Skin Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_tongue", a2f_export_dynamic, "objects", text="Tongue Mesh: ")
        else:
            col.label(text="Dynamic Meshes are required to set Skin and Tongue", icon="ERROR")
            col.label(text=" ")
        if a2f_export_static:
            col.prop_search(context.scene.audio2face, "mesh_eye_left", a2f_export_static, "objects", text="Left Eye Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_eye_right", a2f_export_static, "objects", text="Right Eye Mesh: ")
            col.prop_search(context.scene.audio2face, "mesh_gums_lower", a2f_export_static, "objects", text="Lower Gums Mesh: ")
        else:
            col.label(text="Static Meshes are required to set Eyes", icon="ERROR")
            col.label(text=" ")
        col = layout.column(align=True)
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "export_filepath", text="Export Path: ")
        op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER")
        op.operation = "EXPORT"
        col.prop(context.scene.audio2face, "export_project", text="Export With Project File")
        row = col.row(align=True)
        ## disable the export button label when there is nothing to export
        collection = bpy.data.collections.get("A2F Export", None)
        child_count = len(collection.all_objects) if collection else 0
        args = {
            "text": "Export Face USD" if child_count else "No meshes available for Export",
        }
        op = row.operator(OMNI_OT_ExportPreparedScene.bl_idname, **args)
        ## Import Side -- after Audio2Face has transferred the shapes
        layout.separator()
        layout.label(text="Face Shapes Import", icon="IMPORT")
        col = layout.column(align=True)
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "import_filepath", text="Shapes Import Path")
        op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER")
        op.operation = "IMPORT"
        col = layout.column(align=True)
        col.operator(OMNI_OT_ImportRigFile.bl_idname)
        row = col.row(align=True)
        op = row.operator(OMNI_OT_TransferShapeData.bl_idname)
        op.apply_fix = context.scene.audio2face.transfer_apply_fix
        row.prop(context.scene.audio2face, "transfer_apply_fix", icon="MODIFIER", text="")
        col = layout.column(align=True)
        col.label(text="Anim Cache Path")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "import_anim_path", text="")
        row.operator(OMNI_OT_ChooseAnimCache.bl_idname, text="", icon="FILE_FOLDER")
        ## JSON caches carry no frame rate of their own, so ask the user
        if context.scene.audio2face.import_anim_path.lower().endswith(".json"):
            col.prop(context.scene.audio2face, "anim_frame_rate", text="Source Framerate")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "anim_start_type", text="Start Frame")
        if context.scene.audio2face.anim_start_type == "CUSTOM":
            row.prop(context.scene.audio2face, "anim_start_frame", text="")
        col.prop(context.scene.audio2face, "anim_load_to", text="Load To")
        row = col.row(align=True)
        row.prop(context.scene.audio2face, "anim_apply_scale", text="Apply Clip Scale")
        if context.scene.audio2face.anim_load_to == "CLIP":
            row.prop(context.scene.audio2face, "anim_overwrite")
        ## the button doubles as a status line when the operator can't run
        op_label = ("Please change to Object Mode" if not context.mode == "OBJECT"
                    else ("Import Animation Clip" if OMNI_OT_ImportAnimation.poll(context)
                          else "Please Select Target Mesh"))
        op = col.operator(OMNI_OT_ImportAnimation.bl_idname, text=op_label)
        ## copy scene-level settings onto the operator instance
        op.start_type = context.scene.audio2face.anim_start_type
        op.frame_rate = context.scene.audio2face.anim_frame_rate
        op.start_frame = context.scene.audio2face.anim_start_frame
        op.set_range = context.scene.audio2face.anim_set_range
        op.load_to = context.scene.audio2face.anim_load_to
        op.overwrite = context.scene.audio2face.anim_overwrite
        op.apply_scale = context.scene.audio2face.anim_apply_scale
| 6,105 | Python | 36.00606 | 119 | 0.702867 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/ui.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from typing import *
import bpy
from bpy.types import (Context, Object, Material, Scene)
from . particle_bake.operators import *
from . material_bake.background_bake import bgbake_ops
# from .material_bake_complex import OBJECT_OT_omni_material_bake
from os.path import join, dirname
import bpy.utils.previews
from .material_bake import baker
## ======================================================================
def get_icons_directory():
    """Return the absolute path of this add-on's ``icons`` directory."""
    return join(dirname(__file__), "icons")
## ======================================================================
def _get_bake_types(scene:Scene) -> List[str]:
result = []
bake_all = scene.all_maps
if scene.selected_col or bake_all:
result.append("DIFFUSE")
if scene.selected_normal or bake_all:
result.append("NORMAL")
if scene.selected_emission or bake_all:
result.append("EMIT")
if scene.selected_specular or bake_all:
result.append("GLOSSY")
if scene.selected_rough or bake_all:
result.append("ROUGHNESS")
if scene.selected_trans or bake_all:
result.append("TRANSMISSION")
## special types
if scene.omni_bake.bake_metallic or bake_all:
result.append("METALLIC")
return ",".join(result)
## ======================================================================
class OBJECT_PT_omni_panel(bpy.types.Panel):
    """Main Omniverse N-panel: particle conversion and material templates."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "NVIDIA Omniverse"
    bl_options = {"DEFAULT_CLOSED"}
    version = "0.0.0"

    #retrieve icons
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')

    def draw_header(self, context):
        """Show the Omniverse icon in the panel header."""
        self.layout.label(text="", icon_value=self.icons["OMNI"].icon_id)

    def draw(self, context):
        """Draw the particle-bake section and the material template buttons."""
        layout = self.layout
        scene = context.scene
        # --------Particle Collection Instancing-------------------
        particleOptions = scene.particle_options
        particleCol = self.layout.column(align=True)
        particleCol.label(text="Omni Particles",
                          icon='PARTICLES')
        box = particleCol.box()
        column = box.column(align=True)
        column.prop(particleOptions, "deletePSystemAfterBake")
        row = column.row()
        row.prop(particleOptions, "animateData")
        if particleOptions.animateData:
            row = column.row(align=True)
            row.prop(particleOptions, "selectedStartFrame")
            row.prop(particleOptions, "selectedEndFrame")
            ## disabled row used purely as a warning label
            row = column.row()
            row.enabled = False
            row.label(text="Increased Calculation Time", icon='ERROR')
        row = column.row()
        row.scale_y = 1.5
        row.operator('omni.hair_bake',
                     text='Convert',
                     icon='MOD_PARTICLE_INSTANCE')
        ## material conversion only offered for an active, selected mesh
        if len(bpy.context.selected_objects) != 0 and bpy.context.active_object != None:
            if bpy.context.active_object.select_get() and bpy.context.active_object.type == "MESH":
                layout.separator()
                column = layout.column(align=True)
                column.label(text="Convert Material to:", icon='SHADING_RENDERED')
                box = column.box()
                materialCol = box.column(align=True)
                materialCol.operator('universalmaterialmap.create_template_omnipbr',
                                     text='OmniPBR')
                materialCol.operator('universalmaterialmap.create_template_omniglass',
                                     text='OmniGlass')
## ======================================================================
class OBJECT_PT_omni_bake_panel(bpy.types.Panel):
    """N-panel tab for configuring and launching material bakes."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Material Baking"
    bl_options = {"DEFAULT_CLOSED"}
    version = "0.0.0"

    #retrieve icons
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
    icons.load("BAKE", join(icons_directory, "Oven.png"), 'IMAGE')

    def draw_header(self, context):
        """Show a UV icon in the panel header."""
        self.layout.label(text="", icon="UV_DATA")

    def draw(self, context):
        """Draw bake pass toggles, texture/UV settings, and the bake button."""
        layout = self.layout
        scene = context.scene
        box = layout.box()
        #--------PBR Bake Settings-------------------
        row = box.row()
        ## checkbox icon mirrors the "bake everything" toggle state
        if scene.all_maps == True:
            row.prop(scene, "all_maps", icon='CHECKBOX_HLT')
        else:
            row.prop(scene, "all_maps", icon='CHECKBOX_DEHLT')
        column = box.column(align=True)
        row = column.row()
        row.prop(scene, "selected_col")
        row.prop(scene, "selected_normal")
        row = column.row()
        row.prop(scene, "selected_rough")
        row.prop(scene, "selected_specular", text="Gloss")
        row = column.row()
        row.prop(scene, "selected_trans")
        row.prop(scene, "selected_emission")
        row = column.row()
        row.label(text="Special Maps")
        row = column.row()
        row.prop(scene.omni_bake, "bake_metallic")
        row.label(text=" ")
        #--------Texture Settings-------------------
        row = box.row()
        row.label(text="Texture Resolution:")
        row.scale_y = 0.5
        row = box.row()
        row.prop(scene, "texture_res", expand=True)
        row.scale_y = 1
        if scene.texture_res == "8k" or scene.texture_res == "4k":
            ## disabled row used purely as a warning label
            row = box.row()
            row.enabled = False
            row.label(text="Long Bake Times", icon='ERROR')
        #--------UV Settings-------------------
        column = box.column(align=True)
        row = column.row()
        row.prop(scene, "newUVoption")
        row.prop(scene, "unwrapmargin")
        #--------Other Settings-------------------
        column = box.column(align=True)
        row = column.row()
        if scene.bgbake == "fg":
            text = "Copy objects and apply bakes"
        else:
            text = "Copy objects and apply bakes (after import)"
        row.prop(scene, "prepmesh", text=text)
        if scene.prepmesh == True:
            if scene.bgbake == "fg":
                text = "Hide source objects after bake"
            else:
                text = "Hide source objects after bake (after import)"
            row = column.row()
            row.prop(scene, "hidesourceobjects", text=text)
        #-------------Buttons-------------------------
        row = box.row()
        ## NOTE(review): bare except hides why the Cycles device prop failed
        ## to draw (e.g. Cycles not available) — kept as-is
        try:
            row.prop(scene.cycles, "device", text="Device")
        except:
            pass
        row = box.row()
        row.scale_y = 1.5
        op = row.operator("omni.bake_maps", icon_value=self.icons["BAKE"].icon_id)
        op.unwrap = scene.newUVoption
        op.bake_types = _get_bake_types(scene)
        op.merge_textures = scene.omni_bake.merge_textures
        op.hide_original = scene.hidesourceobjects
        ## map the UI resolution choice to square pixel dimensions
        op.width = op.height = {
            "0.5k": 512,
            "1k": 1024,
            "2k": 2048,
            "4k": 4096,
            "8k": 8192,
        }[scene.texture_res]
        ## surface the poll failure reason in the panel itself
        can_bake_poll, error_data = baker.omni_bake_maps_poll(context)
        can_bake_poll_result = {
            -1: f"Cannot bake objects in collection {baker.COLLECTION_NAME}",
            -2: f"Material cannot be baked:",
            -3: "Cycles Renderer Add-on not loaded!"
        }
        if can_bake_poll < 0:
            row = box.row()
            row.label(text=can_bake_poll_result[can_bake_poll], icon="ERROR")
            if can_bake_poll == -2:
                mesh_name, material_name = error_data
                row = box.row()
                row.label(text=f"{material_name} on {mesh_name}")
        row = column.row()
        row.scale_y = 1
        ##!TODO: Restore background baking
        # row.prop(context.scene, "bgbake", expand=True)
        if scene.bgbake == "bg":
            row = column.row(align=True)
            # - BG status button
            col = row.column()
            if len(bgbake_ops.bgops_list) == 0:
                enable = False
                icon = "TIME"
            else:
                enable = True
                icon = "TIME"
            col.operator("object.omni_bake_bgbake_status", text="", icon=icon)
            col.enabled = enable
            # - BG import button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "IMPORT"
            else:
                enable = False
                icon = "IMPORT"
            col.operator("object.omni_bake_bgbake_import", text="", icon=icon)
            col.enabled = enable
            #BG erase button
            col = row.column()
            if len(bgbake_ops.bgops_list_finished) != 0:
                enable = True
                icon = "TRASH"
            else:
                enable = False
                icon = "TRASH"
            col.operator("object.omni_bake_bgbake_clear", text="", icon=icon)
            col.enabled = enable
            row.alignment = 'CENTER'
            row.label(text=f"Running {len(bgbake_ops.bgops_list)} | Finished {len(bgbake_ops.bgops_list_finished)}")
## ======================================================================
class OmniBakePreferences(bpy.types.AddonPreferences):
    """Add-on preferences: bake image naming format and per-pass name aliases."""
    # this must match the add-on name, use '__package__'
    # when defining this in a submodule of a python package.
    bl_idname = __package__
    # Template for generated bake image names; %TOKENS% are substituted per bake.
    img_name_format: bpy.props.StringProperty(name="Image format string",
                                              default="%OBJ%_%BATCH%_%BAKEMODE%_%BAKETYPE%")
    #Aliases
    diffuse_alias: bpy.props.StringProperty(name="Diffuse", default="diffuse")
    metal_alias: bpy.props.StringProperty(name="Metal", default="metalness")
    roughness_alias: bpy.props.StringProperty(name="Roughness", default="roughness")
    glossy_alias: bpy.props.StringProperty(name="Glossy", default="glossy")
    normal_alias: bpy.props.StringProperty(name="Normal", default="normal")
    transmission_alias: bpy.props.StringProperty(name="Transmission", default="transparency")
    transmissionrough_alias: bpy.props.StringProperty(name="Transmission Roughness", default="transparencyroughness")
    ## bugfix: UI label typo was "Clearcost"
    clearcoat_alias: bpy.props.StringProperty(name="Clearcoat", default="clearcoat")
    clearcoatrough_alias: bpy.props.StringProperty(name="Clearcoat Roughness", default="clearcoatroughness")
    emission_alias: bpy.props.StringProperty(name="Emission", default="emission")
    specular_alias: bpy.props.StringProperty(name="Specular", default="specular")
    alpha_alias: bpy.props.StringProperty(name="Alpha", default="alpha")
    sss_alias: bpy.props.StringProperty(name="SSS", default="sss")
    ssscol_alias: bpy.props.StringProperty(name="SSS Colour", default="ssscol")

    @classmethod
    def reset_img_string(cls):
        """Restore ``img_name_format`` to its default and save user preferences."""
        prefs = bpy.context.preferences.addons[__package__].preferences
        prefs.property_unset("img_name_format")
        bpy.ops.wm.save_userpref()
| 12,271 | Python | 34.98827 | 117 | 0.557412 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/workflow/usd_kind.py | from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
## ======================================================================
## EnumProperty items: (identifier, name, description) triples.
## bugfix: declared as a list (was a set) so the enum's item order is
## deterministic in the UI and across Blender sessions.
usd_kind_items = [
    ('COMPONENT', 'component', 'kind: component'),
    ('GROUP', 'group', 'kind: group'),
    ('ASSEMBLY', 'assembly', 'kind: assembly'),
    ('CUSTOM', 'custom', 'kind: custom'),
]
## ======================================================================
def get_plural_count(items) -> Tuple[str, int]:
    """
    Return the plural suffix and item count for report formatting.
    :param items: Any sized collection.
    :returns: ``('' or 's', count)`` — e.g. ``f"{count} object{plural}"``.
        NOTE(review): annotation fixed from the tuple literal ``(str, int)``.
    """
    count = len(items)
    plural = '' if count == 1 else 's'
    return plural, count
## ======================================================================
class OBJECT_OT_omni_set_usd_kind(bpy.types.Operator):
    """Sets the USD Kind value on the selected objects."""
    bl_idname = "omni.set_usd_kind"
    bl_label = "Set USD Kind"
    bl_options = {"REGISTER", "UNDO"}
    kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items)
    custom_kind: StringProperty(default="")
    verbose: BoolProperty(default=False)

    @property  ## read-only
    def value(self) -> str:
        """The kind string to author: the free-form text for CUSTOM, else the lowercased enum id."""
        if self.kind == "CUSTOM":
            return self.custom_kind
        return self.kind.lower()

    @classmethod
    def poll(cls, context:Context) -> bool:
        ## needs at least one selected object to write onto
        return len(context.selected_objects) > 0

    def execute(self, context:Context) -> Set[str]:
        """Write the chosen kind into a ``usdkind`` id property on every selected object."""
        if self.kind == "NONE":
            self.report({"WARNING"}, "No kind specified-- nothing authored.")
            return {"CANCELLED"}
        for ob in context.selected_objects:
            id_props = ob.id_properties_ensure()
            id_props["usdkind"] = self.value
            ## keep the property's UI metadata in sync with the new value
            ui_data = ob.id_properties_ui("usdkind")
            ui_data.update(default=self.value, description="USD Kind")
        if self.verbose:
            plural, count = get_plural_count(context.selected_objects)
            self.report({"INFO"}, f"Set USD Kind to {self.value} for {count} object{plural}.")
        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_set_usd_kind_auto(bpy.types.Operator):
    """Sets the USD Kind value on scene objects, automatically."""
    bl_idname = "omni.set_usd_kind_auto"
    bl_label = "Set USD Kind Auto"
    bl_options = {"REGISTER", "UNDO"}
    verbose: BoolProperty(default=False)

    def execute(self, context:Context) -> Set[str]:
        """Apply kind heuristics to the scene by driving omni.set_usd_kind per object."""
        ## NOTE(review): the prior active/selected state is captured here but
        ## never restored after the loop — confirm whether that is intended
        active = context.active_object
        selected = list(context.selected_objects)
        bpy.ops.object.select_all(action='DESELECT')
        ## heuristics
        ## First, assign "component" to all unparented empties
        unparented = [x for x in context.scene.collection.all_objects if not x.parent and x.type == "EMPTY"]
        for item in unparented:
            ## select exactly one object at a time so the kind operator
            ## (which works on the selection) touches only this object
            item.select_set(True)
            bpy.ops.omni.set_usd_kind(kind="COMPONENT")
            item.select_set(False)
        if self.verbose:
            plural, count = get_plural_count(unparented)
            self.report({"INFO"}, f"Set USD Kind Automatically on {count} object{plural}.")
        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_clear_usd_kind(bpy.types.Operator):
    """Clear USD Kind values on the selected objects."""
    bl_idname = "omni.clear_usd_kind"
    bl_label = "Clear USD Kind"
    bl_options = {"REGISTER", "UNDO"}
    verbose: BoolProperty(default=False)

    @classmethod
    def poll(cls, context:Context) -> bool:
        ## needs a selection to operate on
        return len(context.selected_objects) > 0

    def execute(self, context:Context) -> Set[str]:
        """Delete the ``usdkind`` id property from every selected object that has one."""
        from rna_prop_ui import rna_idprop_ui_prop_update
        cleared = 0
        for ob in context.selected_objects:
            if not "usdkind" in ob:
                continue
            ## refresh the property's UI metadata before removing it
            rna_idprop_ui_prop_update(ob, "usdkind")
            del ob["usdkind"]
            cleared += 1
        if self.verbose:
            plural, count = get_plural_count(range(cleared))
            self.report({"INFO"}, f"Cleared USD Kind from {count} object{plural}.")
        return {"FINISHED"}
## ======================================================================
class OBJECT_PT_omni_usd_kind_panel(bpy.types.Panel):
    """N-panel tab exposing the USD Kind set/clear/auto operators."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "USD Kind"

    def draw(self, context:Context):
        """Draw the kind selector plus the three kind operators."""
        layout = self.layout
        scene = context.scene
        layout.label(text="USD Kind")
        row = layout.row()
        row.prop(scene.omni_usd_kind, "kind", text="Kind")
        ## only show the free-form field when CUSTOM is chosen
        if scene.omni_usd_kind.kind == "CUSTOM":
            row = layout.row()
            row.prop(scene.omni_usd_kind, "custom_kind", text="Custom Kind")
        col = layout.column(align=True)
        ## forward the panel's scene-level settings onto each operator
        op = col.operator(OBJECT_OT_omni_set_usd_kind.bl_idname, icon="PLUS")
        op.kind = scene.omni_usd_kind.kind
        op.custom_kind = scene.omni_usd_kind.custom_kind
        op.verbose = True
        op = col.operator(OBJECT_OT_omni_clear_usd_kind.bl_idname, icon="X")
        op.verbose = True
        op = col.operator(OBJECT_OT_omni_set_usd_kind_auto.bl_idname, icon="BRUSH_DATA")
        op.verbose = True
## ======================================================================
class USDKindProperites(bpy.types.PropertyGroup):
    """Scene-level settings backing the USD Kind panel."""
    ## NOTE(review): class name is misspelled ("Properites"), but it is
    ## referenced by the registration code, so renaming would be breaking.
    # Enum selection shown in the panel.
    kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items)
    # Free-form kind string used when `kind` is CUSTOM.
    custom_kind: StringProperty(default="")
## ======================================================================
## All registrable classes, in dependency order.
classes = [
    OBJECT_OT_omni_set_usd_kind,
    OBJECT_OT_omni_set_usd_kind_auto,
    OBJECT_OT_omni_clear_usd_kind,
    OBJECT_PT_omni_usd_kind_panel,
    USDKindProperites,
]


def unregister():
    """Unregister all classes and drop the Scene pointer, tolerating partial state."""
    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except (ValueError, RuntimeError):
            ## class was never registered (or already removed) -- keep going
            continue
    try:
        del bpy.types.Scene.omni_usd_kind
    except AttributeError:
        pass


def register():
    """(Re)register every class and attach the property group to the Scene."""
    unregister()
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.Scene.omni_usd_kind = bpy.props.PointerProperty(type=USDKindProperites)
| 5,618 | Python | 27.668367 | 102 | 0.620862 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/material_bake/baker.py | from tempfile import NamedTemporaryFile
from typing import *
import addon_utils
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
from omni_panel.material_bake import material_setup
COLLECTION_NAME = "OmniBake_Bakes"
def get_material_output(tree:NodeTree, engine:str="CYCLES") -> Optional[Node]:
    """
    Find the material output node that applies only to a specific engine.
    :param tree: The NodeTree to search.
    :param engine: The engine to search for.
    :return: The Material Output Node associated with the engine, or None if not found.
    """
    supported_engines = {"CYCLES", "EEVEE", "ALL"}
    assert engine in supported_engines, f"Only the following engines are supported: {','.join(supported_engines)}"
    ## first output node targeting this engine (or every engine), else None
    matches = (node for node in tree.nodes
               if node.type == "OUTPUT_MATERIAL" and node.target in {"ALL", engine})
    return next(matches, None)
def prepare_collection(scene:Scene) -> Collection:
    """
    Ensures the bake Collection exists in the specified scene.
    :param scene: The scene to which you wish to add the bake Collection.
    :return: the bake Collection
    """
    existing = bpy.data.collections.get(COLLECTION_NAME, None)
    collection = existing if existing else bpy.data.collections.new(COLLECTION_NAME)
    ## link into the scene root only once
    if COLLECTION_NAME not in scene.collection.children:
        scene.collection.children.link(collection)
    return collection
def select_only(ob:Object):
    """
    Ensure that only the specified object is selected.
    :param ob: Object to select
    """
    bpy.ops.object.select_all(action="DESELECT")
    ob.select_set(state=True)
    ## also make it the active object so later mode switches / ops target it
    bpy.context.view_layer.objects.active = ob
def smart_unwrap_object(ob:Object, name:str="OmniBake"):
    """
    Use Blenders built-in smart unwrap functionality to generate a new UV map.
    :param ob: Mesh Object to unwrap.
    :param name: Name of the UV layer to (re)create on the mesh.
    """
    bpy.ops.object.mode_set(mode="EDIT", toggle=False)
    # Unhide any geo that's hidden in edit mode or it'll cause issues.
    bpy.ops.mesh.reveal()
    bpy.ops.mesh.select_all(action="SELECT")
    bpy.ops.mesh.reveal()
    ## replace any previous bake layer of the same name
    if name in ob.data.uv_layers:
        ob.data.uv_layers.remove(ob.data.uv_layers[name])
    uv_layer = ob.data.uv_layers.new(name=name)
    uv_layer.active = True
    bpy.ops.uv.select_all(action="SELECT")
    bpy.ops.uv.smart_project(island_margin=0.0)
    bpy.ops.object.mode_set(mode="OBJECT", toggle=False)
def prepare_mesh(ob:Object, collection: Collection, unwrap=False) -> Object:
    """
    Duplicate the specified Object, also duplicating all its materials.
    :param ob: The object to duplicate.
    :param collection: After duplication, the object will be inserted into this Collection
    :param unwrap: If True, also smart unwrap the object's UVs.
    :return: The newly created duplicate object.
    """
    assert not ob.name in collection.all_objects, f"{ob.name} is a baked mesh (cannot be used)"
    ## [:56] leaves room for the "_baked" suffix within Blender's 63-char
    ## datablock name limit; an existing datablock of that name is replaced
    new_mesh_name = ob.data.name[:56] + "_baked"
    if new_mesh_name in bpy.data.meshes:
        bpy.data.meshes.remove(bpy.data.meshes[new_mesh_name])
    new_mesh = ob.data.copy()
    new_mesh.name = new_mesh_name
    new_name = ob.name[:56] + "_baked"
    if new_name in bpy.data.objects:
        bpy.data.objects.remove(bpy.data.objects[new_name])
    new_object = bpy.data.objects.new(new_name, new_mesh)
    collection.objects.link(new_object)
    select_only(new_object)
    ## keep the duplicate in the same world-space position as the source
    new_object.matrix_world = ob.matrix_world.copy()
    if unwrap:
        smart_unwrap_object(new_object)
    ## duplicate every material so bakes never modify the originals
    for index, material in enumerate([x.material for x in new_object.material_slots]):
        new_material_name = material.name[:56] + "_baked"
        if new_material_name in bpy.data.materials:
            bpy.data.materials.remove(bpy.data.materials[new_material_name])
        new_material = material.copy()
        new_material.name = new_material_name
        new_object.material_slots[index].material = new_material
    ## hide the source so only the baked duplicate is visible
    ob.hide_viewport = True
    return new_object
##!<--- TODO: Fix these
def find_node_from_label(label:str, nodes:List[Node]) -> Node:
    """Return the first node whose label matches, or False when absent (callers test ``== False``)."""
    return next((n for n in nodes if n.label == label), False)
def find_isocket_from_identifier(idname:str, node:Node) -> NodeSocket:
    """Return the input socket on ``node`` whose identifier matches, or False when absent."""
    return next((socket for socket in node.inputs if socket.identifier == idname), False)
def find_osocket_from_identifier(idname, node):
    """Return the output socket on ``node`` whose identifier matches, or False when absent."""
    return next((socket for socket in node.outputs if socket.identifier == idname), False)
def make_link(f_node_label, f_node_ident, to_node_label, to_node_ident, nodetree):
    """
    Connect an output socket on one node to an input socket on another,
    resolving both by node label and socket identifier.
    :returns: True when the link was created, False when either node is missing.
    """
    source_node = find_node_from_label(f_node_label, nodetree.nodes)
    if source_node == False:
        return False
    source_socket = find_osocket_from_identifier(f_node_ident, source_node)
    dest_node = find_node_from_label(to_node_label, nodetree.nodes)
    if dest_node == False:
        return False
    dest_socket = find_isocket_from_identifier(to_node_ident, dest_node)
    nodetree.links.new(source_socket, dest_socket)
    return True
## --->
## ======================================================================
##!TODO: Shader type identification and bake setup
def _nodes_for_type(node_tree:NodeTree, node_type:str) -> List[Node]:
result = [x for x in node_tree.nodes if x.type == node_type]
## skip unconnected nodes
from_nodes = [x.from_node for x in node_tree.links]
to_nodes = [x.to_node for x in node_tree.links]
all_nodes = set(from_nodes + to_nodes)
result = list(filter(lambda x: x in all_nodes, result))
return result
def output_nodes_for_engine(node_tree:NodeTree, engine:str) -> List[Node]:
    """
    Return all connected Material Output nodes in the tree.
    NOTE(review): ``engine`` is currently unused — all output nodes are
    returned regardless of their target engine; confirm intent.
    """
    nodes = _nodes_for_type(node_tree, "OUTPUT_MATERIAL")
    return nodes
def get_principled_nodes(node_tree:NodeTree) -> List[Node]:
    """Return all connected Principled BSDF nodes in the tree."""
    return _nodes_for_type(node_tree, "BSDF_PRINCIPLED")
def identify_shader_type(node_tree:NodeTree) -> str:
    """
    Classify the material's shader layout.
    NOTE(review): implementation appears unfinished — it gathers the node
    groups but never returns, so every call yields None despite the
    ``-> str`` annotation; confirm the intended classification logic.
    """
    principled_nodes = get_principled_nodes(node_tree)
    emission_nodes = _nodes_for_type(node_tree, "EMISSION")
    mix_nodes = _nodes_for_type(node_tree, "MIX_SHADER")
    outputs = output_nodes_for_engine(node_tree, "CYCLES")
    total_shader_nodes = principled_nodes + emission_nodes + mix_nodes
    ## first type: principled straight into the output
## ----------------------------------------------------------------------
def create_principled_setup(material:Material, images:Dict[str,Image]):
    """
    Creates a new shader setup in the tree of the specified
    material using the baked images, removing all old shader nodes.
    :param material: The material to change.
    :param images: The baked Images dictionary, name:Image pairs.
    """
    node_tree = material.node_tree
    nodes = node_tree.nodes
    material.cycles.displacement_method = 'BOTH'
    principled_nodes = get_principled_nodes(node_tree)
    ## wipe everything except the principled nodes; the first one is reused below
    for node in filter(lambda x: not x in principled_nodes, nodes):
        nodes.remove(node)
    # Node Frame
    frame = nodes.new("NodeFrame")
    frame.location = (0, 0)
    frame.use_custom_color = True
    frame.color = (0.149763, 0.214035, 0.0590617)
    ## reuse the old BSDF if it exists to make sure the non-textured constant inputs are correct
    pnode = principled_nodes[0] if len(principled_nodes) else nodes.new("ShaderNodeBsdfPrincipled")
    pnode.location = (-25, 335)
    pnode.label = "pnode"
    pnode.use_custom_color = True
    pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467)
    ## NOTE(review): assumes the freshly created frame is named "Frame"
    ## (i.e. no other frame datablock competed for the name) — confirm
    pnode.parent = nodes["Frame"]
    # And the output node
    node = nodes.new("ShaderNodeOutputMaterial")
    node.location = (500, 200)
    node.label = "monode"
    node.show_options = False
    node.parent = nodes["Frame"]
    make_link("pnode", "BSDF", "monode", "Surface", node_tree)
    # -----------------------------------------------------------------
    # 'COMBINED', 'AO', 'SHADOW', 'POSITION', 'NORMAL', 'UV', 'ROUGHNESS',
    # 'EMIT', 'ENVIRONMENT', 'DIFFUSE', 'GLOSSY', 'TRANSMISSION'
    ## These are the currently supported types.
    ## More could be supported at a future date.
    ## one hidden image-texture node per baked pass, wired into the BSDF
    if "DIFFUSE" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 250)
        node.label = "col_tex"
        node.image = images["DIFFUSE"]
        node.parent = nodes["Frame"]
        make_link("col_tex", "Color", "pnode", "Base Color", node_tree)
    if "METALLIC" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 140)
        node.label = "metallic_tex"
        node.image = images["METALLIC"]
        node.parent = nodes["Frame"]
        make_link("metallic_tex", "Color", "pnode", "Metallic", node_tree)
    if "GLOSSY" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 90)
        node.label = "specular_tex"
        node.image = images["GLOSSY"]
        node.parent = nodes["Frame"]
        make_link("specular_tex", "Color", "pnode", "Specular", node_tree)
    if "ROUGHNESS" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, 50)
        node.label = "roughness_tex"
        node.image = images["ROUGHNESS"]
        node.parent = nodes["Frame"]
        make_link("roughness_tex", "Color", "pnode", "Roughness", node_tree)
    if "TRANSMISSION" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -90)
        node.label = "transmission_tex"
        node.image = images["TRANSMISSION"]
        node.parent = nodes["Frame"]
        make_link("transmission_tex", "Color", "pnode", "Transmission", node_tree)
    if "EMIT" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -170)
        node.label = "emission_tex"
        node.image = images["EMIT"]
        node.parent = nodes["Frame"]
        make_link("emission_tex", "Color", "pnode", "Emission", node_tree)
    if "NORMAL" in images:
        node = nodes.new("ShaderNodeTexImage")
        node.hide = True
        node.location = (-500, -318.7)
        node.label = "normal_tex"
        image = images["NORMAL"]
        node.image = image
        node.parent = nodes["Frame"]
        # Additional normal map node for normal socket
        node = nodes.new("ShaderNodeNormalMap")
        node.location = (-220, -240)
        node.label = "normalmap"
        node.show_options = False
        node.parent = nodes["Frame"]
        make_link("normal_tex", "Color", "normalmap", "Color", node_tree)
        make_link("normalmap", "Normal", "pnode", "Normal", node_tree)
    # -----------------------------------------------------------------
    ## wipe all labels
    for item in nodes:
        item.label = ""
    node = nodes["Frame"]
    node.label = "OMNI PBR"
    ## only color/emission maps are sRGB; data maps stay non-color
    for type, image in images.items():
        if type in {"DIFFUSE", "EMIT"}:
            image.colorspace_settings.name = "sRGB"
        else:
            image.colorspace_settings.name = "Non-Color"
## ======================================================================
def _selected_meshes(context:Context) -> List[Mesh]:
"""
:return: List[Mesh] of all selected mesh objects in active Blender Scene.
"""
return [x for x in context.selected_objects if x.type == "MESH"]
def _material_can_be_baked(material:Material) -> bool:
    """A material is bakeable when its single Cycles output node is fed
    directly by a Principled BSDF on its Surface input."""
    outputs = output_nodes_for_engine(material.node_tree, "CYCLES")
    if len(outputs) != 1:
        return False

    try:
        shader = outputs[0].inputs["Surface"].links[0].from_node
    except IndexError:
        ## nothing is linked to the Surface input
        return False

    ##!TODO: Support one level of mix with principled inputs
    return shader.type == "BSDF_PRINCIPLED"
def omni_bake_maps_poll(context:Context) -> (int, Any):
    """
    :return: 1 if we can bake
             0 if no meshes are selected
            -1 if any selected meshes are already in the bake collection
            -2 if mesh contains non-bakeable materials
            -3 if Cycles renderer isn't loaded
    """
    ## Bakes are impossible without the Cycles renderer add-on.
    _, cycles_loaded = addon_utils.check("cycles")
    if not cycles_loaded:
        return (-3, None)

    meshes = _selected_meshes(context)
    if not meshes:
        return (0, None)

    for mesh in meshes:
        for material in (slot.material for slot in mesh.material_slots):
            if not _material_can_be_baked(material):
                return (-2, [mesh.name, material.name])

    bake_collection = bpy.data.collections.get(COLLECTION_NAME, None)
    if bake_collection is None:
        ## We have selected meshes but no collection-- early out
        return (1, None)

    if any(x.name in bake_collection.all_objects for x in meshes):
        return (-1, None)

    return (1, None)
## ======================================================================
class OmniBakerProperties(bpy.types.PropertyGroup):
    """Bake settings exposed to the UI (registered as Scene.omni_bake in register())."""
    # Bake the Metallic input as well (handled as a "special" emission-routed pass).
    bake_metallic: BoolProperty(name="Metallic",
                                default=True)

    # When True, all materials of an object bake onto one shared texture per pass.
    merge_textures: BoolProperty(name="Merge Textures",
                                 description="Bake all materials for each object onto a single map",
                                 default=True)
## ======================================================================
class OBJECT_OT_omni_bake_maps(bpy.types.Operator):
    """Bake specified passes on the selected Mesh object."""
    bl_idname = "omni.bake_maps"
    bl_label = "Bake Maps"
    bl_options = {"REGISTER", "UNDO"}

    ## Passes Cycles can bake natively.
    base_bake_types = {
        ##!TODO: Possibly support these at a later date?
        # "COMBINED", "AO", "SHADOW", "POSITION", "UV", "ENVIRONMENT",
        "DIFFUSE",
        "NORMAL",
        "EMIT",
        "GLOSSY",
        "ROUGHNESS",
        "TRANSMISSION",
    }

    ## Passes baked via the emission workaround; maps bake type -> Principled input name.
    special_bake_types = {
        "METALLIC": "Metallic",
    }

    unwrap: BoolProperty(default=False, description="Unwrap")
    hide_original: BoolProperty(default=False, description="Hide Original")
    width: IntProperty(default=1024, min=128, max=8192, description="Width")
    height: IntProperty(default=1024, min=128, max=8192, description="Height")
    bake_types: StringProperty(default="DIFFUSE")  # comma-separated list of pass names
    merge_textures: BoolProperty(default=True, description="Merge Textures")

    @classmethod
    def poll(cls, context:Context) -> bool:
        return omni_bake_maps_poll(context)[0] == 1

    def draw(self, context:Context):
        """Empty draw to disable the Operator Props Panel."""
        pass

    def _get_bake_emission_target(self, node_tree:NodeTree) -> Node:
        """Return (creating on first use) the emission node used for special-pass bakes."""
        bake_emission_name = "OmniBake_Emission"
        if not bake_emission_name in node_tree.nodes:
            node = node_tree.nodes.new("ShaderNodeEmission")
            node.name = bake_emission_name
            output = get_material_output(node_tree, "CYCLES")
            node.location = output.location + Vector((-200.0, -100.0))
        return node_tree.nodes[bake_emission_name]

    def _copy_connection(self, material:Material, bsdf:Node, bake_type:str, target_socket:NodeSocket) -> bool:
        """Route the BSDF input for ``bake_type`` into ``target_socket`` (the bake
        emission node's Color input). Unconnected sockets have their constant
        value copied instead. Returns True when a value or link was transferred."""
        if not bake_type in self.special_bake_types:
            return False

        orig_socket = bsdf.inputs[self.special_bake_types[bake_type]]
        if not len(orig_socket.links):
            ## copy over the constant value and return
            ## FIX: this branch previously tested for "VECTOR", which was
            ## unreachable (shadowed by the elif below) and left scalar VALUE
            ## sockets -- e.g. Metallic, the only special type -- uncopied.
            if orig_socket.type == "VALUE":
                for index in range(4):
                    target_socket.default_value[index] = orig_socket.default_value
            elif orig_socket.type in {"VECTOR", "RGBA"}:
                for index in range(3):
                    target_socket.default_value[index] = orig_socket.default_value[index]
                target_socket.default_value[3] = 1.0
            else:
                ## should never arrive here
                return False
        else:
            input_socket = orig_socket.links[0].from_socket
            material.node_tree.links.new(input_socket, target_socket)
        return True

    def _create_bake_texture_names(self, ob:Object, bake_types:List[str]) -> List[str]:
        """Return the image names to bake into: one per material and pass, or one
        per pass for the whole object when merge_textures is on."""
        result = []
        for material in [x.material for x in ob.material_slots]:
            ## Strip the "_baked" suffix; fall back to the plain name so
            ## materials without the suffix don't produce an empty string.
            material_name = material.name.rpartition('_baked')[0] or material.name
            for bake_type in bake_types:
                if self.merge_textures:
                    image_name = f"{ob.name}__{bake_type}"
                else:
                    image_name = f"{ob.name}_{material_name}_{bake_type}"
                result.append(image_name)
        return result

    def report(self, type:Set[str], message:str):
        """Echo reports to stdout as well, so headless/batch runs log progress."""
        print(message)
        super(OBJECT_OT_omni_bake_maps, self).report(type, message)

    def execute(self, context:Context) -> Set[str]:
        wm = context.window_manager
        scene = context.scene

        ## Baking requires Cycles; the user's engine and bake settings are
        ## restored at the end.
        scene_engine = scene.render.engine
        scene.render.engine = "CYCLES"

        scene_use_clear = scene.render.bake.use_clear
        scene.render.bake.use_clear = False

        collection = prepare_collection(scene)

        all_bake_types = self.base_bake_types | self.special_bake_types.keys()
        valid_types_str = "Valid types are: " + ", ".join(all_bake_types)

        self.report({"INFO"}, f"Bake types: {self.bake_types}")

        bake_types = self.bake_types.split(",")
        if not len(bake_types):
            self.report({"ERROR"}, "No bake type specified. " + valid_types_str)
            ## FIX: previously fell through and kept running after this error
            return {"CANCELLED"}

        for bake_type in bake_types:
            if not bake_type in all_bake_types:
                self.report({"ERROR"}, f"Bake type '{bake_type}' is not valid. " + valid_types_str)
                return {"CANCELLED"}

        selected_meshes = _selected_meshes(context)

        ## progress range: one tick per (material, pass) pair
        count = 0
        total = 0
        for mesh in selected_meshes:
            count += len(mesh.material_slots) * len(bake_types)
        wm.progress_begin(total, count)

        bpy.ops.object.mode_set(mode="OBJECT")

        for mesh_object in _selected_meshes(context):
            mesh_object.hide_select = mesh_object.hide_render = mesh_object.hide_viewport = False
            baked_ob = prepare_mesh(mesh_object, collection, unwrap=self.unwrap)
            uv_layer = "OmniBake" if self.unwrap else baked_ob.data.uv_layers.active.name

            bpy.ops.object.select_all(action="DESELECT")
            baked_ob.select_set(True)
            context.view_layer.objects.active = baked_ob

            self.report({"INFO"}, f"Baking Object {baked_ob.name}")

            baked_materials = []

            ## Because of merge_textures, we have to create the names now and clear them
            ## before the whole bake process starts
            bake_image_names = self._create_bake_texture_names(baked_ob, bake_types)

            ## if merge_textures is on there'll be some repeats
            for image_name in set(bake_image_names):
                if image_name in bpy.data.images:
                    bpy.data.images.remove(bpy.data.images[image_name])
                image = bpy.data.images.new(image_name, self.width, self.height,
                                            float_buffer=(image_name.endswith(("NORMAL", "EMIT"))))
                ## per-pass colorspace is assigned right before each bake below
                image.colorspace_settings.name = "Raw"
                if self.merge_textures:
                    ## FIX: use the image name as the temp prefix; ``bake_type``
                    ## here was a stale leftover from the validation loop above.
                    temp_file = NamedTemporaryFile(prefix=image_name, suffix=".png", delete=False)
                    image.filepath = temp_file.name

            image_index = 0

            for material_index, material in enumerate([x.material for x in baked_ob.material_slots]):
                self.report({"INFO"}, f" => Material: {material.name}")
                tree = material.node_tree
                baked_ob.active_material_index = material_index

                for node in tree.nodes:
                    node.select = False

                output = get_material_output(tree)
                bsdf = output.inputs["Surface"].links[0].from_node

                if "OmniBakeImage" in tree.nodes:
                    tree.nodes.remove(tree.nodes["OmniBakeImage"])

                ## the active image texture node is the bake target
                bake_image_node = tree.nodes.new("ShaderNodeTexImage")
                bake_image_node.name = "OmniBakeImage"
                bake_image_node.location = output.location.copy()
                bake_image_node.location.x += 200.0
                bake_image_node.select = True
                tree.nodes.active = bake_image_node

                ## for special cases
                bake_emission = self._get_bake_emission_target(tree)
                original_link = output.inputs["Surface"].links[0]
                original_from, original_to = original_link.from_socket, original_link.to_socket

                baked_images = {}

                for bake_type in bake_types:
                    image_name = bake_image_names[image_index]
                    image = bpy.data.images[image_name]
                    bake_image_node.image = image.original if image.original else image

                    self.report({"INFO"}, f"====> Baking {material.name} pass {bake_type}...")

                    kwargs = {}
                    if bake_type in {"DIFFUSE"}:
                        ## ensure no black due to bad direct / indirect lighting
                        kwargs["pass_filter"] = {"COLOR"}
                        scene.render.bake.use_pass_indirect = False
                        scene.render.bake.use_pass_direct = False

                    if bake_type in self.special_bake_types:
                        ## cheat by running the bake through emit after reconnecting
                        real_bake_type = "EMIT"
                        tree.links.new(bake_emission.outputs["Emission"], original_to)
                        self._copy_connection(material, bsdf, bake_type, bake_emission.inputs["Color"])
                    else:
                        real_bake_type = bake_type
                        tree.links.new(original_from, original_to)

                    ## have to do this every pass?
                    if bake_type in {"DIFFUSE", "EMIT"}:
                        image.colorspace_settings.name = "sRGB"
                    else:
                        image.colorspace_settings.name = "Non-Color"

                    bpy.ops.object.bake(type=real_bake_type, width=self.width, height=self.height, uv_layer=uv_layer,
                                        use_clear=False, margin=1, **kwargs)

                    if self.merge_textures:
                        ## I know this seems weird, but if you don't save the file here
                        ## post-bake when merging, the texture gets corrupted and you end
                        ## up with a texture that's taking up ram, but can't be loaded
                        ## for rendering (comes up pink in Cycles)
                        image.save()

                    self.report({"INFO"}, "... Done.")
                    baked_images[bake_type] = image
                    total += 1
                    image_index += 1
                    wm.progress_update(total)
                    wm.update_tag()

                ## clean up the temporary bake nodes and restore the output link
                for node in bake_image_node, bake_emission:
                    tree.nodes.remove(node)

                tree.links.new(original_from, original_to)
                baked_materials.append((material, baked_images))

            for material, images in baked_materials:
                ## Perform conversion after all images are baked
                ## If this is not done, then errors can arise despite not
                ## replacing shader indices.
                create_principled_setup(material, images)

            for image in [bpy.data.images[x] for x in bake_image_names]:
                image.pack()

            ## Set new UV map as active if it exists
            if "OmniBake" in baked_ob.data.uv_layers:
                baked_ob.data.uv_layers["OmniBake"].active_render = True

            if self.hide_original:
                mesh_object.hide_set(True)

        wm.progress_end()

        ## restore the user's render settings
        scene.render.engine = scene_engine
        scene.render.bake.use_clear = scene_use_clear

        return {"FINISHED"}
## ======================================================================
## Classes (un)registered by this module's register()/unregister(), in order.
module_classes = [
    OBJECT_OT_omni_bake_maps,
    OmniBakerProperties,
]
def register():
    """Register the bake classes and attach the Scene.omni_bake settings pointer."""
    for item in module_classes:
        bpy.utils.register_class(item)
    bpy.types.Scene.omni_bake = bpy.props.PointerProperty(type=OmniBakerProperties)
def unregister():
    """Unregister classes in reverse order and drop the Scene.omni_bake pointer."""
    for item in reversed(module_classes):
        bpy.utils.unregister_class(item)
    try:
        delattr(bpy.types.Scene, "omni_bake")
    except (AttributeError, RuntimeError):
        ## property was never attached, or Blender is shutting down
        pass
| 21,781 | Python | 30.659884 | 111 | 0.678573 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
## Blender add-on metadata for the Omni Scene Optimization Panel.
bl_info = {
    "name": "Omni Scene Optimization Panel",
    "author": "Nvidia",
    "description": "",
    "blender": (3, 4, 0),   # minimum supported Blender version
    "version": (2, 0, 0),   # add-on version
    "location": "View3D > Toolbar > Omniverse",
    "warning": "",
    "category": "Omniverse"
}
from . import (operators, ui)
def register():
    """Register both submodules of the add-on."""
    for module in (operators, ui):
        module.register()
def unregister():
    """Unregister both submodules of the add-on."""
    for module in (operators, ui):
        module.unregister()
| 1,274 | Python | 27.333333 | 74 | 0.678964 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/operators.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import subprocess
import time
from typing import *
from importlib import reload
import bpy
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty)
from bpy.types import (Context, Event, Object, Modifier, NodeTree, Scene)
from mathutils import Vector
from .properties import (OmniSceneOptChopPropertiesMixin, chopProperties)
## ======================================================================
## EnumProperty items (identifier, name, description) for axis selectors.
symmetry_axis_items = [
    ("X", "X", "X"),
    ("Y", "Y", "Y"),
    ("Z", "Z", "Z")
]

## EnumProperty items for the kind of proxy geometry to generate.
generate_type_items = [
    ("CONVEX_HULL", "Convex Hull", "Convex Hull"),
    ("BOUNDING_BOX", "Bounding Box", "Bounding Box")
]

## Shared name for the generate modifier and its geometry-nodes group.
generate_name = "OmniSceneOptGenerate"
## ======================================================================
def selected_meshes(scene:Scene) -> List[Object]:
    """Return every selected mesh object found in ``scene``'s master collection."""
    meshes = []
    for ob in scene.collection.all_objects:
        if ob.type == "MESH" and ob.select_get():
            meshes.append(ob)
    return meshes
def get_plural_count(items) -> (str, int):
    """Return an ("s" or "") plural suffix plus len(items), for message formatting."""
    count = len(items)
    return ("" if count == 1 else "s"), count
## ======================================================================
def preserve_selection(func, *args, **kwargs):
    """Decorator: snapshot the current selection and active object, call ``func``,
    then restore whatever part of that selection still exists in the scene."""
    def wrapper(*args, **kwargs):
        saved_names = [x.name for x in bpy.context.selected_objects]
        active_ob = bpy.context.active_object
        saved_active = active_ob.name if active_ob else None

        result = func(*args, **kwargs)

        ## ``func`` may have deleted or replaced objects; re-resolve by name.
        scene_objects = bpy.context.scene.objects
        survivors = [scene_objects[name] for name in saved_names if name in scene_objects]

        new_active = None
        if saved_active:
            if saved_active in scene_objects:
                new_active = scene_objects[saved_active]
            elif survivors:
                new_active = survivors[-1]

        bpy.ops.object.select_all(action="DESELECT")
        for ob in survivors:
            ob.select_set(True)
        bpy.context.view_layer.objects.active = new_active

        return result
    return wrapper
## ======================================================================
class OmniSceneOptPropertiesMixin:
    """
    Blender Properties that are shared between the in-scene preferences pointer
    and the various operators.
    """
    verbose: BoolProperty(name="Verbose",
                          description="Print information while running",
                          default=False)

    # When False the operators run over every mesh in the scene collection.
    selected: BoolProperty(name="Selected",
                           description="Run on Selected Objects (if False, run on whole Scene)",
                           default=False)

    ## export options
    export_textures: BoolProperty(name="Export Textures",
                                  description="Export textures when doing a background export",
                                  default=True)

    ## these are deliberate copies from ui.OmniYes.Properties
    validate: BoolProperty(name="Validate Meshes",
                           description="Attempt to remove invalid geometry",
                           default=True)

    weld: BoolProperty(name="Weld Verts",
                       description="Weld loose vertices",
                       default=False)

    weld_distance: FloatProperty(name="Weld Distance",
                                 description="Distance threshold for welds",
                                 default=0.0001,
                                 min=0.00001,
                                 step=0.00001)

    unwrap: BoolProperty(name="Unwrap Mesh UVs",
                         description="Use the Smart Unwrap feature to add new UVs",
                         default=False)

    unwrap_margin: FloatProperty(name="Margin",
                                 description="Distance between UV islands",
                                 default=0.00,
                                 min=0.0,
                                 step=0.01)

    ## decimation settings (consumed by OBJECT_OT_omni_sceneopt_optimize.do_decimate)
    decimate: BoolProperty(name="Decimate",
                           description="Reduce polygon and vertex counts on meshes",
                           default=False)

    decimate_ratio: IntProperty(name="Ratio",
                                subtype="PERCENTAGE",
                                description="Reduce face count to this percentage of original",
                                default=50,
                                min=10, max=100,
                                step=5)

    decimate_use_symmetry: BoolProperty(name="Use Symmetry",
                                        description="Decimate with Symmetry across an axis",
                                        default=False)

    decimate_symmetry_axis: EnumProperty(name="Symmetry Axis",
                                         description="Axis for symmetry",
                                         items=symmetry_axis_items,
                                         default="X")

    decimate_min_face_count: IntProperty(name="Minimum Face Count",
                                         description="Do not decimate objects with less faces",
                                         default=500,
                                         min=100,
                                         step=10)

    decimate_remove_shape_keys: BoolProperty(name="Remove Shape Keys",
                                             description="Remove shape keys to allow meshes with shapes to be decimated",
                                             default=False)

    chop: BoolProperty(name="Chop Meshes",
                       description="Physically divide meshes based on size and point count",
                       default=False)

    generate: BoolProperty(name="Generate",
                           description="Generate convex hulls or bounding boxes",
                           default=False)

    merge: BoolProperty(name="Merge Selected",
                        description="On Export, merge selected meshes into a single object",
                        default=False)
## ======================================================================
class OmniSceneOptGeneratePropertiesMixin:
    """Properties shared by the generate operator and the optimize operator."""
    # When True a new "<name>_<TYPE>" object is created; otherwise the mesh is replaced.
    generate_duplicate: BoolProperty(name="Create Duplicate",
                                     description="Generate a new object instead of replacing the original",
                                     default=True)

    # Kind of proxy geometry to build (see generate_type_items).
    generate_type: EnumProperty(name="Generate Type",
                                description="Type of geometry to generate",
                                items=generate_type_items,
                                default="CONVEX_HULL")
## ======================================================================
"""
This is a weird one.
The decimate modifier was failing on multiple objects in order, but
wrapping it in an Operator seems to fix the issues with making sure
the correct things are selected in the Context.
"""
class OBJECT_OT_omni_sceneopt_decimate(bpy.types.Operator, OmniSceneOptPropertiesMixin):
    """Decimates the selected object using the Decimation modifier."""
    bl_idname = "omni_sceneopt.decimate"
    bl_label = "Omni Scene Optimization: Decimate"
    bl_options = {"REGISTER", "UNDO"}

    ratio: IntProperty(name="Ratio",
                       subtype="PERCENTAGE",
                       description="Reduce face count to this percentage of original",
                       default=50,
                       min=10, max=100,
                       step=5)

    use_symmetry: BoolProperty(name="Use Symmetry",
                               description="Decimate with Symmetry across an axis",
                               default=True)

    symmetry_axis: EnumProperty(name="Symmetry Axis",
                                description="Axis for symmetry",
                                items=symmetry_axis_items,
                                default="X")

    min_face_count: IntProperty(name="Minimum Face Count",
                                description="Do not decimate objects with less faces",
                                default=500,
                                min=100,
                                step=10)

    @classmethod
    def poll(cls, context:Context) -> bool:
        ## operates on the active object only
        return bool(context.active_object)

    def execute(self, context:Context) -> Set[str]:
        from .batch import lod
        ## Decimate in place (ratio property is a percentage; lod expects 0..1).
        ## FIX: dropped the unused ``result`` local.
        lod.decimate_object(context.active_object,
                            ratio=self.ratio / 100.0,
                            use_symmetry=self.use_symmetry,
                            symmetry_axis=self.symmetry_axis,
                            min_face_count=self.min_face_count,
                            create_duplicate=False)
        return {"FINISHED"}
## ======================================================================
class OmniOverrideMixin:
    """Mixin providing Context.temp_override() helpers targeting explicit object lists."""

    def set_active(self, ob:Object):
        """Make ``ob`` the active object; failures are logged, not raised."""
        try:
            bpy.context.view_layer.objects.active = ob
        except RuntimeError as e:
            ## FIX: the original message had an unbalanced parenthesis
            print(f"-- unable to set active: {ob.name} ({e})")

    def _build_override(self, objects:List[Object], edit:bool=False) -> dict:
        """Build the keyword dict for Context.temp_override.

        ``edit=True`` additionally reports the objects as being in edit mode.
        """
        in_mode = objects if edit else []
        return {
            'active_object': objects[0],
            'edit_object': objects[0] if edit else None,
            'editable_objects': objects,
            'object': objects[0],
            'objects_in_mode': in_mode,
            'objects_in_mode_unique_data': in_mode,
            'selectable_objects': objects,
            'selected_editable_objects': objects,
            'selected_objects': objects,
            'visible_objects': objects,
        }

    def override(self, objects:List[Object], single=False):
        """Object-mode override; objects outside the current view layer are dropped."""
        assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple"
        assert len(objects), "'objects' cannot be empty"

        ## filter out objects not in current view layer
        objects = list(filter(lambda x: x.name in bpy.context.view_layer.objects, objects))
        if single:
            objects = objects[0:1]

        self.set_active(objects[0])
        return bpy.context.temp_override(**self._build_override(objects, edit=False))

    def edit_override(self, objects:List[Object], single=False):
        """Edit-mode override (no view-layer filtering, matching the original behavior)."""
        assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple"
        assert len(objects), "'objects' cannot be empty"

        if single:
            objects = objects[0:1]

        self.set_active(objects[0])
        return bpy.context.temp_override(**self._build_override(objects, edit=True))
## ======================================================================
class OBJECT_OT_omni_sceneopt_optimize(bpy.types.Operator,
                                       OmniSceneOptPropertiesMixin,
                                       OmniSceneOptChopPropertiesMixin,
                                       OmniSceneOptGeneratePropertiesMixin,
                                       OmniOverrideMixin):
    """Run specified optimizations on the scene or on selected objects."""
    bl_idname = "omni_sceneopt.optimize"
    bl_label = "Omni Scene Optimization: Optimize Scene"
    bl_options = {"REGISTER", "UNDO"}

    def _object_mode(self):
        """Ensure Blender is in Object mode."""
        if not bpy.context.mode == "OBJECT":
            bpy.ops.object.mode_set(mode="OBJECT")

    def _edit_mode(self):
        """Ensure Blender is in mesh Edit mode."""
        if not bpy.context.mode == "EDIT_MESH":
            bpy.ops.object.mode_set(mode="EDIT")

    @staticmethod
    def _remove_shape_keys(ob:Object):
        assert ob.type == "MESH", "Cannot be run on non-Mesh Objects."
        ## Reversed because we want to remove Basis last, or we will end up
        ## with garbage baked in.
        for key in reversed(ob.data.shape_keys.key_blocks):
            ob.shape_key_remove(key)

    @staticmethod
    def _select_one(ob:Object):
        """Select ``ob`` exclusively and make it active."""
        bpy.ops.object.select_all(action="DESELECT")
        ob.select_set(True)
        bpy.context.view_layer.objects.active = ob

    @staticmethod
    def _select_objects(objects:List[Object]):
        """Select exactly ``objects``; the last becomes active."""
        bpy.ops.object.select_all(action="DESELECT")
        for item in objects:
            item.select_set(True)
        bpy.context.view_layer.objects.active = objects[-1]

    @staticmethod
    def _get_evaluated(objects:List[Object]) -> List[Object]:
        deps = bpy.context.evaluated_depsgraph_get()
        return [x.evaluated_get(deps).original for x in objects]

    @staticmethod
    def _total_vertex_count(target_objects:List[Object]):
        """Sum of (modifier-evaluated) vertex counts across the targets."""
        deps = bpy.context.evaluated_depsgraph_get()
        eval_objs = [x.evaluated_get(deps) for x in target_objects]
        return sum([len(x.data.vertices) for x in eval_objs])

    def do_validate(self, target_objects:List[Object]) -> List[Object]:
        """Expects to be run in Edit Mode with all meshes selected"""
        bpy.ops.mesh.select_all(action="SELECT")
        bpy.ops.mesh.dissolve_degenerate()

        if self.verbose:
            plural, obj_count = get_plural_count(target_objects)
            message = f"Validated {obj_count} object{plural}."
            self.report({"INFO"}, message)

        return target_objects

    def do_weld(self, target_objects:List[Object]) -> List[Object]:
        """Expects to be run in Edit Mode with all meshes selected"""
        bpy.ops.mesh.remove_doubles(threshold=self.weld_distance, use_unselected=True)
        bpy.ops.mesh.normals_make_consistent(inside=False)
        return target_objects

    def do_unwrap(self, target_objects:List[Object]) -> List[Object]:
        """Smart-project fresh UVs, one object at a time."""
        bpy.ops.object.select_all(action="DESELECT")

        start = time.time()
        for item in target_objects:
            with self.edit_override([item]):
                bpy.ops.object.mode_set(mode="EDIT")
                bpy.ops.mesh.select_all(action="SELECT")
                ## NOTE(review): island_margin is hard-coded to 0.0; the
                ## unwrap_margin property looks intended for this -- confirm.
                bpy.ops.uv.smart_project(island_margin=0.0)
                bpy.ops.uv.select_all(action="SELECT")
                bpy.ops.object.mode_set(mode="OBJECT")
        end = time.time()

        if self.verbose:
            plural, obj_count = get_plural_count(target_objects)
            message = f"Unwrapped {obj_count} object{plural} ({end-start:.02f} seconds)."
            self.report({"INFO"}, message)

        return target_objects

    def do_decimate(self, target_objects:List[Object]) -> List[Object]:
        """Apply a collapse Decimate modifier to each target over the face-count threshold."""
        assert bpy.context.mode == "OBJECT", "Decimate must be run in object mode."

        total_orig = self._total_vertex_count(target_objects)
        total_result = 0

        start = time.time()
        for item in target_objects:
            if item.data.shape_keys and len(item.data.shape_keys.key_blocks):
                if not self.decimate_remove_shape_keys:
                    self.report({"WARNING"}, f"[ Decimate ] Skipping {item.name} because it has shape keys.")
                    continue
                else:
                    self._remove_shape_keys(item)

            if len(item.data.polygons) < self.decimate_min_face_count:
                self.report({"INFO"}, f"{item.name} is under face count-- not decimating.")
                continue

            ## We're going to use the decimate modifier
            mod = item.modifiers.new("OmniLOD", type="DECIMATE")
            mod.decimate_type = "COLLAPSE"
            mod.ratio = self.decimate_ratio / 100.0
            mod.use_collapse_triangulate = True
            mod.use_symmetry = self.decimate_use_symmetry
            mod.symmetry_axis = self.decimate_symmetry_axis

            ## we don't need a full context override here
            self.set_active(item)
            bpy.ops.object.modifier_apply(modifier=mod.name)
            total_result += len(item.data.vertices)
        end = time.time()

        if self.verbose:
            plural, obj_count = get_plural_count(target_objects)
            message = f"Decimated {obj_count} object{plural}. Vertex count original {total_orig} to {total_result} ({end-start:.02f} seconds)."
            self.report({"INFO"}, message)

        return target_objects

    def do_chop(self, target_objects:List[Object]):
        """
        Assumes all objects are selected and that we are in Object mode
        """
        assert bpy.context.mode == "OBJECT", "Chop must be run in object mode."
        scene = bpy.context.scene
        attributes = scene.omni_sceneopt_chop.attributes()
        attributes["selected_only"] = self.selected
        bpy.ops.omni_sceneopt.chop(**attributes)
        return target_objects

    def do_generate(self, target_objects:List[Object]):
        """Run the generate operator with this operator's generate settings."""
        with self.override(target_objects):
            bpy.ops.omni_sceneopt.generate(generate_type=self.generate_type,
                                           generate_duplicate=self.generate_duplicate)
        return target_objects

    def execute(self, context:Context) -> Set[str]:
        start = time.time()

        active = context.active_object
        if self.selected:
            targets = selected_meshes(context.scene)
        else:
            targets = [x for x in context.scene.collection.all_objects if x.type == "MESH"]

        bpy.ops.object.select_all(action="DESELECT")
        for item in targets:
            item.select_set(True)
        if active:
            self.set_active(active)

        if not len(targets):
            ## FIX: was self.info(...), which does not exist on Operator and
            ## raised AttributeError instead of reporting the error.
            self.report({"ERROR"}, "No targets specified.")
            return {"CANCELLED"}

        self._object_mode()

        ## Have to do vertex counts outside edit mode!
        total_orig = self._total_vertex_count(targets)

        if self.validate or self.weld:
            with self.edit_override(targets):
                bpy.ops.object.mode_set(mode="EDIT")
                ## We can run these two operations together because they don't collide
                ## or cause issues between each other.
                if self.validate:
                    self.do_validate(targets)
                if self.weld:
                    self.do_weld(targets)

        ## Unfortunately, the rest are object-by-object operations
        self._object_mode()

        total_result = self._total_vertex_count(targets)
        if self.verbose and self.weld:
            plural, obj_count = get_plural_count(targets)
            message = f"Welded {obj_count} object{plural}. Vertex count original {total_orig} to {total_result}."
            self.report({"INFO"}, message)

        if self.unwrap:
            self.do_unwrap(targets)

        if self.decimate:
            self.do_decimate(targets)

        if self.chop:
            self.do_chop(targets)

        if self.generate:
            self.do_generate(targets)

        end = time.time()
        if self.verbose:
            self.report({"INFO"}, f"Optimization complete-- process took {end-start:.02f} seconds")

        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_chop(bpy.types.Operator, OmniSceneOptChopPropertiesMixin):
    """Chop the specified object into a grid of smaller ones"""
    bl_idname = "omni_sceneopt.chop"
    bl_label = "Omni Scene Optimizer: Chop"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context:Context) -> Set[str]:
        from .scripts.chop import Chop
        ## FIX: removed a dead local dict that duplicated self.attributes()
        ## but was never passed anywhere.
        chopper = Chop()
        chopper.execute(self.attributes())
        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_generate(bpy.types.Operator, OmniSceneOptGeneratePropertiesMixin, OmniOverrideMixin):
    """Generate geometry based on selected objects. Currently supported: Bounding Box, Convex Hull"""
    bl_idname = "omni_sceneopt.generate"
    bl_label = "Omni Scene Optimizer: Generate"
    bl_options = {"REGISTER", "UNDO"}

    def create_geometry_nodes_group(self, group:NodeTree):
        """Wire ``group`` as Input -> (ConvexHull|BoundBox) -> Output."""
        node_type = {
            "CONVEX_HULL": "GeometryNodeConvexHull",
            "BOUNDING_BOX": "GeometryNodeBoundBox",
        }[self.generate_type]

        geometry_input = group.nodes["Group Input"]
        geometry_input.location = Vector((-1.5 * geometry_input.width, 0))
        group_output = group.nodes["Group Output"]
        group_output.location = Vector((1.5 * group_output.width, 0))

        node = group.nodes.new(node_type)
        node.name = "Processor"

        group.links.new(geometry_input.outputs['Geometry'], node.inputs['Geometry'])
        group.links.new(node.outputs[0], group_output.inputs['Geometry'])

        ## NOTE(review): looks up the group by the shared name rather than
        ## returning ``group`` directly -- presumably equivalent; confirm.
        return bpy.data.node_groups[generate_name]

    def create_geometry_nodes_modifier(self, ob:Object) -> Modifier:
        """Attach a fresh geometry-nodes modifier (named ``generate_name``) to ``ob``."""
        if generate_name in ob.modifiers:
            ob.modifiers.remove(ob.modifiers[generate_name])
        if generate_name in bpy.data.node_groups:
            bpy.data.node_groups.remove(bpy.data.node_groups[generate_name])

        mod = ob.modifiers.new(name=generate_name, type="NODES")
        bpy.ops.node.new_geometry_node_group_assign()
        mod.node_group.name = generate_name
        self.create_geometry_nodes_group(mod.node_group)

        return mod

    def create_duplicate(self, ob:Object, token:str) -> Object:
        """Duplicate ``ob`` with a name token suffix (no welding)."""
        from .batch import lod
        duplicate = lod.duplicate_object(ob, token, weld=False)
        return duplicate

    @preserve_selection
    def apply_modifiers(self, target_objects:List[Object]) -> int:
        """Apply the generate modifier to each target (or its duplicate).

        :return: the number of objects processed.
        """
        count = 0

        for item in target_objects:
            if self.generate_duplicate:
                token = self.generate_type.rpartition("_")[-1]
                duplicate = self.create_duplicate(item, token=token)
                duplicate.parent = item.parent
                duplicate.matrix_world = item.matrix_world.copy()
                ## place the duplicate alongside the original, not in the scene root
                bpy.context.scene.collection.objects.unlink(duplicate)
                for collection in item.users_collection:
                    collection.objects.link(duplicate)
                item = duplicate

            with self.override([item]):
                mod = self.create_geometry_nodes_modifier(item)
                bpy.context.view_layer.objects.active = item
                item.select_set(True)
                bpy.ops.object.modifier_apply(modifier=mod.name)
                count += 1

        ## FIX: count was accumulated but never returned, which made the
        ## cleanup branch in execute() unreachable.
        return count

    def execute(self, context:Context) -> Set[str]:
        changed = self.apply_modifiers(context.selected_objects)
        ## FIX: was looking up "OMNI_SCENEOPT_GENERATE", which never matches
        ## the group created above (named by ``generate_name``).
        if changed and generate_name in bpy.data.node_groups:
            bpy.data.node_groups.remove(bpy.data.node_groups[generate_name])
        return {"FINISHED"}
## ======================================================================
class OBJECT_OT_omni_progress(bpy.types.Operator):
    """Modal operator that shows a busy cursor until Scene.omni_progress_active is cleared."""
    bl_idname = "omni.progress"
    bl_label = "Export Optimized USD"
    bl_options = {"REGISTER", "UNDO"}

    message: StringProperty(name="message",
                            description="Message to print upon completion.",
                            default="")

    _timer = None

    def modal(self, context:Context, event:Event) -> Set[str]:
        if context.scene.omni_progress_active is False:
            ## FIX: release the modal timer and restore the cursor; previously
            ## the timer kept firing and the WAIT cursor stuck after finishing.
            if self._timer is not None:
                context.window_manager.event_timer_remove(self._timer)
                self._timer = None
            context.window.cursor_set("DEFAULT")

            message = self.message.strip()
            if len(message):
                self.report({"INFO"}, message)
            return {"FINISHED"}

        context.area.tag_redraw()
        context.window.cursor_set("WAIT")
        return {"RUNNING_MODAL"}

    def invoke(self, context:Context, event:Event) -> Set[str]:
        context.scene.omni_progress_active = True
        ## tick every 0.1s so modal() gets called while we wait
        self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
        context.window_manager.modal_handler_add(self)
        context.window.cursor_set("WAIT")
        return {"RUNNING_MODAL"}
## ======================================================================
class OBJECT_OT_omni_sceneopt_export(bpy.types.Operator,
                                     OmniSceneOptPropertiesMixin,
                                     OmniSceneOptChopPropertiesMixin,
                                     OmniSceneOptGeneratePropertiesMixin):
    """Runs specified optimizations on the scene before running a USD Export"""
    bl_idname = "omni_sceneopt.export"
    bl_label = "Export USD"
    bl_options = {"REGISTER", "UNDO"}

    filepath: StringProperty(subtype="FILE_PATH")
    filter_glob: StringProperty(default="*.usd;*.usda;*.usdc", options={"HIDDEN"})
    check_existing: BoolProperty(default=True, options={"HIDDEN"})

    def draw(self, context:Context):
        """Empty draw to disable the Operator Props Panel."""
        pass

    def invoke(self, context:Context, event:Event) -> Set[str]:
        if len(self.filepath.strip()) == 0:
            self.filepath = "untitled.usdc"
        context.window_manager.fileselect_add(self)
        return {"RUNNING_MODAL"}

    def execute(self, context:Context) -> Set[str]:
        output_path = bpy.path.abspath(self.filepath)
        script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "batch", "optimize_export.py")

        bpy.ops.omni.progress(message=f"Finished background write to {output_path}")
        ## the background Blender loads the saved file, so save first
        bpy.ops.wm.save_mainfile()

        ## FIX: pass an argument list with shell=False instead of a hand-quoted
        ## shell string -- robust against spaces/quotes in paths and avoids
        ## shell interpretation entirely.
        command = [
            bpy.app.binary_path,
            "--background",
            bpy.data.filepath,
            "--python",
            script_path,
            "--",
            output_path,
        ]

        print(command)
        subprocess.check_output(command, shell=False)

        context.scene.omni_progress_active = False

        if self.verbose:
            self.report({"INFO"}, f"Exported optimized scene to: {output_path}")

        return {"FINISHED"}
## ======================================================================
# All registrable classes, in dependency order (operators before the
# property group they may reference; unregistered in reverse).
classes = [
    OBJECT_OT_omni_sceneopt_decimate,
    OBJECT_OT_omni_sceneopt_chop,
    OBJECT_OT_omni_sceneopt_generate,
    OBJECT_OT_omni_sceneopt_optimize,
    OBJECT_OT_omni_progress,
    OBJECT_OT_omni_sceneopt_export,
    chopProperties
]
def unregister():
    """Remove the scene-level pointers, then unregister every addon class
    in reverse registration order (errors are tolerated so a partially
    registered addon can still be torn down)."""
    for scene_attr in ("omni_sceneopt_chop", "omni_progress_active"):
        try:
            delattr(bpy.types.Scene, scene_attr)
        except AttributeError:
            pass
    for registered in reversed(classes):
        try:
            bpy.utils.unregister_class(registered)
        except (ValueError, AttributeError, RuntimeError):
            pass
def register():
    """Register every addon class, then attach the chop settings and the
    progress flag to the Scene type."""
    for addon_class in classes:
        bpy.utils.register_class(addon_class)
    bpy.types.Scene.omni_sceneopt_chop = bpy.props.PointerProperty(type=chopProperties)
    bpy.types.Scene.omni_progress_active = bpy.props.BoolProperty(default=False)
| 23,131 | Python | 30.687671 | 134 | 0.67001 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/panel.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.types import Panel
from os.path import join, dirname
import bpy.utils.previews
#---------------Custom ICONs----------------------
def get_icons_directory():
    """Return the path of the "icons" folder bundled next to this module."""
    return join(dirname(__file__), "icons")
class OPTIMIZE_PT_Panel(Panel):
    """"OPTIMIZE SCENE" panel in the View3D sidebar ("Omniverse" tab).

    Shows the optimize.scene launch button plus the settings for whichever
    operation (modify / fixMesh / uv / chop) is currently selected.
    """
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "OPTIMIZE SCENE"
    bl_category = "Omniverse"

    # retrieve icons
    # NOTE(review): this preview collection is created at class-definition
    # (import) time and is never released via bpy.utils.previews.remove();
    # confirm cleanup is handled on addon unload.
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
    icons.load("GEAR", join(icons_directory, "gear.png"), 'IMAGE')

    def draw(self, context):
        """Build the panel from the four scene-level settings groups."""
        layout = self.layout
        layout.label(text="Omniverse", icon_value=self.icons["OMNI"].icon_id)
        # Settings groups registered on the Scene by this addon.
        optimizeOptions = context.scene.optimize_options
        modifyOptions = context.scene.modify_options
        uvOptions = context.scene.uv_options
        chopOptions = context.scene.chop_options
        # OPERATOR SETTINGS
        box = layout.box()
        col = box.column(align= True)
        row = col.row(align=True)
        row.scale_y = 1.5
        row.operator("optimize.scene", text = "Optimize Scene", icon_value=self.icons["GEAR"].icon_id)
        col.separator()
        row2 = col.row(align=True)
        row2.scale_y = 1.3
        row2.prop(optimizeOptions, "operation", text="Operation")
        col.separator()
        col.prop(optimizeOptions, "print_attributes", expand= True)
        box2 = layout.box()
        box2.label(text= "OPERATION PROPERTIES:")
        col2 = box2.column(align= True)
        # MODIFY SETTINGS
        if optimizeOptions.operation == 'modify':
            row = col2.row(align= True)
            row.prop(modifyOptions, "modifier", text="Modifier")
            row2 = col2.row(align= True)
            row3 = col2.row(align= True)
            # DECIMATE: the parameter shown depends on the decimation sub-type.
            if modifyOptions.modifier == 'DECIMATE':
                row2.prop(modifyOptions, "decimate_type", expand= True)
                if modifyOptions.decimate_type == 'COLLAPSE':
                    row3.prop(modifyOptions, "ratio", expand= True)
                elif modifyOptions.decimate_type == 'UNSUBDIV':
                    row3.prop(modifyOptions, "iterations", expand= True)
                elif modifyOptions.decimate_type == 'DISSOLVE':
                    row3.prop(modifyOptions, "angle", expand= True)
            # REMESH: octree depth for legacy modes, voxel size for VOXEL.
            elif modifyOptions.modifier == 'REMESH':
                row2.prop(modifyOptions, "remesh_type", expand= True)
                if modifyOptions.remesh_type == 'BLOCKS':
                    row3.prop(modifyOptions, "oDepth", expand= True)
                if modifyOptions.remesh_type == 'SMOOTH':
                    row3.prop(modifyOptions, "oDepth", expand= True)
                if modifyOptions.remesh_type == 'SHARP':
                    row3.prop(modifyOptions, "oDepth", expand= True)
                if modifyOptions.remesh_type == 'VOXEL':
                    row3.prop(modifyOptions, "voxel_size", expand= True)
            # NODES: geometry-nodes tree type (+ attribute for subdiv trees).
            elif modifyOptions.modifier == 'NODES':
                row2.prop(modifyOptions, "geo_type")
                if modifyOptions.geo_type == "GeometryNodeSubdivisionSurface":
                    row2.prop(modifyOptions, "geo_attribute", expand= True)
            col2.prop(modifyOptions, "selected_only", expand= True)
            col2.prop(modifyOptions, "apply_mod", expand= True)
            # Optional mesh-cleanup pass run before the modifier is added.
            box3 = col2.box()
            col3 = box3.column(align=True)
            col3.label(text="FIX MESH BEFORE MODIFY")
            col3.prop(modifyOptions, "fix_bad_mesh", expand= True)
            if modifyOptions.fix_bad_mesh:
                col3.prop(modifyOptions, "dissolve_threshold", expand= True)
            col3.prop(modifyOptions, "merge_vertex", expand= True)
            if modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "merge_threshold", expand= True)
            if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "remove_existing_sharp", expand= True)
            col3.prop(modifyOptions, "fix_normals", expand= True)
            if modifyOptions.fix_normals:
                col3.prop(modifyOptions, "create_new_custom_normals", expand= True)
            # use_modifier_stack= modifyOptions.use_modifier_stack,
            # modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],
        # FIX MESH SETTINGS
        elif optimizeOptions.operation == 'fixMesh':
            col2.prop(modifyOptions, "selected_only", expand= True)
            col3 = col2.column(align=True)
            col3.prop(modifyOptions, "fix_bad_mesh", expand= True)
            if modifyOptions.fix_bad_mesh:
                col3.prop(modifyOptions, "dissolve_threshold", expand= True)
            col3.prop(modifyOptions, "merge_vertex", expand= True)
            if modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "merge_threshold", expand= True)
            if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "remove_existing_sharp", expand= True)
            col3.prop(modifyOptions, "fix_normals", expand= True)
            if modifyOptions.fix_normals:
                col3.prop(modifyOptions, "create_new_custom_normals", expand= True)
        # UV SETTINGS
        elif optimizeOptions.operation == 'uv':
            if uvOptions.unwrap_type == 'Smart':
                col2.label(text= "SMART UV CAN BE SLOW", icon='ERROR')
            else:
                col2.label(text= "Unwrap Type")
            col2.prop(uvOptions, "unwrap_type", expand= True)
            col2.prop(uvOptions, "selected_only", expand= True)
            col2.prop(uvOptions, "scale_to_bounds", expand= True)
            col2.prop(uvOptions, "clip_to_bounds", expand= True)
            col2.prop(uvOptions, "use_set_size", expand= True)
            if uvOptions.use_set_size:
                col2.prop(uvOptions, "set_size", expand= True)
            col2.prop(uvOptions, "print_updated_results", expand= True)
        # CHOP SETTINGS
        elif optimizeOptions.operation == 'chop':
            col2.prop(chopOptions, "selected_only", expand= True)
            col2.prop(chopOptions, "cut_meshes", expand= True)
            col2.prop(chopOptions, "max_vertices", expand= True)
            col2.prop(chopOptions, "min_box_size", expand= True)
            col2.prop(chopOptions, "max_depth", expand= True)
            col2.prop(chopOptions, "merge", expand= True)
            col2.prop(chopOptions, "create_bounds", expand= True)
            col2.prop(chopOptions, "print_updated_results", expand= True)
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/properties.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from typing import *
from bpy.props import *
import bpy
class optimizeProperties(bpy.types.PropertyGroup):
    """Top-level settings for the Optimize Scene operator: which pass to
    run and whether to log its parameters."""

    # Which optimization pass the operator executes.
    operation: EnumProperty(
        name="Operation",
        items= [ ('modify', 'MODIFY', 'run modify'),
                 ('fixMesh', 'FIX MESH', 'run fix Mesh'),
                 ('uv', 'UV UNWRAP', "run uv"),
                 ('chop', 'CHOP', 'run chop')],
        description= "Choose the operation to run on the scene",
        default = 'modify'
    )

    # Fix: corrected tooltip typo ("begging" -> "beginning").
    print_attributes: BoolProperty(
        name ="Print Attributes",
        description = "Print attributes used at the beginning of the operation",
        default = False
    )
class modProperties(bpy.types.PropertyGroup):
    """Settings for the MODIFY / FIX MESH operations: which modifier to
    add, its per-type parameters, and the optional mesh-cleanup pass."""

    selected_only: BoolProperty(
        name ="Use Selected Only",
        description = "Operate on selected objects only",
        default = False
    )
    apply_mod: BoolProperty(
        name ="Apply Modifier",
        description = "Apply modifier after adding",
        default = True
    )
    # --- mesh-cleanup toggles (shared with the FIX MESH operation) ---
    fix_bad_mesh: BoolProperty(
        name ="Fix Bad Mesh",
        description = "Remove zero area faces and zero length edges",
        default = False
    )
    dissolve_threshold: FloatProperty(
        name="Dissolve Threshold",
        description = "Threshold value used with Fix Bad Mesh",
        default=0.08,
        min=0,
        max=50
    )
    merge_vertex: BoolProperty(
        name ="Merge Vertex",
        description = "Merge vertices by distance",
        default = False
    )
    merge_threshold: FloatProperty(
        name="Merge Threshold",
        description = "Distance value used with merge vertex",
        default=0.01,
        min=0,
        max=50
    )
    remove_existing_sharp: BoolProperty(
        name ="Remove Existing Sharp",
        description = "Remove existing sharp edges from meshes. This helps sometimes after fixing bad meshes",
        default = True
    )
    fix_normals: BoolProperty(
        name ="Fix Normals",
        description = "Remove existing custom split normals",
        default = False
    )
    create_new_custom_normals: BoolProperty(
        name ="Create New Custom Normals",
        description = "Create new custom split normals",
        default = False
    )

    # Some common modifier names for reference:'DECIMATE''REMESH''NODES''SUBSURF''SOLIDIFY''ARRAY''BEVEL'
    modifier: EnumProperty(
        name="Modifier",
        items= [ ('DECIMATE', 'Decimate', 'decimate geometry'),
                 ('REMESH', 'Remesh', 'remesh geometry'),
                 ('NODES', 'Nodes', 'add geometry node mod'),
                 ('FIX', 'Fix Mesh', "fix mesh")],
        description= "Choose the modifier to apply to geometry",
        default = 'DECIMATE'
    )

    # TODO: Implement this modifier stack properly. would allow for multiple modifiers to be queued and run at once
    # use_modifier_stack: BoolProperty(
    #     name ="Use Modifier Stack",
    #     description = "use stack of modifiers instead of a single modifier",
    #     default = False
    # )
    # modifier_stack: CollectionProperty(
    #     type= optimizeProperties,
    #     name="Modifiers",
    #     description= "list of modifiers to be used",
    #     default = [["DECIMATE", "COLLAPSE", 0.5]]
    # )

    decimate_type: EnumProperty(
        items= [ ('COLLAPSE','collapse',"collapse geometry"),
                 ('UNSUBDIV','unSubdivide',"un subdivide geometry"),
                 ('DISSOLVE','planar',"dissolve geometry")],
        description = "Choose which type of decimation to perform.",
        default = "COLLAPSE"
    )
    ratio: FloatProperty(
        name="Ratio",
        default=0.5,
        min=0.0,
        max=1.0
    )
    iterations: IntProperty(
        name="Iterations",
        default=2,
        min=0,
        max=50
    )
    angle: FloatProperty(
        name="Angle",
        default=15.0,
        min=0.0,
        max=180.0
    )
    # Fix: the remesh item tooltips were copy-pasted from the decimate enum
    # ("collapse geometry", etc.); corrected to describe the remesh modes.
    remesh_type: EnumProperty(
        items= [ ('BLOCKS','blocks',"remesh into blocky chunks"),
                 ('SMOOTH','smooth',"remesh with a smoothed surface"),
                 ('SHARP','sharp',"remesh preserving sharp features"),
                 ('VOXEL','voxel',"voxel-based remesh")],
        description = "Choose which type of remesh to perform.",
        default = "VOXEL"
    )
    # Octree depth for the BLOCKS/SMOOTH/SHARP remesh modes.
    oDepth: IntProperty(
        name="Octree Depth",
        default=4,
        min=1,
        max=8
    )
    # Voxel size for the VOXEL remesh mode.
    voxel_size: FloatProperty(
        name="Voxel Size",
        default=0.1,
        min=0.01,
        max=2.0
    )
    geo_type: EnumProperty(
        items= [ ('GeometryNodeConvexHull','convex hull',"basic convex hull"),
                 ('GeometryNodeBoundBox','bounding box',"basic bounding box"),
                 ('GeometryNodeSubdivisionSurface','subdiv',"subdivide geometry")],
        description = "Choose which type of geo node tree to add",
        default = "GeometryNodeBoundBox"
    )
    geo_attribute: IntProperty(
        name="Attribute",
        description = "Additional attribute used for certain geo nodes",
        default=2,
        min=0,
        max=8
    )
class uvProperties(bpy.types.PropertyGroup):
    """Settings for the UV UNWRAP operation."""

    selected_only: BoolProperty(
        name ="Use Selected Only",
        description = "Operate on selected objects only",
        default = False
    )
    # Fix: the item tooltips were copy-pasted from an unrelated enum
    # ("basic convex hull", "subdivide geometry", ...); corrected to
    # describe the projection types.
    unwrap_type: EnumProperty(
        items= [ ('Cube','cube project',"project UVs from a cube"),
                 ('Sphere','sphere project',"project UVs from a sphere"),
                 ('Cylinder','cylinder project',"project UVs from a cylinder"),
                 ('Smart','smart project',"angle-based smart UV projection")],
        description = "Choose which type of unwrap process to use.",
        default = "Cube"
    )
    scale_to_bounds: BoolProperty(
        name ="Scale To Bounds",
        description = "Scale UVs to 2D bounds",
        default = False
    )
    clip_to_bounds: BoolProperty(
        name ="Clip To Bounds",
        description = "Clip UVs to 2D bounds",
        default = False
    )
    use_set_size: BoolProperty(
        name ="Use Set Size",
        description = "Use a defined UV size for all objects",
        default = False
    )
    set_size : FloatProperty(
        name="Set Size",
        default=2.0,
        min=0.01,
        max=100.0
    )
    print_updated_results: BoolProperty(
        name ="Print Updated Results",
        description = "Print updated results to console",
        default = True
    )
class OmniSceneOptChopPropertiesMixin:
    """Shared property declarations for the "chop" (scene splitting) stage,
    plus dict round-trip helpers. Mixed into both a PropertyGroup
    (chopProperties) and operators that take the same parameters."""
    selected_only: BoolProperty(
        name="Split Selected Only",
        description="Operate on selected objects only",
        default=False
    )
    print_updated_results: BoolProperty(
        name="Print Updated Results",
        description="Print updated results to console",
        default=True
    )
    cut_meshes: BoolProperty(
        name="Cut Meshes",
        description="Cut meshes",
        default=True
    )
    merge: BoolProperty(
        name="Merge",
        description="Merge split chunks after splitting is complete",
        default=False
    )
    create_bounds: BoolProperty(
        name="Create Boundary Objects",
        description="Add generated boundary objects to scene",
        default=False
    )
    max_depth: IntProperty(
        name="Max Depth",
        description="Maximum recursion depth",
        default=8,
        min=0,
        max=32
    )
    max_vertices: IntProperty(
        name="Max Vertices",
        description="Maximum vertices allowed per block",
        default=10000,
        min=0,
        max=1000000
    )
    min_box_size: FloatProperty(
        name="Min Box Size",
        description="Minimum dimension for a chunk to be created",
        default=1,
        min=0,
        max=10000
    )

    def attributes(self) -> Dict:
        """Return the chop settings as a plain keyword dict (the inverse
        of :meth:`set_attributes`)."""
        return dict(
            merge=self.merge,
            cut_meshes=self.cut_meshes,
            max_vertices=self.max_vertices,
            min_box_size=self.min_box_size,
            max_depth=self.max_depth,
            print_updated_results=self.print_updated_results,
            create_bounds=self.create_bounds,
            selected_only=self.selected_only
        )

    def set_attributes(self, attributes:Dict):
        """Apply ``attributes`` onto this object, one attr per key.

        :raises ValueError: if a key does not name an existing attribute.
        """
        for attr, value in attributes.items():
            if hasattr(self, attr):
                setattr(self, attr, value)
            else:
                raise ValueError(f"OmniSceneOptChopPropertiesMixin: invalid attribute for set {attr}")
class chopProperties(bpy.types.PropertyGroup, OmniSceneOptChopPropertiesMixin):
    """Concrete PropertyGroup exposing the chop settings so they can be
    registered on the Scene; all content comes from the mixin."""
    pass
| 9,344 | Python | 27.842593 | 115 | 0.601241 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/ui.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
from typing import *
import bpy
from bpy.utils import previews
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty)
from bpy.types import (Context, Object, Operator, Scene)
from .operators import (
OBJECT_OT_omni_sceneopt_optimize,
OBJECT_OT_omni_sceneopt_export,
OmniSceneOptPropertiesMixin,
OmniSceneOptGeneratePropertiesMixin,
selected_meshes,
symmetry_axis_items
)
## ======================================================================
def preload_icons() -> previews.ImagePreviewCollection:
    """Preload icons used by the interface."""
    icon_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons")
    collection = previews.new()
    # Load each bundled image under its lookup key.
    for icon_name, file_name in (("GEAR", "gear.png"), ("ICON", "ICON.png")):
        collection.load(icon_name, os.path.join(icon_dir, file_name), "IMAGE")
    return collection
## ======================================================================
class OmniSceneOptProperties(bpy.types.PropertyGroup,
                             OmniSceneOptPropertiesMixin,
                             OmniSceneOptGeneratePropertiesMixin):
    """We're only here to register the mixins through the PropertyGroup"""
    pass
## ======================================================================
def can_run_optimization(scene:Scene) -> bool:
    """Return True when the optimize operator has something to do:
    at least one stage is enabled, and -- in selected-only mode --
    at least one mesh is selected."""
    options = scene.omni_sceneopt
    if options.selected and not len(selected_meshes(scene)):
        return False
    # At least one optimization stage must be switched on.
    return any((
        options.validate,
        options.weld,
        options.decimate,
        options.unwrap,
        options.chop,
        options.generate,
    ))
## ======================================================================
class OBJECT_PT_OmniOptimizationPanel(bpy.types.Panel):
    """"Scene Optimizer" panel in the View3D sidebar ("Omniverse" tab).

    Each draw_* helper renders one optimization stage; draw_operators
    renders the run/export buttons and copies the scene-level settings
    onto the operators it creates.
    """
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Scene Optimizer"
    bl_options = {"DEFAULT_CLOSED"}

    # Shared preview collection holding the addon icons.
    icons = preload_icons()

    @staticmethod
    def _apply_parameters(settings, op:Operator):
        """Copy parameters from the scene-level settings blob to an operator"""
        invalid = {"bl_rna", "name", "rna_type"}
        # Mirror every non-private, non-RNA attribute that the operator
        # also declares.
        for property_name in filter(lambda x: not x[0] == '_' and not x in invalid, dir(settings)):
            if hasattr(op, property_name):
                value = getattr(settings, property_name)
                setattr(op, property_name, value)
        # Operators launched from the panel always report results.
        op.verbose = True

    def draw_validate(self, layout, scene: Scene):
        """Checkbox for the mesh-validation stage."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "validate")

    def draw_weld(self, layout, scene: Scene):
        """Weld stage: toggle plus distance when enabled."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "weld")
        if not scene.omni_sceneopt.weld:
            return
        box.prop(scene.omni_sceneopt, "weld_distance")

    def draw_decimate(self, layout, scene: Scene):
        """Decimate stage: ratio, face-count floor, symmetry, shape keys."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "decimate")
        if not scene.omni_sceneopt.decimate:
            return
        box.prop(scene.omni_sceneopt, "decimate_ratio")
        box.prop(scene.omni_sceneopt, "decimate_min_face_count")
        row = box.row()
        row.prop(scene.omni_sceneopt, "decimate_use_symmetry")
        # Axis selector is greyed out unless symmetry is enabled.
        row = row.row()
        row.prop(scene.omni_sceneopt, "decimate_symmetry_axis", text="")
        row.enabled = scene.omni_sceneopt.decimate_use_symmetry
        box.prop(scene.omni_sceneopt, "decimate_remove_shape_keys")

    def draw_unwrap(self, layout, scene: Scene):
        """UV-unwrap stage: toggle plus island margin when enabled."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "unwrap")
        if not scene.omni_sceneopt.unwrap:
            return
        box.prop(scene.omni_sceneopt, "unwrap_margin")

    def draw_chop(self, layout, scene: Scene):
        """Chop stage: splitting limits from the scene chop settings."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "chop")
        if not scene.omni_sceneopt.chop:
            return
        col = box.column(align=True)
        col.prop(scene.omni_sceneopt_chop, "max_vertices")
        col.prop(scene.omni_sceneopt_chop, "min_box_size")
        col.prop(scene.omni_sceneopt_chop, "max_depth")
        box.prop(scene.omni_sceneopt_chop, "create_bounds")

    def draw_generate(self, layout, scene: Scene):
        """Bounding-mesh generation stage: type and duplicate toggle."""
        box = layout.box()
        box.prop(scene.omni_sceneopt, "generate", text="Generate Bounding Mesh")
        if not scene.omni_sceneopt.generate:
            return
        col = box.column(align=True)
        col.prop(scene.omni_sceneopt, "generate_type")
        col.prop(scene.omni_sceneopt, "generate_duplicate")

    def draw_operators(self, layout, context:Context, scene:Scene):
        """Run/export buttons; settings are copied onto each operator."""
        layout.label(text="")
        row = layout.row(align=True)
        row.label(text="Run Operations", icon="PLAY")
        row.prop(scene.omni_sceneopt, "selected", text="Selected Meshes Only")
        run_text = f"{'Selected' if scene.omni_sceneopt.selected else 'Scene'}"
        col = layout.column(align=True)
        op = col.operator(OBJECT_OT_omni_sceneopt_optimize.bl_idname,
                          text=f"Optimize {run_text}",
                          icon_value=self.icons["GEAR"].icon_id)
        self._apply_parameters(scene.omni_sceneopt, op)
        # Disable the button when no stage is enabled (or nothing selected).
        col.enabled = can_run_optimization(scene)
        col = layout.column(align=True)
        op = col.operator(OBJECT_OT_omni_sceneopt_export.bl_idname,
                          text=f"Export Optimized Scene to USD",
                          icon='EXPORT')
        self._apply_parameters(scene.omni_sceneopt, op)
        col.label(text="Export Options")
        row = col.row(align=True)
        row.prop(scene.omni_sceneopt, "merge")
        row.prop(scene.omni_sceneopt, "export_textures")

    def draw(self, context:Context):
        """Render every stage section, then the operator buttons."""
        scene = context.scene
        layout = self.layout
        self.draw_validate(layout, scene=scene)
        self.draw_weld(layout, scene=scene)
        self.draw_unwrap(layout, scene=scene)
        self.draw_decimate(layout, scene=scene)
        self.draw_chop(layout, scene=scene)
        self.draw_generate(layout, scene=scene)
        self.draw_operators(layout, context, scene=scene)
## ======================================================================
# Registrable classes for this module (unregistered in reverse order).
classes = [
    OBJECT_PT_OmniOptimizationPanel,
    OmniSceneOptProperties,
]
def unregister():
    """Detach the scene-level settings pointer, then unregister every
    class in reverse order, tolerating partially-registered state."""
    try:
        delattr(bpy.types.Scene, "omni_sceneopt")
    except (ValueError, AttributeError, RuntimeError):
        pass
    for registered in reversed(classes):
        try:
            bpy.utils.unregister_class(registered)
        except (ValueError, AttributeError, RuntimeError):
            pass
def register():
    """Register every class, then attach the settings group to the Scene."""
    for addon_class in classes:
        bpy.utils.register_class(addon_class)
    bpy.types.Scene.omni_sceneopt = bpy.props.PointerProperty(type=OmniSceneOptProperties)
| 6,169 | Python | 27.302752 | 94 | 0.677906 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/lod.py | import argparse
import os
import sys
from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
## ======================================================================
def select_only(ob:Object):
    """
    Ensure that only the specified object is selected.
    :param ob: Object to select
    """
    bpy.ops.object.select_all(action="DESELECT")
    ob.select_set(state=True)
    # Also make it the active object so mode switches/operators target it.
    bpy.context.view_layer.objects.active = ob
## --------------------------------------------------------------------------------
def _selected_meshes(context:Context, use_instancing=True) -> List[Mesh]:
    """
    :return: List[Mesh] of all selected mesh objects in active Blender Scene.
    """
    # Directly selected meshes, de-duplicated via a set.
    found = {ob for ob in context.selected_objects if ob.type == "MESH"}
    ## instances support
    if use_instancing:
        for empty in context.selected_objects:
            if empty.type == "EMPTY" and empty.instance_collection:
                found.update(ob for ob in empty.instance_collection.all_objects
                             if ob.type == "MESH")
    return list(found)
## --------------------------------------------------------------------------------
def copy_object_parenting(source_ob:Object, target_ob:Object):
    """
    Copy parenting and Collection membership from a source object.

    :param source_ob: Object whose collections and parent are copied.
    :param target_ob: Object that is re-linked to match the source.
    """
    # Unlink the target from all of its current collections first.
    target_collections = list(target_ob.users_collection)
    for collection in target_collections:
        collection.objects.unlink(target_ob)
    # Then link it wherever the source lives, and copy the parent.
    for collection in source_ob.users_collection:
        collection.objects.link(target_ob)
    target_ob.parent = source_ob.parent
## --------------------------------------------------------------------------------
def find_unique_name(name:str, library:Iterable) -> str:
    """
    Given a Blender library, find a unique name that does
    not exist in it.

    :param name: Candidate base name.
    :param library: Container of existing names (anything supporting ``in``).
    :return: ``name`` unchanged if free, otherwise ``name.NNN`` with the
        first free zero-padded three-digit suffix (Blender-style).
    """
    if not name in library:
        return name
    ## Fix: removed stray debug print that fired on every name collision.
    index = 0
    result_name = f"{name}.{index:03d}"
    while result_name in library:
        index += 1
        result_name = f"{name}.{index:03d}"
    return result_name
## --------------------------------------------------------------------------------
def duplicate_object(ob:Object, token:str="D", weld=True) -> Object:
    """
    Duplicates the specified object, maintaining the same parenting
    and collection memberships.

    The copy (and its mesh datablock) is named "<base>__<token>", where a
    previous "__suffix" on the source name is stripped first; a ".NNN"
    counter is appended if the name is already taken.

    :param ob: Mesh object to duplicate.
    :param token: Suffix token appended to the duplicate's name.
    :param weld: Run a remove-doubles pass on the copy.
    :return: The newly created duplicate object.
    """
    base_name = "__".join((ob.name.rpartition("__")[0] if "__" in ob.name else ob.name, token))
    base_data = "__".join((ob.data.name.rpartition("__")[0] if "__" in ob.data.name else ob.data.name, token))
    if base_name in bpy.data.objects:
        base_name = find_unique_name(base_name, bpy.data.objects)
    ## NOTE(review): the data name is checked against bpy.data.objects, not
    ## bpy.data.meshes -- looks like a copy-paste slip; confirm intent.
    if base_data in bpy.data.objects:
        base_data = find_unique_name(base_data, bpy.data.objects)
    data = ob.data.copy()
    data.name = base_data
    duplicate = bpy.data.objects.new(base_name, data)
    ## Ensure scene collection membership
    ## Prototypes might not have this or be in the view layer
    if not duplicate.name in bpy.context.scene.collection.all_objects:
        bpy.context.scene.collection.objects.link(duplicate)
    select_only(duplicate)
    ## decimate doesn't work on unwelded triangle soups
    if weld:
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.select_all(action="SELECT")
        bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True)
        bpy.ops.object.mode_set(mode="OBJECT")
    return duplicate
## --------------------------------------------------------------------------------
def delete_mesh_object(ob:Object):
    """
    Removes object from the Blender library.

    Both the object and its mesh datablock are deleted.
    """
    # Capture both names before the object reference becomes invalid.
    object_name, mesh_name = ob.name, ob.data.name
    bpy.data.objects.remove(bpy.data.objects[object_name])
    bpy.data.meshes.remove(bpy.data.meshes[mesh_name])
## --------------------------------------------------------------------------------
def decimate_object(ob:Object, token:str=None, ratio:float=0.5,
                    use_symmetry:bool=False, symmetry_axis="X",
                    min_face_count:int=3,
                    create_duplicate=True):
    """Collapse-decimate an object with Blender's Decimate modifier.

    :param ob: Mesh object to decimate.
    :param token: Name suffix for the duplicate (defaults to "DCM").
    :param ratio: Collapse ratio passed to the modifier (0..1).
    :param use_symmetry: Decimate symmetrically about symmetry_axis.
    :param symmetry_axis: Axis for symmetry ("X", "Y" or "Z").
    :param min_face_count: Skip objects with fewer polygons than this.
    :param create_duplicate: Work on a duplicate instead of in place.
    :return: The decimated object (the duplicate if one was created).
    """
    ## Fix: removed unused locals (old_mode / scene) left over from an
    ## earlier revision -- nothing in this function restored the mode.
    token = token or "DCM"
    if create_duplicate:
        target = duplicate_object(ob, token=token)
    else:
        target = ob
    if len(target.data.polygons) < min_face_count:
        print(f"{target.name} is under face count-- not decimating.")
        return target
    ## We're going to use the decimate modifier
    mod = target.modifiers.new("OmniLOD", type="DECIMATE")
    mod.decimate_type = "COLLAPSE"
    mod.ratio = ratio
    mod.use_collapse_triangulate = True
    mod.use_symmetry = use_symmetry
    mod.symmetry_axis = symmetry_axis
    ## The modifier is applied immediately, so the target must be the
    ## sole selected + active object.
    bpy.ops.object.select_all(action="DESELECT")
    target.select_set(True)
    bpy.context.view_layer.objects.active = target
    bpy.ops.object.modifier_apply(modifier=mod.name)
    return target
## --------------------------------------------------------------------------------
def decimate_selected(ratios:List[float]=[0.5], min_face_count=3, use_symmetry:bool=False, symmetry_axis="X", use_instancing=True):
    """Generate one decimated LOD duplicate per ratio for every selected mesh.

    Each mesh is first welded into a temporary duplicate (decimation does
    not work on unwelded triangle soups), then one "LOD<i>" duplicate is
    produced per entry in ``ratios``; the welded temp is deleted afterwards.

    :param ratios: Collapse ratios, each in [0.1, 1.0].
    :param min_face_count: Minimum polygons for decimation to run.
    :param use_symmetry: Decimate symmetrically about symmetry_axis.
    :param symmetry_axis: Axis for symmetry ("X", "Y" or "Z").
    :param use_instancing: Also process meshes inside instance collections.
    """
    assert isinstance(ratios, (list, tuple)), "Ratio should be a list of floats from 0.1 to 1.0"
    for value in ratios:
        assert 0.1 <= value <= 1.0, f"Invalid ratio value {value} -- should be between 0.1 and 1.0"
    ## Fix: removed unused locals (selected_objects / active) and closed
    ## the unbalanced parenthesis in the progress banner.
    selected_meshes = _selected_meshes(bpy.context, use_instancing=use_instancing)
    total = len(selected_meshes) * len(ratios)
    count = 1
    print(f"\n\n[ Generating {total} decimated LOD meshes (minimum face count: {min_face_count}) ]")
    for mesh in selected_meshes:
        welded_duplicate = duplicate_object(mesh, token="welded")
        for index, ratio in enumerate(ratios):
            # Zero-pad the running count to the width of the total.
            padd = len(str(total)) - len(str(count))
            token = f"LOD{index}"
            orig_count = len(welded_duplicate.data.vertices)
            lod_duplicate = decimate_object(welded_duplicate, ratio=ratio, token=token, use_symmetry=use_symmetry,
                                            symmetry_axis=symmetry_axis, min_face_count=min_face_count)
            print(f"[{'0'*padd}{count}/{total}] Decimating {mesh.name} to {ratio} ({orig_count} >> {len(lod_duplicate.data.vertices)}) ...")
            copy_object_parenting(mesh, lod_duplicate)
            count += 1
        delete_mesh_object(welded_duplicate)
    print(f"\n[ Decimation complete ]\n\n")
## --------------------------------------------------------------------------------
def import_usd_file(filepath:str, root_prim:Optional[str]=None, visible_only:bool=False, use_instancing:bool=True):
    """Clear the current scene, then import ``filepath`` via the USD importer.

    :param filepath: USD file to import.
    :param root_prim: Optional prim-path mask; only that subtree is imported.
    :param visible_only: Import only prims marked visible in the stage.
    :param use_instancing: Import instanced prims as collection instances.
    """
    all_objects = bpy.context.scene.collection.all_objects
    names = [x.name for x in all_objects]
    # Leave any edit/pose mode first; fails harmlessly in Object mode.
    try:
        bpy.ops.object.mode_set(mode="OBJECT")
    except RuntimeError:
        pass
    # Remove every existing object so the import starts from a clean scene.
    for name in names:
        ob = bpy.data.objects[name]
        bpy.data.objects.remove(ob)
    # Import geometry and materials only; cameras/lights/rigs are skipped.
    kwargs = {
        "filepath":filepath,
        "import_cameras": False,
        "import_curves": False,
        "import_lights": False,
        "import_materials": True,
        "import_blendshapes": False,
        "import_volumes": False,
        "import_skeletons": False,
        "import_shapes": False,
        "import_instance_proxies": True,
        "import_visible_only": visible_only,
        "read_mesh_uvs": True,
        "read_mesh_colors": False,
        "use_instancing": use_instancing,
        "validate_meshes": True,
    }
    if root_prim:
        ## if you end with a slash it fails
        kwargs["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim
    bpy.ops.wm.usd_import(**kwargs)
    print(f"Imported USD file: {filepath}")
## --------------------------------------------------------------------------------
def export_usd_file(filepath:str, use_instancing:bool=True):
    """Export the whole scene (including hidden objects) to ``filepath``
    with materials, UV maps and a /World default prim.

    :param filepath: Destination USD file.
    :param use_instancing: Export instanced objects as USD instances.
    """
    bpy.ops.wm.usd_export(
        filepath=filepath,
        visible_objects_only=False,
        default_prim_path="/World",
        root_prim_path="/World",
        generate_preview_surface=True,
        export_materials=True,
        export_uvmaps=True,
        merge_transform_and_shape=True,
        use_instancing=use_instancing,
    )
    print(f"Wrote USD file with UVs: {filepath}")
## ======================================================================
if __name__ == "__main__":
    # Blender passes script args after a "--" separator; everything before
    # it belongs to Blender itself.
    real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True, help="Path to input USD file")
    parser.add_argument('--output', type=str, help="Path to output USD file (default is input_LOD.usd)")
    parser.add_argument('--ratios', type=str, required=True, help='Ratios to use as a space-separated string, ex: "0.5 0.2"')
    parser.add_argument('--use_symmetry', action="store_true", default=False, help="Decimate with symmetry enabled.")
    parser.add_argument('--symmetry_axis', default="X", help="Symmetry axis to use (X, Y, or Z)")
    parser.add_argument('--visible_only', action="store_true", default=False, help="Only import visible prims from the input USD file.")
    parser.add_argument('--min_face_count', type=int, default=3, help="Minimum number of faces for decimation.")
    parser.add_argument('--no_instancing', action="store_false", help="Process the prototype meshes of instanced prims.")
    parser.add_argument('--root_prim', type=str, default=None,
                        help="Root Prim to import. If unspecified, the whole file will be imported.")
    # No args at all: show usage instead of an argparse error.
    if not len(real_args):
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args(real_args)
    input_file = os.path.abspath(args.input)
    # Default output: "<input>_LOD.<ext>" next to the input file.
    split = input_file.rpartition(".")
    output_path = args.output or (split[0] + "_LOD." + split[-1])
    # "--ratios" is a single space-separated string of floats.
    ratios = args.ratios
    if not " " in ratios:
        ratios = [float(ratios)]
    else:
        ratios = list(map(lambda x: float(x), ratios.split(" ")))
    use_instancing = not args.no_instancing
    import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only, use_instancing=use_instancing)
    bpy.ops.object.select_all(action="SELECT")
    decimate_selected(ratios=ratios, min_face_count=args.min_face_count, use_symmetry=args.use_symmetry, symmetry_axis=args.symmetry_axis, use_instancing=use_instancing)
    export_usd_file(output_path, use_instancing=use_instancing)
    sys.exit(0)
| 9,912 | Python | 32.94863 | 166 | 0.64659 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/optimize_export.py | import os
import sys
import time
import bpy
from omni_optimization_panel.operators import OmniOverrideMixin
omniover = OmniOverrideMixin()
## ======================================================================
def perform_scene_merge():
    """
    Combine all selected mesh objects into a single mesh.

    Builds (or reuses) a "MergeCollection"/"MergeScene" pair, links every
    selected mesh (plus the children of any instancer objects) into it,
    switches the window to the merge scene, realizes instances, deletes the
    old instancer sources, and finally joins everything into one object via
    ``bpy.ops.object.join()``.

    Side effects: changes ``bpy.context.window.scene``, destroys the original
    instancer objects, and mutates selection/active object. Does nothing
    (prints a notice) when no mesh objects are selected.
    """
    orig_scene = bpy.context.scene  # NOTE(review): captured but never restored/used afterwards
    selected = [x for x in bpy.context.selected_objects if x.type == "MESH"]
    if not len(selected):
        print("-- No objects selected for merge.")
        return
    # reuse the scratch collection/scene when a previous run already created them
    merge_collection = bpy.data.collections.new("MergeCollection") if not "MergeCollection" in bpy.data.collections else bpy.data.collections["MergeCollection"]
    merge_scene = bpy.data.scenes.new("MergeScene") if not "MergeScene" in bpy.data.scenes else bpy.data.scenes["MergeScene"]
    # empty both containers so stale links from earlier runs don't leak in
    for child in merge_scene.collection.children:
        merge_scene.collection.children.unlink(child)
    for ob in merge_collection.all_objects:
        merge_collection.objects.unlink(ob)
    to_merge = set()
    sources = set()   # instancer children; deleted after duplicates_make_real below
    for item in selected:
        to_merge.add(item)
        merge_collection.objects.link(item)
        if not item.instance_type == "NONE":
            # instancers must be renderable for duplicates_make_real to see them
            item.show_instancer_for_render = True
            child_set = set(item.children)
            to_merge |= child_set
            sources |= child_set
    merge_scene.collection.children.link(merge_collection)
    bpy.context.window.scene = merge_scene
    for item in to_merge:
        try:
            merge_collection.objects.link(item)
        except RuntimeError:
            # already linked above — safe to skip
            continue
    ## make sure to remove shape keys and merge modifiers for all merge_collection objects
    for item in merge_collection.all_objects:
        with omniover.override([item], single=True):
            if item.data.shape_keys:
                bpy.ops.object.shape_key_remove(all=True, apply_mix=True)
            for mod in item.modifiers:
                bpy.ops.object.modifier_apply(modifier=mod.name, single_user=True)
    ## turns out the make_duplis_real function swaps selection for you, and
    ## leaves non-dupli objects selected
    bpy.ops.object.select_all(action="SELECT")
    bpy.ops.object.duplicates_make_real()
    ## this invert and delete is removing the old instancer objects
    bpy.ops.object.select_all(action="INVERT")
    for item in sources:
        item.select_set(True)
    bpy.ops.object.delete(use_global=False)
    bpy.ops.object.select_all(action="SELECT")
    ## need an active object for join poll()
    bpy.context.view_layer.objects.active = bpy.context.selected_objects[0]
    bpy.ops.object.join()
## ======================================================================
if __name__ == "__main__":
    # Batch entry point: `blender ... --python optimize_export.py -- <output.usd>`
    # Everything after the standalone "--" is treated as script arguments.
    real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    if not len(real_args):
        print("-- No output path name.")
        sys.exit(-1)
    output_file = real_args[-1]
    ## make sure the add-on is properly loaded
    bpy.ops.preferences.addon_enable(module="omni_optimization_panel")
    start_time = time.time()
    ## pull all attribute names from all mixins for passing on to the optimizer
    sceneopts = bpy.context.scene.omni_sceneopt
    chopopts = bpy.context.scene.omni_sceneopt_chop
    skips = {"bl_rna", "name", "rna_type"}  # RNA bookkeeping attributes, not optimizer options
    optimize_kwargs = {}
    for item in sceneopts, chopopts:
        for key in filter(lambda x: not x.startswith("__") and not x in skips, dir(item)):
            optimize_kwargs[key] = getattr(item, key)
    print(f"optimize kwargs: {optimize_kwargs}")
    if sceneopts.merge:
        ## merge before because of the possibility of objects getting created
        perform_scene_merge()
        # save a .blend snapshot of the merged scene next to the USD output
        bpy.ops.wm.save_as_mainfile(filepath=output_file.rpartition(".")[0]+".blend")
    ## always export whole scene
    optimize_kwargs["selected"] = False
    optimize_kwargs["verbose"] = True
    bpy.ops.omni_sceneopt.optimize(**optimize_kwargs)
    optimize_time = time.time()
    print(f"Optimization time: {(optimize_time - start_time):.2f} seconds.")
    # USD export settings: flat /World root, USD Preview Surface materials, instancing preserved
    export_kwargs = {
        "filepath": output_file,
        "visible_objects_only": False,
        "default_prim_path": "/World",
        "root_prim_path": "/World",
        "material_prim_path": "/World/materials",
        "generate_preview_surface": True,
        "export_materials": True,
        "export_uvmaps": True,
        "merge_transform_and_shape": True,
        "use_instancing": True,
        "export_textures": sceneopts.export_textures,
    }
    bpy.ops.wm.usd_export(**export_kwargs)
    export_time = time.time()
    print(f"Wrote optimized USD file: {output_file}")
    print(f"Export time: {(export_time - optimize_time):.2f} seconds.")
    print(f"Total time: {(export_time - start_time):.2f} seconds.")
    sys.exit(0)
| 4,378 | Python | 30.278571 | 157 | 0.693011 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/uv.py | import argparse
import os
import sys
from typing import *
import bpy
from bpy.types import (Collection, Context, Image, Object, Material,
Mesh, Node, NodeSocket, NodeTree, Scene)
from bpy.props import *
from mathutils import *
## ======================================================================
OMNI_MATERIAL_NAME = "OmniUVTestMaterial"
## ======================================================================
def select_only(ob:Object):
    """
    Deselect everything in the scene, then select *ob* and make it the
    active object of the current view layer.

    :param ob: Object that should end up as the sole selection.
    """
    bpy.ops.object.select_all(action="DESELECT")
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
## --------------------------------------------------------------------------------
def _selected_meshes(context:Context) -> List[Mesh]:
    """
    Filter the current selection down to mesh objects.

    :param context: Blender context whose ``selected_objects`` are examined.
    :return: List[Mesh] of all selected mesh objects in active Blender Scene.
    """
    return list(filter(lambda ob: ob.type == "MESH", context.selected_objects))
## --------------------------------------------------------------------------------
def get_test_material() -> Material:
    """
    Return the shared UV-test material, creating it (and its checker image)
    on first use.

    Creates a 4096x4096 generated color-grid image named "OmniUVGrid" and a
    node-based material named ``OMNI_MATERIAL_NAME`` whose Image Texture node
    feeds the Principled BSDF's Base Color. Both are cached in ``bpy.data``,
    so repeated calls return the same material.

    :return: The Material datablock named ``OMNI_MATERIAL_NAME``.
    """
    image_name = "OmniUVGrid"
    if not image_name in bpy.data.images:
        bpy.ops.image.new(generated_type="COLOR_GRID", width=4096, height=4096, name=image_name, alpha=False)
    if not OMNI_MATERIAL_NAME in bpy.data.materials:
        image = bpy.data.images[image_name]
        material = bpy.data.materials.new(name=OMNI_MATERIAL_NAME)
        ## this creates the new graph
        material.use_nodes = True
        tree = material.node_tree
        shader = tree.nodes['Principled BSDF']
        im_node = tree.nodes.new("ShaderNodeTexImage")
        im_node.location = [-300, 300]  # place texture node left of the shader for readability
        tree.links.new(im_node.outputs['Color'], shader.inputs['Base Color'])
        im_node.image = image
    return bpy.data.materials[OMNI_MATERIAL_NAME]
## --------------------------------------------------------------------------------
def apply_test_material(ob:Object):
    """
    Replace all of *ob*'s material slots with a single slot holding the
    shared UV-test checker material.

    :param ob: Mesh object to receive the test material. Becomes the sole
        selected/active object as a side effect (slot operators require it).
    """
    ##!TODO: Generate it
    select_only(ob)
    # strip every existing slot so the checker is the only material left
    while len(ob.material_slots):
        bpy.ops.object.material_slot_remove()
    material = get_test_material()
    bpy.ops.object.material_slot_add()
    ob.material_slots[0].material = material
## --------------------------------------------------------------------------------
def unwrap_object(ob:Object, uv_layer_name="OmniUV", apply_material=False, margin=0.0):
    """
    Unwraps the target object by creating a fixed duplicate and copying the UVs over
    to the original.

    The duplicate is cleaned (doubles removed, normals made consistent) and
    smart-projected; its UVs are then transferred back onto the untouched
    original via a DATA_TRANSFER modifier, so the original topology is
    preserved. Ends in OBJECT mode with *ob* selected; the duplicate is
    deleted before returning.

    :param ob: Mesh object to unwrap.
    :param uv_layer_name: NOTE(review): currently unused by the body — the new
        UV layer keeps the name produced by the unwrap operators; confirm intent.
    :param apply_material: When True, assign the checker test material to *ob*.
    :param margin: Island margin passed to ``bpy.ops.uv.smart_project``.
    """
    old_mode = bpy.context.mode
    scene = bpy.context.scene
    if not old_mode == "OBJECT":
        bpy.ops.object.mode_set(mode="OBJECT")
    select_only(ob)
    # drop all existing UV layers so the transfer result is the only one
    uv_layers = list(ob.data.uv_layers)
    for layer in uv_layers:
        ob.data.uv_layers.remove(layer)
    # give the original a placeholder projection so it has a UV layer to receive data
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.uv.cube_project()
    bpy.ops.object.mode_set(mode="OBJECT")
    duplicate = ob.copy()
    duplicate.data = ob.data.copy()
    scene.collection.objects.link(duplicate)
    ## if the two objects are sitting on each other it gets silly,
    ## so move the dupe over by double it's Y bounds size
    bound_size = Vector(duplicate.bound_box[0]) - Vector(duplicate.bound_box[-1])
    duplicate.location.y += bound_size.y
    # clean the duplicate's topology so the unwrap produces sane islands
    select_only(duplicate)
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True)
    bpy.ops.mesh.normals_make_consistent(inside=True)
    bpy.ops.object.mode_set(mode="OBJECT")
    # unwrap the cleaned duplicate: smart project, normalize island scale, pack
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.uv.select_all(action='SELECT')
    bpy.ops.uv.smart_project(island_margin=margin)
    bpy.ops.uv.average_islands_scale()
    bpy.ops.uv.pack_islands(margin=0)
    bpy.ops.object.mode_set(mode="OBJECT")
    ## copies from ACTIVE to all other SELECTED
    select_only(ob)
    ## This is incredibly broken
    # bpy.ops.object.data_transfer(data_type="UV")
    ## snap back now that good UVs exist; the two meshes need to be in the same
    ## position in space for the modifier to behave correctly.
    duplicate.matrix_world = ob.matrix_world.copy()
    # transfer UV loop data from the duplicate back to the original by nearest normal
    modifier = ob.modifiers.new(type="DATA_TRANSFER", name="OmniBake_Transfer")
    modifier.object = duplicate
    modifier.use_loop_data = True
    modifier.data_types_loops = {'UV'}
    modifier.loop_mapping = 'NEAREST_NORMAL'
    select_only(ob)
    bpy.ops.object.modifier_apply(modifier=modifier.name)
    if apply_material:
        apply_test_material(ob)
    bpy.data.objects.remove(duplicate)
## --------------------------------------------------------------------------------
def unwrap_selected(uv_layer_name="OmniUV", apply_material=False, margin=0.0):
    """
    Unwrap every selected mesh object with :func:`unwrap_object`, printing a
    progress counter, then restore the original selection and active object.

    :param uv_layer_name: Forwarded to :func:`unwrap_object`.
    :param apply_material: When True, apply the checker test material to each mesh.
    :param margin: Island margin forwarded to the smart-project unwrap.
    """
    old_mode = bpy.context.mode
    selected_objects = list(bpy.context.selected_objects)
    active = bpy.context.view_layer.objects.active
    selected_meshes = _selected_meshes(bpy.context)
    total = len(selected_meshes)
    if not total:
        # nothing to unwrap; avoid IndexError on selection restore below
        print("\n\n[ No mesh objects selected -- nothing to unwrap ]")
        return
    print(f"\n\n[ Unwrapping {total} meshes ]")
    for count, mesh in enumerate(selected_meshes, start=1):
        padd = len(str(total)) - len(str(count))  # zero-pad so the counter lines up
        print(f"[{'0'*padd}{count}/{total}] Unwrapping {mesh.name}...")
        # BUGFIX: forward the boolean flag and the margin. The original passed the
        # *function* apply_test_material (always truthy) as apply_material, so the
        # test material was applied regardless of the flag, and margin was dropped.
        unwrap_object(mesh, uv_layer_name=uv_layer_name, apply_material=apply_material, margin=margin)
    print(f"\n[ Unwrapping complete ]\n\n")
    # restore the caller's selection, active object, and edit mode
    select_only(selected_objects[0])
    for item in selected_objects[1:]:
        item.select_set(True)
    bpy.context.view_layer.objects.active = active
    if old_mode == "EDIT_MESH":
        bpy.ops.object.mode_set(mode="EDIT")
## --------------------------------------------------------------------------------
def import_usd_file(filepath:str, root_prim=None, visible_only=False):
    """
    Clear the current scene and import a USD file into it.

    Every existing object in the scene collection is deleted first, so the
    resulting scene contains only the imported prims. Cameras, lights,
    curves, materials, and other non-mesh data are deliberately skipped.

    :param filepath: Path of the USD file to import.
    :param root_prim: Optional prim path mask; only that subtree is imported.
        A trailing slash is stripped (the importer fails with one).
    :param visible_only: When True, import only visible prims.
    """
    all_objects = bpy.context.scene.collection.all_objects
    names = [x.name for x in all_objects]
    # mode_set fails (RuntimeError) when there is no active object; ignore that
    try:
        bpy.ops.object.mode_set(mode="OBJECT")
    except RuntimeError:
        pass
    # wipe the scene so the import result is the only content
    for name in names:
        ob = bpy.data.objects[name]
        bpy.data.objects.remove(ob)
    kwargs = {
        "filepath":filepath,
        "import_cameras": False,
        "import_curves": False,
        "import_lights": False,
        "import_materials": False,
        "import_blendshapes": False,
        "import_volumes": False,
        "import_skeletons": False,
        "import_shapes": False,
        "import_instance_proxies": True,
        "import_visible_only": visible_only,
        "read_mesh_uvs": False,
        "read_mesh_colors": False,
    }
    if root_prim:
        ## if you end with a slash it fails
        kwargs["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim
    bpy.ops.wm.usd_import(**kwargs)
    print(f"Imported USD file: {filepath}")
## --------------------------------------------------------------------------------
def export_usd_file(filepath:str):
    """
    Write the whole current scene (including hidden objects) to a USD file,
    rooted at /World with transforms and shapes merged.

    :param filepath: Destination path for the exported USD file.
    """
    bpy.ops.wm.usd_export(
        filepath=filepath,
        visible_objects_only=False,
        default_prim_path="/World",
        root_prim_path="/World",
        merge_transform_and_shape=True,
    )
    print(f"Wrote USD file with UVs: {filepath}")
## ======================================================================
if __name__ == "__main__":
    # Batch entry point: `blender ... --python uv.py -- --input in.usd [options]`
    # Everything after the standalone "--" belongs to this script.
    real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True, help="Path to input USD file")
    parser.add_argument('--output', type=str, help="Path to output USD file (default is input_UV.usd)")
    parser.add_argument('--margin', type=float, default=None, help="Island margin (default is 0.01)")
    parser.add_argument('--root_prim', type=str, default=None,
                        help="Root Prim to import. If unspecified, the whole file will be imported.")
    parser.add_argument('--add_test_material', action="store_true")
    parser.add_argument('--visible_only', action="store_true", default=False)
    if not len(real_args):
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args(real_args)
    input_file = os.path.abspath(args.input)
    # derive the default output name: foo.usd -> foo_UV.usd
    split = input_file.rpartition(".")
    output_path = args.output or (split[0] + "_UV." + split[-1])
    # NOTE(review): effective default is 0.0, not the 0.01 the --margin help text claims — confirm which is intended
    margin = args.margin or 0.0
    import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only)
    bpy.ops.object.select_all(action="SELECT")
    unwrap_selected(apply_material=args.add_test_material, margin=margin)
    export_usd_file(output_path)
    sys.exit(0)
| 8,005 | Python | 29.674329 | 103 | 0.639975 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/geo_nodes.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
from mathutils import Vector
# the type of geometry node tree to create:
# geometry nodes is currently under development, so feature set is not yet at a stage to be fully utilized
# this puts in place a framework for more customizable and easily implementable optimizations in the future
# geometry nodes is a modifier, but unlike "DECIMATE" or "REMESH", geometry nodes can be customized with a wide array of options.
# similar to other modifiers, if there are multiple objects with the same geo node modifier, the calculations are done independently for each object.
# currently this setup can be used for generating convex hulls, creating bounding box meshes, and subdividing geometry.
# (GeometryNodeConvexHull, GeometryNodeBoundBox, GeometryNodeSubdivisionSurface)
# as the nodes options in blender expand, A lot more can be done wit it.
# more on geometry nodes: https://docs.blender.org/manual/en/latest/modeling/geometry_nodes/index.html#geometry-nodes
def new_GeometryNodes_group():
    """
    Create and return an empty Geometry Nodes tree usable by a NODES modifier.

    The tree contains only a group-input and group-output node with their
    geometry sockets linked, so on its own it passes geometry through
    unchanged. Additional nodes are spliced in later (see geoTreeBasic) to
    actually modify the geometry.

    :return: The new ``GeometryNodeTree`` datablock.
    """
    node_group = bpy.data.node_groups.new('GeometryNodes', 'GeometryNodeTree') # this is the container for the nodes
    inNode = node_group.nodes.new('NodeGroupInput') # this is the input node and gives the geometry to be modified.
    inNode.outputs.new('NodeSocketGeometry', 'Geometry') # gets reference to the output socket on the input node
    outNode = node_group.nodes.new('NodeGroupOutput') # this is the output node and returns the geometry that modified.
    outNode.inputs.new('NodeSocketGeometry', 'Geometry') # gets reference to the input socket on the output node
    node_group.links.new(inNode.outputs['Geometry'], outNode.inputs['Geometry']) # makes the link between the two nodes at the given sockets
    inNode.location = Vector((-1.5*inNode.width, 0)) # sets the position of the node in 2d space so that they are readable in the GUI
    outNode.location = Vector((1.5*outNode.width, 0))
    return node_group # now that there is a basic node tree, additional nodes can be inserted into the tree to modify the geometry
def geoTreeBasic(geo_tree, nodes, group_in, group_out, geo_type, attribute):
    """
    Splice a single geometry node between the tree's input and output nodes.

    Supported node types include GeometryNodeConvexHull, GeometryNodeBoundBox,
    and GeometryNodeSubdivisionSurface.

    :param geo_tree: The GeometryNodeTree to modify.
    :param nodes: The tree's node collection (``geo_tree.nodes``).
    :param group_in: The tree's group-input node.
    :param group_out: The tree's group-output node.
    :param geo_type: Node type identifier string to create.
    :param attribute: Subdivision level; only used for the subdivision-surface node.
    """
    new_node = nodes.new(geo_type) # create a new node of the specified type
    # insert that node between the input and output node
    geo_tree.links.new(group_in.outputs['Geometry'], new_node.inputs[0])
    geo_tree.links.new(new_node.outputs[0], group_out.inputs['Geometry'])
    if geo_type == 'GeometryNodeSubdivisionSurface': # subsurf node requires an additional input value
        geo_tree.nodes["Subdivision Surface"].inputs[1].default_value = attribute
def geoNodes(objects, geo_type, attribute):
    """
    Attach one shared Geometry Nodes tree of the given type to every object.

    All objects receive a NODES modifier pointing at the same tree; the
    modifier is still evaluated independently per object.

    :param objects: Iterable of objects to receive the modifier.
    :param geo_type: Geometry node type to insert (e.g. 'GeometryNodeConvexHull').
    :param attribute: Extra input value for nodes that need one (subdivision level).
    """
    # TODO: When Geo Nodes develops further, hopefully all other modifier ops can be done through nodes
    # (currently does not support decimate/remesh)
    modifier = 'NODES'
    # create empty tree - this tree is a container for nodes
    geo_tree = new_GeometryNodes_group()
    # add tree to all objects
    for obj in objects: # for each object in selected objects, add the desired modifier and adjust its properties
        mod = obj.modifiers.new(name = modifier, type=modifier) # set name of modifier based on its type
        mod.node_group = geo_tree #bpy.data.node_groups[geo_tree.name]
    # alter tree - once the default tree has been created, additional nodes can be added in
    nodes = geo_tree.nodes
    group_in = nodes.get('Group Input') # keep track of the input node
    group_out = nodes.get('Group Output') # keep track of the output node
    geoTreeBasic(geo_tree, nodes, group_in, group_out, geo_type, attribute) # adds node to make modifications to the geometry
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/run_ops_wo_update.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.ops import _BPyOpsSubModOp
view_layer_update = _BPyOpsSubModOp._view_layer_update
def open_update():
    """
    Suppress Blender's per-operator view-layer update by monkey-patching
    ``_BPyOpsSubModOp._view_layer_update`` with a no-op. Call close_update()
    afterwards to restore the real update function.
    """
    # blender operator calls update the scene each time after running
    # updating the scene can take a long time, esp for large scenes. So we want to delay update until we are finished
    # there is not an official way to suppress this update, so we need to use a workaround
    def dummy_view_layer_update(context): # tricks blender into thinking the scene has been updated and instead passes
        pass
    _BPyOpsSubModOp._view_layer_update = dummy_view_layer_update
def close_update(): # in the end, still need to update scene, so this manually calls update
    # Restore the real view-layer update hook saved at module import time.
    _BPyOpsSubModOp._view_layer_update = view_layer_update | 1,619 | Python | 42.783783 | 118 | 0.731316
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/chop.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy, bmesh
from mathutils import Vector
import time
from . import blender_class, run_ops_wo_update, select_mesh, bounds, utils, fix_mesh
class Chop(blender_class.BlenderClass):
# settings for GUI version only
bl_idname = "chop.scene"
bl_label = "Chop Scene"
bl_description = "Recursively split scene in half until reaches a desired threshold"
bl_options = {"REGISTER", "UNDO"}
print_results = True
    def __init__(self):
        """Build the default attribute dictionary consumed by execute() when no overrides are passed."""
        self._default_attributes = dict(
            merge= True, # optionally merge meshes in each split chunk after split recursion is complete
            cut_meshes=True, # split all meshes intersecting each cut plane
            # Cannot set this very low since split creates new triangles(if quads...)
            max_vertices= 100000, # a vertex threshold value, that once a chunk is below, the splitting terminates
            min_box_size= 1, # a size threshold that once a chunk is smaller than, the splitting terminates
            max_depth= 16, # a recursion depth threshold that once is reached, the splitting terminates
            print_updated_results= True, # print progress to console
            create_bounds = False, # create new bounds objects for displaying the cut boundaries. Mostly useful for GUI
            selected_only = False # uses only objects selected in scene. For GUI version only
        )
    def execute(self, in_attributes=None):
        """
        Entry point: resolve attributes, select the scene's meshes, and run the
        recursive split, printing timing/statistics along the way.

        :param in_attributes: Optional overrides merged over the defaults by
            ``get_attributes`` (inherited from BlenderClass).
        :return: ``{'FINISHED'}`` in the Blender-operator convention.
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        Chop.print_results = attributes["print_updated_results"]
        Stats.resetValues()
        Stats.startTime = time.time()
        then = Stats.startTime
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected): # run only if there are selected mesh objects in the scene
            # NOTE(review): self.split is defined elsewhere in this class (not visible here)
            self.split(context, selected, attributes) # starts the splitting process
            now = time.time() # time after it finished
            Stats.printTermination()
            if attributes['merge']:
                Stats.printMerge()
            print("TIME FOR SPLIT: ", round(now-then, 3))
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}
def getSplitPlane(self, obj_details): # the cut plane used in split. Aligned perpendicular to the longest dimension of the bounds
# find longest side
var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
max_dim = var.get(max(var)) # get the axis name of maximum of the three dims
# adjust the plane normal depending on the axis with the largest dimension
if max_dim == "x":
normal = [1,0,0,0]
axis = "x"
elif max_dim == "y":
normal = [0,1,0,0]
axis = "y"
else:
normal = [0,0,1,0]
axis = "z"
# get data for sub-boxes
midPt = [obj_details.x.mid,obj_details.y.mid,obj_details.z.mid] # get center of bounds to be able to create the next set of bounds
return midPt, normal, axis
    def getSplitBoxes(self, obj_details, attributes): # get the bounds for the two successive splits during recursion
        """
        Halve the given bounds along its longest axis and return the two
        sub-bounds used by the next level of recursion.

        :param obj_details: Bounds info with per-axis min/max/mid/distance.
        :param attributes: Attribute dict; ``create_bounds`` optionally spawns
            display objects for the new bounds.
        :return: Tuple (box_0, box_1) of bounds objects from ``bounds.bounds``.
        """
        # find longest side
        var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
        mx = var.get(max(var)) # get the axis name of maximum of the three dims
        mid_0 = [obj_details.x.max, obj_details.y.max, obj_details.z.max] # the longest axis value will be replaced with a mid point
        high = mid_0.copy() # maximum value of bounds
        mid_1 = [obj_details.x.min, obj_details.y.min, obj_details.z.min] # the longest axis value will be replaced with a mid point
        low = mid_1.copy() # minimum value of bounds
        midPt = [obj_details.x.mid,obj_details.y.mid,obj_details.z.mid] # center point of previous bounds
        # replace the mid point of new bounds depending on the axis with the largest dimension
        if mx == "x":
            mid_0[0] = midPt[0]
            mid_1[0] = midPt[0]
        elif mx == "y":
            mid_0[1] = midPt[1]
            mid_1[1] = midPt[1]
        else:
            mid_0[2] = midPt[2]
            mid_1[2] = midPt[2]
        # Create sub-bounds. These are the two halves of the previous bounds, split along the longest axis of the bounds
        # only need two points to calculate bounds, uses the maximum/minimum value point (high/low) and the set mid point (mid_0/mid_1)
        coords_1 = [high[:], mid_1[:]] # put the points in a list
        box_0 = bounds.bounds(coords_1) # gather attributes of new bounds (max, min, mid, and dim of each axis)
        coords_0 = [low[:], mid_0[:]] # put the points in a list
        box_1 = bounds.bounds(coords_0) # gather attributes of new bounds (max, min, mid, and dim of each axis)
        if attributes["create_bounds"]: # optionally create display objects for viewing bounds
            bounds.boundsObj(coords_1)
            bounds.boundsObj(coords_0)
        return box_0, box_1
def boxTooSmall(self, obj_details, attributes): # returns whether bounds of current occurrences is too small
# find longest sides
dims = [obj_details.x.distance, obj_details.y.distance, obj_details.z.distance] # get the dimensions of each axis of the bounds
if max(dims) < attributes["min_box_size"]: # if the maximum of the three dims is less than the specified min_box_size
return True # continue recursion
return False # end recursion
    def parentEmpty(self, part, children): # for parenting new created objects from split
        """
        Replace a split source object with an empty of the same name and
        re-parent the split halves under it, preserving the hierarchy.

        :param part: Original object that was split; it is deleted here.
        :param children: New objects produced by the split.
        """
        parent_name = part.name # part is the original object that was split. keep track of its name
        parent_col = part.users_collection[0] # track the collection of the part as well
        parent_parent = part.parent # if the part object has an existing parent track that too
        bpy.data.objects.remove(part, do_unlink=True) # now that that info is stored, part can be deleted and removed from the scene
        # an empty will take the place of the original part
        obj = bpy.data.objects.new(parent_name, None) # create an empty object that will inherit the name of part
        parent_col.objects.link(obj) # connect this object to part's collection
        obj.parent = parent_parent # make this empty the child of part's parent
        for child in children: # make the newly created objects from the split operation children of the empty
            child.parent = obj
    def newObj(self, bm, parent): # create a new object for each half of a split
        """
        Materialize one half of a split as a new scene object.

        :param bm: bmesh holding the half's geometry; freed here after use.
        :param parent: Original object being split; copied to inherit
            materials and other object-level data.
        :return: The newly linked object.
        """
        obj = parent.copy() # parent is the original mesh being split. this contains data such as material,
        # so it is easiest to start with a copy of the object
        obj.data = parent.data.copy() # need to copy the object mesh data separately
        # TODO: obj.animation_data = sibling.animation_data.copy() # not sure if animation data should be copied. This would do that.
        parent.users_collection[0].objects.link(obj)
        # apply bmesh to new mesh
        bm.to_mesh(obj.data) # Once the new object is formed, bmesh data created during the split process can be transferred to the new obj
        bm.free() # always do this when finished with a bmesh
        return obj
    def checkIntersect(self, obj, axis, center): # for checking cut plane intersection while splitting
        """
        Test which side(s) of the cut plane an object's world-space bounds occupy.

        :param obj: Object whose bounding box is tested (vertices are not examined).
        :param axis: "x", "y", or "z" — the axis the cut plane is perpendicular to.
        :param center: Point on the cut plane (world space).
        :return: (intersect_0, intersect_1) — True when the bounds extend past
            the plane on the positive / negative side respectively; both True
            means the object straddles the plane and must be cut.
        """
        # intersection is checked by testing the objects bounds rather than each vertex individually
        obj_details = bounds.bounds([obj.matrix_world @ Vector(v) for v in obj.bound_box])
        tolerance = .01 # a tolerance value for intersection to prevent cutting a mesh that is in line with cut plane
        # TODO: may need to have user control over this tolerance, or define it relative to total scene size.
        # check for intersection depending on the direction of the cutting
        # boolean is created for both sides of cut plane.
        # rather than a single boolean checking for intersection, return if mesh is on one or both sides of cut plane.
        if axis == "x":
            intersect_0 = obj_details.x.max > center[0] + tolerance
            intersect_1 = obj_details.x.min < center[0] - tolerance
        elif axis == "y":
            intersect_0 = obj_details.y.max > center[1] + tolerance
            intersect_1 = obj_details.y.min < center[1] - tolerance
        elif axis == "z":
            intersect_0 = obj_details.z.max > center[2] + tolerance
            intersect_1 = obj_details.z.min < center[2] - tolerance
        return intersect_0, intersect_1
    def doSplit(self, partsToSplit, planeOrigin, planeNormal, axis): # perform the actual split
        """
        Split every straddling object in *partsToSplit* along the cut plane and
        partition the results into the two half-spaces.

        Objects entirely on one side are passed through unchanged; objects that
        straddle the plane are bisected twice with ``bmesh.ops.bisect_plane``
        (once keeping each side), the halves become new objects, and the source
        object is replaced by an empty (see parentEmpty).

        :param partsToSplit: Objects of the current chunk.
        :param planeOrigin: World-space point on the cut plane.
        :param planeNormal: World-space plane normal (4 components).
        :param axis: "x"/"y"/"z" axis the plane is perpendicular to.
        :return: (occurrences_0, occurrences_1) — object lists for the positive
            and negative half-space, each cleaned of empty meshes.
        """
        # split separates the occurrences into two. those halves need to be stored in their own new lists
        occurrences_0 = []
        occurrences_1 = []
        for part in partsToSplit: # iterate over occurrences
            intersect_0, intersect_1 = self.checkIntersect(part, axis, planeOrigin) # only perform split if object intersects the cut plane.
            if intersect_0 and intersect_1: # if mesh has vertices on both sides of cut plane
                Stats.printPart(part) # print the part being processed
                co = part.matrix_world.inverted() @ Vector(planeOrigin) # splitting takes place relative to object space not world space.
                normDir = part.matrix_world.transposed() @ Vector(planeNormal) # need to adjust plane origin and normal for each object.
                bmi = bmesh.new() # 'bmesh' in Blender is data type that contains the 'edit mesh' for an object
                # It allows for much greater control over mesh properties and operations
                bmi.from_mesh(part.data) # attach the mesh to the bmesh container so that changes can be made
                bmo = bmi.copy() # must use two separate bmesh objects because two new occurrence lists are being written to
                # bisect_plane is how to split a mesh using a plane. It can only save one side of the split result at a time, so it is done twice
                # save inner mesh data
                bmesh.ops.bisect_plane(bmi,
                    geom=bmi.verts[:]+bmi.edges[:]+bmi.faces[:], # the geometry to be split, which is the first bmesh just created
                    dist=0.0001, # a threshold value for the split to check vertex proximity to cut plane
                    # TODO: may need to have user control over this tolerance, or define it relative to total scene size.
                    plane_co=co, # the cut plane
                    plane_no=(normDir.x,normDir.y,normDir.z), # the plane normal direction
                    clear_inner=True, # remove the geometry on the positive side of the cut plane
                    clear_outer=False) # keep the geometry on the negative side of the cut plane
                # save outer mesh data
                bmesh.ops.bisect_plane(bmo,
                    geom=bmo.verts[:]+bmo.edges[:]+bmo.faces[:], # the geometry to be split, which is the second bmesh just created
                    dist=0.0001, # a threshold value for the split to check vertex proximity to cut plane
                    plane_co=co, # the cut plane
                    plane_no=(normDir.x,normDir.y,normDir.z), # the plane normal direction
                    clear_inner=False, # keep the geometry on the positive side of the cut plane
                    clear_outer=True) # remove the geometry on the negative side of the cut plane
                # make the bmesh the object's mesh
                # need to transfer the altered bmesh data back to the original mesh
                children = [] # create a list that will contain the newly created split meshes
                obj = self.newObj(bmi, part) # create a new mesh object to attach the inner bmesh data to
                occurrences_0.append(obj) # add new object to inner occurrence list
                children.append(obj) # add new object to children list
                obj2 = self.newObj(bmo, part) # create a new mesh object to attach the outer bmesh data to
                occurrences_1.append(obj2) # add new object to outer occurrence list
                children.append(obj2) # add new object to children list
                self.parentEmpty(part, children) # use children list to fix object parents
                if Chop.print_results:
                    utils.printClearLine() # clear last printed line before continuing
            # if there are vertices on only one side of the cut plane there is nothing to split so place the existing mesh into the appropriate list
            elif intersect_0:
                occurrences_0.append(part) # add object to inner occurrence list
                part.select_set(False) # deselect object
            else:
                occurrences_1.append(part )# add object to outer occurrence list
                part.select_set(False) # deselect object
        # bisect_plane can create empty objects, or zero vert count meshes. remove those objects before continuing
        occurrences_0 = fix_mesh.deleteEmptyXforms(occurrences_0) # update occurrences_0
        occurrences_1 = fix_mesh.deleteEmptyXforms(occurrences_1) # update occurrences_1
        return occurrences_0, occurrences_1
    def doMerge(self, partsToMerge): # for merging individual meshes within each chunk after split is complete
        """
        Join all meshes of a finished chunk into a single object.

        Uses a copied operator context so the scene selection does not have to
        be changed, and suppresses per-operator view-layer updates for speed.
        Accumulates elapsed time into ``Stats.mergeTime``.

        :param partsToMerge: Objects of one chunk; joined into the first entry.
        """
        if len(partsToMerge) > 1: # if there is only one mesh or zero meshes, there is no merging to do
            then = time.time() # time at the beginning of merge
            ctx = bpy.context.copy() #making a copy of the current context allows for temporary modifications to be made
            # in this case, the temporary context is switching the active and selected objects
            # this allows avoiding needing to deselect and reselect after the merge
            ctx['selected_editable_objects'] = partsToMerge # set the meshes in the chunk being merged to be selected
            ctx['active_object'] = partsToMerge[0] # set active object. Blender needs active object to be the selected object
            parents = [] # NOTE(review): collected but never read afterwards — possibly vestigial
            for merge in partsToMerge:
                parents.append(merge.parent)
            run_ops_wo_update.open_update() # allows for operators to be run without updating scene
            bpy.ops.object.join(ctx) # merges all parts into one
            run_ops_wo_update.close_update() # must always call close_update if open_update is called
            now = time.time() # time after merging is complete
            Stats.mergeTime += (now-then) # add time to total merge time to get an output of total time spent on merge
def recursiveSplit(self, occurrences, attributes, obj_details, depth): # runs checks before each split, and handles recursion
if not occurrences: # if there are no occurrences, end recursion
Stats.printPercent(depth, True) # optionally print results before ending recursion
return
# Check for maximum recursive depth has been reached to terminate and merge
if attributes["max_depth"] != 0 and depth >= attributes["max_depth"]: # if max recursion depth is 0, the check will be ignored
Stats.chunks += 1 # each split creates a new chunk, adds only chunks from completed recursive branches
Stats.printMsg_maxDepth += 1 # "REACHED MAX DEPTH"
Stats.printPercent(depth) # optionally print results before ending recursion
if attributes["merge"]: # if merging, do so now
self.doMerge(occurrences)
return
# Check for vertex count threshold and bbox size to terminate and merge
vertices = utils.getVertexCount(occurrences)
if self.boxTooSmall(obj_details, attributes) or vertices < attributes["max_vertices"]:
Stats.chunks += 1 # each split creates a new chunk, adds only chunks form completed recursive branches
if vertices < attributes["max_vertices"]:
Stats.printMsg_vertexGoal += 1 # "REACHED VERTEX GOAL"
elif self.boxTooSmall(obj_details, attributes): # or vertices < attributes["max_vertices"]:
Stats.printMsg_boxSize += 1 # "BOX TOO SMALL"
Stats.printPercent(depth) # optionally print results before ending recursion
if attributes["merge"]: # if merging, do so now
self.doMerge(occurrences)
return
# Keep subdividing
planeOrigin, planeNormal, axis = self.getSplitPlane(obj_details) # calculate components for cutter object
# Do the split and merge
if attributes["cut_meshes"]: # splits meshes in scene based on cut plane and separates them into two halves
occurrences_0, occurrences_1 = self.doSplit(occurrences, planeOrigin, planeNormal, axis)
depth += 1 # if split has taken place, increment recursive depth count
# Recurse. Get bounding box for each half.
box_0, box_1 = self.getSplitBoxes(obj_details, attributes)
self.recursiveSplit(occurrences_0, attributes, box_0, depth)
self.recursiveSplit(occurrences_1, attributes, box_1, depth)
def split(self, context, selected, attributes): # preps original occurrences and file for split
occurrences = selected # tracks the objects for each recursive split
# on the first split, this is the selected objects.
# Initial bbox includes all original occurrences
boundsCombined = bounds.boundingBox(occurrences) # gets the combined bounds coordinates of the occurrences
obj_details = bounds.bounds(boundsCombined) # create a dictionary of specific statistics for each axis of bounds
if attributes["create_bounds"]: # optionally create a bounds object for each recursive split.
target_coll_name = "BOUNDARIES" # put these objects in a separate collection to keep scene organized
target_coll = bpy.data.collections.new(target_coll_name) # create a new collection in the master scene collection
context.scene.collection.children.link(target_coll) # link the newly created collection to the scene
bounds.boundsObj(boundsCombined) # create bounds obj
depth = 0 # tracks recursive depth
print("-----SPLIT HAS BEGUN-----")
Stats.printPercent(depth) # for optionally printing progress of operation
self.recursiveSplit(occurrences, attributes, obj_details, depth) # begin recursive split
class Stats():
    """Counters and console reporting shared across the recursive split.

    All state is class-level, so the counters behave as module-wide globals;
    call resetValues() before each run.
    """
    startTime = 0 # start time of script execution, used for estimating remaining time
    printMsg_vertexGoal = 0 # times recursion terminated because the vertex goal was reached
    printMsg_boxSize = 0 # times recursion terminated because the box was too small
    printMsg_maxDepth = 0 # times recursion terminated because max recursive depth was exceeded
    percent_worked = 0 # share of processed scene volume that contained objects
    percent_empty = 0 # share of processed scene volume that was empty
    chunks = 0 # number of chunks created by the split; each chunk may contain multiple meshes
    mergeTime = 0 # total time spent merging chunks
    @staticmethod
    def resetValues():
        """Reset all counters; call before every run."""
        Stats.startTime = 0
        Stats.printMsg_vertexGoal = 0
        Stats.printMsg_boxSize = 0
        Stats.printMsg_maxDepth = 0
        Stats.percent_worked = 0
        Stats.percent_empty = 0
        Stats.chunks = 0
        Stats.mergeTime = 0
    # for printing progress statistics to console
    @staticmethod
    def printTermination():
        """Print how many branches ended for each termination reason."""
        print("Reached Vertex Goal: ", Stats.printMsg_vertexGoal,
              " Box Too Small: ", Stats.printMsg_boxSize,
              " Exceeded Max Depth: ", Stats.printMsg_maxDepth)
        print("chunks: ", Stats.chunks) # total number of chunks created from split
    @staticmethod
    def printMerge():
        """Print the total time the merging took."""
        print("merge time: ", Stats.mergeTime)
    @staticmethod
    def printPart(part):
        """Log the latest part being split (debug aid if Blender crashes)."""
        if Chop.print_results:
            print("current part being split: ", part)
    @staticmethod
    def printPercent(depth, empty=False):
        """Print progress of the recursive split.

        Each chunk at `depth` represents 1/2^depth of the total bounds volume;
        empty chunks are tracked separately so the time estimate is based only
        on volume that actually contains geometry.
        """
        if Chop.print_results:
            if depth != 0:
                if empty: # generated chunk contains no geometry, it is considered empty
                    Stats.percent_empty += 100/pow(2, depth)
                else: # depth is non-zero here, so the 2^depth fraction is well defined
                    Stats.percent_worked += 100/pow(2, depth)
                total = Stats.percent_empty + Stats.percent_worked # percent of bounds volume processed, empty or not
                occupied = 100 - Stats.percent_empty # share of volume that contains geometry
                # FIX: guard against ZeroDivisionError when every chunk processed
                # so far was empty (percent_empty == 100).
                percent_real = Stats.percent_worked/occupied*100 if occupied else 0.0
                # timer
                now = time.time() # current time elapsed in operation
                if percent_real > 0: # at least one occupied chunk has been processed
                    est_comp_time = f"{((now-Stats.startTime)/percent_real*100 - (now-Stats.startTime)):1.0f}" # extrapolate remaining time from work done so far
                else:
                    est_comp_time = "Unknown"
                utils.printClearLine()
                utils.printClearLine()
                # print results to console
                print("\033[93m" + "Percent_empty: ", f"{Stats.percent_empty:.1f}", "%, Percent_worked: ", f"{Stats.percent_worked:.1f}",
                      "%, Total: ", f"{total:.1f}", "%, Real: ", f"{percent_real:.1f}", "%")
                print("Estimated time remaining: ", est_comp_time, "s, Depth: ", depth, "\033[0m")
            else:
                print() # empty lines to prep for the progress printing
                print() # empty lines to prep for the progress printing
| 23,807 | Python | 59.580153 | 168 | 0.656278 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/fix_mesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import bmesh
import time
from functools import reduce
from . import blender_class, run_ops_wo_update, select_mesh, utils
class FixMesh(blender_class.BlenderClass):
    """Repair mesh objects in the scene.

    Removes degenerate geometry (zero-area faces, zero-length edges),
    optionally merges vertices by distance, and optionally rebuilds custom
    split normals afterwards.
    """
    # settings for GUI version only
    bl_idname = "fix.mesh"
    bl_label = "Fix Mesh"
    bl_description = "fix bad meshes in the scene"
    bl_options = {"REGISTER", "UNDO"}
    def __init__(self):
        # Defaults merged with caller-supplied attributes via BlenderClass.get_attributes().
        self._default_attributes = dict(
            selected_only=False, # uses only objects selected in scene. For GUI version only
            fix_bad_mesh = True, # used to remove zero area faces and zero length edges based on the 'dissolve_threshold'
            dissolve_threshold = 0.08, # threshold value for 'fix_bad_mesh'
            merge_vertex = False, # merge connected and disconnected vertices of a mesh by a distance threshold
            merge_threshold = 0.01, # distance value to use for merge_vertex
            remove_existing_sharp = True, # when removing zero area faces, edge data can become messed up, causing bad normals. This helps minimize that.
            fix_normals = True, # optionally fix normals. useful for after 'fix_bad_mesh' to fix the normals as well.
            create_new_custom_normals = True # will auto generate new sharp edges (based on angle)
        )
    def execute(self, in_attributes=None):
        """Entry point: fix the scene's meshes according to `in_attributes`.

        Returns {'FINISHED'} for Blender operator compatibility.
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time() # start time of script execution
        if context.mode != 'OBJECT': # must be in object mode to perform the rest of the operations.
            bpy.ops.object.mode_set(mode='OBJECT')
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected): # run only if there are selected mesh objects in the scene
            # if removing zero-area-faces/zero-length-edges or merging vertices by distance:
            if attributes["fix_bad_mesh"] or attributes["merge_vertex"]:
                self.fixBadMesh(
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]: # optionally fix bad normals (can often arise after fixing bad mesh)
                self.fixNormals(selected, attributes["create_new_custom_normals"])
        else:
            utils.do_print_error("NO MESH OBJECTS")
        now = time.time() # time after it finished
        print("TIME FOR FIX MESH: ", round(now-then, 3))
        return {'FINISHED'}
    def fixBadMesh(self, selected, dissolveThreshold = 0.08, fixBadMesh = False, mergeVertex = False, mergeThreshold = 0.1, removeExistingSharp = True):
        """Remove degenerate geometry and/or merge close vertices on each mesh.

        NOTE(review): the mergeThreshold default here (0.1) differs from the
        class default attribute merge_threshold (0.01) — confirm which value
        is intended when this method is called without an explicit threshold.
        """
        # once degenerate dissolve geometry node exists (needs to be developed by Blender), replace this with a GN setup
        # that would go towards producing non-destructive workflows, which is a goal for the GUI version
        # for printing vertex and face data
        startingVerts = utils.getVertexCount(selected)
        startingFaces = utils.getFaceCount(selected)
        bm = bmesh.new() # 'bmesh' in BLender is data type that contains the 'edit mesh' for an object
        # It allows for much greater control over mesh properties and operations
        for object in selected: # loop through each selected object
            utils.printPart(object) # print the current part being fixed.
            mesh = object.data # all mesh objects contain mesh data, that is what we need to alter, not the object itself
            bm.from_mesh(mesh) # attach the mesh to the bmesh container so that changes can be made
            if fixBadMesh:
                bmesh.ops.dissolve_degenerate( # for removing zero area faces and zero length edges
                    bm,
                    dist=dissolveThreshold,
                    edges=bm.edges
                )
            if mergeVertex:
                bmesh.ops.remove_doubles(
                    bm,
                    verts=bm.verts,
                    dist=mergeThreshold
                )
            # Clear sharp state for all edges. This step reduces problems that arise from bad normals
            if removeExistingSharp:
                for edge in bm.edges:
                    edge.smooth = True # smooth is the opposite of sharp, so setting to smooth is the same as removing sharp
            bm.to_mesh(mesh) # need to transfer the altered bmesh data back to the original mesh
            bm.clear() # always clear a bmesh after use
            utils.printClearLine() # remove last print, so that printPart can be updated
        # print vertex and face data
        endingVerts = utils.getVertexCount(selected)
        endingFaces = utils.getFaceCount(selected)
        vertsRemoved = startingVerts-endingVerts
        facesRemoved = startingFaces-endingFaces
        print("Fix Mesh Statistics:")
        utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
        utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))
    def fixNormals(self, selected, createNewCustomNormals):
        """Clear existing custom split normals and optionally regenerate them.

        Useful after fixBadMesh, which can leave stale normal data behind.
        """
        run_ops_wo_update.open_update() # allows for operators to be run without updating scene
        # important especially when working with loops
        for o in selected:
            if o.type != 'MESH':
                continue
            bpy.context.view_layer.objects.active = o # the operators below act on the active object
            mesh = o.data
            if mesh.has_custom_normals:
                bpy.ops.mesh.customdata_custom_splitnormals_clear()
                if createNewCustomNormals:
                    bpy.ops.mesh.customdata_custom_splitnormals_add()
        run_ops_wo_update.close_update() # must always call close_update if open_update is called
def deleteEmptyXforms(occurrences): # Delete objects with no meshes, or zero vertex count meshes
    """Remove objects whose mesh has no vertices; return the surviving objects."""
    # Partition the occurrences by vertex count with a plain loop (clearer
    # than a reduce/lambda one-liner, identical result).
    kept = []
    doomed = []
    for obj in occurrences:
        if len(obj.data.vertices):
            kept.append(obj) # mesh has geometry: keep it
        else:
            doomed.append(obj) # zero vertices: schedule for removal
    # delete objects with zero vertex count or no meshes
    for obj in doomed:
        bpy.data.objects.remove(obj, do_unlink=True)
    # return good meshes
    return kept
| 7,637 | Python | 48.597402 | 153 | 0.647506 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/bounds.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy, bmesh
from mathutils import Vector
import collections
def boundsObj(points): # for displaying the bounds of each split chunk
    """Create an object that displays `points` as a bounding box in the viewport."""
    display_mesh = bpy.data.meshes.new("mesh") # fresh mesh datablock
    display_obj = bpy.data.objects.new("MyObject", display_mesh) # new object wrapping the mesh
    # Link into the collection most recently added to the master scene
    # collection (the one created by split), hence index len-1.
    children = bpy.context.scene.collection.children
    children[len(children)-1].objects.link(display_obj)
    display_obj.display_type = 'BOUNDS' # draw only the object's bounds in the Blender viewport
    bm = bmesh.new() # edit-mesh container giving direct access to verts/edges/faces
    for point in points: # one vertex per input bounds corner
        bm.verts.new(point)
    bm.to_mesh(display_obj.data) # write the bmesh data into the object's mesh
    bm.free() # always do this when finished with a bmesh
    return display_obj
def boundingBox(objects): # the bounding box used for calculating the split plane
    """Return the world-space bound-box corners of one object or a list of objects."""
    objs = objects if isinstance(objects, list) else [objects] # normalize to a list
    corners = [] # all bound-box corners of all objects, in global coordinates
    for obj in objs:
        world = obj.matrix_world # points must be transformed into world space
        corners.extend(world @ Vector(corner) for corner in obj.bound_box)
    return corners
def bounds(coords): # returns a namedtuple containing details of split bounds
    """Summarize the extent of `coords` along each axis.

    Args:
        coords: iterable of (x, y, z) points (e.g. from boundingBox).

    Returns:
        A namedtuple with fields x, y, z; each field is itself a namedtuple
        exposing the axis's max, min, distance (max-min) and mid ((max+min)/2),
        matching the attribute access of the previous implementation.
    """
    # FIX: the original built each per-axis record by assigning attributes onto
    # a `lambda: None` object — a confusing non-idiom. A namedtuple provides
    # the same read-only attribute access explicitly.
    AxisInfo = collections.namedtuple('axis_info', ['max', 'min', 'distance', 'mid'])
    push_axis = [] # per-axis summaries, in x, y, z order
    for values in zip(*coords): # transpose the points into per-axis value sequences
        hi = max(values) # the maximum value of bounds for this axis
        lo = min(values) # the minimum value of bounds for this axis
        push_axis.append(AxisInfo(max=hi, min=lo, distance=hi-lo, mid=(hi+lo)/2))
    o_details = collections.namedtuple('object_details', ['x', 'y', 'z']) # organized for easy access
    return o_details(*push_axis)
| 3,481 | Python | 47.36111 | 119 | 0.703533 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/remesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Remeshing reconstructs a mesh to produce clean/uniform geometry, but removes all UV mappings from an object
# There are four different remesh methods. (BLOCKS, SMOOTH, SHARP, VOXEL)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/remesh.html#remesh-modifier
def remesh(objects, remesh_type, prop):
    """Add a Remesh modifier of `remesh_type` to every object in `objects`.

    `prop` is the resolution control: octree depth for BLOCKS/SMOOTH/SHARP
    (geometry grows as 2^prop), or voxel size for VOXEL (geometry grows as
    the size shrinks). Raises TypeError for any other mode. Remeshing
    rebuilds clean/uniform geometry but discards UV mappings.
    """
    # The first three modes produce almost identical topology, differing only
    # in smoothing, and all share the octree-depth resolution setting.
    octree_modes = ('BLOCKS', 'SMOOTH', 'SHARP')
    for obj in objects:
        mod = obj.modifiers.new(name='REMESH', type='REMESH') # modifier named after its type
        mod.mode = remesh_type # sets remesh type (BLOCKS, SMOOTH, SHARP, VOXEL)
        if remesh_type in octree_modes:
            mod.octree_depth = prop # higher depth means more geometry (2^x)
        elif remesh_type == 'VOXEL':
            # "Uses an OpenVDB to generate a new manifold mesh from the current
            # geometry while trying to preserve the mesh's original volume."
            mod.voxel_size = prop # lower size means more geometry (x)
        else:
            raise TypeError('Invalid Remesh Type')
    return
| 2,657 | Python | 54.374999 | 132 | 0.703049 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/process_attributes.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from bpy.types import Operator
from . import modify, fix_mesh, chop, uv, utils
class OPTIMIZE_OT_Scene(Operator):
    """Operator that reads the add-on's panel options, packs them into an
    attribute dict, and dispatches the selected optimization operation."""
    bl_idname = "optimize.scene"
    bl_label = "Optimize Scene"
    bl_description = "Optimize scene based on operation and set parameters"
    bl_options = {"REGISTER", "UNDO"}
    def execute(self, context):
        """Blender entry point: gather options and run the operation."""
        self.get_attributes(context)
        return {'FINISHED'}
    def get_attributes(self, context):
        """Build the attribute dict for the active operation, then run it."""
        optimizeOptions = context.scene.optimize_options
        modifyOptions = context.scene.modify_options
        uvOptions = context.scene.uv_options
        chopOptions = context.scene.chop_options
        # FIX: default to an empty dict so operations that take no parameters
        # (e.g. "noop", handled in process_operation) no longer raise
        # UnboundLocalError when `attributes` is read below.
        attributes = dict()
        if optimizeOptions.operation == "modify":
            attributes = dict(
                selected_only= modifyOptions.selected_only,
                apply_mod= modifyOptions.apply_mod,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals,
                modifier= modifyOptions.modifier,
                # use_modifier_stack= modifyOptions.use_modifier_stack,
                # modifier_stack= modifyOptions.modifier_stack,
                decimate_type= modifyOptions.decimate_type,
                ratio= modifyOptions.ratio,
                iterations= modifyOptions.iterations,
                angle= modifyOptions.angle,
                remesh_type= modifyOptions.remesh_type,
                oDepth= modifyOptions.oDepth,
                voxel_size= modifyOptions.voxel_size,
                geo_type= modifyOptions.geo_type,
                geo_attribute= modifyOptions.geo_attribute
            )
        elif optimizeOptions.operation == "fixMesh":
            attributes = dict(
                selected_only=modifyOptions.selected_only,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals
            )
        elif optimizeOptions.operation == "uv":
            attributes = dict(
                selected_only= uvOptions.selected_only,
                scale_to_bounds = uvOptions.scale_to_bounds,
                clip_to_bounds = uvOptions.clip_to_bounds,
                unwrap_type = uvOptions.unwrap_type,
                use_set_size = uvOptions.use_set_size,
                set_size = uvOptions.set_size,
                print_updated_results= uvOptions.print_updated_results
            )
        elif optimizeOptions.operation == "chop":
            attributes = dict(
                merge= chopOptions.merge,
                cut_meshes= chopOptions.cut_meshes,
                max_vertices= chopOptions.max_vertices,
                min_box_size= chopOptions.min_box_size,
                max_depth= chopOptions.max_depth,
                print_updated_results= chopOptions.print_updated_results,
                create_bounds = chopOptions.create_bounds,
                selected_only = chopOptions.selected_only
            )
        if optimizeOptions.print_attributes:
            print(attributes)
        self.process_operation(optimizeOptions.operation, attributes)
    def process_operation(self, operation, attributes):
        """Map `operation` to its implementation class and execute it with `attributes`."""
        start = utils.start_time()
        blender_cmd = None
        if operation == 'modify':
            # Modify Scene
            blender_cmd = modify.Modify()
        elif operation == 'fixMesh':
            # Clean Scene
            blender_cmd = fix_mesh.FixMesh()
        elif operation == 'chop':
            # Chop Scene
            blender_cmd = chop.Chop()
        elif operation == 'uv':
            # Unwrap scene
            blender_cmd = uv.uvUnwrap()
        elif operation == "noop":
            # Runs the load/save USD round trip without modifying the scene.
            utils.do_print("No-op for this scene")
            return
        else:
            utils.do_print_error("Unknown operation: " + operation + " - add function call to process_file in process.py")
            return
        # Run the command
        if blender_cmd:
            blender_cmd.execute(attributes)
        else:
            utils.do_print_error("No Blender class found to run")
        utils.report_time(start, "operation")
| 5,736 | Python | 40.273381 | 122 | 0.61175 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/utils.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Generic utility functions for Blender
import json
import sys
from timeit import default_timer as timer
import bpy
def do_print(msg):
    """Print msg highlighted in yellow, flushing so it appears immediately."""
    YELLOW, RESET = "\033[93m", "\033[0m" # ANSI color codes
    print(YELLOW + msg + RESET, flush=True)
def do_print_error(msg):
    """Print msg highlighted in red, flushing so it appears immediately."""
    RED, RESET = "\033[91m", "\033[0m" # ANSI color codes
    print(RED + msg + RESET, flush=True)
def start_time():
    """Return a high-resolution timestamp; pair with report_time()."""
    return timer()
def report_time(start, msg):
    """Print the seconds elapsed since `start` (obtained from start_time) for `msg`."""
    elapsed = timer() - start
    do_print(f"Elapsed time for {msg}: {elapsed:.3f}")
def print_python_version():
    """Print the running interpreter's major.minor version."""
    do_print(f"Python version: {sys.version_info.major}.{sys.version_info.minor}")
def open_file(inputPath):
    """Import a USD or FBX file into the current scene and report the load time.

    Returns True on success, False for an unrecognized extension.
    """
    start = timer()
    if inputPath.endswith((".usd", ".usda", ".usdc")):
        do_print("Load file: " + inputPath)
        bpy.ops.wm.usd_import(filepath=inputPath)
    elif inputPath.endswith(".fbx"):
        bpy.ops.import_scene.fbx(filepath=inputPath)
    else:
        do_print_error("Unrecognized file, not loaded: " + inputPath)
        return False
    do_print("Elapsed time to load file: " + f"{timer() - start:.3f}")
    return True
def save_file(outputPath):
    """Export the scene as USD to outputPath and report the save time.

    Saving writes diffs only, so it is faster than a full export. Always
    returns True.
    """
    start = timer()
    do_print("Save file: " + outputPath)
    bpy.ops.wm.usd_export(filepath=outputPath)
    do_print("Elapsed time to save file: " + f"{timer() - start:.3f}")
    return True
def clear_scene():
    """Reset Blender to empty factory settings, discarding the current scene."""
    # This seems to be difficult with Blender. Partially working code:
    bpy.ops.wm.read_factory_settings(use_empty=True)
def process_json_config(operation):
    """Parse a JSON string into Python data; return None for empty/None input."""
    if not operation:
        return None
    return json.loads(operation)
def getVertexCount(occurrences): # returns the vertex count of all current occurrences for threshold testing during recursion
    """Total number of vertices across all mesh objects in `occurrences`."""
    return sum(len(obj.data.vertices) for obj in occurrences)
def getFaceCount(occurrences): # returns the face count of all current occurrences for threshold testing during recursion
    """Total number of polygons across all mesh objects in `occurrences`."""
    return sum(len(obj.data.polygons) for obj in occurrences)
def printPart(part):
    """Log which part is currently being operated on (debug aid if Blender crashes)."""
    print("current part being operated on: ", part.name)
def printClearLine():
    """Move the console cursor up one line and erase it, so progress output
    can be rewritten in place instead of scrolling endlessly."""
    # ANSI escapes: '\033[1A' moves the cursor up one line, '\x1b[2K' clears it.
    print('\033[1A', end='\x1b[2K')
| 3,371 | Python | 33.408163 | 125 | 0.69119 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/select_mesh.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# for selecting only mesh objects in the scene. To be used by multiple other files.
def setSelected(context, selectedOnly = False, deselectAll = True):
    """Collect the scene's visible mesh objects and adjust their selection state.

    Args:
        context: Blender context providing the view layer and selection.
        selectedOnly: GUI-only flag; restrict to the currently selected objects.
        deselectAll: when True, deselect every mesh object that is collected.

    Returns:
        list of the mesh objects that were collected.

    NOTE(review): the `else:` below pairs with the `obj.type == 'MESH'` check,
    so non-mesh objects are always deselected while mesh objects are left
    untouched when deselectAll is False — the trailing comment suggests the
    `else:` may have been intended for the `if deselectAll:` instead; confirm.
    """
    def select(input):
        for obj in input:
            if obj.type == 'MESH': # only mesh objects, ignore lights/cameras/curves/etc.
                selected.append(obj) # add object to array
                if deselectAll: # may want all objects deselected at end of processing
                    obj.select_set(False) # make sure all objects are deselected before continuing.
            else:
                obj.select_set(obj.type == 'MESH') # select only mesh objects
    selected = [] # an empty array that will be used to store the objects that need to be unwrapped
    objects=[ob for ob in context.view_layer.objects if ob.visible_get()] # only want to look at visible objects. process will fail otherwise
    if not selectedOnly: # selectedOnly is for GUI version only
        select(objects)
    elif len(context.selected_objects): # run only if there are selected objects in the scene to isolate just the selected meshes
        select(context.selected_objects)
    return selected
return selected | 2,025 | Python | 46.116278 | 141 | 0.698765 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/blender_class.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
from abc import ABC, abstractmethod
import json
from . import utils
class BlenderClass(ABC):
    """Common base for the add-on's operations.

    Merges caller-supplied attributes over per-class defaults and defines
    the execute() contract every operation must implement.
    """
    def __init__(self):
        # Subclasses overwrite this with their own default attribute dict.
        self._default_attributes = dict()
    def get_attributes(self, in_attributes):
        """Return the defaults overridden by `in_attributes` (caller wins)."""
        merged = dict(self._default_attributes)
        merged.update(in_attributes)
        return merged
    @abstractmethod
    def execute(self, in_attributes=None):
        """Run the operation; implemented by each subclass."""
        pass
| 1,332 | Python | 32.324999 | 92 | 0.705706 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/decimate.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Decimation reduces geometry while maintaining form and UVs
# There are three different decimation methods. Each method produces different results, with its own pros/cons)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/decimate.html#decimate-modifier
def decimate(objects, decimate_type, prop):
    """Add a Decimate modifier to every mesh object in *objects*.

    Decimation reduces geometry while maintaining form and UVs.
    https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/decimate.html#decimate-modifier

    :param objects: mesh objects to receive the modifier
    :param decimate_type: 'COLLAPSE', 'UNSUBDIV' or 'DISSOLVE'
    :param prop: type-specific value — COLLAPSE: face ratio (x/1);
        UNSUBDIV: iteration count (1/2^x geometry remaining);
        DISSOLVE: angle limit between faces, in radians
    :raises TypeError: if *decimate_type* is not a supported mode
    """
    # Validate up front: the previous version attached a modifier and set its
    # type before discovering the mode was invalid, leaving a dangling
    # modifier on the first eligible object.
    if decimate_type not in ('COLLAPSE', 'UNSUBDIV', 'DISSOLVE'):
        raise TypeError('Invalid Decimate Type')
    modifier = 'DECIMATE'  # sets type of modifier to be used
    for obj in objects:
        # decimation cannot be performed on meshes with 3 or fewer faces
        if len(obj.data.polygons) > 3:
            mod = obj.modifiers.new(name=modifier, type=modifier)  # name the modifier after its type
            mod.decimate_type = decimate_type
            if decimate_type == 'COLLAPSE':
                # "Merges vertices together progressively, taking the shape of the mesh into account."
                mod.ratio = prop
            elif decimate_type == 'UNSUBDIV':
                # "Intended for meshes with a mainly grid-based topology (without giving uneven geometry)."
                mod.iterations = prop
            elif decimate_type == 'DISSOLVE':
                # "Reduces details on forms comprised of mainly flat surfaces."
                mod.angle_limit = prop
                mod.delimit = {'UV'}  # limit dissolving so UV seams are preserved
    return
| 2,515 | Python | 54.91111 | 142 | 0.702982 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/modify.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import time
import math
from . import blender_class, select_mesh, fix_mesh, decimate, remesh, geo_nodes, utils
# Master Class for all modifiers
class Modify(blender_class.BlenderClass):
    """Master class for all modifiers.

    Optionally cleans up the selected meshes (bad-face removal, normal fixes),
    then adds one modifier (or a stack of modifiers) to every selected mesh
    object, applies them, and prints before/after vertex and face statistics.
    """

    # settings for GUI version only
    bl_idname = "modify.scene"
    bl_label = "Modify Scene"
    bl_description = "Modify the scene based on set parameters"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=True,  # uses only objects selected in scene. For GUI version only
            apply_mod=True,  # applies the generated modifiers. Should always be true for command line running
            fix_bad_mesh=True,  # remove zero-area faces and zero-length edges based on 'dissolve_threshold'
            dissolve_threshold=.08,  # threshold value for 'fix_bad_mesh'
            merge_vertex=False,  # merge connected and disconnected vertices of a mesh by a distance threshold
            merge_threshold=0.01,  # distance value to use for merge_vertex
            remove_existing_sharp=True,  # removing zero-area faces can corrupt edge data/normals; this helps minimize that
            fix_normals=True,  # optionally fix normals. Useful after 'fix_bad_mesh'
            create_new_custom_normals=True,  # useful after 'fix_bad_mesh' to fix the normals as well
            modifier="DECIMATE",  # modifier type used when 'use_modifier_stack' is False (DECIMATE, REMESH, NODES, or SUBSURF)
            use_modifier_stack=False,  # allows use of more than one modifier sequentially
            modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],  # [type, sub_type, value] triples used when 'use_modifier_stack' is True
            decimate_type="COLLAPSE",  # decimation mode (COLLAPSE, UNSUBDIV, or DISSOLVE)
            # COLLAPSE: merges vertices together progressively, taking the shape of the mesh into account
            # UNSUBDIV: intended for meshes with a mainly grid-based topology (without giving uneven geometry)
            # DISSOLVE: reduces details on forms comprised of mainly flat surfaces
            ratio=0.5,  # ratio value used for collapse decimation
            iterations=2,  # number of un-subdivisions performed
            angle=15.0,  # angle (degrees) used when performing dissolve decimation
            remesh_type="VOXEL",  # remesh mode (BLOCKS, SMOOTH, SHARP, VOXEL). NOTE: remeshing removes all UV mappings
            # https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/remesh.html#remesh-modifier
            oDepth=4,  # octree depth: resolution for BLOCKS/SMOOTH/SHARP remesh
            voxel_size=0.1,  # resolution for VOXEL remesh
            geo_type="GeometryNodeBoundBox",  # geometry node tree type to create:
            # (GeometryNodeConvexHull, GeometryNodeBoundBox, GeometryNodeSubdivisionSurface)
            # more on geometry nodes: https://docs.blender.org/manual/en/latest/modeling/geometry_nodes/index.html#geometry-nodes
            geo_attribute=2  # generic attribute variable used by the different geo node types
        )

    def execute(self, in_attributes=None):
        """Run the clean-up + modifier pipeline on the scene.

        :param in_attributes: optional overrides for the defaults above
        :returns: ``{'FINISHED'}`` per Blender operator convention
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution

        # shorthands for multi-used attributes
        modifier = attributes["modifier"]
        decimate_type = attributes["decimate_type"]
        angle = attributes["angle"]
        remesh_type = attributes["remesh_type"]

        if context.mode != 'OBJECT':  # must be in object mode to perform the rest of the operations
            bpy.ops.object.mode_set(mode='OBJECT')

        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll=False)
        if len(selected):  # run only if there are selected mesh objects in the scene
            if attributes["fix_bad_mesh"]:  # optionally fix bad meshes. Can also be done separately beforehand
                fix_mesh.FixMesh.fixBadMesh(
                    self,
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]:  # optionally fix bad normals (can often arise after fixing bad mesh)
                fix_mesh.FixMesh.fixNormals(self, selected, attributes["create_new_custom_normals"])

            # capture vertex and face counts for the statistics printed below
            startingVerts = utils.getVertexCount(selected)
            startingFaces = utils.getFaceCount(selected)

            if attributes["use_modifier_stack"]:
                for mod in attributes["modifier_stack"]:
                    self.run_modifier(selected, mod[0], mod[1], mod[2])
            else:
                # Default both to None so an unrecognized modifier/sub-type can
                # never leave them unbound (previously a NameError for e.g. an
                # unknown decimate_type).
                sub_mod = None
                prop = None
                # Decimate
                if modifier == 'DECIMATE':
                    sub_mod = decimate_type
                    if decimate_type == 'COLLAPSE':
                        prop = attributes["ratio"]
                    elif decimate_type == 'UNSUBDIV':
                        prop = attributes["iterations"]
                    elif decimate_type == 'DISSOLVE':
                        prop = math.radians(angle)  # the modifier expects radians
                # Remesh
                elif modifier == 'REMESH':
                    sub_mod = remesh_type
                    if remesh_type in ('BLOCKS', 'SMOOTH', 'SHARP'):
                        prop = attributes["oDepth"]
                    if remesh_type == 'VOXEL':
                        prop = attributes["voxel_size"]
                # Geometry Nodes
                elif modifier == 'NODES':
                    sub_mod = attributes["geo_type"]
                    prop = attributes["geo_attribute"]
                self.run_modifier(selected, modifier, sub_mod, prop)
                # NOTE: removed a stray unconditional "raise RuntimeError" here
                # (debug leftover) that aborted execution before the modifiers
                # could be applied and the statistics printed.

            # apply modifiers once the above is complete
            if attributes["apply_mod"]:
                # Need one of the selected objects as the active object (there can
                # only be one AO, but multiple SO); required for applying modifiers.
                context.view_layer.objects.active = selected[0]
                bpy.ops.object.convert(target='MESH')  # applies all modifiers of each selected mesh, prepping for export

            # print vertex and face statistics
            endingVerts = utils.getVertexCount(selected)
            endingFaces = utils.getFaceCount(selected)
            vertsRemoved = startingVerts - endingVerts
            facesRemoved = startingFaces - endingFaces
            print("Modify Mesh Statistics:")
            utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
            utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))
        else:
            utils.do_print_error("NO MESH OBJECTS")

        now = time.time()  # time after it finished
        print("TIME FOR MODIFY: ", round(now - then, 3))
        return {'FINISHED'}  # how Blender understands that an operator call is complete

    def run_modifier(self, objects, modifier, sub_mod=None, prop=None):
        """Dispatch to the helper module matching *modifier*.

        Each modifier type requires different input variables/values; anything
        other than DECIMATE/REMESH/NODES is silently ignored (matches the
        original dispatch behavior).
        """
        # Decimate
        if modifier == 'DECIMATE':
            decimate.decimate(objects, sub_mod, prop)
        # Remesh
        elif modifier == 'REMESH':
            remesh.remesh(objects, sub_mod, prop)
        # Geometry Nodes
        elif modifier == 'NODES':
            geo_nodes.geoNodes(objects, sub_mod, prop)
| 10,769 | Python | 58.175824 | 155 | 0.626613 |
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/uv.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import bpy
import time
import contextlib
from . import blender_class, run_ops_wo_update, select_mesh, utils
class uvUnwrap(blender_class.BlenderClass):
    """Batch UV-unwraps every mesh object using the configured projection type."""

    # settings for GUI version only
    bl_idname = "uv.unwrap_batch"
    bl_label = "Batch UV Unwrap"
    bl_description = "batch uv unwrap objects"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only= False, # uses only objects selected in scene. For GUI version only
            scale_to_bounds = False, # determines if the unwrapped map gets scaled to the square uv image bounds
            clip_to_bounds = False, # if unwrapping exceeds bounds, it will be clipped off
            unwrap_type = 'Cube', # the method for unwrapping (cube, sphere, cylinder, or smart)
            use_set_size = False, # for cube and cylinder project, use specified projection size for all objects.
            # Overrides scale_to_bounds to False
            set_size = 2, # projection size for cube and cylinder project
            print_updated_results= True # print progress to console
        )

    def execute(self, in_attributes=None):
        """Entry point: ensures object mode, runs the unwrap, reports timing."""
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time() # start time of script execution
        # blender operates in modes/contexts, and certain operations can only be performed in certain contexts
        if bpy.context.mode != 'OBJECT': # make sure context is object mode.
            bpy.ops.object.mode_set(mode='OBJECT') # if it is not, set it to object mode
        run_ops_wo_update.open_update() # allows for operators to be run without updating scene
        # important especially when working with loops
        self.unwrap(context, attributes)
        run_ops_wo_update.close_update() # must always call close_update if open_update is called
        now = time.time() # time after it finished
        print("TIME FOR UNWRAP: ", round(now-then, 3))
        return {"FINISHED"}

    def unwrap(self, context, attributes):
        """Unwrap each mesh object in turn with the chosen projection method."""
        scaleBounds = attributes["scale_to_bounds"]
        clipBounds = attributes["clip_to_bounds"]
        unwrapType = attributes["unwrap_type"]
        use_set_size = attributes["use_set_size"]
        set_size = attributes["set_size"]
        print_updated_results = attributes["print_updated_results"]
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = True)
        if len(selected): # run only if there are mesh objects in the 'selected' array
            LINE_UP = '\033[1A' # ANSI command to move up a line in the console
            LINE_CLEAR = '\x1b[2K' # ANSI command to clear current line in the console
            count = 0 # counter for which object is being calculated
            then = time.time() # start time of loop execution
            for object in selected: # unwrap each object separately
                object.select_set(True) # select object. This is now the only selected object
                context.view_layer.objects.active = object # set active object. Blender needs active object to be the selected object
                bpy.ops.object.mode_set(mode='EDIT') # make sure context is edit mode. Context switching is object dependent, must be after selection
                bpy.ops.mesh.select_all(action='SELECT') # select all mesh vertices. only selected vertices will be uv unwrapped
                # for smart UV projection
                if unwrapType == "Smart":
                    # smart UV can take a long time, so this prints out a progress bar
                    if count and print_updated_results: # if the first object has already been calculated and results should be printed
                        with contextlib.redirect_stdout(None): # smartUV prints an output sometimes. We don't want/need this output; this suppresses it
                            self.smartUV(scaleBounds) # perform the uv unwrap
                        now = time.time() # time after unwrapping is complete
                        timeElapsed = now - then
                        remaining = len(selected)-count # number of remaining objects
                        timeLeft = timeElapsed/count * remaining # estimation of remaining time
                        print(LINE_UP, end=LINE_CLEAR) # don't want endless print statements,
                        print(LINE_UP, end=LINE_CLEAR) # so move up and clear the previously printed lines and overwrite them
                        print("Object Count = ", count, " Objects Remaining = ", remaining)
                        print(" Elapsed Time = ", round(timeElapsed,3), " Time Remaining = ", round(timeLeft,3)) # print results to console
                    else: # if calculating the first object or not printing results
                        self.smartUV(scaleBounds) # perform the uv unwrap
                        if print_updated_results:
                            print("Object Count = 0")
                            print("Time Remaining = UNKOWN")
                # for cube projection
                elif unwrapType == "Cube":
                    self.cubeUV(scaleBounds, clipBounds, use_set_size, set_size) # perform the uv unwrap
                # for sphere projection
                elif unwrapType == "Sphere":
                    self.sphereUV(scaleBounds, clipBounds) # perform the uv unwrap
                # for cylinder projection
                elif unwrapType == "Cylinder":
                    self.cylinderUV(scaleBounds, clipBounds, use_set_size, set_size) # perform the uv unwrap
                bpy.ops.object.mode_set(mode='OBJECT') # once complete, make sure context is object mode.
                # Must be in object mode to select the next object
                object.select_set(False) # deselect the current object. Now there are again no objects selected
                count += 1 # increase the object counter
            for obj in selected: # reselect all originally selected meshes
                obj.select_set(True)
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}

    # methods for running each type of uv projection
    def smartUV(self, scale):
        bpy.ops.uv.smart_project(correct_aspect=True, scale_to_bounds=scale)

    def cubeUV(self, scale, clip, use_set_size, size):
        if use_set_size: # user sets cube_size value of cube projection
            bpy.ops.uv.cube_project(scale_to_bounds=False, clip_to_bounds=clip, cube_size=size)
        else:
            bpy.ops.uv.cube_project(scale_to_bounds=scale, clip_to_bounds=clip)

    def sphereUV(self, scale, clip):
        bpy.ops.uv.sphere_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)
        # 'ALIGN_TO_OBJECT' sets the direction of the projection to be consistent regardless of view position/direction

    def cylinderUV(self, scale, clip, use_set_size, size):
        if use_set_size: # user sets radius value of cylinder projection
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=False, clip_to_bounds=clip, radius=size)
        else:
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)
NVIDIA-Omniverse/blender_omniverse_addons/omni/__init__.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""
To invoke in Blender script editor:
import bpy
bpy.ops.universalmaterialmap.generator()
bpy.ops.universalmaterialmap.converter()
INFO_HT_header
Header
VIEW3D_HT_tool_header
Info Header: INFO_HT_HEADER
3D View Header: VIEW3D_HT_HEADER
Timeline Header: TIME_HT_HEADER
Outliner Header: OUTLINER_HT_HEADER
Properties Header: PROPERTIES_HT_HEADER, etc.
"""
"""
Menu location problem
https://blender.stackexchange.com/questions/3393/add-custom-menu-at-specific-location-in-the-header#:~:text=Blender%20has%20a%20built%20in,%3EPython%2D%3EUI%20Menu.
"""
# AddOn registration metadata consumed by Blender's Add-ons preferences UI.
bl_info = {
    'name': 'Universal Material Map',
    'author': 'NVIDIA Corporation',
    'description': 'A Blender AddOn based on the Universal Material Map framework.',
    'blender': (3, 1, 0),
    'location': 'View3D',
    'warning': '',
    'category': 'Omniverse'
}

import sys
import importlib

import bpy

from .universalmaterialmap.blender import developer_mode

if developer_mode:
    # Developer convenience: reload every UMM submodule (in dependency order)
    # so code changes are picked up without restarting Blender.
    print('UMM DEBUG: Initializing "{0}"'.format(__file__))
    ordered_module_names = [
        'omni.universalmaterialmap',
        'omni.universalmaterialmap.core',
        'omni.universalmaterialmap.core.feature',
        'omni.universalmaterialmap.core.singleton',
        'omni.universalmaterialmap.core.data',
        'omni.universalmaterialmap.core.util',
        'omni.universalmaterialmap.core.operator',
        'omni.universalmaterialmap.core.service',
        'omni.universalmaterialmap.core.service.core',
        'omni.universalmaterialmap.core.service.delegate',
        'omni.universalmaterialmap.core.service.resources',
        'omni.universalmaterialmap.core.service.store',
        'omni.universalmaterialmap.core.converter',
        'omni.universalmaterialmap.core.converter.core',
        'omni.universalmaterialmap.core.converter.util',
        'omni.universalmaterialmap.core.generator',
        'omni.universalmaterialmap.core.generator.core',
        'omni.universalmaterialmap.core.generator.util',
        'omni.universalmaterialmap.blender',
        'omni.universalmaterialmap.blender.menu',
        'omni.universalmaterialmap.blender.converter',
        'omni.universalmaterialmap.blender.generator',
        'omni.universalmaterialmap.blender.material',
    ]
    for module_name in sys.modules:
        if 'omni.' not in module_name:
            continue
        if module_name not in ordered_module_names:
            # Fail fast if a UMM module is loaded that the reload list misses.
            raise Exception('Unexpected module name in sys.modules: {0}'.format(module_name))
    for module_name in ordered_module_names:
        if module_name in sys.modules:
            print('UMM reloading: {0}'.format(module_name))
            importlib.reload(sys.modules.get(module_name))

# Developer builds expose additional converter/generator operators.
if developer_mode:
    from .universalmaterialmap.blender.converter import OT_InstanceToDataConverter, OT_DataToInstanceConverter, OT_DataToDataConverter, OT_ApplyDataToInstance, OT_DescribeShaderGraph
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu
    from .universalmaterialmap.blender.generator import OT_Generator
else:
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu
def draw_item(self, context):
    """Header draw callback: adds the Universal Material Map menu to the node editor header."""
    self.layout.menu(UniversalMaterialMapMenu.bl_idname)
def register():
    """Register all UMM operator/menu classes and hook the node-editor header."""
    classes = [OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass]
    if developer_mode:
        # Developer builds also expose the converter/generator operators.
        classes.extend([
            OT_DataToInstanceConverter,
            OT_DataToDataConverter,
            OT_ApplyDataToInstance,
            OT_InstanceToDataConverter,
            OT_DescribeShaderGraph,
            OT_Generator,
        ])
    classes.append(UniversalMaterialMapMenu)
    for cls in classes:
        bpy.utils.register_class(cls)
    # lets add ourselves to the main header
    bpy.types.NODE_HT_header.append(draw_item)
def unregister():
    """Unregister every class added by register() and unhook the node-editor header."""
    classes = [OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass]
    if developer_mode:
        classes.extend([
            OT_DataToInstanceConverter,
            OT_DataToDataConverter,
            OT_ApplyDataToInstance,
            OT_InstanceToDataConverter,
            OT_DescribeShaderGraph,
            OT_Generator,
        ])
    classes.append(UniversalMaterialMapMenu)
    for cls in classes:
        bpy.utils.unregister_class(cls)
    bpy.types.NODE_HT_header.remove(draw_item)
if __name__ == "__main__":
    # Allow running this file directly from Blender's text editor.
    register()

# The menu can also be called from scripts
# bpy.ops.wm.call_menu(name=UniversalMaterialMapMenu.bl_idname)
| 5,725 | Python | 35.471337 | 182 | 0.731528 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/util.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import sys
from .data import Plug
def to_plug_value_type(value: typing.Any, assumed_value_type: str) -> str:
    """Returns matching :class:`omni.universalmaterialmap.core.data.Plug` value type.

    :param value: the runtime value to classify
    :param assumed_value_type: caller's hint; used to disambiguate iterables
        (e.g. a 3-element list of ints assumed to be a VECTOR3)
    """
    # Strings return early (the Python 2 "basestring" branch is kept for legacy parity).
    if sys.version_info.major < 3:
        if isinstance(value, basestring):
            return Plug.VALUE_TYPE_STRING
    else:
        if isinstance(value, str):
            return Plug.VALUE_TYPE_STRING
    # bool is tested with type() because isinstance(True, int) is True.
    if type(value) == bool:
        return Plug.VALUE_TYPE_BOOLEAN
    if isinstance(value, int):
        return Plug.VALUE_TYPE_INTEGER
    if isinstance(value, float):
        return Plug.VALUE_TYPE_FLOAT
    try:
        iter(value)
        is_iterable = True
    except TypeError:
        is_iterable = False
    if is_iterable:
        if assumed_value_type == Plug.VALUE_TYPE_LIST:
            return Plug.VALUE_TYPE_LIST
        num_booleans = 0
        num_integers = 0
        num_floats = 0
        num_strings = 0
        for o in value:
            # BUG FIX: previously these string checks tested the container
            # "value" instead of the element "o", so string elements were
            # never counted.
            if sys.version_info.major < 3:
                if isinstance(o, basestring):
                    num_strings += 1
                    continue
            else:
                if isinstance(o, str):
                    num_strings += 1
                    continue
            if type(o) == bool:
                num_booleans += 1
                continue
            if isinstance(o, int):
                num_integers += 1
                continue
            if isinstance(o, float):
                num_floats += 1
        # Any float content promotes small fixed-size sequences to vectors.
        if num_floats > 0:
            if len(value) == 2:
                return Plug.VALUE_TYPE_VECTOR2
            if len(value) == 3:
                return Plug.VALUE_TYPE_VECTOR3
            if len(value) == 4:
                return Plug.VALUE_TYPE_VECTOR4
        # Otherwise only honor a vector interpretation if the caller assumed it.
        if len(value) == 2 and assumed_value_type == Plug.VALUE_TYPE_VECTOR2:
            return assumed_value_type
        if len(value) == 3 and assumed_value_type == Plug.VALUE_TYPE_VECTOR3:
            return assumed_value_type
        if len(value) == 4 and assumed_value_type == Plug.VALUE_TYPE_VECTOR4:
            return assumed_value_type
        return Plug.VALUE_TYPE_LIST
    return Plug.VALUE_TYPE_ANY
def get_extension_from_image_file_format(format: str, base_name: str) -> str:
    """
    For image formats that have multiple possible extensions,
    determine if we should stick with the current format specifier
    or use the one from the filename itself.

    :param format: Blender image file-format identifier (case-insensitive)
    :param base_name: file name whose extension may override the format
    :returns: lower-case extension, e.g. "exr", "jpg", "tif"
    """
    format = format.lower()
    # BUG FIX: only treat the text after the final "." as an extension when a
    # "." is actually present; previously a dot-less filename was itself
    # mistaken for the extension.
    _, sep, tail = base_name.rpartition(".")
    extension = tail.lower() if (sep and tail) else None
    if format == "open_exr":
        format = "exr"
    elif format == "jpeg":
        # honor an existing jpeg/jpg spelling from the filename
        format = extension if extension in {"jpeg", "jpg"} else "jpg"
    elif format == "tiff":
        # honor an existing tiff/tif spelling from the filename
        format = extension if extension in {"tiff", "tif"} else "tif"
    elif format == "targa_raw":
        format = "tga"
    return format
| 3,780 | Python | 31.042373 | 88 | 0.598677 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/data.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import typing
import uuid
import sys
import importlib
from .service.core import IDelegate
class ChangeNotification(object):
    """Immutable record of a single property change on an item."""

    def __init__(self, item: object, property_name: str, old_value: typing.Any, new_value: typing.Any):
        super(ChangeNotification, self).__init__()
        self._item = item
        self._property_name = property_name
        self._old_value = old_value
        self._new_value = new_value

    @property
    def item(self) -> object:
        """The object whose property changed."""
        return self._item

    @property
    def property_name(self) -> str:
        """Name of the property that changed."""
        return self._property_name

    @property
    def old_value(self) -> typing.Any:
        """Value before the change."""
        return self._old_value

    @property
    def new_value(self) -> typing.Any:
        """Value after the change."""
        return self._new_value
class Notifying(object):
    """Base class providing change notification capability."""

    def __init__(self):
        super(Notifying, self).__init__()
        # callback id -> callback, in registration order
        self._changed_callbacks: typing.Dict[uuid.uuid4, typing.Callable[[ChangeNotification], typing.NoReturn]] = dict()

    def add_changed_fn(self, callback: typing.Callable[[ChangeNotification], typing.NoReturn]) -> uuid.uuid4:
        """Register *callback*; returns its id (the existing id if already registered)."""
        for existing_id, existing_fn in self._changed_callbacks.items():
            if existing_fn == callback:
                return existing_id
        callback_id = uuid.uuid4()
        self._changed_callbacks[callback_id] = callback
        return callback_id

    def remove_changed_fn(self, callback_id: uuid.uuid4) -> None:
        """Deregister the callback registered under *callback_id*; unknown ids are ignored."""
        self._changed_callbacks.pop(callback_id, None)

    def _notify(self, notification: ChangeNotification):
        """Invoke every registered callback with *notification*."""
        for callback_fn in self._changed_callbacks.values():
            callback_fn(notification)

    def destroy(self):
        # Drop all callback references to help break reference cycles.
        self._changed_callbacks = None
class Subscribing(Notifying):
    """Notifying subclass that can also subscribe to other Notifying objects."""

    def __init__(self):
        super(Subscribing, self).__init__()
        # subscribed Notifying -> the callback id it gave us
        self._subscriptions: typing.Dict[Notifying, uuid.uuid4] = dict()

    def _subscribe(self, notifying: Notifying) -> uuid.uuid4:
        """Subscribe to *notifying*'s change notifications and return the callback id.

        BUG FIX: the id is now returned for a newly created subscription as
        well; previously only an already-existing subscription's id was
        returned and new subscriptions returned None, contradicting the
        declared return type.
        """
        if notifying in self._subscriptions.keys():
            return self._subscriptions[notifying]
        callback_id = notifying.add_changed_fn(self._on_notification)
        self._subscriptions[notifying] = callback_id
        return callback_id

    def _unsubscribe(self, notifying: Notifying) -> None:
        """Remove the subscription to *notifying*, if any."""
        if notifying in self._subscriptions.keys():
            callback_id = self._subscriptions[notifying]
            del self._subscriptions[notifying]
            notifying.remove_changed_fn(callback_id=callback_id)

    def _on_notification(self, notification: ChangeNotification) -> None:
        """Override point: subclasses handle notifications from subscriptions here."""
        pass
class ManagedListInsert(object):
    """Pairs a newly inserted Notifying item with its position in a ManagedList."""

    def __init__(self, notifying: Notifying, index: int):
        super(ManagedListInsert, self).__init__()
        self._notifying = notifying
        self._index = index

    @property
    def notifying(self) -> Notifying:
        """The inserted item."""
        return self._notifying

    @property
    def index(self) -> int:
        """Index at which the item was inserted."""
        return self._index
class ManagedListRemove(object):
    """Pairs a removed Notifying item with the position it previously held in a ManagedList."""

    def __init__(self, notifying: Notifying, index: int):
        super(ManagedListRemove, self).__init__()
        self._notifying = notifying
        self._index = index

    @property
    def notifying(self) -> Notifying:
        """The removed item."""
        return self._notifying

    @property
    def index(self) -> int:
        """Index the item occupied before removal."""
        return self._index
class ManagedListNotification(object):
    """Describes one homogeneous batch of changes emitted by a ManagedList.

    The batch kind is inferred from the first item: ManagedListInsert ->
    ADDED_ITEMS, ChangeNotification -> UPDATED_ITEMS, ManagedListRemove ->
    REMOVED_ITEMS. Only the list matching the kind is populated.
    """

    ADDED_ITEMS: int = 0
    UPDATED_ITEMS: int = 1
    REMOVED_ITEMS: int = 2

    def __init__(self, managed_list: 'ManagedList', items: typing.List[typing.Union[ManagedListInsert, ChangeNotification, ManagedListRemove]]):
        super(ManagedListNotification, self).__init__()
        self._managed_list = managed_list
        self._inserted_items = []
        self._change_notifications = []
        self._removed_items = []
        self._kind = -1
        sample = items[0]
        if isinstance(sample, ManagedListInsert):
            self._kind = ManagedListNotification.ADDED_ITEMS
            self._inserted_items = typing.cast(typing.List[ManagedListInsert], items)
        elif isinstance(sample, ChangeNotification):
            self._kind = ManagedListNotification.UPDATED_ITEMS
            self._change_notifications = typing.cast(typing.List[ChangeNotification], items)
        elif isinstance(sample, ManagedListRemove):
            self._kind = ManagedListNotification.REMOVED_ITEMS
            self._removed_items = typing.cast(typing.List[ManagedListRemove], items)
        else:
            raise Exception('Unexpected object: "{0}" of type "{1}".'.format(sample, type(sample)))

    @property
    def managed_list(self) -> 'ManagedList':
        """The list that emitted this notification."""
        return self._managed_list

    @property
    def kind(self) -> int:
        """One of ADDED_ITEMS, UPDATED_ITEMS or REMOVED_ITEMS."""
        return self._kind

    @property
    def inserted_items(self) -> typing.List[ManagedListInsert]:
        """Inserted items (non-empty only when kind == ADDED_ITEMS)."""
        return self._inserted_items

    @property
    def change_notifications(self) -> typing.List[ChangeNotification]:
        """Per-item change notifications (non-empty only when kind == UPDATED_ITEMS)."""
        return self._change_notifications

    @property
    def removed_items(self) -> typing.List[ManagedListRemove]:
        """Removed items (non-empty only when kind == REMOVED_ITEMS)."""
        return self._removed_items
class ManagedList(object):
    """Observable collection of Notifying items.

    Subscribes to each managed item and re-broadcasts both item-level changes
    and list mutations to registered callbacks as ManagedListNotification
    objects.
    """

    def __init__(self, items: typing.List['Notifying'] = None):
        super(ManagedList, self).__init__()
        # Maps each managed item to the subscription id returned by its add_changed_fn.
        self._subscriptions: typing.Dict['Notifying', uuid.UUID] = dict()
        self._changed_callbacks: typing.Dict[uuid.UUID, typing.Callable[['ManagedListNotification'], typing.NoReturn]] = dict()
        self._managed_items: typing.List['Notifying'] = []
        if items:
            for o in items:
                self._manage_item(notifying=o)

    def __iter__(self):
        return iter(self._managed_items)

    def _manage_item(self, notifying: 'Notifying') -> typing.Union['Notifying', None]:
        """ Subscribes to managed item. Returns item only if it became managed. """
        if notifying in self._managed_items:
            return None
        self._managed_items.append(notifying)
        self._subscriptions[notifying] = notifying.add_changed_fn(self._on_notification)
        return notifying

    def _unmanage_item(self, notifying: 'Notifying') -> typing.Union[typing.Tuple['Notifying', int], typing.Tuple[None, int]]:
        """ Unsubscribes to managed item. Returns item only if it became unmanaged. """
        if notifying not in self._managed_items:
            return None, -1
        index = self._managed_items.index(notifying)
        self._managed_items.remove(notifying)
        callback_id = self._subscriptions.pop(notifying)
        notifying.remove_changed_fn(callback_id=callback_id)
        return notifying, index

    def _on_notification(self, notification: 'ChangeNotification') -> None:
        # Wrap a single item change and re-broadcast it as a list-level notification.
        self._notify(
            notification=ManagedListNotification(
                managed_list=self,
                items=[notification]
            )
        )

    def _notify(self, notification: 'ManagedListNotification'):
        for callback in self._changed_callbacks.values():
            callback(notification)

    def add_changed_fn(self, callback: typing.Callable[['ManagedListNotification'], typing.NoReturn]) -> uuid.UUID:
        """Register *callback*; returns its id. Re-registering the same callback returns the existing id."""
        for key, value in self._changed_callbacks.items():
            if value == callback:
                return key
        key = uuid.uuid4()
        self._changed_callbacks[key] = callback
        return key

    def remove_changed_fn(self, callback_id: uuid.UUID) -> None:
        """Deregister the callback registered under *callback_id*, if any."""
        if callback_id in self._changed_callbacks.keys():
            del self._changed_callbacks[callback_id]

    def append(self, notifying: 'Notifying') -> None:
        """Add *notifying* and broadcast an ADDED_ITEMS notification if it was new."""
        if self._manage_item(notifying=notifying) is not None:
            self._notify(
                ManagedListNotification(
                    managed_list=self,
                    items=[ManagedListInsert(notifying=notifying, index=self.index(notifying=notifying))]
                )
            )

    def extend(self, notifying: typing.List['Notifying']) -> None:
        """Add several items and broadcast one notification covering the newly managed ones."""
        added = []
        for o in notifying:
            o = self._manage_item(notifying=o)
            # FIX: compare against None — a truthiness test would drop falsy items.
            if o is not None:
                added.append(o)
        if len(added) == 0:
            return
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=[ManagedListInsert(notifying=o, index=self.index(notifying=o)) for o in added]
            )
        )

    def remove(self, notifying: 'Notifying') -> None:
        """Remove *notifying* and broadcast a REMOVED_ITEMS notification if it was managed."""
        notifying, index = self._unmanage_item(notifying=notifying)
        # FIX: compare against None — a truthiness test would skip falsy items.
        if notifying is not None:
            self._notify(
                ManagedListNotification(
                    managed_list=self,
                    items=[ManagedListRemove(notifying=notifying, index=index)]
                )
            )

    def remove_all(self) -> None:
        """Remove every item, then broadcast a single notification listing all removals."""
        items = [ManagedListRemove(notifying=o, index=i) for i, o in enumerate(self._managed_items)]
        # BUG FIX: _subscriptions maps item -> callback_id; the previous code unpacked
        # the pairs in swapped order and called remove_changed_fn on the id object,
        # raising AttributeError.
        for notifying, callback_id in self._subscriptions.items():
            notifying.remove_changed_fn(callback_id=callback_id)
        self._subscriptions = dict()
        self._managed_items = []
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=items
            )
        )

    def pop(self, index: int = 0) -> 'Notifying':
        """Remove and return the item at *index*, broadcasting its removal."""
        notifying, index = self._unmanage_item(self._managed_items[index])
        self._notify(
            ManagedListNotification(
                managed_list=self,
                items=[ManagedListRemove(notifying=notifying, index=index)]
            )
        )
        return notifying

    def index(self, notifying: 'Notifying') -> int:
        """Position of *notifying* in the list, or -1 when not managed."""
        if notifying in self._managed_items:
            return self._managed_items.index(notifying)
        return -1
class Serializable(Subscribing):
    """Base class providing serialization method template"""

    def __init__(self):
        super(Serializable, self).__init__()

    def serialize(self) -> dict:
        """Produce a dictionary representation of this instance."""
        return {}

    def deserialize(self, data: dict) -> None:
        """Populate this instance from a dictionary representation."""
        pass
class Base(Serializable):
    """Base class providing id property"""

    @classmethod
    def Create(cls) -> 'Base':
        """Alternate constructor returning a fresh instance of the class."""
        return cls()

    def __init__(self):
        super(Base, self).__init__()
        self._id: str = str(uuid.uuid4())

    def serialize(self) -> dict:
        """Serialize the unique id on top of the base payload."""
        output = super(Base, self).serialize()
        output['_id'] = self._id
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the unique id, minting a new one when absent from *data*."""
        super(Base, self).deserialize(data=data)
        self._id = data.get('_id', str(uuid.uuid4()))

    @property
    def id(self) -> str:
        """Stable unique identifier for this object."""
        return self._id
class DagNode(Base):
    """Base class providing input and outputs of :class:`omni.universalmaterialmap.core.data.Plug` """

    def __init__(self):
        super(DagNode, self).__init__()
        self._inputs: typing.List['Plug'] = []
        self._outputs: typing.List['Plug'] = []
        # Guards against re-entrant compute() while a compute pass is running.
        self._computing: bool = False

    def serialize(self) -> dict:
        """Serialize this node together with all of its input and output plugs."""
        output = super(DagNode, self).serialize()
        output['_inputs'] = [plug.serialize() for plug in self.inputs]
        output['_outputs'] = [plug.serialize() for plug in self.outputs]
        return output

    def deserialize(self, data: dict) -> None:
        """Rebuild plugs from *data* and notify if either collection changed."""
        super(DagNode, self).deserialize(data=data)
        old_inputs = self._inputs[:]
        old_outputs = self._outputs[:]
        # Unsubscribe from all existing plugs before replacing them.
        while len(self._inputs):
            self._unsubscribe(notifying=self._inputs.pop())
        while len(self._outputs):
            self._unsubscribe(notifying=self._outputs.pop())
        self._inputs = self._deserialize_plugs(data=data, key='_inputs')
        self._outputs = self._deserialize_plugs(data=data, key='_outputs')
        for o in self._inputs:
            self._subscribe(notifying=o)
        for o in self._outputs:
            self._subscribe(notifying=o)
        if not old_inputs == self._inputs:
            self._notify(
                ChangeNotification(
                    item=self,
                    property_name='inputs',
                    old_value=old_inputs,
                    new_value=self._inputs[:]
                )
            )
        # BUG FIX: the original compared old_inputs against self._outputs, so the
        # 'outputs' notification fired on the wrong condition.
        if not old_outputs == self._outputs:
            self._notify(
                ChangeNotification(
                    item=self,
                    property_name='outputs',
                    old_value=old_outputs,
                    new_value=self._outputs[:]
                )
            )

    def _deserialize_plugs(self, data: dict, key: str) -> typing.List['Plug']:
        """Instantiate Plug children from the serialized list stored under *key*."""
        plugs = []
        if key in data.keys():
            for o in data[key]:
                plug = Plug(parent=self)
                plug.deserialize(data=o)
                plugs.append(plug)
        return plugs

    def _on_notification(self, notification: ChangeNotification) -> None:
        if notification.item == self:
            return
        # Re-broadcast notification
        self._notify(notification=notification)

    def invalidate(self, plug: 'Plug'):
        """Subclasses react to *plug* invalidation; base implementation is a no-op."""
        pass

    def compute(self) -> None:
        """Compute all input plugs, then all output plugs, guarding against re-entry."""
        if self._computing:
            return
        self._computing = True
        self._compute_inputs(input_plugs=self._inputs)
        self._compute_outputs(output_plugs=self._outputs)
        self._computing = False

    def _compute_plugs(self, plugs: typing.List['Plug']) -> None:
        """Compute invalid upstream dependencies, then copy each plug's effective value."""
        # Compute dependencies
        for plug in plugs:
            if not plug.input:
                continue
            if not plug.input.parent:
                continue
            if not plug.input.is_invalid:
                continue
            plug.input.parent.compute()
        # Set computed_value: connected plugs forward their input; unconnected
        # plugs use their own value.
        for plug in plugs:
            if plug.input:
                plug.computed_value = plug.input.computed_value
            else:
                plug.computed_value = plug.value

    def _compute_inputs(self, input_plugs: typing.List['Plug']):
        # Kept as an overridable hook; the two compute passes shared identical bodies.
        self._compute_plugs(plugs=input_plugs)

    def _compute_outputs(self, output_plugs: typing.List['Plug']):
        # Kept as an overridable hook; the two compute passes shared identical bodies.
        self._compute_plugs(plugs=output_plugs)

    def add_input(self) -> 'Plug':
        """Subclasses that support dynamically added inputs must override."""
        raise NotImplementedError()

    def can_remove_plug(self, plug: 'Plug') -> bool:
        """Whether *plug* may be removed from this node."""
        return plug.is_removable

    def remove_plug(self, plug: 'Plug') -> None:
        """Remove *plug* from inputs/outputs, disconnect its destinations, then notify.

        Raises:
            Exception: if the plug is not removable.
        """
        if not plug.is_removable:
            raise Exception('Plug is not removable')
        notifications = []
        if plug in self._inputs:
            old_value = self._inputs[:]
            self._unsubscribe(notifying=plug)
            self._inputs.remove(plug)
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='inputs',
                    old_value=old_value,
                    new_value=self._inputs[:]
                )
            )
        if plug in self._outputs:
            old_value = self._outputs[:]
            self._unsubscribe(notifying=plug)
            self._outputs.remove(plug)
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='outputs',
                    old_value=old_value,
                    new_value=self._outputs[:]
                )
            )
        # Disconnect every plug that was fed by the removed plug.
        destination: Plug
        for destination in plug.outputs:
            destination.input = None
        for notification in notifications:
            self._notify(notification=notification)

    @property
    def can_add_input(self) -> bool:
        """Whether this node supports dynamically added inputs."""
        return False

    @property
    def inputs(self) -> typing.List['Plug']:
        """Input plugs of this node."""
        return self._inputs

    @property
    def outputs(self) -> typing.List['Plug']:
        """Output plugs of this node."""
        return self._outputs
class GraphEntity(DagNode):
    """Base class providing omni.kit.widget.graph properties for a data item."""
    OPEN = 0
    MINIMIZED = 1
    CLOSED = 2

    def __init__(self):
        super(GraphEntity, self).__init__()
        self._display_name: str = ''
        self._position: typing.Union[typing.Tuple[float, float], None] = None
        self._expansion_state: int = GraphEntity.OPEN
        self._show_inputs: bool = True
        self._show_outputs: bool = True
        self._show_peripheral: bool = False

    def serialize(self) -> dict:
        """Serialize graph-display attributes on top of the base payload."""
        output = super(GraphEntity, self).serialize()
        for key in ('_display_name', '_position', '_expansion_state', '_show_inputs', '_show_outputs', '_show_peripheral'):
            output[key] = getattr(self, key)
        return output

    def deserialize(self, data: dict) -> None:
        """Restore graph-display attributes, falling back to defaults when absent."""
        super(GraphEntity, self).deserialize(data=data)
        defaults = {
            '_display_name': '',
            '_position': None,
            '_expansion_state': GraphEntity.OPEN,
            '_show_inputs': True,
            '_show_outputs': True,
            '_show_peripheral': False,
        }
        for key, fallback in defaults.items():
            setattr(self, key, data[key] if key in data.keys() else fallback)

    def _set_and_notify(self, property_name: str, value) -> None:
        # Shared setter body: skip identical assignment, otherwise emit a
        # ChangeNotification carrying the old and new values.
        attribute = '_' + property_name
        current = getattr(self, attribute)
        if current is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name=property_name,
            old_value=current,
            new_value=value
        )
        setattr(self, attribute, value)
        self._notify(notification=notification)

    @property
    def display_name(self) -> str:
        """Label shown for this entity in the graph widget."""
        return self._display_name

    @display_name.setter
    def display_name(self, value: str) -> None:
        """Set the display label, notifying subscribers on change."""
        self._set_and_notify('display_name', value)

    @property
    def position(self) -> typing.Union[typing.Tuple[float, float], None]:
        """(x, y) position of the entity in the graph, or None when unset."""
        return self._position

    @position.setter
    def position(self, value: typing.Union[typing.Tuple[float, float], None]) -> None:
        """Set the graph position, notifying subscribers on change."""
        self._set_and_notify('position', value)

    @property
    def expansion_state(self) -> int:
        """One of GraphEntity.OPEN / MINIMIZED / CLOSED."""
        return self._expansion_state

    @expansion_state.setter
    def expansion_state(self, value: int) -> None:
        """Set the expansion state, notifying subscribers on change."""
        self._set_and_notify('expansion_state', value)

    @property
    def show_inputs(self) -> bool:
        """Whether input plugs are displayed."""
        return self._show_inputs

    @show_inputs.setter
    def show_inputs(self, value: bool) -> None:
        """Toggle input plug display, notifying subscribers on change."""
        self._set_and_notify('show_inputs', value)

    @property
    def show_outputs(self) -> bool:
        """Whether output plugs are displayed."""
        return self._show_outputs

    @show_outputs.setter
    def show_outputs(self, value: bool) -> None:
        """Toggle output plug display, notifying subscribers on change."""
        self._set_and_notify('show_outputs', value)

    @property
    def show_peripheral(self) -> bool:
        """Whether peripheral plugs are displayed."""
        return self._show_peripheral

    @show_peripheral.setter
    def show_peripheral(self, value: bool) -> None:
        """Toggle peripheral plug display, notifying subscribers on change."""
        self._set_and_notify('show_peripheral', value)
class Connection(Serializable):
    """Serializable link between a source plug id and a destination plug id."""

    def __init__(self):
        super(Connection, self).__init__()
        self._source_id = ''
        self._destination_id = ''

    def serialize(self) -> dict:
        """Write both endpoint ids on top of the base payload."""
        output = super(Connection, self).serialize()
        output['_source_id'] = self._source_id
        output['_destination_id'] = self._destination_id
        return output

    def deserialize(self, data: dict) -> None:
        """Read endpoint ids, defaulting to empty strings when absent."""
        super(Connection, self).deserialize(data=data)
        self._source_id = data.get('_source_id', '')
        self._destination_id = data.get('_destination_id', '')

    @property
    def source_id(self):
        """Id of the plug feeding this connection."""
        return self._source_id

    @property
    def destination_id(self):
        """Id of the plug receiving this connection."""
        return self._destination_id
class Plug(Base):
    """
    A Plug can be:
        a source
        an output
        both a source and an output
        a container for a static value - most likely as an output
        a container for an editable value - most likely as an output

    plug.default_value      Starting point and for resetting.
    plug.value              Apply as computed_value if there is no input or dependency providing a value.
    plug.computed_value     Final value. Could be thought of as plug.output_value.

    Plug is_dirty on:
        input connect
        input disconnect
        value change if not connected

    A Plug is_dirty if:
        it is_dirty
        its input is_dirty
        any dependency is_dirty
    """
    VALUE_TYPE_ANY = 'any'
    VALUE_TYPE_FLOAT = 'float'
    VALUE_TYPE_INTEGER = 'int'
    VALUE_TYPE_STRING = 'str'
    VALUE_TYPE_BOOLEAN = 'bool'
    VALUE_TYPE_NODE_ID = 'node_id'
    VALUE_TYPE_VECTOR2 = 'vector2'
    VALUE_TYPE_VECTOR3 = 'vector3'
    VALUE_TYPE_VECTOR4 = 'vector4'
    VALUE_TYPE_ENUM = 'enum'
    VALUE_TYPE_LIST = 'list'
    VALUE_TYPES = [
        VALUE_TYPE_ANY,
        VALUE_TYPE_FLOAT,
        VALUE_TYPE_INTEGER,
        VALUE_TYPE_STRING,
        VALUE_TYPE_BOOLEAN,
        VALUE_TYPE_NODE_ID,
        VALUE_TYPE_VECTOR2,
        VALUE_TYPE_VECTOR3,
        VALUE_TYPE_VECTOR4,
        VALUE_TYPE_ENUM,
        VALUE_TYPE_LIST,
    ]
    @classmethod
    def Create(
            cls,
            parent: DagNode,
            name: str,
            display_name: str,
            value_type: str = 'any',
            editable: bool = False,
            is_removable: bool = False,
    ) -> 'Plug':
        """Factory creating a fully described plug owned by *parent*."""
        instance = cls(parent=parent)
        instance._name = name
        instance._display_name = display_name
        instance._value_type = value_type
        instance._is_editable = editable
        instance._is_removable = is_removable
        return instance
    def __init__(self, parent: DagNode):
        """Create an empty plug owned by *parent* (the node it belongs to)."""
        super(Plug, self).__init__()
        self._parent: DagNode = parent
        self._name: str = ''
        self._display_name: str = ''
        self._value_type: str = Plug.VALUE_TYPE_ANY
        self._internal_value_type: str = Plug.VALUE_TYPE_ANY
        self._is_peripheral: bool = False
        self._is_editable: bool = False
        self._is_removable: bool = False
        self._default_value: typing.Any = None
        # _computed_value and _is_invalid are runtime state; see serialize().
        self._computed_value: typing.Any = None
        self._value: typing.Any = None
        self._is_invalid: bool = False
        # Upstream plug feeding this one, and downstream plugs fed by this one.
        self._input: typing.Union[Plug, typing.NoReturn] = None
        self._outputs: typing.List[Plug] = []
        self._enum_values: typing.List = []
    def serialize(self) -> dict:
        """Serialize persistent plug attributes.

        Runtime state (computed value, invalid flag, input/output wiring) is
        intentionally not persisted; connections are restored externally.
        """
        output = super(Plug, self).serialize()
        output['_name'] = self._name
        output['_display_name'] = self._display_name
        output['_value_type'] = self._value_type
        output['_internal_value_type'] = self._internal_value_type
        output['_is_peripheral'] = self._is_peripheral
        output['_is_editable'] = self._is_editable
        output['_is_removable'] = self._is_removable
        output['_default_value'] = self._default_value
        output['_value'] = self._value
        output['_enum_values'] = self._enum_values
        return output
    def deserialize(self, data: dict) -> None:
        """Restore persistent attributes; the input connection is reset and
        re-established by the owning assembly."""
        super(Plug, self).deserialize(data=data)
        self._input = None
        self._name = data['_name'] if '_name' in data.keys() else ''
        self._display_name = data['_display_name'] if '_display_name' in data.keys() else ''
        self._value_type = data['_value_type'] if '_value_type' in data.keys() else Plug.VALUE_TYPE_ANY
        # NOTE(review): fallback is None here although __init__ uses
        # VALUE_TYPE_ANY — confirm whether this asymmetry is intentional.
        self._internal_value_type = data['_internal_value_type'] if '_internal_value_type' in data.keys() else None
        self._is_peripheral = data['_is_peripheral'] if '_is_peripheral' in data.keys() else False
        self._is_editable = data['_is_editable'] if '_is_editable' in data.keys() else False
        self._is_removable = data['_is_removable'] if '_is_removable' in data.keys() else False
        self._default_value = data['_default_value'] if '_default_value' in data.keys() else None
        self._value = data['_value'] if '_value' in data.keys() else self._default_value
        self._enum_values = data['_enum_values'] if '_enum_values' in data.keys() else []
    def invalidate(self) -> None:
        """Mark this plug as needing recomputation and propagate to the parent node."""
        if self._is_invalid:
            return
        self._is_invalid = True
        if self.parent:
            self.parent.invalidate(self)
    @property
    def parent(self) -> DagNode:
        """The node that owns this plug."""
        return self._parent
    @property
    def name(self) -> str:
        """Internal plug name."""
        return self._name
    @name.setter
    def name(self, value: str) -> None:
        """Set the internal name, notifying subscribers on change."""
        if self._name is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='name',
            old_value=self._name,
            new_value=value
        )
        self._name = value
        self._notify(notification=notification)
    @property
    def display_name(self) -> str:
        """Human-readable plug name."""
        return self._display_name
    @display_name.setter
    def display_name(self, value: str) -> None:
        """Set the display name, notifying subscribers on change."""
        if self._display_name is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='display_name',
            old_value=self._display_name,
            new_value=value
        )
        self._display_name = value
        self._notify(notification=notification)
    @property
    def value_type(self) -> str:
        """One of Plug.VALUE_TYPES describing the exposed value."""
        return self._value_type
    @value_type.setter
    def value_type(self, value: str) -> None:
        """Set the value type, notifying subscribers on change."""
        if self._value_type is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='value_type',
            old_value=self._value_type,
            new_value=value
        )
        self._value_type = value
        self._notify(notification=notification)
    @property
    def internal_value_type(self) -> str:
        """Value type used internally by the owning application."""
        return self._internal_value_type
    @internal_value_type.setter
    def internal_value_type(self, value: str) -> None:
        """Set the internal value type, notifying subscribers on change."""
        if self._internal_value_type is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='internal_value_type',
            old_value=self._internal_value_type,
            new_value=value
        )
        self._internal_value_type = value
        self._notify(notification=notification)
    @property
    def is_removable(self) -> bool:
        """Whether DagNode.remove_plug may remove this plug."""
        return self._is_removable
    @property
    def is_peripheral(self) -> bool:
        """Whether this plug is displayed in the peripheral section."""
        return self._is_peripheral
    @is_peripheral.setter
    def is_peripheral(self, value: bool) -> None:
        """Set the peripheral flag, notifying subscribers on change."""
        if self._is_peripheral is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='is_peripheral',
            old_value=self._is_peripheral,
            new_value=value
        )
        self._is_peripheral = value
        self._notify(notification=notification)
    @property
    def computed_value(self) -> typing.Any:
        """Final value produced by the last compute pass."""
        return self._computed_value
    @computed_value.setter
    def computed_value(self, value: typing.Any) -> None:
        """Record the compute result; clears the invalid flag and syncs value.

        No notification is emitted when the same object is assigned again, and
        the assignment is rejected when the upstream input is still invalid.
        """
        # Same object assigned again: treat as a successful compute — clear the
        # invalid flag and sync _value, but emit no notification.
        if self._computed_value is value:
            self._is_invalid = False
            self._value = self._computed_value
            return
        notification = ChangeNotification(
            item=self,
            property_name='computed_value',
            old_value=self._computed_value,
            new_value=value
        )
        # Upstream still invalid after compute: warn and keep the previous
        # computed value (the invalid flag is also left untouched).
        if self._input and self._input.is_invalid:
            print('WARNING: Universal Material Map: Compute encountered an unexpected state: input invalid after compute. Results may be incorrect.')
            print('\tplug: "{0}"'.format(self.name))
            if self._parent:
                print('\tplug.parent: "{0}"'.format(self._parent.__class__.__name__))
            print('\tplug.input: "{0}"'.format(self._input.name))
            if self._input.parent:
                print('\tplug.input.parent: "{0}"'.format(self._input.parent.__class__.__name__))
            return
        self._is_invalid = False
        self._computed_value = value
        self._value = self._computed_value
        self._notify(notification=notification)
    @property
    def value(self) -> typing.Any:
        """Authored value; used as computed_value when no input is connected."""
        return self._value
    @value.setter
    def value(self, value: typing.Any) -> None:
        """Set the authored value; unconnected plugs are invalidated so they recompute."""
        if self._value is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='value',
            old_value=self._value,
            new_value=value
        )
        self._value = value
        self._notify(notification=notification)
        if self._input is None:
            self.invalidate()
    @property
    def is_invalid(self) -> typing.Any:
        """True when this plug — or its direct input's own flag — needs recompute.

        Checks the input's private flag directly (one level only, not the full
        upstream chain).
        """
        if self._input and self._input._is_invalid:
            return True
        return self._is_invalid
    @property
    def input(self) -> typing.Union['Plug', typing.NoReturn]:
        """Upstream plug feeding this one, or None."""
        return self._input
    @input.setter
    def input(self, value: typing.Union['Plug', typing.NoReturn]) -> None:
        """Connect/disconnect the upstream plug; notifies and then invalidates."""
        if self._input is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='input',
            old_value=self._input,
            new_value=value
        )
        self._input = value
        self._notify(notification=notification)
        self.invalidate()
    @property
    def outputs(self) -> typing.List['Plug']:
        """Downstream plugs fed by this plug."""
        return self._outputs
    @property
    def is_editable(self) -> bool:
        """Whether the UI allows the user to edit this plug's value."""
        return self._is_editable
    @is_editable.setter
    def is_editable(self, value: bool) -> None:
        """Set the editable flag, notifying subscribers on change."""
        if self._is_editable is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='is_editable',
            old_value=self._is_editable,
            new_value=value
        )
        self._is_editable = value
        self._notify(notification=notification)
    @property
    def default_value(self) -> typing.Any:
        """Starting value used for resets."""
        return self._default_value
    @default_value.setter
    def default_value(self, value: typing.Any) -> None:
        """Set the default value, notifying subscribers on change."""
        if self._default_value is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='default_value',
            old_value=self._default_value,
            new_value=value
        )
        self._default_value = value
        self._notify(notification=notification)
    @property
    def enum_values(self) -> typing.List:
        """Allowed values when value_type is VALUE_TYPE_ENUM."""
        return self._enum_values
    @enum_values.setter
    def enum_values(self, value: typing.List) -> None:
        """Set the enum choices, notifying subscribers on change."""
        if self._enum_values is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='enum_values',
            old_value=self._enum_values,
            new_value=value
        )
        self._enum_values = value
        self._notify(notification=notification)
class Node(DagNode):
    """DagNode identified by the class name of the DCC node it mirrors."""

    @classmethod
    def Create(cls, class_name: str) -> 'Node':
        """Alternate constructor that pre-populates the class name."""
        node = super(Node, cls).Create()
        node._class_name = class_name
        return node

    def __init__(self):
        super(Node, self).__init__()
        self._class_name: str = ''

    def serialize(self) -> dict:
        """Append the class name to the serialized payload."""
        output = super(Node, self).serialize()
        output['_class_name'] = self._class_name
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the class name, defaulting to an empty string."""
        super(Node, self).deserialize(data=data)
        self._class_name = data.get('_class_name', '')

    @property
    def class_name(self):
        """Name of the DCC node class this data node represents."""
        return self._class_name
class Client(Serializable):
    """Identifies a DCC application (name + version) that an assembly supports."""
    ANY_VERSION = 'any'
    NO_VERSION = 'none'
    DCC_OMNIVERSE_CREATE = 'Omniverse Create'
    DCC_3DS_MAX = '3ds MAX'
    DCC_MAYA = 'Maya'
    DCC_HOUDINI = 'Houdini'
    DCC_SUBSTANCE_DESIGNER = 'Substance Designer'
    DCC_SUBSTANCE_PAINTER = 'Substance Painter'
    DCC_BLENDER = 'Blender'

    @classmethod
    def Autodesk_3dsMax(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a 3ds MAX client descriptor."""
        instance = Client()
        instance._name = Client.DCC_3DS_MAX
        instance._version = version
        return instance

    @classmethod
    def Autodesk_Maya(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a Maya client descriptor."""
        instance = Client()
        instance._name = Client.DCC_MAYA
        instance._version = version
        return instance

    @classmethod
    def OmniverseCreate(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for an Omniverse Create client descriptor."""
        instance = Client()
        instance._name = Client.DCC_OMNIVERSE_CREATE
        instance._version = version
        return instance

    @classmethod
    def Blender(cls, version: str = ANY_VERSION) -> 'Client':
        """Factory for a Blender client descriptor."""
        instance = Client()
        instance._name = Client.DCC_BLENDER
        instance._version = version
        return instance

    def __init__(self):
        super(Client, self).__init__()
        self._name: str = ''
        self._version: str = ''

    def __eq__(self, other: 'Client') -> bool:
        """Equal when the other Client has the same name and version."""
        if not isinstance(other, Client):
            return False
        return other.name == self._name and other.version == self._version

    def __hash__(self) -> int:
        # FIX: defining __eq__ without __hash__ made Client unhashable
        # (Python sets __hash__ to None); hash the same fields __eq__ compares.
        return hash((self._name, self._version))

    def is_compatible(self, other: 'Client') -> bool:
        """True when *other* equals this client or either side accepts any version.

        NOTE(review): when either version is ANY_VERSION this returns True even
        if the client *names* differ — confirm cross-application matches are
        intended.
        """
        if not isinstance(other, Client):
            return False
        if other == self:
            return True
        return other._version == Client.ANY_VERSION or self._version == Client.ANY_VERSION

    def serialize(self) -> dict:
        """Serialize name and version on top of the base payload."""
        output = super(Client, self).serialize()
        output['_name'] = self._name
        output['_version'] = self._version
        return output

    def deserialize(self, data: dict) -> None:
        """Restore name and version, defaulting to empty strings."""
        super(Client, self).deserialize(data=data)
        self._name = data['_name'] if '_name' in data.keys() else ''
        self._version = data['_version'] if '_version' in data.keys() else ''

    @property
    def name(self) -> str:
        """Application name (one of the DCC_* constants, typically)."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def version(self) -> str:
        """Application version string, or ANY_VERSION / NO_VERSION."""
        return self._version

    @version.setter
    def version(self, value: str) -> None:
        self._version = value
class AssemblyMetadata(Serializable):
    """Descriptive metadata for an assembly: category, name, keywords and supported clients."""
    CATEGORY_BASE = 'Base Materials'
    CATEGORY_CONNECTOR = 'Connector Materials'
    CATEGORIES = [
        CATEGORY_BASE,
        CATEGORY_CONNECTOR,
    ]

    def __init__(self):
        super(AssemblyMetadata, self).__init__()
        self._category = ''
        self._name = ''
        self._keywords: typing.List[str] = []
        self._supported_clients: typing.List[Client] = []

    def serialize(self) -> dict:
        """Serialize all metadata fields on top of the base payload."""
        output = super(AssemblyMetadata, self).serialize()
        output['_category'] = self._category
        output['_name'] = self._name
        output['_keywords'] = self._keywords
        output['_supported_clients'] = [o.serialize() for o in self._supported_clients]
        return output

    def deserialize(self, data: dict) -> None:
        """Restore metadata fields, using empty defaults for missing keys."""
        super(AssemblyMetadata, self).deserialize(data=data)
        self._category = data['_category'] if '_category' in data.keys() else ''
        self._name = data['_name'] if '_name' in data.keys() else ''
        # BUG FIX: the fallback used to be '' although _keywords is a list of strings.
        self._keywords = data['_keywords'] if '_keywords' in data.keys() else []
        items = []
        if '_supported_clients' in data.keys():
            for o in data['_supported_clients']:
                item = Client()
                item.deserialize(data=o)
                items.append(item)
        self._supported_clients = items

    @property
    def category(self) -> str:
        """One of AssemblyMetadata.CATEGORIES."""
        return self._category

    @category.setter
    def category(self, value: str) -> None:
        self._category = value

    @property
    def name(self) -> str:
        """Human-readable assembly name."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def keywords(self) -> typing.List[str]:
        """Search keywords associated with the assembly."""
        return self._keywords

    @keywords.setter
    def keywords(self, value: typing.List[str]) -> None:
        self._keywords = value

    @property
    def supported_clients(self) -> typing.List[Client]:
        """Clients (DCC applications) this assembly supports."""
        return self._supported_clients
class Target(GraphEntity):
    """A shading-graph assembly: nodes, plug connections, metadata and a root node."""
    def __init__(self):
        super(Target, self).__init__()
        self._nodes: typing.List[Node] = []
        self._metadata: AssemblyMetadata = AssemblyMetadata()
        self._root_node_id: str = ''
        self._root_node: Node = None
        self._revision: int = 0
        # Runtime-only: not written by serialize(); identifies the backing store.
        self._store_id: str = ''
        self._connections: typing.List[Connection] = []
    def serialize(self) -> dict:
        """Serialize nodes, metadata, root node id, revision and connections."""
        output = super(Target, self).serialize()
        output['_nodes'] = [node.serialize() for node in self.nodes]
        output['_metadata'] = self._metadata.serialize()
        output['_root_node_id'] = self._root_node_id
        output['_revision'] = self._revision
        output['_connections'] = [o.serialize() for o in self._connections]
        return output
    def deserialize(self, data: dict) -> None:
        """Rebuild nodes, metadata and connections, then re-link plugs pairwise."""
        super(Target, self).deserialize(data=data)
        self._root_node_id = data['_root_node_id'] if '_root_node_id' in data.keys() else ''
        nodes = []
        if '_nodes' in data.keys():
            for o in data['_nodes']:
                node = Node()
                node.deserialize(data=o)
                nodes.append(node)
        self._nodes = nodes
        # Resolve the root node reference by id, if one was stored.
        root_node = None
        if self._root_node_id:
            for node in self._nodes:
                if node.id == self._root_node_id:
                    root_node = node
                    break
        self._root_node = root_node
        metadata = AssemblyMetadata()
        if '_metadata' in data.keys():
            metadata.deserialize(data=data['_metadata'])
        self._metadata = metadata
        self._revision = data['_revision'] if '_revision' in data.keys() else 0
        items = []
        if '_connections' in data.keys():
            for o in data['_connections']:
                item = Connection()
                item.deserialize(data=o)
                items.append(item)
        self._connections = items
        # Re-establish plug wiring from the deserialized connections.
        # NOTE(review): the match is direction-agnostic — within each node either
        # endpoint id of a connection selects input_plug (from node.inputs) and
        # output_plug (from node.outputs). Confirm this symmetry is intentional.
        for connection in self._connections:
            input_plug: Plug = None
            output_plug: Plug = None
            for node in self._nodes:
                for plug in node.inputs:
                    if connection.source_id == plug.id:
                        input_plug = plug
                    elif connection.destination_id == plug.id:
                        input_plug = plug
                for plug in node.outputs:
                    if connection.source_id == plug.id:
                        output_plug = plug
                    elif connection.destination_id == plug.id:
                        output_plug = plug
                if input_plug is not None and output_plug is not None:
                    break
            if input_plug is None or output_plug is None:
                continue
            # Wire: input_plug feeds output_plug (mirrors connect()).
            if output_plug not in input_plug.outputs:
                input_plug.outputs.append(output_plug)
            output_plug.input = input_plug
    def connect(self, source: Plug, destination: Plug) -> None:
        """Create (or reuse) a connection from *source* to *destination* and wire the plugs."""
        for connection in self._connections:
            if connection.source_id == source.id and connection.destination_id == destination.id:
                return
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source
    @property
    def nodes(self) -> typing.List[Node]:
        """Nodes belonging to this assembly."""
        return self._nodes
    @property
    def metadata(self) -> AssemblyMetadata:
        """Descriptive metadata for this assembly."""
        return self._metadata
    @property
    def root_node(self) -> Node:
        """The node treated as the assembly's entry point, or None."""
        return self._root_node
    @root_node.setter
    def root_node(self, value: Node) -> None:
        """Set the root node and keep the serialized id in sync."""
        self._root_node = value
        self._root_node_id = self._root_node.id if self._root_node else ''
    @property
    def revision(self) -> int:
        """Revision counter for this assembly."""
        return self._revision
    @revision.setter
    def revision(self, value: int) -> None:
        self._revision = value
    @property
    def store_id(self) -> str:
        """Runtime-only id of the owning store; changes emit a ChangeNotification."""
        return self._store_id
    @store_id.setter
    def store_id(self, value: int) -> None:
        if self._store_id is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='store_id',
            old_value=self._store_id,
            new_value=value
        )
        self._store_id = value
        self._notify(notification=notification)
class TargetInstance(GraphEntity):
    """Graph entity whose plugs are derived from a referenced Target assembly."""

    @classmethod
    def FromAssembly(cls, assembly: Target) -> 'TargetInstance':
        """Create an instance bound to *assembly*, copying its display name."""
        instance = cls()
        instance._target_id = assembly.id
        instance.target = assembly
        instance.display_name = assembly.display_name
        return instance

    def __init__(self):
        super(TargetInstance, self).__init__()
        self._target_id: str = ''
        self._target: typing.Union[Target, typing.NoReturn] = None
        self._is_setting_target = False

    def serialize(self) -> dict:
        """Serialize graph-entity state; plugs are emptied because they are
        derived from the referenced target rather than persisted.
        """
        # FIX: the previous implementation called super().serialize(), discarded
        # the result, then serialized a second time via GraphEntity.serialize(self).
        # One super call produces the identical payload.
        output = super(TargetInstance, self).serialize()
        output['_target_id'] = self._target_id
        output['_inputs'] = []
        output['_outputs'] = []
        return output

    def deserialize(self, data: dict) -> None:
        """
        Does not invoke super on DagNode base class because inputs and outputs are derived from assembly instance.
        """
        data['_inputs'] = []
        data['_outputs'] = []
        GraphEntity.deserialize(self, data=data)
        self._target_id = data['_target_id'] if '_target_id' in data.keys() else ''

    def invalidate(self, plug: 'Plug' = None):
        """
        Invalidate any plug that is a destination of an output plug named plug.name.
        """
        # If a destination is invalidated it is assumed compute will be invoked
        # once a destination endpoint has been found.
        do_compute = True
        output: Plug
        destination: Plug
        for output in self.outputs:
            if not plug or output.name == plug.name:
                for destination in output.outputs:
                    destination.invalidate()
                    do_compute = False
        if do_compute:
            self.compute()

    @property
    def target_id(self) -> str:
        """Id of the Target assembly this instance references."""
        return self._target_id

    @property
    def target(self) -> typing.Union[Target, typing.NoReturn]:
        """The referenced Target assembly, or None when unresolved."""
        return self._target

    @target.setter
    def target(self, value: typing.Union[Target, typing.NoReturn]) -> None:
        """Bind the Target assembly and rebuild this instance's plugs from its nodes.

        Raises:
            Exception: when the stored target id and the assembly's id disagree.
        """
        if self._target is value:
            return
        if not self._target_id and value:
            raise Exception('Target ID "" does not match assembly instance "{0}".'.format(value.id))
        if self._target_id and not value:
            raise Exception('Target ID "{0}" does not match assembly instance "None".'.format(self._target_id))
        if self._target_id and value and not self._target_id == value.id:
            raise Exception('Target ID "{0}" does not match assembly instance "{1}".'.format(self._target_id, value.id))
        self._is_setting_target = True
        notification = ChangeNotification(
            item=self,
            property_name='target',
            old_value=self._target,
            new_value=value
        )
        self._target = value
        self._inputs = []
        self._outputs = []
        if self._target:
            # Synthetic output exposing the target's id to the graph.
            node_id_plug = Plug.Create(
                parent=self,
                name='node_id_output',
                display_name='Node Id',
                value_type=Plug.VALUE_TYPE_STRING
            )
            node_id_plug._id = self._target.id
            node_id_plug.value = self._target.id
            self._outputs.append(node_id_plug)
            # Clone every plug of every target node by round-tripping through
            # serialization so this instance owns independent copies.
            for node in self._target.nodes:
                for o in node.inputs:
                    plug = Plug(parent=self)
                    plug.deserialize(data=o.serialize())
                    self._inputs.append(plug)
                for o in node.outputs:
                    plug = Plug(parent=self)
                    plug.deserialize(data=o.serialize())
                    self._outputs.append(plug)
        self._is_setting_target = False
        self._notify(notification=notification)
        self.invalidate()
class Operator(Base):
def __init__(
        self,
        id: str,
        name: str,
        required_inputs: int,
        min_inputs: int,
        max_inputs: int,
        num_outputs: int,
):
    """Define the operator's identity and plug arity.

    Args:
        id: Stable identifier; overwrites the uuid assigned by Base.__init__.
        name: Human-readable operator name.
        required_inputs: Minimum number of input plugs compute() demands.
        min_inputs: Lower bound of supported inputs.
        max_inputs: Upper bound of supported inputs.
        num_outputs: Number of output plugs this operator produces.
    """
    super(Operator, self).__init__()
    # NOTE(review): replaces the generated uuid from Base with the caller-supplied
    # id — presumably a fixed registry id; confirm.
    self._id = id
    self._name: str = name
    self._required_inputs: int = required_inputs
    self._min_inputs: int = min_inputs
    self._max_inputs: int = max_inputs
    self._num_outputs: int = num_outputs
    # Re-entrancy guard used by compute().
    self._computing: bool = False
def compute(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
"""
Base class only computes input_plugs. It is assumed that extending class computes output plugs.
"""
if self._computing:
return
self._computing = True
if len(input_plugs) < self._required_inputs:
raise Exception('Array of inputs not of required length "{0}". Actual length "{1}". Operator: "{2}"'.format(self._required_inputs, len(input_plugs), self.__class__.__name__))
for plug in input_plugs:
if plug.input:
if plug.input in input_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another input on the same node.'.format(plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
if plug.input in output_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another output on the same node.'.format(
plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
for plug in output_plugs:
if plug.input:
if plug.input in output_plugs:
print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.')
print('\tInput {0}.{1} is dependent on another output on the same node.'.format(
plug.parent.display_name, plug.name))
print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name))
print('\tThis is not supported.')
print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.')
self._computing = False
return
self._compute_inputs(input_plugs=input_plugs)
self._compute_outputs(input_plugs=input_plugs, output_plugs=output_plugs)
self._computing = False
def _compute_inputs(self, input_plugs: typing.List[Plug]):
# Compute dependencies
for plug in input_plugs:
if not plug.input:
continue
if not plug.input.parent:
continue
if not plug.input.is_invalid:
continue
plug.input.parent.compute()
# Set computed_value
for plug in input_plugs:
if plug.input:
plug.computed_value = plug.input.computed_value
else:
plug.computed_value = plug.value
def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
raise NotImplementedError(self.__class__)
def generate_input(self, parent: 'DagNode', index: int) -> Plug:
"""
Base class provides method template but does nothing.
"""
pass
def generate_output(self, parent: 'DagNode', index: int) -> Plug:
"""
Base class provides method template but does nothing.
"""
pass
def test(self) -> None:
parent = OperatorInstance()
inputs = []
while len(inputs) < self.min_inputs:
inputs.append(
self.generate_input(parent=parent, index=len(inputs))
)
outputs = []
while len(outputs) < self.num_outputs:
outputs.append(
self.generate_output(parent=parent, index=len(outputs))
)
self._prepare_plugs_for_test(input_plugs=inputs, output_plugs=outputs)
self._perform_test(input_plugs=inputs, output_plugs=outputs)
self._assert_test(input_plugs=inputs, output_plugs=outputs)
def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
pass
def _perform_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
self.compute(input_plugs=input_plugs, output_plugs=output_plugs)
def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
raise NotImplementedError()
def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
if not plug.is_removable:
raise Exception('Plug is not removable')
notifications = []
if plug in operator_instance._inputs:
old_value = operator_instance._inputs[:]
operator_instance._inputs.remove(plug)
operator_instance._unsubscribe(notifying=plug)
notifications.append(
ChangeNotification(
item=operator_instance,
property_name='inputs',
old_value=old_value,
new_value=operator_instance._inputs[:]
)
)
if plug in operator_instance._outputs:
old_value = operator_instance._outputs[:]
operator_instance._outputs.remove(plug)
operator_instance._unsubscribe(notifying=plug)
notifications.append(
ChangeNotification(
item=operator_instance,
property_name='outputs',
old_value=old_value,
new_value=operator_instance._outputs[:]
)
)
destination: Plug
for destination in plug.outputs:
destination.input = None
for notification in notifications:
for callback in operator_instance._changed_callbacks.values():
callback(notification)
@property
def name(self) -> str:
return self._name
@property
def min_inputs(self) -> int:
return self._min_inputs
@property
def max_inputs(self) -> int:
return self._max_inputs
@property
def required_inputs(self) -> int:
return self._required_inputs
@property
def num_outputs(self) -> int:
return self._num_outputs
class GraphOutput(Operator):
    """
    Output resolves to a node id.
    """

    def __init__(self):
        super(GraphOutput, self).__init__(
            id='5f39ab48-5bee-46fe-9a22-0f678013568e',
            name='Graph Output',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Guard clause: this operator has exactly one input slot.
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(
            parent=parent,
            name='input_node_id',
            display_name='Node Id',
            value_type=Plug.VALUE_TYPE_NODE_ID
        )

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        # Guard clause: this operator has exactly one output slot.
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(
            parent=parent,
            name='output_node_id',
            display_name='Node Id',
            value_type=Plug.VALUE_TYPE_NODE_ID
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Pass the already-computed input straight through.
        output_plugs[0].computed_value = input_plugs[0].computed_value

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Seed the single input with this operator's own id.
        input_plugs[0].computed_value = self.id

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Every output must echo the seeded id back.
        for plug in output_plugs:
            if not plug.computed_value == self.id:
                raise Exception('Test failed.')
class OperatorInstance(GraphEntity):
    """
    A graph node that executes an Operator definition over its own plugs.
    """

    @classmethod
    def FromOperator(cls, operator: Operator) -> 'OperatorInstance':
        """Factory: build an instance pre-populated with the operator's default plugs."""
        instance = OperatorInstance()
        # Suppress invalidation/notification while plugs are generated.
        instance._is_deserializing = True
        instance._operator = operator
        instance._display_name = operator.name
        while len(instance._inputs) < operator.min_inputs:
            instance._inputs.append(
                operator.generate_input(parent=instance, index=len(instance._inputs))
            )
        while len(instance._outputs) < operator.num_outputs:
            instance._outputs.append(
                operator.generate_output(parent=instance, index=len(instance._outputs))
            )
        # Stored so deserialize() can re-instantiate the operator dynamically.
        instance._operator_module = operator.__class__.__module__
        instance._operator_class_name = operator.__class__.__name__
        instance._is_deserializing = False
        instance.invalidate()
        return instance

    def __init__(self):
        super(OperatorInstance, self).__init__()
        # Free-form user description.
        self._description: str = ''
        # Module / class used to re-instantiate the operator on deserialize.
        self._operator_module: str = ''
        self._operator_class_name: str = ''
        self._operator: Operator = None
        # Guard flag: True while plugs are being (de)serialized.
        self._is_deserializing = False

    def serialize(self) -> dict:
        """Serialize to a dict; the operator itself is stored by module/class name."""
        output = super(OperatorInstance, self).serialize()
        output['_description'] = self._description
        output['_operator_module'] = self._operator_module
        output['_operator_class_name'] = self._operator_class_name
        return output

    def deserialize(self, data: dict) -> None:
        """Restore state and dynamically re-instantiate the Operator.

        Raises:
            Exception: if the operator module or class name is missing.
        """
        self._is_deserializing = True
        super(OperatorInstance, self).deserialize(data=data)
        self._description = data['_description'] if '_description' in data.keys() else ''
        self._operator_module = data['_operator_module'] if '_operator_module' in data.keys() else ''
        self._operator_class_name = data['_operator_class_name'] if '_operator_class_name' in data.keys() else ''
        if not self._operator_module:
            raise Exception('Unexpected data: no valid "operator module" defined')
        if not self._operator_class_name:
            raise Exception('Unexpected data: no valid "operator class name" defined')
        # Import the operator's module on demand, then instantiate by name.
        if self._operator_module not in sys.modules.keys():
            importlib.import_module(self._operator_module)
        module_pointer = sys.modules[self._operator_module]
        class_pointer = module_pointer.__dict__[self._operator_class_name]
        self._operator = typing.cast(Operator, class_pointer())
        # Top up plugs that were not present in the serialized data.
        notifying = []
        while len(self._inputs) < self._operator.min_inputs:
            plug = self._operator.generate_input(parent=self, index=len(self._inputs))
            self._inputs.append(plug)
            notifying.append(plug)
        while len(self._outputs) < self._operator.num_outputs:
            plug = self._operator.generate_output(parent=self, index=len(self._outputs))
            self._outputs.append(plug)
            notifying.append(plug)
        self._is_deserializing = False
        # Subscribe only after the guard flag is cleared.
        for o in notifying:
            self._subscribe(notifying=o)
        self.invalidate()

    def invalidate(self, plug: 'Plug' = None):
        """
        Because one plug changed we assume any connected plug to any output needs to be invalidated.
        """
        if self._is_deserializing:
            return
        # Set all outputs to invalid
        output: Plug
        for output in self.outputs:
            output._is_invalid = True
        # If a destination is invalidated it is assumed compute will be invoked once a destination endpoint has been found
        do_compute = True
        destination: Plug
        for output in self.outputs:
            for destination in output.outputs:
                destination.invalidate()
                do_compute = False
        if do_compute:
            self.compute()

    def compute(self) -> None:
        """Delegate computation to the operator, if one is set."""
        if self._operator:
            self._operator.compute(input_plugs=self._inputs, output_plugs=self._outputs)

    def add_input(self) -> Plug:
        """Append a new input plug, notify listeners, and invalidate outputs.

        Raises:
            Exception: if can_add_input is False.
        """
        if not self.can_add_input:
            raise Exception('Cannot add another input.')
        old_value = self._inputs[:]
        plug = self._operator.generate_input(parent=self, index=len(self._inputs))
        self._inputs.append(plug)
        self._subscribe(notifying=plug)
        notification = ChangeNotification(
            item=self,
            property_name='inputs',
            old_value=old_value,
            new_value=self._inputs[:]
        )
        self._notify(notification=notification)
        for o in self.outputs:
            o.invalidate()
        return plug

    def remove_plug(self, plug: 'Plug') -> None:
        """Delegate plug removal to the operator definition."""
        self._operator.remove_plug(operator_instance=self, plug=plug)

    @property
    def operator(self) -> Operator:
        """The Operator definition this instance executes."""
        return self._operator

    @property
    def description(self) -> str:
        """Free-form user description."""
        return self._description

    @description.setter
    def description(self, value: str) -> None:
        if self._description is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='description',
            old_value=self._description,
            new_value=value
        )
        self._description = value
        self._notify(notification=notification)

    @DagNode.can_add_input.getter
    def can_add_input(self) -> bool:
        """True if another input plug may be added.

        NOTE(review): compares against ``max_inputs - 1`` rather than
        ``max_inputs`` — possible off-by-one; confirm the intended capacity.
        """
        if self._operator.max_inputs == -1:
            return True
        return len(self._inputs) < self._operator.max_inputs - 1
class StyleInfo(object):
    """Read-only bundle of node-styling values: colors plus a footer icon."""

    def __init__(
            self,
            name: str,
            background_color: int,
            border_color: int,
            connection_color: int,
            node_background_color: int,
            footer_icon_filename: str,
    ):
        super(StyleInfo, self).__init__()
        # Store everything under the matching private attribute.
        for attribute, value in (
            ('_name', name),
            ('_background_color', background_color),
            ('_border_color', border_color),
            ('_connection_color', connection_color),
            ('_node_background_color', node_background_color),
            ('_footer_icon_filename', footer_icon_filename),
        ):
            setattr(self, attribute, value)

    @property
    def name(self) -> str:
        """Style identifier."""
        return self._name

    @property
    def background_color(self) -> int:
        """Background color as 0xAARRGGBB."""
        return self._background_color

    @property
    def border_color(self) -> int:
        """Border color as 0xAARRGGBB."""
        return self._border_color

    @property
    def connection_color(self) -> int:
        """Connection color as 0xAARRGGBB."""
        return self._connection_color

    @property
    def node_background_color(self) -> int:
        """Node body color as 0xAARRGGBB."""
        return self._node_background_color

    @property
    def footer_icon_filename(self) -> str:
        """Filename of the footer icon resource."""
        return self._footer_icon_filename
class ConversionGraph(Base):
    """
    A graph converting a source material node into one or more targets.

    Holds TargetInstance and OperatorInstance nodes plus the Connection
    objects wiring their plugs together. A reserved GraphOutput operator
    instance determines which target the graph resolves to.
    """

    # STYLE_OUTPUT: StyleInfo = StyleInfo(
    #     name='output',
    #     background_color=0xFF2E2E2E,
    #     border_color=0xFFB97E9C,
    #     connection_color=0xFF80C26F,
    #     node_background_color=0xFF444444,
    #     footer_icon_filename='Material.svg'
    # )
    STYLE_SOURCE_NODE: StyleInfo = StyleInfo(
        name='source_node',
        background_color=0xFF2E2E2E,
        border_color=0xFFE5AAC8,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_ASSEMBLY_REFERENCE: StyleInfo = StyleInfo(
        name='assembly_reference',
        background_color=0xFF2E2E2E,
        border_color=0xFFB97E9C,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_OPERATOR_INSTANCE: StyleInfo = StyleInfo(
        name='operator_instance',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_VALUE_RESOLVER: StyleInfo = StyleInfo(
        name='value_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='value_resolver.svg'
    )
    STYLE_BOOLEAN_SWITCH: StyleInfo = StyleInfo(
        name='boolean_switch',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='boolean_switch.svg'
    )
    STYLE_CONSTANT_BOOLEAN: StyleInfo = StyleInfo(
        name='constant_boolean',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_boolean.svg'
    )
    STYLE_CONSTANT_COLOR: StyleInfo = StyleInfo(
        name='constant_color',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_CONSTANT_FLOAT: StyleInfo = StyleInfo(
        name='constant_float',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_float.svg'
    )
    STYLE_CONSTANT_INTEGER: StyleInfo = StyleInfo(
        name='constant_integer',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_integer.svg'
    )
    STYLE_CONSTANT_STRING: StyleInfo = StyleInfo(
        name='constant_string',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_string.svg'
    )
    STYLE_EQUAL: StyleInfo = StyleInfo(
        name='equal',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='equal.svg'
    )
    STYLE_GREATER_THAN: StyleInfo = StyleInfo(
        name='greater_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='greater_than.svg'
    )
    STYLE_LESS_THAN: StyleInfo = StyleInfo(
        name='less_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='less_than.svg'
    )
    STYLE_MERGE_RGB: StyleInfo = StyleInfo(
        name='merge_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='merge_rgb.svg'
    )
    STYLE_NOT: StyleInfo = StyleInfo(
        name='not',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='not.svg'
    )
    STYLE_OR: StyleInfo = StyleInfo(
        name='or',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='or.svg'
    )
    STYLE_SPLIT_RGB: StyleInfo = StyleInfo(
        name='split_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='split_rgb.svg'
    )
    STYLE_TRANSPARENCY_RESOLVER: StyleInfo = StyleInfo(
        name='transparency_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='transparency_resolver.svg'
    )
    STYLE_OUTPUT: StyleInfo = StyleInfo(
        name='output',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='output.svg'
    )
    STYLE_INFOS = (
        STYLE_OUTPUT,
        STYLE_SOURCE_NODE,
        STYLE_ASSEMBLY_REFERENCE,
        STYLE_OPERATOR_INSTANCE,
        STYLE_VALUE_RESOLVER,
        STYLE_BOOLEAN_SWITCH,
        STYLE_CONSTANT_BOOLEAN,
        STYLE_CONSTANT_COLOR,
        STYLE_CONSTANT_FLOAT,
        STYLE_CONSTANT_INTEGER,
        STYLE_CONSTANT_STRING,
        STYLE_EQUAL,
        STYLE_GREATER_THAN,
        STYLE_LESS_THAN,
        STYLE_NOT,
        STYLE_OR,
        STYLE_SPLIT_RGB,
        STYLE_TRANSPARENCY_RESOLVER,
        STYLE_MERGE_RGB,
    )
    # Maps Operator subclass names to the StyleInfo used to draw them.
    # Replaces the former 16-branch if-chain in get_object_style_name().
    _OPERATOR_STYLE_BY_CLASS_NAME = {
        'ConstantBoolean': STYLE_CONSTANT_BOOLEAN,
        'ConstantColor': STYLE_CONSTANT_COLOR,
        'ConstantFloat': STYLE_CONSTANT_FLOAT,
        'ConstantInteger': STYLE_CONSTANT_INTEGER,
        'ConstantString': STYLE_CONSTANT_STRING,
        'BooleanSwitch': STYLE_BOOLEAN_SWITCH,
        'ValueResolver': STYLE_VALUE_RESOLVER,
        'SplitRGB': STYLE_SPLIT_RGB,
        'MergeRGB': STYLE_MERGE_RGB,
        'LessThan': STYLE_LESS_THAN,
        'GreaterThan': STYLE_GREATER_THAN,
        'Or': STYLE_OR,
        'Equal': STYLE_EQUAL,
        'Not': STYLE_NOT,
        'MayaTransparencyResolver': STYLE_TRANSPARENCY_RESOLVER,
        'GraphOutput': STYLE_OUTPUT,
    }

    def __init__(self):
        super(ConversionGraph, self).__init__()
        # The reserved output node; always present in _operator_instances.
        self._graph_output: OperatorInstance = OperatorInstance.FromOperator(operator=GraphOutput())
        self._target_instances: typing.List[TargetInstance] = []
        self._operator_instances: typing.List[OperatorInstance] = [self._graph_output]
        self._connections: typing.List[Connection] = []
        self._library: Library = None
        self._source_node_id: str = ''
        self._source_node: TargetInstance = None
        self._filename: str = ''
        self._exists_on_disk: bool = False
        self._revision: int = 0

    def _on_notification(self, notification: ChangeNotification) -> None:
        """Re-broadcast child notifications so graph listeners see them."""
        if notification.item == self:
            return
        # Re-broadcast notification
        self._notify(notification=notification)

    def serialize(self) -> dict:
        """Serialize graph nodes, connections, source node id and revision."""
        output = super(ConversionGraph, self).serialize()
        output['_target_instances'] = [o.serialize() for o in self._target_instances]
        output['_operator_instances'] = [o.serialize() for o in self._operator_instances]
        output['_connections'] = [o.serialize() for o in self._connections]
        output['_source_node_id'] = self._source_node_id
        output['_revision'] = self._revision
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the graph, collecting ChangeNotifications for every
        property that actually changed and emitting them at the end."""
        super(ConversionGraph, self).deserialize(data=data)
        notifications = []
        # _source_node_id
        old = self._source_node_id
        new = data['_source_node_id'] if '_source_node_id' in data.keys() else ''
        if not old == new:
            self._source_node_id = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='source_node_id',
                    old_value=old,
                    new_value=new
                )
            )
        # _revision
        old = self._revision
        new = data['_revision'] if '_revision' in data.keys() else 0
        if not old == new:
            self._revision = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='revision',
                    old_value=old,
                    new_value=new
                )
            )
        # _target_instances
        old = self._target_instances[:]
        while len(self._target_instances):
            self._unsubscribe(notifying=self._target_instances.pop())
        items = []
        if '_target_instances' in data.keys():
            for o in data['_target_instances']:
                item = TargetInstance()
                item.deserialize(data=o)
                items.append(item)
        self._target_instances = items
        if not self._target_instances == old:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='target_instances',
                    old_value=old,
                    new_value=self._target_instances
                )
            )
        # _source_node
        old = self._source_node
        source_node = None
        if self._source_node_id:
            items = [o for o in self._target_instances if o.id == self._source_node_id]
            source_node = items[0] if len(items) else None
        self._source_node = source_node
        if not self._source_node == old:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='source_node',
                    old_value=old,
                    new_value=self._source_node
                )
            )
        # _operator_instances
        # _graph_output
        # NOTE(review): old operator instances are not unsubscribed here
        # (unlike target instances above) — confirm whether that is intended.
        old_operator_instances = self._operator_instances
        old_graph_output = self._graph_output
        items = []
        self._graph_output = None
        if '_operator_instances' in data.keys():
            for o in data['_operator_instances']:
                item = OperatorInstance()
                item.deserialize(data=o)
                items.append(item)
                if isinstance(item.operator, GraphOutput):
                    self._graph_output = item
        # Guarantee the reserved output node exists and is first in the list.
        if not self._graph_output:
            self._graph_output = OperatorInstance.FromOperator(operator=GraphOutput())
            items.insert(0, self._graph_output)
        self._operator_instances = items
        if not self._operator_instances == old_operator_instances:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='operator_instances',
                    old_value=old_operator_instances,
                    new_value=self._operator_instances
                )
            )
        if not self._graph_output == old_graph_output:
            # FIX: was property_name='old_graph_output' with
            # old_value=old_operator_instances (copy-paste error).
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='graph_output',
                    old_value=old_graph_output,
                    new_value=self._graph_output
                )
            )
        items = []
        if '_connections' in data.keys():
            for o in data['_connections']:
                item = Connection()
                item.deserialize(data=o)
                items.append(item)
        self._connections = items
        for o in self._target_instances:
            self._subscribe(notifying=o)
        for o in self._operator_instances:
            self._subscribe(notifying=o)
        for o in notifications:
            self._notify(notification=o)

    def build_dag(self) -> None:
        """Wire plug objects together from the serialized Connection list."""
        for connection in self._connections:
            source = self._get_plug(plug_id=connection.source_id)
            destination = self._get_plug(plug_id=connection.destination_id)
            if not source or not destination:
                continue
            if destination not in source.outputs:
                source.outputs.append(destination)
            destination.input = source

    def _get_plug(self, plug_id: str) -> typing.Union[Plug, typing.NoReturn]:
        """Find a plug by id across all nodes; None if not found."""
        for assembly_reference in self._target_instances:
            for plug in assembly_reference.inputs:
                if plug.id == plug_id:
                    return plug
            for plug in assembly_reference.outputs:
                if plug.id == plug_id:
                    return plug
        for operator_instance in self._operator_instances:
            for plug in operator_instance.outputs:
                if plug.id == plug_id:
                    return plug
            for plug in operator_instance.inputs:
                if plug.id == plug_id:
                    return plug
        return None

    def add_node(self, node: OperatorInstance) -> None:
        """Append an operator instance without subscribing to it."""
        self._operator_instances.append(node)

    def add_connection(self, source: Plug, destination: Plug) -> None:
        """Record and wire a connection from ``source`` to ``destination``."""
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source

    def add(self, entity: GraphEntity) -> None:
        """Add a TargetInstance or OperatorInstance and subscribe to it.

        Raises:
            NotImplementedError: for any other GraphEntity subclass.
        """
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                return
            self._target_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                return
            self._operator_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        raise NotImplementedError()

    def can_be_removed(self, entity: GraphEntity) -> bool:
        """True if the entity is in the graph and is not the reserved output node."""
        if not entity:
            return False
        if entity not in self._target_instances and entity not in self._operator_instances:
            return False
        if entity == self._graph_output:
            return False
        return True

    def remove(self, entity: GraphEntity) -> None:
        """Remove an entity and any connections referencing it.

        NOTE(review): the connection checks below compare plug ids
        (source_id/destination_id) against the entity's own id, which
        look like different id spaces — confirm connections referencing
        the entity's plugs are cleaned up elsewhere.

        Raises:
            Exception: if can_be_removed() is False.
            NotImplementedError: for unsupported entity types.
        """
        if not self.can_be_removed(entity=entity):
            raise Exception('Not allowed: entity is not allowed to be deleted.')
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                self._unsubscribe(notifying=entity)
                self._target_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
                    to_remove.append(connection)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                self._unsubscribe(notifying=entity)
                self._operator_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
                    to_remove.append(connection)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        raise NotImplementedError()

    def remove_connection(self, connection: Connection) -> None:
        """Remove a connection record and unwire the plugs it joined."""
        if connection in self._connections:
            self._connections.remove(connection)
        source = self._get_plug(plug_id=connection.source_id)
        destination = self._get_plug(plug_id=connection.destination_id)
        if source and destination:
            if destination in source.outputs:
                source.outputs.remove(destination)
            if destination.input == source:
                destination.input = None

    def get_entity_by_id(self, identifier: str) -> typing.Union[GraphEntity, typing.NoReturn]:
        """Find a node (target or operator instance) by id; None if absent."""
        entities = [entity for entity in self._target_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        entities = [entity for entity in self._operator_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        return None

    def get_output_entity(self) -> typing.Union[TargetInstance, typing.NoReturn]:
        """
        Computes the dependency graph and returns the resulting Target reference.
        Make sure relevant source node plug values have been set prior to invoking this method.
        """
        if not self._graph_output:
            return None
        self._graph_output.invalidate()
        assembly_id = self._graph_output.outputs[0].computed_value
        for item in self._target_instances:
            if item.target_id == assembly_id:
                return item
        return None

    def get_object_style_name(self, entity: GraphEntity) -> str:
        """Return the StyleInfo name used to draw ``entity`` ('' if None)."""
        if not entity:
            return ''
        # TODO: Style computed output entity
        # if entity == self.get_output_entity():
        #     return ConversionGraph.STYLE_OUTPUT.name
        if entity == self.source_node:
            return ConversionGraph.STYLE_SOURCE_NODE.name
        if isinstance(entity, TargetInstance):
            return ConversionGraph.STYLE_ASSEMBLY_REFERENCE.name
        if isinstance(entity, OperatorInstance):
            if entity.operator:
                style = ConversionGraph._OPERATOR_STYLE_BY_CLASS_NAME.get(
                    entity.operator.__class__.__name__
                )
                if style is not None:
                    return style.name
            # Unknown operator class or no operator set: generic style.
            return ConversionGraph.STYLE_OPERATOR_INSTANCE.name
        return ''

    def get_output_targets(self) -> typing.List[TargetInstance]:
        """All target instances except the source node."""
        return [o for o in self._target_instances if not o == self._source_node]

    @property
    def target_instances(self) -> typing.List[TargetInstance]:
        """Copy of the target instance list."""
        return self._target_instances[:]

    @property
    def operator_instances(self) -> typing.List[OperatorInstance]:
        """Copy of the operator instance list."""
        return self._operator_instances[:]

    @property
    def connections(self) -> typing.List[Connection]:
        """Copy of the connection list."""
        return self._connections[:]

    @property
    def filename(self) -> str:
        """Filename the graph was loaded from / will be saved to."""
        return self._filename

    @filename.setter
    def filename(self, value: str) -> None:
        if self._filename is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='filename',
            old_value=self._filename,
            new_value=value
        )
        self._filename = value
        self._notify(notification=notification)

    @property
    def library(self) -> 'Library':
        """The Library this graph belongs to (may be None)."""
        return self._library

    @property
    def graph_output(self) -> OperatorInstance:
        """The reserved GraphOutput operator instance."""
        return self._graph_output

    @property
    def source_node(self) -> TargetInstance:
        """The TargetInstance acting as the conversion source."""
        return self._source_node

    @source_node.setter
    def source_node(self, value: TargetInstance) -> None:
        if self._source_node is value:
            return
        # Two notifications: the node itself and the derived id.
        node_notification = ChangeNotification(
            item=self,
            property_name='source_node',
            old_value=self._source_node,
            new_value=value
        )
        node_id_notification = ChangeNotification(
            item=self,
            property_name='source_node_id',
            old_value=self._source_node_id,
            new_value=value.id if value else ''
        )
        self._source_node = value
        self._source_node_id = self._source_node.id if self._source_node else ''
        self._notify(notification=node_notification)
        self._notify(notification=node_id_notification)

    @property
    def exists_on_disk(self) -> bool:
        """True once the graph has been persisted to a file."""
        return self._exists_on_disk

    @property
    def revision(self) -> int:
        """Monotonic revision counter for the stored graph."""
        return self._revision

    @revision.setter
    def revision(self, value: int) -> None:
        if self._revision is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='revision',
            old_value=self._revision,
            new_value=value
        )
        self._revision = value
        self._notify(notification=notification)
class FileHeader(Serializable):
    """Records the module and class name of a serialized payload."""

    @classmethod
    def FromInstance(cls, instance: Serializable) -> 'FileHeader':
        """Build a header describing ``instance``'s class."""
        header = cls()
        header._module = instance.__class__.__module__
        header._class_name = instance.__class__.__name__
        return header

    @classmethod
    def FromData(cls, data: dict) -> 'FileHeader':
        """Build a header from serialized data; raise if keys are missing."""
        for required in ('_module', '_class_name'):
            if required not in data.keys():
                raise Exception('Unexpected data: key "{0}" not in dictionary'.format(required))
        header = cls()
        header._module = data['_module']
        header._class_name = data['_class_name']
        return header

    def __init__(self):
        super(FileHeader, self).__init__()
        self._module = ''
        self._class_name = ''

    def serialize(self) -> dict:
        """Serialize as a plain two-key dict."""
        return {
            '_module': self._module,
            '_class_name': self._class_name,
        }

    @property
    def module(self) -> str:
        """Dotted module path of the payload class."""
        return self._module

    @property
    def class_name(self) -> str:
        """Name of the payload class."""
        return self._class_name
class FileUtility(Serializable):
    """Pairs a FileHeader with the Serializable content it describes."""

    @classmethod
    def FromInstance(cls, instance: Serializable) -> 'FileUtility':
        """Wrap an existing instance with a matching header."""
        utility = cls()
        utility._header = FileHeader.FromInstance(instance=instance)
        utility._content = instance
        return utility

    @classmethod
    def FromData(cls, data: dict) -> 'FileUtility':
        """Rebuild header and content from serialized data.

        Dynamically imports the header's module and instantiates the
        named class, then deserializes content into it.
        """
        for required in ('_header', '_content'):
            if required not in data.keys():
                raise Exception('Unexpected data: key "{0}" not in dictionary'.format(required))
        utility = cls()
        utility._header = FileHeader.FromData(data=data['_header'])
        if utility._header.module not in sys.modules.keys():
            importlib.import_module(utility._header.module)
        module = sys.modules[utility._header.module]
        klass = module.__dict__[utility._header.class_name]
        utility._content = klass()
        if isinstance(utility._content, Serializable):
            utility._content.deserialize(data=data['_content'])
        return utility

    def __init__(self):
        super(FileUtility, self).__init__()
        self._header: FileHeader = None
        self._content: Serializable = None

    def serialize(self) -> dict:
        """Serialize header and content side by side."""
        return {
            '_header': self._header.serialize(),
            '_content': self._content.serialize(),
        }

    def assert_content_serializable(self):
        """Debug helper: walk the serialized content, printing leaves."""
        data = self.content.serialize()
        self._assert(data=data)

    def _assert(self, data: dict):
        # Recursive walk: dicts recurse, lists recurse per dict entry,
        # everything else is printed.
        for key, value in data.items():
            if isinstance(value, dict):
                self._assert(data=value)
                continue
            if isinstance(value, list):
                for entry in value:
                    if isinstance(entry, dict):
                        self._assert(data=entry)
                    else:
                        print(entry)
                continue
            print(key, value)

    @property
    def header(self) -> FileHeader:
        """The descriptive header."""
        return self._header

    @property
    def content(self) -> Serializable:
        """The wrapped payload instance."""
        return self._content
class Library(Base):
    """
    A Library represents a UMM data set. It can contain any of the following types of files:
        - Settings
        - Conversion Graph
        - Target
        - Conversion Manifest
    A Library is divided into a "core" and a "user" data set.
        "core":
            - Files provided by NVIDIA.
            - Installed and updated by UMM.
            - Adding, editing, and deleting files require running in "Developer Mode".
            - Types:
                - Conversion Graph
                - Target
                - Conversion Manifest
        "user"
            - Files created and updated by user.
            - Types:
                - Conversion Graph
                - Target
                - Conversion Manifest
                    Overrides ./core/Conversion Manifest
    ...or...
        each file header has an attribute: source = core, source = user
        if source == core then it is read-only to users.
    TARGET: problem with that is what if user needs to update an existing target?
    ...why would they?
    ...because they may want to edit property states in the Target... would want their own.
    CONVERSION GRAPH
    ...they could just Save As and make a different one. no problem here. do need to change the 'source' attribute to 'user' though.
    CONVERSION MANIFEST
        2 files
            ConversionManifest.json
            ConversionManifest_user.json (overrides ConversionManifest.json)
        Limitation: User cannot all together remove a manifest item
    """

    @classmethod
    def Create(
            cls,
            library_id: str,
            name: str,
            manifest: IDelegate = None,
            conversion_graph: IDelegate = None,
            target: IDelegate = None,
            settings: IDelegate = None
    ) -> 'Library':
        """Factory: build a Library with the given id, name, and I/O delegates."""
        instance = typing.cast(Library, super(Library, cls).Create())
        instance._id = library_id
        instance._name = name
        instance._manifest = manifest
        instance._conversion_graph = conversion_graph
        instance._target = target
        instance._settings = settings
        return instance

    def __init__(self):
        super(Library, self).__init__()
        self._name: str = ''
        # I/O delegates per file type; None means the type is unavailable.
        self._manifest: typing.Union[IDelegate, typing.NoReturn] = None
        self._conversion_graph: typing.Union[IDelegate, typing.NoReturn] = None
        self._target: typing.Union[IDelegate, typing.NoReturn] = None
        self._settings: typing.Union[IDelegate, typing.NoReturn] = None

    def serialize(self) -> dict:
        """Serialize only the name; delegates are runtime-only objects."""
        output = super(Library, self).serialize()
        output['_name'] = self._name
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the name from serialized data."""
        super(Library, self).deserialize(data=data)
        self._name = data['_name'] if '_name' in data.keys() else ''

    @property
    def name(self) -> str:
        """Display name of the library."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def manifest(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Delegate for Conversion Manifest files, or None."""
        return self._manifest

    @property
    def conversion_graph(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Delegate for Conversion Graph files, or None."""
        return self._conversion_graph

    @property
    def target(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Delegate for Target files, or None."""
        return self._target

    @property
    def settings(self) -> typing.Union[IDelegate, typing.NoReturn]:
        """Delegate for Settings files, or None."""
        return self._settings

    @property
    def is_read_only(self) -> bool:
        """True when a delegate required for writing is missing.

        FIX: the original expression tested ``_conversion_graph`` twice;
        the redundant duplicate is removed (no behavior change).
        NOTE(review): one duplicate may have been meant to test
        ``_manifest`` or ``_settings`` — confirm with the author.
        """
        return not self._conversion_graph or not self._target
class Settings(Serializable):
    """Application-level UMM settings: registered libraries and render contexts."""
    def __init__(self):
        super(Settings, self).__init__()
        self._libraries: typing.List[Library] = []
        self._store_id = 'Settings.json'
        self._render_contexts: typing.List[str] = []
    def serialize(self) -> dict:
        """Serialize libraries and render contexts on top of the base payload."""
        output = super(Settings, self).serialize()
        output['_libraries'] = [library.serialize() for library in self._libraries]
        output['_render_contexts'] = self._render_contexts
        return output
    def deserialize(self, data: dict) -> None:
        """Restore libraries and render contexts from *data*."""
        super(Settings, self).deserialize(data=data)
        libraries = []
        for entry in data.get('_libraries', []):
            library = Library()
            library.deserialize(data=entry)
            libraries.append(library)
        self._libraries = libraries
        self._render_contexts = data.get('_render_contexts', [])
    @property
    def libraries(self) -> typing.List[Library]:
        """The registered libraries (live list, not a copy)."""
        return self._libraries
    @property
    def store_id(self) -> str:
        """Filename this settings document persists to."""
        return self._store_id
    @property
    def render_contexts(self) -> typing.List[str]:
        """Known render context identifiers (live list, not a copy)."""
        return self._render_contexts
class ClassInfo(object):
    """Immutable pairing of a UI display name and a shader implementation class name.

    Improvement: added __repr__ for debuggability; interface otherwise unchanged.
    """
    def __init__(self, display_name: str, class_name: str):
        super(ClassInfo, self).__init__()
        self._display_name = display_name
        self._class_name = class_name
    def __repr__(self) -> str:
        return '{0}(display_name={1!r}, class_name={2!r})'.format(
            type(self).__name__, self._display_name, self._class_name
        )
    @property
    def display_name(self) -> str:
        """Human-readable name for UI display."""
        return self._display_name
    @property
    def class_name(self) -> str:
        """Implementation class identifier (e.g. 'OmniPBR.mdl|OmniPBR')."""
        return self._class_name
class OmniMDL(object):
    """Catalog of Omniverse MDL shader class identifiers, as 'module.mdl|class' pairs."""
    OMNI_GLASS: ClassInfo = ClassInfo(display_name='Omni Glass', class_name='OmniGlass.mdl|OmniGlass')
    OMNI_GLASS_OPACITY: ClassInfo = ClassInfo(display_name='Omni Glass Opacity',
                                              class_name='OmniGlass_Opacity.mdl|OmniGlass_Opacity')
    OMNI_PBR: ClassInfo = ClassInfo(display_name='Omni PBR', class_name='OmniPBR.mdl|OmniPBR')
    OMNI_PBR_CLEAR_COAT: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat',
                                               class_name='OmniPBR_ClearCoat.mdl|OmniPBR_ClearCoat')
    OMNI_PBR_CLEAR_COAT_OPACITY: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat Opacity',
                                                       class_name='OmniPBR_ClearCoat_Opacity.mdl|OmniPBR_ClearCoat_Opacity')
    # Annotation added for consistency with the sibling constants.
    OMNI_PBR_OPACITY: ClassInfo = ClassInfo(display_name='Omni PBR Opacity', class_name='OmniPBR_Opacity.mdl|OmniPBR_Opacity')
    OMNI_SURFACE: ClassInfo = ClassInfo(display_name='OmniSurface', class_name='OmniSurface.mdl|OmniSurface')
    OMNI_SURFACE_LITE: ClassInfo = ClassInfo(display_name='OmniSurfaceLite',
                                             class_name='OmniSurfaceLite.mdl|OmniSurfaceLite')
    OMNI_SURFACE_UBER: ClassInfo = ClassInfo(display_name='OmniSurfaceUber',
                                             class_name='OmniSurfaceUber.mdl|OmniSurfaceUber')
class MayaShader(object):
    """Catalog of Maya shader class identifiers."""
    LAMBERT: ClassInfo = ClassInfo(display_name='Lambert', class_name='lambert')
class ConversionMap(Serializable):
    """Binds a (render context, application) pair to a ConversionGraph document.

    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    (Python sets __hash__ to None); list membership in ConversionManifest
    still works because it only needs __eq__.
    """
    @classmethod
    def Create(
            cls,
            render_context: str,
            application: str,
            document: ConversionGraph,
    ) -> 'ConversionMap':
        """Factory: build a mapping for *document*; raises on a non-ConversionGraph."""
        if not isinstance(document, ConversionGraph):
            raise Exception('Argument "document" unexpected class: "{0}"'.format(type(document)))
        instance = cls()
        instance._render_context = render_context
        instance._application = application
        instance._conversion_graph_id = document.id
        instance._conversion_graph = document
        return instance
    def __init__(self):
        super(ConversionMap, self).__init__()
        self._render_context: str = ''
        self._application: str = ''
        self._conversion_graph_id: str = ''
        # Runtime-only reference; re-resolved after deserialization.
        self._conversion_graph: ConversionGraph = None
    def __eq__(self, other: 'ConversionMap') -> bool:
        """Equal when render context, application and graph id all match."""
        if not isinstance(other, ConversionMap):
            return False
        return (
            self.render_context == other.render_context
            and self.application == other.application
            and self.conversion_graph_id == other.conversion_graph_id
        )
    def serialize(self) -> dict:
        """Serialize identifying fields; the graph object itself is not embedded."""
        output = super(ConversionMap, self).serialize()
        output['_render_context'] = self._render_context
        output['_application'] = self._application
        output['_conversion_graph_id'] = self._conversion_graph_id
        return output
    def deserialize(self, data: dict) -> None:
        """Restore identifying fields; the graph reference is reset to None."""
        super(ConversionMap, self).deserialize(data=data)
        self._render_context = data.get('_render_context', '')
        self._application = data.get('_application', '')
        self._conversion_graph_id = data.get('_conversion_graph_id', '')
        self._conversion_graph = None
    @property
    def render_context(self) -> str:
        return self._render_context
    @property
    def application(self) -> str:
        return self._application
    @property
    def conversion_graph_id(self) -> str:
        return self._conversion_graph_id
    @property
    def conversion_graph(self) -> ConversionGraph:
        return self._conversion_graph
class ConversionManifest(Serializable):
    """Versioned collection of ConversionMap entries, persisted as ConversionManifest.json."""
    def __init__(self):
        super(ConversionManifest, self).__init__()
        self._version_major: int = 100
        self._version_minor: int = 0
        self._conversion_maps: typing.List[ConversionMap] = []
        self._store_id = 'ConversionManifest.json'
    def serialize(self) -> dict:
        """Serialize version numbers and all conversion maps."""
        output = super(ConversionManifest, self).serialize()
        output['_version_major'] = self._version_major
        output['_version_minor'] = self._version_minor
        output['_conversion_maps'] = [conversion_map.serialize() for conversion_map in self._conversion_maps]
        return output
    def deserialize(self, data: dict) -> None:
        """Restore version numbers and conversion maps from *data*."""
        super(ConversionManifest, self).deserialize(data=data)
        self._version_major = data.get('_version_major', 100)
        self._version_minor = data.get('_version_minor', 0)
        conversion_maps = []
        for entry in data.get('_conversion_maps', []):
            conversion_map = ConversionMap()
            conversion_map.deserialize(data=entry)
            conversion_maps.append(conversion_map)
        self._conversion_maps = conversion_maps
    def set_version(self, major: int = 100, minor: int = 0) -> None:
        """Set the manifest schema version."""
        self._version_major = major
        self._version_minor = minor
    def add(
            self,
            render_context: str,
            application: str,
            document: ConversionGraph,
    ) -> ConversionMap:
        """Create, register and return a new ConversionMap for *document*."""
        conversion_map = ConversionMap.Create(
            render_context=render_context,
            application=application,
            document=document,
        )
        self._conversion_maps.append(conversion_map)
        return conversion_map
    def remove(self, item: ConversionMap) -> None:
        """Remove *item* if present (membership uses ConversionMap equality)."""
        if item in self._conversion_maps:
            self._conversion_maps.remove(item)
    @property
    def conversion_maps(self) -> typing.List[ConversionMap]:
        """Shallow copy of the registered conversion maps."""
        return self._conversion_maps[:]
    @property
    def version(self) -> str:
        """Version string formatted as '<major>.<minor>'."""
        return '{0}.{1}'.format(self._version_major, self._version_minor)
    @property
    def version_major(self) -> int:
        return self._version_major
    @property
    def version_minor(self) -> int:
        return self._version_minor
    @property
    def store_id(self) -> str:
        """Filename this manifest persists to."""
        return self._store_id
| 100,965 | Python | 32.949563 | 187 | 0.58241 |
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/operator.py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import sys
import typing
from .data import Operator, Plug, DagNode, OperatorInstance
from . import util
class ConstantFloat(Operator):
    """Operator with no inputs and a single editable float output."""
    def __init__(self):
        super(ConstantFloat, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb6',
            name='Constant Float',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to 0.0 when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else 0.0
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable float output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Float',
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True
        )
        plug.value = 0.0
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = len(self.id) * 0.3
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        expected = len(self.id) * 0.3
        for output in output_plugs:
            if not output.computed_value == expected:
                raise Exception('Test failed.')
class ConstantInteger(Operator):
    """Operator with no inputs and a single editable integer output."""
    def __init__(self):
        super(ConstantInteger, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb7',
            name='Constant Integer',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to 0 when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else 0
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable integer output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Integer',
            value_type=Plug.VALUE_TYPE_INTEGER,
            editable=True
        )
        plug.value = 0
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = len(self.id)
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        expected = len(self.id)
        for output in output_plugs:
            if not output.computed_value == expected:
                raise Exception('Test failed.')
class ConstantBoolean(Operator):
    """Operator with no inputs and a single editable boolean output."""
    def __init__(self):
        super(ConstantBoolean, self).__init__(
            id='293c38db-c9b3-4b37-ab02-c4ff6052bcb8',
            name='Constant Boolean',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to False when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else False
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable boolean output plug (defaults to True)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Boolean',
            value_type=Plug.VALUE_TYPE_BOOLEAN,
            editable=True
        )
        plug.value = True
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = False
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Value was forced to False; any truthy output is a failure.
        for output in output_plugs:
            if output.computed_value:
                raise Exception('Test failed.')
class ConstantString(Operator):
    """Operator with no inputs and a single editable string output.

    NOTE(review): the id ends in 'g', which is not a hex digit -- this is not
    a valid UUID. Left unchanged because serialized graphs reference it.
    """
    def __init__(self):
        super(ConstantString, self).__init__(
            id='cb169ec0-5ddb-45eb-98d1-5d09f1ca759g',
            name='Constant String',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to '' when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else ''
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable string output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='String',
            value_type=Plug.VALUE_TYPE_STRING,
            editable=True
        )
        plug.value = ''
        plug.default_value = ''
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = self.id
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == self.id:
                raise Exception('Test failed.')
class ConstantRGB(Operator):
    """Operator with no inputs and a single editable RGB (3-tuple) output."""
    def __init__(self):
        super(ConstantRGB, self).__init__(
            id='60f21797-dd62-4b06-9721-53882aa42e81',
            name='Constant RGB',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to (0, 0, 0) when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else (0, 0, 0)
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable RGB output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Color',
            value_type=Plug.VALUE_TYPE_VECTOR3,
            editable=True
        )
        plug.value = (0, 0, 0)
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = (0.1, 0.2, 0.3)
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == (0.1, 0.2, 0.3):
                raise Exception('Test failed.')
class ConstantRGBA(Operator):
    """Operator with no inputs and a single editable RGBA (4-tuple) output.

    NOTE(review): the compute fallback is (0, 0, 0, 0) while the generated
    plug defaults to (0, 0, 0, 1) -- preserved as-is; confirm which alpha
    default is intended.
    """
    def __init__(self):
        super(ConstantRGBA, self).__init__(
            id='0ab39d82-5862-4332-af7a-329200ae1d14',
            name='Constant RGBA',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Publish the stored value, falling back to (0, 0, 0, 0) when unset or falsy."""
        current = output_plugs[0].value
        output_plugs[0].computed_value = current if current else (0, 0, 0, 0)
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs; always raises."""
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single editable RGBA output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='value',
            display_name='Color',
            value_type=Plug.VALUE_TYPE_VECTOR4,
            editable=True
        )
        plug.value = (0, 0, 0, 1)
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = (0.1, 0.2, 0.3, 0.4)
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == (0.1, 0.2, 0.3, 0.4):
                raise Exception('Test failed.')
class BooleanSwitch(Operator):
    """
    Outputs the value of input 2 if input 1 is TRUE. Otherwise input 3 will be output.
    Input 1 must be a boolean.
    Input 2 and 3 can be of any value type.
    """
    def __init__(self):
        super(BooleanSwitch, self).__init__(
            id='a628ab13-f19f-45b3-81cf-6824dd6e7b5d',
            name='Boolean Switch',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Selects input 1 (on_true) or input 2 (on_false) depending on the
        # boolean at input 0. Any disconnected plug falls back to False.
        debug = False  # local switch for verbose tracing; all prints below are dead while False
        value = None
        if debug:
            print('BooleanSwitch')
            print('\tinput_plugs[0].input:', input_plugs[0].input)
        if input_plugs[0].input is not None:
            if debug:
                print('\tinput_plugs[0].input.computed_value:', input_plugs[0].input.computed_value)
                print('\tinput_plugs[1].input:', input_plugs[1].input)
                if input_plugs[1].input is not None:
                    print('\tinput_plugs[1].input.computed_value:', input_plugs[1].input.computed_value)
                print('\tinput_plugs[2].input:', input_plugs[2].input)
                if input_plugs[2].input is not None:
                    print('\tinput_plugs[2].input.computed_value:', input_plugs[2].input.computed_value)
            # Core selection: truthy selector -> on_true branch, else on_false.
            if input_plugs[0].input.computed_value:
                value = input_plugs[1].input.computed_value if input_plugs[1].input is not None else False
            else:
                value = input_plugs[2].input.computed_value if input_plugs[2].input is not None else False
        elif debug:
            print('\tskipping evaluating inputs')
        if debug:
            print('\tvalue:', value)
            print('\toutput_plugs[0].computed_value is value', output_plugs[0].computed_value is value)
        # A disconnected selector leaves value as None -> output False.
        output_plugs[0].computed_value = value if value is not None else False
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plugs: 0 = boolean selector, 1 = value when True, 2 = value when False."""
        if index == 0:
            plug = Plug.Create(parent=parent, name='input_boolean', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)
            plug.value = False
            return plug
        if index == 1:
            return Plug.Create(parent=parent, name='on_true', display_name='True Output', value_type=Plug.VALUE_TYPE_ANY)
        if index == 2:
            return Plug.Create(parent=parent, name='on_false', display_name='False Output', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single pass-through output plug."""
        if index == 0:
            plug = Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)
            plug.value = False
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Selector True, with distinct string values on both branches.
        fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
        fake.outputs[0].value = True
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'Input 1 value'
        input_plugs[1].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'Input 2 value'
        input_plugs[2].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Selector was True -> expect the on_true branch value.
        for output in output_plugs:
            if not output.computed_value == 'Input 1 value':
                raise Exception('Test failed.')
class SplitRGB(Operator):
    """Splits an RGB (3-tuple) input into three separate float outputs."""
    def __init__(self):
        super(SplitRGB, self).__init__(
            id='1cbcf8c6-328c-49b6-b4fc-d16fd78d4868',
            name='Split RGB',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=3
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Copy the 3 components of the upstream value to the outputs.

        Disconnected input -> all zeros; a non-iterable or wrongly-sized
        value -> each output's default_value.
        """
        upstream = input_plugs[0].input
        if upstream is None:
            for plug in output_plugs:
                plug.computed_value = 0
            return
        value = upstream.computed_value
        # iter() also accepts objects implementing only the sequence
        # protocol, so keep the try/except rather than an isinstance check.
        try:
            iter(value)
            is_sequence = True
        except TypeError:
            is_sequence = False
        if is_sequence and len(value) == 3:
            for plug, component in zip(output_plugs, value):
                plug.computed_value = component
        else:
            for plug in output_plugs:
                plug.computed_value = plug.default_value
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single RGB input plug."""
        if index == 0:
            return Plug.Create(parent=parent, name='input_rgb', display_name='RGB', value_type=Plug.VALUE_TYPE_VECTOR3)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create one of the three float channel outputs (red, green, blue)."""
        if index not in (0, 1, 2):
            raise Exception('Output index "{0}" not supported.'.format(index))
        name, label = (('red', 'Red'), ('green', 'Green'), ('blue', 'Blue'))[index]
        plug = Plug.Create(
            parent=parent,
            name=name,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=False
        )
        plug.value = 0
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantRGB())
        fake.outputs[0].value = (0.1, 0.2, 0.3)
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 0.1:
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 0.2:
            raise Exception('Test failed.')
        if not output_plugs[2].computed_value == 0.3:
            raise Exception('Test failed.')
class MergeRGB(Operator):
    """Merges three float inputs (R, G, B) into a single RGB tuple output."""
    def __init__(self):
        super(MergeRGB, self).__init__(
            id='1cbcf8c6-328d-49b6-b4fc-d16fd78d4868',
            name='Merge RGB',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Assemble (r, g, b); disconnected or non-float inputs contribute 0.0."""
        channels = [0.0, 0.0, 0.0]
        for index in range(3):
            upstream = input_plugs[index].input
            if upstream is None:
                continue
            if util.to_plug_value_type(value=upstream.computed_value, assumed_value_type=upstream.value_type) == Plug.VALUE_TYPE_FLOAT:
                channels[index] = upstream.computed_value
        output_plugs[0].computed_value = tuple(channels)
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create one of the three float channel inputs."""
        if index == 0:
            return Plug.Create(parent=parent, name='input_r', display_name='R', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='input_g', display_name='G', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 2:
            # NOTE(review): capital 'B' is inconsistent with 'input_r'/'input_g'
            # but is preserved -- plug names may appear in serialized data.
            return Plug.Create(parent=parent, name='input_B', display_name='B', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single RGB tuple output plug."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='rgb',
            display_name='RGB',
            value_type=Plug.VALUE_TYPE_VECTOR3,
            editable=False
        )
        plug.value = (0, 0, 0)
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.3
        input_plugs[2].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == (0.1, 0.2, 0.3):
            raise Exception('Test failed.')
class SplitRGBA(Operator):
    """Splits an RGBA (4-tuple) input into four separate float outputs."""
    def __init__(self):
        super(SplitRGBA, self).__init__(
            id='2c48e13c-2b58-48b9-a3b6-5f977c402b2e',
            name='Split RGBA',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=4
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Copy the 4 components of the upstream value to the outputs.

        Disconnected input -> all zeros; a non-iterable or wrongly-sized
        value -> each output's default_value.
        """
        upstream = input_plugs[0].input
        if upstream is None:
            for plug in output_plugs:
                plug.computed_value = 0
            return
        value = upstream.computed_value
        # iter() also accepts sequence-protocol objects; keep try/except.
        try:
            iter(value)
            is_sequence = True
        except TypeError:
            is_sequence = False
        if is_sequence and len(value) == 4:
            for plug, component in zip(output_plugs, value):
                plug.computed_value = component
        else:
            for plug in output_plugs:
                plug.computed_value = plug.default_value
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single RGBA input plug."""
        if index == 0:
            return Plug.Create(parent=parent, name='input_rgba', display_name='RGBA', value_type=Plug.VALUE_TYPE_VECTOR4)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create one of the four float channel outputs (red, green, blue, alpha)."""
        if index not in (0, 1, 2, 3):
            raise Exception('Output index "{0}" not supported.'.format(index))
        name, label = (('red', 'Red'), ('green', 'Green'), ('blue', 'Blue'), ('alpha', 'Alpha'))[index]
        plug = Plug.Create(
            parent=parent,
            name=name,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=False
        )
        plug.value = 0
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # NOTE(review): uses ConstantRGB (not ConstantRGBA) as the fake source;
        # harmless because the 4-tuple is assigned to the plug value directly.
        fake = OperatorInstance.FromOperator(operator=ConstantRGB())
        fake.outputs[0].value = (0.1, 0.2, 0.3, 0.4)
        input_plugs[0].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 0.1:
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 0.2:
            raise Exception('Test failed.')
        if not output_plugs[2].computed_value == 0.3:
            raise Exception('Test failed.')
        if not output_plugs[3].computed_value == 0.4:
            raise Exception('Test failed.')
class MergeRGBA(Operator):
    """Merges four float inputs (R, G, B, A) into a single RGBA tuple output."""
    def __init__(self):
        super(MergeRGBA, self).__init__(
            id='92e57f3d-8514-4786-a4ed-2767139a15eb',
            name='Merge RGBA',
            required_inputs=4,
            min_inputs=4,
            max_inputs=4,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Assemble (r, g, b, a); disconnected or non-float inputs contribute 0.0."""
        rgba = [0.0, 0.0, 0.0, 0.0]
        for i in range(4):
            upstream = input_plugs[i].input
            if upstream is None:
                continue
            if util.to_plug_value_type(value=upstream.computed_value, assumed_value_type=upstream.value_type) == Plug.VALUE_TYPE_FLOAT:
                rgba[i] = upstream.computed_value
        output_plugs[0].computed_value = tuple(rgba)
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create one of the four float channel inputs."""
        if index == 0:
            return Plug.Create(parent=parent, name='input_r', display_name='R', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='input_g', display_name='G', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 2:
            return Plug.Create(parent=parent, name='input_b', display_name='B', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 3:
            return Plug.Create(parent=parent, name='input_a', display_name='A', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single RGBA tuple output plug.

        FIX: value_type was VALUE_TYPE_VECTOR3, disagreeing with the
        4-component default value and with ConstantRGBA's use of
        VALUE_TYPE_VECTOR4; corrected to VALUE_TYPE_VECTOR4.
        """
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='rgba',
            display_name='RGBA',
            value_type=Plug.VALUE_TYPE_VECTOR4,
            editable=False
        )
        plug.value = (0, 0, 0, 0)
        return plug
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.3
        input_plugs[2].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.4
        input_plugs[3].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == (0.1, 0.2, 0.3, 0.4):
            raise Exception('Test failed.')
class LessThan(Operator):
    """Outputs True when input 0 compares strictly less than input 1."""
    def __init__(self):
        super(LessThan, self).__init__(
            id='996df9bd-08d5-451b-a67c-80d0de7fba32',
            name='Less Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Compare the two upstream values; any disconnected input yields False."""
        if input_plugs[0].input is None or input_plugs[1].input is None:
            for output in output_plugs:
                output.computed_value = False
            return
        value = input_plugs[0].input.computed_value
        compare = input_plugs[1].input.computed_value
        result = False
        try:
            result = value < compare
        except Exception:
            # Incomparable types: warn and keep the False fallback.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is less than "{1}". '
                  'Setting output to "{2}".'.format(
                value,
                compare,
                result
            ))
        output_plugs[0].computed_value = result
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the value (index 0) and comparison (index 1) float inputs."""
        if index == 0:
            return Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='comparison', display_name='Comparison', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean result output."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is Less Than', value_type=Plug.VALUE_TYPE_BOOLEAN)
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 0.1 < 0.2 -> expect True.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class GreaterThan(Operator):
    """Outputs True when input 0 compares strictly greater than input 1."""
    def __init__(self):
        super(GreaterThan, self).__init__(
            id='1e751c3a-f6cd-43a2-aa72-22cb9d82ad19',
            name='Greater Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )
    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Compare the two upstream values; any disconnected input yields False."""
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        value = input_plugs[0].input.computed_value
        compare = input_plugs[1].input.computed_value
        result = False
        try:
            result = value > compare
        except Exception:
            # Incomparable types: warn and keep the False fallback.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is greater than "{1}". '
                  'Setting output to "{2}".'.format(
                value,
                compare,
                result
            ))
        output_plugs[0].computed_value = result
    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the value (index 0) and comparison (index 1) float inputs."""
        if index == 0:
            return Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='comparison', display_name='Comparison', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))
    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean result output."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is Greater Than', value_type=Plug.VALUE_TYPE_BOOLEAN)
    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.1
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantFloat())
        fake.outputs[0].value = 0.2
        input_plugs[1].input = fake.outputs[0]
    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 0.1 > 0.2 is False -> any truthy output is a failure.
        if output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Or(Operator):
    """Logical OR of two (possibly unconnected) inputs."""

    def __init__(self):
        super(Or, self).__init__(
            id='d0288faf-cb2e-4765-8923-1a368b45f62c',
            name='Or',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Compute OR; an unconnected input counts as False, None values coerce the result to a bool."""
        upstream_1 = input_plugs[0].input
        upstream_2 = input_plugs[1].input
        if upstream_1 is None and upstream_2 is None:
            output_plugs[0].computed_value = False
            return
        first = upstream_1.computed_value if upstream_1 else False
        second = upstream_2.computed_value if upstream_2 else False
        if first is None and second is None:
            output_plugs[0].computed_value = False
        elif first is None:
            output_plugs[0].computed_value = bool(second)
        elif second is None:
            output_plugs[0].computed_value = bool(first)
        else:
            output_plugs[0].computed_value = first or second

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 ('value_1') or 1 ('value_2')."""
        if index in (0, 1):
            return Plug.Create(
                parent=parent,
                name='value_{0}'.format(index + 1),
                display_name='Value {0}'.format(index + 1),
                value_type=Plug.VALUE_TYPE_ANY,
            )
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire constant booleans True and False into the inputs."""
        for plug, constant in zip(input_plugs, (True, False)):
            source = OperatorInstance.FromOperator(operator=ConstantBoolean())
            source.outputs[0].value = constant
            plug.input = source.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """True OR False must compute True."""
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class And(Operator):
    """Logical AND of two (possibly unconnected) inputs."""

    def __init__(self):
        super(And, self).__init__(
            id='9c5e4fb9-9948-4075-a7d6-ae9bc04e25b5',
            name='And',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Compute AND; an unconnected input counts as False, None values coerce the result to a bool."""
        upstream_1 = input_plugs[0].input
        upstream_2 = input_plugs[1].input
        if upstream_1 is None and upstream_2 is None:
            output_plugs[0].computed_value = False
            return
        first = upstream_1.computed_value if upstream_1 else False
        second = upstream_2.computed_value if upstream_2 else False
        if first is None and second is None:
            output_plugs[0].computed_value = False
        elif first is None:
            output_plugs[0].computed_value = bool(second)
        elif second is None:
            output_plugs[0].computed_value = bool(first)
        else:
            output_plugs[0].computed_value = first and second

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 ('value_1') or 1 ('value_2')."""
        if index in (0, 1):
            return Plug.Create(
                parent=parent,
                name='value_{0}'.format(index + 1),
                display_name='Value {0}'.format(index + 1),
                value_type=Plug.VALUE_TYPE_ANY,
            )
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire constant booleans True and True into the inputs."""
        for plug, constant in zip(input_plugs, (True, True)):
            source = OperatorInstance.FromOperator(operator=ConstantBoolean())
            source.outputs[0].value = constant
            plug.input = source.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """True AND True must compute True."""
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Equal(Operator):
    """Outputs True when the two inputs compare equal.

    Two unconnected inputs (or two None values) are considered equal; exactly
    one unconnected input (or one None value) is not.
    """

    def __init__(self):
        super(Equal, self).__init__(
            id='fb353972-aebd-4d32-8231-f644f75d322c',
            name='Equal',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Compare the two upstream values, warning (not raising) on incomparable types."""
        if input_plugs[0].input is None and input_plugs[1].input is None:
            output_plugs[0].computed_value = True
            return
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        value_1 = input_plugs[0].input.computed_value
        value_2 = input_plugs[1].input.computed_value
        if value_1 is None and value_2 is None:
            output_plugs[0].computed_value = True
            return
        if value_1 is None or value_2 is None:
            output_plugs[0].computed_value = False
            return
        result = False
        try:
            result = value_1 == value_2
        except Exception as error:
            # Incomparable types: warn and fall back to False instead of raising.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is equal to "{1}". '
                  'Setting output to "{2}".'.format(
                value_1,
                value_2,
                result
            ))
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 ('value_1') or 1 ('value_2').

        Bug fix: plug 1 was previously created with name='value_1' (a duplicate
        of plug 0) although its display name was 'Value 2'; it is now 'value_2',
        consistent with the Or and And operators.
        """
        if index == 0:
            return Plug.Create(parent=parent, name='value_1', display_name='Value 1', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            return Plug.Create(parent=parent, name='value_2', display_name='Value 2', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean output plug (index 0 only)."""
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Are Equal', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire the same constant string (this operator's id) into both inputs."""
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[1].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Identical strings must compare equal."""
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Not(Operator):
    """Logical negation of a single boolean input."""

    def __init__(self):
        super(Not, self).__init__(
            id='7b8b67df-ce2e-445c-98b7-36ea695c77e3',
            name='Not',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Invert the upstream value; an unconnected or None input yields False."""
        upstream = input_plugs[0].input
        source_value = upstream.computed_value if upstream is not None else None
        if source_value is None:
            output_plugs[0].computed_value = False
        else:
            output_plugs[0].computed_value = not source_value

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean input plug (index 0 only)."""
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='value', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire a constant False into the input."""
        source = OperatorInstance.FromOperator(operator=ConstantBoolean())
        source.outputs[0].value = False
        input_plugs[0].input = source.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """NOT False must compute True."""
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class ValueTest(Operator):
    """Pass-through operator: forwards the input's computed value unchanged."""

    def __init__(self):
        super(ValueTest, self).__init__(
            id='2899f66b-2e8d-467b-98d1-5f590cf98e7a',
            name='Value Test',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Forward the upstream computed value; None when unconnected."""
        upstream = input_plugs[0].input
        output_plugs[0].computed_value = None if upstream is None else upstream.computed_value

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single untyped input plug (index 0 only)."""
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single untyped output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire a constant integer 10 into the input."""
        source = OperatorInstance.FromOperator(operator=ConstantInteger())
        source.outputs[0].value = 10
        input_plugs[0].input = source.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """The input value 10 must pass through unchanged."""
        if output_plugs[0].computed_value != 10:
            raise Exception('Test failed.')
class ValueResolver(Operator):
    """Routes an untyped input value to the output plug matching its resolved type.

    Input 0 is the untyped value. Inputs 1-7 are editable typed fallbacks that
    feed outputs 0-6 whenever the resolved type does not match that slot.
    Refactored to a single table-driven slot spec; the seven near-identical
    branches previously duplicated in _compute_outputs, generate_input and
    generate_output are now one loop each, with identical names, types,
    defaults and editable flags.
    """

    # (name, display_name, value_type, default_factory), one entry per typed
    # slot, in output-plug order. Input plug i+1 mirrors output plug i.
    # A factory is used so mutable defaults (the list) are fresh per plug.
    _SLOTS = (
        ('boolean', 'Boolean', Plug.VALUE_TYPE_BOOLEAN, lambda: False),
        ('color', 'Color', Plug.VALUE_TYPE_VECTOR3, lambda: (0, 0, 0)),
        ('float', 'Float', Plug.VALUE_TYPE_FLOAT, lambda: 0),
        ('integer', 'Integer', Plug.VALUE_TYPE_INTEGER, lambda: 0),
        ('string', 'String', Plug.VALUE_TYPE_STRING, lambda: ''),
        ('rgba', 'RGBA', Plug.VALUE_TYPE_VECTOR4, lambda: (0, 0, 0, 1)),
        ('list', 'List', Plug.VALUE_TYPE_LIST, lambda: []),
    )

    def __init__(self):
        super(ValueResolver, self).__init__(
            id='74306cd0-b668-4a92-9e15-7b23486bd89a',
            name='Value Resolver',
            required_inputs=8,
            min_inputs=8,
            max_inputs=8,
            num_outputs=7
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """For each typed slot, output the resolved value on a type match, else the typed fallback input."""
        assumed_value_type = input_plugs[0].input.value_type if input_plugs[0].input else input_plugs[0].value_type
        computed_value = input_plugs[0].input.computed_value if input_plugs[0].input else False
        value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type)
        for slot_index, (_, _, slot_type, _) in enumerate(ValueResolver._SLOTS):
            if value_type == slot_type:
                output_plugs[slot_index].computed_value = computed_value
            else:
                output_plugs[slot_index].computed_value = input_plugs[slot_index + 1].computed_value
        # Fallback inputs driven by a connection are not hand-editable.
        for index, input_plug in enumerate(input_plugs):
            if index == 0:
                continue
            input_plug.is_editable = not input_plug.input
        for output_plug in output_plugs:
            output_plug.is_editable = False

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Input 0 is the untyped value; inputs 1-7 are typed fallbacks (all editable except the list slot)."""
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)
        if 1 <= index <= len(ValueResolver._SLOTS):
            name, display_name, value_type, default_factory = ValueResolver._SLOTS[index - 1]
            plug = Plug.Create(
                parent=parent,
                name=name,
                display_name=display_name,
                value_type=value_type,
                editable=value_type != Plug.VALUE_TYPE_LIST,
            )
            plug.value = default_factory()
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Outputs 0-6 mirror the typed slots; all are read-only."""
        if 0 <= index < len(ValueResolver._SLOTS):
            name, display_name, value_type, default_factory = ValueResolver._SLOTS[index]
            plug = Plug.Create(
                parent=parent,
                name=name,
                display_name=display_name,
                value_type=value_type,
                editable=False,
            )
            plug.value = default_factory()
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire a constant integer 10 into the untyped input."""
        fake = OperatorInstance.FromOperator(operator=ConstantInteger())
        fake.outputs[0].value = 10
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """An integer input must be routed to the integer output (slot 3)."""
        if not output_plugs[3].computed_value == 10:
            raise Exception('Test failed.')
class MayaTransparencyResolver(Operator):
    """Specialty operator based on Maya's transparency attribute.

    The output is True when the input is a non-empty string, a triple of
    floats with any component greater than zero, or a float greater than
    zero. In all other cases the output is False.
    """

    def __init__(self):
        super(MayaTransparencyResolver, self).__init__(
            id='2b523832-ac84-4051-9064-6046121dcd48',
            name='Maya Transparency Resolver',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Resolve the input's effective type, then apply the per-type transparency rule."""
        upstream = input_plugs[0].input
        assumed_type = upstream.value_type if upstream else input_plugs[0].value_type
        resolved = upstream.computed_value if upstream else False
        value_type = util.to_plug_value_type(value=resolved, assumed_value_type=assumed_type)
        is_transparent = False
        if value_type == Plug.VALUE_TYPE_STRING:
            is_transparent = resolved != ''
        elif value_type == Plug.VALUE_TYPE_VECTOR3:
            is_transparent = any(component > 0 for component in resolved)
        elif value_type == Plug.VALUE_TYPE_FLOAT:
            is_transparent = resolved > 0
        output_plugs[0].computed_value = is_transparent

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single untyped input plug (index 0 only)."""
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single boolean 'is_transparent' output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='is_transparent',
            display_name='Is Transparent',
            value_type=Plug.VALUE_TYPE_BOOLEAN,
        )
        plug.value = False
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire a constant grey RGB triple into the input."""
        source = OperatorInstance.FromOperator(operator=ConstantRGB())
        source.outputs[0].value = (0.5, 0.5, 0.5)
        input_plugs[0].input = source.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """A positive RGB triple must be reported as transparent."""
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class ListGenerator(Operator):
    """Collects the values of a variable number of inputs into a single list."""

    def __init__(self):
        super(ListGenerator, self).__init__(
            id='a410f7a0-280a-451f-a26c-faf9a8e302b4',
            name='List Generator',
            required_inputs=0,
            min_inputs=0,
            max_inputs=-1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Gather every input's computed value, in plug order."""
        output_plugs[0].computed_value = [plug.computed_value for plug in input_plugs]

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create a removable input plug named after its position."""
        label = '[{0}]'.format(index)
        return Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_ANY,
            editable=False,
            is_removable=True,
        )

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single list output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='list', display_name='list', value_type=Plug.VALUE_TYPE_LIST)

    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        """Remove the plug, renumber the remaining inputs, and invalidate outputs."""
        super(ListGenerator, self).remove_plug(operator_instance=operator_instance, plug=plug)
        for position, input_plug in enumerate(operator_instance.inputs):
            label = '[{0}]'.format(position)
            input_plug.name = label
            input_plug.display_name = label
        for output_plug in operator_instance.outputs:
            output_plug.invalidate()

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No fixture needed."""
        pass

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No assertion performed."""
        pass
class ListIndex(Operator):
    """Selects one element of a list input by integer index."""

    def __init__(self):
        super(ListIndex, self).__init__(
            id='e4a81506-fb6b-4729-8273-f68e97f5bc6b',
            name='List Index',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Output list[index], or None when the input is not indexable or out of range."""
        try:
            iter(input_plugs[0].computed_value)
            position = input_plugs[1].computed_value
            if 0 <= position < len(input_plugs[0].computed_value):
                output_plugs[0].computed_value = input_plugs[0].computed_value[position]
            else:
                output_plugs[0].computed_value = None
        except TypeError:
            # Non-iterable input, or an index that does not support comparison.
            output_plugs[0].computed_value = None

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 (the list) or 1 (the editable index, default 0)."""
        if index == 0:
            return Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='index',
                display_name='Index',
                value_type=Plug.VALUE_TYPE_INTEGER,
                editable=True
            )
            plug.computed_value = 0
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single untyped output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Select element 1 of a two-item list."""
        input_plugs[0].value = ['hello', 'world']
        input_plugs[1].value = 1

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Element 1 of ['hello', 'world'] must be 'world'."""
        if output_plugs[0].computed_value != 'world':
            raise Exception('Test failed.')
class MDLColorSpace(Operator):
    """Exposes an editable MDL color-space enum ('auto', 'raw', 'sRGB')."""

    def __init__(self):
        super(MDLColorSpace, self).__init__(
            id='cf0b97c8-fb55-4cf3-8afc-23ebd4a0a6c7',
            name='MDL Color Space',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Forward the plug's chosen value, falling back to 'auto' when unset."""
        chosen = output_plugs[0].value
        output_plugs[0].computed_value = chosen if chosen else 'auto'

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """This operator has no inputs."""
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the editable enum output plug (index 0 only), defaulting to 'auto'."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='color_space',
            display_name='Color Space',
            value_type=Plug.VALUE_TYPE_ENUM,
            editable=True
        )
        plug.enum_values = ['auto', 'raw', 'sRGB']
        plug.default_value = 'auto'
        plug.value = 'auto'
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Choose the third enum entry ('sRGB')."""
        output_plugs[0].value = output_plugs[0].enum_values[2]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """The chosen enum entry must be forwarded unchanged."""
        if output_plugs[0].computed_value != output_plugs[0].enum_values[2]:
            raise Exception('Test failed.')
class MDLTextureResolver(Operator):
    """Combines a texture filepath and an MDL color space into a [filepath, colorspace] list."""

    def __init__(self):
        super(MDLTextureResolver, self).__init__(
            id='af766adb-cf54-4a8b-a598-44b04fbcf630',
            name='MDL Texture Resolver',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Output [filepath, colorspace]; a non-string or unconnected filepath becomes ''."""
        filepath = input_plugs[0].input.computed_value if input_plugs[0].input else ''
        value_type = util.to_plug_value_type(value=filepath, assumed_value_type=Plug.VALUE_TYPE_STRING)
        filepath = filepath if value_type == Plug.VALUE_TYPE_STRING else ''
        colorspace = input_plugs[1].computed_value
        output_plugs[0].computed_value = [filepath, colorspace]

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 (filepath string) or 1 (editable color-space enum)."""
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_STRING)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['auto', 'raw', 'sRGB']
            plug.default_value = 'auto'
            plug.value = 'auto'
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single [filepath, colorspace] list output plug (index 0 only)."""
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='list',
                display_name='List',
                value_type=Plug.VALUE_TYPE_LIST,
                editable=False,
            )
            plug.default_value = ['', 'auto']
            plug.value = ['', 'auto']
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Wire a constant filepath into input 0 and choose the 'raw' color space.

        Bug fix: the fixture previously set input_plugs[0].value, which
        _compute_outputs ignores (it only reads input_plugs[0].input), so the
        computed filepath was always ''. A ConstantString is now wired in, as
        sibling operators do.
        """
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = 'c:/folder/color.png'
        input_plugs[0].input = fake.outputs[0]
        input_plugs[1].value = 'raw'

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """The single output must carry the [filepath, colorspace] pair.

        Bug fix: this operator has one output; the assertion previously indexed
        output_plugs[3], which raised IndexError instead of testing anything.
        """
        if not output_plugs[0].computed_value == ['c:/folder/color.png', 'raw']:
            raise Exception('Test failed.')
class SplitTextureData(Operator):
    """Splits a [texture_path, color_space] pair into two string outputs."""

    def __init__(self):
        super(SplitTextureData, self).__init__(
            id='6a411798-434c-4ad4-b464-0bd2e78cdcec',
            name='Split Texture Data',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=2
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Unpack a two-string sequence; anything else falls back to ('', 'auto')."""
        pair = input_plugs[0].computed_value
        is_valid_input = False
        try:
            iter(pair)
            if len(pair) == 2:
                # basestring only exists on Python 2; the branch keeps the
                # module importable on both major versions.
                string_type = basestring if sys.version_info.major < 3 else str
                is_valid_input = isinstance(pair[0], string_type) and isinstance(pair[1], string_type)
        except TypeError:
            is_valid_input = False
        if is_valid_input:
            output_plugs[0].computed_value = pair[0]
            output_plugs[1].computed_value = pair[1]
        else:
            output_plugs[0].computed_value = ''
            output_plugs[1].computed_value = 'auto'

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create the single list input plug (index 0 only), defaulting to ['', 'auto']."""
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug = Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
        plug.default_value = ['', 'auto']
        plug.computed_value = ['', 'auto']
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create output plug 0 (texture path) or 1 (color space)."""
        if index == 0:
            plug = Plug.Create(parent=parent, name='texture_path', display_name='Texture Path', value_type=Plug.VALUE_TYPE_STRING)
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        if index == 1:
            plug = Plug.Create(parent=parent, name='color_space', display_name='Color Space', value_type=Plug.VALUE_TYPE_STRING)
            plug.default_value = 'auto'
            plug.computed_value = 'auto'
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Provide a well-formed two-string pair."""
        input_plugs[0].computed_value = ['hello.png', 'world']

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Both halves of the pair must be routed to their outputs."""
        if output_plugs[0].computed_value != 'hello.png':
            raise Exception('Test failed.')
        if output_plugs[1].computed_value != 'world':
            raise Exception('Test failed.')
class Multiply(Operator):
    """Multiplies two or more numeric inputs; non-numeric inputs are ignored."""

    def __init__(self):
        super(Multiply, self).__init__(
            id='0f5c9828-f582-48aa-b055-c12b91e692a7',
            name='Multiply',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Output the product of all int/float inputs, or 0 when fewer than two are numeric."""
        factors = [
            plug.computed_value
            for plug in input_plugs
            if isinstance(plug.computed_value, (int, float))
        ]
        if len(factors) < 2:
            output_plugs[0].computed_value = 0
        else:
            product = 1.0
            for factor in factors:
                product *= factor
            output_plugs[0].computed_value = product
        # Plugs driven by a connection are not hand-editable.
        for plug in input_plugs:
            plug.is_editable = not plug.input

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create an editable float input named by position; plugs beyond the first two are removable."""
        label = '[{0}]'.format(index)
        plug = Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=index > 1,
        )
        plug.default_value = 1.0
        plug.value = 1.0
        plug.computed_value = 1.0
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single 'product' output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='product', display_name='product', value_type=Plug.VALUE_TYPE_FLOAT)

    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        """Remove the plug, renumber the remaining inputs, and invalidate outputs."""
        super(Multiply, self).remove_plug(operator_instance=operator_instance, plug=plug)
        for position, input_plug in enumerate(operator_instance.inputs):
            label = '[{0}]'.format(position)
            input_plug.name = label
            input_plug.display_name = label
        for output_plug in operator_instance.outputs:
            output_plug.invalidate()

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Provide the factors 2 and 2."""
        input_plugs[0].computed_value = 2
        input_plugs[1].computed_value = 2

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """2 * 2 must compute 4."""
        if not output_plugs[0].computed_value == 4:
            raise Exception('Test failed.')
class ColorSpaceResolver(Operator):
    """Translates a color-space name between applications (MDL <-> Blender)."""

    # 'from|colorspace(lowercased)|to' -> translated color-space name.
    MAPPING = {
        'MDL|auto|Blender': 'sRGB',
        'MDL|srgb|Blender': 'sRGB',
        'MDL|raw|Blender': 'Raw',
        'Blender|filmic log|MDL': 'raw',
        'Blender|linear|MDL': 'raw',
        'Blender|linear aces|MDL': 'raw',
        'Blender|non-color|MDL': 'raw',
        'Blender|raw|MDL': 'raw',
        'Blender|srgb|MDL': 'sRGB',
        'Blender|xyz|MDL': 'raw',
    }

    # Fallback per target application when no mapping entry matches.
    DEFAULT = {
        'Blender': 'Linear',
        'MDL': 'auto',
    }

    def __init__(self):
        super(ColorSpaceResolver, self).__init__(
            id='c159df8f-a0a2-4300-b897-e8eaa689a901',
            name='Color Space Resolver',
            required_inputs=3,
            min_inputs=3,
            max_inputs=3,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Look up 'from|colorspace|to' in MAPPING, falling back to DEFAULT[to]."""
        color_space = input_plugs[0].computed_value.lower()
        from_color_space = input_plugs[1].computed_value
        to_color_space = input_plugs[2].computed_value
        key = '{0}|{1}|{2}'.format(
            from_color_space,
            color_space,
            to_color_space
        )
        if key in ColorSpaceResolver.MAPPING:
            output_plugs[0].computed_value = ColorSpaceResolver.MAPPING[key]
        else:
            output_plugs[0].computed_value = ColorSpaceResolver.DEFAULT[to_color_space]

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create input plug 0 (color-space name), 1 (source app enum) or 2 (target app enum)."""
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=False,
                is_removable=False,
            )
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='from_color_space',
                display_name='From',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['MDL', 'Blender']
            plug.default_value = 'MDL'
            plug.computed_value = 'MDL'
            return plug
        if index == 2:
            plug = Plug.Create(
                parent=parent,
                name='to_color_space',
                display_name='To',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['Blender', 'MDL']
            plug.default_value = 'Blender'
            plug.computed_value = 'Blender'
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single translated color-space string output (index 0 only)."""
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_STRING,
                editable=False
            )
            plug.default_value = ''
            plug.computed_value = ''
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Translate 'sRGB' from MDL to Blender.

        Bug fix: this previously raised NotImplementedError, so the operator
        could never be self-tested.
        """
        input_plugs[0].computed_value = 'sRGB'
        input_plugs[1].computed_value = 'MDL'
        input_plugs[2].computed_value = 'Blender'

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """'MDL|srgb|Blender' must map to 'sRGB'.

        Bug fix: the old assertion accessed output_plugs[0].enum_values, but
        the output is a plain string plug with no enum_values attribute.
        """
        if not output_plugs[0].computed_value == 'sRGB':
            raise Exception('Test failed.')
class Add(Operator):
    """Sums any number of inputs, skipping values that cannot be added."""

    def __init__(self):
        super(Add, self).__init__(
            id='f2818669-5454-4599-8792-2cb09f055bf9',
            name='Add',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Accumulate a running sum starting at 0; incompatible values are skipped."""
        total = 0
        for plug in input_plugs:
            try:
                total = total + plug.computed_value
            except:
                # Best-effort semantics: ignore values that cannot be added.
                pass
        output_plugs[0].computed_value = total

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create a removable, editable float input named by position."""
        label = '[{0}]'.format(index)
        plug = Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=True,
        )
        plug.default_value = 0.0
        plug.computed_value = 0.0
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single 'sum' output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='sum', display_name='sum', value_type=Plug.VALUE_TYPE_FLOAT)

    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        """Remove the plug, renumber the remaining inputs, and invalidate outputs."""
        super(Add, self).remove_plug(operator_instance=operator_instance, plug=plug)
        for position, input_plug in enumerate(operator_instance.inputs):
            label = '[{0}]'.format(position)
            input_plug.name = label
            input_plug.display_name = label
        for output_plug in operator_instance.outputs:
            output_plug.invalidate()

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No fixture needed."""
        pass

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No assertion performed."""
        pass
class Subtract(Operator):
    """Subtracts each subsequent input from the first usable one."""

    def __init__(self):
        super(Subtract, self).__init__(
            id='15f523f3-4e94-43a5-8306-92d07cbfa48c',
            name='Subtract',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Seed the result with the first value, subtract the rest; incompatible values are skipped."""
        running = None
        for plug in input_plugs:
            try:
                if running is None:
                    running = plug.computed_value
                else:
                    running = running - plug.computed_value
            except:
                # Best-effort semantics: ignore values that cannot be subtracted.
                pass
        output_plugs[0].computed_value = running

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create a removable, editable float input named by position."""
        label = '[{0}]'.format(index)
        plug = Plug.Create(
            parent=parent,
            name=label,
            display_name=label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=True,
        )
        plug.default_value = 0.0
        plug.computed_value = 0.0
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single 'difference' output plug (index 0 only)."""
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='difference', display_name='difference', value_type=Plug.VALUE_TYPE_FLOAT)

    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        """Remove the plug, renumber the remaining inputs, and invalidate outputs."""
        super(Subtract, self).remove_plug(operator_instance=operator_instance, plug=plug)
        for position, input_plug in enumerate(operator_instance.inputs):
            label = '[{0}]'.format(position)
            input_plug.name = label
            input_plug.display_name = label
        for output_plug in operator_instance.outputs:
            output_plug.invalidate()

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No fixture needed."""
        pass

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """No assertion performed."""
        pass
class Remap(Operator):
    """Linear range-remap operator.

    Maps a value (or each element of an iterable of values) from the
    range [old_min, old_max] to [new_min, new_max] using the standard
    linear interpolation formula.  Exactly five inputs are required:
    value, old_min, old_max, new_min, new_max.
    """

    def __init__(self):
        super(Remap, self).__init__(
            id='2405c02a-facc-47a6-80ef-d35d959b0cd4',
            name='Remap',
            required_inputs=5,
            min_inputs=5,
            max_inputs=5,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        """Remap input 0 through the [1..4] range plugs into output 0.

        Scalar values yield a scalar; iterable values yield a list with
        each element remapped.  Best-effort semantics are preserved from
        the original: a failed scalar remap leaves 0.0, a failed element
        is skipped, and old_min == old_max (division by zero) is treated
        as a failure rather than raising.
        """
        result = 0.0
        old_value = input_plugs[0].computed_value
        # Probe iterability; strings are iterable and will be treated as
        # per-character values (preserved from the original behavior).
        try:
            iter(old_value)
            is_iterable = True
        except TypeError:
            is_iterable = False
        # The four range endpoints are loop-invariant: read them once
        # instead of re-reading per element (they used to be fetched
        # inside the per-element loop).
        try:
            old_min = input_plugs[1].computed_value
            old_max = input_plugs[2].computed_value
            new_min = input_plugs[3].computed_value
            new_max = input_plugs[4].computed_value
            have_range = True
        except Exception:
            have_range = False
        if not is_iterable:
            if have_range:
                try:
                    result = ((old_value - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min
                except Exception:
                    # Narrowed from a bare "except:"; covers
                    # ZeroDivisionError (old_min == old_max) and type
                    # errors, leaving the 0.0 default.
                    pass
        else:
            result = []
            if have_range:
                for element in old_value:
                    try:
                        result.append(((element - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min)
                    except Exception:
                        # Skip elements that cannot be remapped.
                        pass
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        """Create one of the five fixed inputs; raises on any other index.

        Index 0 is the value to remap (any type); 1-4 are the editable
        float range endpoints with defaults [0, 1] -> [0, 10].
        """
        if index == 0:
            plug = Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_ANY)
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 1:
            plug = Plug.Create(parent=parent, name='old_min', display_name='Old Min', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 2:
            plug = Plug.Create(parent=parent, name='old_max', display_name='Old Max', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 1
            plug.computed_value = 1
            return plug
        if index == 3:
            plug = Plug.Create(parent=parent, name='new_min', display_name='New Min', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 0
            plug.computed_value = 0
            return plug
        if index == 4:
            plug = Plug.Create(parent=parent, name='new_max', display_name='New Max', value_type=Plug.VALUE_TYPE_FLOAT)
            plug.is_editable = True
            plug.default_value = 10
            plug.computed_value = 10
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        """Create the single 'remapped_value' output; only index 0 is valid."""
        if index == 0:
            return Plug.Create(parent=parent, name='remapped_value', display_name='Remapped Value', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Fixture: remap 0.5 from [0, 1] onto the inverted range [1, 0].
        input_plugs[0].computed_value = 0.5
        input_plugs[1].computed_value = 0
        input_plugs[2].computed_value = 1
        input_plugs[3].computed_value = 1
        input_plugs[4].computed_value = 0
        pass

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 0.5 is the midpoint, so it maps to 0.5 even on the inverted range.
        if not output_plugs[0].computed_value == 0.5:
            raise Exception('Test failed.')
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.