file_path
stringlengths
22
162
content
stringlengths
19
501k
size
int64
19
501k
lang
stringclasses
1 value
avg_line_length
float64
6.33
100
max_line_length
int64
18
935
alphanum_fraction
float64
0.34
0.93
USwampertor/OmniverseJS/ov/python/pxr/UsdUtils/complianceChecker.py
# # Copyright 2018 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. # from __future__ import print_function from pxr import Ar def _IsPackageOrPackagedLayer(layer): return layer.GetFileFormat().IsPackage() or \ Ar.IsPackageRelativePath(layer.identifier) class BaseRuleChecker(object): """This is Base class for all the rule-checkers.""" def __init__(self, verbose): self._verbose = verbose self._failedChecks = [] self._errors = [] def _AddFailedCheck(self, msg): self._failedChecks.append(msg) def _AddError(self, msg): self._errors.append(msg) def _Msg(self, msg): if self._verbose: print(msg) def GetFailedChecks(self): return self._failedChecks def GetErrors(self): return self._errors # ------------------------------------------------------------------------- # Virtual methods that any derived rule-checker may want to override. # Default implementations do nothing. # # A rule-checker may choose to override one or more of the virtual methods. # The callbacks are invoked in the order they are defined here (i.e. 
# CheckStage is invoked first, followed by CheckDiagnostics, followed by # CheckUnresolvedPaths and so on until CheckPrim). Some of the callbacks may # be invoked multiple times per-rule with different parameters, for example, # CheckLayer, CheckPrim and CheckZipFile. def CheckStage(self, usdStage): """ Check the given usdStage. """ pass def CheckDiagnostics(self, diagnostics): """ Check the diagnostic messages that were generated when opening the USD stage. The diagnostic messages are collected using a UsdUtilsCoalescingDiagnosticDelegate. """ pass def CheckUnresolvedPaths(self, unresolvedPaths): """ Check or process any unresolved asset paths that were found when analysing the dependencies. """ pass def CheckDependencies(self, usdStage, layerDeps, assetDeps): """ Check usdStage's layer and asset dependencies that were gathered using UsdUtils.ComputeAllDependencies(). """ pass def CheckLayer(self, layer): """ Check the given SdfLayer. """ pass def CheckZipFile(self, zipFile, packagePath): """ Check the zipFile object created by opening the package at path packagePath. """ pass def CheckPrim(self, prim): """ Check the given prim, which may only exist is a specific combination of variant selections on the UsdStage. """ pass # ------------------------------------------------------------------------- class ByteAlignmentChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "Files within a usdz package must be laid out properly, "\ "i.e. they should be aligned to 64 bytes." def __init__(self, verbose): super(ByteAlignmentChecker, self).__init__(verbose) def CheckZipFile(self, zipFile, packagePath): fileNames = zipFile.GetFileNames() for fileName in fileNames: fileExt = Ar.GetResolver().GetExtension(fileName) fileInfo = zipFile.GetFileInfo(fileName) offset = fileInfo.dataOffset if offset % 64 != 0: self._AddFailedCheck("File '%s' in package '%s' has an " "invalid offset %s." 
% (fileName, packagePath, offset)) class CompressionChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "Files withing a usdz package should not be compressed or "\ "encrypted." def __init__(self, verbose): super(CompressionChecker, self).__init__(verbose) def CheckZipFile(self, zipFile, packagePath): fileNames = zipFile.GetFileNames() for fileName in fileNames: fileExt = Ar.GetResolver().GetExtension(fileName) fileInfo = zipFile.GetFileInfo(fileName) if fileInfo.compressionMethod != 0: self._AddFailedCheck("File '%s' in package '%s' has " "compression. Compression method is '%s', actual size " "is %s. Uncompressed size is %s." % ( fileName, packagePath, fileInfo.compressionMethod, fileInfo.size, fileInfo.uncompressedSize)) class MissingReferenceChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "The composed USD stage should not contain any unresolvable"\ " asset dependencies (in every possible variation of the "\ "asset), when using the default asset resolver. " def __init__(self, verbose): super(MissingReferenceChecker, self).__init__(verbose) def CheckDiagnostics(self, diagnostics): for diag in diagnostics: # "_ReportErrors" is the name of the function that issues # warnings about unresolved references, sublayers and other # composition arcs. if '_ReportErrors' in diag.sourceFunction and \ 'usd/stage.cpp' in diag.sourceFileName: self._AddFailedCheck(diag.commentary) def CheckUnresolvedPaths(self, unresolvedPaths): for unresolvedPath in unresolvedPaths: self._AddFailedCheck("Found unresolvable external dependency " "'%s'." % unresolvedPath) class TextureChecker(BaseRuleChecker): # Allow just png and jpg for now. _allowedImageFormats = ("jpg", "png") # Include a list of "unsupported" image formats to provide better error # messages whwn we find one of these. _unsupportedImageFormats = ["bmp", "tga", "hdr", "exr", "tif", "zfile", "tx"] @staticmethod def GetDescription(): return "Texture files should be .jpg or .png." 
def __init__(self, verbose): # Check if the prim has an allowed type. super(TextureChecker, self).__init__(verbose) def _CheckTexture(self, texAssetPath): self._Msg("Checking texture <%s>." % texAssetPath) texFileExt = Ar.GetResolver().GetExtension(texAssetPath).lower() if texFileExt in \ TextureChecker._unsupportedImageFormats: self._AddFailedCheck("Found texture file '%s' with unsupported " "file format." % texAssetPath) elif texFileExt not in \ TextureChecker._allowedImageFormats: self._AddFailedCheck("Found texture file '%s' with unknown file " "format." % texAssetPath) def CheckPrim(self, prim): # Right now, we find texture referenced by looking at the asset-valued # shader inputs. However, it is entirely legal to feed the "fileName" # input of a UsdUVTexture shader from a UsdPrimvarReader_string. # Hence, ideally we would also check "the right" primvars on # geometry prims here. However, identifying the right primvars is # non-trivial. We probably need to pre-analyze all the materials. # Not going to try to do this yet, but it raises an interesting # validation pattern - from pxr import Sdf, UsdShade # Check if the prim is a shader. if not prim.IsA(UsdShade.Shader): return shader = UsdShade.Shader(prim) shaderInputs = shader.GetInputs() for ip in shaderInputs: if ip.GetTypeName() == Sdf.ValueTypeNames.Asset: texFilePath = str(ip.Get()).strip('@') self._CheckTexture(texFilePath) elif ip.GetTypeName() == Sdf.ValueTypeNames.AssetArray: texPathArray = ip.Get() texPathArray = [str(i).strip('@') for i in texPathArray] for texPath in texPathArray: self._CheckTexture(texFilePath) class ARKitPackageEncapsulationChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "If the root layer is a package, then the composed stage "\ "should not contain references to files outside the package. "\ "In other words, the package should be entirely self-contained." 
def __init__(self, verbose): super(ARKitPackageEncapsulationChecker, self).__init__(verbose) def CheckDependencies(self, usdStage, layerDeps, assetDeps): rootLayer = usdStage.GetRootLayer() if not _IsPackageOrPackagedLayer(rootLayer): return packagePath = usdStage.GetRootLayer().realPath if packagePath: if Ar.IsPackageRelativePath(packagePath): packagePath = Ar.SplitPackageRelativePathOuter( packagePath)[0] for layer in layerDeps: # In-memory layers like session layers (which we must skip when # doing this check) won't have a real path. if layer.realPath: if not layer.realPath.startswith(packagePath): self._AddFailedCheck("Found loaded layer '%s' that " "does not belong to the package '%s'." % (layer.identifier, packagePath)) for asset in assetDeps: if not asset.startswith(packagePath): self._AddFailedCheck("Found asset reference '%s' that " "does not belong to the package '%s'." % (asset, packagePath)) class ARKitLayerChecker(BaseRuleChecker): # Only core USD file formats are allowed. _allowedLayerFormatIds = ('usd', 'usda', 'usdc', 'usdz') @staticmethod def GetDescription(): return "All included layers that participate in composition should"\ " have one of the core supported file formats." def __init__(self, verbose): # Check if the prim has an allowed type. super(ARKitLayerChecker, self).__init__(verbose) def CheckLayer(self, layer): self._Msg("Checking layer <%s>." % layer.identifier) formatId = layer.GetFileFormat().formatId if not formatId in \ ARKitLayerChecker._allowedLayerFormatIds: self._AddFailedCheck("Layer '%s' has unsupported formatId " "'%s'." % (layer.identifier, formatId)) class ARKitPrimTypeChecker(BaseRuleChecker): # All core prim types other than UsdGeomPointInstancers, Curve types, Nurbs, # and the types in UsdLux are allowed. 
_allowedPrimTypeNames = ('', 'Scope', 'Xform', 'Camera', 'Shader', 'Material', 'Mesh', 'Sphere', 'Cube', 'Cylinder', 'Cone', 'Capsule', 'GeomSubset', 'Points', 'SkelRoot', 'Skeleton', 'SkelAnimation', 'BlendShape', 'SpatialAudio') @staticmethod def GetDescription(): return "UsdGeomPointInstancers and custom schemas not provided by "\ "core USD are not allowed." def __init__(self, verbose): # Check if the prim has an allowed type. super(ARKitPrimTypeChecker, self).__init__(verbose) def CheckPrim(self, prim): self._Msg("Checking prim <%s>." % prim.GetPath()) if prim.GetTypeName() not in \ ARKitPrimTypeChecker._allowedPrimTypeNames: self._AddFailedCheck("Prim <%s> has unsupported type '%s'." % (prim.GetPath(), prim.GetTypeName())) class ARKitStageYupChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "The stage and all fo the assets referenced within it "\ "should be Y-up.", def __init__(self, verbose): # Check if the prim has an allowed type. super(ARKitStageYupChecker, self).__init__(verbose) def CheckStage(self, usdStage): from pxr import UsdGeom upAxis = UsdGeom.GetStageUpAxis(usdStage) if upAxis != UsdGeom.Tokens.y: self._AddFailedCheck("Stage has upAxis '%s'. upAxis should be " "'%s'." % (upAxis, UsdGeom.Tokens.y)) class ARKitShaderChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "Shader nodes must have \"id\" as the implementationSource, " \ "with id values that begin with \"Usd*\". Also, shader inputs "\ "with connections must each have a single, valid connection " \ "source." def __init__(self, verbose): super(ARKitShaderChecker, self).__init__(verbose) def CheckPrim(self, prim): from pxr import UsdShade if not prim.IsA(UsdShade.Shader): return shader = UsdShade.Shader(prim) if not shader: self._AddError("Invalid shader prim <%s>." % prim.GetPath()) return self._Msg("Checking shader <%s>." 
% prim.GetPath()) implSource = shader.GetImplementationSource() if implSource != UsdShade.Tokens.id: self._AddFailedCheck("Shader <%s> has non-id implementation " "source '%s'." % (prim.GetPath(), implSource)) shaderId = shader.GetShaderId() if not shaderId or \ not (shaderId in ['UsdPreviewSurface', 'UsdUVTexture'] or shaderId.startswith('UsdPrimvarReader')) : self._AddFailedCheck("Shader <%s> has unsupported info:id '%s'." % (prim.GetPath(), shaderId)) # Check shader input connections shaderInputs = shader.GetInputs() for shdInput in shaderInputs: connections = shdInput.GetAttr().GetConnections() # If an input has one or more connections, ensure that the # connections are valid. if len(connections) > 0: if len(connections) > 1: self._AddFailedCheck("Shader input <%s> has %s connection " "sources, but only one is allowed." % (shdInput.GetAttr.GetPath(), len(connections))) connectedSource = shdInput.GetConnectedSource() if connectedSource is None: self._AddFailedCheck("Connection source <%s> for shader " "input <%s> is missing." % (connections[0], shdInput.GetAttr().GetPath())) else: # The source must be a valid shader or material prim. source = connectedSource[0] if not source.GetPrim().IsA(UsdShade.Shader) and \ not source.GetPrim().IsA(UsdShade.Material): self._AddFailedCheck("Shader input <%s> has an invalid " "connection source prim of type '%s'." % (shdInput.GetAttr().GetPath(), source.GetPrim().GetTypeName())) class ARKitMaterialBindingChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "All material binding relationships must have valid targets." 
def __init__(self, verbose): super(ARKitMaterialBindingChecker, self).__init__(verbose) def CheckPrim(self, prim): from pxr import UsdShade relationships = prim.GetRelationships() bindingRels = [rel for rel in relationships if rel.GetName().startswith(UsdShade.Tokens.materialBinding)] for bindingRel in bindingRels: targets = bindingRel.GetTargets() if len(targets) == 1: directBinding = UsdShade.MaterialBindingAPI.DirectBinding( bindingRel) if not directBinding.GetMaterial(): self._AddFailedCheck("Direct material binding <%s> targets " "an invalid material <%s>." % (bindingRel.GetPath(), directBinding.GetMaterialPath())) elif len(targets) == 2: collBinding = UsdShade.MaterialBindingAPI.CollectionBinding( bindingRel) if not collBinding.GetMaterial(): self._AddFailedCheck("Collection-based material binding " "<%s> targets an invalid material <%s>." % (bindingRel.GetPath(), collBinding.GetMaterialPath())) if not collBinding.GetCollection(): self._AddFailedCheck("Collection-based material binding " "<%s> targets an invalid collection <%s>." % (bindingRel.GetPath(), collBinding.GetCollectionPath())) class ARKitFileExtensionChecker(BaseRuleChecker): _allowedFileExtensions = \ ARKitLayerChecker._allowedLayerFormatIds + \ TextureChecker._allowedImageFormats @staticmethod def GetDescription(): return "Only layer files and textures are allowed in a package." def __init__(self, verbose): super(ARKitFileExtensionChecker, self).__init__(verbose) def CheckZipFile(self, zipFile, packagePath): fileNames = zipFile.GetFileNames() for fileName in fileNames: fileExt = Ar.GetResolver().GetExtension(fileName) if fileExt not in ARKitFileExtensionChecker._allowedFileExtensions: self._AddFailedCheck("File '%s' in package '%s' has an " "unknown or unsupported extension '%s'." 
% (fileName, packagePath, fileExt)) class ARKitRootLayerChecker(BaseRuleChecker): @staticmethod def GetDescription(): return "The root layer of the package must be a usdc file and " \ "must not include any external dependencies that participate in "\ "stage composition." def __init__(self, verbose): super(ARKitRootLayerChecker, self).__init__(verbose=verbose) def CheckStage(self, usdStage): usedLayers = usdStage.GetUsedLayers() # This list excludes any session layers. usedLayersOnDisk = [i for i in usedLayers if i.realPath] if len(usedLayersOnDisk) > 1: self._AddFailedCheck("The stage uses %s layers. It should " "contain a single usdc layer to be compatible with ARKit's " "implementation of usdz." % len(usedLayersOnDisk)) rootLayerRealPath = usdStage.GetRootLayer().realPath if rootLayerRealPath.endswith(".usdz"): # Check if the root layer in the package is a usdc. from pxr import Usd zipFile = Usd.ZipFile.Open(rootLayerRealPath) if not zipFile: self._AddError("Could not open package at path '%s'." % resolvedPath) return fileNames = zipFile.GetFileNames() if not fileNames[0].endswith(".usdc"): self._AddFailedCheck("First file (%s) in usdz package '%s' " "does not have the .usdc extension." % (fileNames[0], rootLayerRealPath)) elif not rootLayerRealPath.endswith(".usdc"): self._AddFailedCheck("Root layer of the stage '%s' does not " "have the '.usdc' extension." % (rootLayerRealPath)) class ComplianceChecker(object): """ A utility class for checking compliance of a given USD asset or a USDZ package. Since usdz files are zip files, someone could use generic zip tools to create an archive and just change the extension, producing a .usdz file that does not honor the additional constraints that usdz files require. Even if someone does use our official archive creation tools, though, we intentionally allow creation of usdz files that can be very "permissive" in their contents for internal studio uses, where portability outside the studio is not a concern. 
For content meant to be delivered over the web (eg. ARKit assets), however, we must be much more restrictive. This class provides two levels of compliance checking: * "structural" validation that is represented by a set of base rules. * "ARKit" compatibility validation, which includes many more restrictions. Calling ComplianceChecker.DumpAllRules() will print an enumeration of the various rules in the two categories of compliance checking. """ @staticmethod def GetBaseRules(): return [ByteAlignmentChecker, CompressionChecker, MissingReferenceChecker, TextureChecker] @staticmethod def GetARKitRules(skipARKitRootLayerCheck=False): arkitRules = [ARKitLayerChecker, ARKitPrimTypeChecker, ARKitStageYupChecker, ARKitShaderChecker, ARKitMaterialBindingChecker, ARKitFileExtensionChecker, ARKitPackageEncapsulationChecker] if not skipARKitRootLayerCheck: arkitRules.append(ARKitRootLayerChecker) return arkitRules @staticmethod def GetRules(arkit=False, skipARKitRootLayerCheck=False): allRules = ComplianceChecker.GetBaseRules() if arkit: arkitRules = ComplianceChecker.GetARKitRules( skipARKitRootLayerCheck=skipARKitRootLayerCheck) allRules += arkitRules return allRules @staticmethod def DumpAllRules(): print('Base rules:') for ruleNum, rule in enumerate(GetBaseRules()): print('[%s] %s' % (ruleNum + 1, rule.GetDescription())) print('-' * 30) print('ARKit rules: ') for ruleNum, rule in enumerate(GetBaseRules()): print('[%s] %s' % (ruleNum + 1, rule.GetDescription())) print('-' * 30) def __init__(self, arkit=False, skipARKitRootLayerCheck=False, rootPackageOnly=False, skipVariants=False, verbose=False): self._rootPackageOnly = rootPackageOnly self._doVariants = not skipVariants self._verbose = verbose self._errors = [] # Once a package has been checked, it goes into this set. self._checkedPackages = set() # Instantiate an instance of every rule checker and store in a list. 
self._rules = [Rule(self._verbose) for Rule in ComplianceChecker.GetRules(arkit, skipARKitRootLayerCheck)] def _Msg(self, msg): if self._verbose: print(msg) def _AddError(self, errMsg): self._errors.append(errMsg) def GetErrors(self): errors = self._errors for rule in self._rules: errs = rule.GetErrors() for err in errs: errors.append("Error checking rule '%s': %s" % (type(rule).__name__, err)) return errors def DumpRules(self): descriptions = [rule.GetDescription() for rule in self._rules] print('Checking rules: ') for ruleNum, rule in enumerate(descriptions): print('[%s] %s' % (ruleNum + 1, rule)) print('-' * 30) def GetFailedChecks(self): failedChecks = [] for rule in self._rules: fcs = rule.GetFailedChecks() for fc in fcs: failedChecks.append("%s (fails '%s')" % (fc, type(rule).__name__)) return failedChecks def CheckCompliance(self, inputFile): from pxr import Sdf, Usd, UsdUtils if not Usd.Stage.IsSupportedFile(inputFile): _AddError("Cannot open file '%s' on a USD stage." % args.inputFile) return # Collect all warnings using a diagnostic delegate. delegate = UsdUtils.CoalescingDiagnosticDelegate() usdStage = Usd.Stage.Open(inputFile) stageOpenDiagnostics = delegate.TakeUncoalescedDiagnostics() for rule in self._rules: rule.CheckStage(usdStage) rule.CheckDiagnostics(stageOpenDiagnostics) with Ar.ResolverContextBinder(usdStage.GetPathResolverContext()): # This recursively computes all of inputFiles's external # dependencies. 
(allLayers, allAssets, unresolvedPaths) = \ UsdUtils.ComputeAllDependencies(Sdf.AssetPath(inputFile)) for rule in self._rules: rule.CheckUnresolvedPaths(unresolvedPaths) rule.CheckDependencies(usdStage, allLayers, allAssets) if self._rootPackageOnly: rootLayer = usdStage.GetRootLayer() if rootLayer.GetFileFormat().IsPackage(): packagePath = Ar.SplitPackageRelativePathInner( rootLayer.identifier)[0] self._CheckPackage(packagePath) else: self._AddError("Root layer of the USD stage (%s) doesn't belong to " "a package, but 'rootPackageOnly' is True!" % Usd.Describe(usdStage)) else: # Process every package just once by storing them all in a set. packages = set() for layer in allLayers: if _IsPackageOrPackagedLayer(layer): packagePath = Ar.SplitPackageRelativePathInner( layer.identifier)[0] packages.add(packagePath) self._CheckLayer(layer) for package in packages: self._CheckPackage(package) # Traverse the entire stage and check every prim. from pxr import Usd # Author all variant switches in the session layer. usdStage.SetEditTarget(usdStage.GetSessionLayer()) allPrimsIt = iter(Usd.PrimRange.Stage(usdStage, Usd.TraverseInstanceProxies())) self._TraverseRange(allPrimsIt, isStageRoot=True) def _CheckPackage(self, packagePath): self._Msg("Checking package <%s>." % packagePath) # XXX: Should we open the package on a stage to ensure that it is valid # and entirely self-contained. from pxr import Usd pkgExt = Ar.GetResolver().GetExtension(packagePath) if pkgExt != "usdz": self._AddError("Package at path %s has an invalid extension." % packagePath) return # Check the parent package first. if Ar.IsPackageRelativePath(packagePath): parentPackagePath = Ar.SplitPackageRelativePathInner(packagePath)[0] self._CheckPackage(parentPackagePath) # Avoid checking the same parent package multiple times. 
if packagePath in self._checkedPackages: return self._checkedPackages.add(packagePath) resolvedPath = Ar.GetResolver().Resolve(packagePath) if len(resolvedPath) == 0: self._AddError("Failed to resolve package path '%s'." % packagePath) return zipFile = Usd.ZipFile.Open(resolvedPath) if not zipFile: self._AddError("Could not open package at path '%s'." % resolvedPath) return for rule in self._rules: rule.CheckZipFile(zipFile, packagePath) def _CheckLayer(self, layer): for rule in self._rules: rule.CheckLayer(layer) def _CheckPrim(self, prim): for rule in self._rules: rule.CheckPrim(prim) def _TraverseRange(self, primRangeIt, isStageRoot): primsWithVariants = [] rootPrim = primRangeIt.GetCurrentPrim() for prim in primRangeIt: # Skip variant set check on the root prim if it is the stage'. if not self._doVariants or (not isStageRoot and prim == rootPrim): self._CheckPrim(prim) continue vSets = prim.GetVariantSets() vSetNames = vSets.GetNames() if len(vSetNames) == 0: self._CheckPrim(prim) else: primsWithVariants.append(prim) primRangeIt.PruneChildren() for prim in primsWithVariants: self._TraverseVariants(prim) def _TraverseVariants(self, prim): from pxr import Usd if prim.IsInstanceProxy(): return True vSets = prim.GetVariantSets() vSetNames = vSets.GetNames() allVariantNames = [] for vSetName in vSetNames: vSet = vSets.GetVariantSet(vSetName) vNames = vSet.GetVariantNames() allVariantNames.append(vNames) import itertools allVariations = itertools.product(*allVariantNames) for variation in allVariations: self._Msg("Testing variation %s of prim <%s>" % (variation, prim.GetPath())) for (idx, sel) in enumerate(variation): vSets.SetSelection(vSetNames[idx], sel) primRangeIt = iter(Usd.PrimRange(prim, Usd.TraverseInstanceProxies())) self._TraverseRange(primRangeIt, isStageRoot=False)
30,368
Python
40.715659
88
0.600797
USwampertor/OmniverseJS/ov/python/pxr/UsdMdl/__init__.py
#****************************************************************************** # * Copyright 2019 NVIDIA Corporation. All rights reserved. # ***************************************************************************** # # NVIDIA Material Definition Language (MDL) USD plugins. # # MDL Search Paths # ================ # At startup (i.e. when the MDL SDK is loaded) MDL search path is set in this order: # 1/ Dedicated environement variable # If it is set, PXR_USDMDL_PLUGIN_SEARCH_PATHS overwrites any MDL search path. # PXR_USDMDL_PLUGIN_SEARCH_PATHS can be set to a list of paths. # 2/ System and User Path # if PXR_USDMDL_PLUGIN_SEARCH_PATHS is not set: # a/ If set, add MDL_SYSTEM_PATH to the MDL search path # b/ If set, add MDL_USER_PATH to the MDL search path # # Discovery plugin # ================ # MDL discovery plugin is derived from NdrDiscoveryPlugin interface. # This plugin finds MDL functions and materials from all the modules found in the # MDL search paths. # This discovery plugin is executed as soon as the registry is instantiated, # for example in Python: # # >>> from pxr import Sdr # >>> reg = Sdr.Registry() # # MDL discovery plugin creates a discovery result (NdrNodeDiscoveryResult) # for each material and each function that is found. # # Parser plugin # ================ # MDL parser plugin is derived from NdrParserPlugin interface. # This plugin is responsible to parse a given MDL function or material and # create an NdrNode instance. # The parser plugin which is run is decided based on the discovery result discoveryType. # The parser plugin is invoked whenever a shader node is requested, for example in Python: # # >>> from pxr import Sdr # >>> MDLQualifiedName = "::material_examples::architectural::architectural" # >>> Sdr.Registry().GetShaderNodeByIdentifierAndType(MDLQualifiedName, "mdl") # # NdrNodes which is created contains a list of properties which are translated # from MDL parameters. # from . 
import _usdMdl from pxr import Tf Tf.PrepareModule(_usdMdl, locals()) del Tf try: import __DOC __DOC.Execute(locals()) del __DOC except Exception: try: import __tmpDoc __tmpDoc.Execute(locals()) del __tmpDoc except: pass
2,253
Python
34.777777
90
0.656458
USwampertor/OmniverseJS/ov/python/pxr/UsdAppUtils/rendererArgs.py
# # Copyright 2019 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. # class RendererPlugins(object): """ An enum-like container of the available Hydra renderer plugins. """ class _RendererPlugin(object): """ Class which represents a Hydra renderer plugin. Each one has a plugin identifier and a display name. """ def __init__(self, pluginId, displayName): self._pluginId = pluginId self._displayName = displayName def __repr__(self): return self.displayName @property def id(self): return self._pluginId @property def displayName(self): return self._displayName @classmethod def allPlugins(cls): """ Get a tuple of all available renderer plugins. """ if not hasattr(cls, '_allPlugins'): from pxr import UsdImagingGL cls._allPlugins = tuple(cls._RendererPlugin(pluginId, UsdImagingGL.Engine.GetRendererDisplayName(pluginId)) for pluginId in UsdImagingGL.Engine.GetRendererPlugins()) return cls._allPlugins @classmethod def fromId(cls, pluginId): """ Get a renderer plugin from its identifier. 
""" matches = [plugin for plugin in cls.allPlugins() if plugin.id == pluginId] if len(matches) == 0: raise ValueError("No renderer plugin with id '{}'".format(pluginId)) return matches[0] @classmethod def fromDisplayName(cls, displayName): """ Get a renderer plugin from its display name. """ matches = [plugin for plugin in cls.allPlugins() if plugin.displayName == displayName] if len(matches) == 0: raise ValueError("No renderer plugin with display name '{}'".format(displayName)) return matches[0] def AddCmdlineArgs(argsParser, altHelpText=''): """ Adds Hydra renderer-related command line arguments to argsParser. The resulting 'rendererPlugin' argument will be a _RendererPlugin instance representing one of the available Hydra renderer plugins. """ from pxr import UsdImagingGL helpText = altHelpText if not helpText: helpText = ( 'Hydra renderer plugin to use when generating images') argsParser.add_argument('--renderer', '-r', action='store', type=RendererPlugins.fromDisplayName, dest='rendererPlugin', choices=[p for p in RendererPlugins.allPlugins()], help=helpText)
3,564
Python
33.61165
94
0.660494
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/constants.py
# Copyright (c) 2022 Motionverse Inc. All rights reserved. # UI constants WINDOW_NAME = "Motionverse" CS_HOSTNAME_TEXT = "Host/IP" CS_PORT_TEXT = "PORT" CS_GOTO_BTN_TEXT = "Contact us" CS_START_BTN_TEXT = "Start streaming" CS_STOP_BTN_TEXT = "Stop streaming" CS_URL = "http://motionverse.io/omniverse" SKEL_SOURCE_EDIT_TEXT = "Target skeleton" SKEL_SOURCE_BTN_TEXT = "Use highlighted skeleton" SKEL_INVALID_TEXT = "No skeleton selected" RIG_DROPDOWN_TEXT = "Rig Type" RIG_UNSUPPORTED_TEXT = "Unsupported rig" # UI image filepaths LOGO_FILEPATH = "/data/logo-white.png" # UI widget spacing CS_H_SPACING = 5 # general constants DEFAULT_PORT = 4188
648
Python
27.21739
58
0.736111
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/extension.py
from email import message from operator import le import carb import omni.ext import omni.ui as ui import omni.timeline import omni.usd import omni.kit.window.file from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom import struct import asyncio import pathlib from typing import cast, Union, List import traceback import webbrowser from .constants import * import json import glob import numpy as np def get_rig_index(model_joint_names, rig_mappings): candidates = [mapping["joint_mappings"].keys() for mapping in rig_mappings] index = None for i in range(len(candidates)): if all([(joint in model_joint_names) for joint in candidates[i]]): index = i return index def get_all_descendents(prim: Usd.Prim, result: List[Usd.Prim] = []): if len(result) == 0: result.append(prim) children = prim.GetChildren() result.extend(list(children)) for child in children: get_all_descendents(child, result) def find_skeleton(path): stage = omni.usd.get_context().get_stage() prim = stage.GetPrimAtPath(path) descendants = [] get_all_descendents(prim, descendants) skeleton = next(filter(lambda x: x.IsA(UsdSkel.Skeleton), descendants), None) assert skeleton is not None, "Could not find skeleton" print(UsdSkel.Skeleton(skeleton)) return UsdSkel.Skeleton(skeleton) def find_blendShapes(path): stage = omni.usd.get_context().get_stage() prim = stage.GetPrimAtPath(path) descendants = [] get_all_descendents(prim, descendants) blendShapePrims = list(filter(lambda x: x.IsA(UsdSkel.BlendShape), descendants)) blendShapes = [UsdSkel.BlendShape(blendShape) for blendShape in blendShapePrims] return blendShapes def get_this_files_path(): return pathlib.Path(__file__).parent.absolute().as_posix() # # styles for UIController class # style_btn_enabled = { "Button": {"border_radius": 5.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFFF7E09,"border_color": 0xFFFD761D}, "Button:hovered": {"background_color": 0xFFFF4F00}, "Button:pressed": {"background_color": 0xFFFAE26F}, "Button.Label": {"color": 0xFFFFFFFF}, 
} style_btn_disabled = { "Button": {"border_radius": 3.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFC0E0C0,"border_color": 0xFFFD7F1D}, "Button:hovered": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFFAE5A}, "Button:pressed": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFAB26D}, "Button.Label": {"color": 0xFF808080}, } style_status_circle_green = {"background_color": 0xFF00FF00, "border_width": 0} style_status_circle_red = {"background_color": 0xFF0000FF, "border_width": 0} style_btn_goto_motionverse = {"Button": {"border_width": 0.0, "border_radius": 3.0, "margin": 5.0, "padding": 10.0}} # # UIController class # class UIController: def __init__(self, ext): self.ext = ext self.extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext.ext_id) self._streaming_active = False self._window = ui.Window(WINDOW_NAME,width=600, height=260) self.build_ui() def build_ui(self): with self._window.frame: with ui.VStack(height=0): with ui.HStack(): #logo logo_path = f"{self.extension_path}{LOGO_FILEPATH}" ui.Image(logo_path, width=50,height=50,fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,alignment=ui.Alignment.CENTER) ui.Spacer() ui.Button( CS_GOTO_BTN_TEXT,width=ui.Percent(10), style=style_btn_goto_motionverse,alignment=ui.Alignment.RIGHT_CENTER, clicked_fn=self.launch_motionverse_website) with ui.HStack(): # green/red status with ui.VStack(width=50, alignment=ui.Alignment.TOP): self._status_circle = ui.Circle( radius = 8,size_policy=ui.CircleSizePolicy.FIXED, style=style_status_circle_red ) ui.Spacer() with ui.VStack(): # CaptureStream device selection drop-down with ui.HStack(): ui.Label( CS_HOSTNAME_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER ) ui.Spacer(width=CS_H_SPACING) with ui.VStack(width=ui.Percent(50)): ui.Spacer() self.source_ip_field = ui.StringField( model=ui.SimpleStringModel("192.168.10.113"), height=0, visible=True ) ui.Spacer() ui.Label( CS_PORT_TEXT, 
width=ui.Percent(10), alignment=ui.Alignment.RIGHT_CENTER ) with ui.VStack(width=ui.Percent(10)): ui.Spacer() self.source_port_field = ui.StringField( model=ui.SimpleStringModel("4188"), height=0, visible=True ) ui.Spacer() # skeleton selection with ui.HStack(): ui.Label( SKEL_SOURCE_EDIT_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER ) ui.Spacer(width=CS_H_SPACING) with ui.VStack(width=ui.Percent(50)): ui.Spacer() self._skeleton_to_drive_stringfield = ui.StringField( model=ui.SimpleStringModel(SKEL_INVALID_TEXT), height=0, enabled=False ) ui.Spacer() ui.Spacer(width=CS_H_SPACING) self._skel_select_button = ui.Button( SKEL_SOURCE_BTN_TEXT, width=0, clicked_fn=self.select_skeleton ) # rig selection with ui.HStack(): ui.Label(RIG_DROPDOWN_TEXT, width=ui.Percent(20), alignment=ui.Alignment.RIGHT_CENTER) ui.Spacer(width=CS_H_SPACING) with ui.VStack(width=ui.Percent(75)): ui.Spacer() self._selected_rig_label = ui.Label("") ui.Spacer() # start/stop stream buttons with ui.HStack(): ui.Spacer(width=ui.Percent(20)) self._start_button = ui.Button( CS_START_BTN_TEXT, width=0, clicked_fn=self.start_streaming, enabled=not self.streaming_active, style=style_btn_disabled if self.streaming_active else style_btn_enabled, ) ui.Spacer(width=CS_H_SPACING) self._stop_button = ui.Button( CS_STOP_BTN_TEXT, width=0, clicked_fn=self.stop_streaming, enabled=self.streaming_active, style=style_btn_enabled if self.streaming_active else style_btn_disabled, ) ui.Spacer(height=5) def select_skeleton(self): paths = omni.usd.get_context().get_selection().get_selected_prim_paths() if paths: path = paths[0] try: self.ext.init_skeletons(path) except Exception as ex: self._skeleton_to_drive_stringfield.model.set_value(SKEL_INVALID_TEXT) self._selected_rig_label.text = self.ext.selected_rig_name or RIG_UNSUPPORTED_TEXT def launch_motionverse_website(self): webbrowser.open_new_tab(CS_URL) def update_ui(self): if self.streaming_active: self._start_button.enabled = False 
self._start_button.set_style(style_btn_disabled) self._stop_button.enabled = True self._stop_button.set_style(style_btn_enabled) else: self._start_button.enabled = self.ext.ready_to_stream self._start_button.set_style( style_btn_enabled if self.ext.ready_to_stream else style_btn_disabled ) self._stop_button.enabled = False self._stop_button.set_style(style_btn_disabled) if self.streaming_active: self._status_circle.set_style(style_status_circle_green) else: self._status_circle.set_style(style_status_circle_red) self._skeleton_to_drive_stringfield.model.set_value(self.ext.target_skeleton_path) def start_streaming(self): self.ext.connect() def stop_streaming(self): self.ext.disconnect("User cancelled") @property def streaming_active(self): return self._streaming_active @streaming_active.setter def streaming_active(self, value): self._streaming_active = value class MotionverseExtension(omni.ext.IExt): def __init__(self): self._net_io_task = None self._update_skeleton_task = None self.target_skeleton = None self.skel_cache = UsdSkel.Cache() def on_startup(self, ext_id): self.import_rig_mappings_from_json_files() self.ext_id = ext_id stream = omni.kit.app.get_app().get_update_event_stream() self.update_sub = stream.create_subscription_to_pop(self.update_ui, name="update frame") self.ui_controller = UIController(self) def connect(self): self.disconnect("Resetting connection") host = self.ui_controller.source_ip_field.model.as_string port = self.ui_controller.source_port_field.model.as_int loop = asyncio.get_event_loop() queue = asyncio.Queue(maxsize=10, loop=loop) if self._net_io_task: loop.run_until_complete(asyncio.wait({self._net_io_task}, timeout=1.0)) if self._update_skeleton_task: loop.run_until_complete(asyncio.wait({self._update_skeleton_task}, timeout=1.0)) self._net_io_task = loop.create_task(self._do_net_io(host, port, queue)) self._update_skeleton_task = loop.create_task(self._update_skeleton_loop(queue)) 
self._net_io_task.add_done_callback(self.on_task_complete) self._update_skeleton_task.add_done_callback(self.on_task_complete) def on_task_complete(self, fut=None): if fut is self._net_io_task: self._update_skeleton_task.cancel() elif fut is self._update_skeleton_task: self._net_io_task.cancel() self.ui_controller.streaming_active = False async def _do_net_io(self, host, port, queue): self.ui_controller.streaming_active = True writer = None try: reader, writer = await asyncio.open_connection(host, port) writer.write(b"ov") await self._read_client(reader, queue) except asyncio.CancelledError: print("Network streaming cancelled") except: carb.log_error(traceback.format_exc()) finally: if writer is not None: writer.close() await writer.wait_closed() print("TCP connection closed") print("Net I/O task stopped") async def _read_client(self, reader, queue): while True: message_data = await reader.readexactly(1660) await queue.put(message_data) async def _update_skeleton_loop(self, queue): try: while True: message = await queue.get() fd = FrameDetections() fd.ParseFromString(message) self.update_skeleton(fd) except asyncio.CancelledError: print("Skeleton update task cancelled") except: carb.log_error(traceback.format_exc()) def disconnect(self, reason=str()): streaming_active = False if self._net_io_task is not None: self._net_io_task.cancel() def import_rig_mappings_from_json_files(self): self.rig_mappings = [] rig_filenames = glob.glob(get_this_files_path() + "/xform_*.json") if rig_filenames is not None: for filename in rig_filenames: rig_mapfile = open(filename, "r") if rig_mapfile is not None: self.rig_mappings.append(json.load(rig_mapfile)) else: print("error - could not load file %s" % filename) def init_skeletons(self, skel_root_path): self.selected_rig_index = None self.motion_skel_anim = None self.selected_joints = None stage = omni.usd.get_context().get_stage() selected_skeleton = find_skeleton(skel_root_path) blendShapes = find_blendShapes(skel_root_path) 
print("skel_cache =====",self.skel_cache) skel_query = self.skel_cache.GetSkelQuery(selected_skeleton) print("selected_skeleton ====",selected_skeleton) print("blendShapes[0] ====",blendShapes[0]) # blendShape_query = UsdSkel.BlendShapeQuery(blendShapes[0]) # print("blendShape_query",blendShape_query) joint_tokens = skel_query.GetJointOrder() jointPaths = [Sdf.Path(jointToken) for jointToken in joint_tokens] all_joint_names = [jointPath.name for jointPath in jointPaths] # all_blendshape_names = [blendShapePath.name for blendShapePath in blendShapePaths] self.selected_rig_index = get_rig_index(all_joint_names, self.rig_mappings) assert self.selected_rig_index is not None, "Unsupported rig" self.target_skeleton = selected_skeleton self.target_skel_root = UsdSkel.Root.Find(self.target_skeleton.GetPrim()) # print("target_skeleton = ",self.target_skeleton.GetPrim()) # skel_root_rotate_xyz is a set of rotations in XYZ order used to align the rest pose # with wrnch's axes (+Y up, +Z forward) skel_root_rotate_xyz = self.rig_mappings[self.selected_rig_index]["skel_root_rotate_xyz"] rot_x = Gf.Rotation(Gf.Vec3d(1, 0, 0), skel_root_rotate_xyz[0]) rot_y = Gf.Rotation(Gf.Vec3d(0, 1, 0), skel_root_rotate_xyz[1]) rot_z = Gf.Rotation(Gf.Vec3d(0, 0, 1), skel_root_rotate_xyz[2]) self.rest_xform_adjust = Gf.Matrix4d() self.rest_xform_adjust.SetRotate(rot_x * rot_y * rot_z) self.rest_xform_adjust_inverse = self.rest_xform_adjust.GetInverse() if not skel_query.HasRestPose(): xforms = skel_query.ComputeJointLocalTransforms() self.target_skeleton.GetRestTransformsAttr().Set(xforms) self.skel_cache.Clear() def update_skeleton(self, fd): if self.selected_joints is None: self._init_animation(fd.body_pose_names) num_joints = len(self.rest_xforms_anim_global) root_index = self.motion_to_anim_index["Hips"] motion_xforms_global = Vt.Matrix4dArray(num_joints) for i, pose in enumerate(fd.body_poses): name = fd.body_pose_names[i] if name in self.motion_to_anim_index: anim_index = 
self.motion_to_anim_index[name] q = pose['rotation'] t = pose['position'] rot = Gf.Rotation(Gf.Quatd(q[3], q[0], q[1], q[2])) trans = Gf.Vec3d(t[0], t[1], t[2]) xform = Gf.Matrix4d() xform.SetTransform(rot, trans) motion_xforms_global[anim_index] = xform target_pose_xforms_global = Vt.Matrix4dArray( [ base_xform * motion_xform for motion_xform, base_xform in zip(motion_xforms_global, self.rest_xforms_anim_global) ] ) root_xform = self.rest_xform_adjust_inverse target_xforms_local = UsdSkel.ComputeJointLocalTransforms( self.anim_topology, target_pose_xforms_global, root_xform ) anim_rotations = Vt.QuatfArray([Gf.Quatf(xform.ExtractRotationQuat()) for xform in target_xforms_local]) height_offset = 0 # Apply root motion to the animation attr local_translations_attr = self.motion_skel_anim.GetTranslationsAttr() local_translations = local_translations_attr.Get(0) local_translations[root_index] = Gf.Vec3f( root_xform.Transform( Gf.Vec3d(0, 1, 0) * height_offset + motion_xforms_global[root_index].ExtractTranslation() ) ) local_translations_attr.Set(local_translations, 0) # Apply joint rotations to animation attr self.motion_skel_anim.GetRotationsAttr().Set(anim_rotations, 0) def _init_animation(self,selected_joints): stage = omni.usd.get_context().get_stage() rig_mapping = self.rig_mappings[self.selected_rig_index]["joint_mappings"] skel_query = self.skel_cache.GetSkelQuery(self.target_skeleton) joint_tokens = skel_query.GetJointOrder() joint_names = {Sdf.Path(token).name: token for token in joint_tokens} print(joint_names) # Lookup index of joint by token joint_token_indices = {token: index for index, token in enumerate(joint_tokens)} motion_to_token = { value: joint_names[key] for key, value in rig_mapping.items() if value in selected_joints } anim_tokens = Vt.TokenArray(motion_to_token.values()) assert len(anim_tokens) > 0 anim_token_indices = {token: index for index, token in enumerate(anim_tokens)} active_token_indices = [joint_token_indices[token] for token in 
anim_tokens] self.motion_to_anim_index = { motion_name: anim_token_indices[token] for motion_name, token in motion_to_token.items() } self.anim_topology = UsdSkel.Topology([Sdf.Path(token) for token in anim_tokens]) assert self.anim_topology.Validate() anim_path = self.target_skeleton.GetPath().AppendChild("SkelRoot") self.motion_skel_anim = UsdSkel.Animation.Define(stage, anim_path) print("anim_tokens=",anim_tokens) self.motion_skel_anim.GetJointsAttr().Set(anim_tokens) self.motion_skel_anim.GetBlendShapesAttr().Set(anim_tokens) # Set our UsdSkelAnimation as the animationSource of the UsdSkelSkeleton binding = UsdSkel.BindingAPI.Apply(self.target_skeleton.GetPrim()) binding.CreateAnimationSourceRel().SetTargets([self.motion_skel_anim.GetPrim().GetPath()]) # Set initial the scale, translation, and rotation attributes for the UsdSkelAnimation. # Note that these attributes need to be in the UsdSkelSkeleton's Local Space. root_xform = Gf.Matrix4d() root_xform.SetIdentity() root_xform = self.rest_xform_adjust identity_xform = Gf.Matrix4d() identity_xform.SetIdentity() rest_xforms_local = self.target_skeleton.GetRestTransformsAttr().Get() assert rest_xforms_local, "Skeleton has no restTransforms" skel_topology = skel_query.GetTopology() anim_start_index = active_token_indices[0] xform_accum = Gf.Matrix4d() xform_accum.SetIdentity() index = skel_topology.GetParent(anim_start_index) while index >= 0: xform_accum = rest_xforms_local[index] * xform_accum rest_xforms_local[index] = identity_xform index = skel_topology.GetParent(index) rest_xforms_local[anim_start_index] = xform_accum * rest_xforms_local[anim_start_index] # Set the rest pose transforms self.target_skeleton.GetRestTransformsAttr().Set(rest_xforms_local) # Joint transforms in world coordinates such that the t-pose is aligned with wrnch's # base t-pose (+Y up, +Z forward) rest_xforms_global = UsdSkel.ConcatJointTransforms(skel_topology, rest_xforms_local, root_xform) # Get the subset of the rest transforms that 
correspond to our UsdSkelAnimation attrs. # We're going to concatenate these to the wrx transforms to get the desired # pose self.rest_xforms_anim_global = Vt.Matrix4dArray([rest_xforms_global[i] for i in active_token_indices]) base_xforms_anim_local = UsdSkel.ComputeJointLocalTransforms( self.anim_topology, self.rest_xforms_anim_global, identity_xform ) self.motion_skel_anim.SetTransforms(base_xforms_anim_local, 0) self.selected_joints = set(selected_joints) def update_ui(self, dt): try: self.ui_controller.update_ui() except: self.disconnect("Error updating UI") raise def on_shutdown(self): self.ext = None self._window = None @property def ready_to_stream(self): has_skeleton_target = self.target_skeleton is not None and self.target_skeleton.GetPrim() return has_skeleton_target @property def target_skeleton_path(self): if not self.target_skeleton or not self.target_skeleton.GetPrim(): return "" else: return str(self.target_skeleton.GetPath()) @property def selected_rig_name(self): if self.selected_rig_index is not None: return self.rig_mappings[self.selected_rig_index]["display_name"] else: return None class FrameDetections(): def __init__(self): self.body_poses = None self.faces = None self.body_pose_names = ("Hips","LeftUpLeg","RightUpLeg","LeftLeg","RightLeg","LeftFoot","RightFoot","Spine","Spine1","Neck","Head","LeftShoulder","RightShoulder","LeftArm", "RightArm","LeftForeArm","RightForeArm","LeftHand","RightHand","LeftToeBase","RightToeBase","LeftHandThumb1","LeftHandThumb2","LeftHandThumb3", "LeftHandIndex1","LeftHandIndex2","LeftHandIndex3","LeftHandMiddle1","LeftHandMiddle2","LeftHandMiddle3","LeftHandRing1","LeftHandRing2","LeftHandRing3","LeftHandPinky1", "LeftHandPinky2","LeftHandPinky3","RightHandThumb1","RightHandThumb2","RightHandThumb3","RightHandIndex1","RightHandIndex2","RightHandIndex3","RightHandMiddle1", "RightHandMiddle2","RightHandMiddle3","RightHandRing1","RightHandRing2","RightHandRing3","RightHandPinky1","RightHandPinky2","RightHandPinky3") 
def ParseFromString(self,value): message_list=struct.unpack("415f",value) self.faces = message_list[:51] body_data = np.array(message_list[51:]).reshape(-1, 7) #joints num, 4+3 self.body_poses = [{'rotation': body_data[idx][:4], 'position': body_data[idx][4:]} for idx in range(len(self.body_pose_names))]
23,616
Python
39.302048
180
0.588626
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/styles.py
# # styles # style_btn_enabled = { "Button": {"border_radius": 5.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFFF7E09,"border_color": 0xFFFD761D}, "Button:hovered": {"background_color": 0xFFFF4F00}, "Button:pressed": {"background_color": 0xFFFAE26F}, "Button.Label": {"color": 0xFFFFFFFF}, } style_btn_disabled = { "Button": {"border_radius": 3.0,"margin": 5.0,"padding": 10.0,"background_color": 0xFFC0E0C0,"border_color": 0xFFFD7F1D}, "Button:hovered": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFFAE5A}, "Button:pressed": {"background_color": 0xFFC0C0C0, "background_gradient_color": 0xFFFAB26D}, "Button.Label": {"color": 0xFF808080}, } style_status_circle_green = {"background_color": 0xFF00FF00, "border_width": 0} style_status_circle_red = {"background_color": 0xFF0000FF, "border_width": 0} style_btn_goto_motionverse = {"Button": {"border_width": 0.0, "border_radius": 3.0, "margin": 5.0, "padding": 10.0}}
982
Python
53.611108
125
0.677189
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/extension.py
import carb import omni.ext import omni.timeline import omni.usd import omni.kit.window.file import traceback import json import glob import asyncio from email import message from operator import le from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom from typing import cast, Union, List from .constants import * from .ui import * from .styles import * from .utils import * class MotionverseExtension(omni.ext.IExt): def __init__(self): self._net_io_task = None self._update_skeleton_task = None self.target_skeleton = None self.skel_root_path = None self.skel_cache = UsdSkel.Cache() def on_startup(self, ext_id): self.import_rig_mappings_from_json_files() self.ext_id = ext_id stream = omni.kit.app.get_app().get_update_event_stream() self.update_sub = stream.create_subscription_to_pop(self.update_ui, name="update frame") self.ui_controller = UIController(self) def connect(self): self.disconnect("Resetting connection") host = self.ui_controller.source_ip_field.model.as_string port = self.ui_controller.source_port_field.model.as_int loop = asyncio.get_event_loop() queue = asyncio.Queue(maxsize=10, loop=loop) if self._net_io_task: loop.run_until_complete(asyncio.wait({self._net_io_task}, timeout=1.0)) if self._update_skeleton_task: loop.run_until_complete(asyncio.wait({self._update_skeleton_task}, timeout=1.0)) self._net_io_task = loop.create_task(self._do_net_io(host, port, queue)) self._update_skeleton_task = loop.create_task(self._update_skeleton_loop(queue)) self._net_io_task.add_done_callback(self.on_task_complete) self._update_skeleton_task.add_done_callback(self.on_task_complete) def on_task_complete(self, fut=None): if fut is self._net_io_task: self._update_skeleton_task.cancel() elif fut is self._update_skeleton_task: self._net_io_task.cancel() self.ui_controller.streaming_active = False async def _do_net_io(self, host, port, queue): self.ui_controller.streaming_active = True writer = None try: reader, writer = await asyncio.open_connection(host, port) 
writer.write(b"ov") await self._read_client(reader, queue) except asyncio.CancelledError: log_info("Network streaming cancelled") except: carb.log_error(traceback.format_exc()) finally: if writer is not None: writer.close() await writer.wait_closed() log_info("TCP connection closed") log_info("Net I/O task stopped") async def _read_client(self, reader, queue): while True: message_data = await reader.readexactly(1660) await queue.put(message_data) async def _update_skeleton_loop(self, queue): try: while True: message = await queue.get() fd = FrameDetections() fd.ParseFromString(message) self.update_skeleton(fd) except asyncio.CancelledError: log_info("Skeleton update task cancelled") except: carb.log_error(traceback.format_exc()) def disconnect(self, reason=str()): streaming_active = False if self._net_io_task is not None: self._net_io_task.cancel() def import_rig_mappings_from_json_files(self): self.rig_mappings = [] rig_filenames = glob.glob(get_this_files_path() + "/xform_*.json") if rig_filenames is not None: for filename in rig_filenames: rig_mapfile = open(filename, "r") if rig_mapfile is not None: self.rig_mappings.append(json.load(rig_mapfile)) else: log_info("error - could not load file %s" % filename) def init_skeletons(self, skel_root_path): self.selected_rig_index = None self.motion_skel_anim = None self.selected_joints = None self.skel_root_path = skel_root_path selected_skeleton = find_skeleton(skel_root_path) skel_query = self.skel_cache.GetSkelQuery(selected_skeleton) joint_tokens = skel_query.GetJointOrder() jointPaths = [Sdf.Path(jointToken) for jointToken in joint_tokens] all_joint_names = [jointPath.name for jointPath in jointPaths] self.selected_rig_index = get_rig_index(all_joint_names, self.rig_mappings) assert self.selected_rig_index is not None, "Unsupported rig" self.target_skeleton = selected_skeleton self.target_skel_root = UsdSkel.Root.Find(self.target_skeleton.GetPrim()) skel_root_rotate_xyz = 
self.rig_mappings[self.selected_rig_index]["skel_root_rotate_xyz"] rot_x = Gf.Rotation(Gf.Vec3d(1, 0, 0), skel_root_rotate_xyz[0]) rot_y = Gf.Rotation(Gf.Vec3d(0, 1, 0), skel_root_rotate_xyz[1]) rot_z = Gf.Rotation(Gf.Vec3d(0, 0, 1), skel_root_rotate_xyz[2]) self.rest_xform_adjust = Gf.Matrix4d() self.rest_xform_adjust.SetRotate(rot_x * rot_y * rot_z) self.rest_xform_adjust_inverse = self.rest_xform_adjust.GetInverse() if not skel_query.HasRestPose(): xforms = skel_query.ComputeJointLocalTransforms() self.target_skeleton.GetRestTransformsAttr().Set(xforms) self.skel_cache.Clear() def update_skeleton(self, fd): if self.selected_joints is None: self._init_animation(fd.body_pose_names) num_joints = len(self.rest_xforms_anim_global) root_index = self.motion_to_anim_index["Hips"] motion_xforms_global = Vt.Matrix4dArray(num_joints) for i, pose in enumerate(fd.body_poses): name = fd.body_pose_names[i] if name in self.motion_to_anim_index: anim_index = self.motion_to_anim_index[name] q = pose['rotation'] t = pose['position'] rot = Gf.Rotation(Gf.Quatd(q[3], q[0], q[1], q[2])) trans = Gf.Vec3d(t[0], t[1], t[2]) xform = Gf.Matrix4d() xform.SetTransform(rot, trans) motion_xforms_global[anim_index] = xform target_pose_xforms_global = Vt.Matrix4dArray( [ base_xform * motion_xform for motion_xform, base_xform in zip(motion_xforms_global, self.rest_xforms_anim_global) ] ) root_xform = self.rest_xform_adjust_inverse target_xforms_local = UsdSkel.ComputeJointLocalTransforms( self.anim_topology, target_pose_xforms_global, root_xform ) anim_rotations = Vt.QuatfArray([Gf.Quatf(xform.ExtractRotationQuat()) for xform in target_xforms_local]) height_offset = 0 local_translations_attr = self.motion_skel_anim.GetTranslationsAttr() local_translations = local_translations_attr.Get(0) local_translations[root_index] = Gf.Vec3f( root_xform.Transform( Gf.Vec3d(0, 1, 0) * height_offset + motion_xforms_global[root_index].ExtractTranslation() ) ) local_translations_attr.Set(local_translations, 0) 
self.motion_skel_anim.GetRotationsAttr().Set(anim_rotations, 0) # self.motion_skel_anim.GetBlendShapeWeightsAttr().Set(fd.faces,0) def _init_animation(self,selected_joints): stage = omni.usd.get_context().get_stage() rig_mapping = self.rig_mappings[self.selected_rig_index]["joint_mappings"] skel_query = self.skel_cache.GetSkelQuery(self.target_skeleton) joint_tokens = skel_query.GetJointOrder() joint_names = {Sdf.Path(token).name: token for token in joint_tokens} joint_token_indices = {token: index for index, token in enumerate(joint_tokens)} motion_to_token = { value: joint_names[key] for key, value in rig_mapping.items() if value in selected_joints } anim_tokens = Vt.TokenArray(motion_to_token.values()) assert len(anim_tokens) > 0 anim_token_indices = {token: index for index, token in enumerate(anim_tokens)} active_token_indices = [joint_token_indices[token] for token in anim_tokens] self.motion_to_anim_index = { motion_name: anim_token_indices[token] for motion_name, token in motion_to_token.items() } self.anim_topology = UsdSkel.Topology([Sdf.Path(token) for token in anim_tokens]) assert self.anim_topology.Validate() anim_path = self.target_skeleton.GetPath().AppendChild("SkelRoot") self.motion_skel_anim = UsdSkel.Animation.Define(stage, anim_path) self.motion_skel_anim.GetJointsAttr().Set(anim_tokens) # self.motion_skel_anim.GetBlendShapesAttr().Set(anim_tokens) binding = UsdSkel.BindingAPI.Apply(self.target_skeleton.GetPrim()) binding.CreateAnimationSourceRel().SetTargets([self.motion_skel_anim.GetPrim().GetPath()]) root_xform = Gf.Matrix4d() root_xform.SetIdentity() root_xform = self.rest_xform_adjust identity_xform = Gf.Matrix4d() identity_xform.SetIdentity() rest_xforms_local = self.target_skeleton.GetRestTransformsAttr().Get() assert rest_xforms_local, "Skeleton has no restTransforms" skel_topology = skel_query.GetTopology() anim_start_index = active_token_indices[0] xform_accum = Gf.Matrix4d() xform_accum.SetIdentity() index = 
skel_topology.GetParent(anim_start_index) while index >= 0: xform_accum = rest_xforms_local[index] * xform_accum rest_xforms_local[index] = identity_xform index = skel_topology.GetParent(index) rest_xforms_local[anim_start_index] = xform_accum * rest_xforms_local[anim_start_index] self.target_skeleton.GetRestTransformsAttr().Set(rest_xforms_local) rest_xforms_global = UsdSkel.ConcatJointTransforms(skel_topology, rest_xforms_local, root_xform) self.rest_xforms_anim_global = Vt.Matrix4dArray([rest_xforms_global[i] for i in active_token_indices]) base_xforms_anim_local = UsdSkel.ComputeJointLocalTransforms( self.anim_topology, self.rest_xforms_anim_global, identity_xform ) self.motion_skel_anim.SetTransforms(base_xforms_anim_local, 0) self.selected_joints = set(selected_joints) def update_ui(self, dt): try: self.ui_controller.update_ui() except: self.disconnect("Error updating UI") raise def on_shutdown(self): log_info("on_shutdown") self.ui_controller.shutdown() self.ui_controller = None self.disconnect("Extension is shutting down") @property def ready_to_stream(self): has_skeleton_target = self.target_skeleton is not None and self.target_skeleton.GetPrim() return has_skeleton_target @property def target_skeleton_path(self): if not self.target_skeleton or not self.target_skeleton.GetPrim(): return "" else: return str(self.target_skeleton.GetPath()) @property def selected_rig_name(self): if self.selected_rig_index is not None: return self.rig_mappings[self.selected_rig_index]["display_name"] else: return None
11,565
Python
39.725352
112
0.626891
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/utils.py
import pathlib from typing import cast, Union, List import carb from pxr import Vt, Gf, UsdSkel, Usd, Sdf, UsdGeom import omni.timeline import omni.usd import omni.kit.window.file import struct import numpy as np def log_info(msg): carb.log_info("{}".format(msg)) def log_warn(msg): carb.log_warn("{}".format(msg)) def log_error(msg): carb.log_error("{}".format(msg)) def get_rig_index(model_joint_names, rig_mappings): candidates = [mapping["joint_mappings"].keys() for mapping in rig_mappings] index = None for i in range(len(candidates)): if all([(joint in model_joint_names) for joint in candidates[i]]): index = i return index def get_all_descendents(prim: Usd.Prim, result: List[Usd.Prim] = []): if len(result) == 0: result.append(prim) children = prim.GetChildren() result.extend(list(children)) for child in children: get_all_descendents(child, result) def find_skeleton(path): stage = omni.usd.get_context().get_stage() prim = stage.GetPrimAtPath(path) descendants = [] get_all_descendents(prim, descendants) skeleton = next(filter(lambda x: x.IsA(UsdSkel.Skeleton), descendants), None) assert skeleton is not None, "Could not find skeleton" return UsdSkel.Skeleton(skeleton) def find_blendShapes(path): stage = omni.usd.get_context().get_stage() prim = stage.GetPrimAtPath(path) descendants = [] get_all_descendents(prim, descendants) blendShapePrims = list(filter(lambda x: x.IsA(UsdSkel.BlendShape), descendants)) blendShapes = [UsdSkel.BlendShape(blendShape) for blendShape in blendShapePrims] return blendShapes def get_this_files_path(): return pathlib.Path(__file__).parent.absolute().as_posix() class FrameDetections(): def __init__(self): self.body_poses = None self.faces = None self.body_pose_names = ("Hips","LeftUpLeg","RightUpLeg","LeftLeg","RightLeg","LeftFoot","RightFoot","Spine","Spine1","Neck","Head","LeftShoulder","RightShoulder","LeftArm", 
"RightArm","LeftForeArm","RightForeArm","LeftHand","RightHand","LeftToeBase","RightToeBase","LeftHandThumb1","LeftHandThumb2","LeftHandThumb3", "LeftHandIndex1","LeftHandIndex2","LeftHandIndex3","LeftHandMiddle1","LeftHandMiddle2","LeftHandMiddle3","LeftHandRing1","LeftHandRing2","LeftHandRing3","LeftHandPinky1", "LeftHandPinky2","LeftHandPinky3","RightHandThumb1","RightHandThumb2","RightHandThumb3","RightHandIndex1","RightHandIndex2","RightHandIndex3","RightHandMiddle1", "RightHandMiddle2","RightHandMiddle3","RightHandRing1","RightHandRing2","RightHandRing3","RightHandPinky1","RightHandPinky2","RightHandPinky3") def ParseFromString(self,value): message_list=struct.unpack("415f",value) self.faces = message_list[:51] body_data = np.array(message_list[51:]).reshape(-1, 7) #joints num, 4+3 self.body_poses = [{'rotation': body_data[idx][:4], 'position': body_data[idx][4:]} for idx in range(len(self.body_pose_names))]
3,083
Python
42.436619
180
0.687642
Motionverse/MV-omniverse-extension/exts/motionverse.engine.coder/motionverse/engine/coder/scripts/ui.py
import omni.ui as ui
import omni.kit.ui
import omni.kit.app
import omni.kit.window.filepicker
import omni.usd  # bug fix: select_skeleton() uses omni.usd; import it explicitly
import webbrowser

from .styles import *
from .constants import *


#
# UIController class
#
class UIController:
    """Builds and drives the Motionverse extension window.

    Owns the status indicator, host/port fields, skeleton/rig selection and
    the start/stop streaming buttons; all actions delegate to ``self.ext``.
    """

    def __init__(self, ext):
        """Create the window and build its widget tree.

        Args:
            ext: Owning extension; must expose ``ext_id``, ``connect()``,
                ``disconnect()``, ``init_skeletons()`` and streaming state.
        """
        self.ext = ext
        self.extension_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext.ext_id)
        self._streaming_active = False
        self._window = ui.Window(WINDOW_NAME, width=600, height=260)
        self.build_ui()

    def build_ui(self):
        """(Re)build the whole widget hierarchy inside the window frame."""
        with self._window.frame:
            with ui.VStack(height=0):
                with ui.HStack():
                    # logo row with link button to the Motionverse website
                    logo_path = f"{self.extension_path}{LOGO_FILEPATH}"
                    ui.Image(logo_path, width=50, height=50,
                             fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
                             alignment=ui.Alignment.CENTER)
                    ui.Spacer()
                    ui.Button(CS_GOTO_BTN_TEXT, width=ui.Percent(10),
                              style=style_btn_goto_motionverse,
                              alignment=ui.Alignment.RIGHT_CENTER,
                              clicked_fn=self.launch_motionverse_website)
                with ui.HStack():
                    # green/red connection status indicator
                    with ui.VStack(width=50, alignment=ui.Alignment.TOP):
                        self._status_circle = ui.Circle(
                            radius=8, size_policy=ui.CircleSizePolicy.FIXED,
                            style=style_status_circle_red
                        )
                        ui.Spacer()
                    with ui.VStack():
                        # CaptureStream device selection drop-down (host + port)
                        with ui.HStack():
                            ui.Label(CS_HOSTNAME_TEXT, width=ui.Percent(20),
                                     alignment=ui.Alignment.RIGHT_CENTER)
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(50)):
                                ui.Spacer()
                                self.source_ip_field = ui.StringField(
                                    model=ui.SimpleStringModel(DEFAULT_IP),
                                    height=0, visible=True)
                                ui.Spacer()
                            ui.Label(CS_PORT_TEXT, width=ui.Percent(10),
                                     alignment=ui.Alignment.RIGHT_CENTER)
                            with ui.VStack(width=ui.Percent(10)):
                                ui.Spacer()
                                self.source_port_field = ui.StringField(
                                    model=ui.SimpleStringModel(DEFAULT_PORT),
                                    height=0, visible=True)
                                ui.Spacer()
                        # target skeleton selection
                        with ui.HStack():
                            ui.Label(SKEL_SOURCE_EDIT_TEXT, width=ui.Percent(20),
                                     alignment=ui.Alignment.RIGHT_CENTER)
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(50)):
                                ui.Spacer()
                                self._skeleton_to_drive_stringfield = ui.StringField(
                                    model=ui.SimpleStringModel(SKEL_INVALID_TEXT),
                                    height=0, enabled=False)
                                ui.Spacer()
                            ui.Spacer(width=CS_H_SPACING)
                            self._skel_select_button = ui.Button(
                                SKEL_SOURCE_BTN_TEXT, width=0,
                                clicked_fn=self.select_skeleton)
                        # detected rig display
                        with ui.HStack():
                            ui.Label(RIG_DROPDOWN_TEXT, width=ui.Percent(20),
                                     alignment=ui.Alignment.RIGHT_CENTER)
                            ui.Spacer(width=CS_H_SPACING)
                            with ui.VStack(width=ui.Percent(75)):
                                ui.Spacer()
                                self._selected_rig_label = ui.Label("")
                                ui.Spacer()
                        # start/stop stream buttons; styles mirror enabled state
                        with ui.HStack():
                            ui.Spacer(width=ui.Percent(20))
                            self._start_button = ui.Button(
                                CS_START_BTN_TEXT, width=0,
                                clicked_fn=self.start_streaming,
                                enabled=not self.streaming_active,
                                style=style_btn_disabled if self.streaming_active else style_btn_enabled,
                            )
                            ui.Spacer(width=CS_H_SPACING)
                            self._stop_button = ui.Button(
                                CS_STOP_BTN_TEXT, width=0,
                                clicked_fn=self.stop_streaming,
                                enabled=self.streaming_active,
                                style=style_btn_enabled if self.streaming_active else style_btn_disabled,
                            )
                ui.Spacer(height=5)

    def shutdown(self):
        """Release the window.

        Bug fix: guarded so a second call (or a call after the window is
        already gone) no longer raises AttributeError on ``None.frame``.
        """
        if self._window is not None:
            self._window.frame.clear()
            self._window = None

    def select_skeleton(self):
        """Adopt the currently selected stage prim as the target skeleton."""
        paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
        if paths:
            path = paths[0]
            try:
                self.ext.init_skeletons(path)
            except Exception:
                # best effort: any failure resets the field to the invalid marker
                self._skeleton_to_drive_stringfield.model.set_value(SKEL_INVALID_TEXT)
        self._selected_rig_label.text = self.ext.selected_rig_name or RIG_UNSUPPORTED_TEXT

    def launch_motionverse_website(self):
        """Open the Motionverse website in the default browser."""
        webbrowser.open_new_tab(CS_URL)

    def update_ui(self):
        """Sync buttons, status circle and skeleton field with current state."""
        if self.streaming_active:
            self._start_button.enabled = False
            self._start_button.set_style(style_btn_disabled)
            self._stop_button.enabled = True
            self._stop_button.set_style(style_btn_enabled)
        else:
            self._start_button.enabled = self.ext.ready_to_stream
            self._start_button.set_style(
                style_btn_enabled if self.ext.ready_to_stream else style_btn_disabled
            )
            self._stop_button.enabled = False
            self._stop_button.set_style(style_btn_disabled)
        if self.streaming_active:
            self._status_circle.set_style(style_status_circle_green)
        else:
            self._status_circle.set_style(style_status_circle_red)
        self._skeleton_to_drive_stringfield.model.set_value(self.ext.target_skeleton_path)

    def start_streaming(self):
        """Ask the extension to open the streaming connection."""
        self.ext.connect()

    def stop_streaming(self):
        """Ask the extension to close the streaming connection."""
        self.ext.disconnect("User cancelled")

    @property
    def streaming_active(self):
        # Streaming flag; the extension flips it via the setter below.
        return self._streaming_active

    @streaming_active.setter
    def streaming_active(self, value):
        self._streaming_active = value
7,135
Python
40.730994
177
0.476524
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/styles.py
from pathlib import Path import omni.ui as ui from omni.ui import color as cl CURRENT_PATH = Path(__file__).parent.absolute() ICON_PATH = CURRENT_PATH.parent.parent.parent.joinpath("icons") cl.action_safe_default = cl(1.0, 0.0, 0.0) cl.title_safe_default = cl(1.0, 1.0, 0.0) cl.custom_safe_default = cl(0.0, 1.0, 0.0) cl.letterbox_default = cl(0.0, 0.0, 0.0, 0.75) cl.comp_lines_default = cl(1.0, 1.0, 1.0, 0.6) safe_areas_group_style = { "Label:disabled": { "color": cl(1.0, 1.0, 1.0, 0.2) }, "FloatSlider:enabled": { "draw_mode": ui.SliderDrawMode.HANDLE, "background_color": cl(0.75, 0.75, 0.75, 1), "color": cl.black }, "FloatSlider:disabled": { "draw_mode": ui.SliderDrawMode.HANDLE, "background_color": cl(0.75, 0.75, 0.75, 0.2), "color": cl(0.0, 0.0, 1.0, 0.2) }, "CheckBox": { "background_color": cl(0.75, 0.75, 0.75, 1), "color": cl.black }, "Rectangle::ActionSwatch": { "background_color": cl.action_safe_default }, "Rectangle::TitleSwatch": { "background_color": cl.title_safe_default }, "Rectangle::CustomSwatch": { "background_color": cl.custom_safe_default } } comp_group_style = { "Button.Image::Off": { "image_url": str(ICON_PATH / "off.png") }, "Button.Image::Thirds": { "image_url": str(ICON_PATH / "thirds.png") }, "Button.Image::Quad": { "image_url": str(ICON_PATH / "quad.png") }, "Button.Image::Crosshair": { "image_url": str(ICON_PATH / "crosshair.png") }, "Button:checked": { "background_color": cl(1.0, 1.0, 1.0, 0.2) } }
1,688
Python
26.688524
63
0.560427
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/constants.py
"""Constants used by the CameraReticleExtension""" import enum class CompositionGuidelines(enum.IntEnum): """Enum representing all of the composition modes.""" OFF = 0 THIRDS = 1 QUAD = 2 CROSSHAIR = 3 DEFAULT_ACTION_SAFE_PERCENTAGE = 93 DEFAULT_TITLE_SAFE_PERCENTAGE = 90 DEFAULT_CUSTOM_SAFE_PERCENTAGE = 85 DEFAULT_LETTERBOX_RATIO = 2.35 DEFAULT_COMPOSITION_MODE = CompositionGuidelines.OFF SETTING_RESOLUTION_WIDTH = "/app/renderer/resolution/width" SETTING_RESOLUTION_HEIGHT = "/app/renderer/resolution/height" SETTING_RESOLUTION_FILL = "/app/runLoops/rendering_0/fillResolution"
609
Python
26.727272
68
0.760263
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/extension.py
import carb import omni.ext from omni.kit.viewport.utility import get_active_viewport_window from . import constants from .models import ReticleModel from .views import ReticleOverlay class CameraReticleExtension(omni.ext.IExt): def on_startup(self, ext_id): carb.log_info("[maticodes.viewport.reticle] CameraReticleExtension startup") # Reticle should ideally be used with "Fill Viewport" turned off. settings = carb.settings.get_settings() settings.set(constants.SETTING_RESOLUTION_FILL, False) viewport_window = get_active_viewport_window() if viewport_window is not None: reticle_model = ReticleModel() self.reticle = ReticleOverlay(reticle_model, viewport_window, ext_id) self.reticle.build_viewport_overlay() def on_shutdown(self): """ Executed when the extension is disabled.""" carb.log_info("[maticodes.viewport.reticle] CameraReticleExtension shutdown") self.reticle.destroy()
1,010
Python
33.862068
85
0.70495
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/__init__.py
from .extension import CameraReticleExtension
46
Python
22.499989
45
0.891304
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/models.py
"""Models used by the CameraReticleExtension"""
import omni.ui as ui

from . import constants


class ReticleModel:
    """Shared data model for the ReticleOverlay and ReticleMenu.

    Both views receive the same ReticleModel instance so they stay in sync:
    any change to a submodel notifies every registered listener, letting the
    overlay rebuild itself from the latest values.
    """

    def __init__(self):
        self.composition_mode = ui.SimpleIntModel(constants.DEFAULT_COMPOSITION_MODE)
        self.action_safe_enabled = ui.SimpleBoolModel(False)
        self.action_safe_percentage = ui.SimpleFloatModel(
            constants.DEFAULT_ACTION_SAFE_PERCENTAGE, min=0, max=100)
        self.title_safe_enabled = ui.SimpleBoolModel(False)
        self.title_safe_percentage = ui.SimpleFloatModel(
            constants.DEFAULT_TITLE_SAFE_PERCENTAGE, min=0, max=100)
        self.custom_safe_enabled = ui.SimpleBoolModel(False)
        self.custom_safe_percentage = ui.SimpleFloatModel(
            constants.DEFAULT_CUSTOM_SAFE_PERCENTAGE, min=0, max=100)
        self.letterbox_enabled = ui.SimpleBoolModel(False)
        self.letterbox_ratio = ui.SimpleFloatModel(constants.DEFAULT_LETTERBOX_RATIO, min=0.001)

        self._register_submodel_callbacks()
        self._callbacks = []

    def _register_submodel_callbacks(self):
        """Subscribe to value changes on every submodel."""
        submodels = (
            self.composition_mode,
            self.action_safe_enabled,
            self.action_safe_percentage,
            self.title_safe_enabled,
            self.title_safe_percentage,
            self.custom_safe_enabled,
            self.custom_safe_percentage,
            self.letterbox_enabled,
            self.letterbox_ratio,
        )
        for submodel in submodels:
            submodel.add_value_changed_fn(self._reticle_changed)

    def _reticle_changed(self, model):
        """Fan a submodel change out to every registered listener.

        Args:
            model (Any): The submodel that changed. [Unused]
        """
        for notify in self._callbacks:
            notify()

    def add_reticle_changed_fn(self, callback):
        """Register *callback* to run whenever any submodel value changes.

        Useful for rebuilding the overlay on any data change.

        Args:
            callback (function): Called with no arguments on every change.
        """
        self._callbacks.append(callback)
2,712
Python
44.98305
115
0.703171
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/views.py
from functools import partial

import carb
import omni.ui as ui
from omni.ui import color as cl
from omni.ui import scene

from . import constants
from .constants import CompositionGuidelines
from .models import ReticleModel
from . import styles


class ReticleOverlay:
    """The reticle viewport overlay.

    Build the reticle graphics and ReticleMenu button on the given viewport
    window.
    """
    _instances = []

    def __init__(self, model: ReticleModel, vp_win: ui.Window, ext_id: str):
        """ReticleOverlay constructor

        Args:
            model (ReticleModel): The reticle model
            vp_win (Window): The viewport window to build the overlay on.
            ext_id (str): The extension id.
        """
        self.model = model
        self.vp_win = vp_win
        self.ext_id = ext_id
        # Bug fix: define these up front so destroy() is safe even when
        # build_viewport_overlay() was never called (previously AttributeError).
        self.scene_view = None
        self.reticle_menu = None
        # Rebuild the overlay whenever the viewport window changes
        self.vp_win.set_height_changed_fn(self.on_window_changed)
        self.vp_win.set_width_changed_fn(self.on_window_changed)
        self._view_change_sub = None
        try:
            # VP2 resolution change sub
            self._view_change_sub = self.vp_win.viewport_api.subscribe_to_view_change(self.on_window_changed)
        except AttributeError:
            carb.log_info("Using Viewport Legacy: Reticle will not automatically update on resolution changes.")

        # Rebuild the overlay whenever the model changes
        self.model.add_reticle_changed_fn(self.build_viewport_overlay)
        ReticleOverlay._instances.append(self)
        resolution = self.vp_win.viewport_api.get_texture_resolution()
        self._aspect_ratio = resolution[0] / resolution[1]

    @classmethod
    def get_instances(cls):
        """Get all created instances of ReticleOverlay"""
        return cls._instances

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Release subscriptions and UI objects.

        Bug fix: guarded so destroy() is idempotent and does not crash when
        build_viewport_overlay() never ran (scene_view/reticle_menu are None).
        """
        self._view_change_sub = None
        if self.scene_view is not None:
            self.scene_view.scene.clear()
            self.scene_view = None
        if self.reticle_menu is not None:
            self.reticle_menu.destroy()
            self.reticle_menu = None
        self.vp_win = None

    def on_window_changed(self, *args):
        """Update aspect ratio and rebuild overlay when viewport window changes."""
        if self.vp_win is None:
            return

        settings = carb.settings.get_settings()
        if type(self.vp_win).__name__ == "LegacyViewportWindow":
            fill = settings.get(constants.SETTING_RESOLUTION_FILL)
        else:
            fill = self.vp_win.viewport_api.fill_frame

        if fill:
            # NOTE: +8 compensates for window frame padding — confirm if the
            # viewport chrome changes.
            width = self.vp_win.frame.computed_width + 8
            height = self.vp_win.height
        else:
            width, height = self.vp_win.viewport_api.resolution
        self._aspect_ratio = width / height
        self.build_viewport_overlay()

    def get_aspect_ratio_flip_threshold(self):
        """Get magic number for aspect ratio policy.

        Aspect ratio policy doesn't seem to swap exactly when
        window_aspect_ratio == window_texture_aspect_ratio.
        This is a hack that approximates where the policy changes.
        """
        return self.get_aspect_ratio() - self.get_aspect_ratio() * 0.05

    def build_viewport_overlay(self, *args):
        """Build all viewport graphics and ReticleMenu button."""
        if self.vp_win is not None:
            with self.vp_win.get_frame(self.ext_id):
                with ui.ZStack():
                    # Aspect ratio policy depends on whether the viewport is
                    # wider than it is tall or vice versa.
                    if self.vp_win.width / self.vp_win.height > self.get_aspect_ratio_flip_threshold():
                        self.scene_view = scene.SceneView(aspect_ratio_policy=scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL)
                    else:
                        self.scene_view = scene.SceneView(aspect_ratio_policy=scene.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL)

                    # Build all the scene view guidelines
                    with self.scene_view.scene:
                        if self.model.composition_mode.as_int == CompositionGuidelines.THIRDS:
                            self._build_thirds()
                        elif self.model.composition_mode.as_int == CompositionGuidelines.QUAD:
                            self._build_quad()
                        elif self.model.composition_mode.as_int == CompositionGuidelines.CROSSHAIR:
                            self._build_crosshair()
                        if self.model.action_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.action_safe_percentage.as_float / 100.0,
                                                  color=cl.action_safe_default)
                        if self.model.title_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.title_safe_percentage.as_float / 100.0,
                                                  color=cl.title_safe_default)
                        if self.model.custom_safe_enabled.as_bool:
                            self._build_safe_rect(self.model.custom_safe_percentage.as_float / 100.0,
                                                  color=cl.custom_safe_default)
                        if self.model.letterbox_enabled.as_bool:
                            self._build_letterbox()

                    # Build ReticleMenu button (bottom-right corner)
                    with ui.VStack():
                        ui.Spacer()
                        with ui.HStack(height=0):
                            ui.Spacer()
                            self.reticle_menu = ReticleMenu(self.model)

    def _build_thirds(self):
        """Build the scene ui graphics for the Thirds composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        inverse_ratio = 1 / aspect_ratio
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([-0.333 * aspect_ratio, -1, 0], [-0.333 * aspect_ratio, 1, 0], color=line_color)
            scene.Line([0.333 * aspect_ratio, -1, 0], [0.333 * aspect_ratio, 1, 0], color=line_color)
            scene.Line([-aspect_ratio, -0.333, 0], [aspect_ratio, -0.333, 0], color=line_color)
            scene.Line([-aspect_ratio, 0.333, 0], [aspect_ratio, 0.333, 0], color=line_color)
        else:
            scene.Line([-1, -0.333 * inverse_ratio, 0], [1, -0.333 * inverse_ratio, 0], color=line_color)
            scene.Line([-1, 0.333 * inverse_ratio, 0], [1, 0.333 * inverse_ratio, 0], color=line_color)
            scene.Line([-0.333, -inverse_ratio, 0], [-0.333, inverse_ratio, 0], color=line_color)
            scene.Line([0.333, -inverse_ratio, 0], [0.333, inverse_ratio, 0], color=line_color)

    def _build_quad(self):
        """Build the scene ui graphics for the Quad composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        inverse_ratio = 1 / aspect_ratio
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([0, -1, 0], [0, 1, 0], color=line_color)
            scene.Line([-aspect_ratio, 0, 0], [aspect_ratio, 0, 0], color=line_color)
        else:
            scene.Line([0, -inverse_ratio, 0], [0, inverse_ratio, 0], color=line_color)
            scene.Line([-1, 0, 0], [1, 0, 0], color=line_color)

    def _build_crosshair(self):
        """Build the scene ui graphics for the Crosshair composition mode."""
        aspect_ratio = self.get_aspect_ratio()
        line_color = cl.comp_lines_default
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Line([0, 0.05 * aspect_ratio, 0], [0, 0.1 * aspect_ratio, 0], color=line_color)
            scene.Line([0, -0.05 * aspect_ratio, 0], [0, -0.1 * aspect_ratio, 0], color=line_color)
            scene.Line([0.05 * aspect_ratio, 0, 0], [0.1 * aspect_ratio, 0, 0], color=line_color)
            scene.Line([-0.05 * aspect_ratio, 0, 0], [-0.1 * aspect_ratio, 0, 0], color=line_color)
        else:
            scene.Line([0, 0.05 * 1, 0], [0, 0.1 * 1, 0], color=line_color)
            scene.Line([0, -0.05 * 1, 0], [0, -0.1 * 1, 0], color=line_color)
            scene.Line([0.05 * 1, 0, 0], [0.1 * 1, 0, 0], color=line_color)
            scene.Line([-0.05 * 1, 0, 0], [-0.1 * 1, 0, 0], color=line_color)

        # Tiny offset keeps the center point from being optimized away.
        scene.Points([[0.00005, 0, 0]], sizes=[2], colors=[line_color])

    def _build_safe_rect(self, percentage, color):
        """Build the scene ui graphics for the safe area rectangle

        Args:
            percentage (float): The 0-1 percentage the render target that the rectangle should fill.
            color: The color to draw the rectangle wireframe with.
        """
        aspect_ratio = self.get_aspect_ratio()
        inverse_ratio = 1 / aspect_ratio
        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            scene.Rectangle(aspect_ratio * 2 * percentage, 1 * 2 * percentage,
                            thickness=1, wireframe=True, color=color)
        else:
            scene.Rectangle(1 * 2 * percentage, inverse_ratio * 2 * percentage,
                            thickness=1, wireframe=True, color=color)

    def _build_letterbox(self):
        """Build the scene ui graphics for the letterbox."""
        aspect_ratio = self.get_aspect_ratio()
        letterbox_color = cl.letterbox_default
        letterbox_ratio = self.model.letterbox_ratio.as_float

        def build_letterbox_helper(width, height, x_offset, y_offset):
            # Draw a mirrored pair of filled bars at +/- the given offset.
            move = scene.Matrix44.get_translation_matrix(x_offset, y_offset, 0)
            with scene.Transform(transform=move):
                scene.Rectangle(width * 2, height * 2, thickness=0, wireframe=False, color=letterbox_color)
            move = scene.Matrix44.get_translation_matrix(-x_offset, -y_offset, 0)
            with scene.Transform(transform=move):
                scene.Rectangle(width * 2, height * 2, thickness=0, wireframe=False, color=letterbox_color)

        if self.scene_view.aspect_ratio_policy == scene.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL:
            if letterbox_ratio >= aspect_ratio:
                # Target is wider than the viewport: bars on top and bottom.
                height = 1 - aspect_ratio / letterbox_ratio
                rect_height = height / 2
                rect_offset = 1 - rect_height
                build_letterbox_helper(aspect_ratio, rect_height, 0, rect_offset)
            else:
                # Target is narrower: pillarbox bars on the sides.
                width = aspect_ratio - letterbox_ratio
                rect_width = width / 2
                rect_offset = aspect_ratio - rect_width
                build_letterbox_helper(rect_width, 1, rect_offset, 0)
        else:
            inverse_ratio = 1 / aspect_ratio
            if letterbox_ratio >= aspect_ratio:
                height = inverse_ratio - 1 / letterbox_ratio
                rect_height = height / 2
                rect_offset = inverse_ratio - rect_height
                build_letterbox_helper(1, rect_height, 0, rect_offset)
            else:
                width = (aspect_ratio - letterbox_ratio) * inverse_ratio
                rect_width = width / 2
                rect_offset = 1 - rect_width
                build_letterbox_helper(rect_width, inverse_ratio, rect_offset, 0)

    def get_aspect_ratio(self):
        """Get the aspect ratio of the viewport.

        Returns:
            float: The viewport aspect ratio.
        """
        return self._aspect_ratio


class ReticleMenu:
    """The popup reticle menu"""

    def __init__(self, model: ReticleModel):
        """ReticleMenu constructor

        Stores the model and builds the Reticle button.

        Args:
            model (ReticleModel): The reticle model
        """
        self.model = model
        self.button = ui.Button("Reticle", width=0, height=0,
                                mouse_pressed_fn=self.show_reticle_menu,
                                style={"margin": 10, "padding": 5, "color": cl.white})
        self.reticle_menu = None

    def destroy(self):
        self.button.destroy()
        self.button = None
        self.reticle_menu = None

    def on_group_check_changed(self, safe_area_group, model):
        """Enables/disables safe area groups

        When a safe area checkbox state changes, all the widgets of the
        respective group should be enabled/disabled.

        Args:
            safe_area_group (HStack): The safe area group to enable/disable
            model (SimpleBoolModel): The safe group checkbox model.
        """
        safe_area_group.enabled = model.as_bool

    def on_composition_mode_changed(self, guideline_type):
        """Sets the selected composition mode.

        When a composition button is clicked, it should be checked on and the
        other buttons should be checked off. Sets the composition mode on the
        ReticleModel too.

        Args:
            guideline_type (CompositionGuidelines): The newly selected mode.
        """
        self.model.composition_mode.set_value(guideline_type)
        self.comp_off_button.checked = guideline_type == CompositionGuidelines.OFF
        self.comp_thirds_button.checked = guideline_type == CompositionGuidelines.THIRDS
        self.comp_quad_button.checked = guideline_type == CompositionGuidelines.QUAD
        self.comp_crosshair_button.checked = guideline_type == CompositionGuidelines.CROSSHAIR

    def show_reticle_menu(self, x, y, button, modifier):
        """Build and show the reticle menu popup."""
        self.reticle_menu = ui.Menu("Reticle", width=400, height=200)
        self.reticle_menu.clear()

        with self.reticle_menu:
            with ui.Frame(width=0, height=100):
                with ui.HStack():
                    with ui.VStack():
                        ui.Label("Composition", alignment=ui.Alignment.LEFT, height=30)
                        with ui.VGrid(style=styles.comp_group_style, width=150,
                                      height=0, column_count=2, row_height=75):
                            current_comp_mode = self.model.composition_mode.as_int
                            with ui.HStack():
                                off_checked = current_comp_mode == CompositionGuidelines.OFF
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.OFF)
                                self.comp_off_button = ui.Button("Off", name="Off", checked=off_checked,
                                                                 width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                thirds_checked = current_comp_mode == CompositionGuidelines.THIRDS
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.THIRDS)
                                self.comp_thirds_button = ui.Button("Thirds", name="Thirds", checked=thirds_checked,
                                                                    width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                quad_checked = current_comp_mode == CompositionGuidelines.QUAD
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.QUAD)
                                self.comp_quad_button = ui.Button("Quad", name="Quad", checked=quad_checked,
                                                                  width=70, height=70, clicked_fn=callback)
                            with ui.HStack():
                                crosshair_checked = current_comp_mode == CompositionGuidelines.CROSSHAIR
                                callback = partial(self.on_composition_mode_changed, CompositionGuidelines.CROSSHAIR)
                                self.comp_crosshair_button = ui.Button("Crosshair", name="Crosshair",
                                                                       checked=crosshair_checked,
                                                                       width=70, height=70, clicked_fn=callback)
                    ui.Spacer(width=10)
                    with ui.VStack(style=styles.safe_areas_group_style):
                        ui.Label("Safe Areas", alignment=ui.Alignment.LEFT, height=30)
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.action_safe_enabled)
                            action_safe_group = ui.HStack(enabled=self.model.action_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, action_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with action_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Action Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=14)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.action_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="ActionSwatch", height=5)
                                    ui.Spacer()
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.title_safe_enabled)
                            title_safe_group = ui.HStack(enabled=self.model.title_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, title_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with title_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Title Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=25)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.title_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="TitleSwatch", height=5)
                                    ui.Spacer()
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.custom_safe_enabled)
                            custom_safe_group = ui.HStack(enabled=self.model.custom_safe_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, custom_safe_group)
                            cb.model.add_value_changed_fn(callback)
                            with custom_safe_group:
                                ui.Spacer(width=10)
                                ui.Label("Custom Safe", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=5)
                                with ui.VStack():
                                    ui.FloatSlider(self.model.custom_safe_percentage, width=100,
                                                   format="%.0f%%", min=0, max=100, step=1)
                                    ui.Rectangle(name="CustomSwatch", height=5)
                                    ui.Spacer()
                        ui.Label("Letterbox", alignment=ui.Alignment.LEFT, height=30)
                        with ui.HStack(width=0):
                            ui.Spacer(width=20)
                            cb = ui.CheckBox(model=self.model.letterbox_enabled)
                            letterbox_group = ui.HStack(enabled=self.model.letterbox_enabled.as_bool)
                            callback = partial(self.on_group_check_changed, letterbox_group)
                            cb.model.add_value_changed_fn(callback)
                            with letterbox_group:
                                ui.Spacer(width=10)
                                ui.Label("Letterbox Ratio", alignment=ui.Alignment.TOP)
                                ui.Spacer(width=5)
                                ui.FloatDrag(self.model.letterbox_ratio, width=35, min=0.001, step=0.01)

        self.reticle_menu.show_at(x - self.reticle_menu.width, y - self.reticle_menu.height)
20,348
Python
52.691293
129
0.543886
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/tests/reticle_tests.py
""" Tests Module TODO: * Write actual tests. """ from pathlib import Path from typing import Optional import carb import omni.kit import omni.ui as ui from omni.ui.tests.test_base import OmniUiTest from maticodes.viewport.reticle.extension import CameraReticleExtension CURRENT_PATH = Path(__file__).parent.joinpath("../../../../data") class TestReticle(OmniUiTest): # Before running each test async def setUp(self): await super().setUp() self._golden_img_dir = CURRENT_PATH.absolute().resolve().joinpath("tests") self._all_widgets = [] self._settings = carb.settings.get_settings() self._original_value = self._settings.get_as_int("/persistent/app/viewport/displayOptions") self._settings.set_int("/persistent/app/viewport/displayOptions", 0) # Create test area await self.create_test_area(256, 256) window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR | ui.WINDOW_FLAGS_NO_TITLE_BAR | ui.WINDOW_FLAGS_NO_RESIZE self._test_window = ui.Window( "Viewport", dockPreference=ui.DockPreference.DISABLED, flags=window_flags, width=256, height=256, position_x=0, position_y=0, ) # Override default background self._test_window.frame.set_style({"Window": {"background_color": 0xFF000000, "border_color": 0x0, "border_radius": 0}}) await omni.kit.app.get_app().next_update_async() await omni.kit.app.get_app().next_update_async() # After running each test async def tearDown(self): self._golden_img_dir = None self._test_window = None self._settings.set_int("/persistent/app/viewport/displayOptions", self._original_value) await super().tearDown() async def test_reticle_menu_button(self): await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name="test_reticle_menu_button.png")
1,962
Python
32.844827
128
0.649847
matiascodesal/omni-camera-reticle/exts/maticodes.viewport.reticle/maticodes/viewport/reticle/tests/__init__.py
from maticodes.viewport.reticle.tests.reticle_tests import TestReticle
70
Python
69.99993
70
0.885714
heavyai/omni-component/exts/heavyai.ui.component/heavyai/ui/component/extension.py
from __future__ import annotations import asyncio import contextlib from typing import Dict, Optional, TYPE_CHECKING import omni.kit.app if TYPE_CHECKING: import omni.ui as ui class Component: """ The base class that UI elements should be subclassed from Attributes ---------- name : Optional[str] The name of the root container. style : Optional[Dict] The local style of the root container. height : Optional[int] The height of the root container. width : Optional[int] The width of the root container style_type_name_override : Optional[str] By default, we use typeName to look up the style. But sometimes it's necessary to use a custom name. For example, when a widget as a part of another widget.(Label is a part of Button) This property can override the name to use in style. """ style: Optional[Dict] = None height: Optional[int] = None width: Optional[int] = None name: Optional[str] = None style_type_name_override: Optional[str] = None def __init__(self, render_on_init=True, **kwargs): """ Parameters ---------- render_on_init : bool If the render method should be called upon component creation """ # ui.Container is ui.VStack/HStack/ZStack/etc self._root: ui.Container = None self._debounce_task: asyncio.Future = None props = self.get_props() # grab declared component props for k, v in kwargs.items(): try: assert k in props # ensure the prop has been declared setattr(self, k, v) # set props except AssertionError: raise AssertionError(f"Prop '{k}' must be annotated") from None # in rare situations you may need to choose when the component initially renders if render_on_init: self.render() @classmethod def get_props(cls): d = {} for c in cls.mro(): try: d.update(**c.__annotations__) except AttributeError: pass return d @property def visible(self): if self._root: return self._root.visible return False @visible.setter def visible(self, new_visible): if not self._root: raise Exception("Component has not been rendered") from None self._root.visible = new_visible @property def 
enabled(self): if self._root: return self._root.enabled @enabled.setter def enabled(self, value): if self._root: self._root.enabled = value def get_root(self, Container: ui.Container, default_visible=True, **kwargs): """ Creates and returns a new container upon initial call. Clears the container and returns reference upon subsequent calls. This allows a component to be re-rendered without losing its positioning """ if self._root: self._root.clear() else: if self.height is not None: kwargs.update(height=self.height) if self.width is not None: kwargs.update(width=self.width) if self.style is not None: kwargs.update(style=self.style) if self.name is not None: kwargs.update(name=self.name) if self.style_type_name_override is not None: kwargs.update(style_type_name_override=self.style_type_name_override) self._root = Container(**kwargs) self._root.visible = default_visible return self._root async def render_async(self): """Waits for next frame before re-rendering""" await omni.kit.app.get_app().next_update_async() self.render() def update(self, loop=asyncio.get_event_loop()): """Used to re-render the component""" asyncio.ensure_future(self.render_async(), loop=loop) def update_debounce(self, delay=0.2): """ Queues re-render after a delay and resets the timer on subsequent calls if timer has not completed """ async def run_after_delay(): await asyncio.sleep(delay) await self.render_async() with contextlib.suppress(Exception): self._debounce_task.cancel() self._debounce_task = asyncio.ensure_future(run_after_delay()) def render(self): raise NotImplementedError() def __del__(self): """ Note: `__del__` is not reliably called when parent component is destroyed or re-rendered If a component requires clean-up (such as subscriptions, windows, frames, or event listeners), the parent component/class must manually call destroy when appropriate. 
""" self.destroy() def destroy(self): """ If a component requires clean-up (such as subscriptions, windows, frames, or event listeners), the parent component/class must manually call destroy when appropriate. """ pass
5,135
Python
31.506329
106
0.601558
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/style.py
from pathlib import Path import omni.ui as ui from omni.ui import color as cl from .libs.ui_utils import UiPal THIS_FOLDER_PATH = Path(__file__).parent.absolute() EXT_ROOT_FOLDER_PATH = THIS_FOLDER_PATH.parent.parent ICONS_PATH = EXT_ROOT_FOLDER_PATH.joinpath("data").joinpath("icons") # window frame cascade WINDOW_FRAME = { "ComboBox": { "border_radius": 6, "margin": 0, }, "ComboBox:disabled": { "color": UiPal.TEXT_DISABLED, }, "Slider": { "draw_mode": ui.SliderDrawMode.HANDLE, "color": UiPal.TRANSP_NOT_0, "border_radius": 6, }, "Slider:disabled": { "secondary_color": UiPal.TEXT_DISABLED, }, "CheckBox:disabled": { "background_color": UiPal.TEXT_DISABLED, }, "Button.Label:disabled": { "color": UiPal.TEXT_DISABLED, }, "Button.Label::ever_bright": { "color": cl.white, }, "Button.Label::ever_bright:disabled": { "color": cl.white, }, "Image::info": { "image_url": str(ICONS_PATH / "info.svg"), "color": UiPal.TEXT, }, "Image::info:hovered": { "image_url": str(ICONS_PATH / "info.svg"), "color": cl.white, }, "Line": { "color": UiPal.TEXT_DISABLED }, "CollapsableFrame": { "border_radius": 4, }, }
1,358
Python
18.985294
68
0.552283
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/extension.py
from functools import partial import asyncio import omni.ext import omni.ui as ui import omni.kit.commands import carb.settings from .libs.app_utils import call_on_parts_ready, call_after_update, get_setting_or from .window import Window from .engine import Engine from . import const class Extension(omni.ext.IExt): def on_startup(self, ext_id): # print("ext.on_startup", ext_id) self._window = None self._ext_id = ext_id def build(): ui.Workspace.set_show_window_fn(const.WINDOW_NAME, partial(self.show_window, None)) # carb.settings.get_settings().set("persistent/exts/syntway.model_exploder/windowShowOnStartup", True) show = get_setting_or(const.SETTINGS_PATH + "windowShowOnStartup", False) ed_menu = omni.kit.ui.get_editor_menu() if ed_menu: self._menu = ed_menu.add_item(const.MENU_PATH, self.show_window, toggle=True, value=show) if show: self.show_window(None, True) # ui.Workspace.show_window(WINDOW_NAME) call_on_parts_ready(build, 1) # stage ready def on_shutdown(self): # print("ext.on_shutdown") ui.Workspace.set_show_window_fn(const.WINDOW_NAME, None) ed_menu = omni.kit.ui.get_editor_menu() if ed_menu and omni.kit.ui.editor_menu.EditorMenu.has_item(const.MENU_PATH): ed_menu.remove_item(const.MENU_PATH) self._menu = None if self._window: self._window.destroy(True) self._window = None def show_window(self, menu, value): # print("ext.show_window", value, self._window) if value: # show if self._window is None: self._window = Window(const.WINDOW_NAME, self._ext_id) self._window.set_visibility_changed_fn(self._visibility_changed_fn) else: self._window.show() elif self._window: self._window.visible = False # will destroy in _visibility_changed_fn def _set_menu(self, value): # print("ext._set_menu", value) ed_menu = omni.kit.ui.get_editor_menu() if ed_menu: ed_menu.set_value(const.MENU_PATH, value) def _visibility_changed_fn(self, visible): # print("ext._visibility_changed_fn", visible) self._set_menu(visible) if not visible: # destroy window def destroy_window(): # 
print("ext.destroy_window", self._window) if self._window: self._window.destroy(False) self._window = None call_after_update(destroy_window) class ExplodeEngineApplyCommand(omni.kit.commands.Command): """ Undo/redoable command used by engine to apply final and initial position lists Don't use outside this extension. States are a tuple of (dist, change_list, time_code) """ def __init__(self, initial_state, final_state, stage): super().__init__() self._initial_state = initial_state self._final_state = final_state self._stage = stage def do(self): Engine.apply_state(self._final_state, self._stage, None) def undo(self): Engine.apply_state(self._initial_state, self._stage, None) omni.kit.commands.register_all_commands_in_module(__name__)
3,380
Python
26.266129
114
0.606805
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/engine.py
import asyncio, copy import carb import omni.ext import omni.ui as ui from omni.ui import scene as sc from omni.ui import color as cl import omni.kit.commands import omni.usd import omni.timeline from pxr import Usd, UsdGeom, UsdSkel, Sdf, Tf import pxr.Gf as Gf import omni.kit.notification_manager as nm from omni.usd.commands import TransformPrimCommand, TransformPrimSRTCommand from .libs.usd_helper import UsdHelper from .libs.usd_utils import (set_prim_translation, set_prim_translation_fast, set_prim_transform, get_prim_transform, get_prim_translation, create_edit_context) from .libs.viewport_helper import ViewportHelper from .libs.app_helper import AppHelper from .libs.app_utils import get_setting_or, set_setting, call_after_update from . import const APPLY_ASYNC = True class Engine(): def __init__(self): self.meshes_base_aabb = Gf.Range3d() self._meshes = [] self._dist = 0 self._center_mode = get_setting_or(const.SETTINGS_PATH + const.CENTER_MODE_SETTING, const.DEFAULT_CENTER_MODE) self._dist_mult = get_setting_or(const.SETTINGS_PATH + const.DIST_MULT_SETTING, const.DEFAULT_DIST_MULT) self._order_accel = get_setting_or(const.SETTINGS_PATH + const.ACCEL_SETTING, const.ACCEL_DEFAULT) self._explo_center = Gf.Vec3d(0) self._last_explo_center = Gf.Vec3d(0) self._apply_needed = False self._apply_task = None self._recalc_changed_needed = set() self._ignore_next_objects_changed = 0 # 0=no, 1=only next, 2:all until reset self._dist_base_size = 100 self.usd = UsdHelper() self._app = AppHelper() self._app.add_update_event_fn(self._on_update) stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event) def destroy(self): self._apply_cancel() self._timeline_sub = None self._recalc_changed_needed.clear() if self.usd: self.usd.remove_stage_objects_changed_fn(self._on_stage_objects_changed) self.usd.detach() self.usd = None if self._app: self._app.detach() self._app = None 
Engine._instance = None def reset(self, set_to_initial): self._apply_cancel() if set_to_initial and self.dist > 0: # if dist is 0, nothing to do self._apply(-2, self._explo_center, self._meshes) # returns prims to initial's self._meshes.clear() self._dist = 0 self.usd.remove_stage_objects_changed_fn(self._on_stage_objects_changed) def _on_update(self, _): if self._recalc_changed_needed: self._recalc_changed(self._recalc_changed_needed) self._recalc_changed_needed.clear() if self._apply_needed: if APPLY_ASYNC: if not self._apply_task or self._apply_task.done(): self._apply_needed = False dist = self._dist explo_center = Gf.Vec3d(self._explo_center) meshes = copy.copy(self._meshes) self._apply_task = asyncio.ensure_future(self._async_apply(dist, explo_center, meshes)) # else still applying last else: self._apply_needed = False self._apply(-1, self._explo_center, self._meshes) # returns prims to initial's def _on_stage_objects_changed(self, notice): if self._ignore_next_objects_changed: if self._ignore_next_objects_changed == 1: self._ignore_next_objects_changed = 0 return if not self._meshes: # should never happen? 
return # set filters out duplicate path property changes changed_paths = set(Sdf.Path.GetAbsoluteRootOrPrimPath(i) for i in notice.GetChangedInfoOnlyPaths()) # print("_on_stage_objects_changed", changed_paths) for n in changed_paths: ch_path = n.GetPrimPath().pathString # avoid camera changes if ch_path.startswith("/OmniverseKit_") or ch_path.endswith("/animationData"): continue for p in self._meshes: path = p["path"] if path.startswith(ch_path): self._recalc_changed_needed.add(path) def _on_timeline_event(self, e): # print("engine:_on_timeline_event", e.type) if self.has_meshes: if e.type == int(omni.timeline.TimelineEventType.CURRENT_TIME_CHANGED): self._ignore_next_objects_changed = 1 AVOID_CHILDREN_PRIM_TYPES = ["Camera"] # avoid recursion on these @staticmethod def _traverse_add_prim(list, prim): """Recursively traverse the hierarchy""" if not prim.IsValid(): # might not exist anymore return prim_t = prim.GetTypeName() if prim.HasAuthoredReferences(): # refs: check if any children ref_list = [] children = prim.GetChildren() for c in children: Engine._traverse_add_prim(ref_list, c) if ref_list: # add children but not itself list += ref_list else: # no children, add itself list.append(prim) return if prim.IsA(UsdGeom.PointInstancer) or prim.IsA(UsdSkel.Root): # instance, SkelRoot: add but don't recurse inside list.append(prim) return if prim.IsA(UsdGeom.Gprim): list.append(prim) if not prim_t in Engine.AVOID_CHILDREN_PRIM_TYPES: children = prim.GetChildren() for c in children: Engine._traverse_add_prim(list, c) def _sel_get_prim_paths_parent_first_order(self, paths): stage = self.usd.stage prims = [] for path in paths: prim = stage.GetPrimAtPath(path) prims.append(prim) u_prims = [] for p in prims: Engine._traverse_add_prim(u_prims, p) return u_prims def sel_capture(self, paths=None): # print("sel_capture") if paths is None: paths = self.usd.get_selected_prim_paths() # print("_sel_capture", paths) u_prims = self._sel_get_prim_paths_parent_first_order(paths) 
self._meshes = [] self._dist = 0 if len(u_prims) < 2: return False time_code = self.usd.timecode xform_cache = UsdGeom.XformCache(time_code) bbox_cache = UsdGeom.BBoxCache(time_code, [UsdGeom.Tokens.default_]) self._explo_center = Gf.Vec3d(0) # average of prim centroids aa_bounds = Gf.Range3d() # world positions for prim in u_prims: path = prim.GetPath().pathString lbb = bbox_cache.ComputeLocalBound(prim) lcent = lbb.ComputeCentroid() ltrans = get_prim_translation(prim, time_code) ldelta = ltrans - lcent # translation from centroid to the placing pos wbb = bbox_cache.ComputeWorldBound(prim) wbb_aa = wbb.ComputeAlignedRange() aa_bounds.UnionWith(wbb_aa) wtrans = wbb.ComputeCentroid() lmat = get_prim_transform(prim, False, xform_cache, time_code) # print(path, "local", lbb, lcent, ltrans, "world", wbb, wbb_aa, wtrans, lmat) # prim, prim_path, untransformed/local mid, world_mid, initial_local_translation entry = {"prim": prim, "path": path, "ini_wtrans": wtrans, "ldelta": ldelta, "ini_lmat": lmat} self._meshes.append(entry) # print(entry) self._explo_center += wtrans # centroid and base AA bounds self._explo_center /= len(u_prims) self._last_explo_center = self._explo_center self.meshes_base_aabb = aa_bounds # _dist_base_size size scale size = aa_bounds.GetSize() self._dist_base_size = max(size[0], size[1], size[2]) * 0.5 self._calc_dist_order() # print(time_code, self._explo_center, self._dist_base_size) self._ignore_next_objects_changed = 0 self.usd.add_stage_objects_changed_fn(self._on_stage_objects_changed) # print("sel_capture end") return True def _recalc_changed(self, ch_paths): time_code = self.usd.timecode bbox_cache = UsdGeom.BBoxCache(time_code, [UsdGeom.Tokens.default_]) dist = self._dist dist = self._calc_dist(dist) for p in self._meshes: path = p["path"] if path in ch_paths: # only if changed prim = p["prim"] lbb = bbox_cache.ComputeLocalBound(prim) lcent = lbb.ComputeCentroid() ltrans = get_prim_translation(prim, time_code) ldelta = ltrans - lcent wbb = 
bbox_cache.ComputeWorldBound(prim) new_wtrans = wbb.ComputeCentroid() # calc dir w_dir = new_wtrans - self._explo_center w_dir = self._calc_normalized_dir(w_dir) new_ini_wtrans = new_wtrans - w_dir * dist p["ini_wtrans"] = new_ini_wtrans p["ldelta"] = ldelta # print("changed", path, new_wtrans, ldelta) # not needed and conflicts with translate manipulator's dragging: self.apply_asap() self._calc_dist_order() def apply_asap(self): self._apply_needed = True def _apply_cancel(self): if APPLY_ASYNC: if self._apply_task: if self._apply_task.done(): return self._apply_task.cancel() async def _async_apply(self, dist_value, explo_center, meshes): self._apply(dist_value, explo_center, meshes) self._apply_task = None def _apply(self, dist, explo_center, meshes): """dist: -2: reset to stored initial pos, -1: use current self._dist, >=0: 0..1""" if not meshes: return # print("_apply", dist) time_code = self.usd.timecode changes = self._prepare_apply_state(dist, explo_center, meshes, time_code, True) is_reset = dist == -2 state = (is_reset, changes, time_code) Engine.apply_state(state, self.usd.stage, self) # print("_apply end") def _prepare_apply_state(self, dist, explo_center, meshes, time_code, with_prims): """dist: -2: reset to stored initial pos, -1: use current self._dist, >=0: 0..1""" if dist == -1: dist = self._dist # dist can now be [0..1] or -2 for reset to initial if dist >= 0: dist_factor = self._calc_dist(dist) else: dist_factor = dist time_code = self.usd.timecode xform_cache = UsdGeom.XformCache(time_code) changes = [] for mp in meshes: prim = mp["prim"] if not prim.IsValid(): # avoid any invalidated prims, deleted for example # print("skipping", prim) continue path = mp["path"] ini_wtrans = mp["ini_wtrans"] ldelta = mp["ldelta"] prim = mp["prim"] dist_order = mp["dist_order"] if dist_factor >= 0: # calc world pos # calc dir w_ini_vec = ini_wtrans - explo_center w_ini_len = w_ini_vec.GetLength() w_ini_len = max(w_ini_len, 1e-5) w_dir = 
self._calc_normalized_dir(w_ini_vec) order_factor = 1.0 + dist_order * self._order_accel w_vec = w_dir * dist_factor * order_factor dest_w_trans = ini_wtrans + w_vec # get local->parent->world transforms p2w = xform_cache.GetParentToWorldTransform(prim) # transform back from world to local coords w2p = p2w.GetInverse() dest_ptrans = w2p.Transform(dest_w_trans) # calc delta in mesh local/untransformed space dest_ltrans = dest_ptrans + ldelta # local trans, in parent space coords ltrans = (dest_ltrans[0], dest_ltrans[1], dest_ltrans[2]) #print(prim, dest_w_trans, ltrans) else: ltrans = mp["ini_lmat"] if with_prims: changes.append((prim, path, ltrans)) else: changes.append((None, path, ltrans)) return changes @staticmethod def apply_state(state, stage, instance): # print("apply_state", state, instance) is_reset, changes, time_code = state if instance: instance._ignore_next_objects_changed = 2 if not is_reset: """ Slower alternative: for ch in changes: prim, path, ltrans = ch # print(path,ltrans, type(ltrans)) cmd = TransformPrimSRTCommand(path=path, new_translation=ltrans, time_code=time_code) cmd.do() """ stage = stage sdf_change_block = 2 with Sdf.ChangeBlock(): for ch in changes: prim, path, lmat = ch if prim is None: prim = stage.GetPrimAtPath(path) # print(prim, ltrans) with create_edit_context(path, stage): set_prim_translation(prim, lmat, sdf_change_block=sdf_change_block, time_code=time_code) #set_prim_translation_fast(prim, lmat, sdf_change_block=sdf_change_block, time_code=time_code) else: for ch in changes: prim, path, ltrans = ch # print(path,ltrans, type(ltrans)) cmd = TransformPrimCommand(path=path, new_transform_matrix=ltrans, time_code=time_code) cmd.do() if instance: instance._ignore_next_objects_changed = 0 # print("apply_state end") def commit(self): time_code = self.usd.timecode dist = -2 changes = self._prepare_apply_state(dist, self._explo_center, self._meshes, time_code, False) is_reset = dist == -2 initial_state = (is_reset, changes, time_code) 
dist = -1 changes = self._prepare_apply_state(dist, self._explo_center, self._meshes, time_code, False) is_reset = dist == -2 final_state = (is_reset, changes, time_code) self._ignore_next_objects_changed = 2 stage = self.usd.stage omni.kit.commands.execute("ExplodeEngineApplyCommand", initial_state=initial_state, final_state=final_state, stage=stage) self._ignore_next_objects_changed = 0 self.reset(False) """ # compile transform list for undo time_code = self.usd.timecode xform_cache = UsdGeom.XformCache(time_code) self._ignore_next_objects_changed = 2 xforms=[] for mp in self._meshes: p = mp["prim"] path = mp["path"] ini_mat = mp["ini_lmat"] new_mat = get_prim_transform(p, False, xform_cache, time_code) xforms.append((path, new_mat, ini_mat, time_code, False)) self.reset(False) if xforms: if True: omni.kit.undo.begin_group() for x in xforms: omni.kit.commands.execute("TransformPrim", path=x[0], new_transform_matrix=x[1], old_transform_matrix=x[2] ) omni.kit.undo.end_group() else: omni.kit.commands.execute( "TransformPrims", prims_to_transform=xforms ) self._ignore_next_objects_changed = 0 """ def _calc_dist(self, dist): dist = dist ** const.DIST_EXP dist = dist * self._dist_base_size * self._dist_mult return dist def _calc_dir(self, dir): if self._center_mode >= 1 and self._center_mode <= 3: # around axis: zero axis displacement dir[self._center_mode - 1] = 0. elif self._center_mode >= 4: # from a plane i = self._center_mode - 4 dir[i] = 0. dir[(i + 1) % 3] = 0. 
def _calc_normalized_dir(self, dir): self._calc_dir(dir) if dir.GetLength() > 1e-6: dir.Normalize() return dir def _calc_dist_order(self): """dist_order is the 0..1 position of the mesh with regard to _explo_center""" min_len = float("inf") max_len = -1 len_list = [] for mp in self._meshes: vec = mp["ini_wtrans"] - self._explo_center self._calc_dir(vec) len = vec.GetLength() len = max(len, 1e-5) len_list.append(len) min_len = min(len, min_len) max_len = max(len, max_len) max_min_range = max_len - min_len max_min_range = max(max_min_range, 1e-5) index = 0 for mp in self._meshes: order = (len_list[index] - min_len) / max_min_range mp["dist_order"] = order index+=1 @property def has_meshes(self): return self.meshes_count >= 2 @property def meshes_count(self): return len(self._meshes) @property def stage_selection_meshes_count(self): paths = self.usd.get_selected_prim_paths() u_prims = self._sel_get_prim_paths_parent_first_order(paths) return len(u_prims) @property def center(self): return self._explo_center @center.setter def center(self, center): self._explo_center = center self._calc_dist_order() self.apply_asap() @property def dist(self): return self._dist @dist.setter def dist(self, d): self._dist = d self.apply_asap() @property def center_mode(self): return self._center_mode @center_mode.setter def center_mode(self, c): self._center_mode = c set_setting(const.SETTINGS_PATH + const.CENTER_MODE_SETTING, self._center_mode) self.apply_asap() @property def order_accel(self): return self._order_accel @order_accel.setter def order_accel(self, v): self._order_accel = v set_setting(const.SETTINGS_PATH + const.ACCEL_SETTING, self._order_accel) self.apply_asap() @property def dist_mult(self): return self._dist_mult @dist_mult.setter def dist_mult(self, m): self._dist_mult = m set_setting(const.SETTINGS_PATH + const.DIST_MULT_SETTING, self._dist_mult) self.apply_asap() def recenter(self): self._explo_center = self._last_explo_center self.apply_asap() def is_centered(self): 
return Gf.IsClose(self._explo_center, self._last_explo_center, 1e-6)
19,891
Python
26.437241
126
0.539641
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/const.py
from omni.ui import color as cl DEV_MODE = 0 # extension/window WINDOW_NAME = "Model Exploder" MENU_PATH = f"Window/{WINDOW_NAME}" SETTINGS_PATH = "persistent/exts/syntway.model_exploder/" INFO_URL = "https://www.syntway.com/model_exploder/?info#how-to-use" # ui DISTANCE_LABEL = "Distance" CENTER_LABEL = "Center" SELECT_TO_EXPLODE_TEXT = "Start by selecting what to explode..." SELECT_TO_USE_TEXT = "Click to use the {0} selected parts" SELECTED_TEXT = "Exploding {0} parts" DONE_TEXT = "Apply" RESET_TEXT = "Cancel" CENTER_TEXT = "Center" RECENTER_TEXT = "Recenter" OPTIONS_TITLE = "Options" OPTIONS_DIST_MULT_LABEL = "Distance Multiplier" OPTIONS_DIST_MULT_COMBO_VALUES = [ ("1x", 1.), ("5x ", 5.), ("10x", 10.), ("100x", 100.) ] OPTIONS_ACCEL_LABEL = "Acceleration from Center" OPTIONS_ACCEL_MAX = 5. OPTIONS_BOUNDS_ALPHA_LABEL = "Initial Bounds Visibility" OPTIONS_BOUNDS_ALPHA_SETTING = "boundsAlpha" OPTIONS_BOUNDS_ALPHA_DEFAULT = 0.5 OPTIONS_UNSELECT_ON_USE_LABEL = "Unselect Parts on Use" OPTIONS_UNSELECT_ON_USE_SETTING = "unselectOnUse" OPTIONS_UNSELECT_ON_USE_DEFAULT = True TIMELINE_RESET_TEXT = "Timeline has changed: resetting exploded meshes..." CENTER_COMBO_LABELS = [ "Point", "X Axis", "Y Axis", # up "Z Axis", # up "XY Plane", # ground "YZ Plane", "ZX Plane" # ground ] CENTER_COMBO_AXIS_FIRST = 1 CENTER_COMBO_AXIS_SUFFIX = " (Vertical)" CENTER_COMBO_PLANE_FIRST = 4 CENTER_COMBO_PLANE_SUFFIX = " (Ground)" # engine CENTER_MANIP_LABEL_OFFSET = -11 CENTER_MANIP_LABEL_SIZE = 15 DEFAULT_CENTER_MODE = 0 CENTER_MODE_SETTING = "centerMode" DEFAULT_DIST_MULT = 5. DIST_MULT_SETTING = "distMult" ACCEL_DEFAULT = 1.68 ACCEL_SETTING = "orderAccel" DIST_EXP = 1.3 BOUNDS_BASE_AABB_COLOR = cl("#808080ff") # rgba order # tooltips TOOLTIP_USE = "First select the models to explode, then click this button to use." TOOLTIP_INFO = "Help and more info on this tool." TOOLTIP_DIST = "Select the explosion distance. For larger distances, see Options - Distance Multiplier." 
TOOLTIP_CENTER_MODE = """Select the explosion center type, which can be a point, an axis or a plane. You can drag the Center manipulator directly in the viewport to change its position.""" TOOLTIP_RECENTER = "Toggle the Center manipulator in the viewport back to the centroid of the used shapes." TOOLTIP_OPTIONS_ACCEL = """Exploded parts accelerate based on their initial distance from Center. This setting controls how farthest parts accelerate more than nearest ones.""" TOOLTIP_OPTIONS_DIST = """Multiply the explosion distance selected in the above slider. For smaller or larger explosion scales.""" TOOLTIP_OPTIONS_BOUNDS = """Visibility of the initial bounding box for the used shapes, from transparent to fully visible.""" TOOLTIP_OPTIONS_UNSELECT = """When starting to use a group of selected parts, should they be unselected for simpler visuals?""" TOOLTIP_CANCEL = "Cancel the tool and leave parts in their initial positions." TOOLTIP_APPLY = "Applies the current parts positions and adds an Undo-Redo state."
3,079
Python
28.333333
107
0.725885
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/window.py
import asyncio, copy, webbrowser

import carb

import omni.ui as ui
from omni.ui import scene as sc
from omni.ui import color as cl

import omni.kit.commands
import omni.usd
import omni.timeline

from pxr import Usd, UsdGeom, UsdSkel, Sdf, Tf
import pxr.Gf as Gf

import omni.kit.notification_manager as nm

from .libs.viewport_helper import ViewportHelper
from .libs.app_utils import get_setting_or, set_setting, call_after_update
from .libs.ui_utils import create_reset_button, create_tooltip_fn, UiPal, UiPal_refresh
from .libs.manipulators import TranslateManipulator

from .engine import Engine
from . import const
from . import style


class Window(ui.Window):
    """Main tool window for the Model Exploder extension.

    Owns the explode Engine, builds the control UI (distance slider, center
    mode combo, options frame) and manages the viewport scene overlay: a
    Center translate-manipulator with a label and the initial bounding-box
    wireframe lines.
    """

    def __init__(self, title: str, ext_id: str, **kwargs):
        # print("win.__init__")
        self._ext_id = ext_id
        self._engine = Engine()

        # Viewport-scene state; created lazily in _scene_create().
        self._scene_reg = None
        self._center_manip = None
        self._center_label = None
        self._center_label_transform = None
        self._base_aabb_lines = []  # 12 sc.Line edges of the initial AABB

        # Persisted options (carb settings), with defaults on first run.
        self._options_bounds_alpha = get_setting_or(const.SETTINGS_PATH + const.OPTIONS_BOUNDS_ALPHA_SETTING, const.OPTIONS_BOUNDS_ALPHA_DEFAULT)
        self._options_unselect_on_use = get_setting_or(const.SETTINGS_PATH + const.OPTIONS_UNSELECT_ON_USE_SETTING, const.OPTIONS_UNSELECT_ON_USE_DEFAULT)

        kwargs["auto_resize"] = True
        super().__init__(title, **kwargs)

        self.auto_resize = True
        self._ui_built = False  # guards stage events arriving before _build_fn()
        self.frame.set_build_fn(self._build_fn)

        self._vp = ViewportHelper()
        # print(self._vp.info())

        # create manipulator scene
        self._scene_reg = self._vp.register_scene_proxy(self._scene_create, self._scene_destroy,
                                                        self._scene_get_visible, self._scene_set_visible,
                                                        self._ext_id)

        self._engine.usd.add_stage_event_fn(self._on_stage_event)

    def destroy(self, is_ext_shutdown):
        """Tear down UI references, the scene overlay and the engine.

        is_ext_shutdown: True when the whole extension is being unloaded;
        when False and an explosion is in progress, parts are reset first.
        """
        # print("win.destroy", is_ext_shutdown)
        self._dist_slider = None
        self._use_button = None
        self._center_mode_combo = None
        self._recenter_button = None
        self._options = None
        self._options_dist_mult_combo = None
        self._options_accel_slider = None
        self._options_bounds_slider = None
        self._options_unselect_on_use_check = None
        self._done_button = None
        self._reset_button = None

        if self._center_manip:
            self._center_manip.destroy()
            self._center_manip = None
        self._center_label = None
        self._center_label_transform = None
        self._base_aabb_lines.clear()

        if self._scene_reg:
            self._vp.unregister_scene(self._scene_reg)
            self._scene_reg = None

        if self._vp:
            self._vp.detach()
            self._vp = None

        if self._engine:
            if self._engine.usd:
                self._engine.usd.remove_stage_event_fn(self._on_stage_event)
            if not is_ext_shutdown and self._engine.has_meshes and self._engine.dist != 0:
                self._engine.reset(True)  # cancel current to initial positions
            self._engine.destroy()
            self._engine = None

        super().destroy()

    def _build_fn(self):
        """Called to build the UI once the window is visible"""
        # print(f"win._build_fn {self.visible}")
        UiPal_refresh()
        self.frame.style = style.WINDOW_FRAME

        with ui.VStack(width=386, style={"margin": 7}):  # spacing=9, style={"margin": 7}
            with ui.VStack(height=0, spacing=11, style={"margin": 0}):  # spacing=9, style={"margin": 7}

                # Row: "use selection" button + info icon.
                with ui.HStack(skip_draw_when_clipped=True, spacing=5):
                    self._use_button = ui.Button(const.SELECT_TO_EXPLODE_TEXT, name="ever_bright", height=24,
                                                 clicked_fn=self._on_use_clicked,
                                                 tooltip_fn=create_tooltip_fn(const.TOOLTIP_USE))
                    ui.Image(name="info", fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
                             width=18, height=24,
                             mouse_pressed_fn=lambda *p: self._on_info(),
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_INFO))

                # Row: distance slider (clicking the label zeroes the distance).
                with ui.HStack(skip_draw_when_clipped=True, spacing=6):
                    ui.Label(const.DISTANCE_LABEL, width=50,
                             mouse_pressed_fn=lambda *p: self._on_dist_set_zero(),
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_DIST))
                    self._dist_slider = ui.FloatSlider(min=0, max=1,
                                                       # tooltip_fn=create_tooltip_fn(const.TOOLTIP_DIST)
                                                       )
                    self._dist_slider.model.add_value_changed_fn(self._on_dist_slider_changed)

                # Row: center mode combo + recenter button.
                with ui.HStack(skip_draw_when_clipped=True, spacing=6):
                    ui.Label(const.CENTER_LABEL, width=50,
                             tooltip_fn=create_tooltip_fn(const.TOOLTIP_CENTER_MODE))
                    self._center_mode_combo = ui.ComboBox(self._engine.center_mode, *const.CENTER_COMBO_LABELS,
                                                          width=145,
                                                          tooltip_fn=create_tooltip_fn(const.TOOLTIP_CENTER_MODE))
                    self._center_mode_combo.model.add_item_changed_fn(self._on_center_mode_changed)
                    self._setup_center_combo_labels()
                    self._recenter_button = ui.Button(const.RECENTER_TEXT, width=60,
                                                      clicked_fn=self._on_recenter_clicked,
                                                      tooltip_fn=create_tooltip_fn(const.TOOLTIP_RECENTER))

                ui.Spacer(height=1)

                # Collapsible options frame (expanded in DEV_MODE).
                self._options = ui.CollapsableFrame(const.OPTIONS_TITLE, collapsed=not bool(const.DEV_MODE))
                with self._options:
                    with ui.VStack(spacing=0, style={"margin": 3}):

                        # Option: acceleration from center.
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_ACCEL_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_ACCEL))
                            with ui.HStack():
                                self._options_accel_slider = ui.FloatSlider(min=0, max=const.OPTIONS_ACCEL_MAX)
                                self._options_accel_slider.model.set_value(self._engine._order_accel)
                                self._options_accel_slider.model.add_value_changed_fn(self._on_options_accel_changed)
                                create_reset_button(const.ACCEL_DEFAULT,
                                                    self._options_accel_slider.model,
                                                    self._options_accel_slider.model.set_value,
                                                    self._options_accel_slider.model.add_value_changed_fn)

                        # Option: distance multiplier combo.
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_DIST_MULT_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_DIST))
                            with ui.HStack():
                                # locate dist_mult label index from self._engine.dist_mult
                                def get_dist_mult_index(dist_mult):
                                    index = 0
                                    for i in range(len(const.OPTIONS_DIST_MULT_COMBO_VALUES)):
                                        entry = const.OPTIONS_DIST_MULT_COMBO_VALUES[i]
                                        if dist_mult == entry[1]:
                                            index = i
                                            break
                                    return index

                                self._options_dist_mult_combo = ui.ComboBox(
                                    get_dist_mult_index(self._engine.dist_mult),
                                    *[a[0] for a in const.OPTIONS_DIST_MULT_COMBO_VALUES],
                                    tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_DIST)
                                )
                                self._options_dist_mult_combo.model.add_item_changed_fn(self._on_options_dist_mult_changed)
                                create_reset_button(get_dist_mult_index(const.DEFAULT_DIST_MULT),
                                                    self._options_dist_mult_combo.model.get_item_value_model(),
                                                    self._options_dist_mult_combo.model.get_item_value_model().set_value,
                                                    self._options_dist_mult_combo.model.add_item_changed_fn)

                        # Option: initial bounds visibility (alpha).
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_BOUNDS_ALPHA_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_BOUNDS))
                            with ui.HStack():
                                self._options_bounds_slider = ui.FloatSlider(min=0, max=1,
                                                                             #tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_BOUNDS)
                                                                             )
                                self._options_bounds_slider.model.set_value(self._options_bounds_alpha)
                                self._options_bounds_slider.model.add_value_changed_fn(self._on_options_bounds_changed)
                                create_reset_button(const.OPTIONS_BOUNDS_ALPHA_DEFAULT,
                                                    self._options_bounds_slider.model,
                                                    self._options_bounds_slider.model.set_value,
                                                    self._options_bounds_slider.model.add_value_changed_fn)

                        # Option: unselect parts on use checkbox.
                        with ui.HStack(spacing=6):
                            ui.Label(const.OPTIONS_UNSELECT_ON_USE_LABEL,
                                     tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_UNSELECT))
                            with ui.HStack():
                                self._options_unselect_on_use_check = ui.CheckBox(width=12,
                                                                                  tooltip_fn=create_tooltip_fn(const.TOOLTIP_OPTIONS_UNSELECT))
                                self._options_unselect_on_use_check.model.set_value(self._options_unselect_on_use)
                                self._options_unselect_on_use_check.model.add_value_changed_fn(self._on_options_unselect_changed)
                                # ui.Spacer(width=1)
                                ui.Line()
                                create_reset_button(const.OPTIONS_UNSELECT_ON_USE_DEFAULT,
                                                    self._options_unselect_on_use_check.model,
                                                    self._options_unselect_on_use_check.model.set_value,
                                                    self._options_unselect_on_use_check.model.add_value_changed_fn)

                ui.Spacer(height=1)

                # Row: cancel / apply buttons.
                with ui.HStack(skip_draw_when_clipped=True, spacing=9):
                    self._reset_button = ui.Button(const.RESET_TEXT,
                                                   clicked_fn=self._on_reset_clicked,
                                                   tooltip_fn=create_tooltip_fn(const.TOOLTIP_CANCEL))
                    ui.Spacer()
                    self._done_button = ui.Button(const.DONE_TEXT,
                                                  clicked_fn=self._on_done_clicked,
                                                  tooltip_fn=create_tooltip_fn(const.TOOLTIP_APPLY))
                    #ui.Button("Test", clicked_fn=self._on_test)

        self._ui_built = True
        self._refresh_ui()

    def _on_stage_event(self, ev: carb.events.IEvent):
        """Stage event dispatcher: tracks selection changes and stage open/close."""
        # print("Window._on_stage_event", ev.type)
        if not self._ui_built:  # a stage event can call us before _build_fn()
            return

        if ev.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            if not self._engine.has_meshes:
                self._refresh_ui()

        elif ev.type == int(omni.usd.StageEventType.CLOSING):
            # print("Window.CLOSING")
            self._reset(False)  # calls engine.reset
            #self._engine.usd.detach()

        elif ev.type == int(omni.usd.StageEventType.OPENED):
            # print("Window.OPENED")
            self._setup_center_combo_labels()

    def _refresh_ui(self):
        """Enable/disable controls and update the use-button text from engine state."""
        if not self._engine.has_meshes:  # nothing selected
            self._dist_slider.enabled = False
            self._center_mode_combo.enabled = False
            self._recenter_button.enabled = False
            self._done_button.enabled = False
            self._reset_button.enabled = False

            sel_mesh_count = self._engine.stage_selection_meshes_count
            if sel_mesh_count >= 2:
                self._use_button.text = const.SELECT_TO_USE_TEXT.format(sel_mesh_count)
                self._use_button.enabled = True
            else:
                self._use_button.text = const.SELECT_TO_EXPLODE_TEXT
                self._use_button.enabled = False
        else:
            mesh_count = self._engine.meshes_count
            self._use_button.text = const.SELECTED_TEXT.format(mesh_count)
            self._use_button.enabled = False

            self._dist_slider.enabled = True
            self._center_mode_combo.enabled = True
            self._recenter_button.enabled = not self._engine.is_centered()
            self._done_button.enabled = True
            self._reset_button.enabled = True

    def _setup_center_combo_labels(self):
        """Append the (Vertical)/(Ground) suffixes to the axis/plane combo
        entries that match the stage's up axis."""
        model = self._center_mode_combo.model
        ch = model.get_item_children()
        up = self._engine.usd.stage_up_index
        if up == 1:  # y up
            mark = [const.CENTER_COMBO_AXIS_FIRST + 1, const.CENTER_COMBO_PLANE_FIRST + 2]
        else:  # z up
            mark = [const.CENTER_COMBO_AXIS_FIRST + 2, const.CENTER_COMBO_PLANE_FIRST + 0]

        for l in range(len(const.CENTER_COMBO_LABELS)):
            label = const.CENTER_COMBO_LABELS[l]
            if l in mark:
                if l < const.CENTER_COMBO_PLANE_FIRST:
                    label += const.CENTER_COMBO_AXIS_SUFFIX
                else:
                    label += const.CENTER_COMBO_PLANE_SUFFIX
            m = model.get_item_value_model(ch[l])
            m.set_value(label)

    def _reset(self, set_to_initial):
        """Reset engine (optionally moving parts back), hide overlays, zero slider."""
        self._engine.reset(set_to_initial)
        self._enable_center_controls(False)
        self._enable_base_aabb(False)
        self._dist_slider.model.set_value(0)
        self._refresh_ui()

    def _on_use_clicked(self):
        """Capture the current selection into the engine and show the overlays."""
        if not self._engine.sel_capture():
            self._reset(False)
            return

        self._sync_base_aabb()
        self._enable_base_aabb(True)
        self._enable_center_controls(True)
        if self._center_manip:
            self._set_center_manip_point(self._engine.center)

        if self._options_unselect_on_use:
            self._engine.usd.set_selected_prim_paths([])

        self._refresh_ui()

    def _on_dist_set_zero(self):
        self._dist_slider.model.set_value(0)

    def _on_dist_slider_changed(self, model):
        self._engine.dist = model.as_float

    def _on_center_mode_changed(self, m, *args):
        self._engine.center_mode = m.get_item_value_model().get_value_as_int()

    def _on_recenter_clicked(self):
        """Move the center manipulator back to the centroid of the used shapes."""
        self._engine.recenter()
        self._set_center_manip_point(self._engine.center)
        self._recenter_button.enabled = not self._engine.is_centered()

    def _on_done_clicked(self):
        # Commit writes the final positions and registers an undo state.
        self._engine.commit()
        self._reset(False)

    def _on_reset_clicked(self):
        self._reset(True)

    def _scene_create(self, vp_args):
        """Viewport scene-proxy callback: create manipulator, label and AABB lines."""
        vp_api = vp_args["viewport_api"]
        if not self._vp.same_api(vp_api):  # ensure scene is created in same viewport we're attached to
            return
        # print("_scene_create", vp_args, self._vp._api)

        self._center_manip = TranslateManipulator(viewport=self._vp,
                                                  enabled=False,
                                                  changed_fn=self._on_center_manip_changed
                                                  )

        self._center_label_transform = sc.Transform()  # before next _sync
        self._sync_scene_label()

        with self._center_label_transform:
            with sc.Transform(look_at=sc.Transform.LookAt.CAMERA, scale_to=sc.Space.SCREEN):
                with sc.Transform(transform=sc.Matrix44.get_scale_matrix(2, 2, 1)):
                    # Offset the label along the stage up axis so it sits below the manipulator.
                    wup = self._engine.usd.stage_up
                    wup *= const.CENTER_MANIP_LABEL_OFFSET
                    with sc.Transform(transform=sc.Matrix44.get_translation_matrix(*wup)):
                        self._center_label = sc.Label(const.CENTER_TEXT,
                                                      alignment=ui.Alignment.CENTER,
                                                      size=const.CENTER_MANIP_LABEL_SIZE,
                                                      visible=False)

        self._create_base_aabb()

    def _scene_destroy(self):
        if self._center_manip:
            self._center_manip.destroy()
            self._center_manip = None

    def _scene_get_visible(self):
        return True

    def _scene_set_visible(self, value):
        if self._center_manip.enabled:  # only set if manip is enabled
            self._center_manip.enabled = value

    def _on_center_manip_changed(self, action, manip):
        """Manipulator drag callback: propagate the new center to the engine."""
        # print("_on_center_manip_changed")
        assert self._engine.has_meshes
        self._sync_scene_label()
        self._engine.center = manip.point
        self._recenter_button.enabled = not self._engine.is_centered()

    def _enable_center_controls(self, ena):
        if self._center_manip:
            self._center_manip.enabled = ena
        if self._center_label:
            self._center_label.visible = ena

    def _set_center_manip_point(self, wpt):
        self._center_manip.point = wpt
        self._sync_scene_label()

    def _sync_scene_label(self):
        # Keep the "Center" label glued to the manipulator's world position.
        wpt = Gf.Vec3d(self._center_manip.point)
        self._center_label_transform.transform = sc.Matrix44.get_translation_matrix(*wpt)

    def prepare_base_aabb_color(self):
        """Return the AABB wire color with alpha taken from the bounds-visibility option."""
        color = const.BOUNDS_BASE_AABB_COLOR
        color = (color & 0x00ffffff) | (int(self._options_bounds_alpha * 255) << 24)
        return color

    def _create_base_aabb(self):
        """Create the 12 (initially degenerate, hidden) edge lines of the bounds box."""
        self._base_aabb_lines.clear()
        color = self.prepare_base_aabb_color()
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))
        self._base_aabb_lines.append(sc.Line([0, 0, 0], [0, 0, 0], color=color, visible=False))

    def _sync_base_aabb(self):
        """Update the 12 edge lines from the engine's base AABB.

        Corner points p0..p7: p0..p3 are the 4 bottom (min-y level) corners,
        p4..p7 the 4 top corners; lines 0-3 bottom rectangle, 4-7 top
        rectangle, 8-11 the vertical edges joining them.
        """
        if self._engine.meshes_base_aabb.IsEmpty():
            return
        mi, ma = self._engine.meshes_base_aabb.min, self._engine.meshes_base_aabb.max
        p0=[mi[0],mi[1],mi[2]]
        p1=[ma[0],mi[1],mi[2]]
        p2=[mi[0],mi[1],ma[2]]
        p3=[ma[0],mi[1],ma[2]]
        p4=[mi[0],ma[1],mi[2]]
        p5=[ma[0],ma[1],mi[2]]
        p6=[mi[0],ma[1],ma[2]]
        p7=[ma[0],ma[1],ma[2]]

        self._base_aabb_lines[0].start,self._base_aabb_lines[0].end, = p0,p1
        self._base_aabb_lines[1].start,self._base_aabb_lines[1].end, = p0,p2
        self._base_aabb_lines[2].start,self._base_aabb_lines[2].end, = p1,p3
        self._base_aabb_lines[3].start,self._base_aabb_lines[3].end, = p2,p3
        self._base_aabb_lines[4].start,self._base_aabb_lines[4].end, = p4,p5
        self._base_aabb_lines[5].start,self._base_aabb_lines[5].end, = p4,p6
        self._base_aabb_lines[6].start,self._base_aabb_lines[6].end, = p5,p7
        self._base_aabb_lines[7].start,self._base_aabb_lines[7].end, = p6,p7
        self._base_aabb_lines[8].start,self._base_aabb_lines[8].end, = p0,p4
        self._base_aabb_lines[9].start,self._base_aabb_lines[9].end, = p1,p5
        self._base_aabb_lines[10].start,self._base_aabb_lines[10].end, = p2,p6
        self._base_aabb_lines[11].start,self._base_aabb_lines[11].end, = p3,p7

    def _enable_base_aabb(self, ena):
        if self._engine.meshes_base_aabb.IsEmpty():
            ena = False
        for l in self._base_aabb_lines:
            l.visible = ena

    def _on_options_dist_mult_changed(self, m, *args):
        # Map the combo index back to its multiplier value.
        index = m.get_item_value_model().get_value_as_int()
        mult = const.OPTIONS_DIST_MULT_COMBO_VALUES[index][1]
        self._engine.dist_mult = mult

    def _on_options_accel_changed(self, model):
        self._engine.order_accel = model.as_float

    def _on_options_bounds_changed(self, model):
        # Persist the new alpha and re-tint the existing AABB lines.
        self._options_bounds_alpha = model.as_float
        set_setting(const.SETTINGS_PATH + const.OPTIONS_BOUNDS_ALPHA_SETTING, self._options_bounds_alpha)
        color = self.prepare_base_aabb_color()
        for l in self._base_aabb_lines:
            l.color = color

    def _on_options_unselect_changed(self, m):
        # NOTE(review): m.as_float on a CheckBox model yields 0.0/1.0, which is
        # then persisted; m.as_bool looks intended here — confirm.
        self._options_unselect_on_use = m.as_float
        set_setting(const.SETTINGS_PATH + const.OPTIONS_UNSELECT_ON_USE_SETTING, self._options_unselect_on_use)

    def _on_info(self):
        res = webbrowser.open(const.INFO_URL)
23,446
Python
37.063312
143
0.514416
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/app_utils.py
"""""" import asyncio, functools, sys import os.path import carb import omni.kit import omni.kit.viewport.utility as vut import omni.ui as ui import omni.usd from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil VERSION = 11 def call_after_update(fn, update_count=1): async def wait_for_update(count): while count: await omni.kit.app.get_app().next_update_async() count -= 1 fn() asyncio.ensure_future(wait_for_update(update_count)) def call_on_ready(is_ready_fn, on_ready_fn, max_tries=sys.maxsize): async def wait_for(): nonlocal max_tries while max_tries: await omni.kit.app.get_app().next_update_async() if is_ready_fn(): on_ready_fn() return max_tries -= 1 if is_ready_fn(): # straight away? on_ready_fn() return else: asyncio.ensure_future(wait_for()) def call_on_parts_ready(on_ready_fn, part_flags=1 | 2 | 4, max_tries=sys.maxsize, usd_context=None, usd_context_name='', window_name: str = None, ): """Call back when all parts in part_flags are ready: part_flags: Stage ready=1 Stage camera ready=2 -> implies stage ready Viewport non-zero frame size=4 """ def are_parts_ready(): ready_mask = 0 if part_flags & (1 | 2 | 4): api, win = vut.get_active_viewport_and_window(usd_context_name=usd_context_name, window_name=window_name) if part_flags & (1 | 2): if usd_context is None: ctx = omni.usd.get_context() else: ctx = usd_context if not ctx: return False stage = ctx.get_stage() if not stage: return False cam_prim = stage.GetPrimAtPath(api.camera_path) ready_mask = 1 | (2 if cam_prim.IsValid() else 0) if part_flags & 4: if not win: return False ws_win = ui.Workspace.get_window(win.name) if not ws_win: return False if not hasattr(ws_win, 'frame'): return False ws_win_frame = ws_win.frame if ws_win_frame.computed_width > 0 and ws_win_frame.computed_height > 0: ready_mask |= 4 return part_flags & ready_mask == part_flags call_on_ready(are_parts_ready, on_ready_fn, max_tries) # convenience calls def call_on_stage_ready(on_ready_fn, usd_context=None, max_tries=sys.maxsize): 
call_on_parts_ready(on_ready_fn, 1, usd_context=usd_context, max_tries=max_tries) def call_on_stage_camera_ready(on_ready_fn, usd_context=None, usd_context_name='', window_name: str = None, max_tries=sys.maxsize): call_on_parts_ready(on_ready_fn, 1 | 2, usd_context=usd_context, usd_context_name=usd_context_name, window_name=window_name, max_tries=max_tries) def get_setting_or(path, not_found_value): value = carb.settings.get_settings().get(path) if value is not None: return value else: return not_found_value def set_setting(path, value): carb.settings.get_settings().set(path, value) def delete_setting(path): carb.settings.get_settings().destroy_item(path) def get_extension_path(ext_id, sub_path=None): ext_path = omni.kit.app.get_app().get_extension_manager().get_extension_path(ext_id) if sub_path is not None: return os.path.join(ext_path, sub_path) else: return ext_path def matrix44_flatten(mat): """Get a omni.ui.scene.Matrix44 (an array[16]) from a pxr.Gf.Matrix4d or array[4][4].""" return [mat[0][0], mat[0][1], mat[0][2], mat[0][3], mat[1][0], mat[1][1], mat[1][2], mat[1][3], mat[2][0], mat[2][1], mat[2][2], mat[2][3], mat[3][0], mat[3][1], mat[3][2], mat[3][3]]
4,241
Python
25.185185
93
0.537373
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/manipulators.py
""" If you're getting Kit launch time errors related with omni.ui.scene, add omni.ui.scene to your extension dependencies in extension.toml: [dependencies] "omni.ui.scene" = {} """ from typing import Dict import carb import omni.kit, omni.usd from pxr import Gf, Sdf, Tf, Usd, UsdGeom from omni.kit.manipulator.viewport import ManipulatorFactory from omni.kit.manipulator.transform import AbstractTransformManipulatorModel, Operation from omni.kit.manipulator.transform.manipulator import TransformManipulator, Axis from omni.kit.manipulator.transform.simple_transform_model import SimpleTransformModel from omni.kit.manipulator.transform.gestures import TranslateChangedGesture, TranslateDragGesturePayload from .viewport_helper import ViewportHelper class TranslateManipulator(): VERSION = 9 def __init__(self, viewport: ViewportHelper, point=Gf.Vec3d(0, 0, 0), size=1., enabled=False, axes: Axis = Axis.ALL, style: Dict = {}, changed_fn=None): """ style: all colors in 0xAABBGGRR { "Translate.Axis::x": {"color": 0xAABBGGRR}, "Translate.Axis::y": {"color": }, "Translate.Axis::z": {"color": }, "Translate.Plane::x_y": {"color": }, "Translate.Plane::y_z": {"color": }, "Translate.Plane::z_x": {"color": }, "Translate.Point": {"color": 0xAABBGGRR, "type": "point"/"notpoint"}, } """ self._manip = None self._gesture = None self._changed_fn = None #if not viewport.is_attached: # raise AssertionError("Viewport not attached") self._is_legacy = viewport.is_legacy model = SimpleTransformModel() model.set_operation(Operation.TRANSLATE) model.set_floats(model.get_item("translate"), point) self._changed_fn = changed_fn self._gesture = TranslateGesture(viewport=viewport, changed_fn=self._on_changed_fn) if self._is_legacy: self._manip = ManipulatorFactory.create_manipulator(TransformManipulator, model=model, size=size, enabled=enabled, axes=axes, style=style, gestures=[self._gesture]) else: #self._manip = None #raise AssertionError("TranslateManipulator not currently usable on VP2") self._manip 
= TransformManipulator(model=model, size=size, enabled=enabled, axes=axes, style=style, gestures=[self._gesture]) def __del__(self): self.destroy() def destroy(self): if self._gesture: self._gesture.destroy() self._gesture = None if self._manip: if self._is_legacy: ManipulatorFactory.destroy_manipulator(self._manip) else: self._manip.destroy() self._manip = None if self._changed_fn: self._changed_fn = None @property def enabled(self): return self._manip.enabled @enabled.setter def enabled(self, ena): self._manip.enabled = ena @property def point(self): return self._manip.model.get_as_floats(self._manip.model.get_item("translate")) @point.setter def point(self, point): self._manip.model.set_floats(self._manip.model.get_item("translate"), [point[0], point[1], point[2]]) def set_changed_fn(self, fn): """ fn(action, manip) action: began=0,changed=1,ended=2,canceled=3 """ self._changed_fn = fn def _on_changed_fn(self, action, point): if self._changed_fn: self._changed_fn(action, self) """ class PointTranslateModel(SimpleTransformModel): def __init__(self, point): super().__init__() self.set_operation(Operation.TRANSLATE) self.set_floats(self.get_item("translate"), point) """ class TranslateGesture(TranslateChangedGesture): def __init__(self, viewport, changed_fn=None, **kwargs): TranslateChangedGesture.__init__(self) self._vp = viewport self.changed_fn = changed_fn def destroy(self): self._vp = None self.changed_fn = None def __del__(self): self.destroy() def on_began(self): # print("TranslateGesture.on_began", self._vp.window_name) if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload): return model = self.sender.model if not model: return pt = model.get_as_floats(model.get_item("translate")) self._begin_point = Gf.Vec3d(*pt) if self._vp.is_legacy: self._vp.temp_select_enabled(False) if self.changed_fn: self.changed_fn(0, self._begin_point) def on_ended(self): # print("TranslateGesture.on_ended") if not 
self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload): return model = self.sender.model if not model: return if self.changed_fn: pt = model.get_as_floats(model.get_item("translate")) self.changed_fn(2, Gf.Vec3d(*pt)) def on_canceled(self): # print("TranslateGesture.on_canceled") if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload): return model = self.sender.model if not model: return if self.changed_fn: pt = model.get_as_floats(model.get_item("translate")) self.changed_fn(3, Gf.Vec3d(*pt)) def on_changed(self): # print("TranslateGesture.on_changed") if not self.gesture_payload or not self.sender or not isinstance(self.gesture_payload, TranslateDragGesturePayload): return model = self.sender.model if not model: return translate = Gf.Vec3d(*self.gesture_payload.moved) point = self._begin_point + translate model.set_floats(model.get_item("translate"), [point[0], point[1], point[2]]) if self.changed_fn: self.changed_fn(1, point)
6,989
Python
25.477273
124
0.546001
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/usd_utils.py
""" Notes: """ import omni.kit import omni.usd from pxr import Gf, Sdf, Usd, UsdGeom VERSION = 15 XFORM_OP_TRANSLATE_TYPE_TOKEN = UsdGeom.XformOp.GetOpTypeToken(UsdGeom.XformOp.TypeTranslate) XFORM_OP_TRANSLATE_ATTR_NAME = "xformOp:" + XFORM_OP_TRANSLATE_TYPE_TOKEN def get_prim_transform(prim, with_pivot, xform_cache=None, time_code=Usd.TimeCode.Default()): """Returns a prim's local transformation, converting mesh points into parent-space coords. with_pivot=True: returns GetLocalTransformation, where pivot and pivot^-1 are included into the translation. with_pivot=False will set translation to the actual translate XformOp. If no pivot is set, returns GetLocalTransformation() """ if xform_cache is None: xform_cache = UsdGeom.XformCache(time_code) mat, _ = xform_cache.GetLocalTransformation(prim) if with_pivot: return mat # remove pivot from local transform attr_name = XFORM_OP_TRANSLATE_ATTR_NAME op_attr = prim.GetAttribute(attr_name + ":pivot") if not op_attr: # no pivot, return mat return mat op_attr = prim.GetAttribute(attr_name) if op_attr: op = UsdGeom.XformOp(op_attr) if op: trans = op.Get(time_code) if trans is not None: mat.SetTranslateOnly(make_vec3_for_matrix4(mat, trans)) return mat # translation not found: set to identity translate mat.SetTranslateOnly(make_vec3_for_matrix4(mat, 0, 0, 0)) return mat def set_prim_transform(prim, mat, sdf_change_block=1, time_code=Usd.TimeCode.Default()): """sdf_change_block: 0: don't use, 1: use locally, 2: assume already began""" sdf_change_block = 0 stage = prim.GetStage() if sdf_change_block == 1: Sdf.BeginChangeBlock() xform = UsdGeom.Xformable(prim) ops = xform.GetOrderedXformOps() for op in ops: if op.GetOpType() == UsdGeom.XformOp.TypeTransform: _set_xform_op_time_code(op, mat, time_code, stage) if sdf_change_block == 1: Sdf.EndChangeBlock() return def get_or_add(op_type, prec): type_token = UsdGeom.XformOp.GetOpTypeToken(op_type) attr_name = "xformOp:" + type_token op_attr = prim.GetAttribute(attr_name) if 
op_attr: op = UsdGeom.XformOp(op_attr) if op: return op if sdf_change_block >= 1: Sdf.EndChangeBlock() op = xform.AddXformOp(op_type, prec) if sdf_change_block >= 1: Sdf.BeginChangeBlock() return op # not a transform: decompose matrix and set various S,R,T as needed _, _, scale, rot_mat, trans, _ = mat.Factor() rot_mat.Orthonormalize(False) rot = rot_mat.ExtractRotation() new_ops = [] # translation op = get_or_add(UsdGeom.XformOp.TypeTranslate, UsdGeom.XformOp.PrecisionDouble) if op: _set_xform_op_time_code(op, trans, time_code, stage) new_ops.append(op) # scale/rotate pivot (a translate) pivot_op = None attr_name = XFORM_OP_TRANSLATE_ATTR_NAME + ":pivot" op_attr = prim.GetAttribute(attr_name) if op_attr: pivot_op = UsdGeom.XformOp(op_attr) if pivot_op: new_ops.append(pivot_op) # rotation: pick first type rot_type, rot_prec = UsdGeom.XformOp.TypeRotateXYZ, UsdGeom.XformOp.PrecisionFloat for op in ops: op_type = op.GetOpType() if op_type >= UsdGeom.XformOp.TypeRotateX and op_type <= UsdGeom.XformOp.TypeOrient: rot_type, rot_prec = op_type, op.GetPrecision() break def rot_get_or_add(rot_type, axis_0, axis_1, axis_2, x, y, z, rot_prec ): angles = rot.Decompose(axis_0, axis_1, axis_2) rot_vals = Gf.Vec3f(angles[x], angles[y], angles[z]) # unscramble to x,y,z order that op.Set() needs op = get_or_add(rot_type, rot_prec) if op: _set_xform_op_time_code(op, rot_vals, time_code, stage) new_ops.append(op) # single rotation? 
if rot_type >= UsdGeom.XformOp.TypeRotateX and rot_type <= UsdGeom.XformOp.TypeRotateZ: angles = rot.Decompose(Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis()) op = get_or_add(UsdGeom.XformOp.TypeRotateX, rot_prec) if op: _set_xform_op_time_code(op, angles[2], time_code, stage) new_ops.append(op) op = get_or_add(UsdGeom.XformOp.TypeRotateY, rot_prec) if op: _set_xform_op_time_code(op, angles[1], time_code, stage) new_ops.append(op) op = get_or_add(UsdGeom.XformOp.TypeRotateZ, rot_prec) if op: _set_xform_op_time_code(op, angles[0], time_code, stage) new_ops.append(op) # quaternion? elif rot_type == UsdGeom.XformOp.TypeOrient: type_token = UsdGeom.XformOp.GetOpTypeToken(rot_type) attr_name = "xformOp:" + type_token op_attr = prim.GetAttribute(attr_name) if op_attr: op = UsdGeom.XformOp(op_attr) if op: _set_xform_op_time_code(op, rot.GetQuat(), time_code, stage) new_ops.append(op) # triple rotation? elif rot_type == UsdGeom.XformOp.TypeRotateXZY: rot_get_or_add(rot_type, Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis(), Gf.Vec3d.XAxis(), 2, 0, 1, rot_prec) elif rot_type == UsdGeom.XformOp.TypeRotateYXZ: rot_get_or_add(rot_type, Gf.Vec3d.ZAxis(), Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), 1, 2, 0, rot_prec) elif rot_type == UsdGeom.XformOp.TypeRotateYZX: rot_get_or_add(rot_type, Gf.Vec3d.XAxis(), Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), 0, 2, 1, rot_prec) elif rot_type == UsdGeom.XformOp.TypeRotateZXY: rot_get_or_add(rot_type, Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis(), Gf.Vec3d.ZAxis(), 1, 0, 2, rot_prec) elif rot_type == UsdGeom.XformOp.TypeRotateZYX: rot_get_or_add(rot_type, Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis(), 0, 1, 2, rot_prec) else: # just assume TypeRotateXYZ for any other rot_get_or_add(rot_type, Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis(), 2, 1, 0, rot_prec) # scale op = get_or_add(UsdGeom.XformOp.TypeScale, UsdGeom.XformOp.PrecisionFloat) if op: _set_xform_op_time_code(op, scale, time_code, stage) new_ops.append(op) # pivot_op^-1 if pivot_op is not None: for op 
in ops: if op.IsInverseOp() and \ op.GetOpType() == UsdGeom.XformOp.TypeTranslate and \ is_pivot_xform_op_name_suffix(op.GetOpName()): new_ops.append(op) break # and finally set new ops into xform xform.SetXformOpOrder(new_ops, xform.GetResetXformStack()) if sdf_change_block == 1: Sdf.EndChangeBlock() """ Note: touch_prim_xform() doesn't work, probably because the value is equal and caches are not rebuilt. But this does: lmat = get_prim_transform(prim, False, xform_cache, time_code) cmd = TransformPrimCommand(path=path, new_transform_matrix=lmat, time_code=time_code) #slower: cmd = TransformPrimSRTCommand(path=path, time_code=time_code) cmd.do() -------------------- def touch_prim_xform(prim, sdf_change_block=1, time_code=Usd.TimeCode.Default()): #sdf_change_block: 0: don't use, 1: use locally, 2: assume already began if sdf_change_block == 1: Sdf.BeginChangeBlock() xform = UsdGeom.Xformable(prim) ops = xform.GetOrderedXformOps() for op in ops: if not op.IsInverseOp(): op.Set(op.Get(time_code), time_code) break if sdf_change_block == 1: Sdf.EndChangeBlock() """ def get_prim_translation(prim, time_code=Usd.TimeCode.Default()): # remove pivot from local transform op_attr = prim.GetAttribute(XFORM_OP_TRANSLATE_ATTR_NAME) if op_attr: op = UsdGeom.XformOp(op_attr) if op: trans = op.Get(time_code) if trans is not None: return Gf.Vec3d(trans) # translation not found: return identity return Gf.Vec3d(0.) 
def set_prim_translation(prim, trans, sdf_change_block=1, time_code=Usd.TimeCode.Default()):
    """Set the local translation of a prim.

    Writes through the prim's matrix op if it has one, otherwise through a
    (possibly newly prepended) translate op.

    sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
    """
    # print(prim.GetPath().pathString)
    # NOTE(review): the parameter is forced to 0 here, so every change-block
    # branch below is currently dead — presumably a deliberate workaround;
    # confirm before removing.
    sdf_change_block = 0

    matrix_op = None
    translate_op = None
    xform = UsdGeom.Xformable(prim)
    for xop in xform.GetOrderedXformOps():
        kind = xop.GetOpType()
        if kind == UsdGeom.XformOp.TypeTransform:
            matrix_op = xop
            break
        if kind == UsdGeom.XformOp.TypeTranslate and not is_pivot_xform_op_name_suffix(xop.GetOpName()):
            # simple translation, not pivot/invert
            translate_op = xop
            break

    stage = prim.GetStage()
    if matrix_op:
        # prim is driven by a single matrix op: author a translation matrix into it
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        mat = Gf.Matrix4d()
        mat.SetTranslate(trans)
        _set_xform_op_time_code(matrix_op, mat, time_code, stage)
    else:
        # set or add a translate op; can't just set the attribute because the
        # xformOpOrder might not include it yet
        if not translate_op:
            if sdf_change_block == 2:
                Sdf.EndChangeBlock()
            translate_op = _prepend_xform_op(xform, UsdGeom.XformOp.TypeTranslate,
                                             get_xform_op_precision(trans), time_code, stage)
            if sdf_change_block == 2:
                Sdf.BeginChangeBlock()
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        _set_xform_op_time_code(translate_op, trans, time_code, stage)
    if sdf_change_block == 1:
        Sdf.EndChangeBlock()


def set_prim_translation_fast(prim, trans, sdf_change_block=1, time_code=Usd.TimeCode.Default()):
    """As set_prim_translation() but won't copy time samples from weaker layers.

    sdf_change_block: 0: don't use, 1: use locally, 2: assume already began
    see: https://graphics.pixar.com/usd/release/api/class_sdf_change_block.html
    """
    # NOTE(review): forced to 0, disabling every change-block path below — confirm intent.
    sdf_change_block = 0

    if prim.HasAttribute("xformOp:mat"):
        # matrix-driven prim: patch only the translation of the existing matrix
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        attr = prim.GetAttribute("xformOp:mat")
        if not attr.GetNumTimeSamples():
            # nothing time-sampled: author the default value instead
            time_code = Usd.TimeCode.Default()
        mat = attr.Get(time_code)
        mat.SetTranslateOnly(trans)
        attr.Set(mat, time_code)
    else:
        # set or add a translate op; can't just set the attribute because the
        # xformOpOrder might not include it yet
        op = UsdGeom.XformOp(prim.GetAttribute("xformOp:translate"))
        if not op:
            if sdf_change_block == 2:
                Sdf.EndChangeBlock()
            op = _prepend_xform_op(UsdGeom.Xformable(prim), UsdGeom.XformOp.TypeTranslate,
                                   get_xform_op_precision(trans), time_code, prim.GetStage())
            if sdf_change_block == 2:
                Sdf.BeginChangeBlock()
        if sdf_change_block == 1:
            Sdf.BeginChangeBlock()
        if not op.GetNumTimeSamples():
            time_code = Usd.TimeCode.Default()
        op.Set(trans, time_code)  # Gf.Vec3d()
    if sdf_change_block == 1:
        Sdf.EndChangeBlock()


def _set_xform_op_time_code(xform_op, value, time_code, stage):
    """Author value on xform_op at time_code, preserving any already-authored
    value type and pulling time samples up from weaker layers when authoring
    at a non-default time."""
    prev = xform_op.Get(time_code)
    if not xform_op.GetNumTimeSamples():
        # no time samples authored: write the default value instead
        time_code = Usd.TimeCode.Default()
    if not time_code.IsDefault():
        omni.usd.copy_timesamples_from_weaker_layer(stage, xform_op.GetAttr())
    if prev is None:
        xform_op.Set(value, time_code)
    else:
        # cast to the existing type so the attribute's value type is preserved
        xform_op.Set(type(prev)(value), time_code)


def _prepend_xform_op(xform, op_type, prec, time_code, stage):
    """Insert a new xform op at the front of the op order, re-adding the
    previous ops (and their values at time_code) after it.

    Returns the newly created op."""
    # print("pre", _get_xform_op_order(xform))
    old_ops = xform.GetOrderedXformOps()
    xform.SetXformOpOrder([])
    # print("mid", _get_xform_op_order(xform))
    new_op = xform.AddXformOp(op_type, prec)
    for old in old_ops:
        suffix = get_xform_op_name_suffix(old.GetOpName())
        inverse = old.IsInverseOp()
        rebuilt = xform.AddXformOp(old.GetOpType(), old.GetPrecision(), suffix, inverse)
        if not inverse:
            # inverse ops carry no authored value of their own
            value = old.Get(time_code)
            if value is not None:
                _set_xform_op_time_code(rebuilt, value, time_code, stage)
    # print("post", _get_xform_op_order(xform))
    return new_op


def get_xform_op_precision(t):
    """Double precision for Gf double-precision values, float precision otherwise."""
    return (UsdGeom.XformOp.PrecisionDouble
            if isinstance(t, (Gf.Matrix4d, Gf.Vec3d))
            else UsdGeom.XformOp.PrecisionFloat)


def get_vec3_type_for_matrix4(mat):
    """Vec3 type whose precision matches the given matrix type."""
    return Gf.Vec3d if isinstance(mat, Gf.Matrix4d) else Gf.Vec3f


def make_vec3_for_matrix4(mat, x, y=None, z=None):
    """Build a Vec3 (precision matching mat) from a triple x, or from x, y, z."""
    vec3 = get_vec3_type_for_matrix4(mat)
    if y is None:
        return vec3(x[0], x[1], x[2])
    return vec3(x, y, z)


def _get_xform_op_order(xform):
    """Debug helper: comma-terminated concatenation of the ordered op names."""
    return "".join(op.GetOpName() + "," for op in xform.GetOrderedXformOps())


# prefix USD uses in op names of inverse ops, e.g. "!invert!xformOp:translate:pivot"
XFORM_OP_INVERSE_PREFIX = "!invert!"


def is_xform_op_name_inverse(op_name):
    """True if op_name names an inverse xform op."""
    return op_name.startswith(XFORM_OP_INVERSE_PREFIX)


def get_xform_op_name_suffix(op_name):
    """Suffix of an xformOp name ("pivot" in "xformOp:translate:pivot"), else ""."""
    if is_xform_op_name_inverse(op_name):
        op_name = op_name[len(XFORM_OP_INVERSE_PREFIX):]
    if not op_name.startswith("xformOp:"):
        return ""
    parts = op_name.split(":", 2)
    return parts[2] if len(parts) >= 3 else ""


def is_pivot_xform_op_name_suffix(op_name):
    """True if op_name is a pivot op name, i.e. "xformOp:<type>:pivot"."""
    return get_xform_op_name_suffix(op_name) == "pivot"


def create_edit_context(path, stage):
    """Edit context targeting the session layer (or one of its sublayers) when
    path has a def spec there; otherwise the stage's current edit target.

    Unsafe from threading? No issues so far:
    https://graphics.pixar.com/usd/release/api/class_usd_edit_context.html#details
    """
    layer, prim = omni.usd.find_spec_on_session_or_its_sublayers(stage, path)
    if not prim or not layer:
        return Usd.EditContext(stage)
    if prim.specifier == Sdf.SpecifierDef:
        return Usd.EditContext(stage, Usd.EditTarget(layer))
    return Usd.EditContext(stage)
15,617
Python
27.14054
126
0.570212
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/ui_utils.py
""" Utility UI functions. """ from enum import IntEnum import omni.ui as ui VERSION = 4 class UiPaletteDark(IntEnum): """Colors in 0xAABBGGRR format. All colors with ff alpha if possible""" BACK = 0xff23211f # darker than WINDOW_BACK: general widget background, window title bar BACK_SELECTED = 0xff6e6e6e BACK_HOVERED = BACK_SELECTED TEXT = 0xffcccccc TEXT_SELECTED = 0xff8b8a8a TEXT_DISABLED = 0xff505050 WINDOW_BACK = 0xff454545 # lighter than BACK: window base color where darker controls are placed TOOLTIP_TEXT = 0xff303030 TOOLTIP_BACK = 0xffaadddd RESET = 0xffa07d4f # field reset button TRANSP = 0x00000000 TRANSP_NOT_0 = 0x00ffffff # some widgets collapse width if 0 is passed as a color UiPal = UiPaletteDark def UiPal_refresh(): global UiPal UiPal = UiPaletteDark def create_tooltip(text: str, tooltip_style=None, tooltip_text_style=None): if tooltip_style is None: tooltip_style = { "color": UiPal.TOOLTIP_TEXT, "background_color": UiPal.TOOLTIP_BACK, "margin": -1, "border_width": 0, } if tooltip_text_style is None: tooltip_text_style = {"margin": 3} with ui.ZStack(style=tooltip_style): ui.Rectangle() ui.Label(text, style=tooltip_text_style) def create_tooltip_fn(text: str, tooltip_style=None, tooltip_text_style=None): return lambda: create_tooltip(text, tooltip_style, tooltip_text_style) def create_reset_button(reset_value, widget_model, widget_set_value_fn, widget_add_value_changed_fn, style_on=None, style_off=None, on_tooltip_text=True, # True: use default, None: no tooltip ) -> ui.Rectangle: if style_on is None: style_on = { "background_color": UiPal.RESET, "border_radius": 2, "color": 0xffffffff } if style_off is None: style_off = {"background_color": UiPal.TEXT_DISABLED} if on_tooltip_text is True: on_tooltip_text = "Click to reset to default value" def update_rect(new_value, *_): if type(new_value) is ui.AbstractItemModel: new_value = new_value.get_item_value_model() if type(reset_value) is bool: new_value = new_value.as_bool elif type(reset_value) is int: 
new_value = new_value.as_int elif type(reset_value) is float: new_value = new_value.as_float # value changed? display reset button rect.visible = new_value != reset_value SIZE = 12 OFF_LEFT_PAD = 3 OFF_SIZE = 5 with ui.VStack(width=0, style={"margin": 0}): ui.Spacer() with ui.ZStack(width=SIZE, height=SIZE): # disabled reset button with ui.HStack(width=SIZE, height=SIZE): ui.Spacer(width=OFF_LEFT_PAD) with ui.VStack(width=SIZE, height=SIZE): ui.Spacer() ui.Rectangle(width=OFF_SIZE, height=OFF_SIZE, name="reset_off", style=style_off) ui.Spacer() # actionable reset button rect = ui.Rectangle( width=SIZE, height=SIZE, name="reset", alignment=ui.Alignment.V_CENTER, style=style_on, margin=0) if on_tooltip_text is not None: rect.set_tooltip_fn(create_tooltip_fn(on_tooltip_text)) rect.set_mouse_pressed_fn(lambda x, y, b, m: widget_set_value_fn(reset_value)) # initial rect visibility update_rect(widget_model) ui.Spacer() widget_add_value_changed_fn(update_rect) return rect
4,015
Python
25.077922
101
0.558655
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/viewport_helper.py
""" + Coordinate spaces: - 2D screen coordinate spaces ui: whole frame area float UI units, only equal to px units when omni.ui.Workspace.get_dpi_scale() is 1. ui = px_units / dpi_scale. Origin is left-top corner of the frame, 0..ui_size(). (app_ui = ui coordinates in Kit's app coordinates, window left-top is the origin.) (px: whole frame area integer screen monitor pixels, 0..px_size(). Use ui coords instead for units to scale in high density displays) 01: float 0..1 coordinates covering whole frame area. Origin is left-top corner. ndc: float -1..+1 Normalized Device Coordinates covering whole frame area. Origin is center, 1,1 is top-right corner of frame. iscene: coordinates in a SceneView with view and projection transforms both set to identity matrices. Origin is center, +x,+y is right-top corner. Can span -xy..+xy, where xy=iscene_half(). Fixed aspect ratio: a size displays at the same length in x and y. render: area where rendering displays with size fitted to frame area, which can occupy whole or only a part. NDC coords always extend -1..+1, origin is the center, 1,1 is top-right corner of frame. - 3D world space world: world space 3D coordinates + Coordinate/size conversions: - 2D screen spaces conv_iscene_from_render conv_iscene_from_ndc conv_iscene_from_01 conv_iscene_from_ui size_iscene_from_ui conv_render_from_ndc <-> conv_ndc_from_render conv_01_from_ui <-> conv_ui_from_01 conv_ndc_from_ui <-> conv_ui_from_ndc conv_01_from_app_ui, conv_ndc_from_app_ui, conv_ui_from_app_ui conv_ndc_from_01 <-> conv_01_from_ndc - 3D <-> 2D spaces conv_render_from_world conv_iscene_from_world pick_ray_from_render All conv_* methods accept points in Gf.Vec2*/Gf.Vec3* or tuple, but always return Gf.Vec2d/Gf.Vec3d points. 
+ SceneView transformations get_transform_iscene_from_ui get_transform_iscene_from_render + Legacy Viewport: Extension omni.kit.viewport_legacy (was omni.kit.window.viewport) _win -> class omni.kit.viewport.utility.legacy_viewport_window.LegacyViewportWindow -> omni.ui.Window _api -> class omni.kit.viewport.utility.legacy_viewport_api.LegacyViewportAPI Use _win.legacy_window to get the actual IViewportWindow -> class omni.kit.viewport_legacy._viewport_legacy.IViewportWindow set_enabled_picking(), get_mouse_event_stream(), etc + Viewport Next Partially supported. Extensions omni.kit.viewport.window, omni.kit.widget.viewport + Notes - Don't store and always access ViewportHelper's frame or render area sizes as they may change due to user interactions, even when changing Kit between display monitors. """ import asyncio, functools import carb import omni.kit import omni.kit.viewport.utility as vut import omni.ui as ui """ Since omni.ui.scene may not not available on Kit's early launch, if you're launch time errors related with omni.ui.scene, add omni.ui.scene to your extension dependencies in extension.toml: [dependencies] "omni.ui.scene" = {} """ from omni.ui import scene as sc from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil SETTING_RENDER_WIDTH = "/app/renderer/resolution/width" SETTING_RENDER_HEIGHT = "/app/renderer/resolution/height" SETTING_CONFORM_POLICY = "/app/hydra/aperture/conform" SETTING_RENDER_FILL_LEGACY = "/app/runLoops/rendering_0/fillResolution" SETTING_RENDER_FILL = "/persistent/app/viewport/{api_id}/fillViewport" SETTING_DEFAULT_WINDOW_NAME = "/exts/omni.kit.viewport.window/startup/windowName" class ViewportHelper(): LIB_VERSION = 45 def __init__(self, window_name=None, attach: bool = True): self._win = None self._api = None self._ws_win_frame = None self._sub_render_width = None self._sub_render_height = None self._sub_render_fill = None self._is_legacy = True self._frame_mouse_fns = {} # frame: set(fn,fn,...) 
self._frame_size_changed_fns = {} # frame: set(fn,fn,...) self._render_changed_fns = set() # set(fn,fn,...) self._stage_objects_changed = None # [listener, set(fn,fn,...)] self._changed_fns = {} # fn: sub_flags if attach: res = self.attach(window_name=window_name) if not res: raise AssertionError("Could not attach") def __del__(self): self.detach() def attach(self, window_name=None, usd_context_name: str = '') -> bool: """ window_name: str: actual window name/title, like "Viewport" None: current/last active viewport int: index into ViewportHelper.get_window_names() Window selection order: .get_active_viewport_and_window() vut tries to attach "Viewport Next" first, then legacy "Viewport" windows.""" self.detach() if window_name is not None: if type(window_name) is int: wn_list = ViewportHelper.get_window_names() if window_name < len(wn_list): window_name = wn_list[window_name] else: raise AssertionError("Non-existent window_name") else: raise AssertionError("Bad window_name index") self._api,self._win = vut.get_active_viewport_and_window(usd_context_name=usd_context_name, window_name=window_name) if self._win is None or self._api is None: self._win = None self._api = None self._ws_win = None self._ws_win_frame = None return False if self.stage is None: raise AssertionError("Stage not available") self._is_legacy = hasattr(self._api, "legacy_window") self._ws_win = ui.Workspace.get_window(self._win.name) if self._ws_win is None: raise AssertionError("Workspace window not available") """ if not self._ws_win.visible: print("Viewport Window is not visible: can't attach") self.detach() return False """ if not hasattr(self._ws_win, 'frame'): self._ws_win_frame = None raise AssertionError("Workspace window frame not available") self._ws_win_frame = self._ws_win.frame return True def detach(self): settings = carb.settings.get_settings() if self._sub_render_width: settings.unsubscribe_to_change_events(self._sub_render_width) self._sub_render_width = None if 
self._sub_render_height: settings.unsubscribe_to_change_events(self._sub_render_height) self._sub_render_height = None if self._sub_render_fill: settings.unsubscribe_to_change_events(self._sub_render_fill) self._sub_render_fill = None if self._win is not None: if self._is_legacy: self._win.destroy() self._win = None self._api = None self._ws_win = None self._ws_win_frame = None self._frame_mouse_fns.clear() self._frame_size_changed_fns.clear() self._render_changed_fns.clear() self._changed_fns.clear() if self._stage_objects_changed is not None: if len(self._stage_objects_changed): self._stage_objects_changed[0].Revoke() self._stage_objects_changed = None @property def is_attached(self): return self._win is not None @property def window_name(self) -> str: return self._win.name @staticmethod def get_default_window_name(): return carb.settings.get_settings().get(SETTING_DEFAULT_WINDOW_NAME) or 'Viewport' @staticmethod def get_window_names(): try: from omni.kit.viewport.window import get_viewport_window_instances return [w.title for w in get_viewport_window_instances()] except ImportError: return [ViewportHelper.get_default_window_name()] @property def is_legacy(self): return self._is_legacy @property def camera_path(self) -> Sdf.Path: return self._api.camera_path @camera_path.setter def camera_path(self, camera_path): self._api.camera_path = camera_path def get_camera_view_proj(self): frustum = self.get_conformed_frustum() if frustum is None: return None return frustum.ComputeViewMatrix(), frustum.ComputeProjectionMatrix() def same_api(self, api) -> bool: return id(api) == id(self._api) def get_gf_camera(self): """Returns None if no valid prim found.""" cam = self._api.camera_path stage = self.stage if stage is None: raise AssertionError("Stage not available") cam_prim = stage.GetPrimAtPath( self.camera_path ) if cam_prim and cam_prim.IsValid(): usd_cam = UsdGeom.Camera(cam_prim) if usd_cam: return usd_cam.GetCamera() # fall over return None @property def fps(self) -> 
float: return self._api.fps @property def usd_context_name(self) -> str: return self._api.usd_context_name @property def usd_context(self): return self._api.usd_context @property def stage(self): return self.usd_context.get_stage() def get_frame(self, frame_id: str): return self._win.get_frame(frame_id) @property def ui_size(self): """ Due to DPI pixel multiplier, can return fractional. In DPI > 1 displays, this is UI units. Actual display pixels = UI units * omni.ui.Workspace.get_dpi_scale() """ if self._ws_win_frame is not None: return self._ws_win_frame.computed_width, self._ws_win_frame.computed_height else: return 1.,1. @property def px_size(self): """ Returns int size """ ui_size = self.ui_size dpi_mult = ui.Workspace.get_dpi_scale() return int(round(ui_size[0] * dpi_mult)), int(round(ui_size[1] * dpi_mult)) @property def ui_size_ratio(self): size = self.ui_size return size[0] / size[1] if size[1] else 1. @property def render_size_px(self): size = self._api.resolution return (int(size[0]), int(size[1])) @render_size_px.setter def render_size_px(self, size): self._api.resolution = (int(size[0]), int(size[1])) # render_size width/height ratio @property def render_size_ratio(self): size = self.render_size_px return size[0] / size[1] if size[1] else 1. 
""" ?Also render_rect_px, render_left_top_px """ """ Kit-103.1.2/3: render_fill_frame get/set does not work coherently Legacy Viewport: setting fill_frame makes viewport settings "Fill Viewport" disappear Viewport 2: only works setting to True Kit 104.0: Viewport 2: api is not initialized to setting: so we use setting @property def render_fill_frame(self): return self._api.fill_frame @render_fill_frame.setter def render_fill_frame(self, value: bool): self._api.fill_frame = value """ @property def render_fill_frame(self): if self._is_legacy: name = SETTING_RENDER_FILL_LEGACY else: name = SETTING_RENDER_FILL.format(api_id=self._api.id) return bool(carb.settings.get_settings().get(name)) @render_fill_frame.setter def render_fill_frame(self, value: bool): if self._is_legacy: name = SETTING_RENDER_FILL_LEGACY else: name = SETTING_RENDER_FILL.format(api_id=self._api.id) carb.settings.get_settings().set(name, value) def get_conformed_frustum(self): cam = self.get_gf_camera() if cam is None: raise AssertionError("Camera not available") frustum = cam.frustum conform_policy = ViewportHelper.get_conform_policy() CameraUtil.ConformWindow(frustum, conform_policy, self.render_size_ratio) return frustum @staticmethod def get_conform_policy(): """conform_policy: how is the render area fit into the frame area""" policy = carb.settings.get_settings().get(SETTING_CONFORM_POLICY) if policy is None or policy < 0 or policy > 5: return CameraUtil.MatchHorizontally else: policies = [ CameraUtil.MatchVertically, CameraUtil.MatchHorizontally, CameraUtil.Fit, CameraUtil.Crop, CameraUtil.DontConform, CameraUtil.DontConform, ] return policies[policy] def sync_scene_view(self, scene_view): """Must be called after viewport changes or before using a SceneView. 
A SceneView's "screen_aspect_ratio" is the ratio of what we call the render space""" frame_ratio = self.ui_size_ratio render_ratio = self.render_size_ratio if False and abs(frame_ratio - render_ratio) < 1e-6: # render equal to frame area: set to 0 ratio = 0 else: ratio = render_ratio if scene_view.screen_aspect_ratio != ratio: scene_view.screen_aspect_ratio = ratio # print("setup_scene_view asp_rat", scene_view.screen_aspect_ratio) #====================================================================== coord space conversion # generic NDC <-> 0..1 conversion @staticmethod def conv_ndc_from_01(coord): return Gf.Vec2d( coord[0]*2. - 1., -(coord[1]*2. - 1.) ) @staticmethod def conv_01_from_ndc(coord): return Gf.Vec2d( (coord[0] + 1.) * 0.5, (-coord[1] + 1.) * 0.5) def conv_01_from_ui(self, coord): width,height = self.ui_size return Gf.Vec2d(coord[0] / width, coord[1] / height) def conv_ui_from_01(self, coord): width,height = self.ui_size return Gf.Vec2d(coord[0] * width, coord[1] * height) def conv_ui_from_app_ui(self, coord): frame = self._win.frame return Gf.Vec2d(coord[0] - frame.screen_position_x, coord[1] - frame.screen_position_y) def conv_01_from_app_ui(self, coord): frame = self._win.frame return self.conv_01_from_ui( (coord[0] - frame.screen_position_x, coord[1] - frame.screen_position_y) ) def conv_ndc_from_ui(self, coord): xy = self.conv_01_from_ui(coord) return ViewportHelper.conv_ndc_from_01(xy) def conv_ui_from_ndc(self, coord): xy = ViewportHelper.conv_01_from_ndc(xy) return ViewportHelper.conv_ui_from_01(xy) def conv_ndc_from_app_ui(self, coord): xy = self.conv_01_from_app_ui(coord) return ViewportHelper.conv_ndc_from_01(xy) @property def _render_from_size_ratios(self): fr = self.ui_size frame_ratio = fr[0] / fr[1] if fr[1] else 1. render_ratio = self.render_size_ratio if frame_ratio >= render_ratio: # tex vertical -1..+1 return (frame_ratio / render_ratio, 1.) 
else: # return (1., render_ratio / frame_ratio) # coordinate conversion between frame-NDC and render(NDC) spaces def conv_render_from_ndc(self, frame_ndc): mx = frame_ndc[0] my = frame_ndc[1] ratios = self._render_from_size_ratios mx *= ratios[0] my *= ratios[1] return Gf.Vec2d(mx, my) def conv_ndc_from_render(self, render_ndc): mx,my = self.conv_render_from_ndc(render_ndc) return Gf.Vec2d(1./mx, 1./my) def iscene_size(self, scene_view): w,h = self.iscene_half(scene_view) return w*2.,h*2. def iscene_half(self, scene_view): frame_ratio = self.ui_size_ratio render_ratio = self.render_size_ratio fills = abs(frame_ratio - render_ratio) < 1e-6 lands = frame_ratio >= render_ratio asp_rat = scene_view.aspect_ratio_policy # print("fills,lands", fills, lands, frame_ratio, render_ratio) if asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT: if fills and frame_ratio < 1: mul = 1.,1./frame_ratio elif lands: mul = frame_ratio,1. else: mul = render_ratio,render_ratio/frame_ratio elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL: if lands: mul = frame_ratio/render_ratio,1./render_ratio else: mul = 1.,1./frame_ratio elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL: if lands: mul = frame_ratio,1. else: mul = render_ratio,render_ratio/frame_ratio elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_CROP: if fills and frame_ratio < 1: mul=frame_ratio,1. elif lands: mul = frame_ratio/render_ratio,1./render_ratio elif frame_ratio >= 1: mul = 1.,1./frame_ratio else: mul = 1,1./frame_ratio elif asp_rat == sc.AspectRatioPolicy.STRETCH: if frame_ratio >= 1: mul = frame_ratio,1. else: mul = 1,1./frame_ratio else: mul = 1.,1. 
return mul def iscene_render_half(self, scene_view): """Render half size expressed in iscene coords""" frame_ratio = self.ui_size_ratio render_ratio = self.render_size_ratio fills = abs(frame_ratio - render_ratio) < 1e-6 lands = frame_ratio >= render_ratio asp_rat = scene_view.aspect_ratio_policy if asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_FIT: if fills and frame_ratio < 1: mul = 1.,1./frame_ratio else: mul = render_ratio,1. elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_HORIZONTAL: mul = 1.,1./render_ratio elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_VERTICAL: mul = render_ratio,1. elif asp_rat == sc.AspectRatioPolicy.PRESERVE_ASPECT_CROP: if fills and frame_ratio < 1: mul=frame_ratio,1. else: mul = 1.,1./render_ratio elif asp_rat == sc.AspectRatioPolicy.STRETCH: if fills and frame_ratio < 1: mul = 1.,1./render_ratio elif lands: mul = render_ratio,1. elif frame_ratio >= 1: mul = frame_ratio,frame_ratio/render_ratio else: mul = 1.,1./render_ratio else: mul = 1.,1. return mul def conv_iscene_from_render(self, render_pt, scene_view): mul = self.iscene_render_half(scene_view) return Gf.Vec2d(render_pt[0] * mul[0], render_pt[1] * mul[1]) def conv_iscene_from_01(self, ui01, scene_view): size = self.ui_size pt = ViewportHelper.conv_ndc_from_01(ui01) mul = self.iscene_half(scene_view) return Gf.Vec2d(pt[0] * mul[0], pt[1] * mul[1]) def conv_iscene_from_ndc(self, ndc, scene_view): mul = self.iscene_half(scene_view) return Gf.Vec2d(ndc[0] * mul[0], ndc[1] * mul[1]) def conv_iscene_from_ui(self, ui_pt, scene_view): size = self.ui_size pt = ui_pt[0] / size[0] * 2. - 1., ui_pt[1] / size[1] * 2. - 1. # pt now in NDC mul = self.iscene_half(scene_view) return Gf.Vec2d(pt[0] * mul[0], pt[1] * mul[1]) def size_iscene_from_ui(self, ui_size, scene_view): size = self.ui_size ui_sz = 2. 
* ui_size / size[0] mul = self.iscene_half(scene_view) return ui_sz * mul[0] def get_transform_iscene_from_ui(self, scene_view): size_ui = self.ui_size iscene_half = self.iscene_half(scene_view) return sc.Matrix44.get_scale_matrix(iscene_half[0], iscene_half[1], 1.) * \ sc.Matrix44.get_translation_matrix(-1., +1., 0) * \ sc.Matrix44.get_scale_matrix(2./size_ui[0], -2./size_ui[1], 1.) def get_transform_iscene_from_render(self, scene_view): iscene_render_half = self.iscene_render_half(scene_view) return sc.Matrix44.get_scale_matrix(iscene_render_half[0], iscene_render_half[1], 1.) #====================================================================== 3D world <-> 2D screen conversion def pick_ray_from_render(self, render_ndc, frustum=None): if frustum is None: frustum = self.get_conformed_frustum() pos = Gf.Vec2d(render_ndc[0],render_ndc[1]) return frustum.ComputePickRay(pos) """ From frame space NDC coords example: x,y = self.conv_render_from_ndc(frame_ndc) if x is None or x < -1.0 or x > 1.0 or y < -1.0 or y > 1.0: return None return get_pick_ray((x,y)) """ def conv_render_from_world(self, wpt): """ wpt can be Gd.Vec3*, (x,y,z), single value or list returns Gf.Vec2d, single value or list NDC coords """ view,proj = self.get_camera_view_proj() mat = view*proj if isinstance(wpt, list): wpt_list=wpt else: wpt_list=[wpt] rpt = [] for pt in wpt_list: r = mat.Transform( Gf.Vec3d(pt[0],pt[1],pt[2]) ) rpt.append(r) if isinstance(wpt, list): return rpt else: return rpt[0] def conv_iscene_from_world(self, wpt, scene_view): """ wpt can be Gd.Vec3*, (x,y,z) or list. Not single value. 
returns Gf.Vec2d, single value or list NDC coords """ view,proj = self.get_camera_view_proj() mat = view*proj if isinstance(wpt, list): wpt_list=wpt else: wpt_list=[wpt] mul = self.iscene_render_half(scene_view) spt = [] for pt in wpt_list: r = mat.Transform( Gf.Vec3d(pt[0],pt[1],pt[2]) ) s = Gf.Vec2d(r[0] * mul[0], r[1] * mul[1]) spt.append(s) if isinstance(wpt, list): return spt else: return spt[0] def add_frame_mouse_fn(self, frame, fn, coord_space=0): """Called function params: op: 0=press 1=move 2=release 3=double click 4=mouse wheel 5=mouse hovered (entered) frame x,y: coordinates inside frame, depending on coord_space: 0=01 space 1=ui space 2=ndc space 3=render space button: 0=left 1=right 2=middle mod flags: 1=shift 2=ctrl 4=alt (6=altGr = ctrl + alt) 0x40000000=unknown during move and release """ if not frame in self._frame_mouse_fns: self._frame_mouse_fns[frame] = set() fnlist = self._frame_mouse_fns[frame] if fn in fnlist: return fnlist.add(fn) last_button_pressed = None def dispatch(op, x,y, button, mod): for fn in fnlist: fn(op, x,y, button, mod) def to_space(x,y): if coord_space <= 1: p01 = self.conv_01_from_app_ui((x,y)) if coord_space == 0: return p01 else: return self.conv_ui_from_01(p01) else: pndc = self.conv_ndc_from_app_ui((x,y)) if coord_space == 2: return pndc else: return self.conv_render_from_ndc(pndc) def on_mouse_pressed(x,y, button, mod): nonlocal last_button_pressed x,y = to_space(x,y) dispatch(0, x,y, button, mod) last_button_pressed = button def on_mouse_moved(x,y, mod, unknown_always_true): #on move: x,y can go outside 0,1 x,y = to_space(x,y) dispatch(1, x,y, last_button_pressed, mod) def on_mouse_released(x,y, button, mod): nonlocal last_button_pressed x,y = to_space(x,y) dispatch(2, x,y, button, mod) last_button_pressed = None def on_mouse_double_clicked(x,y, button, mod): x,y = to_space(x,y) dispatch(3, x,y, button, mod) def on_mouse_wheel(x,y, mod): dispatch(4, x,y, None, mod) def on_mouse_hovered(entered): # x=entered info 
dispatch(5, entered, None, None, None) frame.set_mouse_pressed_fn(on_mouse_pressed) frame.set_mouse_moved_fn(on_mouse_moved) frame.set_mouse_released_fn(on_mouse_released) frame.set_mouse_double_clicked_fn(on_mouse_double_clicked) frame.set_mouse_wheel_fn(on_mouse_wheel) frame.set_mouse_hovered_fn(on_mouse_hovered) def add_frame_size_changed_fn(self, frame, fn): if not frame in self._frame_size_changed_fns: def on_frame_size_changed(): if not frame in self._frame_size_changed_fns: return for fn in self._frame_size_changed_fns[frame]: fn() frame.set_computed_content_size_changed_fn( on_frame_size_changed ) self._frame_size_changed_fns[frame] = set() fnlist = self._frame_size_changed_fns[frame] fnlist.add( fn ) def remove_frame_size_changed_fn(self, frame, fn): if frame in self._frame_size_changed_fns: fnlist = self._frame_size_changed_fns[frame] fnlist.discard( fn ) def add_render_changed_fn(self, fn): """Call fn handler on render resolution or fill mode changed""" if self._sub_render_width is None: def on_render_changed(*args): """ will render resolution/frame_fill take a frame to reflect """ async def async_func(): await omni.kit.app.get_app().next_update_async() for fn in self._render_changed_fns: fn() asyncio.ensure_future( async_func() ) settings = carb.settings.get_settings() self._sub_render_width = settings.subscribe_to_node_change_events(SETTING_RENDER_WIDTH, on_render_changed) self._sub_render_height = settings.subscribe_to_node_change_events(SETTING_RENDER_HEIGHT, on_render_changed) self._sub_render_fill = settings.subscribe_to_node_change_events(SETTING_RENDER_FILL, on_render_changed) self._render_changed_fns.add(fn) def remove_render_changed_fn(self, fn): if self._sub_render_width is not None: self._render_changed_fns.discard(fn) def add_camera_changed_fn(self, fn): """Call fn handler when USD camera changes""" if self._stage_objects_changed is None: # handler needs to be a method as Register won't hold reference to a local function listener = 
Tf.Notice.Register( Usd.Notice.ObjectsChanged, self._on_stage_objects_changed, self.stage) self._stage_objects_changed = [listener, set()] val = self._stage_objects_changed val[1].add(fn) def _on_stage_objects_changed(self, notice, stage): if stage != self.stage or self._stage_objects_changed is None: return # did active camera change? cam_path = self.camera_path for n in notice.GetChangedInfoOnlyPaths(): if n.GetPrimPath() == cam_path: # found camera for fn in self._stage_objects_changed[1]: fn() return def remove_camera_changed_fn(self, fn): if self._stage_objects_changed is not None: val = self._stage_objects_changed val[1].discard(fn) def add_changed_fn(self, fn, sub_flags = 1|2|4, frame = None): """Call handler on frame, render or camera changes, depending on sub_flags mask. sub_flags: 1=frame size changed (requires frame param), 2=render changed, 4=camera changed fn(changed_flag) """ self._changed_fns[fn] = sub_flags #overwrite any existing for fn # add everytime because functions avoid duplicates: but only if not using lambdas! 
if sub_flags & 1: if frame is None: raise AssertionError("Frame size changed: frame parameter cannot be None") self.add_frame_size_changed_fn(frame, self._on_frame_changed) if sub_flags & 2: self.add_render_changed_fn(self._on_render_changed) if sub_flags & 4: self.add_camera_changed_fn(self._on_camera_changed) def _on_frame_changed(self): self._on_changed(1) def _on_render_changed(self): self._on_changed(2) def _on_camera_changed(self): self._on_changed(4) def _on_changed(self, changed_flag): for fn, mask in self._changed_fns.items(): if mask & changed_flag: fn(changed_flag) def remove_changed_fn(self, fn, frame): if fn in self._changed_fns: if self._changed_fns[fn] & 1 and frame is None: raise AssertionError("Frame size changed: frame parameter cannot be None") del self._changed_fns[fn] if not len(self._changed_fns): if frame is not None: self.remove_frame_size_changed_fn(frame, self._on_frame_changed) self.remove_render_changed_fn(self._on_render_changed) self.remove_camera_changed_fn(self._on_camera_changed) def add_scene_view_update(self, scene_view): self._api.add_scene_view(scene_view) def remove_scene_view_update(self, scene_view): self._api.remove_scene_view(scene_view) def register_scene(self, scene_creator, ext_id_or_name: str): """Registers a scene creator into: VP1: a viewport window, where scene is immediately created VP2: calls RegisterScene with omni.kit.viewport.registry, to create scene in current (full window) viewports and any new ones. 
scene_creator object created with: scene_creator_class(dict) VP1 dict = {viewport_api} VP2 dict = {viewport_api: omni.kit.viewport.window.ViewportAPI, layer_provider: omni.kit.viewport.window.ViewportLayers, usd_context_name: str} """ if self.is_legacy: with self._win.get_frame(ext_id_or_name): scene_view = sc.SceneView() with scene_view.scene: sce = scene_creator({"viewport_api": self._api}) # have viewport update our SceneView self.add_scene_view_update(scene_view) return [scene_view, sce] else: try: from omni.kit.viewport.registry import RegisterScene scene_reg = RegisterScene(scene_creator, ext_id_or_name) return [scene_reg] except ImportError: return None def register_scene_proxy(self, create_fn, destroy_fn, get_visible_fn, set_visible_fn, ext_id_or_name: str): lamb = ViewportHelper.SceneCreatorProxy.make_lambda(create_fn, destroy_fn, get_visible_fn, set_visible_fn) return self.register_scene(lamb, ext_id_or_name) def unregister_scene(self, scene_reg): if scene_reg is None or not len(scene_reg): return if self.is_legacy: scene_view = scene_reg[0] self.remove_scene_view_update(scene_view) scene_view.destroy() scene_reg.clear() class SceneCreatorProxy: @staticmethod def make_lambda(create_fn, destroy_fn, get_visible_fn, set_visible_fn): return lambda vp_args: ViewportHelper.SceneCreatorProxy(vp_args, create_fn, destroy_fn, get_visible_fn, set_visible_fn) def __init__(self, vp_args: dict, create_fn, destroy_fn, get_visible_fn, set_visible_fn): # print("SceneCreatorProxy.__init__", vp_args) # dict_keys(['usd_context_name', 'layer_provider', 'viewport_api']) """@ATTN: a scene may be created in multiple viewports. 
It's up to the _create_fn() callee to make sure it's being called in the intended viewport by checking vp_args['viewport_api']""" self._create_fn = create_fn self._destroy_fn = destroy_fn self._get_visible_fn = get_visible_fn self._set_visible_fn = set_visible_fn self._create_fn(vp_args) def destroy(self): # print("SceneCreatorProxy.destroy") if self._destroy_fn: self._destroy_fn() self._create_fn = None self._destroy_fn = None self._get_visible_fn = None self._set_visible_fn = None def __del__(self): self.destroy() # called from viewport registry @property def visible(self): # print("SceneCreatorProxy.get_visible") if self._get_visible_fn: return self._get_visible_fn() else: return True @visible.setter def visible(self, value: bool): # print("SceneCreatorProxy.set_visible", value) if self._set_visible_fn: return self._set_visible_fn(value) @property def picking_enabled(self): """Object picking and selection rect.""" if self._is_legacy: self._win.legacy_window.is_enabled_picking() else: # print("picking_enabled only supported for legacy viewport") return True @picking_enabled.setter def picking_enabled(self, enabled): """Disables object picking and selection rect.""" if self._is_legacy: self._win.legacy_window.set_enabled_picking(enabled) else: # print("picking_enabled only supported for legacy viewport") pass def temp_select_enabled(self, enable_picking): """Disables object picking and selection rect until next mouse up. 
enable_picking: enable picking for surface snap """ if self._is_legacy: self._win.legacy_window.disable_selection_rect(enable_picking) else: # print("temp_select_enabled only supported for legacy viewport") pass @property def manipulating_camera(self): if self._is_legacy: return self._win.legacy_window.is_manipulating_camera() else: # print("is_manipulating_camera only supported for legacy viewport") return False def save_render(self, file_path: str, render_product_path: str = None): """Doesn't save any overlaid SceneView drawing""" vut.capture_viewport_to_file(self._api, file_path=file_path, is_hdr=False, render_product_path=render_product_path) def info(self, scene_view=None): out = f"window_name='{self.window_name}' is_legacy={self.is_legacy} usd_context_name='{self.usd_context_name} api_id='{self._api.id}'\n" out += f"ui_size={self.ui_size} dpi={omni.ui.Workspace.get_dpi_scale()} px_size={self.px_size} ui_size_ratio={self.ui_size_ratio}\n" out += f"render_size_px={self.render_size_px} render_fill_frame={self.render_fill_frame} render_ratio={self.render_size_ratio}\n" if scene_view is not None: out += f"iscene_half={self.iscene_half(scene_view)} iscene_size={self.iscene_size(scene_view)} iscene_render_half={self.iscene_render_half(scene_view)}\n" out += f"camera_path='{self.camera_path}'\n" out += f"camera frustrum={self.get_conformed_frustum()}\n" view,proj = self.get_camera_view_proj() out += f"camera matrixes: view={view} proj={proj}\n" out += f"conform_policy={self.get_conform_policy()}\n" if scene_view is not None: out += f"scene_view aspect_ratio={scene_view.aspect_ratio_policy}\n" out += f"fps={self.fps}\n" return out """Examples: vp = ViewportHelper() res = vp.attach() # "Viewport" "Viewport Next" print(f"attach res={res}") frame = vp.get_frame("id") #frame.clear() #with frame: # with ui.VStack(): # ui.Spacer() # ui.Label("LABEL", alignment=ui.Alignment.CENTER, style={"font_size": 72}) # ui.Button("TO") # ui.Spacer() print (vp.info()) #vp.camera_path = 
"OmniverseKit_Top" # OmniverseKit_Persp vp.save_render("c:/tmp/vp.png") """
37,345
Python
29.045052
166
0.578525
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/app_helper.py
"""""" import asyncio, functools, sys import os.path import carb import omni.kit class AppHelper(): VERSION = 10 SETTING_TRANSFORM_OP = "/app/transform/operation" def __init__(self, attach=True): self._app = None self._settings = None self._setting_changed = {} # {"setting_path": [subs, set(fn0,fn1,...)], } self._input = None self._update_event_sub = None self._update_event_fns = set() self._key_action_subs = {} # {"action_name": [sub, [(fn0,fn1), (fn1,fn2), ...]] } if attach: res = self.attach() if not res: raise AssertionError("Could not attach") def __del__(self): self.detach() def attach(self) -> bool: self.detach() self._app = omni.kit.app.get_app() # omni.kit.app self._app_win = omni.appwindow.get_default_app_window() # omni.appwindow self._settings = carb.settings.get_settings() return True def detach(self): self._update_event_sub = None self._update_event_fns.clear() for v in self._setting_changed.values(): self._settings.unsubscribe_to_change_events(v[0]) self._setting_changed = {} self._settings = None if self._input is not None: for v in self._key_action_subs.values(): self._input.unsubscribe_to_action_events(v[0]) self._key_action_subs = {} self._input = None if self._app is not None: self._app = None def add_update_event_fn(self, fn, order=0, subscription_name=None): """ 0=NEW_FRAME """ if self._update_event_sub is None: def on_update(ev): for fn in self._update_event_fns: fn(ev) self._update_event_sub = self._app.get_update_event_stream().create_subscription_to_pop(on_update, order=order, name=subscription_name) self._update_event_fns.clear() self._update_event_fns.add(fn) def remove_update_event_fn(self, fn, event_type=-1): if self._update_event_sub: self._update_event_fns.discard(fn) def add_setting_changed_fn(self, setting_path, fn): """ fn(value, event_type) """ if not setting_path in self._setting_changed: def on_changed(item, event_type): fns = self._setting_changed[setting_path][1] for fn in fns: fn(str(item), event_type) 
self._setting_changed[setting_path] = [None, set()] self._setting_changed[setting_path][0] = self._settings.subscribe_to_node_change_events(setting_path, on_changed) s = self._setting_changed[setting_path][1] s.add(fn) def get_setting(self, setting_path): return str( self._settings.get(setting_path) ) def set_setting(self, setting_path, value): self._settings.set(setting_path, value) def add_key_action_fn(self, action_name, key, key_modifiers, on_key_fn, is_key_enabled_fn=None): """ key_modifiers: 1=shift, 2=ctrl, alt=4""" if action_name in self._key_action_subs: sub = self._key_action_subs[action_name] if not (on_key_fn, is_key_enabled_fn) in sub[1]: # fn pair already there sub[1].append((on_key_fn, is_key_enabled_fn)) return if self._input is None: self._input = carb.input.acquire_input_interface() set_path = self._app_win.get_action_mapping_set_path() set = self._input.get_action_mapping_set_by_path(set_path) string = carb.input.get_string_from_action_mapping_desc(key, key_modifiers) path = set_path + "/" + action_name + "/0" self._settings.set_default_string(path, string) def on_action(action_name, event, *_): if not event.flags & carb.input.BUTTON_FLAG_PRESSED: return if not action_name in self._key_action_subs: return try: # avoid keys pressed during camera manipulation import omni.kit.viewport_legacy vp = omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window() if vp.is_manipulating_camera(): return except Exception: pass sub = self._key_action_subs[action_name] for on_key_fn,is_key_enabled_fn in sub[1]: if is_key_enabled_fn is not None: if not is_key_enabled_fn(): continue on_key_fn() sub = [self._input.subscribe_to_action_events(set, action_name, functools.partial(on_action, action_name)), [(on_key_fn, is_key_enabled_fn)]] self._key_action_subs[action_name] = sub
5,108
Python
26.320855
125
0.536022
syntway/model_exploder/exts/syntway.model_exploder/syntway/model_exploder/libs/usd_helper.py
""" Notes: """ import omni.kit import omni.usd from pxr import Gf, Tf, Sdf, Usd, UsdGeom, CameraUtil from .app_utils import call_after_update class UsdHelper(): VERSION = 17 STAGE_CHANGED_SUB_PREFIX = "UsdHelper-stage-changed-ev" def __init__(self, attach=True, stage_opened_refresh=1 | 2): """ stage_opened_refresh: resubscribe events when a new stage finishes opening. A mask of: 1: resubscribe add_stage_event_fn handlers 2: resubscribe add_stage_objects_changed_fn handlers """ self._ctx = None self._stage_changed = {} # event_type: [sub, set(fn,fn,...)] self._stage_objects_changed = None # [listener, set(fn,fn,...)] self._stage_opened_refresh = stage_opened_refresh self._stage_opened_refresh_sub = None if attach: res = self.attach() if not res: raise AssertionError("Could not attach") def __del__(self): self.detach() def attach(self, usd_ctx=None) -> bool: """usd_ctx can be a string for context name, or an existing UsdContext.""" self.detach() if usd_ctx is None: usd_ctx = '' if isinstance(usd_ctx, str): self._ctx = omni.usd.get_context(usd_ctx) else: self._ctx = usd_ctx if self._stage_opened_refresh: self.add_stage_event_fn(self._on_stage_opened_refresh, omni.usd.StageEventType.OPENED) return True def detach(self): if self._ctx is not None: self._ctx = None if self._stage_objects_changed is not None: if len(self._stage_objects_changed): self._stage_objects_changed[0].Revoke() self._stage_objects_changed = None self._stage_changed.clear() self._stage_opened_refresh_sub = None @property def context(self): return self._ctx @property def stage(self): return self._ctx.get_stage() @property def stage_state(self) -> omni.usd.StageState: return self._ctx.get_stage_state() def is_stage_opened(self) -> bool: return self.stage_state == omni.usd.StageState.OPENED @property def stage_up(self): up = UsdGeom.GetStageUpAxis(self.stage) if up == UsdGeom.Tokens.y: return Gf.Vec3d(0, 1, 0) elif up == UsdGeom.Tokens.z: return Gf.Vec3d(0, 0, 1) else: # UsdGeom.Tokens.x return 
Gf.Vec3d(1, 0, 0) @property def stage_up_index(self): up = UsdGeom.GetStageUpAxis(self.stage) if up == UsdGeom.Tokens.y: return 1 elif up == UsdGeom.Tokens.z: return 2 else: # UsdGeom.Tokens.x: illegal return 0 @property def timecode(self) -> Usd.TimeCode: stage = self.stage """ if stage.HasAuthoredTimeCodeRange(): -> wrong: a stage might not have timeCodes authored, but its references may have. Using Usd.TimeCode.Default() in xform_cache.GetLocalTransformation(prim) won't fetch correct matrices for time_coded prims """ time = omni.timeline.get_timeline_interface().get_current_time() ret = Usd.TimeCode(omni.usd.get_frame_time_code(time, stage.GetTimeCodesPerSecond())) # or ret = Usd.TimeCode( time * stage.GetTimeCodesPerSecond() ) return ret def add_stage_event_fn(self, fn, event_type=-1): """ Doesn't depend on open stage and remains after closing-opening. Arg event_type = -1 to accept all, otherwise a single event of type omni.usd.StageEventType.*: (@Kit103) 0=SAVED 1=SAVE_FAILED 2=OPENING 3=OPENED 4=OPEN_FAILED 5=CLOSING 6=CLOSED 7=SELECTION_CHANGED 8=ASSETS_LOADED 9=ASSETS_LOAD_ABORTED 10=GIZMO_TRACKING_CHANGED 11=MDL_PARAM_LOADED 12=SETTINGS_LOADED 13=SETTINGS_SAVING 14=OMNIGRAPH_START_PLAY 15=OMNIGRAPH_STOP_PLAY 16=SIMULATION_START_PLAY 17=SIMULATION_STOP_PLAY 18=ANIMATION_START_PLAY 19=ANIMATION_STOP_PLAY 20=DIRTY_STATE_CHANGED """ event_type = int(event_type) if event_type not in self._stage_changed: sub = self._sub_stage_event(event_type) self._stage_changed[event_type] = [sub, set()] ch = self._stage_changed[event_type] ch[1].add(fn) def _sub_stage_event(self, event_type): sub_name = UsdHelper.STAGE_CHANGED_SUB_PREFIX + str(event_type) lamb = lambda ev: self._on_stage_event(ev, event_type) if event_type == -1: sub = self._ctx.get_stage_event_stream().create_subscription_to_pop(lamb, name=sub_name) else: sub = self._ctx.get_stage_event_stream().create_subscription_to_pop_by_type(event_type, lamb, name=sub_name) return sub def _on_stage_event(self, ev, 
target_event_type): # print("_on_stage_event", ev.type, target_event_type) if target_event_type in self._stage_changed: for fn in self._stage_changed[target_event_type][1]: fn(ev) def remove_stage_event_fn(self, fn, event_type=-1): """ Don't call from fn or will get: RuntimeError: Set changed size during iteration """ if event_type in self._stage_changed: ch = self._stage_changed[event_type] ch[1].discard(fn) def _on_stage_opened_refresh(self, ev): # print("_on_stage_opened_refresh", ev.type) def resub(): if self._stage_opened_refresh & 1: # print("resub _stage_changed") for event_type in self._stage_changed: ch = self._stage_changed[event_type] ch[0] = self._sub_stage_event(event_type) if self._stage_opened_refresh & 2 and self._stage_objects_changed is not None: # print("resub _stage_objects_changed") self._stage_objects_changed[0] = self._sub_stage_objects_changed() call_after_update(resub) def add_stage_objects_changed_fn(self, fn): # print("add_stage_objects_changed_fn") """ Depends on stage: if closed must call remove_stage_objects_changed_fn(), then on stage opened call add_stage_objects_changed_fn again. From https://graphics.pixar.com/usd/dev/api/class_usd_notice_1_1_objects_changed.html: Usd.Notice.ObjectsChanged: Object changes, either "resync" or "changed-info". "Resyncs" are potentially structural changes that invalidate entire subtrees of UsdObjects (including prims and properties). For example, if the path "/foo" is resynced, then all subpaths like "/foo/bar" and "/foo/bar.baz" may be arbitrarily changed. When a prim is resynced, say "/foo/bar", it might have been created or destroyed. In that case "/foo"'s list of children will have changed, but we do not consider "/foo" to be resynced. If we did, it would mean clients would have to consider all of "/foo/bar"'s siblings (and their descendants) to be resynced which might be egregious overinvalidation. 
In contrast, "changed-info" means that a nonstructural change has occurred, like an attribute value change or a value change to a metadata field not related to composition. This notice provides API for two client use-cases. Clients interested in testing whether specific objects are affected by the changes should use the AffectedObject() method (and the ResyncedObject() and ChangedInfoOnly() methods). Clients that wish to reason about all changes as a whole should use the GetResyncedPaths() and GetChangedInfoOnlyPaths() methods. fn(notice: Tf.notice) can call notice.GetChangedInfoOnlyPaths() """ if self._stage_objects_changed is None: # handler needs to be a method as Register won't hold reference to a local function listener = self._sub_stage_objects_changed() self._stage_objects_changed = [listener, set()] val = self._stage_objects_changed val[1].add(fn) # print("add") def _sub_stage_objects_changed(self): return Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_stage_objects_changed, self.stage) def _on_stage_objects_changed(self, notice, stage): if stage != self.stage or self._stage_objects_changed is None: return for fn in self._stage_objects_changed[1]: fn(notice) def remove_stage_objects_changed_fn(self, fn): # print("remove_stage_objects_changed_fn") if self._stage_objects_changed is not None: val = self._stage_objects_changed val[1].discard(fn) # print("discard") def get_selected_prim_paths(self): sel = self.get_selection() return sel.get_selected_prim_paths() def set_selected_prim_paths(self, paths, expand_in_stage=False): sel = self.get_selection() sel.set_selected_prim_paths(paths, expand_in_stage) def get_selection(self): return self._ctx.get_selection() def set_pickable(self, enabled, prim_path="/"): """If disabled, Kit will still display selection rects but nothing will be selected.""" self._ctx.set_pickable(prim_path, enabled) """ Timeline events stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() self._timeline_sub = 
stream.create_subscription_to_pop(self._on_timeline_event) 0=PLAY 1=PAUSE 2=STOP 3=CURRENT_TIME_CHANGED 4=CURRENT_TIME_TICKED 5=LOOP_MODE_CHANGED 6=START_TIME_CHANGED 7=END_TIME_CHANGED 8=TIME_CODE_PER_SECOND_CHANGED 9=AUTO_UPDATE_CHANGED 10=PREROLLING_CHANGED """
10,223
Python
28.80758
360
0.60002
NVIDIA-Omniverse/OmniIsaacGymEnvs/omniisaacgymenvs/scripts/random_policy.py
# Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import gym import hydra from omegaconf import DictConfig import os import time import numpy as np import torch import omniisaacgymenvs from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames from omniisaacgymenvs.utils.config_utils.path_utils import get_experience from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import * from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict from omniisaacgymenvs.utils.task_util import initialize_task @hydra.main(version_base=None, config_name="config", config_path="../cfg") def parse_hydra_configs(cfg: DictConfig): cfg_dict = omegaconf_to_dict(cfg) print_dict(cfg_dict) headless = cfg.headless render = not headless enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras # select kit app file experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.enable_recording, cfg.kit_app) env = VecEnvRLGames( headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream, enable_viewport=enable_viewport or cfg.enable_recording, experience=experience ) # parse experiment directory module_path = os.path.abspath(os.path.join(os.path.dirname(omniisaacgymenvs.__file__))) experiment_dir = os.path.join(module_path, "runs", cfg.train.params.config.name) # use gym RecordVideo wrapper for viewport recording if cfg.enable_recording: if cfg.recording_dir == '': videos_dir = os.path.join(experiment_dir, "videos") else: videos_dir = cfg.recording_dir video_interval = lambda step: step % cfg.recording_interval == 0 video_length = cfg.recording_length env.is_vector_env = True if env.metadata is None: env.metadata = {"render_modes": ["rgb_array"], "render_fps": cfg.recording_fps} else: env.metadata["render_modes"] = ["rgb_array"] env.metadata["render_fps"] = cfg.recording_fps env = gym.wrappers.RecordVideo( env, video_folder=videos_dir, step_trigger=video_interval, video_length=video_length ) # sets seed. 
if seed is -1 will pick a random one from omni.isaac.core.utils.torch.maths import set_seed cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic) cfg_dict["seed"] = cfg.seed task = initialize_task(cfg_dict, env) num_frames = 0 first_frame = True prev_time = time.time() while env.simulation_app.is_running(): if env.world.is_playing(): if first_frame: env.reset() prev_time = time.time() first_frame = False # get upper and lower bounds of action space, sample actions randomly on this interval action_high = env.action_space.high[0] action_low = env.action_space.low[0] actions = (action_high - action_low) * torch.rand(env.num_envs, env.action_space.shape[0], device=task.rl_device) - action_high if time.time() - prev_time >= 1: print("FPS:", num_frames, "FPS * num_envs:", env.num_envs * num_frames) num_frames = 0 prev_time = time.time() else: num_frames += 1 env.step(actions) else: env.world.step(render=render) env.simulation_app.close() if __name__ == "__main__": parse_hydra_configs()
5,069
Python
38.92126
139
0.688301
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/__init__.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. bl_info = { "name": "Omni Panel", "author": "NVIDIA Corporation", "version": (1, 0, 0), "blender": (3, 0, 0), "location": "View3D > Toolbar > Omniverse", "description": "Nvidia Omniverse bake materials for export to usd", "warning": "", "doc_url": "", "category": "Omniverse", } import bpy #Import classes from .material_bake.operators import (OBJECT_OT_omni_bake_mapbake, OBJECT_OT_omni_bake_bgbake_status, OBJECT_OT_omni_bake_bgbake_import, OBJECT_OT_omni_bake_bgbake_clear) from .ui import (OBJECT_PT_omni_bake_panel, OmniBakePreferences) from .particle_bake.operators import(MyProperties, PARTICLES_OT_omni_hair_bake) #Classes list for register #List of all classes that will be registered classes = ([OBJECT_OT_omni_bake_mapbake, OBJECT_PT_omni_bake_panel, OmniBakePreferences, OBJECT_OT_omni_bake_bgbake_status, OBJECT_OT_omni_bake_bgbake_import, OBJECT_OT_omni_bake_bgbake_clear, MyProperties, PARTICLES_OT_omni_hair_bake]) def ShowMessageBox(message = "", title = "Message Box", icon = 'INFO'): def draw(self, context): self.layout.label(text=message) bpy.context.window_manager.popup_menu(draw, title = title, icon = icon) 
#---------------------UPDATE FUNCTIONS-------------------------------------------- def prepmesh_update(self, context): if context.scene.prepmesh == False: context.scene.hidesourceobjects = False else: context.scene.hidesourceobjects = True def texture_res_update(self, context): if context.scene.texture_res == "0.5k": context.scene.imgheight = 1024/2 context.scene.imgwidth = 1024/2 context.scene.render.bake.margin = 6 elif context.scene.texture_res == "1k": context.scene.imgheight = 1024 context.scene.imgwidth = 1024 context.scene.render.bake.margin = 10 elif context.scene.texture_res == "2k": context.scene.imgheight = 1024*2 context.scene.imgwidth = 1024*2 context.scene.render.bake.margin = 14 elif context.scene.texture_res == "4k": context.scene.imgheight = 1024*4 context.scene.imgwidth = 1024*4 context.scene.render.bake.margin = 20 elif context.scene.texture_res == "8k": context.scene.imgheight = 1024*8 context.scene.imgwidth = 1024*8 context.scene.render.bake.margin = 32 def newUVoption_update(self, context): if bpy.context.scene.newUVoption == True: bpy.context.scene.prefer_existing_sbmap = False def all_maps_update(self,context): bpy.context.scene.selected_col = True bpy.context.scene.selected_metal = True bpy.context.scene.selected_rough = True bpy.context.scene.selected_normal = True bpy.context.scene.selected_trans = True bpy.context.scene.selected_transrough = True bpy.context.scene.selected_emission = True bpy.context.scene.selected_specular = True bpy.context.scene.selected_alpha = True bpy.context.scene.selected_sss = True bpy.context.scene.selected_ssscol = True #-------------------END UPDATE FUNCTIONS---------------------------------------------- def register(): #Register classes global classes for cls in classes: bpy.utils.register_class(cls) global bl_info version = bl_info["version"] version = str(version[0]) + str(version[1]) + str(version[2]) OBJECT_PT_omni_bake_panel.version = f"{str(version[0])}.{str(version[1])}.{str(version[2])}" #Global 
variables des = "Texture Resolution" bpy.types.Scene.texture_res = bpy.props.EnumProperty(name="Texture Resolution", default="1k", description=des, items=[ ("0.5k", "0.5k", f"Texture Resolution of {1024/2} x {1024/2}"), ("1k", "1k", f"Texture Resolution of 1024 x 1024"), ("2k", "2k", f"Texture Resolution of {1024*2} x {1024*2}"), ("4k", "4k", f"Texture Resolution of {1024*4} x {1024*4}"), ("8k", "8k", f"Texture Resolution of {1024*8} x {1024*8}") ], update = texture_res_update) des = "Distance to cast rays from target object to selected object(s)" bpy.types.Scene.ray_distance = bpy.props.FloatProperty(name="Ray Distance", default = 0.2, description=des) bpy.types.Scene.ray_warning_given = bpy.props.BoolProperty(default = False) #--- MAPS ----------------------- des = "Bake all maps (Diffuse, Metal, SSS, SSS Col. Roughness, Normal, Transmission, Transmission Roughness, Emission, Specular, Alpha, Displacement)" bpy.types.Scene.all_maps = bpy.props.BoolProperty(name="Bake All Maps", default = True, description=des, update = all_maps_update) des = "Bake a PBR Colour map" bpy.types.Scene.selected_col = bpy.props.BoolProperty(name="Diffuse", default = True, description=des) des = "Bake a PBR Metalness map" bpy.types.Scene.selected_metal = bpy.props.BoolProperty(name="Metal", description=des, default= True) des = "Bake a PBR Roughness or Glossy map" bpy.types.Scene.selected_rough = bpy.props.BoolProperty(name="Roughness", description=des, default= True) des = "Bake a Normal map" bpy.types.Scene.selected_normal = bpy.props.BoolProperty(name="Normal", description=des, default= True) des = "Bake a PBR Transmission map" bpy.types.Scene.selected_trans = bpy.props.BoolProperty(name="Transmission", description=des, default= True) des = "Bake a PBR Transmission Roughness map" bpy.types.Scene.selected_transrough = bpy.props.BoolProperty(name="TR Rough", description=des, default= True) des = "Bake an Emission map" bpy.types.Scene.selected_emission = 
bpy.props.BoolProperty(name="Emission", description=des, default= True) des = "Bake a Subsurface map" bpy.types.Scene.selected_sss = bpy.props.BoolProperty(name="SSS", description=des, default= True) des = "Bake a Subsurface colour map" bpy.types.Scene.selected_ssscol = bpy.props.BoolProperty(name="SSS Col", description=des, default= True) des = "Bake a Specular/Reflection map" bpy.types.Scene.selected_specular = bpy.props.BoolProperty(name="Specular", description=des, default= True) des = "Bake a PBR Alpha map" bpy.types.Scene.selected_alpha = bpy.props.BoolProperty(name="Alpha", description=des, default= True) #------------------------------------------UVs----------------------------------------- des = "Use Smart UV Project to create a new UV map for your objects (or target object if baking to a target). See Blender Market FAQs for more details" bpy.types.Scene.newUVoption = bpy.props.BoolProperty(name="New UV(s)", description=des, update=newUVoption_update, default= False) des = "If one exists for the object being baked, use any existing UV maps called 'OmniBake' for baking (rather than the active UV map)" bpy.types.Scene.prefer_existing_sbmap = bpy.props.BoolProperty(name="Prefer existing UV maps called OmniBake", description=des) des = "New UV Method" bpy.types.Scene.newUVmethod = bpy.props.EnumProperty(name="New UV Method", default="SmartUVProject_Individual", description=des, items=[ ("SmartUVProject_Individual", "Smart UV Project (Individual)", "Each object gets a new UV map using Smart UV Project")]) des = "Margin between islands to use for Smart UV Project" bpy.types.Scene.unwrapmargin = bpy.props.FloatProperty(name="Margin", default=0.03, description=des) des = "Bake to normal UVs" bpy.types.Scene.uv_mode = bpy.props.EnumProperty(name="UV Mode", default="normal", description=des, items=[ ("normal", "Normal", "Normal UV maps")]) #--------------------------------Prep/CleanUp---------------------------------- des = "Create a copy of your selected objects in 
Blender (or target object if baking to a target) and apply the baked textures to it. If you are baking in the background, this happens after you import" bpy.types.Scene.prepmesh = bpy.props.BoolProperty(name="Copy objects and apply bakes", default = True, description=des, update=prepmesh_update) des = "Hide the source object that you baked from in the viewport after baking. If you are baking in the background, this happens after you import" bpy.types.Scene.hidesourceobjects = bpy.props.BoolProperty(name="Hide source objects after bake", default = True, description=des) des = "Set the height of the baked image that will be produced" bpy.types.Scene.imgheight = bpy.props.IntProperty(name="Height", default=1024, description=des) des = "Set the width of the baked image that will be produced" bpy.types.Scene.imgwidth = bpy.props.IntProperty(name="Width", default=1024, description=des) des="Name to apply to these bakes (is incorporated into the bakes file name, provided you have included this in the image format string - see addon preferences). NOTE: To maintain compatibility, only MS Windows acceptable characters will be used" bpy.types.Scene.batchName = bpy.props.StringProperty(name="Batch name", description=des, default="Bake1", maxlen=20) #---------------------Where To Bake?------------------------------------------- bpy.types.Scene.bgbake = bpy.props.EnumProperty(name="Background Bake", default="fg", items=[ ("fg", "Foreground", "Perform baking in the foreground. 
Blender will lock up until baking is complete"), ("bg", "Background", "Perform baking in the background, leaving you free to continue to work in Blender while the baking is being carried out") ]) #---------------------Filehanding & Particles------------------------------------------ bpy.types.Scene.particle_options = bpy.props.PointerProperty(type= MyProperties) #-------------------Additional Shaders------------------------------------------- des = "Allows for use of Add, Diffuse, Glossy, Glass, Refraction, Transparent, Anisotropic Shaders. May cause inconsistent results" bpy.types.Scene.more_shaders = bpy.props.BoolProperty(name="Use Additional Shader Types", default=False, description=des) def unregister(): #User preferences global classes for cls in classes: bpy.utils.unregister_class(cls) del bpy.types.Scene.particle_options del bpy.types.Scene.more_shaders del bpy.types.Scene.newUVoption del bpy.types.Scene.prepmesh del bpy.types.Scene.unwrapmargin del bpy.types.Scene.texture_res del bpy.types.Scene.hidesourceobjects del bpy.types.Scene.batchName del bpy.types.Scene.bgbake del bpy.types.Scene.imgheight del bpy.types.Scene.imgwidth
11,397
Python
47.918455
250
0.675529
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/ui.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy from .particle_bake.operators import* from .material_bake.background_bake import bgbake_ops from os.path import join, dirname import bpy.utils.previews #---------------Custom ICONs---------------------- def get_icons_directory(): icons_directory = join(dirname(__file__), "icons") return icons_directory #------------------------PANEL--------------------- class OBJECT_PT_omni_bake_panel(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = "Omniverse" bl_label = "NVIDIA OMNIVERSE" version = "0.0.0" #retrieve icons icons = bpy.utils.previews.new() icons_directory = get_icons_directory() icons.load("OMNIBLEND", join(icons_directory, "BlenderOMNI.png"), 'IMAGE') icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE') icons.load("BAKE",join(icons_directory, "Oven.png"), 'IMAGE') #draw the panel def draw(self, context): layout = self.layout #--------File Handling------------------- layout.label(text="Omniverse", icon_value=self.icons["OMNI"].icon_id) impExpCol = self.layout.column(align=True) impExpCol.label(text= "File Handling", icon='FILEBROWSER') impExpCol.operator('wm.usd_import', text='Import 
USD', icon='IMPORT') impExpCol.operator('wm.usd_export', text='Export USD', icon='EXPORT') #--------Particle Collection Instancing------------------- layout.separator() particleOptions = context.scene.particle_options particleCol = self.layout.column(align=True) particleCol.label(text = "Omni Particles", icon='PARTICLES') box = particleCol.box() column= box.column(align= True) column.prop(particleOptions, "deletePSystemAfterBake") row = column.row() row.prop(particleOptions, "animateData") if particleOptions.animateData: row = column.row(align=True) row.prop(particleOptions, "selectedStartFrame") row.prop(particleOptions, "selectedEndFrame") row = column.row() row.enabled = False row.label(text="Increased Calculation Time", icon= 'ERROR') row = column.row() row.scale_y = 1.5 row.operator('omni.hair_bake', text='Convert', icon='MOD_PARTICLE_INSTANCE') #Does not update while running. Set in "particle_bake.operators.py" # row = column.row() # row.scale_y = 1.2 # row.prop(particleOptions, "progressBar") #--------PBR Bake Settings------------------- layout.separator() column = layout.column(align= True) header = column.row() header.label(text = "Material Bake", icon = 'UV_DATA') box = column.box() row = box.row() if context.scene.all_maps == True: row.prop(context.scene, "all_maps", icon = 'CHECKBOX_HLT') if context.scene.all_maps == False: row.prop(context.scene, "all_maps", icon = 'CHECKBOX_DEHLT') column = box.column(align= True) row = column.row() row.prop(context.scene, "selected_col") row.prop(context.scene, "selected_metal") row = column.row() row.prop(context.scene, "selected_sss") row.prop(context.scene, "selected_ssscol") row = column.row() row.prop(context.scene, "selected_rough") row.prop(context.scene, "selected_normal") row = column.row() row.prop(context.scene, "selected_trans") row.prop(context.scene, "selected_transrough") row = column.row() row.prop(context.scene, "selected_emission") row.prop(context.scene, "selected_specular") row = column.row() 
row.prop(context.scene, "selected_alpha") row = column.row() colm = box.column(align=True) colm.prop(context.scene, "more_shaders") row = colm.row() row.enabled = False if context.scene.more_shaders: row.label(text="Inconsistent Results", icon= 'ERROR') #--------Texture Settings------------------- row = box.row() row.label(text="Texture Resolution:") row.scale_y = 0.5 row = box.row() row.prop(context.scene, "texture_res", expand=True) row.scale_y = 1 if context.scene.texture_res == "8k" or context.scene.texture_res == "4k": row = box.row() row.enabled = False row.label(text="Long Bake Times", icon= 'ERROR') #--------UV Settings------------------- column = box.column(align = True) row = column.row() row.prop(context.scene, "newUVoption") row.prop(context.scene, "unwrapmargin") #--------Other Settings------------------- column= box.column(align=True) row = column.row() if bpy.context.scene.bgbake == "fg": text = "Copy objects and apply bakes" else: text = "Copy objects and apply bakes (after import)" row.prop(context.scene, "prepmesh", text=text) if (context.scene.prepmesh == True): if bpy.context.scene.bgbake == "fg": text = "Hide source objects after bake" else: text = "Hide source objects after bake (after import)" row = column.row() row.prop(context.scene, "hidesourceobjects", text=text) #-------------Buttons------------------------- row = box.row() row.scale_y = 1.5 row.operator("object.omni_bake_mapbake", icon_value=self.icons["BAKE"].icon_id) row = column.row() row.scale_y = 1 row.prop(context.scene, "bgbake", expand=True) if context.scene.bgbake == "bg": row = column.row(align= True) # - BG status button col = row.column() if len(bgbake_ops.bgops_list) == 0: enable = False icon = "TIME" else: enable = True icon = "TIME" col.operator("object.omni_bake_bgbake_status", text="", icon=icon) col.enabled = enable # - BG import button col = row.column() if len(bgbake_ops.bgops_list_finished) != 0: enable = True icon = "IMPORT" else: enable = False icon = "IMPORT" 
col.operator("object.omni_bake_bgbake_import", text="", icon=icon) col.enabled = enable #BG erase button col = row.column() if len(bgbake_ops.bgops_list_finished) != 0: enable = True icon = "TRASH" else: enable = False icon = "TRASH" col.operator("object.omni_bake_bgbake_clear", text="", icon=icon) col.enabled = enable row.alignment = 'CENTER' row.label(text=f"Running {len(bgbake_ops.bgops_list)} | Finished {len(bgbake_ops.bgops_list_finished)}") #-------------Other material options------------------------- if len(bpy.context.selected_objects) != 0 and bpy.context.active_object != None: if bpy.context.active_object.select_get() and bpy.context.active_object.type == "MESH": layout.separator() column= layout.column(align= True) column.label(text= "Convert Material to:", icon= 'SHADING_RENDERED') box = column.box() materialCol = box.column(align=True) materialCol.operator('universalmaterialmap.create_template_omnipbr', text='OmniPBR') materialCol.operator('universalmaterialmap.create_template_omniglass', text='OmniGlass') class OmniBakePreferences(bpy.types.AddonPreferences): # this must match the add-on name, use '__package__' # when defining this in a submodule of a python package. 
bl_idname = __package__ img_name_format: bpy.props.StringProperty(name="Image format string", default="%OBJ%_%BATCH%_%BAKEMODE%_%BAKETYPE%") #Aliases diffuse_alias: bpy.props.StringProperty(name="Diffuse", default="diffuse") metal_alias: bpy.props.StringProperty(name="Metal", default="metalness") roughness_alias: bpy.props.StringProperty(name="Roughness", default="roughness") glossy_alias: bpy.props.StringProperty(name="Glossy", default="glossy") normal_alias: bpy.props.StringProperty(name="Normal", default="normal") transmission_alias: bpy.props.StringProperty(name="Transmission", default="transparency") transmissionrough_alias: bpy.props.StringProperty(name="Transmission Roughness", default="transparencyroughness") clearcoat_alias: bpy.props.StringProperty(name="Clearcost", default="clearcoat") clearcoatrough_alias: bpy.props.StringProperty(name="Clearcoat Roughness", default="clearcoatroughness") emission_alias: bpy.props.StringProperty(name="Emission", default="emission") specular_alias: bpy.props.StringProperty(name="Specular", default="specular") alpha_alias: bpy.props.StringProperty(name="Alpha", default="alpha") sss_alias: bpy.props.StringProperty(name="SSS", default="sss") ssscol_alias: bpy.props.StringProperty(name="SSS Colour", default="ssscol") @classmethod def reset_img_string(self): prefs = bpy.context.preferences.addons[__package__].preferences prefs.property_unset("img_name_format") bpy.ops.wm.save_userpref()
10,922
Python
37.192308
117
0.568211
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/particle_bake/__init__.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
858
Python
44.210524
74
0.7331
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/particle_bake/operators.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import time import bpy import numpy as np class MyProperties(bpy.types.PropertyGroup): deletePSystemAfterBake: bpy.props.BoolProperty( name = "Delete PS after converting", description = "Delete selected particle system after conversion", default = False ) progressBar: bpy.props.StringProperty( name = "Progress", description = "Progress of Particle Conversion", default = "RUNNING" ) animateData: bpy.props.BoolProperty( name = "Keyframe Animation", description = "Add a keyframe for each particle for each of the specified frames", default = False ) selectedStartFrame: bpy.props.IntProperty( name = "Start", description = "Frame to begin keyframes", default = 1 ) selectedEndFrame: bpy.props.IntProperty( name = "End", description = "Frame to stop keyframes", default = 3 ) # def fixEndFrame(): # particleOptions = context.particle_options # particleOptions.selectedEndFrame = particleOptions.selectedStartFrame particleSystemVisibility = [] particleSystemRender = [] def getOriginalModifiers(parent): particleSystemVisibility.clear() particleSystemRender.clear() for mod in parent.modifiers: if mod.type == 'PARTICLE_SYSTEM': 
particleSystemVisibility.append(mod.show_viewport) particleSystemRender.append(mod.show_render) def restoreOriginalModifiers(parent): count = 0 for mod in parent.modifiers: if mod.type == 'PARTICLE_SYSTEM': mod.show_viewport = particleSystemVisibility[count] mod.show_render = particleSystemRender[count] count+=1 def hideOtherModifiers(parent, countH): count = 0 for mod in parent.modifiers: if mod.type == 'PARTICLE_SYSTEM': if countH != count: mod.show_viewport = False count += 1 def particleSystemVisible(parent, countP): countS = 0 for mod in parent.modifiers: if mod.type == 'PARTICLE_SYSTEM': if countP == countS: return mod.show_viewport else: countS += 1 # Omni Hair Bake class PARTICLES_OT_omni_hair_bake(bpy.types.Operator): """Convert blender particles for Omni scene instancing""" bl_idname = "omni.hair_bake" bl_label = "Omni Hair Bake" bl_options = {'REGISTER', 'UNDO'} # create undo state def execute(self, context): particleOptions = context.scene.particle_options startTime= time.time() print() print("____BEGINING PARTICLE CONVERSION______") #Deselect Non-meshes for obj in bpy.context.selected_objects: if obj.type != "MESH": obj.select_set(False) print("not mesh") #Do we still have an active object? 
if bpy.context.active_object == None: #Pick arbitary bpy.context.view_layer.objects.active = bpy.context.selected_objects[0] for parentObj in bpy.context.selected_objects: print() print("--Staring " + parentObj.name + ":") getOriginalModifiers(parentObj) countH = 0 countP = 0 countPS = 0 showEmmiter = False hasPS = False for currentPS in parentObj.particle_systems: hideOtherModifiers(parentObj, countH) countH+=1 hasVisible = particleSystemVisible(parentObj, countP) countP+=1 if currentPS != None and hasVisible: hasPS = True bpy.ops.object.select_all(action='DESELECT') renderType = currentPS.settings.render_type emmitOrHair = currentPS.settings.type if parentObj.show_instancer_for_viewport == True: showEmmiter = True if renderType == 'OBJECT' or renderType == 'COLLECTION': count = 0 listInst = [] listInstScale = [] # For Object Instances if renderType == 'OBJECT': instObj = currentPS.settings.instance_object # Duplicate Instanced Object dupInst = instObj.copy() bpy.context.collection.objects.link(dupInst) dupInst.select_set(True) dupInst.location = (0,0,0) bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="INST_"+str(dupInst.name)) dupInst.select_set(False) count += 1 listInst.append(dupInst) listInstScale.append(instObj.scale) # For Collection Instances if renderType == 'COLLECTION': instCol = currentPS.settings.instance_collection.objects countW = 0 weight = 1 for obj in instCol: # Duplicate Instanced Object dupInst = obj.copy() bpy.context.collection.objects.link(dupInst) dupInst.select_set(True) dupInst.location = (0,0,0) bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="INST_"+str(dupInst.name)) dupInst.select_set(False) if parentObj.particle_systems.active.settings.use_collection_count: weight = currentPS.settings.instance_weights[countW].count print("Instance Count: " + str(weight)) for i in range(weight): count += 1 listInst.append(dupInst) listInstScale.append(obj.scale) countW += 1 
# For Path Instances *NOT SUPPORTED if renderType == 'PATH': print("path no good") return {'FINISHED'} if renderType == 'NONE': print("no instances") return {'FINISHED'} #DOES NOTHING RIGHT NOW #if overwriteExsisting: #bpy.ops.outliner.delete(hierarchy=True) # Variables parentObj.select_set(True) parentCollection = parentObj.users_collection[0] nameP = parentObj.particle_systems[countPS].name # get name of object's particle system # Create Empty as child o = bpy.data.objects.new( "empty", None) o.name = "EM_" + nameP o.parent = parentObj parentCollection.objects.link( o ) # FOR ANIMATED EMITTER DATA if particleOptions.animateData and emmitOrHair == 'EMITTER': print("--ANIMATED EMITTER--") #Prep for Keyframing collectionInstances = [] # Calculate Dependency Graph degp = bpy.context.evaluated_depsgraph_get() # Evaluate the depsgraph (Important step) particle_systems = parentObj.evaluated_get(degp).particle_systems # All particles of selected particle system activePS = particle_systems[countPS] particles = activePS.particles # Total Particles totalParticles = len(particles) #Currently does NOT work # if activePS.type == 'HAIR': # hairLength = particles[0].hair_length # print(hairLength) # print(bpy.types.ParticleHairKey.co_object(parentObj,parentObj.modifiers[0], particles[0])) # key = particles[0].hair_keys # print(key) # coo = key.co # print(coo) # print(particles[0].location) #Beginings of supporting use random, requires more thought # obInsttt = parentObj.evaluated_get(degp).object_instances # for i in obInsttt: # obj = i.object # print(obj.name) # for obj in degp.object_instances: # print(obj.instance_object) # print(obj.particle_system) # Handle instances for construction of scene collections **Fast** for i in range(totalParticles): childObj = particles[i] calculateChild = False if childObj.birth_time <= particleOptions.selectedEndFrame and childObj.die_time > particleOptions.selectedStartFrame: calculateChild = True if calculateChild: modInst = i % count #Works 
for "use count" but not "pick random" dupColName = str(listInst[modInst].users_collection[0].name) #Create Collection Instance source_collection = bpy.data.collections[dupColName] instance_obj = bpy.data.objects.new( name= "Inst_" + listInst[modInst].name + "." + str(i), object_data=None ) instance_obj.empty_display_type = 'SINGLE_ARROW' instance_obj.empty_display_size = .1 instance_obj.instance_collection = source_collection instance_obj.instance_type = 'COLLECTION' parentCollection.objects.link(instance_obj) instance_obj.parent = o instance_obj.matrix_parent_inverse = o.matrix_world.inverted() collectionInstances.append(instance_obj) print("Using " + str(len(collectionInstances))) print("Out of " + str(totalParticles) + " instances") collectionCount = len(collectionInstances) startFrame = particleOptions.selectedStartFrame endFrame = particleOptions.selectedEndFrame #Do we need to swap start and end frame? if particleOptions.selectedStartFrame > particleOptions.selectedEndFrame: endFrame = startFrame startFrame = particleOptions.selectedEndFrame for frame in range(startFrame, endFrame + 1): print("frame = " + str(frame)) bpy.context.scene.frame_current = frame # Calculate Dependency Graph for each frame degp = bpy.context.evaluated_depsgraph_get() particle_systems = parentObj.evaluated_get(degp).particle_systems particles = particle_systems[countPS].particles for i in range(collectionCount): activeCol = collectionInstances[i] activeDup = particles[i] #Keyframe Visibility, Scale, Location, and Rotation if activeDup.alive_state == 'UNBORN' or activeDup.alive_state == 'DEAD': activeCol.scale = (0,0,0) activeCol.keyframe_insert(data_path='scale') activeCol.hide_viewport = True activeCol.hide_render = True activeCol.keyframe_insert("hide_viewport") activeCol.keyframe_insert("hide_render") else: activeCol.hide_viewport = False activeCol.hide_render = False scale = activeDup.size activeCol.location = activeDup.location activeCol.rotation_mode = 'QUATERNION' 
activeCol.rotation_quaternion = activeDup.rotation activeCol.rotation_mode = 'XYZ' activeCol.scale = (scale, scale, scale) activeCol.keyframe_insert(data_path='location') activeCol.keyframe_insert(data_path='rotation_euler') activeCol.keyframe_insert(data_path='scale') activeCol.keyframe_insert("hide_viewport") activeCol.keyframe_insert("hide_render") # FOR ANIMATED HAIR DATA elif particleOptions.animateData and emmitOrHair == 'HAIR': print("--ANIMATED HAIR--") #Prep for Keyframing bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles dups = bpy.context.selected_objects lengthDups = len(dups) collectionInstances = [] # Handle instances for construction of scene collections **Fast** for i in range(lengthDups): childObj = dups.pop(0) modInst = i % count #Works for "use count" but not "pick random" dupColName = str(listInst[modInst].users_collection[0].name) #Create Collection Instance source_collection = bpy.data.collections[dupColName] instance_obj = bpy.data.objects.new( name= "Inst_" + childObj.name, object_data=None ) instance_obj.empty_display_type = 'SINGLE_ARROW' instance_obj.empty_display_size = .1 instance_obj.instance_collection = source_collection instance_obj.instance_type = 'COLLECTION' parentCollection.objects.link(instance_obj) instance_obj.parent = o bpy.data.objects.remove(childObj, do_unlink=True) collectionInstances.append(instance_obj) print(str(len(collectionInstances)) + " instances") collectionCount = len(collectionInstances) startFrame = particleOptions.selectedStartFrame endFrame = particleOptions.selectedEndFrame #Do we need to swap start and end frame? 
if particleOptions.selectedStartFrame > particleOptions.selectedEndFrame: endFrame = startFrame startFrame = particleOptions.selectedEndFrame for frame in range(startFrame, endFrame + 1): print("frame = " + str(frame)) bpy.context.scene.frame_current = frame # Calculate hairs for each frame parentObj.select_set(True) bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles tempdups = bpy.context.selected_objects for i in range(collectionCount): activeDup = tempdups.pop(0) activeCol = collectionInstances[i] #Keyframe Scale, Location, and Rotation activeCol.location = activeDup.location activeCol.rotation_euler = activeDup.rotation_euler activeCol.scale = activeDup.scale activeCol.keyframe_insert(data_path='location') activeCol.keyframe_insert(data_path='rotation_euler') activeCol.keyframe_insert(data_path='scale') bpy.data.objects.remove(activeDup, do_unlink=True) # FOR SINGLE FRAME CONVERSION else: print("--SINGLE FRAME--") bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True) # bake particles dups = bpy.context.selected_objects lengthDups = len(dups) # Handle instances for construction of scene collections **Fast** for i in range(lengthDups): childObj = dups.pop(0) modInst = i % count dupColName = str(listInst[modInst].users_collection[0].name) loc=childObj.location rot=childObj.rotation_euler newScale = np.divide(childObj.scale, listInstScale[modInst]) #Create Collection Instance source_collection = bpy.data.collections[dupColName] instance_obj = bpy.data.objects.new( name= "Inst_" + childObj.name, object_data=None ) instance_obj.empty_display_type = 'SINGLE_ARROW' instance_obj.empty_display_size = .1 instance_obj.instance_collection = source_collection instance_obj.instance_type = 'COLLECTION' instance_obj.location = loc instance_obj.rotation_euler = rot instance_obj.scale = newScale parentCollection.objects.link(instance_obj) instance_obj.parent = o bpy.data.objects.remove(childObj, do_unlink=True) 
for obj in listInst: bpy.context.view_layer.layer_collection.children[obj.users_collection[0].name].exclude = True #Make parent object active object again parentObj.select_set(True) bpy.context.view_layer.objects.active = parentObj else: print("Must be object or collection instance") else: print("Object has no active particle system") restoreOriginalModifiers(parentObj) countPS += 1 #Handle PS after converting if particleOptions.deletePSystemAfterBake: if showEmmiter == False and hasPS == True: bpy.context.active_object.hide_render = True bpy.context.active_object.hide_set(True) countI = 0 for ps in range(len(parentObj.particle_systems)): if particleSystemVisibility[ps] == True: parentObj.particle_systems.active_index = countI bpy.ops.object.particle_system_remove() else: countI+=1 else: countI = 0 for mod in parentObj.modifiers: if mod.type == 'PARTICLE_SYSTEM': mod.show_viewport = False if particleSystemVisibility[countI] == True: mod.show_render = False countI+=1 print ("My program took", time.time() - startTime, " seconds to run") # run time return {'FINISHED'}
23,439
Python
46.258064
150
0.462477
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/material_setup.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy from . import functions from .data import MasterOperation def find_node_from_label(label, nodes): for node in nodes: if node.label == label: return node return False def find_isocket_from_identifier(idname, node): for inputsocket in node.inputs: if inputsocket.identifier == idname: return inputsocket return False def find_osocket_from_identifier(idname, node): for outputsocket in node.outputs: if outputsocket.identifier == idname: return outputsocket return False def make_link(f_node_label, f_node_ident, to_node_label, to_node_ident, nodetree): fromnode = find_node_from_label(f_node_label, nodetree.nodes) if(fromnode == False): return False fromsocket = find_osocket_from_identifier(f_node_ident, fromnode) tonode = find_node_from_label(to_node_label, nodetree.nodes) if(tonode == False): return False tosocket = find_isocket_from_identifier(to_node_ident, tonode) nodetree.links.new(fromsocket, tosocket) return True def wipe_labels(nodes): for node in nodes: node.label = "" def get_image_from_tag(thisbake, objname): current_bake_op = MasterOperation.current_bake_operation global_mode = current_bake_op.bake_mode objname = 
functions.untrunc_if_needed(objname) batch_name = bpy.context.scene.batchName result = [] result = [img for img in bpy.data.images if\ ("SB_objname" in img and img["SB_objname"] == objname) and\ ("SB_batch" in img and img["SB_batch"] == batch_name) and\ ("SB_globalmode" in img and img["SB_globalmode"] == global_mode) and\ ("SB_thisbake" in img and img["SB_thisbake"] == thisbake)\ ] if len(result) > 0: return result[0] functions.printmsg(f"ERROR: No image with matching tag ({thisbake}) found for object {objname}") return False def create_principled_setup(nodetree, obj): functions.printmsg("Creating principled material") nodes = nodetree.nodes obj_name = obj.name.replace("_OmniBake", "") obj.active_material.cycles.displacement_method = 'BOTH' #First we wipe out any existing nodes for node in nodes: nodes.remove(node) # Node Frame node = nodes.new("NodeFrame") node.location = (0,0) node.use_custom_color = True node.color = (0.149763, 0.214035, 0.0590617) #Now create the Principled BSDF pnode = nodes.new("ShaderNodeBsdfPrincipled") pnode.location = (-25, 335) pnode.label = "pnode" pnode.use_custom_color = True pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467) pnode.parent = nodes["Frame"] #And the output node node = nodes.new("ShaderNodeOutputMaterial") node.location = (500, 200) node.label = "monode" node.show_options = False node.parent = nodes["Frame"] #----------------------------------------------------------------- #Node Image texture types Types if(bpy.context.scene.selected_col): image = get_image_from_tag("diffuse", obj_name) node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 250) node.label = "col_tex" node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_sss): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 210) node.label = "sss_tex" image = get_image_from_tag("sss", obj_name) node.image = image node.parent = nodes["Frame"] 
if(bpy.context.scene.selected_ssscol): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 170) node.label = "ssscol_tex" image = get_image_from_tag("ssscol", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_metal): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 130) node.label = "metal_tex" image = get_image_from_tag("metalness", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_specular): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 90) node.label = "specular_tex" image = get_image_from_tag("specular", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_rough): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 50) node.label = "roughness_tex" image = get_image_from_tag("roughness", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_trans): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -90) node.label = "transmission_tex" image = get_image_from_tag("transparency", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_transrough): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -130) node.label = "transmissionrough_tex" image = get_image_from_tag("transparencyroughness", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_emission): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -170) node.label = "emission_tex" image = get_image_from_tag("emission", obj_name) node.image = image node.parent = nodes["Frame"] if(bpy.context.scene.selected_alpha): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -210) node.label = "alpha_tex" image = get_image_from_tag("alpha", obj_name) node.image = image node.parent = 
nodes["Frame"] if(bpy.context.scene.selected_normal): node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -318.7) node.label = "normal_tex" image = get_image_from_tag("normal", obj_name) node.image = image node.parent = nodes["Frame"] #----------------------------------------------------------------- # Additional normal map node for normal socket if(bpy.context.scene.selected_normal): node = nodes.new("ShaderNodeNormalMap") node.location = (-220, -240) node.label = "normalmap" node.show_options = False node.parent = nodes["Frame"] #----------------------------------------------------------------- make_link("emission_tex", "Color", "pnode", "Emission", nodetree) make_link("col_tex", "Color", "pnode", "Base Color", nodetree) make_link("metal_tex", "Color", "pnode", "Metallic", nodetree) make_link("roughness_tex", "Color", "pnode", "Roughness", nodetree) make_link("transmission_tex", "Color", "pnode", "Transmission", nodetree) make_link("transmissionrough_tex", "Color", "pnode", "Transmission Roughness", nodetree) make_link("normal_tex", "Color", "normalmap", "Color", nodetree) make_link("normalmap", "Normal", "pnode", "Normal", nodetree) make_link("specular_tex", "Color", "pnode", "Specular", nodetree) make_link("alpha_tex", "Color", "pnode", "Alpha", nodetree) make_link("sss_tex", "Color", "pnode", "Subsurface", nodetree) make_link("ssscol_tex", "Color", "pnode", "Subsurface Color", nodetree) make_link("pnode", "BSDF", "monode", "Surface", nodetree) #--------------------------------------------------- wipe_labels(nodes) node = nodes["Frame"] node.label = "OMNI PBR"
8,828
Python
33.088803
100
0.608518
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/data.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. from .bake_operation import bakestolist class MasterOperation: current_bake_operation = None total_bake_operations = 0 this_bake_operation_num = 0 orig_UVs_dict = {} baked_textures = [] prepared_mesh_objects = [] batch_name = "" orig_objects = [] orig_active_object = "" orig_sample_count = 0 @staticmethod def clear(): # Master variables called throughout bake process MasterOperation.orig_UVs_dict = {} MasterOperation.total_bake_operations = 0 MasterOperation.current_bake_operation = None MasterOperation.this_bake_operation_num = 0 MasterOperation.prepared_mesh_objects = [] MasterOperation.baked_textures = [] MasterOperation.batch_name = "" # Variables to reset your scene to what it was before bake. 
MasterOperation.orig_objects = [] MasterOperation.orig_active_object = "" MasterOperation.orig_sample_count = 0 return True class BakeOperation: #Constants PBR = "pbr" def __init__(self): #Mapping of object name to active UVs self.bake_mode = BakeOperation.PBR #So the example in the user prefs will work self.bake_objects = [] self.active_object = None #normal self.uv_mode = "normal" #pbr stuff self.pbr_selected_bake_types = [] def assemble_pbr_bake_list(self): self.pbr_selected_bake_types = bakestolist()
2,334
Python
28.935897
86
0.667095
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/operators.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import bpy
import sys
import subprocess
import os
from .bake_operation import BakeStatus, bakestolist
from .data import MasterOperation, BakeOperation
from . import functions
from . import bakefunctions
from .background_bake import bgbake_ops
from pathlib import Path
import tempfile


class OBJECT_OT_omni_bake_mapbake(bpy.types.Operator):
    """Start the baking process"""
    bl_idname = "object.omni_bake_mapbake"
    bl_label = "Bake"
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Run (or spawn) the bake: foreground, background subprocess, or
        headless (--background) mode, depending on scene settings."""

        def commence_bake(needed_bake_modes):
            # Actually performs the bake for every requested mode.
            # Populates BakeStatus / MasterOperation, then runs each
            # BakeOperation in turn via bakefunctions.doBake().

            # Prepare the BakeStatus tracker for progress bar
            num_of_objects = 0
            num_of_objects = len(bpy.context.selected_objects)

            total_maps = 0
            for need in needed_bake_modes:
                if need == BakeOperation.PBR:
                    # one map per selected bake type, per selected object
                    total_maps += (bakestolist(justcount=True) * num_of_objects)

            BakeStatus.total_maps = total_maps

            # Clear the MasterOperation stuff
            MasterOperation.clear()

            # Need to know the total operations
            MasterOperation.total_bake_operations = len(needed_bake_modes)

            # Master list of all ops
            bops = []

            for need in needed_bake_modes:
                # Create operation
                bop = BakeOperation()

                # Set master level attributes
                # -------------------------------
                bop.bake_mode = need
                # -------------------------------

                bops.append(bop)
                functions.printmsg(f"Created operation for {need}")

            # Run queued operations
            for bop in bops:
                MasterOperation.this_bake_operation_num += 1
                MasterOperation.current_bake_operation = bop
                if bop.bake_mode == BakeOperation.PBR:
                    functions.printmsg("Running PBR bake")
                    bakefunctions.doBake()

            return True

        ######################TEMP###############################################
        # Only PBR mode exists today; kept as a list for future bake modes.
        needed_bake_modes = []
        needed_bake_modes.append(BakeOperation.PBR)

        # Clear the progress stuff
        BakeStatus.current_map = 0
        BakeStatus.total_maps = 0

        # If we have been called in background mode, just get on with it.  Checks should be done.
        if "--background" in sys.argv:
            if "OmniBake_Bakes" in bpy.data.collections:
                # Remove any prior baked objects
                bpy.data.collections.remove(bpy.data.collections["OmniBake_Bakes"])

            # Bake
            commence_bake(needed_bake_modes)

            self.report({"INFO"}, "Bake complete")
            return {'FINISHED'}

        functions.deselect_all_not_mesh()

        # We are in foreground, do usual checks
        result = True
        for need in needed_bake_modes:
            if not functions.startingChecks(bpy.context.selected_objects, need):
                result = False

        if not result:
            return {"CANCELLED"}

        # If the user requested background mode, fire that up now and exit
        if bpy.context.scene.bgbake == "bg":
            bpy.ops.wm.save_mainfile()

            # NOTE(review): duplicated assignment ("filepath = filepath =") is
            # harmless but redundant — a single assignment would suffice.
            filepath = filepath = bpy.data.filepath

            # Spawn a fresh headless Blender on this .blend; the -expr saves a
            # PID-named copy first so this session's file is never touched.
            process = subprocess.Popen(
                [bpy.app.binary_path, "--background", filepath, "--python-expr",
                 "import bpy;\
 import os;\
 from pathlib import Path;\
 savepath=Path(bpy.data.filepath).parent / (str(os.getpid()) + \".blend\");\
 bpy.ops.wm.save_as_mainfile(filepath=str(savepath), check_existing=False);\
 bpy.ops.object.omni_bake_mapbake();"],
                shell=False)

            # Remember the process plus the prepmesh/hidesourceobjects flags so
            # the import operator knows what to do when the process finishes.
            bgbake_ops.bgops_list.append([process, bpy.context.scene.prepmesh, bpy.context.scene.hidesourceobjects])

            self.report({"INFO"}, "Background bake process started")
            return {'FINISHED'}

        # If we are doing this here and now, get on with it
        # Create a bake operation
        commence_bake(needed_bake_modes)
        self.report({"INFO"}, "Bake complete")

        return {'FINISHED'}


# --------------------BACKGROUND BAKE----------------------------------

# Reports progress of running background bakes by reading the per-PID progress
# files they write to the temp directory.  (No class docstring on purpose:
# Blender would surface it as a tooltip.)
class OBJECT_OT_omni_bake_bgbake_status(bpy.types.Operator):
    bl_idname = "object.omni_bake_bgbake_status"
    bl_label = "Check on the status of bakes running in the background"

    def execute(self, context):
        """Assemble a status line per background process and show a message box."""
        msg_items = []

        # Display remaining
        if len(bgbake_ops.bgops_list) == 0:
            msg_items.append("No background bakes are currently running")
        else:
            msg_items.append(f"--------------------------")
            for p in bgbake_ops.bgops_list:
                # Progress file is written by the background process as
                # OmniBake_Bgbake_<pid> in the system temp dir.
                t = Path(tempfile.gettempdir())
                t = t / f"OmniBake_Bgbake_{str(p[0].pid)}"
                try:
                    with open(str(t), "r") as progfile:
                        progress = progfile.readline()
                except:
                    # No file yet, as no bake operation has completed yet.  Holding message
                    progress = 0

                msg_items.append(f"RUNNING: Process ID: {str(p[0].pid)} - Progress {progress}%")
                msg_items.append(f"--------------------------")

        functions.ShowMessageBox(msg_items, "Background Bake Status(es)")
        return {'FINISHED'}


# Appends the results of finished background bakes (saved as <pid>.blend next
# to this file) into the current session, then cleans up the temp files.
class OBJECT_OT_omni_bake_bgbake_import(bpy.types.Operator):
    bl_idname = "object.omni_bake_bgbake_import"
    bl_label = "Import baked objects previously baked in the background"
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Import every finished background bake, honouring the prepmesh (p[1])
        and hide-source (p[2]) flags recorded when the bake was launched."""
        if bpy.context.mode != "OBJECT":
            self.report({"ERROR"}, "You must be in object mode")
            return {'CANCELLED'}

        for p in bgbake_ops.bgops_list_finished:

            savepath = Path(bpy.data.filepath).parent
            pid_str = str(p[0].pid)
            path = savepath / (pid_str + ".blend")
            # NOTE(review): backslash separators assume Windows-style append
            # paths — confirm behaviour on other platforms.
            path = str(path) + "\\Collection\\"

            # Record the objects and collections before append (as append doesn't give us a reference to the new stuff)
            functions.spot_new_items(initialise=True, item_type="objects")
            functions.spot_new_items(initialise=True, item_type="collections")
            functions.spot_new_items(initialise=True, item_type="images")

            # Append
            bpy.ops.wm.append(filename="OmniBake_Bakes", directory=path, use_recursive=False, active_collection=False)

            # If we didn't actually want the objects, delete them
            if not p[1]:
                # Delete objects we just imported (leaving only textures)
                for obj_name in functions.spot_new_items(initialise=False, item_type = "objects"):
                    bpy.data.objects.remove(bpy.data.objects[obj_name])
                for col_name in functions.spot_new_items(initialise=False, item_type = "collections"):
                    bpy.data.collections.remove(bpy.data.collections[col_name])

            # If we have to hide the source objects, do it
            if p[2]:
                # Get the newly introduced objects:
                objects_before_names = functions.spot_new_items(initialise=False, item_type="objects")

                for obj_name in objects_before_names:
                    # Try this in case there are issues with long object names.. better than a crash
                    try:
                        bpy.data.objects[obj_name.replace("_Baked", "")].hide_set(True)
                    except:
                        pass

            # Delete the temp blend file
            try:
                os.remove(str(savepath / pid_str) + ".blend")
                os.remove(str(savepath / pid_str) + ".blend1")
            except:
                pass

        # Clear list for next time
        bgbake_ops.bgops_list_finished = []

        # Confirm back to user
        self.report({"INFO"}, "Import complete")

        messagelist = []
        messagelist.append(f"{len(functions.spot_new_items(initialise=False, item_type='objects'))} objects imported")
        messagelist.append(f"{len(functions.spot_new_items(initialise=False, item_type='images'))} textures imported")

        functions.ShowMessageBox(messagelist, "Import complete", icon = 'INFO')

        # If we imported an image, and we already had an image with the same name, get rid of the original in favour of the imported
        new_images_names = functions.spot_new_items(initialise=False, item_type="images")

        # Find any .001s
        for imgname in new_images_names:
            try:
                # Numeric 3-char suffix means Blender renamed a duplicate
                int(imgname[-3:])

                # Delete the existing version
                bpy.data.images.remove(bpy.data.images[imgname[0:-4]])

                # Rename our version
                bpy.data.images[imgname].name = imgname[0:-4]
            except ValueError:
                # Suffix wasn't numeric — not a duplicate, leave it alone
                pass

        return {'FINISHED'}


class OBJECT_OT_omni_bake_bgbake_clear(bpy.types.Operator):
    """Delete the background bakes because you don't want to import them into Blender. NOTE: If you chose to save bakes or FBX externally, these are safe and NOT deleted. This is just if you don't want to import into this Blender session"""
    bl_idname = "object.omni_bake_bgbake_clear"
    bl_label = ""
    bl_options = {'REGISTER', 'UNDO'}  # create undo state

    def execute(self, context):
        """Remove the finished bakes' temp .blend files and forget them."""
        savepath = Path(bpy.data.filepath).parent

        for p in bgbake_ops.bgops_list_finished:
            pid_str = str(p[0].pid)
            try:
                os.remove(str(savepath / pid_str) + ".blend")
                os.remove(str(savepath / pid_str) + ".blend1")
            except:
                # Best-effort cleanup: file may already be gone
                pass

        bgbake_ops.bgops_list_finished = []

        return {'FINISHED'}
11,531
Python
38.493151
240
0.540976
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/bake_operation.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import bpy

# Scene checkbox property -> bake-type name, in the order maps are baked.
# Keeping this as data (rather than eleven near-identical statements) makes
# adding a bake type a one-line change and removes the previous misuse of
# conditional expressions for their append() side effect.
_BAKE_TYPE_PROPS = (
    ("selected_col", "diffuse"),
    ("selected_metal", "metalness"),
    ("selected_rough", "roughness"),
    ("selected_normal", "normal"),
    ("selected_trans", "transparency"),
    ("selected_transrough", "transparencyroughness"),
    ("selected_emission", "emission"),
    ("selected_specular", "specular"),
    ("selected_alpha", "alpha"),
    ("selected_sss", "sss"),
    ("selected_ssscol", "ssscol"),
)


# Bake helper method
def bakestolist(justcount=False):
    """Return the PBR bake types selected in the scene properties.

    Reads the selected_* boolean properties on bpy.context.scene.

    Args:
        justcount: when True, return only how many bake types are selected.

    Returns:
        A list of bake-type name strings (in bake order), or an int count
        when justcount is True.
    """
    scene = bpy.context.scene
    # Assemble properties into list
    selectedbakes = [bake for prop, bake in _BAKE_TYPE_PROPS if getattr(scene, prop)]

    return len(selectedbakes) if justcount else selectedbakes


class BakeStatus:
    """Progress tracking for the bake: maps completed out of the total."""
    total_maps = 0    # set before baking starts (see mapbake operator)
    current_map = 0   # incremented after each map finishes baking
2,095
Python
40.919999
101
0.741766
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/bakefunctions.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import bpy
from . import functions
import sys
from .bake_operation import BakeStatus
from .data import MasterOperation, BakeOperation


def optimize():
    """Drop the Cycles sample count to 16 for faster baking.

    Saves the current count in MasterOperation.orig_sample_count so
    undo_optimize() can restore it.  Returns True always.
    """
    current_bake_op = MasterOperation.current_bake_operation

    MasterOperation.orig_sample_count = bpy.context.scene.cycles.samples

    functions.printmsg("Reducing sample count to 16 for more efficient baking")
    bpy.context.scene.cycles.samples = 16

    return True


def undo_optimize():
    """Restore the Cycles sample count saved by optimize()."""
    # Restore sample count
    bpy.context.scene.cycles.samples = MasterOperation.orig_sample_count


def common_bake_prep():
    """Scene preparation shared by every bake mode.

    Records the original selection/engine/UVs (first operation only), forces
    Cycles, prepares UVs, and reduces the sample count.  Must be paired with
    common_bake_finishing().  Returns True always.
    """
    # --------------Set Bake Operation Variables----------------------------
    current_bake_op = MasterOperation.current_bake_operation

    functions.printmsg("================================")
    functions.printmsg("---------Beginning Bake---------")
    functions.printmsg(f"{current_bake_op.bake_mode}")
    functions.printmsg("================================")

    # Run information
    op_num = MasterOperation.this_bake_operation_num

    firstop = False
    lastop = False
    if op_num == 1: firstop = True
    if op_num == MasterOperation.total_bake_operations: lastop = True

    # If this is a pbr bake, gather the selected maps
    if current_bake_op.bake_mode in {BakeOperation.PBR}:
        current_bake_op.assemble_pbr_bake_list()

    # Record batch name
    MasterOperation.batch_name = bpy.context.scene.batchName

    # Set values based on viewport selection
    current_bake_op.orig_objects = bpy.context.selected_objects.copy()
    current_bake_op.orig_active_object = bpy.context.active_object
    current_bake_op.bake_objects = bpy.context.selected_objects.copy()
    current_bake_op.active_object = bpy.context.active_object

    current_bake_op.orig_engine = bpy.context.scene.render.engine

    # Record original UVs for everyone
    if firstop:
        for obj in current_bake_op.bake_objects:
            try:
                MasterOperation.orig_UVs_dict[obj.name] = obj.data.uv_layers.active.name
            except AttributeError:
                # Object has no UV layers (uv_layers.active is None)
                MasterOperation.orig_UVs_dict[obj.name] = False

    # Record the rendering engine
    # (common_bake_finishing restores from MasterOperation.orig_engine,
    # not from the per-operation copy above)
    if firstop:
        MasterOperation.orig_engine = bpy.context.scene.render.engine

    current_bake_op.uv_mode = "normal"

    # ----------------------------------------------------------------------

    # Force it to cycles
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.render.bake.use_selected_to_active = False

    functions.printmsg(f"Selected to active is now {bpy.context.scene.render.bake.use_selected_to_active}")

    # If the user doesn't have a GPU, but has still set the render device to GPU, set it to CPU
    if not bpy.context.preferences.addons["cycles"].preferences.has_active_device():
        bpy.context.scene.cycles.device = "CPU"

    # Clear the trunc num for this session
    functions.trunc_num = 0
    functions.trunc_dict = {}

    # Turn off that dam use clear.
    bpy.context.scene.render.bake.use_clear = False

    # Do what we are doing with UVs (only if we are the primary op)
    if firstop:
        functions.processUVS()

    # Optimize
    optimize()

    # Make sure the normal y setting is at default
    bpy.context.scene.render.bake.normal_g = "POS_Y"

    return True


def common_bake_finishing():
    """Wind-down shared by every bake mode.

    Restores engine/samples/selection/UVs, optionally creates prepared
    ("_Baked") meshes, hides sources if requested, and saves the file when
    running headless.
    """
    # Run information
    current_bake_op = MasterOperation.current_bake_operation
    op_num = MasterOperation.this_bake_operation_num

    firstop = False
    lastop = False
    if op_num == 1: firstop = True
    if op_num == MasterOperation.total_bake_operations: lastop = True

    # Restore the original rendering engine
    if lastop:
        bpy.context.scene.render.engine = MasterOperation.orig_engine

    undo_optimize()

    # If prep mesh, or save object is selected, or running in the background, then do it
    # We do this on primary run only
    if firstop:
        if(bpy.context.scene.prepmesh or "--background" in sys.argv):
            functions.prepObjects(current_bake_op.bake_objects, current_bake_op.bake_mode)

    # If the user wants it, restore the original active UV map so we don't confuse anyone
    functions.restore_Original_UVs()

    # Restore the original object selection so we don't confuse anyone
    bpy.ops.object.select_all(action="DESELECT")
    for obj in current_bake_op.orig_objects:
        obj.select_set(True)
    bpy.context.view_layer.objects.active = current_bake_op.orig_active_object

    # Hide all the original objects
    if bpy.context.scene.prepmesh and bpy.context.scene.hidesourceobjects and lastop:
        for obj in current_bake_op.bake_objects:
            obj.hide_set(True)

    # Delete placeholder material
    if lastop and "OmniBake_Placeholder" in bpy.data.materials:
        bpy.data.materials.remove(bpy.data.materials["OmniBake_Placeholder"])

    if "--background" in sys.argv:
        # Headless run: persist the results before the process exits
        bpy.ops.wm.save_mainfile()


def doBake():
    """Run the PBR bake for the current BakeOperation.

    For every selected bake type and every bake object: create the target
    image, temporarily rewire each material so the wanted channel feeds an
    emission bake, bake, then restore the original materials.
    """
    current_bake_op = MasterOperation.current_bake_operation

    # Do the prep we need to do for all bake types
    common_bake_prep()

    # Loop over the bake modes we are using
    def doBake_actual():

        IMGNAME = ""

        for thisbake in current_bake_op.pbr_selected_bake_types:
            for obj in current_bake_op.bake_objects:
                # Reset the already processed list
                mats_done = []

                functions.printmsg(f"Baking object: {obj.name}")

                # Truncate if needed from this point forward
                OBJNAME = functions.trunc_if_needed(obj.name)

                # Create the image we need for this bake (Delete if exists)
                IMGNAME = functions.gen_image_name(obj.name, thisbake)
                functions.create_Images(IMGNAME, thisbake, obj.name)

                # Prep the materials one by one
                materials = obj.material_slots
                for matslot in materials:
                    mat = bpy.data.materials.get(matslot.name)

                    if mat.name in mats_done:
                        functions.printmsg(f"Skipping material {mat.name}, already processed")
                        # Skip this loop
                        # We don't want to process any materials more than once or bad things happen
                        continue
                    else:
                        mats_done.append(mat.name)

                    # Make sure we are using nodes
                    if not mat.use_nodes:
                        functions.printmsg(f"Material {mat.name} wasn't using nodes. Have enabled nodes")
                        mat.use_nodes = True

                    nodetree = mat.node_tree
                    nodes = nodetree.nodes

                    # Take a copy of material to restore at the end of the process
                    functions.backupMaterial(mat)

                    # Create the image node and set to the bake texutre we are using
                    imgnode = nodes.new("ShaderNodeTexImage")
                    imgnode.image = bpy.data.images[IMGNAME]
                    imgnode.label = "OmniBake"

                    # Remove all disconnected nodes so don't interfere with typing the material
                    functions.removeDisconnectedNodes(nodetree)

                    # Use additional shader types
                    functions.useAdditionalShaderTypes(nodetree, nodes)

                    # Normal and emission bakes require no further material prep.  Just skip the rest
                    if(thisbake != "normal" and thisbake != "emission"):
                        # Work out what type of material we are dealing with here and take correct action
                        mat_type = functions.getMatType(nodetree)

                        if(mat_type == "MIX"):
                            functions.setup_mix_material(nodetree, thisbake)
                        elif(mat_type == "PURE_E"):
                            functions.setup_pure_e_material(nodetree, thisbake)
                        elif(mat_type == "PURE_P"):
                            functions.setup_pure_p_material(nodetree, thisbake)

                    # Last action before leaving this material, make the image node selected and active
                    functions.deselectAllNodes(nodes)
                    imgnode.select = True
                    nodetree.nodes.active = imgnode

                # Select only this object
                functions.selectOnlyThis(obj)

                # We are done with this image, set colour space
                functions.set_image_internal_col_space(bpy.data.images[IMGNAME], thisbake)

                # Bake the object for this bake mode
                functions.bakeoperation(thisbake, bpy.data.images[IMGNAME])

                # Update tracking
                BakeStatus.current_map+=1
                functions.printmsg(f"Bake maps {BakeStatus.current_map} of {BakeStatus.total_maps} complete")
                functions.write_bake_progress(BakeStatus.current_map, BakeStatus.total_maps)

                # Restore the original materials
                functions.printmsg("Restoring original materials")
                functions.restoreAllMaterials()
                functions.printmsg("Restore complete")

                # Last thing we do with this image is scale it
                # (note: "sacle_image_if_needed" spelling matches the helper's
                # definition elsewhere in this add-on)
                functions.sacle_image_if_needed(bpy.data.images[IMGNAME])

    # Do the bake at least once
    doBake_actual()

    # Finished baking.  Perform wind down actions
    common_bake_finishing()
10,620
Python
36.932143
109
0.608004
NVIDIA-Omniverse/Blender-Addon-OmniPanel/omni_panel/material_bake/functions.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

from pathlib import Path
from ..ui import OmniBakePreferences
import bpy
import os
import sys
import tempfile
from . import material_setup
from .data import MasterOperation

# Global variables
# Bake type -> matching Principled BSDF input socket name.
psocketname = {
    "diffuse": "Base Color",
    "metalness": "Metallic",
    "roughness": "Roughness",
    "normal": "Normal",
    "transparency": "Transmission",
    "transparencyroughness": "Transmission Roughness",
    "specular": "Specular",
    "alpha": "Alpha",
    "sss": "Subsurface",
    "ssscol": "Subsurface Color",
    "displacement": "Displacement"
    }


def printmsg(msg):
    """Print a message prefixed with "BAKE:" for easy log filtering."""
    print(f"BAKE: {msg}")


def gen_image_name(obj_name, baketype):
    """Build the baked image name from the user's name-format preference.

    Substitutes %OBJ%, %BATCH%, %BAKEMODE% and %BAKETYPE% placeholders in
    the format string from the add-on preferences.
    """
    current_bake_op = MasterOperation.current_bake_operation

    # First, let's get the format string we are working with
    prefs = bpy.context.preferences.addons[OmniBakePreferences.bl_idname].preferences
    image_name = prefs.img_name_format

    # The easy ones
    image_name = image_name.replace("%OBJ%", obj_name)
    image_name = image_name.replace("%BATCH%", bpy.context.scene.batchName)

    # Bake mode
    image_name = image_name.replace("%BAKEMODE%", current_bake_op.bake_mode)

    # The hard ones
    if baketype == "diffuse":
        image_name = image_name.replace("%BAKETYPE%", prefs.diffuse_alias)
    elif baketype == "metalness":
        image_name = image_name.replace("%BAKETYPE%", prefs.metal_alias)
    elif baketype == "roughness":
        image_name = image_name.replace("%BAKETYPE%", prefs.roughness_alias)
    elif baketype == "normal":
        image_name = image_name.replace("%BAKETYPE%", prefs.normal_alias)
    elif baketype == "transparency":
        image_name = image_name.replace("%BAKETYPE%", prefs.transmission_alias)
    elif baketype == "transparencyroughness":
        image_name = image_name.replace("%BAKETYPE%", prefs.transmissionrough_alias)
    elif baketype == "emission":
        image_name = image_name.replace("%BAKETYPE%", prefs.emission_alias)
    elif baketype == "specular":
        image_name = image_name.replace("%BAKETYPE%", prefs.specular_alias)
    elif baketype == "alpha":
        image_name = image_name.replace("%BAKETYPE%", prefs.alpha_alias)
    elif baketype == "sss":
        image_name = image_name.replace("%BAKETYPE%", prefs.sss_alias)
    elif baketype == "ssscol":
        image_name = image_name.replace("%BAKETYPE%", prefs.ssscol_alias)
    # Displacement is not currently Implemented
    elif baketype == "displacement":
        image_name = image_name.replace("%BAKETYPE%", prefs.displacement_alias)
    else:
        # Unknown type: fall back to the raw bake-type string
        image_name = image_name.replace("%BAKETYPE%", baketype)

    return image_name


def removeDisconnectedNodes(nodetree):
    """Recursively delete shader nodes whose output feeds nothing.

    Removing one node can orphan another, hence the recursion until a pass
    removes nothing.
    """
    nodes = nodetree.nodes

    # Loop through nodes
    repeat = False
    for node in nodes:
        if node.type == "BSDF_PRINCIPLED" and len(node.outputs[0].links) == 0:
            # Not a player, delete node
            nodes.remove(node)
            repeat = True
        elif node.type == "EMISSION" and len(node.outputs[0].links) == 0:
            # Not a player, delete node
            nodes.remove(node)
            repeat = True
        elif node.type == "MIX_SHADER" and len(node.outputs[0].links) == 0:
            # Not a player, delete node
            nodes.remove(node)
            repeat = True
        elif node.type == "ADD_SHADER" and len(node.outputs[0].links) == 0:
            # Not a player, delete node
            nodes.remove(node)
            repeat = True
        # Displacement is not currently Implemented
        elif node.type == "DISPLACEMENT" and len(node.outputs[0].links) == 0:
            # Not a player, delete node
            nodes.remove(node)
            repeat = True

    # If we removed any nodes, we need to do this again
    if repeat:
        removeDisconnectedNodes(nodetree)


def backupMaterial(mat):
    """Duplicate a material as "<name>_OmniBake" so restoreAllMaterials can
    roll back the bake-time edits."""
    dup = mat.copy()
    dup.name = mat.name + "_OmniBake"


def restoreAllMaterials():
    """Swap every slot back to its "_OmniBake" backup, delete the edited
    originals, and rename the backups to the original names."""
    # Not efficient but, if we are going to do things this way, we need to loop over every object in the scene
    dellist = []

    for obj in bpy.data.objects:
        for slot in obj.material_slots:
            origname = slot.name

            # Try to set to the corresponding material that was the backup
            try:
                slot.material = bpy.data.materials[origname + "_OmniBake"]
                # If not already on our list, log the original material (that we messed with) for mass deletion
                if origname not in dellist:
                    dellist.append(origname)
            except KeyError:
                # Not been backed up yet.  Must not have processed an object with that material yet
                pass

    # Delete the unused materials
    for matname in dellist:
        bpy.data.materials.remove(bpy.data.materials[matname])

    # Rename all materials to the original name, leaving us where we started
    for mat in bpy.data.materials:
        if "_OmniBake" in mat.name:
            mat.name = mat.name.replace("_OmniBake", "")


def create_Images(imgname, thisbake, objname):
    """Create (replacing any existing) the target image for one bake map.

    Tags the image with SB_* custom properties and records it in
    MasterOperation.baked_textures.
    """
    # thisbake is subtype e.g. diffuse, ao, etc.
    current_bake_op = MasterOperation.current_bake_operation
    global_mode = current_bake_op.bake_mode
    batch = MasterOperation.batch_name

    printmsg(f"Creating image {imgname}")

    # Get the image height and width from the interface
    IMGHEIGHT = bpy.context.scene.imgheight
    IMGWIDTH = bpy.context.scene.imgwidth

    # If it already exists, remove it.
    if(imgname in bpy.data.images):
        bpy.data.images.remove(bpy.data.images[imgname])

    # Create image 32 bit or not 32 bit
    # (normal maps need the float buffer to avoid banding)
    if thisbake == "normal" :
        image = bpy.data.images.new(imgname, IMGWIDTH, IMGHEIGHT, float_buffer=True)
    else:
        image = bpy.data.images.new(imgname, IMGWIDTH, IMGHEIGHT, float_buffer=False)

    # Set tags
    image["SB_objname"] = objname
    image["SB_batch"] = batch
    image["SB_globalmode"] = global_mode
    image["SB_thisbake"] = thisbake

    # Always mark new images fake user when generated in the background
    # (otherwise the unreferenced image would be dropped on save)
    if "--background" in sys.argv:
        image.use_fake_user = True

    # Store it at bake operation level
    MasterOperation.baked_textures.append(image)


def deselectAllNodes(nodes):
    """Deselect every node in the given collection."""
    for node in nodes:
        node.select = False


def findSocketConnectedtoP(pnode, thisbake):
    """Return the output socket feeding the Principled input that corresponds
    to this bake type.  Assumes the input is connected (see createdummynodes)."""
    # Get socket name for this bake mode
    socketname = psocketname[thisbake]

    # Get socket of the pnode
    socket = pnode.inputs[socketname]
    fromsocket = socket.links[0].from_socket

    # Return the socket connected to the pnode
    return fromsocket


def createdummynodes(nodetree, thisbake):
    """For every Principled node, plug an RGB/Value node carrying the current
    default value into the bake-relevant input if nothing is connected yet."""
    # Loop through pnodes
    nodes = nodetree.nodes

    for node in nodes:
        if node.type == "BSDF_PRINCIPLED":
            pnode = node

            # Get socket name for this bake mode
            socketname = psocketname[thisbake]

            # Get socket of the pnode
            psocket = pnode.inputs[socketname]

            # If it has something plugged in, we can leave it here
            if(len(psocket.links) > 0):
                continue

            # Get value of the unconnected socket
            val = psocket.default_value

            # If this is base col or ssscol, add an RGB node and set it's value to that of the socket
            if(socketname == "Base Color" or socketname == "Subsurface Color"):
                rgb = nodetree.nodes.new("ShaderNodeRGB")
                rgb.outputs[0].default_value = val
                rgb.label = "OmniBake"
                nodetree.links.new(rgb.outputs[0], psocket)

            # If this is anything else, use a value node
            else:
                vnode = nodetree.nodes.new("ShaderNodeValue")
                vnode.outputs[0].default_value = val
                vnode.label = "OmniBake"
                nodetree.links.new(vnode.outputs[0], psocket)


def bakeoperation(thisbake, img):
    """Invoke the actual Cycles bake (EMIT for all types except normal) and
    pack the result into the .blend."""
    printmsg(f"Beginning bake for {thisbake}")

    if(thisbake != "normal"):
        bpy.ops.object.bake(type="EMIT", save_mode="INTERNAL", use_clear=True)
    else:
        bpy.ops.object.bake(type="NORMAL", save_mode="INTERNAL", use_clear=True)

    # Always pack the image for now
    img.pack()


def startingChecks(objects, bakemode):
    """Validate the selection before a foreground bake.

    Collects every problem into a message box rather than stopping at the
    first.  Returns True when everything is OK, False otherwise.
    """
    messages = []

    if len(objects) == 0:
        messages.append("ERROR: Nothing selected for bake")

    # Are any of our objects hidden?
    for obj in objects:
        if (obj.hide_viewport == True) or (obj.hide_get(view_layer=bpy.context.view_layer) == True):
            messages.append(f"ERROR: Object '{obj.name}' is hidden in viewport (eye icon in outliner) or in the current view lawyer (computer screen icon in outliner)")

    # What about hidden from rendering?
    for obj in objects:
        if obj.hide_render:
            messages.append(f"ERROR: Object '{obj.name}' is hidden for rendering (camera icon in outliner)")

    # None of the objects can have zero faces
    for obj in objects:
        if len(obj.data.polygons) < 1:
            messages.append(f"ERROR: Object '{obj.name}' has no faces")

    if(bpy.context.mode != "OBJECT"):
        messages.append("ERROR: Not in object mode")

    # PBR Bake Checks
    for obj in objects:

        # Is it mesh?
        if obj.type != "MESH":
            messages.append(f"ERROR: Object {obj.name} is not mesh")
            # Must continue here - other checks will throw exceptions
            continue

        # Are UVs OK?
        if bpy.context.scene.newUVoption == False and len(obj.data.uv_layers) == 0:
            messages.append(f"ERROR: Object {obj.name} has no UVs, and you aren't generating new ones")
            continue

        # Are materials OK? Fix if not
        if not checkObjectValidMaterialConfig(obj):
            fix_invalid_material_config(obj)

        # Do all materials have valid PBR config?
        if bpy.context.scene.more_shaders == False:
            for slot in obj.material_slots:
                mat = slot.material
                result = checkMatsValidforPBR(mat)
                if len(result) > 0:
                    for node_name in result:
                        messages.append(f"ERROR: Node '{node_name}' in material '{mat.name}' on object '{obj.name}' is not valid for PBR bake. In order to use more than just Princpled, Emission, and Mix Shaders, turn on 'Use additional Shader Types'!")
        else:
            for slot in obj.material_slots:
                mat = slot.material
                result = checkExtraMatsValidforPBR(mat)
                if len(result) > 0:
                    for node_name in result:
                        messages.append(f"ERROR: Node '{node_name}' in material '{mat.name}' on object '{obj.name}' is not supported")

    # Let's report back
    if len(messages) != 0:
        ShowMessageBox(messages, "Errors occured", "ERROR")
        return False
    else:
        # If we get here then everything looks good
        return True


# ------------------------------------------
def processUVS():
    """Prepare UV maps for baking per the scene options.

    Either generates a fresh "OmniBake" map per object with Smart UV
    Project, or works with existing UVs (optionally preferring an existing
    "OmniBake" map).  Restores the original selection before returning True.
    """
    current_bake_op = MasterOperation.current_bake_operation

    # ------------------NEW UVS ------------------------------------------------------------

    if bpy.context.scene.newUVoption:

        printmsg("We are generating new UVs")
        printmsg("We are unwrapping each object individually with Smart UV Project")

        objs = current_bake_op.bake_objects

        for obj in objs:

            if("OmniBake" in obj.data.uv_layers):
                obj.data.uv_layers.remove(obj.data.uv_layers["OmniBake"])
            obj.data.uv_layers.new(name="OmniBake")
            obj.data.uv_layers["OmniBake"].active = True

            # Will set active object
            selectOnlyThis(obj)

            # Blender 2.91 kindly breaks Smart UV Project in object mode so... yeah... thanks
            bpy.ops.object.mode_set(mode="EDIT", toggle=False)

            # Unhide any geo that's hidden in edit mode or it'll cause issues.
            bpy.ops.mesh.reveal()

            bpy.ops.mesh.select_all(action="SELECT")
            bpy.ops.mesh.reveal()

            bpy.ops.uv.smart_project(island_margin=bpy.context.scene.unwrapmargin)

            bpy.ops.object.mode_set(mode="OBJECT", toggle=False)

    # ------------------END NEW UVS ------------------------------------------------------------

    else: # i.e. New UV Option was not selected

        printmsg("We are working with the existing UVs")

        if bpy.context.scene.prefer_existing_sbmap:
            printmsg("We are preferring existing UV maps called OmniBake. Setting them to active")
            for obj in current_bake_op.bake_objects:
                if("OmniBake" in obj.data.uv_layers):
                    obj.data.uv_layers["OmniBake"].active = True

    # Before we finish, restore the original selected and active objects
    bpy.ops.object.select_all(action="DESELECT")
    for obj in current_bake_op.orig_objects:
        obj.select_set(True)
    bpy.context.view_layer.objects.active = current_bake_op.orig_active_object

    # Done
    return True


def restore_Original_UVs():
    """Re-activate each bake object's pre-bake UV map recorded in
    MasterOperation.orig_UVs_dict (False entries mean there was none)."""
    current_bake_op = MasterOperation.current_bake_operation

    # First the bake objects
    for obj in current_bake_op.bake_objects:
        if MasterOperation.orig_UVs_dict[obj.name] != None:
            original_uv = MasterOperation.orig_UVs_dict[obj.name]
            obj.data.uv_layers.active = obj.data.uv_layers[original_uv]


def setupEmissionRunThrough(nodetree, m_output_node, thisbake, ismix=False):
    """Route the Principled input for this bake type through a new emission
    shader into the material output (optionally via a new mix shader that
    copies the original mix factor)."""
    nodes = nodetree.nodes
    pnode = find_pnode(nodetree)

    # Create emission shader
    emissnode = nodes.new("ShaderNodeEmission")
    emissnode.label = "OmniBake"

    # Connect to output
    if(ismix):
        # Find the existing mix node before we create a new one
        existing_m_node = find_mnode(nodetree)

        # Add a mix shader node and label it
        mnode = nodes.new("ShaderNodeMixShader")
        mnode.label = "OmniBake"

        # Connect new mix node to the output
        fromsocket = mnode.outputs[0]
        tosocket = m_output_node.inputs[0]
        nodetree.links.new(fromsocket, tosocket)

        # Connect new emission node to the first mix slot (leaving second empty)
        fromsocket = emissnode.outputs[0]
        tosocket = mnode.inputs[1]
        nodetree.links.new(fromsocket, tosocket)

        # If there is one, plug the factor from the original mix node into our new mix node
        if(len(existing_m_node.inputs[0].links) > 0):
            fromsocket = existing_m_node.inputs[0].links[0].from_socket
            tosocket = mnode.inputs[0]
            nodetree.links.new(fromsocket, tosocket)

        # If no input, add a value node set to same as the mnode factor
        else:
            val = existing_m_node.inputs[0].default_value
            vnode = nodes.new("ShaderNodeValue")
            vnode.label = "OmniBake"
            vnode.outputs[0].default_value = val

            fromsocket = vnode.outputs[0]
            tosocket = mnode.inputs[0]
            nodetree.links.new(fromsocket, tosocket)

    else:
        # Just connect our new emission to the output
        fromsocket = emissnode.outputs[0]
        tosocket = m_output_node.inputs[0]
        nodetree.links.new(fromsocket, tosocket)

    # Create dummy nodes for the socket for this bake if needed
    # NOTE(review): createdummynodes is defined above with two parameters
    # (nodetree, thisbake); this three-argument call would raise TypeError if
    # this path is ever executed — confirm whether this function is reachable.
    createdummynodes(nodetree, pnode, thisbake)

    # Connect whatever is in Principled Shader for this bakemode to the emission
    fromsocket = findSocketConnectedtoP(pnode, thisbake)
    tosocket = emissnode.inputs[0]
    nodetree.links.new(fromsocket, tosocket)


# ---------------------Node Finders---------------------------

def find_pnode(nodetree):
    """Return the first Principled BSDF node in the tree, or False."""
    nodes = nodetree.nodes
    for node in nodes:
        if(node.type == "BSDF_PRINCIPLED"):
            return node
    # We never found it
    return False


def find_enode(nodetree):
    """Return the first Emission node in the tree, or False."""
    nodes = nodetree.nodes
    for node in nodes:
        if(node.type == "EMISSION"):
            return node
    # We never found it
    return False


def find_mnode(nodetree):
    """Return the first Mix Shader node in the tree, or False."""
    nodes = nodetree.nodes
    for node in nodes:
        if(node.type == "MIX_SHADER"):
            return node
    # We never found it
    return False


def find_onode(nodetree):
    """Return the first Material Output node in the tree, or False."""
    nodes = nodetree.nodes
    for node in nodes:
        if(node.type == "OUTPUT_MATERIAL"):
            return node
    # We never found it
    return False


def checkObjectValidMaterialConfig(obj):
    """Return True when the object has material slots, every slot has a
    material, and every material uses nodes."""
    # Firstly, check it actually has material slots
    if len(obj.material_slots) == 0:
        return False

    # Check the material slots all have a material assigned
    for slot in obj.material_slots:
        if slot.material == None:
            return False

    # All materials must be using nodes
    for slot in obj.material_slots:
        if slot.material.use_nodes == False:
            return False

    # If we get here, everything looks good
    return True


def getMatType(nodetree):
    """Classify a material node tree: "MIX" (Principled + Mix Shader),
    "PURE_P" (Principled only), "PURE_E" (Emission only) or "INVALID"."""
    if (find_pnode(nodetree) and find_mnode(nodetree)):
        return "MIX"
    elif(find_pnode(nodetree)):
        return "PURE_P"
    elif(find_enode(nodetree)):
        return "PURE_E"
    else:
        return "INVALID"


def prepObjects(objs, baketype):
    """Duplicate each baked object into the "OmniBake_Bakes" collection,
    reduce it to a single "OmniBake" UV map, and give it one new material
    wired up (via material_setup) to the baked textures.

    Duplicates are renamed "<name>_Baked".  If prepmesh isn't wanted and we
    aren't headless, the duplicates are deleted again (textures remain).
    """
    current_bake_op = MasterOperation.current_bake_operation

    printmsg("Creating prepared object")
    # First we prepare objectes
    export_objects = []
    for obj in objs:

        # -------------Create the prepared mesh----------------------------------------

        # Object might have a truncated name.  Should use this if it's there
        objname = trunc_if_needed(obj.name)

        new_obj = obj.copy()
        new_obj.data = obj.data.copy()
        new_obj["SB_createdfrom"] = obj.name

        # clear all materials
        new_obj.data.materials.clear()

        new_obj.name = objname + "_OmniBake"

        # Create a collection for our baked objects if it doesn't exist
        if "OmniBake_Bakes" not in bpy.data.collections:
            c = bpy.data.collections.new("OmniBake_Bakes")
            bpy.context.scene.collection.children.link(c)

        # Make sure it's visible and enabled for current view layer or it screws things up
        bpy.context.view_layer.layer_collection.children["OmniBake_Bakes"].exclude = False
        bpy.context.view_layer.layer_collection.children["OmniBake_Bakes"].hide_viewport = False

        c = bpy.data.collections["OmniBake_Bakes"]

        # Link object to our new collection
        c.objects.link(new_obj)

        # Append this object to the export list
        export_objects.append(new_obj)

        # ---------------------------------UVS--------------------------------------

        uvlayers = new_obj.data.uv_layers

        # If we generated new UVs, it will be called "OmniBake" and we are using that.  End of.
        # Same if we are being called for Sketchfab upload, and last bake used new UVs
        if bpy.context.scene.newUVoption:
            pass

        # If there is an existing map called OmniBake, and we are preferring it, use that
        elif ("OmniBake" in uvlayers) and bpy.context.scene.prefer_existing_sbmap:
            pass

        # Even if we are not preferring it, if there is just one map called OmniBake, we are using that
        elif ("OmniBake" in uvlayers) and len(uvlayers) <2:
            pass

        # If there is an existing map called OmniBake, and we are not preferring it, it has to go
        # Active map becomes OmniBake
        elif ("OmniBake" in uvlayers) and not bpy.context.scene.prefer_existing_sbmap:
            uvlayers.remove(uvlayers["OmniBake"])
            active_layer = uvlayers.active
            active_layer.name = "OmniBake"

        # Finally, if none of the above apply, we are just using the active map
        # Active map becomes OmniBake
        else:
            active_layer = uvlayers.active
            active_layer.name = "OmniBake"

        # In all cases, we can now delete everything other than OmniBake
        deletelist = []
        for uvlayer in uvlayers:
            if (uvlayer.name != "OmniBake"):
                deletelist.append(uvlayer.name)
        for uvname in deletelist:
            uvlayers.remove(uvlayers[uvname])

        # ---------------------------------END UVS--------------------------------------

        # Create a new material
        # call it same as object + batchname + baked
        mat = bpy.data.materials.get(objname + "_" + bpy.context.scene.batchName + "_baked")
        if mat is None:
            mat = bpy.data.materials.new(name=objname + "_" + bpy.context.scene.batchName +"_baked")

        # Assign it to object
        mat.use_nodes = True
        new_obj.data.materials.append(mat)

    # Set up the materials for each object
    for obj in export_objects:

        # Should only have one material
        mat = obj.material_slots[0].material
        nodetree = mat.node_tree

        material_setup.create_principled_setup(nodetree, obj)

        # Change object name to avoid collisions
        obj.name = obj.name.replace("_OmniBake", "_Baked")

    bpy.ops.object.select_all(action="DESELECT")
    for obj in export_objects:
        obj.select_set(state=True)

    if (not bpy.context.scene.prepmesh) and (not "--background" in sys.argv):
        # Deleted duplicated objects
        for obj in export_objects:
            bpy.data.objects.remove(obj)
    # Add the created objects to the bake operation list to keep track of them
    else:
        for obj in export_objects:
            MasterOperation.prepared_mesh_objects.append(obj)


def selectOnlyThis(obj):
    """Deselect everything, then select obj and make it active."""
    bpy.ops.object.select_all(action="DESELECT")
    obj.select_set(state=True)
    bpy.context.view_layer.objects.active = obj


def setup_pure_p_material(nodetree, thisbake):
    """Prepare a Principled-only material for baking: plug the bake-relevant
    Principled input into a new emission shader feeding the output."""
    # Create dummy nodes as needed
    createdummynodes(nodetree, thisbake)

    # Create emission shader
    nodes = nodetree.nodes
    m_output_node = find_onode(nodetree)
    loc = m_output_node.location

    # Create an emission shader
    emissnode = nodes.new("ShaderNodeEmission")
    emissnode.label = "OmniBake"
    emissnode.location = loc
    emissnode.location.y = emissnode.location.y + 200

    # Connect our new emission to the output
    fromsocket = emissnode.outputs[0]
    tosocket = m_output_node.inputs[0]
    nodetree.links.new(fromsocket, tosocket)

    # Connect whatever is in Principled Shader for this bakemode to the emission
    fromsocket = findSocketConnectedtoP(find_pnode(nodetree), thisbake)
    tosocket = emissnode.inputs[0]
    nodetree.links.new(fromsocket, tosocket)


def setup_pure_e_material(nodetree, thisbake):
    """Prepare an emission-only material: mute its emission nodes for every
    bake type except emission so they don't contaminate the bake."""
    # If baking something other than emission, mute the emission modes so they don't contaiminate our bake
    if thisbake != "Emission":
        nodes = nodetree.nodes
        for node in nodes:
            if node.type == "EMISSION":
                node.mute = True
                node.label = "OmniBakeMuted"


def setup_mix_material(nodetree, thisbake):
    # No need to mute emission nodes.
They are automuted by setting the RGBMix to black nodes = nodetree.nodes #Create dummy nodes as needed createdummynodes(nodetree, thisbake) #For every mix shader, create a mixrgb above it #Also connect the factor input to the same thing created_mix_nodes = {} for node in nodes: if node.type == "MIX_SHADER": loc = node.location rgbmix = nodetree.nodes.new("ShaderNodeMixRGB") rgbmix.label = "OmniBake" rgbmix.location = loc rgbmix.location.y = rgbmix.location.y + 200 #If there is one, plug the factor from the original mix node into our new mix node if(len(node.inputs[0].links) > 0): fromsocket = node.inputs[0].links[0].from_socket tosocket = rgbmix.inputs["Fac"] nodetree.links.new(fromsocket, tosocket) #If no input, add a value node set to same as the mnode factor else: val = node.inputs[0].default_value vnode = nodes.new("ShaderNodeValue") vnode.label = "OmniBake" vnode.outputs[0].default_value = val fromsocket = vnode.outputs[0] tosocket = rgbmix.inputs[0] nodetree.links.new(fromsocket, tosocket) #Keep a dictionary with paired shader mix node created_mix_nodes[node.name] = rgbmix.name #Loop over the RGBMix nodes that we created for node in created_mix_nodes: mshader = nodes[node] rgb = nodes[created_mix_nodes[node]] #Mshader - Socket 1 #First, check if there is anything plugged in at all if len(mshader.inputs[1].links) > 0: fromnode = mshader.inputs[1].links[0].from_node if fromnode.type == "BSDF_PRINCIPLED": #Get the socket we are looking for, and plug it into RGB socket 1 fromsocket = findSocketConnectedtoP(fromnode, thisbake) nodetree.links.new(fromsocket, rgb.inputs[1]) elif fromnode.type == "MIX_SHADER": #If it's a mix shader on the other end, connect the equivilent RGB node #Get the RGB node for that mshader fromrgb = nodes[created_mix_nodes[fromnode.name]] fromsocket = fromrgb.outputs[0] nodetree.links.new(fromsocket, rgb.inputs[1]) elif fromnode.type == "EMISSION": #Set this input to black rgb.inputs[1].default_value = (0.0, 0.0, 0.0, 1) else: 
printmsg("Error, invalid node config") else: rgb.inputs[1].default_value = (0.0, 0.0, 0.0, 1) #Mshader - Socket 2 if len(mshader.inputs[2].links) > 0: fromnode = mshader.inputs[2].links[0].from_node if fromnode.type == "BSDF_PRINCIPLED": #Get the socket we are looking for, and plug it into RGB socket 2 fromsocket = findSocketConnectedtoP(fromnode, thisbake) nodetree.links.new(fromsocket, rgb.inputs[2]) elif fromnode.type == "MIX_SHADER": #If it's a mix shader on the other end, connect the equivilent RGB node #Get the RGB node for that mshader fromrgb = nodes[created_mix_nodes[fromnode.name]] fromsocket = fromrgb.outputs[0] nodetree.links.new(fromsocket, rgb.inputs[2]) elif fromnode.type == "EMISSION": #Set this input to black rgb.inputs[2].default_value = (0.0, 0.0, 0.0, 1) else: printmsg("Error, invalid node config") else: rgb.inputs[2].default_value = (0.0, 0.0, 0.0, 1) #Find the output node with location m_output_node = find_onode(nodetree) loc = m_output_node.location #Create an emission shader emissnode = nodes.new("ShaderNodeEmission") emissnode.label = "OmniBake" emissnode.location = loc emissnode.location.y = emissnode.location.y + 200 #Get the original mix node that was connected to the output node socket = m_output_node.inputs["Surface"] fromnode = socket.links[0].from_node #Find our created mix node that is paired with it rgbmix = nodes[created_mix_nodes[fromnode.name]] #Plug rgbmix into emission nodetree.links.new(rgbmix.outputs[0], emissnode.inputs[0]) #Plug emission into output nodetree.links.new(emissnode.outputs[0], m_output_node.inputs[0]) #------------Long Name Truncation----------------------- trunc_num = 0 trunc_dict = {} def trunc_if_needed(objectname): global trunc_num global trunc_dict #If we already truncated this, just return that if objectname in trunc_dict: printmsg(f"Object name {objectname} was previously truncated. 
Returning that.") return trunc_dict[objectname] #If not, let's see if we have to truncate it elif len(objectname) >= 38: printmsg(f"Object name {objectname} is too long and will be truncated") trunc_num += 1 truncdobjectname = objectname[0:34] + "~" + str(trunc_num) trunc_dict[objectname] = truncdobjectname return truncdobjectname #If nothing else, just return the original name else: return objectname def untrunc_if_needed(objectname): global trunc_num global trunc_dict for t in trunc_dict: if trunc_dict[t] == objectname: printmsg(f"Returning untruncated value {t}") return t return objectname def ShowMessageBox(messageitems_list, title, icon = 'INFO'): def draw(self, context): for m in messageitems_list: self.layout.label(text=m) bpy.context.window_manager.popup_menu(draw, title = title, icon = icon) #---------------Bake Progress-------------------------------------------- def write_bake_progress(current_operation, total_operations): progress = int((current_operation / total_operations) * 100) t = Path(tempfile.gettempdir()) t = t / f"OmniBake_Bgbake_{os.getpid()}" with open(str(t), "w") as progfile: progfile.write(str(progress)) #---------------End Bake Progress-------------------------------------------- past_items_dict = {} def spot_new_items(initialise=True, item_type="images"): global past_items_dict if item_type == "images": source = bpy.data.images elif item_type == "objects": source = bpy.data.objects elif item_type == "collections": source = bpy.data.collections #First run if initialise: #Set to empty list for this item type past_items_dict[item_type] = [] for source_item in source: past_items_dict[item_type].append(source_item.name) return True else: #Get the list of items for this item type from the dict past_items_list = past_items_dict[item_type] new_item_list_names = [] for source_item in source: if source_item.name not in past_items_list: new_item_list_names.append(source_item.name) return new_item_list_names #---------------Validation 
Checks------------------------------------------- def checkMatsValidforPBR(mat): nodes = mat.node_tree.nodes valid = True invalid_node_names = [] for node in nodes: if len(node.outputs) > 0: if node.outputs[0].type == "SHADER" and not (node.bl_idname == "ShaderNodeBsdfPrincipled" or node.bl_idname == "ShaderNodeMixShader" or node.bl_idname == "ShaderNodeEmission"): #But is it actually connected to anything? if len(node.outputs[0].links) >0: invalid_node_names.append(node.name) return invalid_node_names def checkExtraMatsValidforPBR(mat): nodes = mat.node_tree.nodes valid = True invalid_node_names = [] for node in nodes: if len(node.outputs) > 0: if node.outputs[0].type == "SHADER" and not (node.bl_idname == "ShaderNodeBsdfPrincipled" or node.bl_idname == "ShaderNodeMixShader" or node.bl_idname == "ShaderNodeAddShader" or node.bl_idname == "ShaderNodeEmission" or node.bl_idname == "ShaderNodeBsdfGlossy" or node.bl_idname == "ShaderNodeBsdfGlass" or node.bl_idname == "ShaderNodeBsdfRefraction" or node.bl_idname == "ShaderNodeBsdfDiffuse" or node.bl_idname == "ShaderNodeBsdfAnisotropic" or node.bl_idname == "ShaderNodeBsdfTransparent"): #But is it actually connected to anything? if len(node.outputs[0].links) >0: invalid_node_names.append(node.name) print(invalid_node_names) return invalid_node_names def deselect_all_not_mesh(): import bpy for obj in bpy.context.selected_objects: if obj.type != "MESH": obj.select_set(False) #Do we still have an active object? 
if bpy.context.active_object == None: #Pick arbitary bpy.context.view_layer.objects.active = bpy.context.selected_objects[0] def fix_invalid_material_config(obj): if "OmniBake_Placeholder" in bpy.data.materials: mat = bpy.data.materials["OmniBake_Placeholder"] else: mat = bpy.data.materials.new("OmniBake_Placeholder") bpy.data.materials["OmniBake_Placeholder"].use_nodes = True # Assign it to object if len(obj.material_slots) > 0: #Assign it to every empty slot for slot in obj.material_slots: if slot.material == None: slot.material = mat else: # no slots obj.data.materials.append(mat) #All materials must use nodes for slot in obj.material_slots: mat = slot.material if mat.use_nodes == False: mat.use_nodes = True return True def sacle_image_if_needed(img): printmsg("Scaling images if needed") context = bpy.context width = img.size[0] height = img.size[1] proposed_width = 0 proposed_height = 0 if context.scene.texture_res == "0.5k": proposed_width, proposed_height = 512,512 if context.scene.texture_res == "1k": proposed_width, proposed_height = 1024,1024 if context.scene.texture_res == "2k": proposed_width, proposed_height = 1024*2,1024*2 if context.scene.texture_res == "4k": proposed_width, proposed_height = 1024*4,1024*4 if context.scene.texture_res == "8k": proposed_width, proposed_height = 1024*8,1024*8 if width != proposed_width or height != proposed_height: img.scale(proposed_width, proposed_height) def set_image_internal_col_space(image, thisbake): if thisbake != "diffuse": image.colorspace_settings.name = "Non-Color" #------------------------Allow Additional Shaders---------------------------- def findProperInput(OName, pnode): for input in pnode.inputs: if OName == "Anisotropy": OName = "Anisotropic" if OName == "Rotation": OName = "Anisotropic Rotation" if OName == "Color": OName = "Base Color" if input.identifier == OName: return input def useAdditionalShaderTypes(nodetree, nodes): count = 0 for node in nodes: if (node.type == "BSDF_GLOSSY" or node.type == 
"BSDF_GLASS" or node.type == "BSDF_REFRACTION" or node.type == "BSDF_DIFFUSE" or node.type == "BSDF_ANISOTROPIC" or node.type == "BSDF_TRANSPARENT" or node.type == "ADD_SHADER"): if node.type == "ADD_SHADER": pnode = nodes.new("ShaderNodeMixShader") pnode.label = "mixNew" + str(count) else: pnode = nodes.new("ShaderNodeBsdfPrincipled") pnode.label = "BsdfNew" + str(count) pnode.location = node.location pnode.use_custom_color = True pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467) for input in node.inputs: if len(input.links) != 0: fromNode = input.links[0].from_node for output in fromNode.outputs: if len(output.links) != 0: for linkOut in output.links: if linkOut.to_node == node: inSocket = findProperInput(input.identifier, pnode) nodetree.links.new(output, inSocket) else: inSocket = findProperInput(input.identifier, pnode) if inSocket.name != "Shader": inSocket.default_value = input.default_value if len(node.outputs[0].links) != 0: for link in node.outputs[0].links: toNode = link.to_node for input in toNode.inputs: if len(input.links) != 0: if input.links[0].from_node == node: nodetree.links.new(pnode.outputs[0], input) if node.type == "BSDF_REFRACTION" or node.type == "BSDF_GLASS": pnode.inputs[15].default_value = 1 if node.type == "BSDF_DIFFUSE": pnode.inputs[5].default_value = 0 if node.type == "BSDF_ANISOTROPIC" or node.type == "BSDF_GLOSSY": pnode.inputs[4].default_value = 1 pnode.inputs[5].default_value = 0 if node.type == "BSDF_TRANSPARENT": pnode.inputs[7].default_value = 0 pnode.inputs[15].default_value = 1 pnode.inputs[14].default_value = 1 pnode.hide = True pnode.select = False nodetree.nodes.remove(node) count += 1
38,803
Python
36.419479
252
0.592093
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. bl_info = { "name": "Audio2Face Tools", "author": "NVIDIA Corporation", "version": (1, 0, 1), "blender": (3, 4, 0), "location": "View3D > Toolbar > Omniverse", "description": "NVIDIA Omniverse tools for working with Audio2Face", "warning": "", "doc_url": "", "category": "Omniverse", } ## ====================================================================== import sys from importlib import reload import bpy from bpy.props import (BoolProperty, CollectionProperty, EnumProperty, FloatProperty, IntProperty, PointerProperty, StringProperty) from omni_audio2face import (operators, ui) for module in (operators, ui): reload(module) from omni_audio2face.ui import OBJECT_PT_Audio2FacePanel from omni_audio2face.operators import ( OMNI_OT_PrepareScene, OMNI_OT_MarkExportMesh, OMNI_OT_ExportPreparedScene, OMNI_OT_ChooseUSDFile, OMNI_OT_ChooseAnimCache, OMNI_OT_ImportRigFile, OMNI_OT_TransferShapeData, OMNI_OT_ImportAnimation, ) ## ====================================================================== class Audio2FaceToolsSettings(bpy.types.PropertyGroup): ## shapes stuff use_face_selection: BoolProperty(description="Use Face Selection") export_project: BoolProperty(description="Export Project File", default=True) export_filepath: StringProperty(description="Export Path") import_filepath: StringProperty(description="Shapes Import Path") ## anim import settings import_anim_path: StringProperty(description="Anim Cache Path") anim_start_type: EnumProperty( items=[("CURRENT", "At Play Head", "Load Clip at the playhead"), ("CUSTOM", "Custom", "Choose a custom start frame")], default="CURRENT") anim_start_frame: IntProperty(default=0) anim_frame_rate: FloatProperty(default=60.0, min=1.0) anim_apply_scale: BoolProperty(default=True) anim_set_range: BoolProperty(default=False) anim_load_to: EnumProperty( items=[("CURRENT", "Current Action", "Load curves onto current Action"), ("CLIP", "Clip", "Load curves as a new Action 
for NLE use")], default="CURRENT") anim_overwrite: BoolProperty(default=False, name="Overwrite Existing Clips") ## Store pointers to all the meshes for the full setup. mesh_skin: PointerProperty(type=bpy.types.Object) mesh_tongue: PointerProperty(type=bpy.types.Object) mesh_eye_left: PointerProperty(type=bpy.types.Object) mesh_eye_right: PointerProperty(type=bpy.types.Object) mesh_gums_lower: PointerProperty(type=bpy.types.Object) transfer_apply_fix: BoolProperty(name="Apply Fix", description="Apply Basis to points not part of the head during transfer", default=False) ## ====================================================================== classes = ( Audio2FaceToolsSettings, OBJECT_PT_Audio2FacePanel, OMNI_OT_PrepareScene, OMNI_OT_MarkExportMesh, OMNI_OT_ExportPreparedScene, OMNI_OT_ChooseUSDFile, OMNI_OT_ChooseAnimCache, OMNI_OT_ImportRigFile, OMNI_OT_TransferShapeData, OMNI_OT_ImportAnimation, ) def register(): unregister() for item in classes: bpy.utils.register_class(item) bpy.types.Scene.audio2face = bpy.props.PointerProperty(type=Audio2FaceToolsSettings) bpy.types.Object.a2f_original = bpy.props.PointerProperty(type=bpy.types.Object) version = bl_info["version"] version = str(version[0]) + str(version[1]) + str(version[2]) OBJECT_PT_Audio2FacePanel.version = f"{str(version[0])}.{str(version[1])}.{str(version[2])}" ## ====================================================================== def unregister(): # User preferences for item in classes: try: bpy.utils.unregister_class(item) except: continue if hasattr(bpy.types.Scene, "audio2face"): del bpy.types.Scene.audio2face if hasattr(bpy.types.Object, "a2f_original"): del bpy.types.Object.a2f_original
3,862
Python
30.153226
93
0.682289
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/operators.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import json import os import re import sys from typing import * import numpy as np import bpy import bmesh from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty) from bpy.types import (Collection, Context, Event, Mesh, Object, Scene) from mathutils import * ## ====================================================================== def _get_filepath(scene:Scene, as_import:bool=False) -> str: if as_import: result = scene.audio2face.import_filepath.strip() else: result = scene.audio2face.export_filepath.strip() return result ## ====================================================================== def _get_or_create_collection(collection:Collection, name:str) -> Collection: """Find a child collection of the specified collection, or create it if it does not exist.""" result = collection.children.get(name, None) if not result: result = bpy.data.collections.new(name) collection.children.link(result) ## Make sure this is visible or things'll break in other ways down the line if result.is_evaluated: result = result.original result.hide_render = result.hide_viewport = result.hide_select = False result_lc = [x for x in bpy.context.view_layer.layer_collection.children if x.collection is result] if len(result_lc): result_lc = result_lc[0] result_lc.exclude = False result_lc.hide_viewport = False else: print(f"-- Warning: No layer collection found for {result.name}") return result ## ====================================================================== def ensure_scene_collections(scene:Scene) -> Tuple[bpy.types.Collection]: """Make sure that all Audio2Face scene collections exist.""" a2f_collection = _get_or_create_collection(scene.collection, "Audio2Face") a2f_export = _get_or_create_collection(a2f_collection, "A2F Export") a2f_export_static = _get_or_create_collection(a2f_export, "A2F Export Static") a2f_export_dynamic = _get_or_create_collection(a2f_export, "A2F Export 
Dynamic") return a2f_collection, a2f_export, a2f_export_static, a2f_export_dynamic ## ====================================================================== def _get_base_collection() -> Collection: return bpy.data.collections.get("Audio2Face", None) def _get_import_collection() -> Collection: return bpy.data.collections.get("A2F Import", None) def _get_export_collection() -> Collection: return bpy.data.collections.get("A2F Export", None) ## ====================================================================== class OMNI_OT_PrepareScene(bpy.types.Operator): """Prepares the active scene for interaction with Audio2Face""" bl_idname = "audio2face.prepare_scene" bl_label = "Prepare Scene for Audio2Face" bl_options = {"REGISTER", "UNDO"} @classmethod def poll(cls, context:Context) -> bool: return bool(context.scene) def execute(self, context:Context) -> Set[str]: scene = context.scene ensure_scene_collections(scene) self.report({"INFO"}, "A2F: Scene is prepped.") return {'FINISHED'} ## ====================================================================== def selected_mesh_objects(context:Context) -> List[Object]: """Return a filtered list of Mesh objects from the context.""" a2f_collection = bpy.data.collections.get("Audio2Face", None) export_objects = {x.name for x in a2f_collection.all_objects} if a2f_collection else {} result = [x for x in context.selected_objects if x.data and isinstance(x.data, bpy.types.Mesh)] result = list(filter(lambda x: not x.name in export_objects and x.data and isinstance(x.data, bpy.types.Mesh), result)) return result ## ====================================================================== def export_mesh_poll(context:Context) -> bool: """ Check for a mesh object selection if use_face_selection is false, or an edit mode face selection otherwise. 
""" valid_mesh = len(selected_mesh_objects(context)) is_poly_edit_mode = context.tool_settings.mesh_select_mode[2] if context.scene.audio2face.use_face_selection: if (context.mode == "EDIT_MESH" and is_poly_edit_mode and valid_mesh and len(context.active_object.data.count_selected_items()) and context.active_object.data.count_selected_items()[2]): return True else: if context.mode == "OBJECT" and valid_mesh: return True return False ## ====================================================================== def make_valid_name(name:str) -> str: result = name.replace("-","_").replace(" ","_").replace(".","_") return result ## ====================================================================== def process_export_mesh(orig:Object, target_collection:Collection, is_dynamic:bool, split:bool): """ Processes the selected mesh for export, adding original vertex indices and copying it over into the target collection. """ assert isinstance(orig.data, bpy.types.Mesh) obj_dupe_name = make_valid_name(orig.name) + "__Audio2Face_EX" if obj_dupe_name in bpy.data.objects: bpy.data.objects.remove(bpy.data.objects[obj_dupe_name]) mesh_dupe = orig.data.copy() mesh_dupe.name = make_valid_name(orig.data.name) + "__Audio2Face_EX" obj_dupe = bpy.data.objects.new(obj_dupe_name, mesh_dupe) target_collection.objects.link(obj_dupe) obj_dupe.a2f_original = orig bpy.ops.object.mode_set(mode="OBJECT") orig.select_set(False) obj_dupe.select_set(True) ## Clean out all extraneous data. for item in obj_dupe.modifiers, obj_dupe.vertex_groups: item.clear() obj_dupe.shape_key_clear() ## Add a custom data layer to remember the original point indices. attr = obj_dupe.data.attributes.get("index_orig", obj_dupe.data.attributes.new("index_orig", "INT", "POINT")) vertex_count = len(obj_dupe.data.vertices) attr.data.foreach_set("value", np.arange(vertex_count)) bpy.ops.object.mode_set(mode="OBJECT") if split: ## Delete all unselected faces. 
deps = bpy.context.evaluated_depsgraph_get() indices = [x.index for x in orig.data.polygons if not x.select] bm = bmesh.new() bm.from_object(obj_dupe, deps) bm.faces.ensure_lookup_table() ## Must convert to list; delete does not accept map objects selected = list(map(lambda x: bm.faces[x], indices)) bpy.ops.object.mode_set(mode="EDIT") bmesh.ops.delete(bm, geom=selected, context="FACES") bpy.ops.object.mode_set(mode="OBJECT") bm.to_mesh(obj_dupe.data) ## Make sure to snap the object into place. obj_dupe.matrix_world = orig.matrix_world.copy() return obj_dupe ## =====================================================a================= class OMNI_OT_MarkExportMesh(bpy.types.Operator): """Tags the selected mesh as static for Audio2Face.""" bl_idname = "audio2face.mark_export_mesh" bl_label = "Mark Mesh for Export" bl_options = {"REGISTER", "UNDO"} is_dynamic: BoolProperty(description="Mesh is Dynamic", default=False) @classmethod def poll(cls, context:Context) -> bool: return export_mesh_poll(context) def execute(self, context:Context) -> Set[str]: a2f_collection, a2f_export, a2f_export_static, a2f_export_dynamic = ensure_scene_collections(context.scene) target_collection = a2f_export_dynamic if self.is_dynamic else a2f_export_static split = context.scene.audio2face.use_face_selection processed_meshes = [] for mesh in selected_mesh_objects(context): context.view_layer.objects.active = mesh result = process_export_mesh(mesh, target_collection, self.is_dynamic, split) processed_meshes.append(result) context.view_layer.objects.active = processed_meshes[-1] return {'FINISHED'} ## ====================================================================== class OMNI_OT_ChooseUSDFile(bpy.types.Operator): """File chooser with proper extensions.""" bl_idname = "collections.usd_choose_file" bl_label = "Choose USD File" bl_options = {"REGISTER"} ## Required for specifying extensions. 
filepath: StringProperty(subtype="FILE_PATH") operation: EnumProperty(items=[("IMPORT", "Import", ""),("EXPORT", "Export", "")], default="IMPORT", options={"HIDDEN"}) filter_glob: StringProperty(default="*.usd;*.usda;*.usdc", options={"HIDDEN"}) check_existing: BoolProperty(default=True, options={"HIDDEN"}) def execute(self, context:Context): real_path = os.path.abspath(bpy.path.abspath(self.filepath)) real_path = real_path.replace("\\", "/") if self.operation == "EXPORT": context.scene.audio2face.export_filepath = real_path else: context.scene.audio2face.import_filepath = real_path return {"FINISHED"} def invoke(self, context:Context, event:Event) -> Set[str]: if len(self.filepath.strip()) == 0: self.filepath = "untitled.usdc" context.window_manager.fileselect_add(self) return {"RUNNING_MODAL"} ## ====================================================================== class OMNI_OT_ChooseAnimCache(bpy.types.Operator): """File chooser with proper extensions.""" bl_idname = "collections.usd_choose_anim_cache" bl_label = "Choose Animation Cache" bl_options = {"REGISTER"} ## Required for specifying extensions. 
filepath: StringProperty(subtype="FILE_PATH") filter_glob: StringProperty(default="*.usd;*.usda;*.usdc;*.json", options={"HIDDEN"}) check_existing: BoolProperty(default=True, options={"HIDDEN"}) def execute(self, context:Context): real_path = os.path.abspath(bpy.path.abspath(self.filepath)) real_path = real_path.replace("\\", "/") context.scene.audio2face.import_anim_path = real_path return {"FINISHED"} def invoke(self, context:Context, event:Event) -> Set[str]: context.window_manager.fileselect_add(self) return {"RUNNING_MODAL"} ## ====================================================================== class OMNI_OT_ExportPreparedScene(bpy.types.Operator): """Exports prepared scene as USD for Audio2Face.""" bl_idname = "audio2face.export_prepared_scene" bl_label = "Export Prepared Scene" bl_options = {"REGISTER"} @classmethod def poll(cls, context:Context) -> bool: a2f_export = _get_export_collection() child_count = len(a2f_export.all_objects) if a2f_export else 0 path = _get_filepath(context.scene) return a2f_export and child_count and len(path) def execute(self, context:Context) -> Set[str]: ## Grab filepath before the scene switches scene = context.scene filepath = _get_filepath(scene) export_scene = bpy.data.scenes.get("a2f_export", bpy.data.scenes.new("a2f_export")) for child_collection in list(export_scene.collection.children): export_scene.collection.children.remove(child_collection) export_collection = _get_export_collection() export_scene.collection.children.link(export_collection) context.window.scene = export_scene args = { "filepath": filepath, "start": scene.frame_current, "end": scene.frame_current, "convert_to_cm": False, "export_lights": False, "export_cameras": False, "export_materials": False, "export_textures": False, "default_prim_path": "/World", "root_prim_path": "/World", } result = bpy.ops.wm.usd_export(**args) context.window.scene = scene bpy.data.scenes.remove(export_scene) export_scene = None ## generate the project file if 
scene.audio2face.export_project: project_filename = os.path.basename(filepath) skin = scene.audio2face.mesh_skin tongue = scene.audio2face.mesh_tongue eye_left = scene.audio2face.mesh_eye_left eye_right= scene.audio2face.mesh_eye_right gums = scene.audio2face.mesh_gums_lower a2f_export_static = bpy.data.collections.get("A2F Export Static", None) static_objects = list(a2f_export_static.objects) if a2f_export_static else [] a2f_export_dynamic = bpy.data.collections.get("A2F Export Dynamic", None) dynamic_objects = list(a2f_export_dynamic.objects) if a2f_export_dynamic else [] for mesh in skin, tongue: if mesh in dynamic_objects: dynamic_objects.pop(dynamic_objects.index(mesh)) for mesh in eye_left, eye_right, gums: if mesh in static_objects: static_objects.pop(static_objects.index(mesh)) transfer_data = "" if skin: transfer_data += '\t\tstring mm:skin = "/World/character_root/{}/{}"\n'.format(make_valid_name(skin.name), make_valid_name(skin.data.name)) if tongue: transfer_data += '\t\tstring mm:tongue = "/World/character_root/{}/{}"\n'.format(make_valid_name(tongue.name), make_valid_name(tongue.data.name)) if eye_left: transfer_data += '\t\tstring[] mm:l_eye = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(eye_left.name), make_valid_name(eye_left.data.name)) if eye_right: transfer_data += '\t\tstring[] mm:r_eye = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(eye_right.name), make_valid_name(eye_right.data.name)) if gums: transfer_data += '\t\tstring[] mm:gums = ["/World/character_root/{}/{}"]\n'.format(make_valid_name(gums.name), make_valid_name(gums.data.name)) if len(static_objects): transfer_data += '\t\tstring[] mm:extra_static = [{}]\n'.format( ', '.join(['"/World/character_root/{}/{}"'.format(make_valid_name(x.name), make_valid_name(x.data.name)) for x in static_objects]) ) if len(dynamic_objects): transfer_data += '\t\tstring[] mm:extra_dynamic = [{}]\n'.format( ', '.join(['"/World/character_root/{}/{}"'.format(make_valid_name(x.name), 
make_valid_name(x.data.name)) for x in dynamic_objects]) ) template = "" template_path = os.sep.join([os.path.dirname(os.path.abspath(__file__)), "templates", "project_template.usda"]) with open(template_path, "r") as fp: template = fp.read() template = template.replace("%filepath%", project_filename) template = template.replace("%transfer_data%", transfer_data) project_usd_filepath = filepath.rpartition(".")[0] + "_project.usda" with open(project_usd_filepath, "w") as fp: fp.write(template) self.report({"INFO"}, f"Exported project to: '{project_usd_filepath}'") else: self.report({"INFO"}, f"Exported head to: '{filepath}'") return result ## ====================================================================== def _abs_path(file_path:str) -> str: if not len(file_path) > 2: return file_path if file_path[0] == '/' and file_path[1] == '/': file_path = bpy.path.abspath(file_path) return os.path.abspath(file_path) ## ====================================================================== class OMNI_OT_ImportRigFile(bpy.types.Operator): """Imports a rigged USD file from Audio2Face""" bl_idname = "audio2face.import_rig" bl_label = "Import Rig File" bl_options = {"REGISTER", "UNDO"} @classmethod def poll(cls, context:Context) -> bool: return len(_get_filepath(context.scene, as_import=True)) def execute(self, context:Context) -> Set[str]: filepath = _get_filepath(context.scene, as_import=True) args = { "filepath": filepath, "import_skeletons": False, "import_materials": False, } scene = context.scene ## Switching the active collection requires this odd code. 
base = _get_or_create_collection(scene.collection, "Audio2Face") import_col = _get_or_create_collection(base, "A2F Import") base_lc = [x for x in context.view_layer.layer_collection.children if x.collection is base][0] import_lc = [x for x in base_lc.children if x.collection is import_col][0] context.view_layer.active_layer_collection = import_lc if not context.mode == 'OBJECT': try: bpy.ops.object.mode_set(mode="OBJECT") except RuntimeError: pass if len(import_col.all_objects): bpy.ops.object.select_all(action="DESELECT") ## Let's clean out the import collection on each go to keep things simple bpy.ops.object.select_same_collection(collection=import_col.name) bpy.ops.object.delete() ## Make sure the import collection is selected so the imported objects ## get assigned to it. # scene.view_layers[0].active_layer_collection.collection = import_col bpy.ops.object.select_all(action='DESELECT') override = context.copy() override["collection"] = bpy.data.collections["A2F Import"] result = bpy.ops.wm.usd_import(**args) roots = [x for x in import_col.objects if not x.parent] for root in roots: ## bugfix: don't reset rotation, since there may have been a rotation ## carried over from the blender scene and we want to line up visibly ## even though it has no bearing on the shape transfer. root.scale = [1.0, 1.0, 1.0] ## Strip out any childless empties, like joint1. 
empties = [x for x in import_col.objects if not len(x.children) and x.type == "EMPTY"] for empty in empties: bpy.data.objects.remove(empty) self.report({"INFO"}, f"Imported Rig from: {filepath}") return {"FINISHED"} ## ====================================================================== class AnimData: """Small data holder unifying what's coming in from JSON and USD(A)""" def __init__(self, clip_name:str, shapes:List[str], key_data:List[List[float]], start_frame:int=0, frame_rate:float=60.0): self.clip_name = clip_name self.shapes = shapes self.num_frames = len(key_data) self.key_data = self._swizzle_data(key_data) self.start_frame = start_frame self.frame_rate = frame_rate def curves(self): for index, name in enumerate(self.shapes): yield f'key_blocks["{name}"].value', self.key_data[index] def _swizzle_data(self, data:List[List[float]]) -> List[List[float]]: """Massage the data a bit for writing directly to the curves""" result = [] for index, _ in enumerate(self.shapes): result.append( [data[frame][index] for frame in range(self.num_frames)] ) return result class OMNI_OT_ImportAnimation(bpy.types.Operator): """Imports a shape key animation from an Audio2Face USDA file or JSON""" bl_idname = "audio2face.import_animation" bl_label = "Import Animation" bl_options = {"REGISTER", "UNDO"} start_type: EnumProperty( name="Start Type", items=[("CURRENT", "Current Action", "Load Clip at the playhead"), ("CUSTOM", "Custom", "Choose a custom start frame")], default="CURRENT") start_frame: IntProperty(default=1, name="Start Frame", description="Align start of animation to this frame") frame_rate: FloatProperty(default=60.0, min=1.0, name="Frame Rate", description="Frame Rate of file you're importing") set_range: BoolProperty(default=False, name="Set Range", description="If checked, set the scene animation frame range to the imported file's range") apply_scale: BoolProperty(default=False, name="Apply Clip Scale", description="If checked and the clip framerate differs from 
the scene, scale the keys to match") load_to: EnumProperty( name="Load To", description="Load animation to current Action, or to a new Action Clip", items=[("CURRENT", "Current Action", "Load curves onto current Action"), ("CLIP", "Clip", "Load curves as a new Action Clip (for NLE use)")], default="CURRENT") overwrite: BoolProperty(default=False, name="Overwrite Existing Clips") @classmethod def poll(cls, context:Context) -> bool: have_file = len(context.scene.audio2face.import_anim_path) have_mesh = context.active_object and context.active_object.type == "MESH" have_selection = context.active_object in context.selected_objects is_object_mode = context.mode == "OBJECT" return all([have_file, have_mesh, have_selection, is_object_mode]) def apply_animation(self, animation:AnimData, ob:Object): shapes = ob.data.shape_keys action = None start_frame = bpy.context.scene.frame_current if self.start_type == "CURRENT" else self.start_frame if shapes.animation_data is None: shapes.animation_data_create() nla_tracks = shapes.animation_data.nla_tracks if self.load_to == "CLIP": def _predicate(track): for strip in track.strips: if strip.action and strip.action.name == animation.clip_name: return True return False if len(nla_tracks): existing_tracks = list(filter(_predicate, nla_tracks)) if len(existing_tracks) and not self.overwrite: self.report({"ERROR"}, f"Clip named {animation.clip_name} already exists; aborting.") return False else: ## remove the track(s) specified for overwrites for track in existing_tracks: self.report({"INFO"}, f"Removing old track {track.name}") nla_tracks.remove(track) if not animation.clip_name in bpy.data.actions: bpy.data.actions.new(animation.clip_name) action = bpy.data.actions[animation.clip_name] offset = 0 else: if not shapes.animation_data.action: bpy.data.actions.new(animation.clip_name) action = shapes.animation_data.action = bpy.data.actions[animation.clip_name] else: action = shapes.animation_data.action offset = start_frame ## clean out 
old curves to_clean = [] for curve in action.fcurves: for name in animation.shapes: if f'["{name}"]' in curve.data_path: to_clean.append(curve) for curve in to_clean: action.fcurves.remove(curve) scene_framerate = bpy.context.scene.render.fps clip_scale = 1.0 clip_to_scene_scale = scene_framerate / animation.frame_rate if self.apply_scale and self.load_to == "CURRENT" and not (int(animation.frame_rate) == int(scene_framerate)): clip_scale = clip_to_scene_scale for data_path, values in animation.curves(): curve = action.fcurves.new(data_path) curve.keyframe_points.add(len(values)) for index, value in enumerate(values): curve.keyframe_points[index].co = (float(index) * clip_scale + offset, value) if self.load_to == "CLIP": ## I'm really not sure if this is the correct idea, but when loading as clip ## we push a new NLA_Track and add the action as a strip, then offset it using ## the strip frame start. track = nla_tracks.new() track.name = animation.clip_name + "_NLE" strip = track.strips.new(animation.clip_name, start_frame, action) if self.apply_scale: strip.scale = clip_to_scene_scale for item in [x for x in nla_tracks if not x == track]: item.select = False track.select = True def load_animation_usda(self, clip_name:str, file_path:str) -> AnimData: """ Do a quick parse of the input USDA file in plain text, as we can't use the USD Python API yet. !TODO: When the USD Python API is available, switch to it instead. """ with open(file_path, "r") as fp: source = fp.read().strip() ## quick sanity checks; not robust! 
if not all([ source.startswith("#usda"), "framesPerSecond = " in source, "uniform token[] blendShapes = [" in source, "float[] blendShapeWeights.timeSamples = {" in source, "token[] custom:mh_curveNames = [" in source, "float[] custom:mh_curveValues.timeSamples = {" in source]): self.report({"ERROR"}, f"USDA not a weights animation cache: {file_path}") return None end_time = int(source.partition("endTimeCode = ")[-1].partition("\n")[0]) frame_rate = int(source.partition("framesPerSecond = ")[-1].partition("\n")[0]) start_frame = int(source.partition("startTimeCode = ")[-1].partition("\n")[0]) shape_names = source.partition("uniform token[] blendShapes = [")[-1].partition("]")[0] shape_names = shape_names.replace('"','').replace(' ', '').split(',') ## strip to timeSamples, split lines, then split off the index and parse out the arrays into floats samples = source.partition("float[] blendShapeWeights.timeSamples = {")[-1].partition("}")[0].strip().split('\n') weights = [list(map(float, x.partition(": [")[-1].rpartition("]")[0].replace(" ", "").split(","))) for x in samples] ## capture frame rate frame_rate = float(source.partition("framesPerSecond = ")[-1].partition("\n")[0]) return AnimData(clip_name=clip_name, shapes=shape_names, key_data=weights, frame_rate=frame_rate) def load_animation_json(self, clip_name:str, file_path:str) -> AnimData: assert file_path.lower().endswith(".json") file_path = _abs_path(file_path) data = None with open(file_path, "r") as fp: try: data = json.load(fp) except: return None if not "facsNames" in data or not "weightMat" in data or not "numFrames" in data: self.report({"ERROR"}, f"Malformed JSON file (missing data): {file_path}") return None if not data["numFrames"] == len(data["weightMat"]): self.report({"ERROR"}, f"Malformed JSON: malformed file. 
Expected {data['numFrames']} frames, found {len(data['weightMat'])} -- {file_path}") return None return AnimData(clip_name=clip_name, shapes=data["facsNames"], key_data=data["weightMat"], frame_rate=self.frame_rate) def load_animation(self, file_path:str, ob:Object) -> bool: assert ob and isinstance(ob, (bpy.types.Object)) if not file_path.endswith((".usda", ".json")): self.report({"Error"}, f"Path should point to a USDA or JSON file: {file_path}") return False clip_name = os.path.basename(file_path).partition(".")[0] self.report({"INFO"}, f"Loading anim: {file_path}") if file_path.endswith(".json"): data = self.load_animation_json(clip_name, file_path) else: data = self.load_animation_usda(clip_name, file_path) if data is None: self.report({"ERROR"}, f"Unable to load data from file {file_path}") return False self.apply_animation(data, ob) return True def execute(self, context:Context) -> Set[str]: scene = context.scene ob = context.active_object if not self.load_animation(scene.audio2face.import_anim_path, ob): return {"CANCELLED"} return {"FINISHED"} ## ====================================================================== class OMNI_OT_TransferShapeData(bpy.types.Operator): """Transfers shape data from imported rig heads to the original meshes.""" bl_idname = "audio2face.transfer_shape_data" bl_label = "Transfer Shape Data" bl_options = {"REGISTER", "UNDO"} apply_fix: BoolProperty(name="Apply Fix", description="Propate Basis shape to all parts of the mesh not covered by the head, to prevent vertex vomit.", default=False) @classmethod def poll(cls, context:Context) -> bool: collection = _get_import_collection() if collection is None: return False meshes = [x.name for x in collection.objects if x.type == "MESH"] return bool(len(meshes)) def _get_collection_meshes(self, collection:Collection) -> List["bpy.data.Mesh"]: result = [x for x in collection.all_objects if x.type == "MESH"] return result def _build_mapping_table(self, import_meshes:Collection, 
export_meshes:Collection) -> Dict: result = {} for imported in import_meshes: ## Intentionally doing the exported data name but the import object name ## because of how the imports work on both sides. token = imported.name.rpartition("__Audio2Face_EX")[0] for exported in export_meshes: exported_token = exported.data.name.rpartition("__Audio2Face_EX")[0] if exported_token == token: result[imported] = exported return result def _transfer_shapes(self, context:Context, source:Object, target:Object, mapping_object:Object) -> int: """ Transfers shapes from the source mesh to the target. :returns: The number of shapes transferred. """ assert source.data and source.data.shape_keys, "Source object has no shape key data." wm = context.window_manager result = 0 ## Run these to make sure they're all visible, checked, and in the view layer a2f_collection, _, _, _ = ensure_scene_collections(context.scene) _get_or_create_collection(a2f_collection, "A2F Import") blocks = source.data.shape_keys.key_blocks total_shapes = len(blocks) if not context.mode == "OBJECT" and context.active_object: bpy.ops.object.mode_set(mode="OBJECT") bpy.ops.object.select_all(action="DESELECT") source.select_set(True) target.select_set(True) context.view_layer.objects.active = target basis = target.data.shape_keys.key_blocks["Basis"] wm.progress_begin(0, total_shapes) start_index = len(target.data.shape_keys.key_blocks) ## Grab the mapping array using the new Attributes API. 
mapping_indices = np.zeros(len(source.data.vertices), dtype=np.int32) attr = mapping_object.data.attributes['index_orig'] attr.data.foreach_get("value", mapping_indices) for index, block in enumerate(blocks): if block.name == "Basis": continue target.shape_key_add(name=block.name, from_mix=False) target_key_block = target.data.shape_keys.key_blocks[block.name] target_key_block.relative_key = basis for index, target_index in enumerate(mapping_indices): target_key_block.data[target_index].co = block.data[index].co self.report({"INFO"}, f"Transferred shape {block.name} from {source.name} to {target.name}") result += 1 wm.progress_update(index) wm.progress_end() if self.apply_fix: self._select_verts_inverse(target, mapping_indices) bpy.ops.object.mode_set(mode="EDIT") wm.progress_begin(0, total_shapes) for index in range(start_index, start_index+total_shapes-1): shape = target.data.shape_keys.key_blocks[index] self.report({"INFO"}, f"Fixing shape: {shape.name}") target.active_shape_key_index = index bpy.ops.mesh.blend_from_shape(shape='Basis', blend=1.0, add=False) wm.progress_update(index) bpy.ops.object.mode_set(mode="OBJECT") wm.progress_end() return result def _select_verts_inverse(self, ob:Object, mapping_indices:Iterable[int]) -> int: """ Set the vertex selection of the target object to the inverse of what's in mapping_indices through the bmesh API. :returns: The number of vertices selected. """ result = 0 bm = bmesh.new() bm.from_mesh(ob.data) for v in bm.verts: should_set = not (v.index in mapping_indices) v.select_set(should_set) result += int(should_set) bm.to_mesh(ob.data) def _clean_shapes(self, ob:Object, shapes_list:List[str]) -> int: """ For each named shape, remove it from ob's shape keys. 
:returns: The number of shapes removed """ self.report({"INFO"}, f"Cleaning {', '.join(shapes_list)}") if ob.data.shape_keys is None: return 0 result = 0 for shape in shapes_list: key = ob.data.shape_keys.key_blocks.get(shape) if key: ob.shape_key_remove(key) result +=1 return result def execute(self, context:Context) -> Set[str]: ## Transfer shape data over automatically scene = context.scene export_meshes = self._get_collection_meshes(_get_export_collection()) import_meshes = self._get_collection_meshes(_get_import_collection()) total = 0 mapping_table = self._build_mapping_table(import_meshes, export_meshes).items() self.report({"INFO"}, f"{mapping_table}") for source, mapping_object in mapping_table: ## hop to the true original mesh target = mapping_object.a2f_original source_shapes = [x.name for x in source.data.shape_keys.key_blocks if not x.name == "Basis"] count = self._clean_shapes(target, source_shapes) self.report({"INFO"}, f"Cleaned {count} shape{'' if count == 1 else 's'} from {target.name}") ## regrab the target object now that it's been modified and we're ## holding onto an old pointer target = mapping_object.a2f_original ## bugfix: add a Basis target if none exists if target.data.shape_keys is None or not "Basis" in target.data.shape_keys.key_blocks: target.shape_key_add(name="Basis", from_mix=False) result = self._transfer_shapes(context, source, target, mapping_object) self.report({"INFO"}, f"Transferred {result} shape{'' if result == 1 else 's'} from {source.name} to {target.name}") total += result self.report({"INFO"}, f"Transferred {total} total shape{'' if total == 1 else 's'}") return {"FINISHED"}
31,692
Python
35.220571
151
0.669033
NVIDIA-Omniverse/blender_omniverse_addons/omni_audio2face/ui.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import os from typing import * import bpy from bpy.utils import previews from omni_audio2face.operators import ( OMNI_OT_PrepareScene, OMNI_OT_MarkExportMesh, OMNI_OT_ChooseUSDFile, OMNI_OT_ChooseAnimCache, OMNI_OT_ExportPreparedScene, OMNI_OT_ImportRigFile, OMNI_OT_TransferShapeData, OMNI_OT_ImportAnimation, ) ## ====================================================================== def preload_icons() -> previews.ImagePreviewCollection: """Preload icons used by the interface.""" icons_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons") all_icons = { "AUDIO2FACE": "omni_audio2face.png", } preview = previews.new() for name, filepath in all_icons.items(): preview.load(name, os.path.join(icons_directory, filepath), "IMAGE") return preview ## ====================================================================== class OBJECT_PT_Audio2FacePanel(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = "Omniverse" bl_label = "Audio2Face" bl_options = {"DEFAULT_CLOSED"} version = "0.0.0" icons = preload_icons() def draw_header(self, context): self.layout.label(text="", icon_value=self.icons["AUDIO2FACE"].icon_id) # draw the panel def draw(self, context): use_face_selection = context.scene.audio2face.use_face_selection is_poly_edit_mode = context.tool_settings.mesh_select_mode[2] and context.mode == "EDIT_MESH" a2f_export_static = bpy.data.collections.get("A2F Export Static", None) a2f_export_dynamic = bpy.data.collections.get("A2F Export Dynamic", None) layout = self.layout layout.label(text="Face Prep and Export", icon="EXPORT") row = layout.row(align=True) op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Static") op.is_dynamic = False op = row.operator(OMNI_OT_MarkExportMesh.bl_idname, text="Export Dynamic") op.is_dynamic = True row = layout.row(align=True) row.prop(context.scene.audio2face, "use_face_selection", text="") if use_face_selection 
and not is_poly_edit_mode: row.label(text="Use Faces: Must be in Polygon Edit Mode!", icon="ERROR") else: row.label(text="Use Face Selection?") ## mesh selections col = layout.column(align=True) if a2f_export_dynamic: col.prop_search(context.scene.audio2face, "mesh_skin", a2f_export_dynamic, "objects", text="Skin Mesh: ") col.prop_search(context.scene.audio2face, "mesh_tongue", a2f_export_dynamic, "objects", text="Tongue Mesh: ") else: col.label(text="Dynamic Meshes are required to set Skin and Tongue", icon="ERROR") col.label(text=" ") if a2f_export_static: col.prop_search(context.scene.audio2face, "mesh_eye_left", a2f_export_static, "objects", text="Left Eye Mesh: ") col.prop_search(context.scene.audio2face, "mesh_eye_right", a2f_export_static, "objects", text="Right Eye Mesh: ") col.prop_search(context.scene.audio2face, "mesh_gums_lower", a2f_export_static, "objects", text="Lower Gums Mesh: ") else: col.label(text="Static Meshes are required to set Eyes", icon="ERROR") col.label(text=" ") col = layout.column(align=True) row = col.row(align=True) row.prop(context.scene.audio2face, "export_filepath", text="Export Path: ") op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER") op.operation = "EXPORT" col.prop(context.scene.audio2face, "export_project", text="Export With Project File") row = col.row(align=True) collection = bpy.data.collections.get("A2F Export", None) child_count = len(collection.all_objects) if collection else 0 args = { "text": "Export Face USD" if child_count else "No meshes available for Export", } op = row.operator(OMNI_OT_ExportPreparedScene.bl_idname, **args) ## Import Side -- after Audio2Face has transferred the shapes layout.separator() layout.label(text="Face Shapes Import", icon="IMPORT") col = layout.column(align=True) row = col.row(align=True) row.prop(context.scene.audio2face, "import_filepath", text="Shapes Import Path") op = row.operator(OMNI_OT_ChooseUSDFile.bl_idname, text="", icon="FILE_FOLDER") 
op.operation = "IMPORT" col = layout.column(align=True) col.operator(OMNI_OT_ImportRigFile.bl_idname) row = col.row(align=True) op = row.operator(OMNI_OT_TransferShapeData.bl_idname) op.apply_fix = context.scene.audio2face.transfer_apply_fix row.prop(context.scene.audio2face, "transfer_apply_fix", icon="MODIFIER", text="") col = layout.column(align=True) col.label(text="Anim Cache Path") row = col.row(align=True) row.prop(context.scene.audio2face, "import_anim_path", text="") row.operator(OMNI_OT_ChooseAnimCache.bl_idname, text="", icon="FILE_FOLDER") if context.scene.audio2face.import_anim_path.lower().endswith(".json"): col.prop(context.scene.audio2face, "anim_frame_rate", text="Source Framerate") row = col.row(align=True) row.prop(context.scene.audio2face, "anim_start_type", text="Start Frame") if context.scene.audio2face.anim_start_type == "CUSTOM": row.prop(context.scene.audio2face, "anim_start_frame", text="") col.prop(context.scene.audio2face, "anim_load_to", text="Load To") row = col.row(align=True) row.prop(context.scene.audio2face, "anim_apply_scale", text="Apply Clip Scale") if context.scene.audio2face.anim_load_to == "CLIP": row.prop(context.scene.audio2face, "anim_overwrite") op_label = ("Please change to Object Mode" if not context.mode == "OBJECT" else ("Import Animation Clip" if OMNI_OT_ImportAnimation.poll(context) else "Please Select Target Mesh")) op = col.operator(OMNI_OT_ImportAnimation.bl_idname, text=op_label) op.start_type = context.scene.audio2face.anim_start_type op.frame_rate = context.scene.audio2face.anim_frame_rate op.start_frame = context.scene.audio2face.anim_start_frame op.set_range = context.scene.audio2face.anim_set_range op.load_to = context.scene.audio2face.anim_load_to op.overwrite = context.scene.audio2face.anim_overwrite op.apply_scale = context.scene.audio2face.anim_apply_scale
6,105
Python
36.00606
119
0.702867
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/ui.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. from typing import * import bpy from bpy.types import (Context, Object, Material, Scene) from . particle_bake.operators import * from . material_bake.background_bake import bgbake_ops # from .material_bake_complex import OBJECT_OT_omni_material_bake from os.path import join, dirname import bpy.utils.previews from .material_bake import baker ## ====================================================================== def get_icons_directory(): icons_directory = join(dirname(__file__), "icons") return icons_directory ## ====================================================================== def _get_bake_types(scene:Scene) -> List[str]: result = [] bake_all = scene.all_maps if scene.selected_col or bake_all: result.append("DIFFUSE") if scene.selected_normal or bake_all: result.append("NORMAL") if scene.selected_emission or bake_all: result.append("EMIT") if scene.selected_specular or bake_all: result.append("GLOSSY") if scene.selected_rough or bake_all: result.append("ROUGHNESS") if scene.selected_trans or bake_all: result.append("TRANSMISSION") ## special types if scene.omni_bake.bake_metallic or bake_all: result.append("METALLIC") return 
",".join(result) ## ====================================================================== class OBJECT_PT_omni_panel(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = "Omniverse" bl_label = "NVIDIA Omniverse" bl_options = {"DEFAULT_CLOSED"} version = "0.0.0" #retrieve icons icons = bpy.utils.previews.new() icons_directory = get_icons_directory() icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE') def draw_header(self, context): self.layout.label(text="", icon_value=self.icons["OMNI"].icon_id) def draw(self, context): layout = self.layout scene = context.scene # --------Particle Collection Instancing------------------- particleOptions = scene.particle_options particleCol = self.layout.column(align=True) particleCol.label(text="Omni Particles", icon='PARTICLES') box = particleCol.box() column = box.column(align=True) column.prop(particleOptions, "deletePSystemAfterBake") row = column.row() row.prop(particleOptions, "animateData") if particleOptions.animateData: row = column.row(align=True) row.prop(particleOptions, "selectedStartFrame") row.prop(particleOptions, "selectedEndFrame") row = column.row() row.enabled = False row.label(text="Increased Calculation Time", icon='ERROR') row = column.row() row.scale_y = 1.5 row.operator('omni.hair_bake', text='Convert', icon='MOD_PARTICLE_INSTANCE') if len(bpy.context.selected_objects) != 0 and bpy.context.active_object != None: if bpy.context.active_object.select_get() and bpy.context.active_object.type == "MESH": layout.separator() column = layout.column(align=True) column.label(text="Convert Material to:", icon='SHADING_RENDERED') box = column.box() materialCol = box.column(align=True) materialCol.operator('universalmaterialmap.create_template_omnipbr', text='OmniPBR') materialCol.operator('universalmaterialmap.create_template_omniglass', text='OmniGlass') ## ====================================================================== class OBJECT_PT_omni_bake_panel(bpy.types.Panel): 
bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = "Omniverse" bl_label = "Material Baking" bl_options = {"DEFAULT_CLOSED"} version = "0.0.0" #retrieve icons icons = bpy.utils.previews.new() icons_directory = get_icons_directory() icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE') icons.load("BAKE",join(icons_directory, "Oven.png"), 'IMAGE') def draw_header(self, context): self.layout.label(text="", icon="UV_DATA") def draw(self, context): layout = self.layout scene = context.scene box = layout.box() #--------PBR Bake Settings------------------- row = box.row() if scene.all_maps == True: row.prop(scene, "all_maps", icon = 'CHECKBOX_HLT') else: row.prop(scene, "all_maps", icon = 'CHECKBOX_DEHLT') column = box.column(align= True) row = column.row() row.prop(scene, "selected_col") row.prop(scene, "selected_normal") row = column.row() row.prop(scene, "selected_rough") row.prop(scene, "selected_specular", text="Gloss") row = column.row() row.prop(scene, "selected_trans") row.prop(scene, "selected_emission") row = column.row() row.label(text="Special Maps") row = column.row() row.prop(scene.omni_bake, "bake_metallic") row.label(text=" ") #--------Texture Settings------------------- row = box.row() row.label(text="Texture Resolution:") row.scale_y = 0.5 row = box.row() row.prop(scene, "texture_res", expand=True) row.scale_y = 1 if scene.texture_res == "8k" or scene.texture_res == "4k": row = box.row() row.enabled = False row.label(text="Long Bake Times", icon= 'ERROR') #--------UV Settings------------------- column = box.column(align = True) row = column.row() row.prop(scene, "newUVoption") row.prop(scene, "unwrapmargin") #--------Other Settings------------------- column= box.column(align=True) row = column.row() if scene.bgbake == "fg": text = "Copy objects and apply bakes" else: text = "Copy objects and apply bakes (after import)" row.prop(scene, "prepmesh", text=text) if scene.prepmesh == True: if scene.bgbake == "fg": text = "Hide source objects 
after bake" else: text = "Hide source objects after bake (after import)" row = column.row() row.prop(scene, "hidesourceobjects", text=text) #-------------Buttons------------------------- row = box.row() try: row.prop(scene.cycles, "device", text="Device") except: pass row = box.row() row.scale_y = 1.5 op = row.operator("omni.bake_maps", icon_value=self.icons["BAKE"].icon_id) op.unwrap = scene.newUVoption op.bake_types = _get_bake_types(scene) op.merge_textures = scene.omni_bake.merge_textures op.hide_original = scene.hidesourceobjects op.width = op.height = { "0.5k": 512, "1k": 1024, "2k": 2048, "4k": 4096, "8k": 8192, }[scene.texture_res] can_bake_poll, error_data = baker.omni_bake_maps_poll(context) can_bake_poll_result = { -1: f"Cannot bake objects in collection {baker.COLLECTION_NAME}", -2: f"Material cannot be baked:", -3: "Cycles Renderer Add-on not loaded!" } if can_bake_poll < 0: row = box.row() row.label(text=can_bake_poll_result[can_bake_poll], icon="ERROR") if can_bake_poll == -2: mesh_name, material_name = error_data row = box.row() row.label(text=f"{material_name} on {mesh_name}") row = column.row() row.scale_y = 1 ##!TODO: Restore background baking # row.prop(context.scene, "bgbake", expand=True) if scene.bgbake == "bg": row = column.row(align= True) # - BG status button col = row.column() if len(bgbake_ops.bgops_list) == 0: enable = False icon = "TIME" else: enable = True icon = "TIME" col.operator("object.omni_bake_bgbake_status", text="", icon=icon) col.enabled = enable # - BG import button col = row.column() if len(bgbake_ops.bgops_list_finished) != 0: enable = True icon = "IMPORT" else: enable = False icon = "IMPORT" col.operator("object.omni_bake_bgbake_import", text="", icon=icon) col.enabled = enable #BG erase button col = row.column() if len(bgbake_ops.bgops_list_finished) != 0: enable = True icon = "TRASH" else: enable = False icon = "TRASH" col.operator("object.omni_bake_bgbake_clear", text="", icon=icon) col.enabled = enable row.alignment 
= 'CENTER' row.label(text=f"Running {len(bgbake_ops.bgops_list)} | Finished {len(bgbake_ops.bgops_list_finished)}") ## ====================================================================== class OmniBakePreferences(bpy.types.AddonPreferences): # this must match the add-on name, use '__package__' # when defining this in a submodule of a python package. bl_idname = __package__ img_name_format: bpy.props.StringProperty(name="Image format string", default="%OBJ%_%BATCH%_%BAKEMODE%_%BAKETYPE%") #Aliases diffuse_alias: bpy.props.StringProperty(name="Diffuse", default="diffuse") metal_alias: bpy.props.StringProperty(name="Metal", default="metalness") roughness_alias: bpy.props.StringProperty(name="Roughness", default="roughness") glossy_alias: bpy.props.StringProperty(name="Glossy", default="glossy") normal_alias: bpy.props.StringProperty(name="Normal", default="normal") transmission_alias: bpy.props.StringProperty(name="Transmission", default="transparency") transmissionrough_alias: bpy.props.StringProperty(name="Transmission Roughness", default="transparencyroughness") clearcoat_alias: bpy.props.StringProperty(name="Clearcost", default="clearcoat") clearcoatrough_alias: bpy.props.StringProperty(name="Clearcoat Roughness", default="clearcoatroughness") emission_alias: bpy.props.StringProperty(name="Emission", default="emission") specular_alias: bpy.props.StringProperty(name="Specular", default="specular") alpha_alias: bpy.props.StringProperty(name="Alpha", default="alpha") sss_alias: bpy.props.StringProperty(name="SSS", default="sss") ssscol_alias: bpy.props.StringProperty(name="SSS Colour", default="ssscol") @classmethod def reset_img_string(self): prefs = bpy.context.preferences.addons[__package__].preferences prefs.property_unset("img_name_format") bpy.ops.wm.save_userpref()
12,271
Python
34.98827
117
0.557412
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/workflow/usd_kind.py
from typing import * import bpy from bpy.types import (Collection, Context, Image, Object, Material, Mesh, Node, NodeSocket, NodeTree, Scene) from bpy.props import * ## ====================================================================== usd_kind_items = { ('COMPONENT', 'component', 'kind: component'), ('GROUP', 'group', 'kind: group'), ('ASSEMBLY', 'assembly', 'kind: assembly'), ('CUSTOM', 'custom', 'kind: custom'), } ## ====================================================================== def get_plural_count(items) -> (str, int): count = len(items) plural = '' if count == 1 else 's' return plural, count ## ====================================================================== class OBJECT_OT_omni_set_usd_kind(bpy.types.Operator): """Sets the USD Kind value on the selected objects.""" bl_idname = "omni.set_usd_kind" bl_label = "Set USD Kind" bl_options = {"REGISTER", "UNDO"} kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items) custom_kind: StringProperty(default="") verbose: BoolProperty(default=False) @property ## read-only def value(self) -> str: return self.custom_kind if self.kind == "CUSTOM" else self.kind.lower() @classmethod def poll(cls, context:Context) -> bool: return bool(len(context.selected_objects)) def execute(self, context:Context) -> Set[str]: if self.kind == "NONE": self.report({"WARNING"}, "No kind specified-- nothing authored.") return {"CANCELLED"} for item in context.selected_objects: props = item.id_properties_ensure() props["usdkind"] = self.value props_ui = item.id_properties_ui("usdkind") props_ui.update(default=self.value, description="USD Kind") if self.verbose: plural, count = get_plural_count(context.selected_objects) self.report({"INFO"}, f"Set USD Kind to {self.value} for {count} object{plural}.") return {"FINISHED"} ## ====================================================================== class OBJECT_OT_omni_set_usd_kind_auto(bpy.types.Operator): """Sets the USD Kind value on scene objects, 
automatically.""" bl_idname = "omni.set_usd_kind_auto" bl_label = "Set USD Kind Auto" bl_options = {"REGISTER", "UNDO"} verbose: BoolProperty(default=False) def execute(self, context:Context) -> Set[str]: active = context.active_object selected = list(context.selected_objects) bpy.ops.object.select_all(action='DESELECT') ## heuristics ## First, assign "component" to all unparented empties unparented = [x for x in context.scene.collection.all_objects if not x.parent and x.type == "EMPTY"] for item in unparented: item.select_set(True) bpy.ops.omni.set_usd_kind(kind="COMPONENT") item.select_set(False) if self.verbose: plural, count = get_plural_count(unparented) self.report({"INFO"}, f"Set USD Kind Automatically on {count} object{plural}.") return {"FINISHED"} ## ====================================================================== class OBJECT_OT_omni_clear_usd_kind(bpy.types.Operator): """Clear USD Kind values on the selected objects.""" bl_idname = "omni.clear_usd_kind" bl_label = "Clear USD Kind" bl_options = {"REGISTER", "UNDO"} verbose: BoolProperty(default=False) @classmethod def poll(cls, context:Context) -> bool: return bool(len(context.selected_objects)) def execute(self, context:Context) -> Set[str]: from rna_prop_ui import rna_idprop_ui_prop_update total = 0 for item in context.selected_objects: if "usdkind" in item: rna_idprop_ui_prop_update(item, "usdkind") del item["usdkind"] total += 1 if self.verbose: plural, count = get_plural_count(range(total)) self.report({"INFO"}, f"Cleared USD Kind from {count} object{plural}.") return {"FINISHED"} ## ====================================================================== class OBJECT_PT_omni_usd_kind_panel(bpy.types.Panel): bl_space_type = 'VIEW_3D' bl_region_type = 'UI' bl_category = "Omniverse" bl_label = "USD Kind" def draw(self, context:Context): layout = self.layout scene = context.scene layout.label(text="USD Kind") row = layout.row() row.prop(scene.omni_usd_kind, "kind", text="Kind") if 
scene.omni_usd_kind.kind == "CUSTOM": row = layout.row() row.prop(scene.omni_usd_kind, "custom_kind", text="Custom Kind") col = layout.column(align=True) op = col.operator(OBJECT_OT_omni_set_usd_kind.bl_idname, icon="PLUS") op.kind = scene.omni_usd_kind.kind op.custom_kind = scene.omni_usd_kind.custom_kind op.verbose = True op = col.operator(OBJECT_OT_omni_clear_usd_kind.bl_idname, icon="X") op.verbose = True op = col.operator(OBJECT_OT_omni_set_usd_kind_auto.bl_idname, icon="BRUSH_DATA") op.verbose = True ## ====================================================================== class USDKindProperites(bpy.types.PropertyGroup): kind: EnumProperty(name='kind', description='USD Kind', items=usd_kind_items) custom_kind: StringProperty(default="") ## ====================================================================== classes = [ OBJECT_OT_omni_set_usd_kind, OBJECT_OT_omni_set_usd_kind_auto, OBJECT_OT_omni_clear_usd_kind, OBJECT_PT_omni_usd_kind_panel, USDKindProperites, ] def unregister(): for cls in reversed(classes): try: bpy.utils.unregister_class(cls) except ValueError: continue except RuntimeError: continue try: del bpy.types.Scene.omni_usd_kind except AttributeError: pass def register(): unregister() for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.omni_usd_kind = bpy.props.PointerProperty(type=USDKindProperites)
5,618
Python
27.668367
102
0.620862
NVIDIA-Omniverse/blender_omniverse_addons/omni_panel/material_bake/baker.py
from tempfile import NamedTemporaryFile from typing import * import addon_utils import bpy from bpy.types import (Collection, Context, Image, Object, Material, Mesh, Node, NodeSocket, NodeTree, Scene) from bpy.props import * from mathutils import * from omni_panel.material_bake import material_setup COLLECTION_NAME = "OmniBake_Bakes" def get_material_output(tree:NodeTree, engine:str="CYCLES") -> Optional[Node]: """ Find the material output node that applies only to a specific engine. :param tree: The NodeTree to search. :param engine: The engine to search for. :return: The Material Output Node associated with the engine, or None if not found. """ supported_engines = {"CYCLES", "EEVEE", "ALL"} assert engine in supported_engines, f"Only the following engines are supported: {','.join(supported_engines)}" result = [x for x in tree.nodes if x.type == "OUTPUT_MATERIAL" and x.target in {"ALL", engine}] if len(result): return result[0] return None def prepare_collection(scene:Scene) -> Collection: """ Ensures the bake Collection exists in the specified scene. :param scene: The scene to which you wish to add the bake Collection. :return: the bake Collection """ collection = bpy.data.collections.get(COLLECTION_NAME, None) or bpy.data.collections.new(COLLECTION_NAME) if not COLLECTION_NAME in scene.collection.children: scene.collection.children.link(collection) return collection def select_only(ob:Object): """ Ensure that only the specified object is selected. :param ob: Object to select """ bpy.ops.object.select_all(action="DESELECT") ob.select_set(state=True) bpy.context.view_layer.objects.active = ob def smart_unwrap_object(ob:Object, name:str="OmniBake"): """ Use Blenders built-in smart unwrap functionality to generate a new UV map. :param ob: Mesh Object to unwrap. """ bpy.ops.object.mode_set(mode="EDIT", toggle=False) # Unhide any geo that's hidden in edit mode or it'll cause issues. 
bpy.ops.mesh.reveal() bpy.ops.mesh.select_all(action="SELECT") bpy.ops.mesh.reveal() if name in ob.data.uv_layers: ob.data.uv_layers.remove(ob.data.uv_layers[name]) uv_layer = ob.data.uv_layers.new(name=name) uv_layer.active = True bpy.ops.uv.select_all(action="SELECT") bpy.ops.uv.smart_project(island_margin=0.0) bpy.ops.object.mode_set(mode="OBJECT", toggle=False) def prepare_mesh(ob:Object, collection: Collection, unwrap=False) -> Object: """ Duplicate the specified Object, also duplicating all its materials. :param ob: The object to duplicate. :param collection: After duplication, the object will be inserted into this Collection :param unwrap: If True, also smart unwrap the object's UVs. :return: The newly created duplicate object. """ assert not ob.name in collection.all_objects, f"{ob.name} is a baked mesh (cannot be used)" new_mesh_name = ob.data.name[:56] + "_baked" if new_mesh_name in bpy.data.meshes: bpy.data.meshes.remove(bpy.data.meshes[new_mesh_name]) new_mesh = ob.data.copy() new_mesh.name = new_mesh_name new_name = ob.name[:56] + "_baked" if new_name in bpy.data.objects: bpy.data.objects.remove(bpy.data.objects[new_name]) new_object = bpy.data.objects.new(new_name, new_mesh) collection.objects.link(new_object) select_only(new_object) new_object.matrix_world = ob.matrix_world.copy() if unwrap: smart_unwrap_object(new_object) for index, material in enumerate([x.material for x in new_object.material_slots]): new_material_name = material.name[:56] + "_baked" if new_material_name in bpy.data.materials: bpy.data.materials.remove(bpy.data.materials[new_material_name]) new_material = material.copy() new_material.name = new_material_name new_object.material_slots[index].material = new_material ob.hide_viewport = True return new_object ##!<--- TODO: Fix these def find_node_from_label(label:str, nodes:List[Node]) -> Node: for node in nodes: if node.label == label: return node return False def find_isocket_from_identifier(idname:str, node:Node) -> NodeSocket: for 
inputsocket in node.inputs: if inputsocket.identifier == idname: return inputsocket return False def find_osocket_from_identifier(idname, node): for outputsocket in node.outputs: if outputsocket.identifier == idname: return outputsocket return False def make_link(f_node_label, f_node_ident, to_node_label, to_node_ident, nodetree): fromnode = find_node_from_label(f_node_label, nodetree.nodes) if (fromnode == False): return False fromsocket = find_osocket_from_identifier(f_node_ident, fromnode) tonode = find_node_from_label(to_node_label, nodetree.nodes) if (tonode == False): return False tosocket = find_isocket_from_identifier(to_node_ident, tonode) nodetree.links.new(fromsocket, tosocket) return True ## ---> ## ====================================================================== ##!TODO: Shader type identification and bake setup def _nodes_for_type(node_tree:NodeTree, node_type:str) -> List[Node]: result = [x for x in node_tree.nodes if x.type == node_type] ## skip unconnected nodes from_nodes = [x.from_node for x in node_tree.links] to_nodes = [x.to_node for x in node_tree.links] all_nodes = set(from_nodes + to_nodes) result = list(filter(lambda x: x in all_nodes, result)) return result def output_nodes_for_engine(node_tree:NodeTree, engine:str) -> List[Node]: nodes = _nodes_for_type(node_tree, "OUTPUT_MATERIAL") return nodes def get_principled_nodes(node_tree:NodeTree) -> List[Node]: return _nodes_for_type(node_tree, "BSDF_PRINCIPLED") def identify_shader_type(node_tree:NodeTree) -> str: principled_nodes = get_principled_nodes(node_tree) emission_nodes = _nodes_for_type(node_tree, "EMISSION") mix_nodes = _nodes_for_type(node_tree, "MIX_SHADER") outputs = output_nodes_for_engine(node_tree, "CYCLES") total_shader_nodes = principled_nodes + emission_nodes + mix_nodes ## first type: principled straight into the output ## ---------------------------------------------------------------------- def create_principled_setup(material:Material, images:Dict[str,Image]): """ 
Creates a new shader setup in the tree of the specified material using the baked images, removing all old shader nodes. :param material: The material to change. :param images: The baked Images dictionary, name:Image pairs. """ node_tree = material.node_tree nodes = node_tree.nodes material.cycles.displacement_method = 'BOTH' principled_nodes = get_principled_nodes(node_tree) for node in filter(lambda x: not x in principled_nodes, nodes): nodes.remove(node) # Node Frame frame = nodes.new("NodeFrame") frame.location = (0, 0) frame.use_custom_color = True frame.color = (0.149763, 0.214035, 0.0590617) ## reuse the old BSDF if it exists to make sure the non-textured constant inputs are correct pnode = principled_nodes[0] if len(principled_nodes) else nodes.new("ShaderNodeBsdfPrincipled") pnode.location = (-25, 335) pnode.label = "pnode" pnode.use_custom_color = True pnode.color = (0.3375297784805298, 0.4575316309928894, 0.08615386486053467) pnode.parent = nodes["Frame"] # And the output node node = nodes.new("ShaderNodeOutputMaterial") node.location = (500, 200) node.label = "monode" node.show_options = False node.parent = nodes["Frame"] make_link("pnode", "BSDF", "monode", "Surface", node_tree) # ----------------------------------------------------------------- # 'COMBINED', 'AO', 'SHADOW', 'POSITION', 'NORMAL', 'UV', 'ROUGHNESS', # 'EMIT', 'ENVIRONMENT', 'DIFFUSE', 'GLOSSY', 'TRANSMISSION' ## These are the currently supported types. ## More could be supported at a future date. 
if "DIFFUSE" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 250) node.label = "col_tex" node.image = images["DIFFUSE"] node.parent = nodes["Frame"] make_link("col_tex", "Color", "pnode", "Base Color", node_tree) if "METALLIC" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 140) node.label = "metallic_tex" node.image = images["METALLIC"] node.parent = nodes["Frame"] make_link("metallic_tex", "Color", "pnode", "Metallic", node_tree) if "GLOSSY" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 90) node.label = "specular_tex" node.image = images["GLOSSY"] node.parent = nodes["Frame"] make_link("specular_tex", "Color", "pnode", "Specular", node_tree) if "ROUGHNESS" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, 50) node.label = "roughness_tex" node.image = images["ROUGHNESS"] node.parent = nodes["Frame"] make_link("roughness_tex", "Color", "pnode", "Roughness", node_tree) if "TRANSMISSION" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -90) node.label = "transmission_tex" node.image = images["TRANSMISSION"] node.parent = nodes["Frame"] make_link("transmission_tex", "Color", "pnode", "Transmission", node_tree) if "EMIT" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -170) node.label = "emission_tex" node.image = images["EMIT"] node.parent = nodes["Frame"] make_link("emission_tex", "Color", "pnode", "Emission", node_tree) if "NORMAL" in images: node = nodes.new("ShaderNodeTexImage") node.hide = True node.location = (-500, -318.7) node.label = "normal_tex" image = images["NORMAL"] node.image = image node.parent = nodes["Frame"] # Additional normal map node for normal socket node = nodes.new("ShaderNodeNormalMap") node.location = (-220, -240) node.label = "normalmap" node.show_options = False node.parent = nodes["Frame"] 
make_link("normal_tex", "Color", "normalmap", "Color", node_tree) make_link("normalmap", "Normal", "pnode", "Normal", node_tree) # ----------------------------------------------------------------- ## wipe all labels for item in nodes: item.label = "" node = nodes["Frame"] node.label = "OMNI PBR" for type, image in images.items(): if type in {"DIFFUSE", "EMIT"}: image.colorspace_settings.name = "sRGB" else: image.colorspace_settings.name = "Non-Color" ## ====================================================================== def _selected_meshes(context:Context) -> List[Mesh]: """ :return: List[Mesh] of all selected mesh objects in active Blender Scene. """ return [x for x in context.selected_objects if x.type == "MESH"] def _material_can_be_baked(material:Material) -> bool: outputs = output_nodes_for_engine(material.node_tree, "CYCLES") if not len(outputs) == 1: return False try: from_node = outputs[0].inputs["Surface"].links[0].from_node except IndexError: return False ##!TODO: Support one level of mix with principled inputs if not from_node.type == "BSDF_PRINCIPLED": return False return True def omni_bake_maps_poll(context:Context) -> (int, Any): """ :return: 1 if we can bake 0 if no meshes are selected -1 if any selected meshes are already in the bake collection -2 if mesh contains non-bakeable materials -3 if Cycles renderer isn't loaded """ ## Cycles renderer is not available _, loaded_state = addon_utils.check("cycles") if not loaded_state: return (-3, None) selected = _selected_meshes(context) if not len(selected): return (0, None) for mesh in selected: for material in [slot.material for slot in mesh.material_slots]: if not _material_can_be_baked(material): return (-2, [mesh.name, material.name]) collection = bpy.data.collections.get(COLLECTION_NAME, None) if collection is None: ## We have selected meshes but no collection-- early out return (1, None) in_collection = [x for x in selected if x.name in collection.all_objects] if len(in_collection): return (-1, 
None) return (1, None) ## ====================================================================== class OmniBakerProperties(bpy.types.PropertyGroup): bake_metallic: BoolProperty(name="Metallic", default=True) merge_textures: BoolProperty(name="Merge Textures", description="Bake all materials for each object onto a single map", default=True) ## ====================================================================== class OBJECT_OT_omni_bake_maps(bpy.types.Operator): """Bake specified passes on the selected Mesh object.""" bl_idname = "omni.bake_maps" bl_label = "Bake Maps" bl_options = {"REGISTER", "UNDO"} base_bake_types = { ##!TODO: Possibly support these at a later date? # "COMBINED", "AO", "SHADOW", "POSITION", "UV", "ENVIRONMENT", "DIFFUSE", "NORMAL", "EMIT", "GLOSSY", "ROUGHNESS", "TRANSMISSION", } special_bake_types = { "METALLIC": "Metallic", } unwrap: BoolProperty(default=False, description="Unwrap") hide_original: BoolProperty(default=False, description="Hide Original") width: IntProperty(default=1024, min=128, max=8192, description="Width") height: IntProperty(default=1024, min=128, max=8192, description="Height") bake_types: StringProperty(default="DIFFUSE") merge_textures: BoolProperty(default=True, description="Merge Textures") @classmethod def poll(cls, context:Context) -> bool: return omni_bake_maps_poll(context)[0] == 1 def draw(self, context:Context): """Empty draw to disable the Operator Props Panel.""" pass def _get_bake_emission_target(self, node_tree:NodeTree) -> Node: bake_emission_name = "OmniBake_Emission" if not bake_emission_name in node_tree.nodes: node = node_tree.nodes.new("ShaderNodeEmission") node.name = bake_emission_name output = get_material_output(node_tree, "CYCLES") node.location = output.location + Vector((-200.0, -100.0)) return node_tree.nodes[bake_emission_name] def _copy_connection(self, material:Material, bsdf:Node, bake_type:str, target_socket:NodeSocket) -> bool: if not bake_type in self.special_bake_types: return False 
orig_socket = bsdf.inputs[self.special_bake_types[bake_type]] if not len(orig_socket.links): ## copy over the color and return if orig_socket.type == "VECTOR": for index in range(4): target_socket.default_value[index] = orig_socket.default_value elif orig_socket.type in {"VECTOR", "RGBA"}: for index in range(3): target_socket.default_value[index] = orig_socket.default_value[index] target_socket.default_value[3] = 1.0 else: ## should never arrive here return False else: input_socket = orig_socket.links[0].from_socket material.node_tree.links.new(input_socket, target_socket) return True def _create_bake_texture_names(self, ob:Object, bake_types:List[str]) -> List[str]: result = [] for material in [x.material for x in ob.material_slots]: material_name = material.name.rpartition('_baked')[0] for bake_type in bake_types: if self.merge_textures: image_name = f"{ob.name}__{bake_type}" else: image_name = f"{ob.name}_{material_name}_{bake_type}" result.append(image_name) return result def report(self, type:Set[str], message:str): print(message) super(OBJECT_OT_omni_bake_maps, self).report(type, message) def execute(self, context:Context) -> Set[str]: wm = context.window_manager scene = context.scene scene_engine = scene.render.engine scene.render.engine = "CYCLES" scene_use_clear = scene.render.bake.use_clear scene.render.bake.use_clear = False collection = prepare_collection(scene) all_bake_types = self.base_bake_types | self.special_bake_types.keys() valid_types_str = "Valid types are: " + ", ".join(all_bake_types) self.report({"INFO"}, f"Bake types: {self.bake_types}") bake_types = self.bake_types.split(",") if not len(bake_types): self.report({"ERROR"}, "No bake type specified. " + valid_types_str) for bake_type in bake_types: if not bake_type in all_bake_types: self.report({"ERROR"}, f"Bake type '{bake_type}' is not valid. 
" + valid_types_str) return {"CANCELLED"} selected_meshes = _selected_meshes(context) count = 0 total = 0 for mesh in selected_meshes: count += len(mesh.material_slots) * len(bake_types) wm.progress_begin(total, count) bpy.ops.object.mode_set(mode="OBJECT") for mesh_object in _selected_meshes(context): mesh_object.hide_select = mesh_object.hide_render = mesh_object.hide_viewport = False baked_ob = prepare_mesh(mesh_object, collection, unwrap=self.unwrap) uv_layer = "OmniBake" if self.unwrap else baked_ob.data.uv_layers.active.name bpy.ops.object.select_all(action="DESELECT") baked_ob.select_set(True) context.view_layer.objects.active = baked_ob self.report({"INFO"}, f"Baking Object {baked_ob.name}") baked_materials = [] ## Because of merge_textures, we have to create the names now and clear them ## before the whole bake process starts bake_image_names = self._create_bake_texture_names(baked_ob, bake_types) ## if merge_textures is on there'll be some repeats for image_name in set(bake_image_names): if image_name in bpy.data.images: bpy.data.images.remove(bpy.data.images[image_name]) image = bpy.data.images.new(image_name, self.width, self.height, float_buffer=(image_name.endswith(("NORMAL", "EMIT"))) ) # if bake_type in {"DIFFUSE", "EMIT"}: # image.colorspace_settings.name = "sRGB" # else: # image.colorspace_settings.name = "Non-Color" image.colorspace_settings.name = "Raw" if self.merge_textures: temp_file = NamedTemporaryFile(prefix=bake_type, suffix=".png", delete=False) image.filepath = temp_file.name image_index = 0 for material_index, material in enumerate([x.material for x in baked_ob.material_slots]): self.report({"INFO"}, f" => Material: {material.name}") tree = material.node_tree baked_ob.active_material_index = material_index for node in tree.nodes: node.select = False output = get_material_output(tree) bsdf = output.inputs["Surface"].links[0].from_node if "OmniBakeImage" in tree.nodes: tree.nodes.remove(tree.nodes["OmniBakeImage"]) bake_image_node = 
tree.nodes.new("ShaderNodeTexImage") bake_image_node.name = "OmniBakeImage" bake_image_node.location = output.location.copy() bake_image_node.location.x += 200.0 bake_image_node.select = True tree.nodes.active = bake_image_node ## for special cases bake_emission = self._get_bake_emission_target(tree) original_link = output.inputs["Surface"].links[0] original_from, original_to = original_link.from_socket, original_link.to_socket baked_images = {} for bake_type in bake_types: image_name = bake_image_names[image_index] image = bpy.data.images[image_name] bake_image_node.image = image.original if image.original else image self.report({"INFO"}, f"====> Baking {material.name} pass {bake_type}...") kwargs = {} if bake_type in {"DIFFUSE"}: ## ensure no black due to bad direct / indirect lighting kwargs["pass_filter"] = {"COLOR"} scene.render.bake.use_pass_indirect = False scene.render.bake.use_pass_direct = False if bake_type in self.special_bake_types: ## cheat by running the bake through emit after reconnecting real_bake_type = "EMIT" tree.links.new(bake_emission.outputs["Emission"], original_to) self._copy_connection(material, bsdf, bake_type, bake_emission.inputs["Color"]) else: real_bake_type = bake_type tree.links.new(original_from, original_to) ## have to do this every pass? if bake_type in {"DIFFUSE", "EMIT"}: image.colorspace_settings.name = "sRGB" else: image.colorspace_settings.name = "Non-Color" bpy.ops.object.bake(type=real_bake_type, width=self.width, height=self.height, uv_layer=uv_layer, use_clear=False, margin=1, **kwargs) if self.merge_textures: ## I know this seems weird, but if you don't save the file here ## post-bake when merging, the texture gets corrupted and you end ## up with a texture that's taking up ram, but can't be loaded ## for rendering (comes up pink in Cycles) image.save() self.report({"INFO"}, "... 
Done.") baked_images[bake_type] = image total += 1 image_index += 1 wm.progress_update(total) wm.update_tag() for node in bake_image_node, bake_emission: tree.nodes.remove(node) tree.links.new(original_from, original_to) baked_materials.append((material, baked_images)) for material, images in baked_materials: ## Perform conversion after all images are baked ## If this is not done, then errors can arise despite not ## replacing shader indices. create_principled_setup(material, images) for image in [bpy.data.images[x] for x in bake_image_names]: image.pack() ## Set new UV map as active if it exists if "OmniBake" in baked_ob.data.uv_layers: baked_ob.data.uv_layers["OmniBake"].active_render = True if self.hide_original: mesh_object.hide_set(True) wm.progress_end() scene.render.engine = scene_engine scene.render.bake.use_clear = scene_use_clear return {"FINISHED"} ## ====================================================================== module_classes = [ OBJECT_OT_omni_bake_maps, OmniBakerProperties, ] def register(): for cls in module_classes: bpy.utils.register_class(cls) bpy.types.Scene.omni_bake = bpy.props.PointerProperty(type=OmniBakerProperties) def unregister(): for cls in reversed(module_classes): bpy.utils.unregister_class(cls) try: del bpy.types.Scene.omni_bake except (AttributeError, RuntimeError): pass
21,781
Python
30.659884
111
0.678573
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/__init__.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. bl_info = { "name": "Omni Scene Optimization Panel", "author": "Nvidia", "description": "", "blender": (3, 4, 0), "version": (2, 0, 0), "location": "View3D > Toolbar > Omniverse", "warning": "", "category": "Omniverse" } from . import (operators, ui) def register(): operators.register() ui.register() def unregister(): operators.unregister() ui.unregister()
1,274
Python
27.333333
74
0.678964
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/operators.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import os import subprocess import time from typing import * from importlib import reload import bpy from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty) from bpy.types import (Context, Event, Object, Modifier, NodeTree, Scene) from mathutils import Vector from .properties import (OmniSceneOptChopPropertiesMixin, chopProperties) ## ====================================================================== symmetry_axis_items = [ ("X", "X", "X"), ("Y", "Y", "Y"), ("Z", "Z", "Z") ] generate_type_items = [ ("CONVEX_HULL", "Convex Hull", "Convex Hull"), ("BOUNDING_BOX", "Bounding Box", "Bounding Box") ] generate_name = "OmniSceneOptGenerate" ## ====================================================================== def selected_meshes(scene:Scene) -> List[Object]: result = [x for x in scene.collection.all_objects if x.type == "MESH" and x.select_get()] return result def get_plural_count(items) -> (str, int): count = len(items) plural = '' if count == 1 else 's' return plural, count ## ====================================================================== def preserve_selection(func, *args, **kwargs): def wrapper(*args, **kwargs): selection = [x.name for x in bpy.context.selected_objects] active = bpy.context.active_object.name if bpy.context.active_object else None result = func(*args, **kwargs) scene_objects = bpy.context.scene.objects to_select = [ scene_objects[x] for x in selection if x in scene_objects ] if active: active = scene_objects[active] if active in scene_objects else (to_select[-1] if len(to_select) else None) bpy.ops.object.select_all(action="DESELECT") for item in to_select: item.select_set(True) bpy.context.view_layer.objects.active = active return result return wrapper ## ====================================================================== class OmniSceneOptPropertiesMixin: """ Blender Properties that are shared between the in-scene preferences 
pointer and the various operators. """ verbose: BoolProperty(name="Verbose", description="Print information while running", default=False) selected: BoolProperty(name="Selected", description="Run on Selected Objects (if False, run on whole Scene)", default=False) ## export options export_textures: BoolProperty(name="Export Textures", description="Export textures when doing a background export", default=True) ## these are deliberate copies from ui.OmniYes.Properties validate: BoolProperty(name="Validate Meshes", description="Attempt to remove invalid geometry", default=True) weld: BoolProperty(name="Weld Verts", description="Weld loose vertices", default=False) weld_distance: FloatProperty(name="Weld Distance", description="Distance threshold for welds", default=0.0001, min=0.00001, step=0.00001) unwrap: BoolProperty(name="Unwrap Mesh UVs", description="Use the Smart Unwrap feature to add new UVs", default=False) unwrap_margin: FloatProperty(name="Margin", description="Distance between UV islands", default=0.00, min=0.0, step=0.01) decimate: BoolProperty(name="Decimate", description="Reduce polygon and vertex counts on meshes", default=False) decimate_ratio: IntProperty(name="Ratio", subtype="PERCENTAGE", description="Reduce face count to this percentage of original", default=50, min=10, max=100, step=5) decimate_use_symmetry: BoolProperty(name="Use Symmetry", description="Decimate with Symmetry across an axis", default=False) decimate_symmetry_axis: EnumProperty(name="Symmetry Axis", description="Axis for symmetry", items=symmetry_axis_items, default="X") decimate_min_face_count: IntProperty(name="Minimum Face Count", description="Do not decimate objects with less faces", default=500, min=100, step=10) decimate_remove_shape_keys: BoolProperty(name="Remove Shape Keys", description="Remove shape keys to allow meshes with shapes to be decimated", default=False) chop: BoolProperty(name="Chop Meshes", description="Physically divide meshes based on size and point count", 
default=False) generate: BoolProperty(name="Generate", description="Generate convex hulls or bounding boxes", default=False) merge: BoolProperty(name="Merge Selected", description="On Export, merge selected meshes into a single object", default=False) ## ====================================================================== class OmniSceneOptGeneratePropertiesMixin: generate_duplicate: BoolProperty(name="Create Duplicate", description="Generate a new object instead of replacing the original", default=True) generate_type: EnumProperty(name="Generate Type", description="Type of geometry to generate", items=generate_type_items, default="CONVEX_HULL") ## ====================================================================== """ This is a weird one. The decimate modifier was failing on multiple objects in order, but wrapping it in an Operator seems to fix the issues with making sure the correct things are selected in the Context. """ class OBJECT_OT_omni_sceneopt_decimate(bpy.types.Operator, OmniSceneOptPropertiesMixin): """Decimates the selected object using the Decimation modifier.""" bl_idname = "omni_sceneopt.decimate" bl_label = "Omni Scene Optimization: Decimate" bl_options = {"REGISTER", "UNDO"} ratio: IntProperty(name="Ratio", subtype="PERCENTAGE", description="Reduce face count to this percentage of original", default=50, min=10, max=100, step=5) use_symmetry: BoolProperty(name="Use Symmetry", description="Decimate with Symmetry across an axis", default=True) symmetry_axis: EnumProperty(name="Symmetry Axis", description="Axis for symmetry", items=symmetry_axis_items, default="X") min_face_count: IntProperty(name="Minimum Face Count", description="Do not decimate objects with less faces", default=500, min=100, step=10) @classmethod def poll(cls, context:Context) -> bool: return bool(context.active_object) def execute(self, context:Context) -> Set[str]: from .batch import lod result = lod.decimate_object(context.active_object, ratio=self.ratio / 100.0, 
use_symmetry=self.use_symmetry, symmetry_axis=self.symmetry_axis, min_face_count=self.min_face_count, create_duplicate=False) return {"FINISHED"} ## ====================================================================== class OmniOverrideMixin: def set_active(self, ob:Object): try: bpy.context.view_layer.objects.active = ob except RuntimeError as e: print(f"-- unable to set active: {ob.name} ({e}") def override(self, objects:List[Object], single=False): assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple" assert len(objects), "'objects' cannot be empty" ## filter out objects not in current view layer objects = list(filter(lambda x: x.name in bpy.context.view_layer.objects, objects)) if single: objects = objects[0:1] override = { 'active_object': objects[0], 'edit_object': None, 'editable_objects': objects, 'object': objects[0], 'objects_in_mode': [], 'objects_in_mode_unique_data': [], 'selectable_objects': objects, 'selected_editable_objects': objects, 'selected_objects': objects, 'visible_objects': objects, } self.set_active(objects[0]) return bpy.context.temp_override(**override) def edit_override(self, objects:List[Object], single=False): assert isinstance(objects, (list, tuple)), "'objects' is expected to be a list or tuple" assert len(objects), "'objects' cannot be empty" if single: objects = objects[0:1] override = { 'active_object': objects[0], 'edit_object': objects[0], 'editable_objects': objects, 'object': objects[0], 'objects_in_mode': objects, 'objects_in_mode_unique_data': objects, 'selectable_objects': objects, 'selected_editable_objects': objects, 'selected_objects': objects, 'visible_objects': objects, } self.set_active(objects[0]) return bpy.context.temp_override(**override) ## ====================================================================== class OBJECT_OT_omni_sceneopt_optimize(bpy.types.Operator, OmniSceneOptPropertiesMixin, OmniSceneOptChopPropertiesMixin, OmniSceneOptGeneratePropertiesMixin, 
OmniOverrideMixin): """Run specified optimizations on the scene or on selected objects.""" bl_idname = "omni_sceneopt.optimize" bl_label = "Omni Scene Optimization: Optimize Scene" bl_options = {"REGISTER", "UNDO"} # def draw(self, context:Context): # """Empty draw to disable the Operator Props Panel.""" # pass def _object_mode(self): if not bpy.context.mode == "OBJECT": bpy.ops.object.mode_set(mode="OBJECT") def _edit_mode(self): if not bpy.context.mode == "EDIT_MESH": bpy.ops.object.mode_set(mode="EDIT") @staticmethod def _remove_shape_keys(ob:Object): assert ob.type == "MESH", "Cannot be run on non-Mesh Objects." ## Reversed because we want to remove Basis last, or we will end up ## with garbage baked in. for key in reversed(ob.data.shape_keys.key_blocks): ob.shape_key_remove(key) @staticmethod def _select_one(ob:Object): bpy.ops.object.select_all(action="DESELECT") ob.select_set(True) bpy.context.view_layer.objects.active = ob @staticmethod def _select_objects(objects:List[Object]): bpy.ops.object.select_all(action="DESELECT") for item in objects: item.select_set(True) bpy.context.view_layer.objects.active = objects[-1] @staticmethod def _get_evaluated(objects:List[Object]) -> List[Object]: deps = bpy.context.evaluated_depsgraph_get() return [x.evaluated_get(deps).original for x in objects] @staticmethod def _total_vertex_count(target_objects:List[Object]): deps = bpy.context.evaluated_depsgraph_get() eval_objs = [x.evaluated_get(deps) for x in target_objects] return sum([len(x.data.vertices) for x in eval_objs]) def do_validate(self, target_objects:List[Object]) -> List[Object]: """Expects to be run in Edit Mode with all meshes selected""" total_orig = self._total_vertex_count(target_objects) bpy.ops.mesh.select_all(action="SELECT") bpy.ops.mesh.dissolve_degenerate() total_result = self._total_vertex_count(target_objects) if self.verbose: plural, obj_count = get_plural_count(target_objects) message = f"Validated {obj_count} object{plural}." 
self.report({"INFO"}, message) return target_objects def do_weld(self, target_objects:List[Object]) -> List[Object]: """Expects to be run in Edit Mode with all meshes selected""" bpy.ops.mesh.remove_doubles(threshold=self.weld_distance, use_unselected=True) bpy.ops.mesh.normals_make_consistent(inside=False) return target_objects def do_unwrap(self, target_objects:List[Object]) -> List[Object]: bpy.ops.object.select_all(action="DESELECT") start = time.time() for item in target_objects: with self.edit_override([item]): bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_all(action="SELECT") bpy.ops.uv.smart_project(island_margin=0.0) bpy.ops.uv.select_all(action="SELECT") # bpy.ops.uv.average_islands_scale() # bpy.ops.uv.pack_islands(margin=self.unwrap_margin) bpy.ops.object.mode_set(mode="OBJECT") end = time.time() if self.verbose: plural, obj_count = get_plural_count(target_objects) message = f"Unwrapped {obj_count} object{plural} ({end-start:.02f} seconds)." self.report({"INFO"}, message) return target_objects def do_decimate(self, target_objects:List[Object]) -> List[Object]: assert bpy.context.mode == "OBJECT", "Decimate must be run in object mode." 
total_orig = self._total_vertex_count(target_objects) total_result = 0 start = time.time() for item in target_objects: if item.data.shape_keys and len(item.data.shape_keys.key_blocks): if not self.decimate_remove_shape_keys: self.report({"WARNING"}, f"[ Decimate ] Skipping {item.name} because it has shape keys.") continue else: self._remove_shape_keys(item) if len(item.data.polygons) < self.decimate_min_face_count: self.report({"INFO"}, f"{item.name} is under face count-- not decimating.") continue ## We're going to use the decimate modifier mod = item.modifiers.new("OmniLOD", type="DECIMATE") mod.decimate_type = "COLLAPSE" mod.ratio = self.decimate_ratio / 100.0 mod.use_collapse_triangulate = True mod.use_symmetry = self.decimate_use_symmetry mod.symmetry_axis = self.decimate_symmetry_axis ## we don't need a full context override here self.set_active(item) bpy.ops.object.modifier_apply(modifier=mod.name) total_result += len(item.data.vertices) end = time.time() if self.verbose: plural, obj_count = get_plural_count(target_objects) message = f"Decimated {obj_count} object{plural}. Vertex count original {total_orig} to {total_result} ({end-start:.02f} seconds)." self.report({"INFO"}, message) return target_objects def do_chop(self, target_objects:List[Object]): """ Assumes all objects are selected and that we are in Object mode """ assert bpy.context.mode == "OBJECT", "Chop must be run in object mode." 
scene = bpy.context.scene attributes = scene.omni_sceneopt_chop.attributes() attributes["selected_only"] = self.selected bpy.ops.omni_sceneopt.chop(**attributes) return target_objects def do_generate(self, target_objects:List[Object]): with self.override(target_objects): bpy.ops.omni_sceneopt.generate(generate_type=self.generate_type, generate_duplicate=self.generate_duplicate) return target_objects def execute(self, context:Context) -> Set[str]: start = time.time() active = context.active_object if self.selected: targets = selected_meshes(context.scene) else: targets = [x for x in context.scene.collection.all_objects if x.type == "MESH"] bpy.ops.object.select_all(action="DESELECT") [ x.select_set(True) for x in targets ] if active: self.set_active(active) if not len(targets): self.info({"ERROR"}, "No targets specified.") return {"CANCELLED"} self._object_mode() ## Have to do vertex counts outside edit mode! total_orig = self._total_vertex_count(targets) if self.validate or self.weld: with self.edit_override(targets): bpy.ops.object.mode_set(mode="EDIT") ## We can run these two operations together because they don't collide ## or cause issues between each other. if self.validate: self.do_validate(targets) if self.weld: self.do_weld(targets) ## Unfortunately, the rest are object-by-object operations self._object_mode() total_result = self._total_vertex_count(targets) if self.verbose and self.weld: plural, obj_count = get_plural_count(targets) message = f"Welded {obj_count} object{plural}. Vertex count original {total_orig} to {total_result}." 
self.report({"INFO"}, message) if self.unwrap: self.do_unwrap(targets) if self.decimate: self.do_decimate(targets) if self.chop: self.do_chop(targets) if self.generate: self.do_generate(targets) end = time.time() if self.verbose: self.report({"INFO"}, f"Optimization complete-- process took {end-start:.02f} seconds") return {"FINISHED"} ## ====================================================================== class OBJECT_OT_omni_sceneopt_chop(bpy.types.Operator, OmniSceneOptChopPropertiesMixin): """Chop the specified object into a grid of smaller ones""" bl_idname = "omni_sceneopt.chop" bl_label = "Omni Scene Optimizer: Chop" bl_options = {"REGISTER", "UNDO"} # def draw(self, context:Context): # """Empty draw to disable the Operator Props Panel.""" # pass def execute(self, context:Context) -> Set[str]: attributes = dict( merge=self.merge, cut_meshes=self.cut_meshes, max_vertices=self.max_vertices, min_box_size=self.min_box_size, max_depth=self.max_depth, print_updated_results=self.print_updated_results, create_bounds=self.create_bounds, selected_only=self.selected_only ) from .scripts.chop import Chop chopper = Chop() chopper.execute(self.attributes()) return {"FINISHED"} ## ====================================================================== class OBJECT_OT_omni_sceneopt_generate(bpy.types.Operator, OmniSceneOptGeneratePropertiesMixin, OmniOverrideMixin): """Generate geometry based on selected objects. 
Currently supported: Bounding Box, Convex Hull""" bl_idname = "omni_sceneopt.generate" bl_label = "Omni Scene Optimizer: Generate" bl_options = {"REGISTER", "UNDO"} # def draw(self, context:Context): # """Empty draw to disable the Operator Props Panel.""" # pass def create_geometry_nodes_group(self, group:NodeTree): """Create or return the shared Generate node group.""" node_type = { "CONVEX_HULL": "GeometryNodeConvexHull", "BOUNDING_BOX": "GeometryNodeBoundBox", }[self.generate_type] geometry_input = group.nodes["Group Input"] geometry_input.location = Vector((-1.5 * geometry_input.width, 0)) group_output = group.nodes["Group Output"] group_output.location = Vector((1.5 * group_output.width, 0)) node = group.nodes.new(node_type) node.name = "Processor" group.links.new(geometry_input.outputs['Geometry'], node.inputs['Geometry']) group.links.new(node.outputs[0], group_output.inputs['Geometry']) return bpy.data.node_groups[generate_name] def create_geometry_nodes_modifier(self, ob:Object) -> Modifier: if generate_name in ob.modifiers: ob.modifiers.remove(ob.modifiers[generate_name]) if generate_name in bpy.data.node_groups: bpy.data.node_groups.remove(bpy.data.node_groups[generate_name]) mod = ob.modifiers.new(name=generate_name, type="NODES") bpy.ops.node.new_geometry_node_group_assign() mod.node_group.name = generate_name self.create_geometry_nodes_group(mod.node_group) return mod def create_duplicate(self, ob:Object, token:str) -> Object: from .batch import lod duplicate = lod.duplicate_object(ob, token, weld=False) return duplicate @preserve_selection def apply_modifiers(self, target_objects:List[Object]): count = 0 for item in target_objects: if self.generate_duplicate: token = self.generate_type.rpartition("_")[-1] duplicate = self.create_duplicate(item, token=token) duplicate.parent = item.parent duplicate.matrix_world = item.matrix_world.copy() bpy.context.scene.collection.objects.unlink(duplicate) for collection in item.users_collection: 
collection.objects.link(duplicate) item = duplicate with self.override([item]): mod = self.create_geometry_nodes_modifier(item) bpy.context.view_layer.objects.active = item item.select_set(True) bpy.ops.object.modifier_apply(modifier=mod.name) count += 1 def execute(self, context:Context) -> Set[str]: changed = self.apply_modifiers(context.selected_objects) if changed: group = bpy.data.node_groups["OMNI_SCENEOPT_GENERATE"] bpy.data.node_groups.remove(group) return {"FINISHED"} ## ====================================================================== class OBJECT_OT_omni_progress(bpy.types.Operator): bl_idname = "omni.progress" bl_label = "Export Optimized USD" bl_options = {"REGISTER", "UNDO"} message: StringProperty(name="message", description="Message to print upon completion.", default="") _timer = None def modal(self, context:Context, event:Event) -> Set[str]: if context.scene.omni_progress_active is False: message = self.message.strip() if len(message): self.report({"INFO"}, message) return {"FINISHED"} context.area.tag_redraw() context.window.cursor_set("WAIT") return {"RUNNING_MODAL"} def invoke(self, context:Context, event:Event) -> Set[str]: context.scene.omni_progress_active = True self._timer = context.window_manager.event_timer_add(0.1, window=context.window) context.window_manager.modal_handler_add(self) context.window.cursor_set("WAIT") return {"RUNNING_MODAL"} ## ====================================================================== class OBJECT_OT_omni_sceneopt_export(bpy.types.Operator, OmniSceneOptPropertiesMixin, OmniSceneOptChopPropertiesMixin, OmniSceneOptGeneratePropertiesMixin): """Runs specified optimizations on the scene before running a USD Export""" bl_idname = "omni_sceneopt.export" bl_label = "Export USD" bl_options = {"REGISTER", "UNDO"} filepath: StringProperty(subtype="FILE_PATH") filter_glob: StringProperty(default="*.usd;*.usda;*.usdc", options={"HIDDEN"}) check_existing: BoolProperty(default=True, options={"HIDDEN"}) def 
draw(self, context:Context): """Empty draw to disable the Operator Props Panel.""" pass def invoke(self, context:Context, event:Event) -> Set[str]: if len(self.filepath.strip()) == 0: self.filepath = "untitled.usdc" context.window_manager.fileselect_add(self) return {"RUNNING_MODAL"} def execute(self, context:Context) -> Set[str]: output_path = bpy.path.abspath(self.filepath) script_path = os.sep.join((os.path.dirname(os.path.abspath(__file__)), "batch", "optimize_export.py")) bpy.ops.omni.progress(message=f"Finished background write to {output_path}") bpy.ops.wm.save_mainfile() command = " ".join([ '"{}"'.format(bpy.app.binary_path), "--background", '"{}"'.format(bpy.data.filepath), "--python", '"{}"'.format(script_path), "--", '"{}"'.format(output_path) ]) print(command) subprocess.check_output(command, shell=True) context.scene.omni_progress_active = False if self.verbose: self.report({"INFO"}, f"Exported optimized scene to: {output_path}") return {"FINISHED"} ## ====================================================================== classes = [ OBJECT_OT_omni_sceneopt_decimate, OBJECT_OT_omni_sceneopt_chop, OBJECT_OT_omni_sceneopt_generate, OBJECT_OT_omni_sceneopt_optimize, OBJECT_OT_omni_progress, OBJECT_OT_omni_sceneopt_export, chopProperties ] def unregister(): try: del bpy.types.Scene.omni_sceneopt_chop except AttributeError: pass try: del bpy.types.Scene.omni_progress_active except AttributeError: pass for cls in reversed(classes): try: bpy.utils.unregister_class(cls) except (ValueError, AttributeError, RuntimeError): continue def register(): for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.omni_sceneopt_chop = bpy.props.PointerProperty(type=chopProperties) bpy.types.Scene.omni_progress_active = bpy.props.BoolProperty(default=False)
23,131
Python
30.687671
134
0.67001
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/panel.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

from bpy.types import Panel
from os.path import join, dirname
import bpy.utils.previews


# ---------------Custom ICONs----------------------
def get_icons_directory():
    """Return the absolute path of the add-on's bundled ``icons`` directory."""
    icons_directory = join(dirname(__file__), "icons")
    return icons_directory


class OPTIMIZE_PT_Panel(Panel):
    """N-panel UI for the scene-optimization operator.

    Draws the operator launcher plus the property sets for whichever
    operation (modify / fix mesh / uv / chop) is currently selected on
    the scene-level option groups.
    """

    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_label = "OPTIMIZE SCENE"
    bl_category = "Omniverse"

    # Retrieve icons once at class-definition time; the preview collection
    # is shared by every instance of the panel.
    icons = bpy.utils.previews.new()
    icons_directory = get_icons_directory()
    icons.load("OMNI", join(icons_directory, "ICON.png"), 'IMAGE')
    icons.load("GEAR", join(icons_directory, "gear.png"), 'IMAGE')

    def draw(self, context):
        """Draw the panel: main operator button, then per-operation options."""
        layout = self.layout
        layout.label(text="Omniverse", icon_value=self.icons["OMNI"].icon_id)

        optimizeOptions = context.scene.optimize_options
        modifyOptions = context.scene.modify_options
        uvOptions = context.scene.uv_options
        chopOptions = context.scene.chop_options

        # OPERATOR SETTINGS
        box = layout.box()
        col = box.column(align=True)
        row = col.row(align=True)
        row.scale_y = 1.5
        row.operator("optimize.scene", text="Optimize Scene",
                     icon_value=self.icons["GEAR"].icon_id)
        col.separator()
        row2 = col.row(align=True)
        row2.scale_y = 1.3
        row2.prop(optimizeOptions, "operation", text="Operation")
        col.separator()
        col.prop(optimizeOptions, "print_attributes", expand=True)

        box2 = layout.box()
        box2.label(text="OPERATION PROPERTIES:")
        col2 = box2.column(align=True)

        # MODIFY SETTINGS
        if optimizeOptions.operation == 'modify':
            row = col2.row(align=True)
            row.prop(modifyOptions, "modifier", text="Modifier")
            row2 = col2.row(align=True)
            row3 = col2.row(align=True)

            # DECIMATE
            if modifyOptions.modifier == 'DECIMATE':
                row2.prop(modifyOptions, "decimate_type", expand=True)
                if modifyOptions.decimate_type == 'COLLAPSE':
                    row3.prop(modifyOptions, "ratio", expand=True)
                elif modifyOptions.decimate_type == 'UNSUBDIV':
                    row3.prop(modifyOptions, "iterations", expand=True)
                elif modifyOptions.decimate_type == 'DISSOLVE':
                    row3.prop(modifyOptions, "angle", expand=True)

            # REMESH
            elif modifyOptions.modifier == 'REMESH':
                row2.prop(modifyOptions, "remesh_type", expand=True)
                # BLOCKS, SMOOTH and SHARP all expose the same octree-depth
                # control; only VOXEL uses a voxel size instead.
                if modifyOptions.remesh_type in {'BLOCKS', 'SMOOTH', 'SHARP'}:
                    row3.prop(modifyOptions, "oDepth", expand=True)
                if modifyOptions.remesh_type == 'VOXEL':
                    row3.prop(modifyOptions, "voxel_size", expand=True)

            # NODES
            elif modifyOptions.modifier == 'NODES':
                row2.prop(modifyOptions, "geo_type")
                if modifyOptions.geo_type == "GeometryNodeSubdivisionSurface":
                    row2.prop(modifyOptions, "geo_attribute", expand=True)

            col2.prop(modifyOptions, "selected_only", expand=True)
            col2.prop(modifyOptions, "apply_mod", expand=True)

            box3 = col2.box()
            col3 = box3.column(align=True)
            col3.label(text="FIX MESH BEFORE MODIFY")
            col3.prop(modifyOptions, "fix_bad_mesh", expand=True)
            if modifyOptions.fix_bad_mesh:
                col3.prop(modifyOptions, "dissolve_threshold", expand=True)
            col3.prop(modifyOptions, "merge_vertex", expand=True)
            if modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "merge_threshold", expand=True)
            if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "remove_existing_sharp", expand=True)
            col3.prop(modifyOptions, "fix_normals", expand=True)
            if modifyOptions.fix_normals:
                col3.prop(modifyOptions, "create_new_custom_normals", expand=True)

            # use_modifier_stack= modifyOptions.use_modifier_stack,
            # modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],

        # FIX MESH SETTINGS
        elif optimizeOptions.operation == 'fixMesh':
            col2.prop(modifyOptions, "selected_only", expand=True)
            col3 = col2.column(align=True)
            col3.prop(modifyOptions, "fix_bad_mesh", expand=True)
            if modifyOptions.fix_bad_mesh:
                col3.prop(modifyOptions, "dissolve_threshold", expand=True)
            col3.prop(modifyOptions, "merge_vertex", expand=True)
            if modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "merge_threshold", expand=True)
            if modifyOptions.fix_bad_mesh or modifyOptions.merge_vertex:
                col3.prop(modifyOptions, "remove_existing_sharp", expand=True)
            col3.prop(modifyOptions, "fix_normals", expand=True)
            if modifyOptions.fix_normals:
                col3.prop(modifyOptions, "create_new_custom_normals", expand=True)

        # UV SETTINGS
        elif optimizeOptions.operation == 'uv':
            if uvOptions.unwrap_type == 'Smart':
                col2.label(text="SMART UV CAN BE SLOW", icon='ERROR')
            else:
                col2.label(text="Unwrap Type")
            col2.prop(uvOptions, "unwrap_type", expand=True)
            col2.prop(uvOptions, "selected_only", expand=True)
            col2.prop(uvOptions, "scale_to_bounds", expand=True)
            col2.prop(uvOptions, "clip_to_bounds", expand=True)
            col2.prop(uvOptions, "use_set_size", expand=True)
            if uvOptions.use_set_size:
                col2.prop(uvOptions, "set_size", expand=True)
            col2.prop(uvOptions, "print_updated_results", expand=True)

        # CHOP SETTINGS
        elif optimizeOptions.operation == 'chop':
            col2.prop(chopOptions, "selected_only", expand=True)
            col2.prop(chopOptions, "cut_meshes", expand=True)
            col2.prop(chopOptions, "max_vertices", expand=True)
            col2.prop(chopOptions, "min_box_size", expand=True)
            col2.prop(chopOptions, "max_depth", expand=True)
            col2.prop(chopOptions, "merge", expand=True)
            col2.prop(chopOptions, "create_bounds", expand=True)
            col2.prop(chopOptions, "print_updated_results", expand=True)
7,603
Python
44.532934
102
0.605682
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/properties.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

from typing import *

from bpy.props import *
import bpy


class optimizeProperties(bpy.types.PropertyGroup):
    """Top-level options: which optimization operation to run on the scene."""

    # PROPERTIES
    operation: EnumProperty(
        name="Operation",
        items=[
            ('modify', 'MODIFY', 'run modify'),
            ('fixMesh', 'FIX MESH', 'run fix Mesh'),
            ('uv', 'UV UNWRAP', "run uv"),
            ('chop', 'CHOP', 'run chop')],
        description="Choose the operation to run on the scene",
        default='modify'
    )

    print_attributes: BoolProperty(
        name="Print Attributes",
        # NOTE: fixed tooltip typo ("begging" -> "beginning").
        description="Print attributes used at the beginning of operation",
        default=False
    )


class modProperties(bpy.types.PropertyGroup):
    """Options for the MODIFY / FIX MESH operations (modifier type + mesh cleanup)."""

    # PROPERTIES
    selected_only: BoolProperty(
        name="Use Selected Only",
        description="Operate on selected objects only",
        default=False
    )

    apply_mod: BoolProperty(
        name="Apply Modifier",
        description="Apply modifier after adding",
        default=True
    )

    fix_bad_mesh: BoolProperty(
        name="Fix Bad Mesh",
        description="Remove zero area faces and zero length edges",
        default=False
    )

    dissolve_threshold: FloatProperty(
        name="Dissolve Threshold",
        description="Threshold value used with Fix Bad Mesh",
        default=0.08,
        min=0,
        max=50
    )

    merge_vertex: BoolProperty(
        name="Merge Vertex",
        description="Merge vertices by distance",
        default=False
    )

    merge_threshold: FloatProperty(
        name="Merge Threshold",
        description="Distance value used with merge vertex",
        default=0.01,
        min=0,
        max=50
    )

    remove_existing_sharp: BoolProperty(
        name="Remove Existing Sharp",
        description="Remove existing sharp edges from meshes. This helps sometimes after fixing bad meshes",
        default=True
    )

    fix_normals: BoolProperty(
        name="Fix Normals",
        description="Remove existing custom split normals",
        default=False
    )

    create_new_custom_normals: BoolProperty(
        name="Create New Custom Normals",
        description="Create new custom split normals",
        default=False
    )

    # Some common modifier names for reference:
    # 'DECIMATE' 'REMESH' 'NODES' 'SUBSURF' 'SOLIDIFY' 'ARRAY' 'BEVEL'
    modifier: EnumProperty(
        name="Modifier",
        items=[
            ('DECIMATE', 'Decimate', 'decimate geometry'),
            ('REMESH', 'Remesh', 'remesh geometry'),
            ('NODES', 'Nodes', 'add geometry node mod'),
            ('FIX', 'Fix Mesh', "fix mesh")],
        description="Choose the modifier to apply to geometry",
        default='DECIMATE'
    )

    # TODO: Implement this modifier stack properly.
    # Would allow for multiple modifiers to be queued and run at once.
    # use_modifier_stack: BoolProperty(
    #     name ="Use Modifier Stack",
    #     description = "use stack of modifiers instead of a single modifier",
    #     default = False
    # )
    # modifier_stack: CollectionProperty(
    #     type= optimizeProperties,
    #     name="Modifiers",
    #     description= "list of modifiers to be used",
    #     default = [["DECIMATE", "COLLAPSE", 0.5]]
    # )

    decimate_type: EnumProperty(
        items=[
            ('COLLAPSE', 'collapse', "collapse geometry"),
            ('UNSUBDIV', 'unSubdivide', "un subdivide geometry"),
            ('DISSOLVE', 'planar', "dissolve geometry")],
        description="Choose which type of decimation to perform.",
        default="COLLAPSE"
    )

    ratio: FloatProperty(
        name="Ratio",
        default=0.5,
        min=0.0,
        max=1.0
    )

    iterations: IntProperty(
        name="Iterations",
        default=2,
        min=0,
        max=50
    )

    angle: FloatProperty(
        name="Angle",
        default=15.0,
        min=0.0,
        max=180.0
    )

    remesh_type: EnumProperty(
        items=[
            ('BLOCKS', 'blocks', "collapse geometry"),
            ('SMOOTH', 'smooth', "un subdivide geometry"),
            ('SHARP', 'sharp', "un subdivide geometry"),
            ('VOXEL', 'voxel', "dissolve geometry")],
        description="Choose which type of remesh to perform.",
        default="VOXEL"
    )

    oDepth: IntProperty(
        name="Octree Depth",
        default=4,
        min=1,
        max=8
    )

    voxel_size: FloatProperty(
        name="Voxel Size",
        default=0.1,
        min=0.01,
        max=2.0
    )

    geo_type: EnumProperty(
        items=[
            ('GeometryNodeConvexHull', 'convex hull', "basic convex hull"),
            ('GeometryNodeBoundBox', 'bounding box', "basic bounding box"),
            ('GeometryNodeSubdivisionSurface', 'subdiv', "subdivide geometry")],
        description="Choose which type of geo node tree to add",
        default="GeometryNodeBoundBox"
    )

    geo_attribute: IntProperty(
        name="Attribute",
        description="Additional attribute used for certain geo nodes",
        default=2,
        min=0,
        max=8
    )


class uvProperties(bpy.types.PropertyGroup):
    """Options for the UV UNWRAP operation."""

    # PROPERTIES
    selected_only: BoolProperty(
        name="Use Selected Only",
        description="Operate on selected objects only",
        default=False
    )

    unwrap_type: EnumProperty(
        items=[
            ('Cube', 'cube project', "basic convex hull"),
            ('Sphere', 'sphere project', "subdivide geometry"),
            ('Cylinder', 'cylinder project', "dissolve geometry"),
            ('Smart', 'smart project', "basic bounding box")],
        description="Choose which type of unwrap process to use.",
        default="Cube"
    )

    scale_to_bounds: BoolProperty(
        name="Scale To Bounds",
        description="Scale UVs to 2D bounds",
        default=False
    )

    clip_to_bounds: BoolProperty(
        name="Clip To Bounds",
        description="Clip UVs to 2D bounds",
        default=False
    )

    use_set_size: BoolProperty(
        name="Use Set Size",
        description="Use a defined UV size for all objects",
        default=False
    )

    set_size: FloatProperty(
        name="Set Size",
        default=2.0,
        min=0.01,
        max=100.0
    )

    print_updated_results: BoolProperty(
        name="Print Updated Results",
        description="Print updated results to console",
        default=True
    )


class OmniSceneOptChopPropertiesMixin:
    """Shared CHOP options; mixed into both a PropertyGroup and operators."""

    selected_only: BoolProperty(
        name="Split Selected Only",
        description="Operate on selected objects only",
        default=False
    )

    print_updated_results: BoolProperty(
        name="Print Updated Results",
        description="Print updated results to console",
        default=True
    )

    cut_meshes: BoolProperty(
        name="Cut Meshes",
        description="Cut meshes",
        default=True
    )

    merge: BoolProperty(
        name="Merge",
        description="Merge split chunks after splitting is complete",
        default=False
    )

    create_bounds: BoolProperty(
        name="Create Boundary Objects",
        description="Add generated boundary objects to scene",
        default=False
    )

    max_depth: IntProperty(
        name="Max Depth",
        description="Maximum recursion depth",
        default=8,
        min=0,
        max=32
    )

    max_vertices: IntProperty(
        name="Max Vertices",
        description="Maximum vertices allowed per block",
        default=10000,
        min=0,
        max=1000000
    )

    min_box_size: FloatProperty(
        name="Min Box Size",
        description="Minimum dimension for a chunk to be created",
        default=1,
        min=0,
        max=10000
    )

    def attributes(self) -> Dict:
        """Return the chop settings as a plain dict (keyword args for the chop op)."""
        return dict(
            merge=self.merge,
            cut_meshes=self.cut_meshes,
            max_vertices=self.max_vertices,
            min_box_size=self.min_box_size,
            max_depth=self.max_depth,
            print_updated_results=self.print_updated_results,
            create_bounds=self.create_bounds,
            selected_only=self.selected_only
        )

    def set_attributes(self, attributes: Dict):
        """Bulk-assign chop settings from a dict; raises ValueError on unknown keys."""
        for attr, value in attributes.items():
            if hasattr(self, attr):
                setattr(self, attr, value)
            else:
                raise ValueError(
                    f"OmniSceneOptChopPropertiesMixin: invalid attribute for set {attr}")


class chopProperties(bpy.types.PropertyGroup, OmniSceneOptChopPropertiesMixin):
    """Scene-level PropertyGroup carrying the shared chop settings."""
    pass
9,344
Python
27.842593
115
0.601241
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/ui.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

import os
from typing import *

import bpy
from bpy.utils import previews
from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty,
                       StringProperty)
from bpy.types import (Context, Object, Operator, Scene)

from .operators import (
    OBJECT_OT_omni_sceneopt_optimize,
    OBJECT_OT_omni_sceneopt_export,
    OmniSceneOptPropertiesMixin,
    OmniSceneOptGeneratePropertiesMixin,
    selected_meshes,
    symmetry_axis_items
)


## ======================================================================
def preload_icons() -> previews.ImagePreviewCollection:
    """Preload icons used by the interface."""
    icons_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons")
    all_icons = {
        "GEAR": "gear.png",
        "ICON": "ICON.png",
    }

    preview = previews.new()
    for name, filepath in all_icons.items():
        preview.load(name, os.path.join(icons_directory, filepath), "IMAGE")

    return preview


## ======================================================================
class OmniSceneOptProperties(bpy.types.PropertyGroup,
                             OmniSceneOptPropertiesMixin,
                             OmniSceneOptGeneratePropertiesMixin):
    """We're only here to register the mixins through the PropertyGroup"""
    pass


## ======================================================================
def can_run_optimization(scene: Scene) -> bool:
    """Return True when the optimize operator has work to do.

    False when "selected only" is on but nothing is selected, or when
    every optimization toggle is off.
    """
    opts = scene.omni_sceneopt
    if opts.selected and not len(selected_meshes(scene)):
        return False

    # At least one operation must be enabled for the run button to make sense.
    return any((
        opts.validate,
        opts.weld,
        opts.decimate,
        opts.unwrap,
        opts.chop,
        opts.generate,
    ))


## ======================================================================
class OBJECT_PT_OmniOptimizationPanel(bpy.types.Panel):
    """N-panel that exposes the scene-optimizer settings and run/export buttons."""

    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Omniverse"
    bl_label = "Scene Optimizer"
    bl_options = {"DEFAULT_CLOSED"}

    icons = preload_icons()

    @staticmethod
    def _apply_parameters(settings, op: Operator):
        """Copy parameters from the scene-level settings blob to an operator"""
        invalid = {"bl_rna", "name", "rna_type"}
        for property_name in dir(settings):
            # Skip private names and RNA bookkeeping attributes.
            if property_name.startswith('_') or property_name in invalid:
                continue
            if hasattr(op, property_name):
                setattr(op, property_name, getattr(settings, property_name))

        op.verbose = True

    def draw_validate(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "validate")

    def draw_weld(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "weld")
        if not scene.omni_sceneopt.weld:
            return
        box.prop(scene.omni_sceneopt, "weld_distance")

    def draw_decimate(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "decimate")
        if not scene.omni_sceneopt.decimate:
            return

        box.prop(scene.omni_sceneopt, "decimate_ratio")
        box.prop(scene.omni_sceneopt, "decimate_min_face_count")

        # The symmetry-axis picker is only meaningful when symmetry is on.
        row = box.row()
        row.prop(scene.omni_sceneopt, "decimate_use_symmetry")
        row = row.row()
        row.prop(scene.omni_sceneopt, "decimate_symmetry_axis", text="")
        row.enabled = scene.omni_sceneopt.decimate_use_symmetry

        box.prop(scene.omni_sceneopt, "decimate_remove_shape_keys")

    def draw_unwrap(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "unwrap")
        if not scene.omni_sceneopt.unwrap:
            return
        box.prop(scene.omni_sceneopt, "unwrap_margin")

    def draw_chop(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "chop")
        if not scene.omni_sceneopt.chop:
            return

        col = box.column(align=True)
        col.prop(scene.omni_sceneopt_chop, "max_vertices")
        col.prop(scene.omni_sceneopt_chop, "min_box_size")
        col.prop(scene.omni_sceneopt_chop, "max_depth")
        box.prop(scene.omni_sceneopt_chop, "create_bounds")

    def draw_generate(self, layout, scene: Scene):
        box = layout.box()
        box.prop(scene.omni_sceneopt, "generate", text="Generate Bounding Mesh")
        if not scene.omni_sceneopt.generate:
            return

        col = box.column(align=True)
        col.prop(scene.omni_sceneopt, "generate_type")
        col.prop(scene.omni_sceneopt, "generate_duplicate")

    def draw_operators(self, layout, context: Context, scene: Scene):
        layout.label(text="")
        row = layout.row(align=True)
        row.label(text="Run Operations", icon="PLAY")
        row.prop(scene.omni_sceneopt, "selected", text="Selected Meshes Only")

        run_text = 'Selected' if scene.omni_sceneopt.selected else 'Scene'

        col = layout.column(align=True)
        op = col.operator(OBJECT_OT_omni_sceneopt_optimize.bl_idname,
                          text=f"Optimize {run_text}",
                          icon_value=self.icons["GEAR"].icon_id)
        self._apply_parameters(scene.omni_sceneopt, op)
        col.enabled = can_run_optimization(scene)

        col = layout.column(align=True)
        op = col.operator(OBJECT_OT_omni_sceneopt_export.bl_idname,
                          text="Export Optimized Scene to USD",
                          icon='EXPORT')
        self._apply_parameters(scene.omni_sceneopt, op)

        col.label(text="Export Options")
        row = col.row(align=True)
        row.prop(scene.omni_sceneopt, "merge")
        row.prop(scene.omni_sceneopt, "export_textures")

    def draw(self, context: Context):
        scene = context.scene
        layout = self.layout

        self.draw_validate(layout, scene=scene)
        self.draw_weld(layout, scene=scene)
        self.draw_unwrap(layout, scene=scene)
        self.draw_decimate(layout, scene=scene)
        self.draw_chop(layout, scene=scene)
        self.draw_generate(layout, scene=scene)
        self.draw_operators(layout, context, scene=scene)


## ======================================================================
classes = [
    OBJECT_PT_OmniOptimizationPanel,
    OmniSceneOptProperties,
]


def unregister():
    """Remove the scene pointer and unregister classes; safe to call repeatedly."""
    try:
        del bpy.types.Scene.omni_sceneopt
    except (ValueError, AttributeError, RuntimeError):
        pass

    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except (ValueError, AttributeError, RuntimeError):
            continue


def register():
    """Register classes and attach the options PropertyGroup to the Scene."""
    for cls in classes:
        bpy.utils.register_class(cls)

    bpy.types.Scene.omni_sceneopt = bpy.props.PointerProperty(type=OmniSceneOptProperties)
6,169
Python
27.302752
94
0.677906
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/lod.py
import argparse import os import sys from typing import * import bpy from bpy.types import (Collection, Context, Image, Object, Material, Mesh, Node, NodeSocket, NodeTree, Scene) from bpy.props import * from mathutils import * ## ====================================================================== def select_only(ob:Object): """ Ensure that only the specified object is selected. :param ob: Object to select """ bpy.ops.object.select_all(action="DESELECT") ob.select_set(state=True) bpy.context.view_layer.objects.active = ob ## -------------------------------------------------------------------------------- def _selected_meshes(context:Context, use_instancing=True) -> List[Mesh]: """ :return: List[Mesh] of all selected mesh objects in active Blender Scene. """ ## instances support meshes = [x for x in context.selected_objects if x.type == "MESH"] instances = [x for x in context.selected_objects if x.type == "EMPTY" and x.instance_collection] if use_instancing: for inst in instances: instance_meshes = [x for x in inst.instance_collection.all_objects if x.type == "MESH"] meshes += instance_meshes meshes = list(set(meshes)) return meshes ## -------------------------------------------------------------------------------- def copy_object_parenting(source_ob:Object, target_ob:Object): """ Copy parenting and Collection membership from a source object. """ target_collections = list(target_ob.users_collection) for collection in target_collections: collection.objects.unlink(target_ob) for collection in source_ob.users_collection: collection.objects.link(target_ob) target_ob.parent = source_ob.parent ## -------------------------------------------------------------------------------- def find_unique_name(name:str, library:Iterable) -> str: """ Given a Blender library, find a unique name that does not exist in it. 
""" if not name in library: return name index = 0 result_name = name + f".{index:03d}" while result_name in library: index += 1 result_name = name + f".{index:03d}" print(f"Unique Name: {result_name}") return result_name ## -------------------------------------------------------------------------------- def duplicate_object(ob:Object, token:str="D", weld=True) -> Object: """ Duplicates the specified object, maintaining the same parenting and collection memberships. """ base_name = "__".join((ob.name.rpartition("__")[0] if "__" in ob.name else ob.name, token)) base_data = "__".join((ob.data.name.rpartition("__")[0] if "__" in ob.data.name else ob.data.name, token)) if base_name in bpy.data.objects: base_name = find_unique_name(base_name, bpy.data.objects) if base_data in bpy.data.objects: base_data = find_unique_name(base_data, bpy.data.objects) data = ob.data.copy() data.name = base_data duplicate = bpy.data.objects.new(base_name, data) ## Ensure scene collection membership ## Prototypes might not have this or be in the view layer if not duplicate.name in bpy.context.scene.collection.all_objects: bpy.context.scene.collection.objects.link(duplicate) select_only(duplicate) ## decimate doesn't work on unwelded triangle soups if weld: bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_all(action="SELECT") bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True) bpy.ops.object.mode_set(mode="OBJECT") return duplicate ## -------------------------------------------------------------------------------- def delete_mesh_object(ob:Object): """ Removes object from the Blender library. 
""" base_name = ob.name data_name = ob.data.name bpy.data.objects.remove(bpy.data.objects[base_name]) bpy.data.meshes.remove(bpy.data.meshes[data_name]) ## -------------------------------------------------------------------------------- def decimate_object(ob:Object, token:str=None, ratio:float=0.5, use_symmetry:bool=False, symmetry_axis="X", min_face_count:int=3, create_duplicate=True): old_mode = bpy.context.mode scene = bpy.context.scene token = token or "DCM" if create_duplicate: target = duplicate_object(ob, token=token) else: target = ob if len(target.data.polygons) < min_face_count: print(f"{target.name} is under face count-- not decimating.") return target ## We're going to use the decimate modifier mod = target.modifiers.new("OmniLOD", type="DECIMATE") mod.decimate_type = "COLLAPSE" mod.ratio = ratio mod.use_collapse_triangulate = True mod.use_symmetry = use_symmetry mod.symmetry_axis = symmetry_axis bpy.ops.object.select_all(action="DESELECT") target.select_set(True) bpy.context.view_layer.objects.active = target bpy.ops.object.modifier_apply(modifier=mod.name) return target ## -------------------------------------------------------------------------------- def decimate_selected(ratios:List[float]=[0.5], min_face_count=3, use_symmetry:bool=False, symmetry_axis="X", use_instancing=True): assert isinstance(ratios, (list, tuple)), "Ratio should be a list of floats from 0.1 to 1.0" for value in ratios: assert 0.1 <= value <= 1.0, f"Invalid ratio value {value} -- should be between 0.1 and 1.0" selected_objects = list(bpy.context.selected_objects) active = bpy.context.view_layer.objects.active selected_meshes = _selected_meshes(bpy.context, use_instancing=use_instancing) total = len(selected_meshes) * len(ratios) count = 1 print(f"\n\n[ Generating {total} decimated LOD meshes (minimum face count: {min_face_count}]") for mesh in selected_meshes: welded_duplicate = duplicate_object(mesh, token="welded") for index, ratio in enumerate(ratios): padd = 
len(str(total)) - len(str(count)) token = f"LOD{index}" orig_count = len(welded_duplicate.data.vertices) lod_duplicate = decimate_object(welded_duplicate, ratio=ratio, token=token, use_symmetry=use_symmetry, symmetry_axis=symmetry_axis, min_face_count=min_face_count) print(f"[{'0'*padd}{count}/{total}] Decimating {mesh.name} to {ratio} ({orig_count} >> {len(lod_duplicate.data.vertices)}) ...") copy_object_parenting(mesh, lod_duplicate) count += 1 delete_mesh_object(welded_duplicate) print(f"\n[ Decimation complete ]\n\n") ## -------------------------------------------------------------------------------- def import_usd_file(filepath:str, root_prim:Optional[str]=None, visible_only:bool=False, use_instancing:bool=True): all_objects = bpy.context.scene.collection.all_objects names = [x.name for x in all_objects] try: bpy.ops.object.mode_set(mode="OBJECT") except RuntimeError: pass for name in names: ob = bpy.data.objects[name] bpy.data.objects.remove(ob) kwargs = { "filepath":filepath, "import_cameras": False, "import_curves": False, "import_lights": False, "import_materials": True, "import_blendshapes": False, "import_volumes": False, "import_skeletons": False, "import_shapes": False, "import_instance_proxies": True, "import_visible_only": visible_only, "read_mesh_uvs": True, "read_mesh_colors": False, "use_instancing": use_instancing, "validate_meshes": True, } if root_prim: ## if you end with a slash it fails kwargs["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim bpy.ops.wm.usd_import(**kwargs) print(f"Imported USD file: {filepath}") ## -------------------------------------------------------------------------------- def export_usd_file(filepath:str, use_instancing:bool=True): kwargs = { "filepath":filepath, "visible_objects_only": False, "default_prim_path": "/World", "root_prim_path": "/World", "generate_preview_surface": True, "export_materials": True, "export_uvmaps": True, "merge_transform_and_shape": True, "use_instancing": 
use_instancing, } bpy.ops.wm.usd_export(**kwargs) print(f"Wrote USD file with UVs: {filepath}") ## ====================================================================== if __name__ == "__main__": real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [] parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, required=True, help="Path to input USD file") parser.add_argument('--output', type=str, help="Path to output USD file (default is input_LOD.usd)") parser.add_argument('--ratios', type=str, required=True, help='Ratios to use as a space-separated string, ex: "0.5 0.2"') parser.add_argument('--use_symmetry', action="store_true", default=False, help="Decimate with symmetry enabled.") parser.add_argument('--symmetry_axis', default="X", help="Symmetry axis to use (X, Y, or Z)") parser.add_argument('--visible_only', action="store_true", default=False, help="Only import visible prims from the input USD file.") parser.add_argument('--min_face_count', type=int, default=3, help="Minimum number of faces for decimation.") parser.add_argument('--no_instancing', action="store_false", help="Process the prototype meshes of instanced prims.") parser.add_argument('--root_prim', type=str, default=None, help="Root Prim to import. If unspecified, the whole file will be imported.") if not len(real_args): parser.print_help() sys.exit(1) args = parser.parse_args(real_args) input_file = os.path.abspath(args.input) split = input_file.rpartition(".") output_path = args.output or (split[0] + "_LOD." 
+ split[-1]) ratios = args.ratios if not " " in ratios: ratios = [float(ratios)] else: ratios = list(map(lambda x: float(x), ratios.split(" "))) use_instancing = not args.no_instancing import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only, use_instancing=use_instancing) bpy.ops.object.select_all(action="SELECT") decimate_selected(ratios=ratios, min_face_count=args.min_face_count, use_symmetry=args.use_symmetry, symmetry_axis=args.symmetry_axis, use_instancing=use_instancing) export_usd_file(output_path, use_instancing=use_instancing) sys.exit(0)
9,912
Python
32.94863
166
0.64659
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/optimize_export.py
import os import sys import time import bpy from omni_optimization_panel.operators import OmniOverrideMixin omniover = OmniOverrideMixin() ## ====================================================================== def perform_scene_merge(): """ Combine all selected mesh objects into a single mesh. """ orig_scene = bpy.context.scene selected = [x for x in bpy.context.selected_objects if x.type == "MESH"] if not len(selected): print("-- No objects selected for merge.") return merge_collection = bpy.data.collections.new("MergeCollection") if not "MergeCollection" in bpy.data.collections else bpy.data.collections["MergeCollection"] merge_scene = bpy.data.scenes.new("MergeScene") if not "MergeScene" in bpy.data.scenes else bpy.data.scenes["MergeScene"] for child in merge_scene.collection.children: merge_scene.collection.children.unlink(child) for ob in merge_collection.all_objects: merge_collection.objects.unlink(ob) to_merge = set() sources = set() for item in selected: to_merge.add(item) merge_collection.objects.link(item) if not item.instance_type == "NONE": item.show_instancer_for_render = True child_set = set(item.children) to_merge |= child_set sources |= child_set merge_scene.collection.children.link(merge_collection) bpy.context.window.scene = merge_scene for item in to_merge: try: merge_collection.objects.link(item) except RuntimeError: continue ## make sure to remove shape keys and merge modifiers for all merge_collection objects for item in merge_collection.all_objects: with omniover.override([item], single=True): if item.data.shape_keys: bpy.ops.object.shape_key_remove(all=True, apply_mix=True) for mod in item.modifiers: bpy.ops.object.modifier_apply(modifier=mod.name, single_user=True) ## turns out the make_duplis_real function swaps selection for you, and ## leaves non-dupli objects selected bpy.ops.object.select_all(action="SELECT") bpy.ops.object.duplicates_make_real() ## this invert and delete is removing the old instancer objects 
bpy.ops.object.select_all(action="INVERT") for item in sources: item.select_set(True) bpy.ops.object.delete(use_global=False) bpy.ops.object.select_all(action="SELECT") ## need an active object for join poll() bpy.context.view_layer.objects.active = bpy.context.selected_objects[0] bpy.ops.object.join() ## ====================================================================== if __name__ == "__main__": real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [] if not len(real_args): print("-- No output path name.") sys.exit(-1) output_file = real_args[-1] ## make sure the add-on is properly loaded bpy.ops.preferences.addon_enable(module="omni_optimization_panel") start_time = time.time() ## pull all attribute names from all mixins for passing on to the optimizer sceneopts = bpy.context.scene.omni_sceneopt chopopts = bpy.context.scene.omni_sceneopt_chop skips = {"bl_rna", "name", "rna_type"} optimize_kwargs = {} for item in sceneopts, chopopts: for key in filter(lambda x: not x.startswith("__") and not x in skips, dir(item)): optimize_kwargs[key] = getattr(item, key) print(f"optimize kwargs: {optimize_kwargs}") if sceneopts.merge: ## merge before because of the possibility of objects getting created perform_scene_merge() bpy.ops.wm.save_as_mainfile(filepath=output_file.rpartition(".")[0]+".blend") ## always export whole scene optimize_kwargs["selected"] = False optimize_kwargs["verbose"] = True bpy.ops.omni_sceneopt.optimize(**optimize_kwargs) optimize_time = time.time() print(f"Optimization time: {(optimize_time - start_time):.2f} seconds.") export_kwargs = { "filepath": output_file, "visible_objects_only": False, "default_prim_path": "/World", "root_prim_path": "/World", "material_prim_path": "/World/materials", "generate_preview_surface": True, "export_materials": True, "export_uvmaps": True, "merge_transform_and_shape": True, "use_instancing": True, "export_textures": sceneopts.export_textures, } bpy.ops.wm.usd_export(**export_kwargs) export_time 
= time.time() print(f"Wrote optimized USD file: {output_file}") print(f"Export time: {(export_time - optimize_time):.2f} seconds.") print(f"Total time: {(export_time - start_time):.2f} seconds.") sys.exit(0)
4,378
Python
30.278571
157
0.693011
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/batch/uv.py
import argparse import os import sys from typing import * import bpy from bpy.types import (Collection, Context, Image, Object, Material, Mesh, Node, NodeSocket, NodeTree, Scene) from bpy.props import * from mathutils import * ## ====================================================================== OMNI_MATERIAL_NAME = "OmniUVTestMaterial" ## ====================================================================== def select_only(ob:Object): """ Ensure that only the specified object is selected. :param ob: Object to select """ bpy.ops.object.select_all(action="DESELECT") ob.select_set(state=True) bpy.context.view_layer.objects.active = ob ## -------------------------------------------------------------------------------- def _selected_meshes(context:Context) -> List[Mesh]: """ :return: List[Mesh] of all selected mesh objects in active Blender Scene. """ return [x for x in context.selected_objects if x.type == "MESH"] ## -------------------------------------------------------------------------------- def get_test_material() -> Material: image_name = "OmniUVGrid" if not image_name in bpy.data.images: bpy.ops.image.new(generated_type="COLOR_GRID", width=4096, height=4096, name=image_name, alpha=False) if not OMNI_MATERIAL_NAME in bpy.data.materials: image = bpy.data.images[image_name] material = bpy.data.materials.new(name=OMNI_MATERIAL_NAME) ## this creates the new graph material.use_nodes = True tree = material.node_tree shader = tree.nodes['Principled BSDF'] im_node = tree.nodes.new("ShaderNodeTexImage") im_node.location = [-300, 300] tree.links.new(im_node.outputs['Color'], shader.inputs['Base Color']) im_node.image = image return bpy.data.materials[OMNI_MATERIAL_NAME] ## -------------------------------------------------------------------------------- def apply_test_material(ob:Object): ##!TODO: Generate it select_only(ob) while len(ob.material_slots): bpy.ops.object.material_slot_remove() material = get_test_material() bpy.ops.object.material_slot_add() 
ob.material_slots[0].material = material ## -------------------------------------------------------------------------------- def unwrap_object(ob:Object, uv_layer_name="OmniUV", apply_material=False, margin=0.0): """ Unwraps the target object by creating a fixed duplicate and copying the UVs over to the original. """ old_mode = bpy.context.mode scene = bpy.context.scene if not old_mode == "OBJECT": bpy.ops.object.mode_set(mode="OBJECT") select_only(ob) uv_layers = list(ob.data.uv_layers) for layer in uv_layers: ob.data.uv_layers.remove(layer) bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_all(action='SELECT') bpy.ops.uv.cube_project() bpy.ops.object.mode_set(mode="OBJECT") duplicate = ob.copy() duplicate.data = ob.data.copy() scene.collection.objects.link(duplicate) ## if the two objects are sitting on each other it gets silly, ## so move the dupe over by double it's Y bounds size bound_size = Vector(duplicate.bound_box[0]) - Vector(duplicate.bound_box[-1]) duplicate.location.y += bound_size.y select_only(duplicate) bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.remove_doubles(threshold=0.01, use_unselected=True) bpy.ops.mesh.normals_make_consistent(inside=True) bpy.ops.object.mode_set(mode="OBJECT") bpy.ops.object.mode_set(mode="EDIT") bpy.ops.uv.select_all(action='SELECT') bpy.ops.uv.smart_project(island_margin=margin) bpy.ops.uv.average_islands_scale() bpy.ops.uv.pack_islands(margin=0) bpy.ops.object.mode_set(mode="OBJECT") ## copies from ACTIVE to all other SELECTED select_only(ob) ## This is incredibly broken # bpy.ops.object.data_transfer(data_type="UV") ## snap back now that good UVs exist; the two meshes need to be in the same ## position in space for the modifier to behave correctly. 
duplicate.matrix_world = ob.matrix_world.copy() modifier = ob.modifiers.new(type="DATA_TRANSFER", name="OmniBake_Transfer") modifier.object = duplicate modifier.use_loop_data = True modifier.data_types_loops = {'UV'} modifier.loop_mapping = 'NEAREST_NORMAL' select_only(ob) bpy.ops.object.modifier_apply(modifier=modifier.name) if apply_material: apply_test_material(ob) bpy.data.objects.remove(duplicate) ## -------------------------------------------------------------------------------- def unwrap_selected(uv_layer_name="OmniUV", apply_material=False, margin=0.0): old_mode = bpy.context.mode selected_objects = list(bpy.context.selected_objects) active = bpy.context.view_layer.objects.active selected_meshes = _selected_meshes(bpy.context) total = len(selected_meshes) count = 1 print(f"\n\n[ Unwrapping {total} meshes ]") for mesh in selected_meshes: padd = len(str(total)) - len(str(count)) print(f"[{'0'*padd}{count}/{total}] Unwrapping {mesh.name}...") unwrap_object(mesh, uv_layer_name=uv_layer_name, apply_material=apply_test_material) count += 1 print(f"\n[ Unwrapping complete ]\n\n") select_only(selected_objects[0]) for item in selected_objects[1:]: item.select_set(True) bpy.context.view_layer.objects.active = active if old_mode == "EDIT_MESH": bpy.ops.object.mode_set(mode="EDIT") ## -------------------------------------------------------------------------------- def import_usd_file(filepath:str, root_prim=None, visible_only=False): all_objects = bpy.context.scene.collection.all_objects names = [x.name for x in all_objects] try: bpy.ops.object.mode_set(mode="OBJECT") except RuntimeError: pass for name in names: ob = bpy.data.objects[name] bpy.data.objects.remove(ob) kwargs = { "filepath":filepath, "import_cameras": False, "import_curves": False, "import_lights": False, "import_materials": False, "import_blendshapes": False, "import_volumes": False, "import_skeletons": False, "import_shapes": False, "import_instance_proxies": True, "import_visible_only": visible_only, 
"read_mesh_uvs": False, "read_mesh_colors": False, } if root_prim: ## if you end with a slash it fails kwargs["prim_path_mask"] = root_prim[:-1] if root_prim.endswith("/") else root_prim bpy.ops.wm.usd_import(**kwargs) print(f"Imported USD file: {filepath}") ## -------------------------------------------------------------------------------- def export_usd_file(filepath:str): kwargs = { "filepath":filepath, "visible_objects_only": False, "default_prim_path": "/World", "root_prim_path": "/World", # "generate_preview_surface": False, # "generate_mdl": False, "merge_transform_and_shape": True, } bpy.ops.wm.usd_export(**kwargs) print(f"Wrote USD file with UVs: {filepath}") ## ====================================================================== if __name__ == "__main__": real_args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [] parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, required=True, help="Path to input USD file") parser.add_argument('--output', type=str, help="Path to output USD file (default is input_UV.usd)") parser.add_argument('--margin', type=float, default=None, help="Island margin (default is 0.01)") parser.add_argument('--root_prim', type=str, default=None, help="Root Prim to import. If unspecified, the whole file will be imported.") parser.add_argument('--add_test_material', action="store_true") parser.add_argument('--visible_only', action="store_true", default=False) if not len(real_args): parser.print_help() sys.exit(1) args = parser.parse_args(real_args) input_file = os.path.abspath(args.input) split = input_file.rpartition(".") output_path = args.output or (split[0] + "_UV." + split[-1]) margin = args.margin or 0.0 import_usd_file(input_file, root_prim=args.root_prim, visible_only=args.visible_only) bpy.ops.object.select_all(action="SELECT") unwrap_selected(apply_material=args.add_test_material, margin=margin) export_usd_file(output_path) sys.exit(0)
8,005
Python
29.674329
103
0.639975
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/geo_nodes.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy from mathutils import Vector # the type of geometry node tree to create: # geometry nodes is currently under development, so feature set is not yet at a stage to be fully utilized # this puts in place a framework for more customizable and easily implementable optimizations in the future # geometry nodes is a modifier, but unlike "DECIMATE" or "REMESH", geometry nodes can be customized with a wide array of options. # similar to other modifiers, if there are multiple objects with the same geo node modifier, the calculations are done independently for each object. # currently this setup can be used for generating convex hulls, creating bounding box meshes, and subdividing geometry. # (GeometryNodeConvexHull, GeometryNodeBoundBox, GeometryNodeSubdivisionSurface) # as the nodes options in blender expand, A lot more can be done wit it. 
# more on geometry nodes: https://docs.blender.org/manual/en/latest/modeling/geometry_nodes/index.html#geometry-nodes def new_GeometryNodes_group(): # create a new empty node group that can be used in a GeometryNodes modifier # tree only contains a simple input/output node setup # the input node gives a geometry, and the output node takes a geometry. # nodes then have input and output SOCKET(S). # this basic tree setup will accesses the output socket of the input node in order to connect it to the input socket of the output node # in order to make these connections, physical links between index values of inputs and outputs need to be made # this tree on its own will do nothing. In order to make changes to the geometry, more nodes must be inserted node_group = bpy.data.node_groups.new('GeometryNodes', 'GeometryNodeTree') # this is the container for the nodes inNode = node_group.nodes.new('NodeGroupInput') # this is the input node and gives the geometry to be modified. inNode.outputs.new('NodeSocketGeometry', 'Geometry') # gets reference to the output socket on the input node outNode = node_group.nodes.new('NodeGroupOutput') # this is the output node and returns the geometry that modified. 
outNode.inputs.new('NodeSocketGeometry', 'Geometry') # gets reference to the input socket on the output node node_group.links.new(inNode.outputs['Geometry'], outNode.inputs['Geometry']) # makes the link between the two nodes at the given sockets inNode.location = Vector((-1.5*inNode.width, 0)) # sets the position of the node in 2d space so that they are readable in the GUI outNode.location = Vector((1.5*outNode.width, 0)) return node_group # now that there is a basic node tree, additional nodes can be inserted into the tree to modify the geometry def geoTreeBasic(geo_tree, nodes, group_in, group_out, geo_type, attribute): # once the base geo tree has been created, we can insert additional pieces # this includes: convex hull, bounding box, subdivide new_node = nodes.new(geo_type) # create a new node of the specified type # insert that node between the input and output node geo_tree.links.new(group_in.outputs['Geometry'], new_node.inputs[0]) geo_tree.links.new(new_node.outputs[0], group_out.inputs['Geometry']) if geo_type == 'GeometryNodeSubdivisionSurface': # subsurf node requires an additional input value geo_tree.nodes["Subdivision Surface"].inputs[1].default_value = attribute def geoNodes(objects, geo_type, attribute): # TODO: When Geo Nodes develops further, hopefully all other modifier ops can be done through nodes # (currently does not support decimate/remesh) modifier = 'NODES' # create empty tree - this tree is a container for nodes geo_tree = new_GeometryNodes_group() # add tree to all objects for obj in objects: # for each object in selected objects, add the desired modifier and adjust its properties mod = obj.modifiers.new(name = modifier, type=modifier) # set name of modifier based on its type mod.node_group = geo_tree #bpy.data.node_groups[geo_tree.name] # alter tree - once the default tree has been created, additional nodes can be added in nodes = geo_tree.nodes group_in = nodes.get('Group Input') # keep track of the input node group_out = 
nodes.get('Group Output') # keep track of the output node geoTreeBasic(geo_tree, nodes, group_in, group_out, geo_type, attribute) # adds node to make modifications to the geometry
5,272
Python
63.304877
149
0.744499
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/run_ops_wo_update.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. from bpy.ops import _BPyOpsSubModOp view_layer_update = _BPyOpsSubModOp._view_layer_update def open_update(): # blender operator calls update the scene each time after running # updating the scene can take a long time, esp for large scenes. So we want to delay update until we are finished # there is not an official way to suppress this update, so we need to use a workaround def dummy_view_layer_update(context): # tricks blender into thinking the scene has been updated and instead passes pass _BPyOpsSubModOp._view_layer_update = dummy_view_layer_update def close_update(): # in the end, still need to update scene, so this manually calls update _BPyOpsSubModOp._view_layer_update = view_layer_update
1,619
Python
42.783783
118
0.731316
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/chop.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy, bmesh from mathutils import Vector import time from . import blender_class, run_ops_wo_update, select_mesh, bounds, utils, fix_mesh class Chop(blender_class.BlenderClass): # settings for GUI version only bl_idname = "chop.scene" bl_label = "Chop Scene" bl_description = "Recursively split scene in half until reaches a desired threshold" bl_options = {"REGISTER", "UNDO"} print_results = True def __init__(self): self._default_attributes = dict( merge= True, # optionally merge meshes in each split chunk after split recursion is complete cut_meshes=True, # split all meshes intersecting each cut plane # Cannot set this very low since split creates new triangles(if quads...) max_vertices= 100000, # a vertex threshold value, that once a chunk is below, the splitting terminates min_box_size= 1, # a size threshold that once a chunk is smaller than, the splitting terminates max_depth= 16, # a recursion depth threshold that once is reached, the splitting terminates print_updated_results= True, # print progress to console create_bounds = False, # create new bounds objects for displaying the cut boundaries. 
            # Mostly useful for GUI
            selected_only = False # uses only objects selected in scene. For GUI version only
        )

    def execute(self, in_attributes=None):
        """Operator entry point: split selected mesh objects into chunks and report stats.

        in_attributes: optional dict overriding the default attributes.
        Returns {'FINISHED'} so Blender treats the operator as completed.
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        Chop.print_results = attributes["print_updated_results"]
        Stats.resetValues() # clear counters left over from a previous run
        Stats.startTime = time.time()
        then = Stats.startTime
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected): # run only if there are selected mesh objects in the scene
            self.split(context, selected, attributes) # starts the splitting process
            now = time.time() # time after it finished
            Stats.printTermination()
            if attributes['merge']:
                Stats.printMerge()
            print("TIME FOR SPLIT: ", round(now-then, 3))
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}

    def getSplitPlane(self, obj_details):
        """Return (midPt, normal, axis) of the cut plane, perpendicular to the longest bounds axis."""
        # find longest side
        var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
        max_dim = var.get(max(var)) # axis name of the maximum of the three dims
        # NOTE(review): if two axes have exactly equal length the dict keeps only the later
        # entry; a longest axis is still chosen, so behavior is acceptable.
        # adjust the plane normal depending on the axis with the largest dimension
        if max_dim == "x":
            normal = [1,0,0,0]
            axis = "x"
        elif max_dim == "y":
            normal = [0,1,0,0]
            axis = "y"
        else:
            normal = [0,0,1,0]
            axis = "z"
        # center of bounds: plane origin, also used to create the next set of bounds
        midPt = [obj_details.x.mid,obj_details.y.mid,obj_details.z.mid]
        return midPt, normal, axis

    def getSplitBoxes(self, obj_details, attributes):
        """Return the two half-bounds (box_0, box_1) made by cutting the bounds at its midpoint."""
        # find longest side
        var = {obj_details.x.distance: "x", obj_details.y.distance: "y", obj_details.z.distance: "z"}
        mx = var.get(max(var)) # axis name of the maximum of the three dims
        mid_0 = [obj_details.x.max, obj_details.y.max, obj_details.z.max] # longest-axis entry will be replaced with a mid point
        high = mid_0.copy() # maximum value of bounds
        mid_1 = [obj_details.x.min, obj_details.y.min, obj_details.z.min] # longest-axis entry will be replaced with a mid point
        low = mid_1.copy() # minimum value of bounds
        midPt = [obj_details.x.mid,obj_details.y.mid,obj_details.z.mid] # center point of previous bounds
        # replace the mid point of the new bounds on the axis with the largest dimension
        if mx == "x":
            mid_0[0] = midPt[0]
            mid_1[0] = midPt[0]
        elif mx == "y":
            mid_0[1] = midPt[1]
            mid_1[1] = midPt[1]
        else:
            mid_0[2] = midPt[2]
            mid_1[2] = midPt[2]
        # Create sub-bounds: the two halves of the previous bounds, split along the longest axis.
        # Only two corner points are needed per box: the extreme corner and the shifted mid corner.
        coords_1 = [high[:], mid_1[:]] # put the points in a list
        box_0 = bounds.bounds(coords_1) # gather attributes of new bounds (max, min, mid, dim of each axis)
        coords_0 = [low[:], mid_0[:]] # put the points in a list
        box_1 = bounds.bounds(coords_0) # gather attributes of new bounds (max, min, mid, dim of each axis)
        if attributes["create_bounds"]: # optionally create display objects for viewing bounds
            bounds.boundsObj(coords_1)
            bounds.boundsObj(coords_0)
        return box_0, box_1

    def boxTooSmall(self, obj_details, attributes):
        """Return True when the bounds' longest side is below min_box_size (terminates recursion)."""
        # find longest sides
        dims = [obj_details.x.distance, obj_details.y.distance, obj_details.z.distance]
        if max(dims) < attributes["min_box_size"]: # longest dimension is below the threshold
            return True # box too small -- caller ends this branch of the recursion
        return False # box still large enough -- recursion may continue

    def parentEmpty(self, part, children):
        """Replace split source object *part* with an empty and parent the new *children* to it."""
        parent_name = part.name # part is the original object that was split; keep track of its name
        parent_col = part.users_collection[0] # track the collection of the part as well
        parent_parent = part.parent # if the part object has an existing parent, track that too
        bpy.data.objects.remove(part, do_unlink=True) # info is stored, so part can be deleted from the scene
        # an empty will take the place of the original part
        obj = bpy.data.objects.new(parent_name, None) # create an empty inheriting the name of part
        parent_col.objects.link(obj) # connect this object to part's collection
        obj.parent = parent_parent # make this empty the child of part's parent
        for child in children: # make the newly created split objects children of the empty
            child.parent = obj

    def newObj(self, bm, parent):
        """Create a new scene object from bmesh *bm*, copying settings (e.g. materials) from *parent*."""
        obj = parent.copy() # copying the source object carries over data such as material assignments
        obj.data = parent.data.copy() # the mesh data must be copied separately
        # TODO: obj.animation_data = sibling.animation_data.copy() # would copy animation data if desired
        parent.users_collection[0].objects.link(obj)
        # apply bmesh to new mesh
        bm.to_mesh(obj.data) # transfer the bmesh data produced by the split onto the new object
        bm.free() # always do this when finished with a bmesh
        return obj

    def checkIntersect(self, obj, axis, center):
        """Return (intersect_0, intersect_1): whether *obj*'s bounds extend past each side of the cut plane.

        Intersection is tested against the object's world-space bounding box, not per-vertex.
        """
        obj_details = bounds.bounds([obj.matrix_world @ Vector(v) for v in obj.bound_box])
        tolerance = .01 # prevents cutting a mesh that is exactly in line with the cut plane
        # TODO: may need user control over this tolerance, or define it relative to total scene size.
        # one flag per side of the cut plane, tested along the split axis
        if axis == "x":
            intersect_0 = obj_details.x.max > center[0] + tolerance
            intersect_1 = obj_details.x.min < center[0] - tolerance
        elif axis == "y":
            intersect_0 = obj_details.y.max > center[1] + tolerance
            intersect_1 = obj_details.y.min < center[1] - tolerance
        elif axis == "z":
            intersect_0 = obj_details.z.max > center[2] + tolerance
            intersect_1 = obj_details.z.min < center[2] - tolerance
        return intersect_0, intersect_1

    def doSplit(self, partsToSplit, planeOrigin, planeNormal, axis):
        """Bisect every intersecting object in *partsToSplit* by the given plane.

        Returns (occurrences_0, occurrences_1): objects on each side of the plane,
        with empty/zero-vertex results removed.
        """
        # the two halves of the split are collected in their own new lists
        occurrences_0 = []
        occurrences_1 = []
        for part in partsToSplit: # iterate over occurrences
            intersect_0, intersect_1 = self.checkIntersect(part, axis, planeOrigin)
            # only perform a split if the object actually crosses the cut plane
            if intersect_0 and intersect_1: # mesh has vertices on both sides of the cut plane
                Stats.printPart(part) # print the part being processed
                co = part.matrix_world.inverted() @ Vector(planeOrigin) # splitting is in object space, not world space,
                normDir = part.matrix_world.transposed() @ Vector(planeNormal) # so adjust plane origin and normal per object
                bmi = bmesh.new() # a bmesh holds the editable mesh data for an object
                bmi.from_mesh(part.data) # attach the mesh to the bmesh container so changes can be made
                bmo = bmi.copy() # two separate bmeshes because two occurrence lists are written to
                # bisect_plane can only keep one side of the split at a time, so it is run twice
                # save inner mesh data
                bmesh.ops.bisect_plane(bmi,
                    geom=bmi.verts[:]+bmi.edges[:]+bmi.faces[:], # the geometry to be split
                    dist=0.0001, # threshold for vertex proximity to the cut plane
                    # TODO: may need user control over this tolerance, or define it relative to scene size.
                    plane_co=co, # the cut plane
                    plane_no=(normDir.x,normDir.y,normDir.z), # the plane normal direction
                    clear_inner=True, # remove the geometry on the positive side of the cut plane
                    clear_outer=False) # keep the geometry on the negative side of the cut plane
                # save outer mesh data
                bmesh.ops.bisect_plane(bmo,
                    geom=bmo.verts[:]+bmo.edges[:]+bmo.faces[:], # the geometry to be split
                    dist=0.0001, # threshold for vertex proximity to the cut plane
                    plane_co=co, # the cut plane
                    plane_no=(normDir.x,normDir.y,normDir.z), # the plane normal direction
                    clear_inner=False, # keep the geometry on the positive side of the cut plane
                    clear_outer=True) # remove the geometry on the negative side of the cut plane
                # transfer the altered bmesh data back onto real mesh objects
                children = [] # will contain the newly created split meshes
                obj = self.newObj(bmi, part) # new mesh object for the inner bmesh data
                occurrences_0.append(obj) # add new object to inner occurrence list
                children.append(obj)
                obj2 = self.newObj(bmo, part) # new mesh object for the outer bmesh data
                occurrences_1.append(obj2) # add new object to outer occurrence list
                children.append(obj2)
                self.parentEmpty(part, children) # use children list to fix object parents
                if Chop.print_results:
                    utils.printClearLine() # clear last printed line before continuing
            # vertices on only one side of the cut plane: nothing to split, just classify the mesh
            elif intersect_0:
                occurrences_0.append(part) # add object to inner occurrence list
                part.select_set(False) # deselect object
            else:
                occurrences_1.append(part) # add object to outer occurrence list
                part.select_set(False) # deselect object
        # bisect_plane can create empty objects or zero-vert meshes; remove those before continuing
        occurrences_0 = fix_mesh.deleteEmptyXforms(occurrences_0) # update occurrences_0
        occurrences_1 = fix_mesh.deleteEmptyXforms(occurrences_1) # update occurrences_1
        return occurrences_0, occurrences_1

    def doMerge(self, partsToMerge):
        """Join all meshes of one chunk into a single object (no-op for zero or one mesh)."""
        if len(partsToMerge) > 1: # with one mesh or none there is no merging to do
            then = time.time() # time at the beginning of merge
            # copying the context allows temporarily overriding active/selected objects,
            # avoiding the need to deselect and reselect after the merge
            ctx = bpy.context.copy()
            ctx['selected_editable_objects'] = partsToMerge # the meshes of the chunk being merged
            ctx['active_object'] = partsToMerge[0] # Blender needs the active object to be a selected object
            parents = [] # NOTE(review): collected but never used afterwards -- candidate for removal
            for merge in partsToMerge:
                parents.append(merge.parent)
            run_ops_wo_update.open_update() # allows operators to run without updating the scene
            bpy.ops.object.join(ctx) # merges all parts into one
            run_ops_wo_update.close_update() # must always be called if open_update is called
            now = time.time() # time after merging is complete
            Stats.mergeTime += (now-then) # accumulate total time spent on merge

    def recursiveSplit(self, occurrences, attributes, obj_details, depth):
        """Run termination checks, then split *occurrences* in half and recurse on each half."""
        if not occurrences: # no occurrences left: end recursion
            Stats.printPercent(depth, True) # optionally print results before ending recursion
            return
        # Check whether the maximum recursive depth has been reached, to terminate and merge
        if attributes["max_depth"] != 0 and depth >= attributes["max_depth"]: # a max_depth of 0 disables the check
            Stats.chunks += 1 # counts only chunks from completed recursive branches
            Stats.printMsg_maxDepth += 1 # "REACHED MAX DEPTH"
            Stats.printPercent(depth) # optionally print results before ending recursion
            if attributes["merge"]: # if merging, do so now
                self.doMerge(occurrences)
            return
        # Check the vertex count threshold and bbox size, to terminate and merge
        vertices = utils.getVertexCount(occurrences)
        if self.boxTooSmall(obj_details, attributes) or vertices < attributes["max_vertices"]:
            Stats.chunks += 1 # counts only chunks from completed recursive branches
            if vertices < attributes["max_vertices"]:
                Stats.printMsg_vertexGoal += 1 # "REACHED VERTEX GOAL"
            elif self.boxTooSmall(obj_details, attributes):
                Stats.printMsg_boxSize += 1 # "BOX TOO SMALL"
            Stats.printPercent(depth) # optionally print results before ending recursion
            if attributes["merge"]: # if merging, do so now
                self.doMerge(occurrences)
            return
        # Keep subdividing
        planeOrigin, planeNormal, axis = self.getSplitPlane(obj_details) # components of the cutter plane
        # Do the split and merge
        if attributes["cut_meshes"]: # split scene meshes by the cut plane into two halves
            occurrences_0, occurrences_1 = self.doSplit(occurrences, planeOrigin, planeNormal, axis)
            depth += 1 # a split has taken place, so increment the recursive depth count
        # NOTE(review): if attributes["cut_meshes"] is False, occurrences_0/1 below are unbound
        # and this raises NameError -- verify cut_meshes is always True on this code path.
        # Recurse. Get a bounding box for each half.
        box_0, box_1 = self.getSplitBoxes(obj_details, attributes)
        self.recursiveSplit(occurrences_0, attributes, box_0, depth)
        self.recursiveSplit(occurrences_1, attributes, box_1, depth)

    def split(self, context, selected, attributes):
        """Prepare the original occurrences (and optional bounds display) and start the recursion."""
        occurrences = selected # objects for each recursive split; initially the selected objects
        # Initial bbox includes all original occurrences
        boundsCombined = bounds.boundingBox(occurrences) # combined bounds coordinates of the occurrences
        obj_details = bounds.bounds(boundsCombined) # per-axis statistics (max, min, mid, dim) of the bounds
        if attributes["create_bounds"]: # optionally create a bounds object for each recursive split
            target_coll_name = "BOUNDARIES" # a separate collection keeps the scene organized
            target_coll = bpy.data.collections.new(target_coll_name) # new collection in the master scene collection
            context.scene.collection.children.link(target_coll) # link the newly created collection to the scene
            bounds.boundsObj(boundsCombined) # create bounds obj
        depth = 0 # tracks recursive depth
        print("-----SPLIT HAS BEGUN-----")
        Stats.printPercent(depth) # for optionally printing progress of the operation
        self.recursiveSplit(occurrences, attributes, obj_details, depth) # begin recursive split


class Stats():
    """Counters and console reporting for the chop operation.

    NOTE(review): the methods take no *self* and are always called on the class
    (Stats.foo()); they behave as implicit static methods.
    """
    startTime= 0 # start time of script execution, used for calculating progress
    printMsg_vertexGoal = 0 # times recursion terminated because the vertex goal was reached
    printMsg_boxSize = 0 # times recursion terminated because the box was too small
    printMsg_maxDepth = 0 # times recursion terminated because max recursive depth was exceeded
    percent_worked = 0 # amount of scene volume that contains objects, for progress calculation
    percent_empty = 0 # amount of scene volume that is empty, for progress calculation
    chunks = 0 # number of parts created by the split; each chunk may contain multiple meshes/objects
    mergeTime = 0 # total time spent merging chunks

    def resetValues():
        """Reset all counters before a run."""
        Stats.startTime= 0
        Stats.printMsg_vertexGoal = 0
        Stats.printMsg_boxSize = 0
        Stats.printMsg_maxDepth = 0
        Stats.percent_worked = 0
        Stats.percent_empty = 0
        Stats.chunks = 0
        Stats.mergeTime = 0

    # for printing progress statistics to the console
    def printTermination():
        """Print how often each termination condition fired, plus the chunk count."""
        print("Reached Vertex Goal: ", Stats.printMsg_vertexGoal,
            " Box Too Small: ", Stats.printMsg_boxSize,
            " Exceeded Max Depth: ", Stats.printMsg_maxDepth)
        print("chunks: ", Stats.chunks) # total number of chunks created from the split

    def printMerge():
        """Print the total time the merging took."""
        print("merge time: ", Stats.mergeTime)

    def printPart(part):
        """Print the part currently being split (helps debugging if Blender crashes mid-run)."""
        if Chop.print_results:
            print("current part being split: ", part)

    def printPercent(depth, empty=False):
        """Print the progress of the recursive split with an estimate of remaining time."""
        if Chop.print_results:
            if depth != 0:
                if empty: # generated chunk contains no geometry, so it is considered empty
                    # fraction of 2^depth measures how much of the total volume is complete
                    Stats.percent_empty += 100/pow(2,depth)
                elif depth: # cannot calculate if depth is zero due to division by zero
                    Stats.percent_worked += 100/pow(2,depth)
                total = Stats.percent_empty + Stats.percent_worked # percent of bounds volume processed
                # ratio of worked chunks to non-empty volume; empty chunks take virtually no
                # time to process, so this gives a more accurate remaining-time estimate
                percent_real = Stats.percent_worked/(100-Stats.percent_empty)*100
                # timer
                now = time.time() # current time elapsed in operation
                if percent_real > 0: # at least one occupied chunk has been calculated
                    # estimate of remaining time, based on what has already been processed
                    est_comp_time = f"{((now-Stats.startTime)/percent_real*100 - (now-Stats.startTime)):1.0f}"
                else:
                    est_comp_time = "Unknown"
                utils.printClearLine()
                utils.printClearLine()
                # print results to console
                print("\033[93m" + "Percent_empty: ", f"{Stats.percent_empty:.1f}" , "%, Percent_worked: ",
                    f"{Stats.percent_worked:.1f}", "%, Total: ", f"{total:.1f}", "%, Real: ", f"{percent_real:.1f}", "%")
                print("Estimated time remaining: ", est_comp_time, "s, Depth: ", depth, "\033[0m")
            else:
                print() # empty line to prep for the progress printing
                print() # empty line to prep for the progress printing
23,807
Python
59.580153
168
0.656278
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/fix_mesh.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import bpy
import bmesh
import time
from functools import reduce

from . import blender_class, run_ops_wo_update, select_mesh, utils


class FixMesh(blender_class.BlenderClass):
    """Remove degenerate geometry and repair normals on the scene's mesh objects."""
    # settings for GUI version only
    bl_idname = "fix.mesh"
    bl_label = "Fix Mesh"
    bl_description = "fix bad meshes in the scene"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        # default attributes; may be overridden per call via execute(in_attributes)
        self._default_attributes = dict(
            selected_only=False, # uses only objects selected in scene. For GUI version only
            fix_bad_mesh = True, # remove zero-area faces and zero-length edges based on 'dissolve_threshold'
            dissolve_threshold = 0.08, # threshold value for 'fix_bad_mesh'
            merge_vertex = False, # merge connected and disconnected vertices of a mesh by a distance threshold
            merge_threshold = 0.01, # distance value to use for merge_vertex
            remove_existing_sharp = True, # removing zero-area faces can corrupt edge data, causing bad normals; this helps minimize that
            fix_normals = True, # optionally fix normals; useful after 'fix_bad_mesh' to repair the normals as well
            create_new_custom_normals = True # will auto generate new sharp edges (based on angle)
        )

    def execute(self, in_attributes=None):
        """Entry point: fix bad meshes and/or normals on the (selected) mesh objects.

        in_attributes: optional dict overriding the default attributes.
        Returns {'FINISHED'} so Blender treats the operator as completed.
        """
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time() # start time of script execution
        if context.mode != 'OBJECT': # must be in object mode to perform the rest of the operations
            bpy.ops.object.mode_set(mode='OBJECT')
        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"], deselectAll = False)
        if len(selected): # run only if there are selected mesh objects in the scene
            # if removing zero-area-faces/zero-length-edges or merging vertices by distance:
            if attributes["fix_bad_mesh"] or attributes["merge_vertex"]:
                self.fixBadMesh(
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]: # optionally fix bad normals (can often arise after fixing bad mesh)
                self.fixNormals(selected, attributes["create_new_custom_normals"])
        else:
            utils.do_print_error("NO MESH OBJECTS")
        now = time.time() # time after it finished
        print("TIME FOR FIX MESH: ", round(now-then, 3))
        return {'FINISHED'}

    def fixBadMesh(self, selected, dissolveThreshold = 0.08, fixBadMesh = False, mergeVertex = False, mergeThreshold = 0.1, removeExistingSharp = True):
        """Dissolve degenerate geometry and/or merge nearby vertices on every object in *selected*.

        NOTE(review): once a degenerate-dissolve geometry node exists in Blender, replace this
        with a GN setup for a non-destructive workflow (a goal for the GUI version).
        """
        # for printing vertex and face data
        startingVerts = utils.getVertexCount(selected)
        startingFaces = utils.getFaceCount(selected)
        bm = bmesh.new() # a bmesh holds the editable mesh data for an object
        for object in selected: # loop through each selected object (NOTE(review): name shadows the builtin 'object')
            utils.printPart(object) # print the current part being fixed
            mesh = object.data # the mesh data is what must be altered, not the object itself
            bm.from_mesh(mesh) # attach the mesh to the bmesh container so changes can be made
            if fixBadMesh:
                bmesh.ops.dissolve_degenerate( # removes zero-area faces and zero-length edges
                    bm,
                    dist=dissolveThreshold,
                    edges=bm.edges
                )
            if mergeVertex:
                bmesh.ops.remove_doubles(
                    bm,
                    verts=bm.verts,
                    dist=mergeThreshold
                )
            # Clear the sharp state for all edges; reduces problems that arise from bad normals
            if removeExistingSharp:
                for edge in bm.edges:
                    edge.smooth = True # smooth is the opposite of sharp, so this removes sharp
            bm.to_mesh(mesh) # transfer the altered bmesh data back to the original mesh
            bm.clear() # always clear a bmesh after use
            utils.printClearLine() # remove last print, so that printPart can be updated
        # print vertex and face data
        endingVerts = utils.getVertexCount(selected)
        endingFaces = utils.getFaceCount(selected)
        vertsRemoved = startingVerts-endingVerts
        facesRemoved = startingFaces-endingFaces
        print("Fix Mesh Statistics:")
        utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
        utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))

    def fixNormals(self, selected, createNewCustomNormals):
        """Clear (and optionally re-create) custom split normals on each selected mesh."""
        run_ops_wo_update.open_update() # allows operators to run without updating the scene;
                                        # important especially when working with loops
        for o in selected:
            if o.type != 'MESH':
                continue
            bpy.context.view_layer.objects.active = o
            mesh = o.data
            if mesh.has_custom_normals:
                bpy.ops.mesh.customdata_custom_splitnormals_clear()
            # NOTE(review): assumed to apply to every mesh, not only those that had custom
            # normals -- confirm the intended nesting of this condition
            if createNewCustomNormals:
                bpy.ops.mesh.customdata_custom_splitnormals_add()
        run_ops_wo_update.close_update() # must always be called if open_update is called


def deleteEmptyXforms(occurrences):
    """Delete objects with no meshes or zero-vertex meshes; return the remaining good objects."""
    # first separate occurrences into two lists by vertex count
    def partition(p, l): # reduce with a lambda efficiently parses the data
        return reduce(lambda x, y: x[not p(y)].append(y) or x, l, ([], [])) # obj with vertices -> first list, else -> second
    occurrences_clean, occurrences_dirty = partition(lambda obj:len(obj.data.vertices), occurrences)
    # delete obj with zero vertex count or no meshes
    for obj in occurrences_dirty:
        bpy.data.objects.remove(obj, do_unlink=True)
    # return good meshes
    return occurrences_clean
7,637
Python
48.597402
153
0.647506
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/bounds.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy, bmesh from mathutils import Vector import collections def boundsObj(points): # for displaying the bounds of each split chunk mesh = bpy.data.meshes.new("mesh") # add a new mesh obj = bpy.data.objects.new("MyObject", mesh) # add a new object using the new mesh # link the new bounds object to the newly created collection in split. # this is the last collection added to the scene, hence index of len -1 bpy.context.scene.collection.children[len( bpy.context.scene.collection.children)-1].objects.link(obj) obj.display_type = 'BOUNDS' # display only the objects bounds in the Blender viewport. 
bm = bmesh.new() # 'bmesh' in Blender is data type that contains the 'edit mesh' for an object # allows control over vertices, edges, and faces for point in points: # iterate over input bounds(points) bm.verts.new(point) # add a new vert # make the bmesh the object's mesh bm.to_mesh(obj.data) # transfer bmesh data to the new obj bm.free() # always do this when finished with a bmesh return obj def boundingBox(objects): # the bounding box used for calculating the split plane if not isinstance(objects, list): # if objects is not a list convert it to one objects = [objects] points_co_global = [] # list of all vertices of all objects from list with global coordinates for obj in objects: # iterate over objects list and add its vertices to list points_co_global.extend([obj.matrix_world @ Vector(v) for v in obj.bound_box]) # must add points in world space return points_co_global def bounds(coords): # returns a dictionary containing details of split bounds zipped = zip(*coords) # The zip() function returns a zip object, which is an iterator of tuples push_axis = [] # list that will contain useful for each axis for (axis, _list) in zip('xyz', zipped): # for x, y, and z axis calculate set of values and add them to list info = lambda: None info.max = max(_list) # the maximum value of bounds for each axis info.min = min(_list) # the minimum value of bounds for each axis info.distance = info.max - info.min # the length of the bounds for each axis info.mid = (info.max + info.min)/2 # the center point of bounds for each axis push_axis.append(info) # add this info to push_axis originals = dict(zip(['x', 'y', 'z'], push_axis)) # create dictionary wit the values from push_axis o_details = collections.namedtuple('object_details', ['x', 'y', 'z']) # organize dictionary to be accessed easier return o_details(**originals)
3,481
Python
47.36111
119
0.703533
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/remesh.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

# Remeshing reconstructs a mesh to produce clean/uniform geometry, but removes all UV mappings from an object
# There are four different remesh methods. (BLOCKS, SMOOTH, SHARP, VOXEL)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/remesh.html#remesh-modifier


def remesh(objects, remesh_type, prop):
    """Add a Remesh modifier of the given type to every object in *objects*.

    Parameters:
        objects: iterable of Blender objects that each receive a new modifier.
        remesh_type: one of 'BLOCKS', 'SMOOTH', 'SHARP', 'VOXEL'.
        prop: resolution control -- the octree depth (geometry grows as 2^prop)
            for the first three modes, or the voxel size (smaller = denser)
            for 'VOXEL'.

    Raises:
        TypeError: if remesh_type is not one of the four supported modes
            (raised after the modifier has already been added, matching the
            original behavior).
    """
    modifier = 'REMESH' # sets the type of modifier to be used
    for obj in objects: # add the desired modifier to each object and adjust its properties
        mod = obj.modifiers.new(name = modifier, type=modifier) # name the modifier after its type
        mod.mode = remesh_type # sets remesh mode (BLOCKS, SMOOTH, SHARP, VOXEL)
        # BLOCKS/SMOOTH/SHARP produce almost identical topology and differ only in the
        # amount of smoothing; all three are driven by the same octree resolution.
        if remesh_type in ('BLOCKS', 'SMOOTH', 'SHARP'):
            mod.octree_depth = prop # the higher the number, the more geometry created (2^x)
        elif remesh_type == 'VOXEL':
            # "Uses an OpenVDB to generate a new manifold mesh from the current geometry
            # while trying to preserve the mesh's original volume."
            mod.voxel_size = prop # the lower the number, the more geometry created (x)
        else:
            raise TypeError('Invalid Remesh Type')
2,657
Python
54.374999
132
0.703049
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/process_attributes.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

from bpy.types import Operator

from . import modify, fix_mesh, chop, uv, utils


class OPTIMIZE_OT_Scene(Operator):
    """Operator that gathers the UI options and dispatches the chosen optimization."""
    bl_idname = "optimize.scene"
    bl_label = "Optimize Scene"
    bl_description = "Optimize scene based on operation and set parameters"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        """Blender operator entry point."""
        self.get_attributes(context)
        return {'FINISHED'}

    def get_attributes(self, context):
        """Build the attribute dict for the active operation from the scene property groups,
        then hand it to process_operation()."""
        optimizeOptions = context.scene.optimize_options
        modifyOptions = context.scene.modify_options
        uvOptions = context.scene.uv_options
        chopOptions = context.scene.chop_options
        if optimizeOptions.operation == "modify":
            attributes = dict(
                selected_only= modifyOptions.selected_only,
                apply_mod= modifyOptions.apply_mod,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals,
                modifier= modifyOptions.modifier,
                # use_modifier_stack= modifyOptions.use_modifier_stack,
                # modifier_stack= modifyOptions.modifier_stack,
                decimate_type= modifyOptions.decimate_type,
                ratio= modifyOptions.ratio,
                iterations= modifyOptions.iterations,
                angle= modifyOptions.angle,
                remesh_type= modifyOptions.remesh_type,
                oDepth= modifyOptions.oDepth,
                voxel_size= modifyOptions.voxel_size,
                geo_type= modifyOptions.geo_type,
                geo_attribute= modifyOptions.geo_attribute
            )
        elif optimizeOptions.operation == "fixMesh":
            attributes = dict(
                selected_only=modifyOptions.selected_only,
                fix_bad_mesh = modifyOptions.fix_bad_mesh,
                dissolve_threshold = modifyOptions.dissolve_threshold,
                merge_vertex = modifyOptions.merge_vertex,
                merge_threshold = modifyOptions.merge_threshold,
                remove_existing_sharp = modifyOptions.remove_existing_sharp,
                fix_normals = modifyOptions.fix_normals,
                create_new_custom_normals = modifyOptions.create_new_custom_normals
            )
        elif optimizeOptions.operation == "uv":
            attributes = dict(
                selected_only= uvOptions.selected_only,
                scale_to_bounds = uvOptions.scale_to_bounds,
                clip_to_bounds = uvOptions.clip_to_bounds,
                unwrap_type = uvOptions.unwrap_type,
                use_set_size = uvOptions.use_set_size,
                set_size = uvOptions.set_size,
                print_updated_results= uvOptions.print_updated_results
            )
        elif optimizeOptions.operation == "chop":
            attributes = dict(
                merge= chopOptions.merge,
                cut_meshes= chopOptions.cut_meshes,
                max_vertices= chopOptions.max_vertices,
                min_box_size= chopOptions.min_box_size,
                max_depth= chopOptions.max_depth,
                print_updated_results= chopOptions.print_updated_results,
                create_bounds = chopOptions.create_bounds,
                selected_only = chopOptions.selected_only
            )
        # NOTE(review): if operation matches none of the four branches, 'attributes' is
        # unbound and the lines below raise NameError -- confirm the enum is closed.
        if optimizeOptions.print_attributes:
            print(attributes)
        self.process_operation(optimizeOptions.operation, attributes)

    def process_operation(self, operation, attributes):
        """Instantiate the Blender class for *operation* and run it with *attributes*."""
        start = utils.start_time()
        blender_cmd = None
        if operation == 'modify':
            # Modify Scene
            blender_cmd = modify.Modify()
        elif operation == 'fixMesh':
            # Clean Scene
            blender_cmd = fix_mesh.FixMesh()
        elif operation == 'chop':
            # Chop Scene
            blender_cmd = chop.Chop()
        elif operation == 'uv':
            # Unwrap scene
            blender_cmd = uv.uvUnwrap()
        elif operation == "noop":
            # Runs the load/save USD round trip without modifying the scene.
            utils.do_print("No-op for this scene")
            return
        else:
            utils.do_print_error("Unknown operation: " + operation + " - add function call to process_file in process.py")
            return
        # Run the command
        if blender_cmd:
            blender_cmd.execute(attributes)
        else:
            utils.do_print_error("No Blender class found to run")
        utils.report_time(start, "operation")
5,736
Python
40.273381
122
0.61175
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/utils.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

# Generic utility functions for Blender

import json
import sys
from timeit import default_timer as timer

import bpy


def do_print(msg):
    """Print *msg* highlighted in yellow, flushed so it appears immediately."""
    print("\033[93m" + msg + "\033[0m", flush=True)


def do_print_error(msg):
    """Print *msg* highlighted in red, flushed so it appears immediately."""
    print("\033[91m" + msg + "\033[0m", flush=True)


def start_time():
    """Return a high-resolution timestamp; pair with report_time()."""
    return timer()


def report_time(start, msg):
    """Print the seconds elapsed since *start* (from start_time()), labeled *msg*."""
    end = timer()
    do_print("Elapsed time for {}: {:.3f}".format(msg, end - start))


def print_python_version():
    """Print the major.minor version of the running Python interpreter."""
    do_print("Python version: %s.%s" % (sys.version_info.major,
                                        sys.version_info.minor))


def open_file(inputPath):
    """Load a USD or FBX file into the scene.

    Clears any existing file before loading (behavior of the importers).
    Returns True on success, False when the extension is unrecognized.
    """
    start = timer()
    # str.endswith accepts a tuple of suffixes directly; the original
    # wrapped a list in tuple() needlessly.
    if inputPath.endswith((".usd", ".usda", ".usdc")):
        do_print("Load file: " + inputPath)
        bpy.ops.wm.usd_import(filepath=inputPath)
    elif inputPath.endswith(".fbx"):
        bpy.ops.import_scene.fbx(filepath=inputPath)
    else:
        do_print_error("Unrecognized file, not loaded: " + inputPath)
        return False
    end = timer()
    do_print("Elapsed time to load file: " + "{:.3f}".format(end - start))
    return True


def save_file(outputPath):
    """Save the scene as USD. Only writes diffs, so faster than export."""
    start = timer()
    do_print("Save file: " + outputPath)
    bpy.ops.wm.usd_export(filepath=outputPath)
    end = timer()
    do_print("Elapsed time to save file: " + "{:.3f}".format(end - start))
    return True


def clear_scene():
    """Reset Blender to an empty factory scene.

    This seems to be difficult with Blender. Partially working code:
    """
    bpy.ops.wm.read_factory_settings(use_empty=True)


def process_json_config(operation):
    """Parse a JSON string into Python data; None for an empty/missing string."""
    return json.loads(operation) if operation else None


def getVertexCount(occurrences):
    """Return the total vertex count of all current occurrences.

    Used for threshold testing during recursion.
    """
    return sum(len(obj.data.vertices) for obj in occurrences)


def getFaceCount(occurrences):
    """Return the total face count of all current occurrences.

    Used for threshold testing during recursion.
    """
    return sum(len(obj.data.polygons) for obj in occurrences)


def printPart(part):
    """Debug helper: report which part is currently being operated on."""
    print("current part being operated on: ", part.name)


def printClearLine():
    """Move the console cursor up one line and clear it.

    Keeps progress output on a single line instead of endless prints.
    """
    LINE_UP = '\033[1A'     # ANSI: move up a line in the console
    LINE_CLEAR = '\x1b[2K'  # ANSI: clear the current line
    print(LINE_UP, end=LINE_CLEAR)
3,371
Python
33.408163
125
0.69119
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/select_mesh.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # for selecting only mesh objects in the scene. To be used by multiple other files. def setSelected(context, selectedOnly = False, deselectAll = True): def select(input): for obj in input: if obj.type == 'MESH': # only mesh objects, ignore lights/cameras/curves/etc. selected.append(obj) # add object to array if deselectAll: # may want all objects deselected at end of processing obj.select_set(False) # make sure all objects are deselected before continuing. else: obj.select_set(obj.type == 'MESH') # select only mesh objects selected = [] # an empty array that will be used to store the objects that need to be unwrapped objects=[ob for ob in context.view_layer.objects if ob.visible_get()] # only want to look at visible objects. process will fail otherwise if not selectedOnly: # selectedOnly is for GUI version only select(objects) elif len(context.selected_objects): # run only if there are selected objects in the scene to isolate just the selected meshes select(context.selected_objects) return selected
2,025
Python
46.116278
141
0.698765
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/blender_class.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. from abc import ABC, abstractmethod import json from . import utils class BlenderClass(ABC): def __init__(self): self._default_attributes = dict() def get_attributes(self, in_attributes): attributes = {**self._default_attributes, **in_attributes} # utils.do_print("Attributes: " + json.dumps(attributes, indent=4, sort_keys=False)) return attributes @abstractmethod def execute(self, in_attributes=None): pass
1,332
Python
32.324999
92
0.705706
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/decimate.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

# Decimation reduces geometry while maintaining form and UVs
# There are three different decimation methods. Each method produces different results, with its own pros/cons)
# https://docs.blender.org/manual/en/latest/modeling/modifiers/generate/decimate.html#decimate-modifier

def decimate(objects, decimate_type, prop):
    """Add a configured Decimate modifier to each mesh in *objects*.

    objects       -- iterable of mesh objects to decimate
    decimate_type -- 'COLLAPSE', 'UNSUBDIV', or 'DISSOLVE'
    prop          -- type-specific setting: collapse ratio (x/1),
                     un-subdivision iterations (1/2^x), or dissolve
                     angle limit in radians

    Raises TypeError for an unknown *decimate_type*. Validation happens
    up front so no object is left with a half-configured modifier
    (the original raised only after already creating one).
    """
    if decimate_type not in ('COLLAPSE', 'UNSUBDIV', 'DISSOLVE'):
        raise TypeError('Invalid Decimate Type')
    modifier = 'DECIMATE'  # type of modifier to be used
    for obj in objects:
        # decimation cannot be performed on meshes with 3 or fewer faces
        if len(obj.data.polygons) > 3:
            mod = obj.modifiers.new(name=modifier, type=modifier)  # name the modifier after its type
            mod.decimate_type = decimate_type
            if decimate_type == 'COLLAPSE':
                # "Merges vertices together progressively, taking the shape of the mesh into account."
                mod.ratio = prop  # ratio of total faces kept (x/1)
            elif decimate_type == 'UNSUBDIV':
                # "Intended for meshes with a mainly grid-based topology (without giving uneven geometry)"
                mod.iterations = prop  # higher number => less geometry remaining (1/2^x)
            else:  # 'DISSOLVE'
                # "Reduces details on forms comprised of mainly flat surfaces."
                mod.angle_limit = prop  # reduction limited to an angle between faces (x degrees)
                mod.delimit = {'UV'}
    return
2,515
Python
54.91111
142
0.702982
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/modify.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import bpy
import time
import math

from . import blender_class, select_mesh, fix_mesh, decimate, remesh, geo_nodes, utils


# Master Class for all modifiers
class Modify(blender_class.BlenderClass):
    """Applies geometry-reducing modifiers (decimate/remesh/geometry nodes) to the scene."""

    # settings for GUI version only
    bl_idname = "modify.scene"
    bl_label = "Modify Scene"
    bl_description = "Modify the scene based on set parameters"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=True,          # use only objects selected in scene (GUI version only)
            apply_mod=True,              # apply the generated modifiers; must be True for command-line runs
            fix_bad_mesh=True,           # remove zero-area faces / zero-length edges via 'dissolve_threshold'
            dissolve_threshold=.08,      # threshold value for 'fix_bad_mesh'
            merge_vertex=False,          # merge (dis)connected vertices of a mesh by a distance threshold
            merge_threshold=0.01,        # distance value used for 'merge_vertex'
            remove_existing_sharp=True,  # removing zero-area faces can corrupt edge data/normals; this helps minimize that
            fix_normals=True,            # optionally fix normals (useful after 'fix_bad_mesh')
            create_new_custom_normals=True,  # also useful after 'fix_bad_mesh'
            modifier="DECIMATE",         # modifier used when 'use_modifier_stack' is False (DECIMATE, REMESH, NODES, SUBSURF)
            use_modifier_stack=False,    # allow several modifiers sequentially for customizable workflows
            modifier_stack=[["DECIMATE", "COLLAPSE", 0.5]],  # [type, sub-type, property] triples when stacking
            decimate_type="COLLAPSE",    # COLLAPSE (ratio), UNSUBDIV (iterations), or DISSOLVE (angle)
            ratio=0.5,                   # ratio for collapse decimation
            iterations=2,                # un-subdivision count
            angle=15.0,                  # dissolve-decimation angle limit, in degrees
            remesh_type="VOXEL",         # BLOCKS, SMOOTH, SHARP (octree-based) or VOXEL; remeshing removes all UVs
            oDepth=4,                    # octree depth: resolution for BLOCKS/SMOOTH/SHARP remesh
            voxel_size=0.1,              # resolution for VOXEL remesh
            geo_type="GeometryNodeBoundBox",  # geometry-node tree type (ConvexHull, BoundBox, SubdivisionSurface)
            geo_attribute=2              # generic attribute used by the different geo node types
        )

    def execute(self, in_attributes=None):
        """Fix meshes, run the configured modifier(s), optionally apply them, and report stats."""
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution

        # shorthands for multi-used attributes
        modifier = attributes["modifier"]
        decimate_type = attributes["decimate_type"]
        angle = attributes["angle"]
        remesh_type = attributes["remesh_type"]

        if context.mode != 'OBJECT':  # must be in object mode for the rest of the operations
            bpy.ops.object.mode_set(mode='OBJECT')

        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"],
                                           deselectAll=False)

        if len(selected):  # run only if there are selected mesh objects in the scene
            if attributes["fix_bad_mesh"]:  # optionally fix bad meshes (can also be done separately beforehand)
                fix_mesh.FixMesh.fixBadMesh(
                    self,
                    selected,
                    attributes["dissolve_threshold"],
                    attributes["fix_bad_mesh"],
                    attributes["merge_vertex"],
                    attributes["merge_threshold"],
                    attributes["remove_existing_sharp"])
            if attributes["fix_normals"]:  # optionally fix bad normals (can arise after fixing bad mesh)
                fix_mesh.FixMesh.fixNormals(self, selected,
                                            attributes["create_new_custom_normals"])

            # for printing vertex and face data
            startingVerts = utils.getVertexCount(selected)
            startingFaces = utils.getFaceCount(selected)

            if attributes["use_modifier_stack"]:
                for mod in attributes["modifier_stack"]:
                    self.run_modifier(selected, mod[0], mod[1], mod[2])
            else:
                # Initialize defensively: the original left these unbound
                # (NameError) when the sub-type didn't match any branch.
                sub_mod = None
                prop = None
                # Decimate
                if modifier == 'DECIMATE':
                    sub_mod = decimate_type
                    if decimate_type == 'COLLAPSE':
                        prop = attributes["ratio"]
                    elif decimate_type == 'UNSUBDIV':
                        prop = attributes["iterations"]
                    elif decimate_type == 'DISSOLVE':
                        angle = math.radians(angle)  # the modifier expects radians
                        prop = angle
                # Remesh
                elif modifier == 'REMESH':
                    sub_mod = remesh_type
                    if remesh_type in ('BLOCKS', 'SMOOTH', 'SHARP'):
                        prop = attributes["oDepth"]
                    if remesh_type == 'VOXEL':
                        prop = attributes["voxel_size"]
                # Geometry Nodes
                elif modifier == 'NODES':
                    sub_mod = attributes["geo_type"]
                    prop = attributes["geo_attribute"]
                self.run_modifier(selected, modifier, sub_mod, prop)
                # NOTE: removed a stray unconditional `raise RuntimeError` here
                # (debug leftover) that aborted every non-stack run before the
                # modifiers could be applied.

            # apply modifiers once the above is complete
            if attributes["apply_mod"]:
                # Need one selected object as the active object (there can only be
                # one AO, but multiple SO); arbitrarily use the first. Required
                # for applying the modifiers.
                context.view_layer.objects.active = selected[0]
                # applies all modifiers of each selected mesh; preps the scene for proper export
                bpy.ops.object.convert(target='MESH')

            # print vertex and face data
            endingVerts = utils.getVertexCount(selected)
            endingFaces = utils.getFaceCount(selected)
            vertsRemoved = startingVerts - endingVerts
            facesRemoved = startingFaces - endingFaces
            print("Modify Mesh Statistics:")
            utils.do_print("Starting Verts: " + str(startingVerts) + ", Ending Verts: " + str(endingVerts) + ", Verts Removed: " + str(vertsRemoved))
            utils.do_print("Starting Faces: " + str(startingFaces) + ", Ending Faces: " + str(endingFaces) + ", Faces Removed: " + str(facesRemoved))
        else:
            utils.do_print_error("NO MESH OBJECTS")

        now = time.time()  # time after it finished
        print("TIME FOR MODIFY: ", round(now - then, 3))
        # returning {"FINISHED"} (or {"CANCELED"}) is how Blender knows an operator call is complete
        return {'FINISHED'}

    def run_modifier(self, objects, modifier, sub_mod=None, prop=None):
        """Dispatch to the module implementing *modifier*; each takes different inputs."""
        # Decimate
        if modifier == 'DECIMATE':
            decimate.decimate(objects, sub_mod, prop)
        # Remesh
        elif modifier == 'REMESH':
            remesh.remesh(objects, sub_mod, prop)
        # Geometry Nodes
        elif modifier == 'NODES':
            geo_nodes.geoNodes(objects, sub_mod, prop)
10,769
Python
58.175824
155
0.626613
NVIDIA-Omniverse/blender_omniverse_addons/omni_optimization_panel/scripts/uv.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import bpy
import time
import contextlib

from . import blender_class, run_ops_wo_update, select_mesh, utils


class uvUnwrap(blender_class.BlenderClass):
    """Batch UV-unwraps every (selected) mesh object, one object at a time."""

    # settings for GUI version only
    bl_idname = "uv.unwrap_batch"
    bl_label = "Batch UV Unwrap"
    bl_description = "batch uv unwrap objects"
    bl_options = {"REGISTER", "UNDO"}

    def __init__(self):
        self._default_attributes = dict(
            selected_only=False,        # use only objects selected in scene (GUI version only)
            scale_to_bounds=False,      # scale the unwrapped map to the square UV image bounds
            clip_to_bounds=False,       # clip off any unwrapping that exceeds the bounds
            unwrap_type='Cube',         # unwrap method: Cube, Sphere, Cylinder, or Smart
            use_set_size=False,         # Cube/Cylinder: use a fixed projection size for all objects
                                        # (overrides scale_to_bounds to False)
            set_size=2,                 # projection size for cube and cylinder project
            print_updated_results=True  # print progress to console
        )

    def execute(self, in_attributes=None):
        """Switch to object mode, run the batch unwrap, and report the elapsed time."""
        attributes = self.get_attributes(in_attributes)
        context = bpy.context
        then = time.time()  # start time of script execution

        # Blender operates in modes/contexts; some operations only work in certain contexts
        if bpy.context.mode != 'OBJECT':
            bpy.ops.object.mode_set(mode='OBJECT')

        run_ops_wo_update.open_update()  # run operators without updating the scene
        # important especially when working with loops
        self.unwrap(context, attributes)
        run_ops_wo_update.close_update()  # must always be called if open_update was

        now = time.time()  # time after it finished
        print("TIME FOR UNWRAP: ", round(now - then, 3))
        return {"FINISHED"}

    def unwrap(self, context, attributes):
        """Unwrap each collected mesh individually with the configured projection."""
        scaleBounds = attributes["scale_to_bounds"]
        clipBounds = attributes["clip_to_bounds"]
        unwrapType = attributes["unwrap_type"]
        use_set_size = attributes["use_set_size"]
        set_size = attributes["set_size"]
        print_updated_results = attributes["print_updated_results"]

        # select objects
        selected = select_mesh.setSelected(context, attributes["selected_only"],
                                           deselectAll=True)

        if len(selected):  # run only if there are mesh objects in the 'selected' array
            LINE_UP = '\033[1A'     # ANSI: move up a line in the console
            LINE_CLEAR = '\x1b[2K'  # ANSI: clear current line in the console
            count = 0               # which object is being calculated
            then = time.time()      # start time of loop execution

            for obj in selected:  # unwrap each object separately (renamed from `object`, a builtin)
                obj.select_set(True)  # now the only selected object
                # Blender needs the active object to be the selected one
                context.view_layer.objects.active = obj
                # context switching is object dependent, so set edit mode after selection
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.select_all(action='SELECT')  # only selected vertices get unwrapped

                if unwrapType == "Smart":
                    # smart UV can take a long time, so print a progress report
                    if count and print_updated_results:
                        # smartUV sometimes prints unwanted output; suppress it
                        with contextlib.redirect_stdout(None):
                            self.smartUV(scaleBounds)  # perform the uv unwrap
                        now = time.time()
                        timeElapsed = now - then
                        remaining = len(selected) - count            # number of remaining objects
                        timeLeft = timeElapsed / count * remaining   # estimated remaining time
                        # overwrite the previously printed progress lines
                        print(LINE_UP, end=LINE_CLEAR)
                        print(LINE_UP, end=LINE_CLEAR)
                        print("Object Count = ", count, " Objects Remaining = ", remaining)
                        print(" Elapsed Time = ", round(timeElapsed, 3), " Time Remaining = ", round(timeLeft, 3))
                    else:  # first object, or not printing results
                        self.smartUV(scaleBounds)  # perform the uv unwrap
                        if print_updated_results:
                            print("Object Count = 0")
                            # fixed message typo: was "UNKOWN"
                            print("Time Remaining = UNKNOWN")
                elif unwrapType == "Cube":
                    self.cubeUV(scaleBounds, clipBounds, use_set_size, set_size)
                elif unwrapType == "Sphere":
                    self.sphereUV(scaleBounds, clipBounds)
                elif unwrapType == "Cylinder":
                    self.cylinderUV(scaleBounds, clipBounds, use_set_size, set_size)

                # must be back in object mode to select the next object
                bpy.ops.object.mode_set(mode='OBJECT')
                obj.select_set(False)  # deselect; again no objects selected
                count += 1

            for obj in selected:  # reselect all originally selected meshes
                obj.select_set(True)
        else:
            utils.do_print_error("NO MESH OBJECTS")
        return {'FINISHED'}

    # methods for running each type of uv projection

    def smartUV(self, scale):
        bpy.ops.uv.smart_project(correct_aspect=True, scale_to_bounds=scale)

    def cubeUV(self, scale, clip, use_set_size, size):
        if use_set_size:  # user sets cube_size value of cube projection
            bpy.ops.uv.cube_project(scale_to_bounds=False, clip_to_bounds=clip, cube_size=size)
        else:
            bpy.ops.uv.cube_project(scale_to_bounds=scale, clip_to_bounds=clip)

    def sphereUV(self, scale, clip):
        # 'ALIGN_TO_OBJECT' keeps the projection direction consistent regardless of view position/direction
        bpy.ops.uv.sphere_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)

    def cylinderUV(self, scale, clip, use_set_size, size):
        if use_set_size:  # user sets radius value of cylinder projection
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=False, clip_to_bounds=clip, radius=size)
        else:
            bpy.ops.uv.cylinder_project(direction='ALIGN_TO_OBJECT', scale_to_bounds=scale, clip_to_bounds=clip)
8,297
Python
52.192307
150
0.632277
NVIDIA-Omniverse/blender_omniverse_addons/omni/__init__.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

"""
To invoke in Blender script editor:

import bpy
bpy.ops.universalmaterialmap.generator()
bpy.ops.universalmaterialmap.converter()

INFO_HT_header
Header
VIEW3D_HT_tool_header

Info Header: INFO_HT_HEADER
3D View Header: VIEW3D_HT_HEADER
Timeline Header: TIME_HT_HEADER
Outliner Header: OUTLINER_HT_HEADER
Properties Header: PROPERTIES_HT_HEADER, etc.
"""

"""
Menu location problem
https://blender.stackexchange.com/questions/3393/add-custom-menu-at-specific-location-in-the-header#:~:text=Blender%20has%20a%20built%20in,%3EPython%2D%3EUI%20Menu.
"""

bl_info = {
    'name': 'Universal Material Map',
    'author': 'NVIDIA Corporation',
    'description': 'A Blender AddOn based on the Universal Material Map framework.',
    'blender': (3, 1, 0),
    'location': 'View3D',
    'warning': '',
    'category': 'Omniverse'
}

import sys
import importlib

import bpy

from .universalmaterialmap.blender import developer_mode

if developer_mode:
    # In developer mode, force-reload the package's modules in dependency
    # order so edits are picked up without restarting Blender.
    print('UMM DEBUG: Initializing "{0}"'.format(__file__))
    ordered_module_names = [
        'omni.universalmaterialmap',
        'omni.universalmaterialmap.core',
        'omni.universalmaterialmap.core.feature',
        'omni.universalmaterialmap.core.singleton',
        'omni.universalmaterialmap.core.data',
        'omni.universalmaterialmap.core.util',
        'omni.universalmaterialmap.core.operator',
        'omni.universalmaterialmap.core.service',
        'omni.universalmaterialmap.core.service.core',
        'omni.universalmaterialmap.core.service.delegate',
        'omni.universalmaterialmap.core.service.resources',
        'omni.universalmaterialmap.core.service.store',
        'omni.universalmaterialmap.core.converter',
        'omni.universalmaterialmap.core.converter.core',
        'omni.universalmaterialmap.core.converter.util',
        'omni.universalmaterialmap.core.generator',
        'omni.universalmaterialmap.core.generator.core',
        'omni.universalmaterialmap.core.generator.util',
        'omni.universalmaterialmap.blender',
        'omni.universalmaterialmap.blender.menu',
        'omni.universalmaterialmap.blender.converter',
        'omni.universalmaterialmap.blender.generator',
        'omni.universalmaterialmap.blender.material',
    ]
    for module_name in sys.modules:
        # BUG FIX: the original used a substring test ("'omni.' not in module_name"),
        # which also matched unrelated modules merely containing 'omni.' and made
        # them raise below. A prefix test expresses the intent.
        if not module_name.startswith('omni.'):
            continue
        if module_name not in ordered_module_names:
            raise Exception('Unexpected module name in sys.modules: {0}'.format(module_name))
    for module_name in ordered_module_names:
        if module_name in sys.modules:
            print('UMM reloading: {0}'.format(module_name))
            importlib.reload(sys.modules.get(module_name))

if developer_mode:
    from .universalmaterialmap.blender.converter import OT_InstanceToDataConverter, OT_DataToInstanceConverter, OT_DataToDataConverter, OT_ApplyDataToInstance, OT_DescribeShaderGraph
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu
    from .universalmaterialmap.blender.generator import OT_Generator
else:
    from .universalmaterialmap.blender.converter import OT_CreateTemplateOmniPBR, OT_CreateTemplateOmniGlass
    from .universalmaterialmap.blender.menu import UniversalMaterialMapMenu


def draw_item(self, context):
    """Append the UMM menu to whatever header this is attached to."""
    layout = self.layout
    layout.menu(UniversalMaterialMapMenu.bl_idname)


def register():
    """Register the add-on's operator and menu classes with Blender."""
    bpy.utils.register_class(OT_CreateTemplateOmniPBR)
    bpy.utils.register_class(OT_CreateTemplateOmniGlass)
    if developer_mode:
        bpy.utils.register_class(OT_DataToInstanceConverter)
        bpy.utils.register_class(OT_DataToDataConverter)
        bpy.utils.register_class(OT_ApplyDataToInstance)
        bpy.utils.register_class(OT_InstanceToDataConverter)
        bpy.utils.register_class(OT_DescribeShaderGraph)
        bpy.utils.register_class(OT_Generator)
    bpy.utils.register_class(UniversalMaterialMapMenu)
    # lets add ourselves to the main header
    bpy.types.NODE_HT_header.append(draw_item)


def unregister():
    """Remove everything register() added."""
    bpy.utils.unregister_class(OT_CreateTemplateOmniPBR)
    bpy.utils.unregister_class(OT_CreateTemplateOmniGlass)
    if developer_mode:
        bpy.utils.unregister_class(OT_DataToInstanceConverter)
        bpy.utils.unregister_class(OT_DataToDataConverter)
        bpy.utils.unregister_class(OT_ApplyDataToInstance)
        bpy.utils.unregister_class(OT_InstanceToDataConverter)
        bpy.utils.unregister_class(OT_DescribeShaderGraph)
        bpy.utils.unregister_class(OT_Generator)
    bpy.utils.unregister_class(UniversalMaterialMapMenu)
    bpy.types.NODE_HT_header.remove(draw_item)


if __name__ == "__main__":
    register()

# The menu can also be called from scripts
# bpy.ops.wm.call_menu(name=UniversalMaterialMapMenu.bl_idname)
5,725
Python
35.471337
182
0.731528
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/util.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import typing
import sys

from .data import Plug


def to_plug_value_type(value: typing.Any, assumed_value_type: str) -> str:
    """Returns matching :class:`omni.universalmaterialmap.core.data.Plug` value type.

    Scalars map directly; iterables are classified as VECTOR2/3/4 when they
    contain floats (or when *assumed_value_type* says so), else LIST.
    Anything unrecognized falls through to VALUE_TYPE_ANY.
    """
    if sys.version_info.major < 3:  # Python 2 compatibility branch
        if isinstance(value, basestring):
            return Plug.VALUE_TYPE_STRING
    else:
        if isinstance(value, str):
            return Plug.VALUE_TYPE_STRING
    # bool must be tested before int: bool is a subclass of int
    if type(value) == bool:
        return Plug.VALUE_TYPE_BOOLEAN
    if isinstance(value, int):
        return Plug.VALUE_TYPE_INTEGER
    if isinstance(value, float):
        return Plug.VALUE_TYPE_FLOAT

    try:
        iter(value)
        is_iterable = True
    except TypeError:
        is_iterable = False

    if is_iterable:
        if assumed_value_type == Plug.VALUE_TYPE_LIST:
            return Plug.VALUE_TYPE_LIST

        num_booleans = 0  # renamed from typo'd `bum_booleans`
        num_integers = 0
        num_floats = 0
        num_strings = 0
        for o in value:
            # BUG FIX: the original tested `isinstance(value, ...)` (the
            # container) inside this loop instead of the element `o`, so
            # string elements were never counted.
            if sys.version_info.major < 3:
                if isinstance(o, basestring):
                    num_strings += 1
                    continue
            else:
                if isinstance(o, str):
                    num_strings += 1
                    continue
            if type(o) == bool:
                num_booleans += 1
                continue
            if isinstance(o, int):
                num_integers += 1
                continue
            if isinstance(o, float):
                num_floats += 1

        if num_floats > 0:
            if len(value) == 2:
                return Plug.VALUE_TYPE_VECTOR2
            if len(value) == 3:
                return Plug.VALUE_TYPE_VECTOR3
            if len(value) == 4:
                return Plug.VALUE_TYPE_VECTOR4

        # No floats found: trust the assumed type when the length matches.
        if len(value) == 2 and assumed_value_type == Plug.VALUE_TYPE_VECTOR2:
            return assumed_value_type
        if len(value) == 3 and assumed_value_type == Plug.VALUE_TYPE_VECTOR3:
            return assumed_value_type
        if len(value) == 4 and assumed_value_type == Plug.VALUE_TYPE_VECTOR4:
            return assumed_value_type

        return Plug.VALUE_TYPE_LIST

    return Plug.VALUE_TYPE_ANY


def get_extension_from_image_file_format(format: str, base_name: str) -> str:
    """
    For image formats that have multiple possible extensions,
    determine if we should stick with the current format specifier
    or use the one from the filename itself.
    """
    format = format.lower()
    split = base_name.rpartition(".")[-1]
    extension = split.lower() if len(split) else None

    if format == "open_exr":
        format = "exr"
    elif format == "jpeg":
        format = extension if extension in {"jpeg", "jpg"} else "jpg"
    elif format == "tiff":
        format = extension if extension in {"tiff", "tif"} else "tif"
    elif format == "targa_raw":
        format = "tga"
    return format
3,780
Python
31.042373
88
0.598677
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/data.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import uuid import sys import importlib from .service.core import IDelegate class ChangeNotification(object): def __init__(self, item: object, property_name: str, old_value: typing.Any, new_value: typing.Any): super(ChangeNotification, self).__init__() self._item: object = item self._property_name: str = property_name self._old_value: typing.Any = old_value self._new_value: typing.Any = new_value @property def item(self) -> object: """ """ return self._item @property def property_name(self) -> str: """ """ return self._property_name @property def old_value(self) -> typing.Any: """ """ return self._old_value @property def new_value(self) -> typing.Any: """ """ return self._new_value class Notifying(object): """Base class providing change notification capability""" def __init__(self): super(Notifying, self).__init__() self._changed_callbacks: typing.Dict[uuid.uuid4, typing.Callable[[ChangeNotification], typing.NoReturn]] = dict() def add_changed_fn(self, callback: typing.Callable[[ChangeNotification], typing.NoReturn]) -> uuid.uuid4: for key, value in self._changed_callbacks.items(): if value == callback: return key key = 
uuid.uuid4() self._changed_callbacks[key] = callback return key def remove_changed_fn(self, callback_id: uuid.uuid4) -> None: if callback_id in self._changed_callbacks.keys(): del self._changed_callbacks[callback_id] def _notify(self, notification: ChangeNotification): for callback in self._changed_callbacks.values(): callback(notification) def destroy(self): self._changed_callbacks = None class Subscribing(Notifying): def __init__(self): super(Subscribing, self).__init__() self._subscriptions: typing.Dict[Notifying, uuid.uuid4] = dict() def _subscribe(self, notifying: Notifying) -> uuid.uuid4: if notifying in self._subscriptions.keys(): return self._subscriptions[notifying] self._subscriptions[notifying] = notifying.add_changed_fn(self._on_notification) def _unsubscribe(self, notifying: Notifying) -> None: if notifying in self._subscriptions.keys(): callback_id = self._subscriptions[notifying] del self._subscriptions[notifying] notifying.remove_changed_fn(callback_id=callback_id) def _on_notification(self, notification: ChangeNotification) -> None: pass class ManagedListInsert(object): def __init__(self, notifying: Notifying, index: int): super(ManagedListInsert, self).__init__() self._notifying: Notifying = notifying self._index: int = index @property def notifying(self) -> Notifying: """ """ return self._notifying @property def index(self) -> int: """ """ return self._index class ManagedListRemove(object): def __init__(self, notifying: Notifying, index: int): super(ManagedListRemove, self).__init__() self._notifying: Notifying = notifying self._index: int = index @property def notifying(self) -> Notifying: """ """ return self._notifying @property def index(self) -> int: """ """ return self._index class ManagedListNotification(object): ADDED_ITEMS: int = 0 UPDATED_ITEMS: int = 1 REMOVED_ITEMS: int = 2 def __init__(self, managed_list: 'ManagedList', items: typing.List[typing.Union[ManagedListInsert, ChangeNotification, ManagedListRemove]]): 
super(ManagedListNotification, self).__init__() self._managed_list: ManagedList = managed_list self._inserted_items: typing.List[ManagedListInsert] = [] self._change_notifications: typing.List[ChangeNotification] = [] self._removed_items: typing.List[ManagedListRemove] = [] self._kind: int = -1 if isinstance(items[0], ManagedListInsert): self._kind = ManagedListNotification.ADDED_ITEMS self._inserted_items = typing.cast(typing.List[ManagedListInsert], items) elif isinstance(items[0], ChangeNotification): self._kind = ManagedListNotification.UPDATED_ITEMS self._change_notifications = typing.cast(typing.List[ChangeNotification], items) elif isinstance(items[0], ManagedListRemove): self._kind = ManagedListNotification.REMOVED_ITEMS self._removed_items = typing.cast(typing.List[ManagedListRemove], items) else: raise Exception('Unexpected object: "{0}" of type "{1}".'.format(items[0], type(items[0]))) @property def managed_list(self) -> 'ManagedList': """ """ return self._managed_list @property def kind(self) -> int: """ """ return self._kind @property def inserted_items(self) -> typing.List[ManagedListInsert]: """ """ return self._inserted_items @property def change_notifications(self) -> typing.List[ChangeNotification]: """ """ return self._change_notifications @property def removed_items(self) -> typing.List[ManagedListRemove]: """ """ return self._removed_items class ManagedList(object): def __init__(self, items: typing.List[Notifying] = None): super(ManagedList, self).__init__() self._subscriptions: typing.Dict[Notifying, uuid.uuid4] = dict() self._changed_callbacks: typing.Dict[uuid.uuid4, typing.Callable[[ManagedListNotification], typing.NoReturn]] = dict() self._managed_items: typing.List[Notifying] = [] if items: for o in items: self._manage_item(notifying=o) def __iter__(self): return iter(self._managed_items) def _manage_item(self, notifying: Notifying) -> typing.Union[Notifying, None]: """ Subscribes to managed item. Returns item only if it became managed. 
""" if notifying in self._managed_items: return None self._managed_items.append(notifying) self._subscriptions[notifying] = notifying.add_changed_fn(self._on_notification) return notifying def _unmanage_item(self, notifying: Notifying) -> typing.Union[typing.Tuple[Notifying, int], typing.Tuple[None, int]]: """ Unsubscribes to managed item. Returns item only if it became unmanaged. """ if notifying not in self._managed_items: return None, -1 index = self._managed_items.index(notifying) self._managed_items.remove(notifying) callback_id = self._subscriptions[notifying] del self._subscriptions[notifying] notifying.remove_changed_fn(callback_id=callback_id) return notifying, index def _on_notification(self, notification: ChangeNotification) -> None: self._notify( notification=ManagedListNotification( managed_list=self, items=[notification] ) ) def _notify(self, notification: ManagedListNotification): for callback in self._changed_callbacks.values(): callback(notification) def add_changed_fn(self, callback: typing.Callable[[ManagedListNotification], typing.NoReturn]) -> uuid.uuid4: for key, value in self._changed_callbacks.items(): if value == callback: return key key = uuid.uuid4() self._changed_callbacks[key] = callback return key def remove_changed_fn(self, callback_id: uuid.uuid4) -> None: if callback_id in self._changed_callbacks.keys(): del self._changed_callbacks[callback_id] def append(self, notifying: Notifying) -> None: if self._manage_item(notifying=notifying) is not None: self._notify( ManagedListNotification( managed_list=self, items=[ManagedListInsert(notifying=notifying, index=self.index(notifying=notifying))] ) ) def extend(self, notifying: typing.List[Notifying]) -> None: added = [] for o in notifying: o = self._manage_item(notifying=o) if o: added.append(o) if len(added) == 0: return self._notify( ManagedListNotification( managed_list=self, items=[ManagedListInsert(notifying=o, index=self.index(notifying=o)) for o in added] ) ) def remove(self, 
notifying: Notifying) -> None: notifying, index = self._unmanage_item(notifying=notifying) if notifying: self._notify( ManagedListNotification( managed_list=self, items=[ManagedListRemove(notifying=notifying, index=index)] ) ) def remove_all(self) -> None: items = [ManagedListRemove(notifying=o, index=i) for i, o in enumerate(self._managed_items)] for callback_id, notifying in self._subscriptions.items(): notifying.remove_changed_fn(callback_id=callback_id) self._subscriptions = dict() self._managed_items = [] self._notify( ManagedListNotification( managed_list=self, items=items ) ) def pop(self, index: int = 0) -> Notifying: notifying, index = self._unmanage_item(self._managed_items[index]) self._notify( ManagedListNotification( managed_list=self, items=[ManagedListRemove(notifying=notifying, index=index)] ) ) return notifying def index(self, notifying: Notifying) -> int: if notifying in self._managed_items: return self._managed_items.index(notifying) return -1 class Serializable(Subscribing): """Base class providing serialization method template""" def __init__(self): super(Serializable, self).__init__() def serialize(self) -> dict: """ """ return dict() def deserialize(self, data: dict) -> None: """ """ pass class Base(Serializable): """Base class providing id property""" @classmethod def Create(cls) -> 'Base': return cls() def __init__(self): super(Base, self).__init__() self._id: str = str(uuid.uuid4()) def serialize(self) -> dict: """ """ output = super(Base, self).serialize() output['_id'] = self._id return output def deserialize(self, data: dict) -> None: """ """ super(Base, self).deserialize(data=data) self._id = data['_id'] if '_id' in data.keys() else str(uuid.uuid4()) @property def id(self) -> str: """ """ return self._id class DagNode(Base): """Base class providing input and outputs of :class:`omni.universalmaterialmap.core.data.Plug` """ def __init__(self): super(DagNode, self).__init__() self._inputs: typing.List[Plug] = [] self._outputs: 
typing.List[Plug] = [] self._computing: bool = False def serialize(self) -> dict: """ """ output = super(DagNode, self).serialize() output['_inputs'] = [plug.serialize() for plug in self.inputs] output['_outputs'] = [plug.serialize() for plug in self.outputs] return output def deserialize(self, data: dict) -> None: """ """ super(DagNode, self).deserialize(data=data) old_inputs = self._inputs[:] old_outputs = self._outputs[:] while len(self._inputs): self._unsubscribe(notifying=self._inputs.pop()) while len(self._outputs): self._unsubscribe(notifying=self._outputs.pop()) plugs = [] if '_inputs' in data.keys(): for o in data['_inputs']: plug = Plug(parent=self) plug.deserialize(data=o) plugs.append(plug) self._inputs = plugs plugs = [] if '_outputs' in data.keys(): for o in data['_outputs']: plug = Plug(parent=self) plug.deserialize(data=o) plugs.append(plug) self._outputs = plugs for o in self._inputs: self._subscribe(notifying=o) for o in self._outputs: self._subscribe(notifying=o) if not old_inputs == self._inputs: self._notify( ChangeNotification( item=self, property_name='inputs', old_value=old_inputs, new_value=self._inputs[:] ) ) if not old_inputs == self._outputs: self._notify( ChangeNotification( item=self, property_name='outputs', old_value=old_outputs, new_value=self._outputs[:] ) ) def _on_notification(self, notification: ChangeNotification) -> None: if notification.item == self: return # Re-broadcast notification self._notify(notification=notification) def invalidate(self, plug: 'Plug'): pass def compute(self) -> None: """ """ if self._computing: return self._computing = True self._compute_inputs(input_plugs=self._inputs) self._compute_outputs(output_plugs=self._outputs) self._computing = False def _compute_inputs(self, input_plugs: typing.List['Plug']): # Compute dependencies for plug in input_plugs: if not plug.input: continue if not plug.input.parent: continue if not plug.input.is_invalid: continue plug.input.parent.compute() # Set computed_value for 
plug in input_plugs: if plug.input: plug.computed_value = plug.input.computed_value else: plug.computed_value = plug.value def _compute_outputs(self, output_plugs: typing.List['Plug']): # Compute dependencies for plug in output_plugs: if not plug.input: continue if not plug.input.parent: continue if not plug.input.is_invalid: continue plug.input.parent.compute() # Set computed_value for plug in output_plugs: if plug.input: plug.computed_value = plug.input.computed_value else: plug.computed_value = plug.value def add_input(self) -> 'Plug': raise NotImplementedError() def can_remove_plug(self, plug: 'Plug') -> bool: return plug.is_removable def remove_plug(self, plug: 'Plug') -> None: if not plug.is_removable: raise Exception('Plug is not removable') notifications = [] if plug in self._inputs: old_value = self._inputs[:] self._unsubscribe(notifying=plug) self._inputs.remove(plug) notifications.append( ChangeNotification( item=self, property_name='inputs', old_value=old_value, new_value=self._inputs[:] ) ) if plug in self._outputs: old_value = self._outputs[:] self._unsubscribe(notifying=plug) self._outputs.remove(plug) notifications.append( ChangeNotification( item=self, property_name='outputs', old_value=old_value, new_value=self._outputs[:] ) ) destination: Plug for destination in plug.outputs: destination.input = None for notification in notifications: self._notify(notification=notification) @property def can_add_input(self) -> bool: return False @property def inputs(self) -> typing.List['Plug']: """ """ return self._inputs @property def outputs(self) -> typing.List['Plug']: """ """ return self._outputs class GraphEntity(DagNode): """Base class providing omni.kit.widget.graph properties for a data item.""" OPEN = 0 MINIMIZED = 1 CLOSED = 2 def __init__(self): super(GraphEntity, self).__init__() self._display_name: str = '' self._position: typing.Union[typing.Tuple[float, float], None] = None self._expansion_state: int = GraphEntity.OPEN self._show_inputs: bool = 
True self._show_outputs: bool = True self._show_peripheral: bool = False def serialize(self) -> dict: """ """ output = super(GraphEntity, self).serialize() output['_display_name'] = self._display_name output['_position'] = self._position output['_expansion_state'] = self._expansion_state output['_show_inputs'] = self._show_inputs output['_show_outputs'] = self._show_outputs output['_show_peripheral'] = self._show_peripheral return output def deserialize(self, data: dict) -> None: """ """ super(GraphEntity, self).deserialize(data=data) self._display_name = data['_display_name'] if '_display_name' in data.keys() else '' self._position = data['_position'] if '_position' in data.keys() else None self._expansion_state = data['_expansion_state'] if '_expansion_state' in data.keys() else GraphEntity.OPEN self._show_inputs = data['_show_inputs'] if '_show_inputs' in data.keys() else True self._show_outputs = data['_show_outputs'] if '_show_outputs' in data.keys() else True self._show_peripheral = data['_show_peripheral'] if '_show_peripheral' in data.keys() else False @property def display_name(self) -> str: """ """ return self._display_name @display_name.setter def display_name(self, value: str) -> None: """ """ if self._display_name is value: return notification = ChangeNotification( item=self, property_name='display_name', old_value=self._display_name, new_value=value ) self._display_name = value self._notify(notification=notification) @property def position(self) -> typing.Union[typing.Tuple[float, float], None]: """ """ return self._position @position.setter def position(self, value: typing.Union[typing.Tuple[float, float], None]) -> None: """ """ if self._position is value: return notification = ChangeNotification( item=self, property_name='position', old_value=self._position, new_value=value ) self._position = value self._notify(notification=notification) @property def expansion_state(self) -> int: """ """ return self._expansion_state @expansion_state.setter def 
expansion_state(self, value: int) -> None: """ """ if self._expansion_state is value: return notification = ChangeNotification( item=self, property_name='expansion_state', old_value=self._expansion_state, new_value=value ) self._expansion_state = value self._notify(notification=notification) @property def show_inputs(self) -> bool: """ """ return self._show_inputs @show_inputs.setter def show_inputs(self, value: bool) -> None: """ """ if self._show_inputs is value: return notification = ChangeNotification( item=self, property_name='show_inputs', old_value=self._show_inputs, new_value=value ) self._show_inputs = value self._notify(notification=notification) @property def show_outputs(self) -> bool: """ """ return self._show_outputs @show_outputs.setter def show_outputs(self, value: bool) -> None: """ """ if self._show_outputs is value: return notification = ChangeNotification( item=self, property_name='show_outputs', old_value=self._show_outputs, new_value=value ) self._show_outputs = value self._notify(notification=notification) @property def show_peripheral(self) -> bool: """ """ return self._show_peripheral @show_peripheral.setter def show_peripheral(self, value: bool) -> None: """ """ if self._show_peripheral is value: return notification = ChangeNotification( item=self, property_name='show_peripheral', old_value=self._show_peripheral, new_value=value ) self._show_peripheral = value self._notify(notification=notification) class Connection(Serializable): def __init__(self): super(Connection, self).__init__() self._source_id = '' self._destination_id = '' def serialize(self) -> dict: output = super(Connection, self).serialize() output['_source_id'] = self._source_id output['_destination_id'] = self._destination_id return output def deserialize(self, data: dict) -> None: super(Connection, self).deserialize(data=data) self._source_id = data['_source_id'] if '_source_id' in data.keys() else '' self._destination_id = data['_destination_id'] if '_destination_id' in 
data.keys() else '' @property def source_id(self): return self._source_id @property def destination_id(self): return self._destination_id class Plug(Base): """ A Plug can be: a source an output both a source and an output a container for a static value - most likely as an output a container for an editable value - most likely as an output plug.default_value Starting point and for resetting. plug.value Apply as computed_value if there is no input or dependency providing a value. plug.computed_value Final value. Could be thought of as plug.output_value. Plug is_dirty on input connect input disconnect value change if not connected A Plug is_dirty if it is_dirty its input is_dirty any dependency is_dirty """ VALUE_TYPE_ANY = 'any' VALUE_TYPE_FLOAT = 'float' VALUE_TYPE_INTEGER = 'int' VALUE_TYPE_STRING = 'str' VALUE_TYPE_BOOLEAN = 'bool' VALUE_TYPE_NODE_ID = 'node_id' VALUE_TYPE_VECTOR2 = 'vector2' VALUE_TYPE_VECTOR3 = 'vector3' VALUE_TYPE_VECTOR4 = 'vector4' VALUE_TYPE_ENUM = 'enum' VALUE_TYPE_LIST = 'list' VALUE_TYPES = [ VALUE_TYPE_ANY, VALUE_TYPE_FLOAT, VALUE_TYPE_INTEGER, VALUE_TYPE_STRING, VALUE_TYPE_BOOLEAN, VALUE_TYPE_NODE_ID, VALUE_TYPE_VECTOR2, VALUE_TYPE_VECTOR3, VALUE_TYPE_VECTOR4, VALUE_TYPE_ENUM, VALUE_TYPE_LIST, ] @classmethod def Create( cls, parent: DagNode, name: str, display_name: str, value_type: str = 'any', editable: bool = False, is_removable: bool = False, ) -> 'Plug': instance = cls(parent=parent) instance._name = name instance._display_name = display_name instance._value_type = value_type instance._is_editable = editable instance._is_removable = is_removable return instance def __init__(self, parent: DagNode): super(Plug, self).__init__() self._parent: DagNode = parent self._name: str = '' self._display_name: str = '' self._value_type: str = Plug.VALUE_TYPE_ANY self._internal_value_type: str = Plug.VALUE_TYPE_ANY self._is_peripheral: bool = False self._is_editable: bool = False self._is_removable: bool = False self._default_value: typing.Any = 
None self._computed_value: typing.Any = None self._value: typing.Any = None self._is_invalid: bool = False self._input: typing.Union[Plug, typing.NoReturn] = None self._outputs: typing.List[Plug] = [] self._enum_values: typing.List = [] def serialize(self) -> dict: output = super(Plug, self).serialize() output['_name'] = self._name output['_display_name'] = self._display_name output['_value_type'] = self._value_type output['_internal_value_type'] = self._internal_value_type output['_is_peripheral'] = self._is_peripheral output['_is_editable'] = self._is_editable output['_is_removable'] = self._is_removable output['_default_value'] = self._default_value output['_value'] = self._value output['_enum_values'] = self._enum_values return output def deserialize(self, data: dict) -> None: super(Plug, self).deserialize(data=data) self._input = None self._name = data['_name'] if '_name' in data.keys() else '' self._display_name = data['_display_name'] if '_display_name' in data.keys() else '' self._value_type = data['_value_type'] if '_value_type' in data.keys() else Plug.VALUE_TYPE_ANY self._internal_value_type = data['_internal_value_type'] if '_internal_value_type' in data.keys() else None self._is_peripheral = data['_is_peripheral'] if '_is_peripheral' in data.keys() else False self._is_editable = data['_is_editable'] if '_is_editable' in data.keys() else False self._is_removable = data['_is_removable'] if '_is_removable' in data.keys() else False self._default_value = data['_default_value'] if '_default_value' in data.keys() else None self._value = data['_value'] if '_value' in data.keys() else self._default_value self._enum_values = data['_enum_values'] if '_enum_values' in data.keys() else [] def invalidate(self) -> None: if self._is_invalid: return self._is_invalid = True if self.parent: self.parent.invalidate(self) @property def parent(self) -> DagNode: return self._parent @property def name(self) -> str: return self._name @name.setter def name(self, value: str) -> 
None: if self._name is value: return notification = ChangeNotification( item=self, property_name='name', old_value=self._name, new_value=value ) self._name = value self._notify(notification=notification) @property def display_name(self) -> str: return self._display_name @display_name.setter def display_name(self, value: str) -> None: if self._display_name is value: return notification = ChangeNotification( item=self, property_name='display_name', old_value=self._display_name, new_value=value ) self._display_name = value self._notify(notification=notification) @property def value_type(self) -> str: return self._value_type @value_type.setter def value_type(self, value: str) -> None: if self._value_type is value: return notification = ChangeNotification( item=self, property_name='value_type', old_value=self._value_type, new_value=value ) self._value_type = value self._notify(notification=notification) @property def internal_value_type(self) -> str: return self._internal_value_type @internal_value_type.setter def internal_value_type(self, value: str) -> None: if self._internal_value_type is value: return notification = ChangeNotification( item=self, property_name='internal_value_type', old_value=self._internal_value_type, new_value=value ) self._internal_value_type = value self._notify(notification=notification) @property def is_removable(self) -> bool: return self._is_removable @property def is_peripheral(self) -> bool: return self._is_peripheral @is_peripheral.setter def is_peripheral(self, value: bool) -> None: if self._is_peripheral is value: return notification = ChangeNotification( item=self, property_name='is_peripheral', old_value=self._is_peripheral, new_value=value ) self._is_peripheral = value self._notify(notification=notification) @property def computed_value(self) -> typing.Any: return self._computed_value @computed_value.setter def computed_value(self, value: typing.Any) -> None: if self._computed_value is value: self._is_invalid = False self._value = 
self._computed_value return notification = ChangeNotification( item=self, property_name='computed_value', old_value=self._computed_value, new_value=value ) if self._input and self._input.is_invalid: print('WARNING: Universal Material Map: Compute encountered an unexpected state: input invalid after compute. Results may be incorrect.') print('\tplug: "{0}"'.format(self.name)) if self._parent: print('\tplug.parent: "{0}"'.format(self._parent.__class__.__name__)) print('\tplug.input: "{0}"'.format(self._input.name)) if self._input.parent: print('\tplug.input.parent: "{0}"'.format(self._input.parent.__class__.__name__)) return self._is_invalid = False self._computed_value = value self._value = self._computed_value self._notify(notification=notification) @property def value(self) -> typing.Any: return self._value @value.setter def value(self, value: typing.Any) -> None: if self._value is value: return notification = ChangeNotification( item=self, property_name='value', old_value=self._value, new_value=value ) self._value = value self._notify(notification=notification) if self._input is None: self.invalidate() @property def is_invalid(self) -> typing.Any: if self._input and self._input._is_invalid: return True return self._is_invalid @property def input(self) -> typing.Union['Plug', typing.NoReturn]: return self._input @input.setter def input(self, value: typing.Union['Plug', typing.NoReturn]) -> None: if self._input is value: return notification = ChangeNotification( item=self, property_name='input', old_value=self._input, new_value=value ) self._input = value self._notify(notification=notification) self.invalidate() @property def outputs(self) -> typing.List['Plug']: return self._outputs @property def is_editable(self) -> bool: return self._is_editable @is_editable.setter def is_editable(self, value: bool) -> None: if self._is_editable is value: return notification = ChangeNotification( item=self, property_name='is_editable', old_value=self._is_editable, 
new_value=value ) self._is_editable = value self._notify(notification=notification) @property def default_value(self) -> typing.Any: return self._default_value @default_value.setter def default_value(self, value: typing.Any) -> None: if self._default_value is value: return notification = ChangeNotification( item=self, property_name='default_value', old_value=self._default_value, new_value=value ) self._default_value = value self._notify(notification=notification) @property def enum_values(self) -> typing.List: return self._enum_values @enum_values.setter def enum_values(self, value: typing.List) -> None: if self._enum_values is value: return notification = ChangeNotification( item=self, property_name='enum_values', old_value=self._enum_values, new_value=value ) self._enum_values = value self._notify(notification=notification) class Node(DagNode): @classmethod def Create(cls, class_name: str) -> 'Node': instance = typing.cast(Node, super(Node, cls).Create()) instance._class_name = class_name return instance def __init__(self): super(Node, self).__init__() self._class_name: str = '' def serialize(self) -> dict: output = super(Node, self).serialize() output['_class_name'] = self._class_name return output def deserialize(self, data: dict) -> None: super(Node, self).deserialize(data=data) self._class_name = data['_class_name'] if '_class_name' in data.keys() else '' @property def class_name(self): return self._class_name class Client(Serializable): ANY_VERSION = 'any' NO_VERSION = 'none' DCC_OMNIVERSE_CREATE = 'Omniverse Create' DCC_3DS_MAX = '3ds MAX' DCC_MAYA = 'Maya' DCC_HOUDINI = 'Houdini' DCC_SUBSTANCE_DESIGNER = 'Substance Designer' DCC_SUBSTANCE_PAINTER = 'Substance Painter' DCC_BLENDER = 'Blender' @classmethod def Autodesk_3dsMax(cls, version: str = ANY_VERSION) -> 'Client': instance = Client() instance._name = Client.DCC_3DS_MAX instance._version = version return instance @classmethod def Autodesk_Maya(cls, version: str = ANY_VERSION) -> 'Client': instance = 
Client() instance._name = Client.DCC_MAYA instance._version = version return instance @classmethod def OmniverseCreate(cls, version: str = ANY_VERSION) -> 'Client': instance = Client() instance._name = Client.DCC_OMNIVERSE_CREATE instance._version = version return instance @classmethod def Blender(cls, version: str = ANY_VERSION) -> 'Client': instance = Client() instance._name = Client.DCC_BLENDER instance._version = version return instance def __init__(self): super(Client, self).__init__() self._name: str = '' self._version: str = '' def __eq__(self, other: 'Client') -> bool: if not isinstance(other, Client): return False return other.name == self._name and other.version == self._version def is_compatible(self, other: 'Client') -> bool: if not isinstance(other, Client): return False if other == self: return True return other._version == Client.ANY_VERSION or self._version == Client.ANY_VERSION def serialize(self) -> dict: output = super(Client, self).serialize() output['_name'] = self._name output['_version'] = self._version return output def deserialize(self, data: dict) -> None: super(Client, self).deserialize(data=data) self._name = data['_name'] if '_name' in data.keys() else '' self._version = data['_version'] if '_version' in data.keys() else '' @property def name(self) -> str: return self._name @name.setter def name(self, value: str) -> None: self._name = value @property def version(self) -> str: return self._version @version.setter def version(self, value: str) -> None: self._version = value class AssemblyMetadata(Serializable): CATEGORY_BASE = 'Base Materials' CATEGORY_CONNECTOR = 'Connector Materials' CATEGORIES = [ CATEGORY_BASE, CATEGORY_CONNECTOR, ] def __init__(self): super(AssemblyMetadata, self).__init__() self._category = '' self._name = '' self._keywords: typing.List[str] = [] self._supported_clients: typing.List[Client] = [] def serialize(self) -> dict: output = super(AssemblyMetadata, self).serialize() output['_category'] = self._category 
output['_name'] = self._name output['_keywords'] = self._keywords output['_supported_clients'] = [o.serialize() for o in self._supported_clients] return output def deserialize(self, data: dict) -> None: super(AssemblyMetadata, self).deserialize(data=data) self._category = data['_category'] if '_category' in data.keys() else '' self._name = data['_name'] if '_name' in data.keys() else '' self._keywords = data['_keywords'] if '_keywords' in data.keys() else '' items = [] if '_supported_clients' in data.keys(): for o in data['_supported_clients']: item = Client() item.deserialize(data=o) items.append(item) self._supported_clients = items @property def category(self) -> str: return self._category @category.setter def category(self, value: str) -> None: self._category = value @property def name(self) -> str: return self._name @name.setter def name(self, value: str) -> None: self._name = value @property def keywords(self) -> typing.List[str]: return self._keywords @keywords.setter def keywords(self, value: typing.List[str]) -> None: self._keywords = value @property def supported_clients(self) -> typing.List[Client]: return self._supported_clients class Target(GraphEntity): def __init__(self): super(Target, self).__init__() self._nodes: typing.List[Node] = [] self._metadata: AssemblyMetadata = AssemblyMetadata() self._root_node_id: str = '' self._root_node: Node = None self._revision: int = 0 self._store_id: str = '' self._connections: typing.List[Connection] = [] def serialize(self) -> dict: output = super(Target, self).serialize() output['_nodes'] = [node.serialize() for node in self.nodes] output['_metadata'] = self._metadata.serialize() output['_root_node_id'] = self._root_node_id output['_revision'] = self._revision output['_connections'] = [o.serialize() for o in self._connections] return output def deserialize(self, data: dict) -> None: super(Target, self).deserialize(data=data) self._root_node_id = data['_root_node_id'] if '_root_node_id' in data.keys() else '' 
nodes = []  # (continuation of Target.deserialize from the previous chunk)
        if '_nodes' in data.keys():
            for o in data['_nodes']:
                node = Node()
                node.deserialize(data=o)
                nodes.append(node)
        self._nodes = nodes
        # Resolve the root node by id among the freshly deserialized nodes.
        root_node = None
        if self._root_node_id:
            for node in self._nodes:
                if node.id == self._root_node_id:
                    root_node = node
                    break
        self._root_node = root_node
        metadata = AssemblyMetadata()
        if '_metadata' in data.keys():
            metadata.deserialize(data=data['_metadata'])
        self._metadata = metadata
        self._revision = data['_revision'] if '_revision' in data.keys() else 0
        items = []
        if '_connections' in data.keys():
            for o in data['_connections']:
                item = Connection()
                item.deserialize(data=o)
                items.append(item)
        self._connections = items
        # Re-wire plug objects from the persisted Connection records.
        # NOTE(review): both source_id and destination_id matches map onto the same
        # local (input_plug / output_plug) — connections are treated as unordered
        # with direction recovered from which plug list the id is found in.
        for connection in self._connections:
            input_plug: Plug = None
            output_plug: Plug = None
            for node in self._nodes:
                for plug in node.inputs:
                    if connection.source_id == plug.id:
                        input_plug = plug
                    elif connection.destination_id == plug.id:
                        input_plug = plug
                for plug in node.outputs:
                    if connection.source_id == plug.id:
                        output_plug = plug
                    elif connection.destination_id == plug.id:
                        output_plug = plug
                if input_plug is not None and output_plug is not None:
                    break
            if input_plug is None or output_plug is None:
                # Dangling connection: one endpoint no longer exists; skip it.
                continue
            if output_plug not in input_plug.outputs:
                input_plug.outputs.append(output_plug)
            output_plug.input = input_plug

    def connect(self, source: Plug, destination: Plug) -> None:
        """Create (idempotently) a Connection from *source* to *destination* and wire the plugs."""
        for connection in self._connections:
            if connection.source_id == source.id and connection.destination_id == destination.id:
                return  # already connected
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source

    @property
    def nodes(self) -> typing.List[Node]:
        return self._nodes

    @property
    def metadata(self) -> AssemblyMetadata:
        return self._metadata

    @property
    def root_node(self) -> Node:
        return self._root_node

    @root_node.setter
    def root_node(self, value: Node) -> None:
        self._root_node = value
self._root_node_id = self._root_node.id if self._root_node else '' @property def revision(self) -> int: return self._revision @revision.setter def revision(self, value: int) -> None: self._revision = value @property def store_id(self) -> str: return self._store_id @store_id.setter def store_id(self, value: int) -> None: if self._store_id is value: return notification = ChangeNotification( item=self, property_name='store_id', old_value=self._store_id, new_value=value ) self._store_id = value self._notify(notification=notification) class TargetInstance(GraphEntity): @classmethod def FromAssembly(cls, assembly: Target) -> 'TargetInstance': instance = cls() instance._target_id = assembly.id instance.target = assembly instance.display_name = assembly.display_name return instance def __init__(self): super(TargetInstance, self).__init__() self._target_id: str = '' self._target: typing.Union[Target, typing.NoReturn] = None self._is_setting_target = False def serialize(self) -> dict: super(TargetInstance, self).serialize() output = GraphEntity.serialize(self) output['_target_id'] = self._target_id output['_inputs'] = [] output['_outputs'] = [] return output def deserialize(self, data: dict) -> None: """ Does not invoke super on DagNode base class because inputs and outputs are derived from assembly instance. """ data['_inputs'] = [] data['_outputs'] = [] GraphEntity.deserialize(self, data=data) self._target_id = data['_target_id'] if '_target_id' in data.keys() else '' def invalidate(self, plug: 'Plug' = None): """ Invalidate any plug that is a destination of an output plug named plug.name. 
""" # If a destination is invalidated it is assumed compute will be invoked once a destination endpoint has been found do_compute = True output: Plug destination: Plug for output in self.outputs: if not plug or output.name == plug.name: for destination in output.outputs: destination.invalidate() do_compute = False if do_compute: self.compute() @property def target_id(self) -> str: return self._target_id @property def target(self) -> typing.Union[Target, typing.NoReturn]: return self._target @target.setter def target(self, value: typing.Union[Target, typing.NoReturn]) -> None: if self._target is value: return if not self._target_id and value: raise Exception('Target ID "" does not match assembly instance "{0}".'.format(value.id)) if self._target_id and not value: raise Exception('Target ID "{0}" does not match assembly instance "None".'.format(self._target_id)) if self._target_id and value and not self._target_id == value.id: raise Exception('Target ID "{0}" does not match assembly instance "{1}".'.format(self._target_id, value.id)) self._is_setting_target = True notification = ChangeNotification( item=self, property_name='target', old_value=self._target, new_value=value ) self._target = value self._inputs = [] self._outputs = [] if self._target: node_id_plug = Plug.Create( parent=self, name='node_id_output', display_name='Node Id', value_type=Plug.VALUE_TYPE_STRING ) node_id_plug._id = self._target.id node_id_plug.value = self._target.id self._outputs.append(node_id_plug) for node in self._target.nodes: for o in node.inputs: plug = Plug(parent=self) plug.deserialize(data=o.serialize()) self._inputs.append(plug) for o in node.outputs: plug = Plug(parent=self) plug.deserialize(data=o.serialize()) self._outputs.append(plug) self._is_setting_target = False self._notify(notification=notification) self.invalidate() class Operator(Base): def __init__( self, id: str, name: str, required_inputs: int, min_inputs: int, max_inputs: int, num_outputs: int, ): super(Operator, 
self).__init__() self._id = id self._name: str = name self._required_inputs: int = required_inputs self._min_inputs: int = min_inputs self._max_inputs: int = max_inputs self._num_outputs: int = num_outputs self._computing: bool = False def compute(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): """ Base class only computes input_plugs. It is assumed that extending class computes output plugs. """ if self._computing: return self._computing = True if len(input_plugs) < self._required_inputs: raise Exception('Array of inputs not of required length "{0}". Actual length "{1}". Operator: "{2}"'.format(self._required_inputs, len(input_plugs), self.__class__.__name__)) for plug in input_plugs: if plug.input: if plug.input in input_plugs: print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.') print('\tInput {0}.{1} is dependent on another input on the same node.'.format(plug.parent.display_name, plug.name)) print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name)) print('\tThis is not supported.') print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.') self._computing = False return if plug.input in output_plugs: print('WARNING: Universal Material Map: Invalid state in compute graph. Compute cancelled.') print('\tInput {0}.{1} is dependent on another output on the same node.'.format( plug.parent.display_name, plug.name)) print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name)) print('\tThis is not supported.') print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.') self._computing = False return for plug in output_plugs: if plug.input: if plug.input in output_plugs: print('WARNING: Universal Material Map: Invalid state in compute graph. 
Compute cancelled.') print('\tInput {0}.{1} is dependent on another output on the same node.'.format( plug.parent.display_name, plug.name)) print('\tDependency: {0}.{1}'.format(plug.input.parent.display_name, plug.input.name)) print('\tThis is not supported.') print('\tComputations likely to not behave as expected. It is recommended you restart the solution using this data.') self._computing = False return self._compute_inputs(input_plugs=input_plugs) self._compute_outputs(input_plugs=input_plugs, output_plugs=output_plugs) self._computing = False def _compute_inputs(self, input_plugs: typing.List[Plug]): # Compute dependencies for plug in input_plugs: if not plug.input: continue if not plug.input.parent: continue if not plug.input.is_invalid: continue plug.input.parent.compute() # Set computed_value for plug in input_plugs: if plug.input: plug.computed_value = plug.input.computed_value else: plug.computed_value = plug.value def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): raise NotImplementedError(self.__class__) def generate_input(self, parent: 'DagNode', index: int) -> Plug: """ Base class provides method template but does nothing. """ pass def generate_output(self, parent: 'DagNode', index: int) -> Plug: """ Base class provides method template but does nothing. 
""" pass def test(self) -> None: parent = OperatorInstance() inputs = [] while len(inputs) < self.min_inputs: inputs.append( self.generate_input(parent=parent, index=len(inputs)) ) outputs = [] while len(outputs) < self.num_outputs: outputs.append( self.generate_output(parent=parent, index=len(outputs)) ) self._prepare_plugs_for_test(input_plugs=inputs, output_plugs=outputs) self._perform_test(input_plugs=inputs, output_plugs=outputs) self._assert_test(input_plugs=inputs, output_plugs=outputs) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): pass def _perform_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): self.compute(input_plugs=input_plugs, output_plugs=output_plugs) def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): raise NotImplementedError() def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None: if not plug.is_removable: raise Exception('Plug is not removable') notifications = [] if plug in operator_instance._inputs: old_value = operator_instance._inputs[:] operator_instance._inputs.remove(plug) operator_instance._unsubscribe(notifying=plug) notifications.append( ChangeNotification( item=operator_instance, property_name='inputs', old_value=old_value, new_value=operator_instance._inputs[:] ) ) if plug in operator_instance._outputs: old_value = operator_instance._outputs[:] operator_instance._outputs.remove(plug) operator_instance._unsubscribe(notifying=plug) notifications.append( ChangeNotification( item=operator_instance, property_name='outputs', old_value=old_value, new_value=operator_instance._outputs[:] ) ) destination: Plug for destination in plug.outputs: destination.input = None for notification in notifications: for callback in operator_instance._changed_callbacks.values(): callback(notification) @property def name(self) -> str: return self._name @property def min_inputs(self) -> int: return self._min_inputs 
@property
    def max_inputs(self) -> int:
        return self._max_inputs

    @property
    def required_inputs(self) -> int:
        return self._required_inputs

    @property
    def num_outputs(self) -> int:
        return self._num_outputs


class GraphOutput(Operator):
    """
    Output resolves to a node id.
    """
    def __init__(self):
        # Fixed operator identity: exactly one node-id input, one node-id output.
        super(GraphOutput, self).__init__(
            id='5f39ab48-5bee-46fe-9a22-0f678013568e',
            name='Graph Output',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input_node_id', display_name='Node Id', value_type=Plug.VALUE_TYPE_NODE_ID)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output_node_id', display_name='Node Id', value_type=Plug.VALUE_TYPE_NODE_ID)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Pass-through: output mirrors the (already computed) input.
        output_plugs[0].computed_value = input_plugs[0].computed_value

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        input_plugs[0].computed_value = self.id

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for output in output_plugs:
            if not output.computed_value == self.id:
                raise Exception('Test failed.')


class OperatorInstance(GraphEntity):
    """A graph node wrapping an Operator; plugs are generated by the operator."""

    @classmethod
    def FromOperator(cls, operator: Operator) -> 'OperatorInstance':
        """Factory: instantiate with min_inputs/num_outputs plugs generated by *operator*."""
        instance = OperatorInstance()
        instance._is_deserializing = True
        instance._operator = operator
        instance._display_name = operator.name
        while len(instance._inputs) < operator.min_inputs:
            instance._inputs.append(
                operator.generate_input(parent=instance, index=len(instance._inputs))
            )
        while len(instance._outputs) < operator.num_outputs:
            instance._outputs.append(
                operator.generate_output(parent=instance,
index=len(instance._outputs))  # (completes the generate_output call begun in the previous chunk)
            )
        # Persisted so deserialize can re-import and re-instantiate the operator class.
        instance._operator_module = operator.__class__.__module__
        instance._operator_class_name = operator.__class__.__name__
        instance._is_deserializing = False
        instance.invalidate()
        return instance

    def __init__(self):
        super(OperatorInstance, self).__init__()
        self._description: str = ''
        self._operator_module: str = ''
        self._operator_class_name: str = ''
        self._operator: Operator = None
        self._is_deserializing = False

    def serialize(self) -> dict:
        output = super(OperatorInstance, self).serialize()
        output['_description'] = self._description
        output['_operator_module'] = self._operator_module
        output['_operator_class_name'] = self._operator_class_name
        return output

    def deserialize(self, data: dict) -> None:
        """Restore state and re-create the wrapped Operator by dynamic import."""
        self._is_deserializing = True
        super(OperatorInstance, self).deserialize(data=data)
        self._description = data['_description'] if '_description' in data.keys() else ''
        self._operator_module = data['_operator_module'] if '_operator_module' in data.keys() else ''
        self._operator_class_name = data['_operator_class_name'] if '_operator_class_name' in data.keys() else ''
        if not self._operator_module:
            raise Exception('Unexpected data: no valid "operator module" defined')
        if not self._operator_class_name:
            raise Exception('Unexpected data: no valid "operator class name" defined')
        # Import the operator's module on demand, then instantiate its class by name.
        if self._operator_module not in sys.modules.keys():
            importlib.import_module(self._operator_module)
        module_pointer = sys.modules[self._operator_module]
        class_pointer = module_pointer.__dict__[self._operator_class_name]
        self._operator = typing.cast(Operator, class_pointer())
        # Top up plugs to the operator's minimums (serialized data may predate new plugs).
        notifying = []
        while len(self._inputs) < self._operator.min_inputs:
            plug = self._operator.generate_input(parent=self, index=len(self._inputs))
            self._inputs.append(plug)
            notifying.append(plug)
        while len(self._outputs) < self._operator.num_outputs:
            plug = self._operator.generate_output(parent=self, index=len(self._outputs))
            self._outputs.append(plug)
            notifying.append(plug)
        self._is_deserializing = False
        for o in
notifying:  # (completes the `for o in` loop begun in the previous chunk)
            self._subscribe(notifying=o)
        self.invalidate()

    def invalidate(self, plug: 'Plug' = None):
        """
        Because one plug changed we assume any connected plug to any output needs to be invalidated.
        """
        if self._is_deserializing:
            return
        # Set all outputs to invalid
        output: Plug
        for output in self.outputs:
            output._is_invalid = True
        # If a destination is invalidated it is assumed compute will be invoked once a destination endpoint has been found
        do_compute = True
        destination: Plug
        for output in self.outputs:
            for destination in output.outputs:
                destination.invalidate()
                do_compute = False
        if do_compute:
            self.compute()

    def compute(self) -> None:
        # Delegates to the wrapped operator, if any.
        if self._operator:
            self._operator.compute(input_plugs=self._inputs, output_plugs=self._outputs)

    def add_input(self) -> Plug:
        """Append an operator-generated input plug and notify listeners."""
        if not self.can_add_input:
            raise Exception('Cannot add another input.')
        old_value = self._inputs[:]
        plug = self._operator.generate_input(parent=self, index=len(self._inputs))
        self._inputs.append(plug)
        self._subscribe(notifying=plug)
        notification = ChangeNotification(
            item=self,
            property_name='inputs',
            old_value=old_value,
            new_value=self._inputs[:]
        )
        self._notify(notification=notification)
        for o in self.outputs:
            o.invalidate()
        return plug

    def remove_plug(self, plug: 'Plug') -> None:
        self._operator.remove_plug(operator_instance=self, plug=plug)

    @property
    def operator(self) -> Operator:
        return self._operator

    @property
    def description(self) -> str:
        return self._description

    @description.setter
    def description(self, value: str) -> None:
        if self._description is value:
            return
        notification = ChangeNotification(
            item=self,
            property_name='description',
            old_value=self._description,
            new_value=value
        )
        self._description = value
        self._notify(notification=notification)

    @DagNode.can_add_input.getter
    def can_add_input(self) -> bool:
        # -1 means unbounded.
        if self._operator.max_inputs == -1:
            return True
        # NOTE(review): `max_inputs - 1` caps inputs one below max_inputs — looks like an
        # off-by-one; confirm against callers before changing.
        return len(self._inputs) < self._operator.max_inputs - 1


class StyleInfo(object):
    """Immutable bundle of UI styling values (colors, footer icon) for a node category."""

    def __init__(
            self,
            name: str,
            background_color: int,
            border_color:
int,  # (completes the StyleInfo.__init__ signature begun in the previous chunk)
            connection_color: int,
            node_background_color: int,
            footer_icon_filename: str,
    ):
        super(StyleInfo, self).__init__()
        self._name: str = name
        self._background_color: int = background_color
        self._border_color: int = border_color
        self._connection_color: int = connection_color
        self._node_background_color: int = node_background_color
        self._footer_icon_filename: str = footer_icon_filename

    @property
    def name(self) -> str:
        return self._name

    @property
    def background_color(self) -> int:
        return self._background_color

    @property
    def border_color(self) -> int:
        return self._border_color

    @property
    def connection_color(self) -> int:
        return self._connection_color

    @property
    def node_background_color(self) -> int:
        return self._node_background_color

    @property
    def footer_icon_filename(self) -> str:
        return self._footer_icon_filename


class ConversionGraph(Base):
    """A source node, target instances and operator instances wired into one conversion graph."""

    # Colors are 0xAARRGGBB ints consumed by the graph UI.
    # STYLE_OUTPUT: StyleInfo = StyleInfo(
    #     name='output',
    #     background_color=0xFF2E2E2E,
    #     border_color=0xFFB97E9C,
    #     connection_color=0xFF80C26F,
    #     node_background_color=0xFF444444,
    #     footer_icon_filename='Material.svg'
    # )
    STYLE_SOURCE_NODE: StyleInfo = StyleInfo(
        name='source_node',
        background_color=0xFF2E2E2E,
        border_color=0xFFE5AAC8,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_ASSEMBLY_REFERENCE: StyleInfo = StyleInfo(
        name='assembly_reference',
        background_color=0xFF2E2E2E,
        border_color=0xFFB97E9C,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='Material.svg'
    )
    STYLE_OPERATOR_INSTANCE: StyleInfo = StyleInfo(
        name='operator_instance',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_VALUE_RESOLVER: StyleInfo = StyleInfo(
        name='value_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='value_resolver.svg'
    )
    STYLE_BOOLEAN_SWITCH: StyleInfo = StyleInfo(
        name='boolean_switch',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='boolean_switch.svg'
    )
    STYLE_CONSTANT_BOOLEAN: StyleInfo = StyleInfo(
        name='constant_boolean',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_boolean.svg'
    )
    STYLE_CONSTANT_COLOR: StyleInfo = StyleInfo(
        name='constant_color',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_color.svg'
    )
    STYLE_CONSTANT_FLOAT: StyleInfo = StyleInfo(
        name='constant_float',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_float.svg'
    )
    STYLE_CONSTANT_INTEGER: StyleInfo = StyleInfo(
        name='constant_integer',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_integer.svg'
    )
    STYLE_CONSTANT_STRING: StyleInfo = StyleInfo(
        name='constant_string',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='constant_string.svg'
    )
    STYLE_EQUAL: StyleInfo = StyleInfo(
        name='equal',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='equal.svg'
    )
    STYLE_GREATER_THAN: StyleInfo = StyleInfo(
        name='greater_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='greater_than.svg'
    )
    STYLE_LESS_THAN: StyleInfo = StyleInfo(
        name='less_than',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='less_than.svg'
    )
    STYLE_MERGE_RGB: StyleInfo = StyleInfo(
        name='merge_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='merge_rgb.svg'
    )
    STYLE_NOT: StyleInfo = StyleInfo(
        name='not',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='not.svg'
    )
    STYLE_OR: StyleInfo = StyleInfo(
        name='or',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='or.svg'
    )
    STYLE_SPLIT_RGB: StyleInfo = StyleInfo(
        name='split_rgb',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='split_rgb.svg'
    )
    STYLE_TRANSPARENCY_RESOLVER: StyleInfo = StyleInfo(
        name='transparency_resolver',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='transparency_resolver.svg'
    )
    STYLE_OUTPUT: StyleInfo = StyleInfo(
        name='output',
        background_color=0xFF34302A,
        border_color=0xFFCD923A,
        connection_color=0xFF80C26F,
        node_background_color=0xFF444444,
        footer_icon_filename='output.svg'
    )
    STYLE_INFOS = (
        STYLE_OUTPUT,
        STYLE_SOURCE_NODE,
        STYLE_ASSEMBLY_REFERENCE,
        STYLE_OPERATOR_INSTANCE,
        STYLE_VALUE_RESOLVER,
        STYLE_BOOLEAN_SWITCH,
        STYLE_CONSTANT_BOOLEAN,
        STYLE_CONSTANT_COLOR,
        STYLE_CONSTANT_FLOAT,
        STYLE_CONSTANT_INTEGER,
        STYLE_CONSTANT_STRING,
        STYLE_EQUAL,
        STYLE_GREATER_THAN,
        STYLE_LESS_THAN,
        STYLE_NOT,
        STYLE_OR,
        STYLE_SPLIT_RGB,
        STYLE_TRANSPARENCY_RESOLVER,
        STYLE_MERGE_RGB,
    )

    def __init__(self):
        super(ConversionGraph, self).__init__()
        # The graph always owns exactly one GraphOutput operator instance.
        self._graph_output: OperatorInstance = OperatorInstance.FromOperator(operator=GraphOutput())
        self._target_instances: typing.List[TargetInstance] = []
        self._operator_instances: typing.List[OperatorInstance] = [self._graph_output]
self._connections: typing.List[Connection] = []  # (continuation of ConversionGraph.__init__)
        self._library: Library = None
        self._source_node_id: str = ''
        self._source_node: TargetInstance = None
        self._filename: str = ''
        self._exists_on_disk: bool = False
        self._revision: int = 0

    def _on_notification(self, notification: ChangeNotification) -> None:
        if notification.item == self:
            return
        # Re-broadcast notification
        self._notify(notification=notification)

    def serialize(self) -> dict:
        output = super(ConversionGraph, self).serialize()
        output['_target_instances'] = [o.serialize() for o in self._target_instances]
        output['_operator_instances'] = [o.serialize() for o in self._operator_instances]
        output['_connections'] = [o.serialize() for o in self._connections]
        output['_source_node_id'] = self._source_node_id
        output['_revision'] = self._revision
        return output

    def deserialize(self, data: dict) -> None:
        """Restore the graph; collects ChangeNotifications and fires them all at the end."""
        super(ConversionGraph, self).deserialize(data=data)
        notifications = []
        # _source_node_id
        old = self._source_node_id
        new = data['_source_node_id'] if '_source_node_id' in data.keys() else ''
        if not old == new:
            self._source_node_id = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='source_node_id',
                    old_value=old,
                    new_value=new
                )
            )
        # _revision
        old = self._revision
        new = data['_revision'] if '_revision' in data.keys() else 0
        if not old == new:
            self._revision = new
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='revision',
                    old_value=old,
                    new_value=new
                )
            )
        # _target_instances: unsubscribe the old ones while draining the list.
        old = self._target_instances[:]
        while len(self._target_instances):
            self._unsubscribe(notifying=self._target_instances.pop())
        items = []
        if '_target_instances' in data.keys():
            for o in data['_target_instances']:
                item = TargetInstance()
                item.deserialize(data=o)
                items.append(item)
        self._target_instances = items
        if not self._target_instances == old:
            notifications.append(
                ChangeNotification(
                    item=self,
                    property_name='target_instances',
                    old_value=old,
                    new_value=self._target_instances
                )
            )
        # _source_node
        old =
self._source_node source_node = None if self._source_node_id: items = [o for o in self._target_instances if o.id == self._source_node_id] source_node = items[0] if len(items) else None self._source_node = source_node if not self._source_node == old: notifications.append( ChangeNotification( item=self, property_name='source_node', old_value=old, new_value=self._source_node ) ) # _operator_instances # _graph_output old_operator_instances = self._operator_instances old_graph_output = self._graph_output items = [] self._graph_output = None if '_operator_instances' in data.keys(): for o in data['_operator_instances']: item = OperatorInstance() item.deserialize(data=o) items.append(item) if isinstance(item.operator, GraphOutput): self._graph_output = item if not self._graph_output: self._graph_output = OperatorInstance.FromOperator(operator=GraphOutput()) items.insert(0, self._graph_output) self._operator_instances = items if not self._operator_instances == old_operator_instances: notifications.append( ChangeNotification( item=self, property_name='operator_instances', old_value=old_operator_instances, new_value=self._operator_instances ) ) if not self._graph_output == old_graph_output: notifications.append( ChangeNotification( item=self, property_name='old_graph_output', old_value=old_operator_instances, new_value=self._graph_output ) ) items = [] if '_connections' in data.keys(): for o in data['_connections']: item = Connection() item.deserialize(data=o) items.append(item) self._connections = items for o in self._target_instances: self._subscribe(notifying=o) for o in self._operator_instances: self._subscribe(notifying=o) for o in notifications: self._notify(notification=o) def build_dag(self) -> None: for connection in self._connections: source = self._get_plug(plug_id=connection.source_id) destination = self._get_plug(plug_id=connection.destination_id) if not source or not destination: continue if destination not in source.outputs: source.outputs.append(destination) 
destination.input = source  # (completes the build_dag loop body from the previous chunk)

    def _get_plug(self, plug_id: str) -> typing.Union[Plug, typing.NoReturn]:
        """Linear search for a plug by id across all target and operator instances."""
        for assembly_reference in self._target_instances:
            for plug in assembly_reference.inputs:
                if plug.id == plug_id:
                    return plug
            for plug in assembly_reference.outputs:
                if plug.id == plug_id:
                    return plug
        for operator_instance in self._operator_instances:
            for plug in operator_instance.outputs:
                if plug.id == plug_id:
                    return plug
            for plug in operator_instance.inputs:
                if plug.id == plug_id:
                    return plug
        return None

    def add_node(self, node: OperatorInstance) -> None:
        self._operator_instances.append(node)

    def add_connection(self, source: Plug, destination: Plug) -> None:
        """Record a connection and wire the plugs immediately."""
        connection = Connection()
        connection._source_id = source.id
        connection._destination_id = destination.id
        self._connections.append(connection)
        if destination not in source.outputs:
            source.outputs.append(destination)
        destination.input = source

    def add(self, entity: GraphEntity) -> None:
        """Add a TargetInstance or OperatorInstance (idempotent) and subscribe to it."""
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                return
            self._target_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                return
            self._operator_instances.append(entity)
            self._subscribe(notifying=entity)
            return
        raise NotImplementedError()

    def can_be_removed(self, entity: GraphEntity) -> bool:
        # The graph output node must never be removed.
        if not entity:
            return False
        if entity not in self._target_instances and entity not in self._operator_instances:
            return False
        if entity == self._graph_output:
            return False
        return True

    def remove(self, entity: GraphEntity) -> None:
        """Remove *entity* and every connection that references it."""
        if not self.can_be_removed(entity=entity):
            raise Exception('Not allowed: entity is not allowed to be deleted.')
        if isinstance(entity, TargetInstance):
            if entity in self._target_instances:
                self._unsubscribe(notifying=entity)
                self._target_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
to_remove.append(connection)  # (completes the connection scan begun in the previous chunk)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        if isinstance(entity, OperatorInstance):
            if entity in self._operator_instances:
                self._unsubscribe(notifying=entity)
                self._operator_instances.remove(entity)
            to_remove = []
            for connection in self._connections:
                if connection.source_id == entity.id or connection.destination_id == entity.id:
                    to_remove.append(connection)
            for connection in to_remove:
                self.remove_connection(connection=connection)
            return
        raise NotImplementedError()

    def remove_connection(self, connection: Connection) -> None:
        """Drop the connection record and un-wire the plug objects it linked."""
        if connection in self._connections:
            self._connections.remove(connection)
        source = self._get_plug(plug_id=connection.source_id)
        destination = self._get_plug(plug_id=connection.destination_id)
        if source and destination:
            if destination in source.outputs:
                source.outputs.remove(destination)
            if destination.input == source:
                destination.input = None

    def get_entity_by_id(self, identifier: str) -> typing.Union[GraphEntity, typing.NoReturn]:
        """Look up a target instance or operator instance by id; None if absent."""
        entities = [entity for entity in self._target_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        entities = [entity for entity in self._operator_instances if entity.id == identifier]
        if len(entities):
            return entities[0]
        return None

    def get_output_entity(self) -> typing.Union[TargetInstance, typing.NoReturn]:
        """
        Computes the dependency graph and returns the resulting Target reference.

        Make sure relevant source node plug values have been set prior to invoking this method.
""" if not self._graph_output: return None self._graph_output.invalidate() assembly_id = self._graph_output.outputs[0].computed_value for item in self._target_instances: if item.target_id == assembly_id: return item return None def get_object_style_name(self, entity: GraphEntity) -> str: if not entity: return '' # TODO: Style computed output entity # if entity == self.get_output_entity(): # return ConversionGraph.STYLE_OUTPUT.name if entity == self.source_node: return ConversionGraph.STYLE_SOURCE_NODE.name if isinstance(entity, TargetInstance): return ConversionGraph.STYLE_ASSEMBLY_REFERENCE.name if isinstance(entity, OperatorInstance): if entity.operator: if entity.operator.__class__.__name__ == 'ConstantBoolean': return ConversionGraph.STYLE_CONSTANT_BOOLEAN.name if entity.operator.__class__.__name__ == 'ConstantColor': return ConversionGraph.STYLE_CONSTANT_COLOR.name if entity.operator.__class__.__name__ == 'ConstantFloat': return ConversionGraph.STYLE_CONSTANT_FLOAT.name if entity.operator.__class__.__name__ == 'ConstantInteger': return ConversionGraph.STYLE_CONSTANT_INTEGER.name if entity.operator.__class__.__name__ == 'ConstantString': return ConversionGraph.STYLE_CONSTANT_STRING.name if entity.operator.__class__.__name__ == 'BooleanSwitch': return ConversionGraph.STYLE_BOOLEAN_SWITCH.name if entity.operator.__class__.__name__ == 'ValueResolver': return ConversionGraph.STYLE_VALUE_RESOLVER.name if entity.operator.__class__.__name__ == 'SplitRGB': return ConversionGraph.STYLE_SPLIT_RGB.name if entity.operator.__class__.__name__ == 'MergeRGB': return ConversionGraph.STYLE_MERGE_RGB.name if entity.operator.__class__.__name__ == 'LessThan': return ConversionGraph.STYLE_LESS_THAN.name if entity.operator.__class__.__name__ == 'GreaterThan': return ConversionGraph.STYLE_GREATER_THAN.name if entity.operator.__class__.__name__ == 'Or': return ConversionGraph.STYLE_OR.name if entity.operator.__class__.__name__ == 'Equal': return ConversionGraph.STYLE_EQUAL.name if 
entity.operator.__class__.__name__ == 'Not': return ConversionGraph.STYLE_NOT.name if entity.operator.__class__.__name__ == 'MayaTransparencyResolver': return ConversionGraph.STYLE_TRANSPARENCY_RESOLVER.name if entity.operator.__class__.__name__ == 'GraphOutput': return ConversionGraph.STYLE_OUTPUT.name return ConversionGraph.STYLE_OPERATOR_INSTANCE.name return '' def get_output_targets(self) -> typing.List[TargetInstance]: return [o for o in self._target_instances if not o == self._source_node] @property def target_instances(self) -> typing.List[TargetInstance]: return self._target_instances[:] @property def operator_instances(self) -> typing.List[OperatorInstance]: return self._operator_instances[:] @property def connections(self) -> typing.List[Connection]: return self._connections[:] @property def filename(self) -> str: return self._filename @filename.setter def filename(self, value: str) -> None: if self._filename is value: return notification = ChangeNotification( item=self, property_name='filename', old_value=self._filename, new_value=value ) self._filename = value self._notify(notification=notification) @property def library(self) -> 'Library': return self._library @property def graph_output(self) -> OperatorInstance: return self._graph_output @property def source_node(self) -> TargetInstance: return self._source_node @source_node.setter def source_node(self, value: TargetInstance) -> None: if self._source_node is value: return node_notification = ChangeNotification( item=self, property_name='source_node', old_value=self._source_node, new_value=value ) node_id_notification = ChangeNotification( item=self, property_name='source_node_id', old_value=self._source_node_id, new_value=value.id if value else '' ) self._source_node = value self._source_node_id = self._source_node.id if self._source_node else '' self._notify(notification=node_notification) self._notify(notification=node_id_notification) @property def exists_on_disk(self) -> bool: return 
self._exists_on_disk @property def revision(self) -> int: return self._revision @revision.setter def revision(self, value: int) -> None: if self._revision is value: return notification = ChangeNotification( item=self, property_name='revision', old_value=self._revision, new_value=value ) self._revision = value self._notify(notification=notification) class FileHeader(Serializable): @classmethod def FromInstance(cls, instance: Serializable) -> 'FileHeader': header = cls() header._module = instance.__class__.__module__ header._class_name = instance.__class__.__name__ return header @classmethod def FromData(cls, data: dict) -> 'FileHeader': if '_module' not in data.keys(): raise Exception('Unexpected data: key "_module" not in dictionary') if '_class_name' not in data.keys(): raise Exception('Unexpected data: key "_class_name" not in dictionary') header = cls() header._module = data['_module'] header._class_name = data['_class_name'] return header def __init__(self): super(FileHeader, self).__init__() self._module = '' self._class_name = '' def serialize(self) -> dict: output = dict() output['_module'] = self._module output['_class_name'] = self._class_name return output @property def module(self) -> str: return self._module @property def class_name(self) -> str: return self._class_name class FileUtility(Serializable): @classmethod def FromInstance(cls, instance: Serializable) -> 'FileUtility': utility = cls() utility._header = FileHeader.FromInstance(instance=instance) utility._content = instance return utility @classmethod def FromData(cls, data: dict) -> 'FileUtility': if '_header' not in data.keys(): raise Exception('Unexpected data: key "_header" not in dictionary') if '_content' not in data.keys(): raise Exception('Unexpected data: key "_content" not in dictionary') utility = cls() utility._header = FileHeader.FromData(data=data['_header']) if utility._header.module not in sys.modules.keys(): importlib.import_module(utility._header.module) module_pointer = 
sys.modules[utility._header.module] class_pointer = module_pointer.__dict__[utility._header.class_name] utility._content = class_pointer() if isinstance(utility._content, Serializable): utility._content.deserialize(data=data['_content']) return utility def __init__(self): super(FileUtility, self).__init__() self._header: FileHeader = None self._content: Serializable = None def serialize(self) -> dict: output = dict() output['_header'] = self._header.serialize() output['_content'] = self._content.serialize() return output def assert_content_serializable(self): data = self.content.serialize() self._assert(data=data) def _assert(self, data: dict): for key, value in data.items(): if isinstance(value, dict): self._assert(data=value) elif isinstance(value, list): for item in value: if isinstance(item, dict): self._assert(data=item) else: print(item) else: print(key, value) @property def header(self) -> FileHeader: return self._header @property def content(self) -> Serializable: return self._content class Library(Base): """ A Library represents a UMM data set. It can contain any of the following types of files: - Settings - Conversion Graph - Target - Conversion Manifest A Library is divided into a "core" and a "user" data set. "core": - Files provided by NVIDIA. - Installed and updated by UMM. - Adding, editing, and deleting files require running in "Developer Mode". - Types: - Conversion Graph - Target - Conversion Manifest "user" - Files created and updated by user. - Types: - Conversion Graph - Target - Conversion Manifest Overrides ./core/Conversion Manifest ...or... each file header has an attribute: source = core, source = user if source == core then it is read-only to users. TARGET: problem with that is what if user needs to update an existing target? ...why would they? ...because they may want to edit property states in the Target... would want their own. CONVERSION GRAPH ...they could just Save As and make a different one. no problem here. 
do need to change the 'source' attribute to 'user' though. CONVERSION MANIFEST 2 files ConversionManifest.json ConversionManifest_user.json (overrides ConversionManifest.json) Limitation: User cannot all together remove a manifest item """ @classmethod def Create( cls, library_id: str, name: str, manifest: IDelegate = None, conversion_graph: IDelegate = None, target: IDelegate = None, settings: IDelegate = None ) -> 'Library': instance = typing.cast(Library, super(Library, cls).Create()) instance._id = library_id instance._name = name instance._manifest = manifest instance._conversion_graph = conversion_graph instance._target = target instance._settings = settings return instance def __init__(self): super(Library, self).__init__() self._name: str = '' self._manifest: typing.Union[IDelegate, typing.NoReturn] = None self._conversion_graph: typing.Union[IDelegate, typing.NoReturn] = None self._target: typing.Union[IDelegate, typing.NoReturn] = None self._settings: typing.Union[IDelegate, typing.NoReturn] = None def serialize(self) -> dict: output = super(Library, self).serialize() output['_name'] = self._name return output def deserialize(self, data: dict) -> None: super(Library, self).deserialize(data=data) self._name = data['_name'] if '_name' in data.keys() else '' @property def name(self) -> str: return self._name @name.setter def name(self, value: str) -> None: self._name = value @property def manifest(self) -> typing.Union[IDelegate, typing.NoReturn]: return self._manifest @property def conversion_graph(self) -> typing.Union[IDelegate, typing.NoReturn]: return self._conversion_graph @property def target(self) -> typing.Union[IDelegate, typing.NoReturn]: return self._target @property def settings(self) -> typing.Union[IDelegate, typing.NoReturn]: return self._settings @property def is_read_only(self) -> bool: return not self._conversion_graph or not self._target or not self._conversion_graph class Settings(Serializable): def __init__(self): super(Settings, 
self).__init__() self._libraries: typing.List[Library] = [] self._store_id = 'Settings.json' self._render_contexts: typing.List[str] = [] def serialize(self) -> dict: output = super(Settings, self).serialize() output['_libraries'] = [o.serialize() for o in self._libraries] output['_render_contexts'] = self._render_contexts return output def deserialize(self, data: dict) -> None: super(Settings, self).deserialize(data=data) items = [] if '_libraries' in data.keys(): for o in data['_libraries']: item = Library() item.deserialize(data=o) items.append(item) self._libraries = items self._render_contexts = data['_render_contexts'] if '_render_contexts' in data.keys() else [] @property def libraries(self) -> typing.List[Library]: return self._libraries @property def store_id(self) -> str: return self._store_id @property def render_contexts(self) -> typing.List[str]: return self._render_contexts class ClassInfo(object): def __init__(self, display_name: str, class_name: str): super(ClassInfo, self).__init__() self._display_name = display_name self._class_name = class_name @property def display_name(self) -> str: return self._display_name @property def class_name(self) -> str: return self._class_name class OmniMDL(object): OMNI_GLASS: ClassInfo = ClassInfo(display_name='Omni Glass', class_name='OmniGlass.mdl|OmniGlass') OMNI_GLASS_OPACITY: ClassInfo = ClassInfo(display_name='Omni Glass Opacity', class_name='OmniGlass_Opacity.mdl|OmniGlass_Opacity') OMNI_PBR: ClassInfo = ClassInfo(display_name='Omni PBR', class_name='OmniPBR.mdl|OmniPBR') OMNI_PBR_CLEAR_COAT: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat', class_name='OmniPBR_ClearCoat.mdl|OmniPBR_ClearCoat') OMNI_PBR_CLEAR_COAT_OPACITY: ClassInfo = ClassInfo(display_name='Omni PBR Clear Coat Opacity', class_name='OmniPBR_ClearCoat_Opacity.mdl|OmniPBR_ClearCoat_Opacity') OMNI_PBR_OPACITY = ClassInfo(display_name='Omni PBR Opacity', class_name='OmniPBR_Opacity.mdl|OmniPBR_Opacity') OMNI_SURFACE: ClassInfo = 
ClassInfo(display_name='OmniSurface', class_name='OmniSurface.mdl|OmniSurface') OMNI_SURFACE_LITE: ClassInfo = ClassInfo(display_name='OmniSurfaceLite', class_name='OmniSurfaceLite.mdl|OmniSurfaceLite') OMNI_SURFACE_UBER: ClassInfo = ClassInfo(display_name='OmniSurfaceUber', class_name='OmniSurfaceUber.mdl|OmniSurfaceUber') class MayaShader(object): LAMBERT: ClassInfo = ClassInfo(display_name='Lambert', class_name='lambert') class ConversionMap(Serializable): @classmethod def Create( cls, render_context: str, application: str, document: ConversionGraph, ) -> 'ConversionMap': if not isinstance(document, ConversionGraph): raise Exception('Argument "document" unexpected class: "{0}"'.format(type(document))) instance = cls() instance._render_context = render_context instance._application = application instance._conversion_graph_id = document.id instance._conversion_graph = document return instance def __init__(self): super(ConversionMap, self).__init__() self._render_context: str = '' self._application: str = '' self._conversion_graph_id: str = '' self._conversion_graph: ConversionGraph = None def __eq__(self, other: 'ConversionMap') -> bool: if not isinstance(other, ConversionMap): return False if not self.render_context == other.render_context: return False if not self.application == other.application: return False if not self.conversion_graph_id == other.conversion_graph_id: return False return True def serialize(self) -> dict: output = super(ConversionMap, self).serialize() output['_render_context'] = self._render_context output['_application'] = self._application output['_conversion_graph_id'] = self._conversion_graph_id return output def deserialize(self, data: dict) -> None: super(ConversionMap, self).deserialize(data=data) self._render_context = data['_render_context'] if '_render_context' in data.keys() else '' self._application = data['_application'] if '_application' in data.keys() else '' self._conversion_graph_id = data['_conversion_graph_id'] if 
'_conversion_graph_id' in data.keys() else '' self._conversion_graph = None @property def render_context(self) -> str: return self._render_context @property def application(self) -> str: return self._application @property def conversion_graph_id(self) -> str: return self._conversion_graph_id @property def conversion_graph(self) -> ConversionGraph: return self._conversion_graph class ConversionManifest(Serializable): def __init__(self): super(ConversionManifest, self).__init__() self._version_major: int = 100 self._version_minor: int = 0 self._conversion_maps: typing.List[ConversionMap] = [] self._store_id = 'ConversionManifest.json' def serialize(self) -> dict: output = super(ConversionManifest, self).serialize() output['_version_major'] = self._version_major output['_version_minor'] = self._version_minor output['_conversion_maps'] = [o.serialize() for o in self._conversion_maps] return output def deserialize(self, data: dict) -> None: super(ConversionManifest, self).deserialize(data=data) self._version_major = data['_version_major'] if '_version_major' in data.keys() else 100 self._version_minor = data['_version_minor'] if '_version_minor' in data.keys() else 0 items = [] if '_conversion_maps' in data.keys(): for o in data['_conversion_maps']: item = ConversionMap() item.deserialize(data=o) items.append(item) self._conversion_maps = items def set_version(self, major: int = 100, minor: int = 0) -> None: self._version_major = major self._version_minor = minor def add( self, render_context: str, application: str, document: ConversionGraph, ) -> ConversionMap: item = ConversionMap.Create( render_context=render_context, application=application, document=document, ) self._conversion_maps.append(item) return item def remove(self, item: ConversionMap) -> None: if item in self._conversion_maps: self._conversion_maps.remove(item) @property def conversion_maps(self) -> typing.List[ConversionMap]: return self._conversion_maps[:] @property def version(self) -> str: return 
'{0}.{1}'.format(self._version_major, self._version_minor) @property def version_major(self) -> int: return self._version_major @property def version_minor(self) -> int: return self._version_minor @property def store_id(self) -> str: return self._store_id
100,965
Python
32.949563
187
0.58241
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/operator.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import sys import typing from .data import Operator, Plug, DagNode, OperatorInstance from . import util class ConstantFloat(Operator): def __init__(self): super(ConstantFloat, self).__init__( id='293c38db-c9b3-4b37-ab02-c4ff6052bcb6', name='Constant Float', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else 0.0 def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='Float', value_type=Plug.VALUE_TYPE_FLOAT, editable=True ) plug.value = 0.0 return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = len(self.id) * 0.3 def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: 
typing.List[Plug]): for output in output_plugs: if not output.computed_value == len(self.id) * 0.3: raise Exception('Test failed.') class ConstantInteger(Operator): def __init__(self): super(ConstantInteger, self).__init__( id='293c38db-c9b3-4b37-ab02-c4ff6052bcb7', name='Constant Integer', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else 0 def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='Integer', value_type=Plug.VALUE_TYPE_INTEGER, editable=True ) plug.value = 0 return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = len(self.id) def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for output in output_plugs: if not output.computed_value == len(self.id): raise Exception('Test failed.') class ConstantBoolean(Operator): def __init__(self): super(ConstantBoolean, self).__init__( id='293c38db-c9b3-4b37-ab02-c4ff6052bcb8', name='Constant Boolean', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else False def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='Boolean', 
value_type=Plug.VALUE_TYPE_BOOLEAN, editable=True ) plug.value = True return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = False def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for output in output_plugs: if output.computed_value: raise Exception('Test failed.') class ConstantString(Operator): def __init__(self): super(ConstantString, self).__init__( id='cb169ec0-5ddb-45eb-98d1-5d09f1ca759g', name='Constant String', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else '' # print('ConstantString._compute_outputs(): output_plugs[0].computed_value', output_plugs[0].computed_value) def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='String', value_type=Plug.VALUE_TYPE_STRING, editable=True ) plug.value = '' plug.default_value = '' return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = self.id def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for output in output_plugs: if not output.computed_value == self.id: raise Exception('Test failed.') class ConstantRGB(Operator): def __init__(self): super(ConstantRGB, self).__init__( id='60f21797-dd62-4b06-9721-53882aa42e81', name='Constant RGB', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: 
typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else (0, 0, 0) def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='Color', value_type=Plug.VALUE_TYPE_VECTOR3, editable=True ) plug.value = (0, 0, 0) return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = (0.1, 0.2, 0.3) def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for output in output_plugs: if not output.computed_value == (0.1, 0.2, 0.3): raise Exception('Test failed.') class ConstantRGBA(Operator): def __init__(self): super(ConstantRGBA, self).__init__( id='0ab39d82-5862-4332-af7a-329200ae1d14', name='Constant RGBA', required_inputs=0, min_inputs=0, max_inputs=0, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].computed_value = output_plugs[0].value if output_plugs[0].value else (0, 0, 0, 0) def generate_input(self, parent: DagNode, index: int) -> Plug: raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='value', display_name='Color', value_type=Plug.VALUE_TYPE_VECTOR4, editable=True ) plug.value = (0, 0, 0, 1) return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output_plugs[0].value = (0.1, 0.2, 0.3, 0.4) def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for 
output in output_plugs: if not output.computed_value == (0.1, 0.2, 0.3, 0.4): raise Exception('Test failed.') class BooleanSwitch(Operator): """ Outputs the value of input 2 if input 1 is TRUE. Otherwise input 3 will be output. Input 1 must be a boolean. Input 2 and 3 can be of any value type. """ def __init__(self): super(BooleanSwitch, self).__init__( id='a628ab13-f19f-45b3-81cf-6824dd6e7b5d', name='Boolean Switch', required_inputs=3, min_inputs=3, max_inputs=3, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): debug = False value = None if debug: print('BooleanSwitch') print('\tinput_plugs[0].input:', input_plugs[0].input) if input_plugs[0].input is not None: if debug: print('\tinput_plugs[0].input.computed_value:', input_plugs[0].input.computed_value) print('\tinput_plugs[1].input:', input_plugs[1].input) if input_plugs[1].input is not None: print('\tinput_plugs[1].input.computed_value:', input_plugs[1].input.computed_value) print('\tinput_plugs[2].input:', input_plugs[2].input) if input_plugs[2].input is not None: print('\tinput_plugs[2].input.computed_value:', input_plugs[2].input.computed_value) if input_plugs[0].input.computed_value: value = input_plugs[1].input.computed_value if input_plugs[1].input is not None else False else: value = input_plugs[2].input.computed_value if input_plugs[2].input is not None else False elif debug: print('\tskipping evaluating inputs') if debug: print('\tvalue:', value) print('\toutput_plugs[0].computed_value is value', output_plugs[0].computed_value is value) output_plugs[0].computed_value = value if value is not None else False def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create(parent=parent, name='input_boolean', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN) plug.value = False return plug if index == 1: return Plug.Create(parent=parent, name='on_true', display_name='True Output', 
value_type=Plug.VALUE_TYPE_ANY) if index == 2: return Plug.Create(parent=parent, name='on_false', display_name='False Output', value_type=Plug.VALUE_TYPE_ANY) raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY) plug.value = False return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): fake = OperatorInstance.FromOperator(operator=ConstantBoolean()) fake.outputs[0].value = True input_plugs[0].input = fake.outputs[0] fake = OperatorInstance.FromOperator(operator=ConstantString()) fake.outputs[0].value = 'Input 1 value' input_plugs[1].input = fake.outputs[0] fake = OperatorInstance.FromOperator(operator=ConstantString()) fake.outputs[0].value = 'Input 2 value' input_plugs[2].input = fake.outputs[0] def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): for output in output_plugs: if not output.computed_value == 'Input 1 value': raise Exception('Test failed.') class SplitRGB(Operator): def __init__(self): super(SplitRGB, self).__init__( id='1cbcf8c6-328c-49b6-b4fc-d16fd78d4868', name='Split RGB', required_inputs=1, min_inputs=1, max_inputs=1, num_outputs=3 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if input_plugs[0].input is None: output_plugs[0].computed_value = 0 output_plugs[1].computed_value = 0 output_plugs[2].computed_value = 0 else: value = input_plugs[0].input.computed_value try: test = iter(value) is_iterable = True except TypeError: is_iterable = False if is_iterable and len(value) == 3: output_plugs[0].computed_value = value[0] output_plugs[1].computed_value = value[1] output_plugs[2].computed_value = value[2] else: output_plugs[0].computed_value = 
output_plugs[0].default_value output_plugs[1].computed_value = output_plugs[1].default_value output_plugs[2].computed_value = output_plugs[2].default_value def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='input_rgb', display_name='RGB', value_type=Plug.VALUE_TYPE_VECTOR3) raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='red', display_name='Red', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug if index == 1: plug = Plug.Create( parent=parent, name='green', display_name='Green', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug if index == 2: plug = Plug.Create( parent=parent, name='blue', display_name='Blue', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): fake = OperatorInstance.FromOperator(operator=ConstantRGB()) fake.outputs[0].value = (0.1, 0.2, 0.3) input_plugs[0].input = fake.outputs[0] def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value == 0.1: raise Exception('Test failed.') if not output_plugs[1].computed_value == 0.2: raise Exception('Test failed.') if not output_plugs[2].computed_value == 0.3: raise Exception('Test failed.') class MergeRGB(Operator): def __init__(self): super(MergeRGB, self).__init__( id='1cbcf8c6-328d-49b6-b4fc-d16fd78d4868', name='Merge RGB', required_inputs=3, min_inputs=3, max_inputs=3, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): rgb = [0.0, 0.0, 0.0] for i in range(3): if input_plugs[i].input is not None: assumed_value_type = 
input_plugs[i].input.value_type if util.to_plug_value_type(value=input_plugs[i].input.computed_value, assumed_value_type=assumed_value_type) == Plug.VALUE_TYPE_FLOAT: rgb[i] = input_plugs[i].input.computed_value output_plugs[0].computed_value = tuple(rgb) def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='input_r', display_name='R', value_type=Plug.VALUE_TYPE_FLOAT) if index == 1: return Plug.Create(parent=parent, name='input_g', display_name='G', value_type=Plug.VALUE_TYPE_FLOAT) if index == 2: return Plug.Create(parent=parent, name='input_B', display_name='B', value_type=Plug.VALUE_TYPE_FLOAT) raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='rgb', display_name='RGB', value_type=Plug.VALUE_TYPE_VECTOR3, editable=False ) plug.value = (0, 0, 0) return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): fake = OperatorInstance.FromOperator(operator=ConstantFloat()) fake.outputs[0].value = 0.1 input_plugs[0].input = fake.outputs[0] fake = OperatorInstance.FromOperator(operator=ConstantFloat()) fake.outputs[0].value = 0.2 input_plugs[1].input = fake.outputs[0] fake = OperatorInstance.FromOperator(operator=ConstantFloat()) fake.outputs[0].value = 0.3 input_plugs[2].input = fake.outputs[0] def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value == (0.1, 0.2, 0.3): raise Exception('Test failed.') class SplitRGBA(Operator): def __init__(self): super(SplitRGBA, self).__init__( id='2c48e13c-2b58-48b9-a3b6-5f977c402b2e', name='Split RGBA', required_inputs=1, min_inputs=1, max_inputs=1, num_outputs=4 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: 
typing.List[Plug]): if input_plugs[0].input is None: output_plugs[0].computed_value = 0 output_plugs[1].computed_value = 0 output_plugs[2].computed_value = 0 output_plugs[3].computed_value = 0 return value = input_plugs[0].input.computed_value try: test = iter(value) is_iterable = True except TypeError: is_iterable = False if is_iterable and len(value) == 4: output_plugs[0].computed_value = value[0] output_plugs[1].computed_value = value[1] output_plugs[2].computed_value = value[2] output_plugs[3].computed_value = value[3] else: output_plugs[0].computed_value = output_plugs[0].default_value output_plugs[1].computed_value = output_plugs[1].default_value output_plugs[2].computed_value = output_plugs[2].default_value output_plugs[3].computed_value = output_plugs[3].default_value def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='input_rgba', display_name='RGBA', value_type=Plug.VALUE_TYPE_VECTOR4) raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='red', display_name='Red', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug if index == 1: plug = Plug.Create( parent=parent, name='green', display_name='Green', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug if index == 2: plug = Plug.Create( parent=parent, name='blue', display_name='Blue', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug if index == 3: plug = Plug.Create( parent=parent, name='alpha', display_name='Alpha', value_type=Plug.VALUE_TYPE_FLOAT, editable=False ) plug.value = 0 return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): fake = OperatorInstance.FromOperator(operator=ConstantRGB()) 
class MergeRGBA(Operator):
    """Merges four float inputs (R, G, B, A) into one RGBA quadruple.

    Unconnected inputs — or inputs whose computed value does not resolve to
    a float — contribute 0.0 to their channel.
    """

    def __init__(self):
        super(MergeRGBA, self).__init__(
            id='92e57f3d-8514-4786-a4ed-2767139a15eb',
            name='Merge RGBA',
            required_inputs=4,
            min_inputs=4,
            max_inputs=4,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        rgba = [0.0, 0.0, 0.0, 0.0]
        for i in range(4):
            if input_plugs[i].input is not None:
                assumed_value_type = input_plugs[i].input.value_type
                # Only accept values that resolve to floats; others leave 0.0.
                if util.to_plug_value_type(value=input_plugs[i].input.computed_value, assumed_value_type=assumed_value_type) == Plug.VALUE_TYPE_FLOAT:
                    rgba[i] = input_plugs[i].input.computed_value
        output_plugs[0].computed_value = tuple(rgba)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input_r', display_name='R', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 1:
            return Plug.Create(parent=parent, name='input_g', display_name='G', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 2:
            return Plug.Create(parent=parent, name='input_b', display_name='B', value_type=Plug.VALUE_TYPE_FLOAT)
        if index == 3:
            return Plug.Create(parent=parent, name='input_a', display_name='A', value_type=Plug.VALUE_TYPE_FLOAT)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='rgba',
                display_name='RGBA',
                # BUGFIX: a 4-component RGBA output must be VECTOR4 (was
                # VECTOR3, inconsistent with the 4-tuple value below and with
                # SplitRGBA's 'input_rgba' VECTOR4 plug).
                value_type=Plug.VALUE_TYPE_VECTOR4,
                editable=False
            )
            plug.value = (0, 0, 0, 0)
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Drive each channel input with a constant float.
        for i, channel_value in enumerate((0.1, 0.2, 0.3, 0.4)):
            fake = OperatorInstance.FromOperator(operator=ConstantFloat())
            fake.outputs[0].value = channel_value
            input_plugs[i].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == (0.1, 0.2, 0.3, 0.4):
            raise Exception('Test failed.')
class LessThan(Operator):
    """Outputs True when the first input compares strictly less than the second."""

    def __init__(self):
        super(LessThan, self).__init__(
            id='996df9bd-08d5-451b-a67c-80d0de7fba32',
            name='Less Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Both operands must be connected; otherwise the comparison is False.
        if input_plugs[0].input is None or input_plugs[1].input is None:
            for output in output_plugs:
                output.computed_value = False
            return
        value = input_plugs[0].input.computed_value
        compare = input_plugs[1].input.computed_value
        result = False
        try:
            result = value < compare
        except Exception:
            # Incomparable values: warn and keep the False default.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is less than "{1}". '
                  'Setting output to "{2}".'.format(value, compare, result))
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        input_specs = {0: ('value', 'Value'), 1: ('comparison', 'Comparison')}
        if index not in input_specs:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug_name, plug_label = input_specs[index]
        return Plug.Create(parent=parent, name=plug_name, display_name=plug_label, value_type=Plug.VALUE_TYPE_FLOAT)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is Less Than', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for plug_index, constant_value in ((0, 0.1), (1, 0.2)):
            fake = OperatorInstance.FromOperator(operator=ConstantFloat())
            fake.outputs[0].value = constant_value
            input_plugs[plug_index].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 0.1 < 0.2 must resolve to True.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class GreaterThan(Operator):
    """Outputs True when the first input compares strictly greater than the second."""

    def __init__(self):
        super(GreaterThan, self).__init__(
            id='1e751c3a-f6cd-43a2-aa72-22cb9d82ad19',
            name='Greater Than',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Both operands must be connected; otherwise the comparison is False.
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        value = input_plugs[0].input.computed_value
        compare = input_plugs[1].input.computed_value
        result = False
        try:
            result = value > compare
        except Exception:
            # Incomparable values: warn and keep the False default.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is greater than "{1}". '
                  'Setting output to "{2}".'.format(value, compare, result))
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        input_specs = {0: ('value', 'Value'), 1: ('comparison', 'Comparison')}
        if index not in input_specs:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug_name, plug_label = input_specs[index]
        return Plug.Create(parent=parent, name=plug_name, display_name=plug_label, value_type=Plug.VALUE_TYPE_FLOAT)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is Greater Than', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for plug_index, constant_value in ((0, 0.1), (1, 0.2)):
            fake = OperatorInstance.FromOperator(operator=ConstantFloat())
            fake.outputs[0].value = constant_value
            input_plugs[plug_index].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # 0.1 > 0.2 must resolve to False.
        if output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Or(Operator):
    """Logical OR of two inputs; unconnected inputs are treated as False.

    Note: when both operands are non-None the raw expression result
    (``value_1 or value_2``) is forwarded, preserving Python's operand-
    returning semantics.
    """

    def __init__(self):
        super(Or, self).__init__(
            id='d0288faf-cb2e-4765-8923-1a368b45f62c',
            name='Or',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        first_source = input_plugs[0].input
        second_source = input_plugs[1].input
        if first_source is None and second_source is None:
            output_plugs[0].computed_value = False
            return
        value_1 = first_source.computed_value if first_source else False
        value_2 = second_source.computed_value if second_source else False
        if value_1 is None and value_2 is None:
            output_plugs[0].computed_value = False
        elif value_1 is None:
            # Only the second operand carries information.
            output_plugs[0].computed_value = bool(value_2)
        elif value_2 is None:
            # Only the first operand carries information.
            output_plugs[0].computed_value = bool(value_1)
        else:
            output_plugs[0].computed_value = value_1 or value_2

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        input_specs = {0: ('value_1', 'Value 1'), 1: ('value_2', 'Value 2')}
        if index not in input_specs:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug_name, plug_label = input_specs[index]
        return Plug.Create(parent=parent, name=plug_name, display_name=plug_label, value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for plug_index, constant_value in ((0, True), (1, False)):
            fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
            fake.outputs[0].value = constant_value
            input_plugs[plug_index].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # True OR False must resolve truthy.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class And(Operator):
    """Logical AND of two inputs; unconnected inputs are treated as False.

    Note: when both operands are non-None the raw expression result
    (``value_1 and value_2``) is forwarded, preserving Python's operand-
    returning semantics.
    """

    def __init__(self):
        super(And, self).__init__(
            id='9c5e4fb9-9948-4075-a7d6-ae9bc04e25b5',
            name='And',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        first_source = input_plugs[0].input
        second_source = input_plugs[1].input
        if first_source is None and second_source is None:
            output_plugs[0].computed_value = False
            return
        value_1 = first_source.computed_value if first_source else False
        value_2 = second_source.computed_value if second_source else False
        if value_1 is None and value_2 is None:
            output_plugs[0].computed_value = False
        elif value_1 is None:
            # Only the second operand carries information.
            output_plugs[0].computed_value = bool(value_2)
        elif value_2 is None:
            # Only the first operand carries information.
            output_plugs[0].computed_value = bool(value_1)
        else:
            output_plugs[0].computed_value = value_1 and value_2

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        input_specs = {0: ('value_1', 'Value 1'), 1: ('value_2', 'Value 2')}
        if index not in input_specs:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug_name, plug_label = input_specs[index]
        return Plug.Create(parent=parent, name=plug_name, display_name=plug_label, value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Is True', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        for plug_index in (0, 1):
            fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
            fake.outputs[0].value = True
            input_plugs[plug_index].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # True AND True must resolve truthy.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Equal(Operator):
    """Outputs True when the two inputs compare equal.

    Two unconnected inputs (or two None values) are considered equal;
    exactly one unconnected input (or one None value) is considered
    not equal. Incomparable values yield False with a warning.
    """

    def __init__(self):
        super(Equal, self).__init__(
            id='fb353972-aebd-4d32-8231-f644f75d322c',
            name='Equal',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if input_plugs[0].input is None and input_plugs[1].input is None:
            output_plugs[0].computed_value = True
            return
        if input_plugs[0].input is None or input_plugs[1].input is None:
            output_plugs[0].computed_value = False
            return
        value_1 = input_plugs[0].input.computed_value
        value_2 = input_plugs[1].input.computed_value
        if value_1 is None and value_2 is None:
            output_plugs[0].computed_value = True
            return
        if value_1 is None or value_2 is None:
            output_plugs[0].computed_value = False
            return
        result = False
        try:
            result = value_1 == value_2
        except Exception:
            # Incomparable values: warn and keep the False default.
            print('WARNING: Universal Material Map: '
                  'unable to compare if "{0}" is equal to "{1}". '
                  'Setting output to "{2}".'.format(
                value_1,
                value_2,
                result
            ))
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='value_1', display_name='Value 1', value_type=Plug.VALUE_TYPE_ANY)
        if index == 1:
            # BUGFIX: this plug was named 'value_1', colliding with index 0;
            # renamed 'value_2' to match the Or/And operators' convention.
            return Plug.Create(parent=parent, name='value_2', display_name='Value 2', value_type=Plug.VALUE_TYPE_ANY)
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='output', display_name='Are Equal', value_type=Plug.VALUE_TYPE_BOOLEAN)
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # Feed the same string constant to both sides.
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[0].input = fake.outputs[0]
        fake = OperatorInstance.FromOperator(operator=ConstantString())
        fake.outputs[0].value = self.id
        input_plugs[1].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class Not(Operator):
    """Logical negation; an unconnected or None input yields False."""

    def __init__(self):
        super(Not, self).__init__(
            id='7b8b67df-ce2e-445c-98b7-36ea695c77e3',
            name='Not',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        if source is None:
            output_plugs[0].computed_value = False
            return
        upstream_value = source.computed_value
        # None is not negated — it simply produces False.
        output_plugs[0].computed_value = False if upstream_value is None else (not upstream_value)

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='value', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Boolean', value_type=Plug.VALUE_TYPE_BOOLEAN)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantBoolean())
        fake.outputs[0].value = False
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # NOT False must resolve truthy.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
class ValueTest(Operator):
    """Pass-through operator: forwards the upstream computed value unchanged."""

    def __init__(self):
        super(ValueTest, self).__init__(
            id='2899f66b-2e8d-467b-98d1-5f590cf98e7a',
            name='Value Test',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        # Unconnected input forwards None; otherwise the upstream value.
        output_plugs[0].computed_value = None if source is None else source.computed_value

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantInteger())
        fake.outputs[0].value = 10
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 10:
            raise Exception('Test failed.')
class ValueResolver(Operator):
    """Routes an untyped input to the output matching its resolved type.

    The output for the resolved type receives the upstream value; every
    other output falls back to its corresponding editable input plug.
    """

    def __init__(self):
        super(ValueResolver, self).__init__(
            id='74306cd0-b668-4a92-9e15-7b23486bd89a',
            name='Value Resolver',
            required_inputs=8,
            min_inputs=8,
            max_inputs=8,
            num_outputs=7
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        source = input_plugs[0].input
        assumed_value_type = source.value_type if source else input_plugs[0].value_type
        computed_value = source.computed_value if source else False
        value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type)
        # (resolved type, output index, fallback input index) — one row per output.
        routing_table = (
            (Plug.VALUE_TYPE_BOOLEAN, 0, 1),
            (Plug.VALUE_TYPE_VECTOR3, 1, 2),
            (Plug.VALUE_TYPE_FLOAT, 2, 3),
            (Plug.VALUE_TYPE_INTEGER, 3, 4),
            (Plug.VALUE_TYPE_STRING, 4, 5),
            (Plug.VALUE_TYPE_VECTOR4, 5, 6),
            (Plug.VALUE_TYPE_LIST, 6, 7),
        )
        for plug_value_type, output_index, fallback_index in routing_table:
            if value_type == plug_value_type:
                output_plugs[output_index].computed_value = computed_value
            else:
                output_plugs[output_index].computed_value = input_plugs[fallback_index].computed_value
        # Fallback inputs are only editable while nothing is connected to them.
        for index, input_plug in enumerate(input_plugs):
            if index == 0:
                continue
            input_plug.is_editable = not input_plug.input
        for output_plug in output_plugs:
            output_plug.is_editable = False

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)
        # (name, label, type, editable, initial value) per fallback input.
        input_specs = {
            1: ('boolean', 'Boolean', Plug.VALUE_TYPE_BOOLEAN, True, False),
            2: ('color', 'Color', Plug.VALUE_TYPE_VECTOR3, True, (0, 0, 0)),
            3: ('float', 'Float', Plug.VALUE_TYPE_FLOAT, True, 0),
            4: ('integer', 'Integer', Plug.VALUE_TYPE_INTEGER, True, 0),
            5: ('string', 'String', Plug.VALUE_TYPE_STRING, True, ''),
            6: ('rgba', 'RGBA', Plug.VALUE_TYPE_VECTOR4, True, (0, 0, 0, 1)),
            7: ('list', 'List', Plug.VALUE_TYPE_LIST, False, []),
        }
        if index not in input_specs:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug_name, plug_label, plug_type, editable, initial_value = input_specs[index]
        plug = Plug.Create(
            parent=parent,
            name=plug_name,
            display_name=plug_label,
            value_type=plug_type,
            editable=editable,
        )
        plug.value = initial_value
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        # (name, label, type, initial value) per typed output.
        output_specs = {
            0: ('boolean', 'Boolean', Plug.VALUE_TYPE_BOOLEAN, False),
            1: ('color', 'Color', Plug.VALUE_TYPE_VECTOR3, (0, 0, 0)),
            2: ('float', 'Float', Plug.VALUE_TYPE_FLOAT, 0),
            3: ('integer', 'Integer', Plug.VALUE_TYPE_INTEGER, 0),
            4: ('string', 'String', Plug.VALUE_TYPE_STRING, ''),
            5: ('rgba', 'RGBA', Plug.VALUE_TYPE_VECTOR4, (0, 0, 0, 1)),
            6: ('list', 'List', Plug.VALUE_TYPE_LIST, []),
        }
        if index not in output_specs:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug_name, plug_label, plug_type, initial_value = output_specs[index]
        plug = Plug.Create(
            parent=parent,
            name=plug_name,
            display_name=plug_label,
            value_type=plug_type,
            editable=False,
        )
        plug.value = initial_value
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantInteger())
        fake.outputs[0].value = 10
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # An integer input must be routed to the integer output (index 3).
        if not output_plugs[3].computed_value == 10:
            raise Exception('Test failed.')
class MayaTransparencyResolver(Operator):
    """Specialty operator based on Maya's transparency attribute.

    The output is True when the input is a non-empty string, when the input
    is a float triple with any component greater than zero, or when the
    input is a float greater than zero. In all other cases the output is
    False.
    """

    def __init__(self):
        super(MayaTransparencyResolver, self).__init__(
            id='2b523832-ac84-4051-9064-6046121dcd48',
            name='Maya Transparency Resolver',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        is_transparent = False
        source = input_plugs[0].input
        assumed_value_type = source.value_type if source else input_plugs[0].value_type
        computed_value = source.computed_value if source else False
        value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type)
        if value_type == Plug.VALUE_TYPE_STRING:
            # Any non-empty string (e.g. a texture path) implies transparency.
            is_transparent = not computed_value == ''
        elif value_type == Plug.VALUE_TYPE_VECTOR3:
            is_transparent = any(component > 0 for component in computed_value)
        elif value_type == Plug.VALUE_TYPE_FLOAT:
            is_transparent = computed_value > 0
        output_plugs[0].computed_value = is_transparent

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY)

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='is_transparent',
            display_name='Is Transparent',
            value_type=Plug.VALUE_TYPE_BOOLEAN,
        )
        plug.value = False
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        fake = OperatorInstance.FromOperator(operator=ConstantRGB())
        fake.outputs[0].value = (0.5, 0.5, 0.5)
        input_plugs[0].input = fake.outputs[0]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # A gray triple has components > 0 and must report transparency.
        if not output_plugs[0].computed_value:
            raise Exception('Test failed.')
""" def __init__(self): super(MayaTransparencyResolver, self).__init__( id='2b523832-ac84-4051-9064-6046121dcd48', name='Maya Transparency Resolver', required_inputs=1, min_inputs=1, max_inputs=1, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): is_transparent = False assumed_value_type = input_plugs[0].input.value_type if input_plugs[0].input else input_plugs[0].value_type computed_value = input_plugs[0].input.computed_value if input_plugs[0].input else False value_type = util.to_plug_value_type(value=computed_value, assumed_value_type=assumed_value_type) if value_type == Plug.VALUE_TYPE_STRING: is_transparent = not computed_value == '' elif value_type == Plug.VALUE_TYPE_VECTOR3: for value in computed_value: if value > 0: is_transparent = True break elif value_type == Plug.VALUE_TYPE_FLOAT: is_transparent = computed_value > 0 output_plugs[0].computed_value = is_transparent def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_ANY) raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='is_transparent', display_name='Is Transparent', value_type=Plug.VALUE_TYPE_BOOLEAN, ) plug.value = False return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): fake = OperatorInstance.FromOperator(operator=ConstantRGB()) fake.outputs[0].value = (0.5, 0.5, 0.5) input_plugs[0].input = fake.outputs[0] def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value: raise Exception('Test failed.') class ListGenerator(Operator): def __init__(self): super(ListGenerator, self).__init__( 
class ListIndex(Operator):
    """Picks one element out of a list input by integer index.

    Out-of-range indices — or a non-iterable first input — produce None.
    """

    def __init__(self):
        super(ListIndex, self).__init__(
            id='e4a81506-fb6b-4729-8273-f68e97f5bc6b',
            name='List Index',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        items = input_plugs[0].computed_value
        chosen_index = input_plugs[1].computed_value
        try:
            iter(items)  # raises TypeError for non-iterables
            if 0 <= chosen_index < len(items):
                result = items[chosen_index]
            else:
                result = None
        except TypeError:
            result = None
        output_plugs[0].computed_value = result

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='index',
                display_name='Index',
                value_type=Plug.VALUE_TYPE_INTEGER,
                editable=True
            )
            plug.computed_value = 0
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='output', display_name='Output', value_type=Plug.VALUE_TYPE_ANY)

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        input_plugs[0].value = ['hello', 'world']
        input_plugs[1].value = 1

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 'world':
            raise Exception('Test failed.')
class MDLColorSpace(Operator):
    """Emits an editable MDL color-space enum ('auto', 'raw', or 'sRGB')."""

    def __init__(self):
        super(MDLColorSpace, self).__init__(
            id='cf0b97c8-fb55-4cf3-8afc-23ebd4a0a6c7',
            name='MDL Color Space',
            required_inputs=0,
            min_inputs=0,
            max_inputs=0,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # The user-chosen value wins; an unset plug falls back to 'auto'.
        chosen = output_plugs[0].value
        output_plugs[0].computed_value = chosen if chosen else 'auto'

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # This operator has no inputs by design.
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug = Plug.Create(
            parent=parent,
            name='color_space',
            display_name='Color Space',
            value_type=Plug.VALUE_TYPE_ENUM,
            editable=True
        )
        plug.enum_values = ['auto', 'raw', 'sRGB']
        plug.default_value = 'auto'
        plug.value = 'auto'
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        output_plugs[0].value = output_plugs[0].enum_values[2]

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == output_plugs[0].enum_values[2]:
            raise Exception('Test failed.')
class MDLTextureResolver(Operator):
    """Bundles a texture file path and an MDL color space into a 2-item list.

    The path comes from the connected input (empty string when unconnected
    or when the upstream value does not resolve to a string); the color
    space comes from the editable enum input.
    """

    def __init__(self):
        super(MDLTextureResolver, self).__init__(
            id='af766adb-cf54-4a8b-a598-44b04fbcf630',
            name='MDL Texture Resolver',
            required_inputs=2,
            min_inputs=2,
            max_inputs=2,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        filepath = input_plugs[0].input.computed_value if input_plugs[0].input else ''
        value_type = util.to_plug_value_type(value=filepath, assumed_value_type=Plug.VALUE_TYPE_STRING)
        # Non-string upstream values are discarded.
        filepath = filepath if value_type == Plug.VALUE_TYPE_STRING else ''
        colorspace = input_plugs[1].computed_value
        output_plugs[0].computed_value = [filepath, colorspace]

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            return Plug.Create(parent=parent, name='input', display_name='Input', value_type=Plug.VALUE_TYPE_STRING)
        if index == 1:
            plug = Plug.Create(
                parent=parent,
                name='color_space',
                display_name='Color Space',
                value_type=Plug.VALUE_TYPE_ENUM,
                editable=True
            )
            plug.enum_values = ['auto', 'raw', 'sRGB']
            plug.default_value = 'auto'
            plug.value = 'auto'
            return plug
        raise Exception('Input index "{0}" not supported.'.format(index))

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index == 0:
            plug = Plug.Create(
                parent=parent,
                name='list',
                display_name='List',
                value_type=Plug.VALUE_TYPE_LIST,
                editable=False,
            )
            plug.default_value = ['', 'auto']
            plug.value = ['', 'auto']
            return plug
        raise Exception('Output index "{0}" not supported.'.format(index))

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        input_plugs[0].value = 'c:/folder/color.png'
        input_plugs[1].value = 'raw'

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        # BUGFIX: this asserted output_plugs[3], but the operator declares
        # num_outputs=1, so index 3 always raised IndexError; the single
        # output lives at index 0. (NOTE(review): whether the expected list
        # matches depends on how Plug propagates .value to .computed_value —
        # confirm against the Plug implementation.)
        if not output_plugs[0].computed_value == ['c:/folder/color.png', 'raw']:
            raise Exception('Test failed.')
class SplitTextureData(Operator):
    """Splits a [texture_path, color_space] pair into two string outputs.

    Anything other than a 2-item sequence of strings falls back to
    ('', 'auto').
    """

    def __init__(self):
        super(SplitTextureData, self).__init__(
            id='6a411798-434c-4ad4-b464-0bd2e78cdcec',
            name='Split Texture Data',
            required_inputs=1,
            min_inputs=1,
            max_inputs=1,
            num_outputs=2
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        is_valid_input = False
        try:
            value = input_plugs[0].computed_value
            iter(value)  # raises TypeError for non-iterables
            if len(value) == 2:
                # basestring only exists on Python 2; pick the right check.
                if sys.version_info.major < 3:
                    string_types = basestring
                else:
                    string_types = str
                if isinstance(value[0], string_types) and isinstance(value[1], string_types):
                    is_valid_input = True
        except TypeError:
            pass
        if is_valid_input:
            output_plugs[0].computed_value = value[0]
            output_plugs[1].computed_value = value[1]
        else:
            output_plugs[0].computed_value = ''
            output_plugs[1].computed_value = 'auto'

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Input index "{0}" not supported.'.format(index))
        plug = Plug.Create(parent=parent, name='list', display_name='List', value_type=Plug.VALUE_TYPE_LIST)
        plug.default_value = ['', 'auto']
        plug.computed_value = ['', 'auto']
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        # (name, label, initial value) for each string output.
        output_specs = {
            0: ('texture_path', 'Texture Path', ''),
            1: ('color_space', 'Color Space', 'auto'),
        }
        if index not in output_specs:
            raise Exception('Output index "{0}" not supported.'.format(index))
        plug_name, plug_label, initial_value = output_specs[index]
        plug = Plug.Create(parent=parent, name=plug_name, display_name=plug_label, value_type=Plug.VALUE_TYPE_STRING)
        plug.default_value = initial_value
        plug.computed_value = initial_value
        return plug

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        input_plugs[0].computed_value = ['hello.png', 'world']

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 'hello.png':
            raise Exception('Test failed.')
        if not output_plugs[1].computed_value == 'world':
            raise Exception('Test failed.')
class Multiply(Operator):
    """Multiplies all numeric inputs together; non-numeric inputs are ignored.

    Fewer than two numeric operands yield 0.
    """

    def __init__(self):
        super(Multiply, self).__init__(
            id='0f5c9828-f582-48aa-b055-c12b91e692a7',
            name='Multiply',
            required_inputs=0,
            min_inputs=2,
            max_inputs=-1,
            num_outputs=1
        )

    def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        factors = [
            input_plug.computed_value
            for input_plug in input_plugs
            if isinstance(input_plug.computed_value, (int, float))
        ]
        if len(factors) < 2:
            output_plugs[0].computed_value = 0
        else:
            product = 1.0
            for factor in factors:
                product *= factor
            output_plugs[0].computed_value = product
        # Inputs stay editable only while nothing is connected to them.
        for input_plug in input_plugs:
            input_plug.is_editable = not input_plug.input

    def generate_input(self, parent: DagNode, index: int) -> Plug:
        # Inputs are positional slots; the first two cannot be removed.
        slot_label = '[{0}]'.format(index)
        plug = Plug.Create(
            parent=parent,
            name=slot_label,
            display_name=slot_label,
            value_type=Plug.VALUE_TYPE_FLOAT,
            editable=True,
            is_removable=index > 1,
        )
        plug.default_value = 1.0
        plug.value = 1.0
        plug.computed_value = 1.0
        return plug

    def generate_output(self, parent: DagNode, index: int) -> Plug:
        if index != 0:
            raise Exception('Output index "{0}" not supported.'.format(index))
        return Plug.Create(parent=parent, name='product', display_name='product', value_type=Plug.VALUE_TYPE_FLOAT)

    def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None:
        super(Multiply, self).remove_plug(operator_instance=operator_instance, plug=plug)
        # Re-label the surviving inputs so positions stay contiguous.
        for slot_index, input_plug in enumerate(operator_instance.inputs):
            slot_label = '[{0}]'.format(slot_index)
            input_plug.name = slot_label
            input_plug.display_name = slot_label
        for output_plug in operator_instance.outputs:
            output_plug.invalidate()

    def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        input_plugs[0].computed_value = 2
        input_plugs[1].computed_value = 2

    def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]):
        if not output_plugs[0].computed_value == 4:
            raise Exception('Test failed.')
input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): input_plugs[0].computed_value = 2 input_plugs[1].computed_value = 2 def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value == 4: raise Exception('Test failed.') class ColorSpaceResolver(Operator): MAPPING = { 'MDL|auto|Blender': 'sRGB', 'MDL|srgb|Blender': 'sRGB', 'MDL|raw|Blender': 'Raw', 'Blender|filmic log|MDL': 'raw', 'Blender|linear|MDL': 'raw', 'Blender|linear aces|MDL': 'raw', 'Blender|non-color|MDL': 'raw', 'Blender|raw|MDL': 'raw', 'Blender|srgb|MDL': 'sRGB', 'Blender|xyz|MDL': 'raw', } DEFAULT = { 'Blender': 'Linear', 'MDL': 'auto', } def __init__(self): super(ColorSpaceResolver, self).__init__( id='c159df8f-a0a2-4300-b897-e8eaa689a901', name='Color Space Resolver', required_inputs=3, min_inputs=3, max_inputs=3, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): color_space = input_plugs[0].computed_value.lower() from_color_space = input_plugs[1].computed_value to_color_space = input_plugs[2].computed_value key = '{0}|{1}|{2}'.format( from_color_space, color_space, to_color_space ) if key in ColorSpaceResolver.MAPPING: output_plugs[0].computed_value = ColorSpaceResolver.MAPPING[key] else: output_plugs[0].computed_value = ColorSpaceResolver.DEFAULT[to_color_space] def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='color_space', display_name='Color Space', value_type=Plug.VALUE_TYPE_STRING, editable=False, is_removable=False, ) plug.default_value = '' plug.computed_value = '' return plug if index == 1: plug = Plug.Create( parent=parent, name='from_color_space', display_name='From', value_type=Plug.VALUE_TYPE_ENUM, editable=True ) plug.enum_values = ['MDL', 'Blender'] plug.default_value = 'MDL' plug.computed_value = 'MDL' return plug if index == 2: plug = Plug.Create( parent=parent, 
name='to_color_space', display_name='To', value_type=Plug.VALUE_TYPE_ENUM, editable=True ) plug.enum_values = ['Blender', 'MDL'] plug.default_value = 'Blender' plug.computed_value = 'Blender' return plug raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create( parent=parent, name='color_space', display_name='Color Space', value_type=Plug.VALUE_TYPE_STRING, editable=False ) plug.default_value = '' plug.computed_value = '' return plug raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): raise NotImplementedError() def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value == output_plugs[0].enum_values[2]: raise Exception('Test failed.') class Add(Operator): def __init__(self): super(Add, self).__init__( id='f2818669-5454-4599-8792-2cb09f055bf9', name='Add', required_inputs=0, min_inputs=2, max_inputs=-1, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output = 0 for input_plug in input_plugs: try: output += input_plug.computed_value except: pass output_plugs[0].computed_value = output def generate_input(self, parent: DagNode, index: int) -> Plug: plug = Plug.Create( parent=parent, name='[{0}]'.format(index), display_name='[{0}]'.format(index), value_type=Plug.VALUE_TYPE_FLOAT, editable=True, is_removable=True, ) plug.default_value = 0.0 plug.computed_value = 0.0 return plug def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='sum', display_name='sum', value_type=Plug.VALUE_TYPE_FLOAT) raise Exception('Output index "{0}" not supported.'.format(index)) def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None: super(Add, 
self).remove_plug(operator_instance=operator_instance, plug=plug) for index, plug in enumerate(operator_instance.inputs): plug.name = '[{0}]'.format(index) plug.display_name = '[{0}]'.format(index) for plug in operator_instance.outputs: plug.invalidate() def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): pass def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): pass class Subtract(Operator): def __init__(self): super(Subtract, self).__init__( id='15f523f3-4e94-43a5-8306-92d07cbfa48c', name='Subtract', required_inputs=0, min_inputs=2, max_inputs=-1, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): output = None for input_plug in input_plugs: try: if output is None: output = input_plug.computed_value else: output -= input_plug.computed_value except: pass output_plugs[0].computed_value = output def generate_input(self, parent: DagNode, index: int) -> Plug: plug = Plug.Create( parent=parent, name='[{0}]'.format(index), display_name='[{0}]'.format(index), value_type=Plug.VALUE_TYPE_FLOAT, editable=True, is_removable=True, ) plug.default_value = 0.0 plug.computed_value = 0.0 return plug def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='difference', display_name='difference', value_type=Plug.VALUE_TYPE_FLOAT) raise Exception('Output index "{0}" not supported.'.format(index)) def remove_plug(self, operator_instance: 'OperatorInstance', plug: 'Plug') -> None: super(Subtract, self).remove_plug(operator_instance=operator_instance, plug=plug) for index, plug in enumerate(operator_instance.inputs): plug.name = '[{0}]'.format(index) plug.display_name = '[{0}]'.format(index) for plug in operator_instance.outputs: plug.invalidate() def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): pass def _assert_test(self, input_plugs: 
typing.List[Plug], output_plugs: typing.List[Plug]): pass class Remap(Operator): def __init__(self): super(Remap, self).__init__( id='2405c02a-facc-47a6-80ef-d35d959b0cd4', name='Remap', required_inputs=5, min_inputs=5, max_inputs=5, num_outputs=1 ) def _compute_outputs(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): result = 0.0 old_value = input_plugs[0].computed_value try: test = iter(old_value) is_iterable = True except TypeError: is_iterable = False if not is_iterable: try: old_min = input_plugs[1].computed_value old_max = input_plugs[2].computed_value new_min = input_plugs[3].computed_value new_max = input_plugs[4].computed_value result = ((old_value - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min except: pass else: result = [] for o in old_value: try: old_min = input_plugs[1].computed_value old_max = input_plugs[2].computed_value new_min = input_plugs[3].computed_value new_max = input_plugs[4].computed_value result.append(((o - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min) except: pass output_plugs[0].computed_value = result def generate_input(self, parent: DagNode, index: int) -> Plug: if index == 0: plug = Plug.Create(parent=parent, name='value', display_name='Value', value_type=Plug.VALUE_TYPE_ANY) plug.default_value = 0 plug.computed_value = 0 return plug if index == 1: plug = Plug.Create(parent=parent, name='old_min', display_name='Old Min', value_type=Plug.VALUE_TYPE_FLOAT) plug.is_editable = True plug.default_value = 0 plug.computed_value = 0 return plug if index == 2: plug = Plug.Create(parent=parent, name='old_max', display_name='Old Max', value_type=Plug.VALUE_TYPE_FLOAT) plug.is_editable = True plug.default_value = 1 plug.computed_value = 1 return plug if index == 3: plug = Plug.Create(parent=parent, name='new_min', display_name='New Min', value_type=Plug.VALUE_TYPE_FLOAT) plug.is_editable = True plug.default_value = 0 plug.computed_value = 0 return plug if index == 4: plug = 
Plug.Create(parent=parent, name='new_max', display_name='New Max', value_type=Plug.VALUE_TYPE_FLOAT) plug.is_editable = True plug.default_value = 10 plug.computed_value = 10 return plug raise Exception('Input index "{0}" not supported.'.format(index)) def generate_output(self, parent: DagNode, index: int) -> Plug: if index == 0: return Plug.Create(parent=parent, name='remapped_value', display_name='Remapped Value', value_type=Plug.VALUE_TYPE_FLOAT) raise Exception('Output index "{0}" not supported.'.format(index)) def _prepare_plugs_for_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): input_plugs[0].computed_value = 0.5 input_plugs[1].computed_value = 0 input_plugs[2].computed_value = 1 input_plugs[3].computed_value = 1 input_plugs[4].computed_value = 0 def _assert_test(self, input_plugs: typing.List[Plug], output_plugs: typing.List[Plug]): if not output_plugs[0].computed_value == 0.5: raise Exception('Test failed.')
77,143
Python
37.765829
150
0.570421
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/generator/util.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import sys
import typing

from ..data import Library, Target
from .core import IGenerator

# Registry of generators; the most recently registered wins (index 0).
__generators: typing.List['IGenerator'] = []


def register(generator: IGenerator) -> None:
    """
    Registers the generator at the top of the internal list - overriding
    previously registered generators - for future queries and processes.

    Return annotation corrected from typing.NoReturn (which means "never
    returns normally") to None.
    """
    generators = getattr(sys.modules[__name__], '__generators')
    if generator not in generators:
        generators.insert(0, generator)


def un_register(generator: IGenerator) -> None:
    """
    Removes the generator from internal list of generators and will ignore it
    for future queries and processes.
    """
    generators = getattr(sys.modules[__name__], '__generators')
    if generator in generators:
        generators.remove(generator)


def can_generate_target(class_name: str) -> bool:
    """Returns True if any registered generator supports the class name."""
    generators = getattr(sys.modules[__name__], '__generators')
    for generator in generators:
        if generator.can_generate_target(class_name=class_name):
            return True
    return False


def generate_target(class_name: str) -> typing.Tuple[Library, Target]:
    """
    Returns the target generated by the first registered generator that
    supports the class name.

    Raises an Exception when no registered generator supports the class name.
    """
    generators = getattr(sys.modules[__name__], '__generators')
    for generator in generators:
        if generator.can_generate_target(class_name=class_name):
            print('UMM using generator "{0}" for class_name "{1}".'.format(generator, class_name))
            return generator.generate_target(class_name=class_name)
    raise Exception('Registered generators does not support action.')


def generate_targets() -> typing.List[typing.Tuple[Library, Target]]:
    """
    Generates targets from all registered workers that are able to.
    """
    targets = []
    generators = getattr(sys.modules[__name__], '__generators')
    for generator in generators:
        if generator.can_generate_targets():
            print('UMM using generator "{0}" for generating targets.'.format(generator))
            targets.extend(generator.generate_targets())
    return targets


def can_generate_target_from_instance(instance: object) -> bool:
    """Returns True if any registered generator supports the instance."""
    generators = getattr(sys.modules[__name__], '__generators')
    for generator in generators:
        if generator.can_generate_target_from_instance(instance=instance):
            return True
    return False


def generate_target_from_instance(instance: object) -> typing.List[typing.Tuple[str, typing.Any]]:
    """
    Returns the target(s) generated by the first registered generator that
    supports the instance.

    NOTE(review): unlike generate_target(), this silently returns None when no
    generator supports the instance - callers must handle that case. (The
    previous docstring, "Generates targets from all registered workers", was
    copied from generate_targets() and did not match the implementation.)
    """
    generators = getattr(sys.modules[__name__], '__generators')
    for generator in generators:
        if generator.can_generate_target_from_instance(instance=instance):
            print('UMM using generator "{0}" for instance "{1}".'.format(generator, instance))
            return generator.generate_target_from_instance(instance=instance)
3,695
Python
40.066666
149
0.696076
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/converter/util.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. """ Convert Queries & Actions ######################### DCC Connectors and other conversion solutions will want to use this module. There are three different conversion strategies available: 1. Source *class* and *data*. The framework finds a suitable conversion template and returns data indicating a *target class* and data for setting its attributes. For example: .. code:: from omni.universalmaterialmap.core.converter import util if util.can_convert_data_to_data( class_name='lambert', render_context='MDL', source_data=[ ('color', 'color_texture.png'), ('normalCamera', 'normal_texture.png') ]): data = util.convert_data_to_data( class_name='lambert', render_context='MDL', source_data=[ ('color', 'color_texture.png'), ('normalCamera', 'normal_texture.png') ] ) ...could return: .. code:: [ ('umm_target_class', 'omnipbr'), ('diffuse_texture', 'color_texture.png'), ('normalmap_texture', 'normal_texture.png'), ] Note that the first value pair :code:`('umm_target_class', 'omnipbr')` indicates the object class that should be used for conversion. All other value pairs indicate attribute names and attribute values. 
Using this strategy puts very little responsibility on the conversion workers to understand assets. They merely have to apply the arguments to a conversion template, compute the internal graph, and spit out the results. It also means that the solution invoking the converter will have to gather the necessary arguments from some object or data source. 2. Source *instance* into conversion data. Here we use an object instance in order to get the same data as in strategy #1 above. For example: .. code:: from omni.universalmaterialmap.core.converter import util if util.can_convert_instance( instance=MyLambertPyNode, render_context='MDL'): data = util.convert_instance_to_data( instance=MyLambertPyNode, render_context='MDL' ) ...could return: .. code:: [ ('umm_target_class', 'omnipbr'), ('diffuse_texture', 'color_texture.png'), ('normalmap_texture', 'normal_texture.png'), ] Note that the first value pair :code:`('umm_target_class', 'omnipbr')` indicates the object class that should be used for conversion. All other value pairs indicate attribute names and attribute values. The advantage here is that the user of the framework can rely on a converter's understanding of objects and attributes. The downside is that there has to be an actual asset or dependency graph loaded. 3. Source *instance* into converted object. In this approach the converter will create a new object and set its properties/attributes based on a conversion template. For example: .. code:: from omni.universalmaterialmap.core.converter import util if util.can_convert_instance( instance=MyLambertPyNode, render_context='MDL'): node = util.convert_instance_to_instance( instance=MyLambertPyNode, render_context='MDL' ) ...could create and return an MDL material in the current Maya scene. Manifest Query ############## Module has methods for querying its conversion capabilities as indicated by library manifests. 
This could be useful when wanting to expose commands for converting assets within a DCC application scene. Note that this API does not require any data or object instance argument. It's a more *general* query. .. code:: from omni.universalmaterialmap.core.converter import util manifest = util.get_conversion_manifest() # Returns data indicating what source class can be converted to a render context. # # Example: # [ # ('lambert', 'MDL'), # ('blinn', 'MDL'), # ] if (my_class_name, 'MDL') in manifest: # Do something """ import sys import typing import traceback from .. import data from .core import ICoreConverter, IDataConverter, IObjectConverter _debug_mode = False __converters: typing.List['ICoreConverter'] = [] TARGET_CLASS_IDENTIFIER = 'umm_target_class' def register(converter: ICoreConverter) -> typing.NoReturn: """ Registers the converter at the top of the internal list - overriding previously registered converters - for future queries and processes. """ converters = getattr(sys.modules[__name__], '__converters') if converter not in converters: if _debug_mode: print('UMM: core.converter.util: Registering converter: "{0}"'.format(converter)) converters.insert(0, converter) elif _debug_mode: print('UMM: core.converter.util: Not registering converter because it is already registered: "{0}"'.format(converter)) def un_register(converter: ICoreConverter) -> typing.NoReturn: """ Removes the converter from internal list of converters and will ignore it for future queries and processes. """ converters = getattr(sys.modules[__name__], '__converters') if converter in converters: if _debug_mode: print('UMM: core.converter.util: un-registering converter: "{0}"'.format(converter)) converters.remove(converter) elif _debug_mode: print('UMM: core.converter.util: Not un-registering converter because it not registered to begin with: "{0}"'.format(converter)) def can_create_instance(class_name: str) -> bool: """ Resolves if a converter can create a node. 
""" converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_create_instance(class_name=class_name): if _debug_mode: print('UMM: core.converter.util: converter can create instance: "{0}"'.format(converter)) return True if _debug_mode: print('UMM: core.converter.util: no converter can create instance.') return False def create_instance(class_name: str) -> object: """ Creates an asset using the first converter in the internal list that supports the class_name. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_create_instance(class_name=class_name): if _debug_mode: print('UMM: core.converter.util: converter creating instance: "{0}"'.format(converter)) return converter.create_instance(class_name=class_name) raise Exception('Registered converters does not support class "{0}".'.format(class_name)) def can_set_plug_value(instance: object, plug: data.Plug) -> bool: """ Resolves if a converter can set the plug's value given the instance and its attributes. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if _debug_mode: print('UMM: core.converter.util: converter can set plug value: "{0}"'.format(converter)) if converter.can_set_plug_value(instance=instance, plug=plug): return True if _debug_mode: print('UMM: core.converter.util: converter cannot set plug value given instance "{0}" and plug "{1}"'.format(instance, plug)) return False def set_plug_value(instance: object, plug: data.Plug) -> typing.NoReturn: """ Sets the plug's value given the value of the instance's attribute named the same as the plug. 
""" converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_set_plug_value(instance=instance, plug=plug): if _debug_mode: print('UMM: core.converter.util: converter setting plug value: "{0}"'.format(converter)) return converter.set_plug_value(instance=instance, plug=plug) raise Exception('Registered converters does not support action.') def can_set_instance_attribute(instance: object, name: str) -> bool: """ Resolves if a converter can set an attribute by the given name on the instance. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if _debug_mode: print('UMM: core.converter.util: converter can set instance attribute: "{0}", "{1}", "{2}"'.format(converter, instance, name)) if converter.can_set_instance_attribute(instance=instance, name=name): return True if _debug_mode: print('UMM: core.converter.util: cannot set instance attribute: "{0}", "{1}"'.format(instance, name)) return False def set_instance_attribute(instance: object, name: str, value: typing.Any) -> typing.NoReturn: """ Sets the named attribute on the instance to the value. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_set_instance_attribute(instance=instance, name=name): if _debug_mode: print('UMM: core.converter.util: converter setting instance attribute: "{0}", "{1}", "{2}", "{3}"'.format(converter, instance, name, value)) return converter.set_instance_attribute(instance=instance, name=name, value=value) raise Exception('Registered converters does not support action.') def can_convert_instance(instance: object, render_context: str) -> bool: """ Resolves if a converter can convert the instance to another object given the render_context. 
""" converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if _debug_mode: print('UMM: core.converter.util: converter can convert instance: "{0}", "{1}", "{2}"'.format(converter, instance, render_context)) if converter.can_convert_instance(instance=instance, render_context=render_context): return True return False def convert_instance_to_instance(instance: object, render_context: str) -> typing.Any: """ Interprets the instance and instantiates another object given the render_context. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_convert_instance(instance=instance, render_context=render_context): if _debug_mode: print('UMM: core.converter.util: converter converting instance: "{0}", "{1}", "{2}"'.format(converter, instance, render_context)) return converter.convert_instance_to_instance(instance=instance, render_context=render_context) raise Exception('Registered converters does not support action.') def can_convert_instance_to_data(instance: object, render_context: str) -> bool: """ Resolves if a converter can convert the instance to another object given the render_context. 
""" try: converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_convert_instance_to_data(instance=instance, render_context=render_context): return True except Exception as error: print('Warning: Universal Material Map: function "can_convert_instance_to_data": Unexpected error:') print('\targument "instance" = "{0}"'.format(instance)) print('\targument "render_context" = "{0}"'.format(render_context)) print('\terror: {0}'.format(error)) print('\tcallstack: {0}'.format(traceback.format_exc())) return False def convert_instance_to_data(instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]: """ Returns a list of key value pairs in tuples. The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class. """ try: converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_convert_instance_to_data(instance=instance, render_context=render_context): result = converter.convert_instance_to_data(instance=instance, render_context=render_context) print('Universal Material Map: convert_instance_to_data({0}, "{1}") generated data:'.format(instance, render_context)) print('\t(') for o in result: print('\t\t{0}'.format(o)) print('\t)') return result except Exception as error: print('Warning: Universal Material Map: function "convert_instance_to_data": Unexpected error:') print('\targument "instance" = "{0}"'.format(instance)) print('\targument "render_context" = "{0}"'.format(render_context)) print('\terror: {0}'.format(error)) print('\tcallstack: {0}'.format(traceback.format_exc())) result = dict() result['umm_notification'] = 'unexpected_error' result['message'] = 'Not able to convert "{0}" for render context "{1}" because there was an unexpected error. 
Details: {2}'.format(instance, render_context, error) return result raise Exception('Registered converters does not support action.') def can_convert_attribute_values(instance: object, render_context: str, destination: object) -> bool: """ Resolves if the instance's attribute values can be converted and set on the destination object's attributes. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_convert_attribute_values(instance=instance, render_context=render_context, destination=destination): return True return False def convert_attribute_values(instance: object, render_context: str, destination: object) -> typing.NoReturn: """ Attribute values are converted and set on the destination object's attributes. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_convert_attribute_values(instance=instance, render_context=render_context, destination=destination): return converter.convert_attribute_values(instance=instance, render_context=render_context, destination=destination) raise Exception('Registered converters does not support action.') def can_convert_data_to_data(class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool: """ Resolves if a converter can convert the given class and source_data to another class and target data. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IDataConverter): if converter.can_convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data): return True return False def convert_data_to_data(class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]: """ Returns a list of key value pairs in tuples. 
The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IDataConverter): if converter.can_convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data): result = converter.convert_data_to_data(class_name=class_name, render_context=render_context, source_data=source_data) print('Universal Material Map: convert_data_to_data("{0}", "{1}") generated data:'.format(class_name, render_context)) print('\t(') for o in result: print('\t\t{0}'.format(o)) print('\t)') return result raise Exception('Registered converters does not support action.') def can_apply_data_to_instance(source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool: """ Resolves if a converter can create one or more instances given the arguments. """ converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance): return True return False def apply_data_to_instance(source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> dict: """ Returns a list of created objects. 
""" try: converters = getattr(sys.modules[__name__], '__converters') for converter in converters: if isinstance(converter, IObjectConverter): if converter.can_apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance): converter.apply_data_to_instance(source_class_name=source_class_name, render_context=render_context, source_data=source_data, instance=instance) print('Universal Material Map: apply_data_to_instance("{0}", "{1}") completed.'.format(instance, render_context)) result = dict() result['umm_notification'] = 'success' result['message'] = 'Material conversion data applied to "{0}".'.format(instance) return result result = dict() result['umm_notification'] = 'incomplete_process' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there is no Conversion Graph for that scenario. No changes were applied to "{2}".'.format(source_class_name, render_context, instance) return result except Exception as error: print('UMM: Unexpected error: {0}'.format(traceback.format_exc())) result = dict() result['umm_notification'] = 'unexpected_error' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there was an unexpected error. Some changes may have been applied to "{2}". Details: {3}'.format(source_class_name, render_context, instance, error) return result def get_conversion_manifest() -> typing.List[typing.Tuple[str, str]]: """ Returns data indicating what source class can be converted to a render context. Example: [('lambert', 'MDL'), ('blinn', 'MDL'),] """ manifest: typing.List[typing.Tuple[str, str]] = [] converters = getattr(sys.modules[__name__], '__converters') for converter in converters: manifest.extend(converter.get_conversion_manifest()) return manifest
20,886
Python
47.687646
241
0.655559
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/converter/core.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

from abc import ABCMeta, abstractmethod
import typing

from ..data import Plug


class ICoreConverter(metaclass=ABCMeta):
    """
    Abstract base for all Universal Material Map converters.

    A converter declares, via :meth:`get_conversion_manifest`, which source
    material classes it can translate into which render contexts.
    """

    @abstractmethod
    def __init__(self):
        super(ICoreConverter, self).__init__()

    @abstractmethod
    def get_conversion_manifest(self) -> typing.List[typing.Tuple[str, str]]:
        """
        Returns data indicating what source class can be converted to a render context.

        Example: [('lambert', 'MDL'), ('blinn', 'MDL'),]
        """
        raise NotImplementedError()


class IObjectConverter(ICoreConverter):
    """
    Converter interface that works on live application objects (instances).

    Implementations create instances, read/write instance attributes, and
    convert instances either to other instances or to plain key/value data.
    NOTE(review): the ``can_*`` query methods below are declared abstract but
    carry a ``return False`` default body — subclasses that call ``super()``
    get a conservative "cannot" answer.
    """

    @abstractmethod
    def can_create_instance(self, class_name: str) -> bool:
        """ Returns true if worker can generate an object of the given class name. """
        raise NotImplementedError()

    @abstractmethod
    def create_instance(self, class_name: str) -> object:
        """ Creates an object of the given class name. """
        raise NotImplementedError()

    @abstractmethod
    def can_set_plug_value(self, instance: object, plug: Plug) -> bool:
        """ Returns true if worker can set the plug's value given the instance and its attributes. """
        raise NotImplementedError()

    @abstractmethod
    def set_plug_value(self, instance: object, plug: Plug) -> typing.NoReturn:
        """ Sets the plug's value given the value of the instance's attribute named the same as the plug. """
        raise NotImplementedError()

    @abstractmethod
    def can_set_instance_attribute(self, instance: object, name: str) -> bool:
        """ Resolves if worker can set an attribute by the given name on the instance. """
        return False

    @abstractmethod
    def set_instance_attribute(self, instance: object, name: str, value: typing.Any) -> typing.NoReturn:
        """ Sets the named attribute on the instance to the value. """
        raise NotImplementedError()

    @abstractmethod
    def can_convert_instance(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        return False

    @abstractmethod
    def convert_instance_to_instance(self, instance: object, render_context: str) -> typing.Any:
        """ Converts the instance to another object given the render_context. """
        raise NotImplementedError()

    @abstractmethod
    def can_convert_instance_to_data(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        return False

    @abstractmethod
    def convert_instance_to_data(self, instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        raise NotImplementedError()

    @abstractmethod
    def can_convert_attribute_values(self, instance: object, render_context: str, destination: object) -> bool:
        """ Resolves if the instance's attribute values can be converted and set on the destination object's attributes. """
        raise NotImplementedError()

    @abstractmethod
    def convert_attribute_values(self, instance: object, render_context: str, destination: object) -> typing.NoReturn:
        """ Attribute values are converted and set on the destination object's attributes. """
        raise NotImplementedError()

    @abstractmethod
    def can_apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        return False

    @abstractmethod
    def apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> dict:
        """
        Returns a notification object

        Examples:
            {
                'umm_notification': "success",
                'message': "Material \"Material_A\" was successfully converted from \"OmniPBR\" data."
            }

            {
                'umm_notification': "incomplete_process",
                'message': "Not able to convert \"Material_B\" using \"CustomMDL\" since there is no Conversion Graph supporting that scenario."
            }

            {
                'umm_notification': "unexpected_error",
                'message': "Not able to convert \"Material_C\" using \"OmniGlass\" due to an unexpected error. Details: \"cannot set property to None\"."
            }
        """
        raise NotImplementedError()


class IDataConverter(ICoreConverter):
    """
    Converter interface that works purely on serialized key/value data,
    with no live application objects involved.
    """

    @abstractmethod
    def can_convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool:
        """ Resolves if worker can convert the given class and source_data to another class and target data. """
        return False

    @abstractmethod
    def convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        raise NotImplementedError()
6,404
Python
40.590909
176
0.665209
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/store.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.

import typing
import os
import uuid
import traceback

from .. import data
from .. import operator
from ..feature import POLLING
from ..singleton import Singleton
from .core import ChangeEvent, IDelegate
from .delegate import Filesystem, FilesystemManifest, FilesystemSettings
from .resources import install

# Well-known id of the shared "Common" library that every connector uses.
COMMON_LIBRARY_ID = '327ef29b-8358-441b-b2f0-4a16a9afd349'

libraries_directory = os.path.expanduser('~').replace('\\', '/')
if not libraries_directory.endswith('/Documents'):
    # os.path.expanduser() has different behaviour between 2.7 and 3
    libraries_directory = '{0}/Documents'.format(libraries_directory)
libraries_directory = '{0}/Omniverse'.format(libraries_directory)

common_library_directory = '{0}/ConnectorCommon/UMMLibrary'.format(libraries_directory)
cache_directory = '{0}/Cache'.format(common_library_directory)

# The Common library lives on the local filesystem under the user's Documents folder.
COMMON_LIBRARY = data.Library.Create(
    library_id=COMMON_LIBRARY_ID,
    name='Common',
    manifest=FilesystemManifest(root_directory='{0}'.format(common_library_directory)),
    conversion_graph=Filesystem(root_directory='{0}/ConversionGraph'.format(common_library_directory)),
    target=Filesystem(root_directory='{0}/Target'.format(common_library_directory)),
    settings=FilesystemSettings(root_directory='{0}'.format(common_library_directory)),
)

DEFAULT_LIBRARIES = [COMMON_LIBRARY]


class _ItemProvider(object):
    """
    IO interface for a single UMM Library item.

    Wraps one serialized item (identified by ``identifier``) behind up to two
    delegates: the authoritative library store and an optional local cache.
    Keeps an in-memory snapshot of the last-persisted content so callers can
    detect and revert unsaved edits.
    """

    def __init__(self, identifier: str, library_delegate: IDelegate = None, cache_delegate: IDelegate = None):
        """
        :param identifier: Store key of the item this provider manages.
        :param library_delegate: Authoritative store; preferred for reads/writes.
        :param cache_delegate: Local cache store; written alongside the library.
        """
        super(_ItemProvider, self).__init__()
        self._library_delegate: typing.Optional[IDelegate] = library_delegate
        self._cache_delegate: typing.Optional[IDelegate] = cache_delegate
        self._identifier: str = identifier
        self._file_util: typing.Optional[data.FileUtility] = None
        # Snapshot of the serialized content as of the last read/write.
        self._content_cache: dict = dict()

    def revert(self) -> None:
        """Restore the in-memory content to the last-persisted snapshot."""
        if self._file_util:
            self._file_util.content.deserialize(data=self._content_cache)

    def has_unsaved_changes(self) -> bool:
        """Return True when the in-memory content differs from the snapshot."""
        if not self._file_util:
            return False
        return self._file_util.content.serialize() != self._content_cache

    def read(self, update: bool = False) -> None:
        """
        Load the item from the library (preferred) or the cache.

        :param update: When True and the item was already loaded, re-read and
            deserialize the stored contents into the existing content object.

        TODO: Check if path has changed since last read from disk.
        """
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to read().')
        # update_cache() assumes that read() prioritizes reading with library delegate!
        delegate = self._library_delegate if self._library_delegate else self._cache_delegate
        if not self._file_util:
            contents = delegate.read(identifier=self._identifier)
            if contents is not None:
                self._file_util = data.FileUtility.FromData(data=contents)
                self._update_content_cache()
        elif update:
            contents = delegate.read(identifier=self._identifier)
            self._file_util.content.deserialize(data=contents)

    def create(self, instance: data.Serializable) -> None:
        """Wrap *instance* as this provider's content and persist it."""
        self._file_util = data.FileUtility.FromInstance(instance=instance)
        self.write()

    def write(self, content: data.Serializable = None) -> None:
        """
        Serialize the current (or given) content to every available delegate
        and refresh the persisted-content snapshot.
        """
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to write().')
        if content:
            if not self._file_util:
                self._file_util = data.FileUtility.FromInstance(instance=content)
            else:
                self._file_util._content = content
        elif not self._file_util:
            raise Exception('Not supported: _ItemProvider not initialized properly prior to "write()"')
        contents = self._file_util.serialize()
        if self._library_delegate:
            self._library_delegate.write(identifier=self._identifier, contents=contents)
        if self._cache_delegate:
            self._cache_delegate.write(identifier=self._identifier, contents=contents)
        self._update_content_cache()

    def delete(self) -> None:
        """Delete the item from every available delegate and drop local state."""
        if not self._library_delegate and not self._cache_delegate:
            raise Exception('Not supported: No delegate available to delete().')
        if self._library_delegate:
            self._library_delegate.delete(identifier=self._identifier)
        if self._cache_delegate:
            self._cache_delegate.delete(identifier=self._identifier)
        self._file_util = None
        self._content_cache = None

    def _update_content_cache(self) -> None:
        # Refresh the snapshot used by has_unsaved_changes()/revert().
        if not self._file_util:
            self._content_cache = dict()
        else:
            self._content_cache = self._file_util.content.serialize()

    def update_cache(self) -> bool:
        """
        Copy the item from the library store into the local cache.

        Returns True on success, False when either delegate is missing or the
        library read fails.
        """
        if not self._library_delegate or not self._cache_delegate:
            return False
        # Assumes that read() prioritizes reading with library delegate!
        try:
            self.read()
        except Exception as error:
            print('Warning: Universal Material Map error reading data with identifier "{0}". Cache will not be updated due to the read error.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(self._identifier, error, traceback.format_exc()))
            return False
        self._cache_delegate.write(identifier=self._identifier, contents=self._file_util.serialize())
        # FIX: previously fell off the end (returned None) on success even
        # though the signature declares -> bool and all failure paths return False.
        return True

    def on_shutdown(self):
        """Release all references; the provider is unusable afterwards."""
        self._cache_delegate = None
        self._library_delegate = None
        self._identifier = None
        self._file_util = None
        self._content_cache = None

    @property
    def content(self) -> data.Serializable:
        """The deserialized item content (requires a prior read/create)."""
        return self._file_util.content


class _LibraryProvider(object):
    """
    IO interface for a single UMM Library.

    Manages one _ItemProvider per stored item, grouped into four tables:
    manifests, conversion graphs, targets, and settings. Non-common,
    writable libraries are mirrored into a local filesystem cache.
    """

    @staticmethod
    def _transfer_data(source: IDelegate, target: IDelegate) -> bool:
        """ Returns True if transfer was made. """
        if not source or not target:
            return False
        for identifier in source.get_ids():
            target.write(identifier=identifier, contents=source.read(identifier=identifier))
        return True

    def __init__(self, library: data.Library):
        super(_LibraryProvider, self).__init__()
        self._library: data.Library = library
        if POLLING:
            # Change-subscription handles, only used when polling is enabled.
            self._manifest_subscription: uuid.uuid4 = None
            self._conversion_graph_subscription: uuid.uuid4 = None
            self._target_subscription: uuid.uuid4 = None
        # Cache delegates mirroring the library stores (None for the Common library).
        self._manifest_cache: typing.Optional[IDelegate] = None
        self._conversion_graph_cache: typing.Optional[IDelegate] = None
        self._target_cache: typing.Optional[IDelegate] = None
        self._settings_cache: typing.Optional[IDelegate] = None
        # Item providers keyed by store identifier.
        self._manifest_providers: typing.Dict[str, _ItemProvider] = dict()
        self._conversion_graph_providers: typing.Dict[str, _ItemProvider] = dict()
        self._target_providers: typing.Dict[str, _ItemProvider] = dict()
        self._settings_providers: typing.Dict[str, _ItemProvider] = dict()
        self._initialize()

    def _initialize(self) -> None:
        """Tear down existing providers and rebuild cache delegates for the library."""
        cache: _ItemProvider
        for cache in self._manifest_providers.values():
            cache.on_shutdown()
        for cache in self._conversion_graph_providers.values():
            cache.on_shutdown()
        for cache in self._target_providers.values():
            cache.on_shutdown()
        for cache in self._settings_providers.values():
            cache.on_shutdown()
        self._manifest_providers = dict()
        self._conversion_graph_providers = dict()
        self._target_providers = dict()
        self._settings_providers = dict()
        if not self._library:
            return
        if not self._library.id == COMMON_LIBRARY_ID:
            # The Common library IS the local store, so only other libraries
            # get a filesystem cache under the shared cache directory.
            self._manifest_cache = FilesystemManifest(
                root_directory='{0}/{1}'.format(cache_directory, self._library.id)
            )
            self._conversion_graph_cache = Filesystem(
                root_directory='{0}/{1}/ConversionGraph'.format(cache_directory, self._library.id)
            )
            self._target_cache = Filesystem(
                root_directory='{0}/{1}/Target'.format(cache_directory, self._library.id)
            )
            self._settings_cache = FilesystemSettings(
                root_directory='{0}/{1}'.format(cache_directory, self._library.id)
            )
        if not self._library.id == COMMON_LIBRARY_ID and not self._library.is_read_only:
            self._update_cache()

    def _update_cache(self) -> None:
        """Mirror all four store tables into the local cache (writable libraries only)."""
        if self._library.is_read_only:
            return
        self._update_cache_table(
            source=self._library.manifest,
            target=self._manifest_cache,
            providers=self._manifest_providers,
        )
        self._update_cache_table(
            source=self._library.conversion_graph,
            target=self._conversion_graph_cache,
            providers=self._conversion_graph_providers,
        )
        self._update_cache_table(
            source=self._library.target,
            target=self._target_cache,
            providers=self._target_providers,
        )
        self._update_cache_table(
            source=self._library.settings,
            target=self._settings_cache,
            providers=self._settings_providers,
        )

    def _update_cache_table(self, source: IDelegate, target: IDelegate, providers: dict) -> None:
        """Ensure a provider exists for every id in *source* and refresh its cache copy."""
        if self._library.is_read_only:
            return
        if not source or not target:
            return
        for identifier in source.get_ids():
            if identifier not in providers.keys():
                provider = _ItemProvider(
                    identifier=identifier,
                    library_delegate=source,
                    cache_delegate=target
                )
                providers[identifier] = provider
            else:
                provider = providers[identifier]
            provider.update_cache()
def get_settings(self) -> typing.List[data.Settings]: if not self._library.settings: return [] settings: typing.List[data.Settings] = [] for identifier in self._library.settings.get_ids(): if identifier not in self._settings_providers.keys(): cache = _ItemProvider( identifier=identifier, library_delegate=self._library.settings, cache_delegate=self._settings_cache ) self._settings_providers[identifier] = cache else: cache = self._settings_providers[identifier] cache.read() setting = typing.cast(data.Settings, cache.content) settings.append(setting) return settings def get_manifests(self) -> typing.List[data.ConversionManifest]: delegate = self._library.manifest if self._library.manifest else self._manifest_cache if not delegate: return [] manifests: typing.List[data.ConversionManifest] = [] conversion_graphs: typing.List[data.ConversionGraph] = None for identifier in delegate.get_ids(): if identifier not in self._manifest_providers.keys(): cache = _ItemProvider( identifier=identifier, library_delegate=self._library.manifest, cache_delegate=self._manifest_cache ) self._manifest_providers[identifier] = cache else: cache = self._manifest_providers[identifier] cache.read() manifest = typing.cast(data.ConversionManifest, cache.content) if not conversion_graphs: conversion_graphs = self.get_conversion_graphs() for item in manifest.conversion_maps: if not item._conversion_graph: for conversion_graph in conversion_graphs: if conversion_graph.id == item.conversion_graph_id: item._conversion_graph = conversion_graph break manifests.append(manifest) if POLLING: if self._library.manifest and not self._manifest_subscription: self._manifest_subscription = self._library.manifest.add_change_subscription(callback=self._on_store_manifest_changes) return manifests def get_conversion_graphs(self) -> typing.List[data.ConversionGraph]: delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache if not delegate: return [] 
conversion_graphs: typing.List[data.ConversionGraph] = [] for identifier in delegate.get_ids(): if identifier not in self._conversion_graph_providers.keys(): cache = _ItemProvider( identifier=identifier, library_delegate=self._library.conversion_graph, cache_delegate=self._conversion_graph_cache ) try: cache.read() except Exception as error: print('Warning: Universal Material Map error reading Conversion Graph data with identifier "{0}". Graph will not be available for use inside UMM.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(identifier, error, traceback.format_exc())) continue self._conversion_graph_providers[identifier] = cache else: cache = self._conversion_graph_providers[identifier] try: cache.read() except Exception as error: print('Warning: Universal Material Map error reading Conversion Graph data with identifier "{0}". Graph will not be available for use inside UMM.\n\tDetails: "{1}".\n\tCallstack: {2}'.format(identifier, error, traceback.format_exc())) continue conversion_graph = typing.cast(data.ConversionGraph, cache.content) conversion_graph._library = self._library conversion_graph.filename = identifier conversion_graph._exists_on_disk = True conversion_graphs.append(conversion_graph) if POLLING: if self._library.conversion_graph and not self._conversion_graph_subscription: self._conversion_graph_subscription = self._library.conversion_graph.add_change_subscription(callback=self._on_store_conversion_graph_changes) return conversion_graphs def get_targets(self) -> typing.List[data.Target]: delegate = self._library.target if self._library.target else self._target_cache if not delegate: return [] targets: typing.List[data.Target] = [] for identifier in delegate.get_ids(): if identifier not in self._target_providers.keys(): cache = _ItemProvider( identifier=identifier, library_delegate=self._library.target, cache_delegate=self._target_cache ) self._target_providers[identifier] = cache else: cache = self._target_providers[identifier] cache.read() target 
= typing.cast(data.Target, cache.content) target.store_id = identifier targets.append(target) if POLLING: if self._library.target and not self._target_subscription: self._target_subscription = self._library.target.add_change_subscription(callback=self._on_store_target_changes) return targets def _on_store_manifest_changes(self, event: ChangeEvent) -> None: if not POLLING: raise NotImplementedError() print('_on_store_manifest_changes', event) def _on_store_conversion_graph_changes(self, event: ChangeEvent) -> None: if not POLLING: raise NotImplementedError() print('_on_store_conversion_graph_changes', event) def _on_store_target_changes(self, event: ChangeEvent) -> None: if not POLLING: raise NotImplementedError() print('_on_store_target_changes...', event, self) def revert(self, item: data.Serializable) -> bool: """ Returns True if the item existed in a data store and was successfully reverted. """ if isinstance(item, data.ConversionGraph): if item.filename not in self._conversion_graph_providers.keys(): return False filename = item.filename library = item.library cache = self._conversion_graph_providers[item.filename] cache.revert() item.filename = filename item._library = library item._exists_on_disk = True return True if isinstance(item, data.Target): if item.store_id not in self._target_providers.keys(): return False cache = self._target_providers[item.store_id] cache.revert() return True if isinstance(item, data.ConversionManifest): if item.store_id not in self._manifest_providers.keys(): return False cache = self._manifest_providers[item.store_id] cache.revert() return True if isinstance(item, data.Settings): if item.store_id not in self._settings_providers.keys(): return False cache = self._settings_providers[item.store_id] cache.revert() return True def write(self, item: data.Serializable, identifier: str = None, overwrite: bool = False) -> None: if isinstance(item, data.Settings): if not item.store_id: raise Exception('Not supported: Settings must have a 
valid store id in order to write the item.') if not self._library.settings: raise Exception('Library "{0}" with id="{1}" does not support a Settings store.'.format(self._library.name, self._library.id)) if item.store_id not in self._settings_providers.keys(): cache = _ItemProvider( identifier=item.store_id, library_delegate=self._library.settings, cache_delegate=self._settings_cache ) self._settings_providers[item.store_id] = cache else: if not overwrite: return cache = self._settings_providers[item.store_id] cache.write(content=item) return if isinstance(item, data.ConversionManifest): if not item.store_id: raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.') if item.store_id not in self._manifest_providers.keys(): cache = _ItemProvider( identifier=item.store_id, library_delegate=self._library.manifest, cache_delegate=self._manifest_cache ) self._manifest_providers[item.store_id] = cache else: if not overwrite: return cache = self._manifest_providers[item.store_id] cache.write(content=item) return if isinstance(item, data.ConversionGraph): if not item.filename and not identifier: raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.') key = identifier if identifier else item.filename if key not in self._conversion_graph_providers.keys(): cache = _ItemProvider( identifier=key, library_delegate=self._library.conversion_graph, cache_delegate=self._conversion_graph_cache ) self._conversion_graph_providers[key] = cache else: if not overwrite: return cache = self._conversion_graph_providers[key] item.revision += 1 cache.write(content=item) if identifier: item.filename = identifier item._exists_on_disk = True item._library = self._library return if isinstance(item, data.Target): if not item.store_id: raise Exception( 'Not supported: Conversion Manifest must have a valid store id in order to write the item.') if item.store_id not in self._target_providers.keys(): 
cache = _ItemProvider( identifier=item.store_id, library_delegate=self._library.target, cache_delegate=self._target_cache ) self._target_providers[item.store_id] = cache else: if not overwrite: return cache = self._target_providers[item.store_id] cache.write(content=item) return raise NotImplementedError() def delete(self, item: data.Serializable) -> None: if isinstance(item, data.Settings): if not item.store_id: raise Exception('Not supported: Settings must have a valid store id in order to write the item.') if not self._library.settings: raise Exception('Library "{0}" with id="{1}" does not support a Settings store.'.format(self._library.name, self._library.id)) if item.store_id not in self._settings_providers.keys(): return cache = self._settings_providers[item.store_id] cache.delete() cache.on_shutdown() del self._settings_providers[item.store_id] return if isinstance(item, data.ConversionManifest): if not item.store_id: raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.') if item.store_id not in self._manifest_providers.keys(): return cache = self._manifest_providers[item.store_id] cache.delete() cache.on_shutdown() del self._manifest_providers[item.store_id] return if isinstance(item, data.ConversionGraph): if not item.filename: raise Exception('Not supported: Conversion Manifest must have a valid store id in order to write the item.') if item.filename not in self._conversion_graph_providers.keys(): return cache = self._conversion_graph_providers[item.filename] cache.delete() cache.on_shutdown() del self._conversion_graph_providers[item.filename] return if isinstance(item, data.Target): if not item.store_id: raise Exception( 'Not supported: Conversion Manifest must have a valid store id in order to write the item.') if item.store_id not in self._target_providers.keys(): return cache = self._target_providers[item.store_id] cache.write(content=item) cache.on_shutdown() del 
self._target_providers[item.store_id] return raise NotImplementedError() def can_show_in_store(self, item: data.Serializable) -> bool: if isinstance(item, data.ConversionGraph): delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache if not delegate: return False return delegate.can_show_in_store(identifier=item.filename) if isinstance(item, data.Target): delegate = self._library.target if self._library.target else self._target_cache if not delegate: return False return delegate.can_show_in_store(identifier=item.store_id) return False def show_in_store(self, item: data.Serializable) -> None: if isinstance(item, data.ConversionGraph): delegate = self._library.conversion_graph if self._library.conversion_graph else self._conversion_graph_cache if not delegate: return return delegate.show_in_store(identifier=item.filename) if isinstance(item, data.Target): delegate = self._library.target if self._library.target else self._target_cache if not delegate: return return delegate.show_in_store(identifier=item.store_id) @property def library(self) -> data.Library: return self._library @library.setter def library(self, value: data.Library) -> None: if self._library == value: return if POLLING: if self._library: if self._manifest_subscription and self._library.manifest: self._library.manifest.remove_change_subscription(subscription_id=self._manifest_subscription) if self._conversion_graph_subscription and self._library.conversion_graph: self._library.conversion_graph.remove_change_subscription(subscription_id=self._conversion_graph_subscription) if self._target_subscription and self._library.target: self._library.target.remove_change_subscription(subscription_id=self._target_subscription) self._library = value self._initialize() @Singleton class __Manager: def __init__(self): install() self._library_caches: typing.Dict[str, _LibraryProvider] = dict() self._operators: typing.List[data.Operator] = [ operator.And(), 
operator.Add(), operator.BooleanSwitch(), operator.ColorSpaceResolver(), operator.ConstantBoolean(), operator.ConstantFloat(), operator.ConstantInteger(), operator.ConstantRGB(), operator.ConstantRGBA(), operator.ConstantString(), operator.Equal(), operator.GreaterThan(), operator.LessThan(), operator.ListGenerator(), operator.ListIndex(), operator.MayaTransparencyResolver(), operator.MergeRGB(), operator.MergeRGBA(), operator.MDLColorSpace(), operator.MDLTextureResolver(), operator.Multiply(), operator.Not(), operator.Or(), operator.Remap(), operator.SplitRGB(), operator.SplitRGBA(), operator.SplitTextureData(), operator.Subtract(), operator.ValueResolver(), operator.ValueTest(), ] for o in self._operators: if len([item for item in self._operators if item.id == o.id]) == 1: continue raise Exception('Operator id "{0}" is not unique.'.format(o.id)) provider = _LibraryProvider(library=COMMON_LIBRARY) self._library_caches[COMMON_LIBRARY_ID] = provider render_contexts = [ 'MDL', 'USDPreview', 'Blender', ] settings = provider.get_settings() if len(settings) == 0: self._settings: data.Settings = data.Settings() for render_context in render_contexts: self._settings.render_contexts.append(render_context) self._settings.render_contexts.append(render_context) self._save_settings() else: self._settings: data.Settings = settings[0] added_render_context = False for render_context in render_contexts: if render_context not in self._settings.render_contexts: self._settings.render_contexts.append(render_context) added_render_context = True if added_render_context: self._save_settings() for i in range(len(self._settings.libraries)): for library in DEFAULT_LIBRARIES: if self._settings.libraries[i].id == library.id: self._settings.libraries[i] = library break for library in DEFAULT_LIBRARIES: if len([o for o in self._settings.libraries if o.id == library.id]) == 0: self._settings.libraries.append(library) for library in self._settings.libraries: self.register_library(library=library) 
    def _save_settings(self) -> None:
        # Persist self._settings through the common library's provider cache.
        # identifier=None: the provider derives the settings filename itself.
        if COMMON_LIBRARY_ID not in self._library_caches.keys():
            raise Exception('Not supported: Common library not in cache. Unable to save settings.')
        cache = self._library_caches[COMMON_LIBRARY_ID]
        cache.write(item=self._settings, identifier=None, overwrite=True)

    def register_library(self, library: data.Library) -> None:
        """Add or update *library* in the settings and (re)attach its provider cache."""
        preferences_changed = False
        # Replace any stored library that has the same id but different content.
        to_remove = []
        for item in self._settings.libraries:
            if item.id == library.id:
                if not item == library:
                    to_remove.append(item)
        for item in to_remove:
            self._settings.libraries.remove(item)
            preferences_changed = True
        if library not in self._settings.libraries:
            self._settings.libraries.append(library)
            preferences_changed = True
        if preferences_changed:
            self._save_settings()
        # Create a provider for new libraries; refresh the library on existing ones.
        if library.id not in self._library_caches.keys():
            self._library_caches[library.id] = _LibraryProvider(library=library)
        else:
            cache = self._library_caches[library.id]
            cache.library = library

    def register_render_contexts(self, context: str) -> None:
        """Register a render context such as MDL or USD Preview."""
        if context not in self._settings.render_contexts:
            self._settings.render_contexts.append(context)
            self._save_settings()

    def get_assembly(self, reference: data.TargetInstance) -> typing.Union[data.Target, None]:
        """Return the Target whose id matches *reference*.target_id, searching every cache."""
        cache: _LibraryProvider
        for cache in self._library_caches.values():
            for target in cache.get_targets():
                if target.id == reference.target_id:
                    return target
        return None

    def get_assemblies(self, library: data.Library = None) -> typing.List[data.Target]:
        """Return targets from *library* only, or from all registered libraries when omitted."""
        if library:
            if library.id not in self._library_caches.keys():
                return []
            cache = self._library_caches[library.id]
            return cache.get_targets()
        targets: typing.List[data.Target] = []
        cache: _LibraryProvider
        for cache in self._library_caches.values():
            targets.extend(cache.get_targets())
        return targets

    def get_documents(self, library: data.Library = None) -> typing.List[data.ConversionGraph]:
        """Return conversion graphs, resolving their target references before returning."""
        conversion_graphs: typing.List[data.ConversionGraph] = []
        if library:
            if library.id not in self._library_caches.keys():
                return []
            cache = self._library_caches[library.id]
            conversion_graphs = cache.get_conversion_graphs()
        else:
            cache: _LibraryProvider
            for cache in self._library_caches.values():
                conversion_graphs.extend(cache.get_conversion_graphs())
        for conversion_graph in conversion_graphs:
            self._completed_document_serialization(conversion_graph=conversion_graph)
        return conversion_graphs

    def get_document(self, library: data.Library, document_filename: str) -> typing.Optional[data.ConversionGraph]:
        """Return the conversion graph stored as *document_filename* in *library*, or None."""
        if library.id not in self._library_caches.keys():
            return None
        cache = self._library_caches[library.id]
        for conversion_graph in cache.get_conversion_graphs():
            if conversion_graph.filename == document_filename:
                self._completed_document_serialization(conversion_graph=conversion_graph)
                return conversion_graph
        return None

    def can_show_in_filesystem(self, document: data.ConversionGraph) -> bool:
        """True when the document's library cache can reveal the file on disk."""
        if not document.library:
            return False
        if document.library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[document.library.id]
        return cache.can_show_in_store(item=document)

    def show_in_filesystem(self, document: data.ConversionGraph) -> None:
        """Reveal the document's file in the operating system's file browser."""
        if not document.library:
            return
        if document.library.id not in self._library_caches.keys():
            return
        cache = self._library_caches[document.library.id]
        cache.show_in_store(item=document)

    def get_document_by_id(self, library: data.Library, document_id: str) -> typing.Optional[data.ConversionGraph]:
        """Return the conversion graph with id *document_id* in *library*, or None."""
        for conversion_graph in self.get_documents(library=library):
            if conversion_graph.id == document_id:
                return conversion_graph
        return None

    def create_new_document(self, library: data.Library) -> data.ConversionGraph:
        """Create an unsaved conversion graph bound to *library* (empty filename)."""
        conversion_graph = data.ConversionGraph()
        conversion_graph._library = library
        conversion_graph.filename = ''
        self._completed_document_serialization(conversion_graph=conversion_graph)
        return conversion_graph

    def _completed_document_serialization(self, conversion_graph: data.ConversionGraph) -> None:
        # Resolve TargetInstance references to their Target objects and rebuild
        # the graph's DAG when anything changed (or when there are no instances).
        build_dag = len(conversion_graph.target_instances) == 0
        for reference in conversion_graph.target_instances:
            if reference.target and reference.target.id == reference.target_id:
                continue
            reference.target = self.get_assembly(reference=reference)
            build_dag = True
        if build_dag:
            conversion_graph.build_dag()

    def create_from_source(self, source: data.ConversionGraph) -> data.ConversionGraph:
        """Return a copy of *source* with a fresh id but the same library and filename."""
        new_conversion_graph = data.ConversionGraph()
        new_id = new_conversion_graph.id
        new_conversion_graph.deserialize(data=source.serialize())
        # deserialize() copied source's id; restore the freshly generated one.
        new_conversion_graph._id = new_id
        new_conversion_graph._library = source.library
        new_conversion_graph.filename = source.filename
        self._completed_document_serialization(conversion_graph=new_conversion_graph)
        return new_conversion_graph

    def revert(self, library: data.Library, instance: data.Serializable) -> bool:
        """
        Returns True if the file existed on disk and was successfully reverted.
        """
        if not library:
            return False
        if library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[library.id]
        if cache.revert(item=instance):
            if isinstance(instance, data.ConversionGraph):
                self._completed_document_serialization(conversion_graph=instance)
            return True
        return False

    def find_documents(self, source_class: str, library: data.Library = None) -> typing.List[data.ConversionGraph]:
        """Return conversion graphs whose source target contains a node of *source_class*."""
        conversion_graphs = []
        for conversion_graph in self.get_documents(library=library):
            if not conversion_graph.source_node:
                continue
            # NOTE(review): no break here — a graph with several matching nodes is
            # appended once per matching node; confirm that duplicates are intended.
            for node in conversion_graph.source_node.target.nodes:
                if node.class_name == source_class:
                    conversion_graphs.append(conversion_graph)
        return conversion_graphs

    def find_assembly(self, assembly_class: str, library: data.Library = None) -> typing.List[data.Target]:
        """Return targets containing at least one node of *assembly_class*."""
        targets = []
        for target in self.get_assemblies(library=library):
            for node in target.nodes:
                if node.class_name == assembly_class:
                    targets.append(target)
                    break
        return targets

    def _get_manifest_filepath(self, library: data.Library) -> str:
        # Manifest location is fixed relative to the library root.
        return '{0}/ConversionManifest.json'.format(library.path)

    def get_conversion_manifest(self, library: data.Library) -> data.ConversionManifest:
        """Return *library*'s manifest, or an empty manifest when none exists."""
        if library.id not in self._library_caches.keys():
            return data.ConversionManifest()
        cache = self._library_caches[library.id]
        manifests = cache.get_manifests()
        if len(manifests):
            manifest = manifests[0]
            for conversion_map in manifest.conversion_maps:
                if conversion_map.conversion_graph is None:
                    continue
                self._completed_document_serialization(conversion_graph=conversion_map.conversion_graph)
            return manifest
        return data.ConversionManifest()

    def save_conversion_manifest(self, library: data.Library, manifest: data.ConversionManifest) -> None:
        """Write *manifest* through *library*'s cache; silently ignores unknown libraries."""
        if library.id not in self._library_caches.keys():
            return
        cache = self._library_caches[library.id]
        cache.write(item=manifest)

    def write(self, filename: str, instance: data.Serializable, library: data.Library, overwrite: bool = False) -> None:
        """Serialize *instance* to *filename* (forced to a .json extension) in *library*.

        Raises:
            Exception: when *filename* is blank or *library* is not registered.
        """
        if not filename.strip():
            raise Exception('Invalid filename: empty string.')
        if library.id not in self._library_caches.keys():
            raise Exception('Cannot write to a library that is not registered')
        if not filename.lower().endswith('.json'):
            filename = '{0}.json'.format(filename)
        cache = self._library_caches[library.id]
        cache.write(item=instance, identifier=filename, overwrite=overwrite)

    def delete_document(self, document: data.ConversionGraph) -> bool:
        """Delete *document* from its library; False when the library is unknown."""
        if not document.library:
            return False
        if document.library.id not in self._library_caches.keys():
            return False
        cache = self._library_caches[document.library.id]
        cache.delete(item=document)
        return True

    def is_graph_entity_id(self, identifier: str) -> bool:
        """True when *identifier* matches a registered assembly."""
        for item in self.get_assemblies():
            if item.id == identifier:
                return True
        return False

    def get_graph_entity(self, identifier: str) -> data.GraphEntity:
        """Instantiate a graph entity (assembly or operator instance) for *identifier*.

        Raises:
            Exception: when no assembly or operator matches *identifier*.
        """
        for item in self.get_assemblies():
            if item.id == identifier:
                return data.TargetInstance.FromAssembly(assembly=item)
        for item in self.get_operators():
            if item.id == identifier:
                return data.OperatorInstance.FromOperator(operator=item)
        raise Exception('Graph Entity with id "{0}" cannot be found'.format(identifier))

    def register_operator(self, operator: data.Operator) -> None:
        """Register *operator* if it is not already known."""
        if operator not in self._operators:
            self._operators.append(operator)

    def get_operators(self) -> typing.List[data.Operator]:
        """Return all registered operators."""
        return self._operators

    def is_operator_id(self, identifier: str) -> bool:
        """True when *identifier* matches a registered operator."""
        for item in self.get_operators():
            if item.id == identifier:
                return True
        return False

    def on_shutdown(self) -> None:
        # Detach every provider from its library and drop all caches.
        if len(self._library_caches.keys()):
            provider: _LibraryProvider
            for provider in self._library_caches.values():
                provider.library = None
            self._library_caches = dict()

    @property
    def libraries(self) -> typing.List[data.Library]:
        # Libraries as stored in the persisted settings.
        return self._settings.libraries


def register_library(library: data.Library) -> None:
    """Add or update *library* in the shared store."""
    __Manager().register_library(library=library)


def get_libraries() -> typing.List[data.Library]:
    """Return all registered libraries."""
    return __Manager().libraries


def get_library(library_id: str) -> data.Library:
    """Return the library with *library_id*; raises when not found."""
    for library in __Manager().libraries:
        if library.id == library_id:
            return library
    raise Exception('Library with id "{0}" not found.'.format(library_id))


def get_assembly(reference: data.TargetInstance) -> data.Target:
    """Return the Target referenced by *reference* (None when not found)."""
    # TODO: Is this still needed?
    return __Manager().get_assembly(reference=reference)


def write(filename: str, instance: data.Serializable, library: data.Library, overwrite: bool = False) -> None:
    """Serialize *instance* to *filename* in *library*."""
    __Manager().write(filename=filename, instance=instance, library=library, overwrite=overwrite)


def get_assemblies(library: data.Library = None) -> typing.List[data.Target]:
    """Return targets from *library*, or from all libraries when omitted."""
    return __Manager().get_assemblies(library=library)


def is_graph_entity_id(identifier: str) -> bool:
    """True when *identifier* matches a registered assembly."""
    return __Manager().is_graph_entity_id(identifier=identifier)


def get_graph_entity(identifier: str) -> data.GraphEntity:
    """Instantiate a graph entity for *identifier*; raises when unknown."""
    return __Manager().get_graph_entity(identifier=identifier)


def get_documents(library: data.Library = None) -> typing.List[data.ConversionGraph]:
    """Return conversion graphs from *library*, or from all libraries when omitted."""
    return __Manager().get_documents(library=library)


def get_document(library: data.Library, document_filename: str) -> typing.Optional[data.ConversionGraph]:
    """Return the conversion graph stored as *document_filename* in *library*, or None."""
    # TODO: Is this still needed?
    return __Manager().get_document(library=library, document_filename=document_filename)


def create_new_document(library: data.Library) -> data.ConversionGraph:
    """Create an unsaved conversion graph bound to *library*."""
    return __Manager().create_new_document(library=library)


def create_from_source(source: data.ConversionGraph) -> data.ConversionGraph:
    """Return a copy of *source* with a fresh id."""
    return __Manager().create_from_source(source=source)


def revert(library: data.Library, instance: data.Serializable) -> bool:
    """
    Returns True if the file existed on disk and was successfully reverted.
    """
    return __Manager().revert(library, instance)


def find_documents(source_class: str, library: data.Library = None) -> typing.List[data.ConversionGraph]:
    """Return conversion graphs whose source target has a node of *source_class*."""
    # TODO: Is this still needed?
    return __Manager().find_documents(source_class=source_class, library=library)


def find_assembly(assembly_class: str, library: data.Library = None) -> typing.List[data.Target]:
    """Return targets containing a node of *assembly_class*."""
    # TODO: Is this still needed?
    return __Manager().find_assembly(assembly_class=assembly_class, library=library)


def register_operator(operator: data.Operator) -> None:
    """Register *operator* with the shared store."""
    __Manager().register_operator(operator=operator)


def get_operators() -> typing.List[data.Operator]:
    """Return all registered operators."""
    return __Manager().get_operators()


def is_operator_id(identifier: str) -> bool:
    """True when *identifier* matches a registered operator."""
    return __Manager().is_operator_id(identifier=identifier)


def delete_document(document: data.ConversionGraph) -> bool:
    """Delete *document* from its library; False when the library is unknown."""
    return __Manager().delete_document(document=document)


def get_conversion_manifest(library: data.Library) -> data.ConversionManifest:
    """Return *library*'s conversion manifest (empty manifest when none exists)."""
    return __Manager().get_conversion_manifest(library=library)


def get_render_contexts() -> typing.List[str]:
    """Returns list of registered render contexts."""
    return __Manager()._settings.render_contexts[:]


def register_render_contexts(context: str) -> None:
    """Register a render context such as MDL or USD Preview."""
    __Manager().register_render_contexts(context=context)


def can_show_in_filesystem(document: data.ConversionGraph) -> bool:
    """Checks if the operating system can display where a document is saved on disk."""
    return __Manager().can_show_in_filesystem(document=document)


def show_in_filesystem(document: data.ConversionGraph) -> None:
    """Makes the operating system display where a document is saved on disk."""
    return __Manager().show_in_filesystem(document=document)


def on_shutdown() -> None:
    """Releases all library caches held by the shared store."""
    return __Manager().on_shutdown()
44,912
Python
38.60582
254
0.614557
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/delegate.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import typing
import os
import json
import subprocess
import threading
import platform
import uuid

from ..feature import POLLING
from .core import ChangeEvent, IDelegate


class Filesystem(IDelegate):
    """IDelegate that stores items as .json files in a root directory.

    When the optional POLLING feature is enabled, the delegate can watch the
    directory for external changes and notify subscribers via ChangeEvent.
    """

    # Seconds between polling passes when change polling is enabled.
    _POLL_INTERVAL: float = 5.0

    def __init__(self, root_directory: str):
        super(Filesystem, self).__init__()
        if POLLING:
            # Polling state: last-seen mtimes per identifier, subscriber
            # callbacks, and ids of writes/deletes made through this delegate
            # (suppressed from change events - only external edits notify).
            self.__is_polling: bool = False
            self.__poll_timer: typing.Optional[threading.Timer] = None
            self.__poll_data: typing.Dict[str, float] = dict()
            self.__poll_subscriptions: typing.Dict[uuid.UUID, typing.Callable[[ChangeEvent], typing.NoReturn]] = dict()
            self.__pending_write_ids: typing.List[str] = []
            self.__pending_delete_ids: typing.List[str] = []
        self._root_directory: str = root_directory

    @staticmethod
    def __modified_time(filepath: str) -> float:
        # os.path.getmtime and os.stat().st_mtime report the same value; the
        # platform split mirrors the original implementation's behavior.
        return os.path.getmtime(filepath) if platform.system() == 'Windows' else os.stat(filepath).st_mtime

    def __schedule_poll(self) -> None:
        # threading.Timer fires exactly once; create a fresh timer each cycle.
        # BUGFIX: the previous implementation re-invoked the same timer's
        # run() from inside its own callback, which deepened the call stack on
        # every 5-second cycle and eventually raised RecursionError.
        self.__poll_timer = threading.Timer(self._POLL_INTERVAL, self.__on_timer)
        self.__poll_timer.start()

    def __start_polling(self) -> None:
        if not POLLING:
            return
        if self.__is_polling:
            return
        self.__is_polling = True
        # Snapshot current mtimes so that __on_timer only reports changes made
        # after polling started.
        self.__poll_data = dict()
        for identifier in self.get_ids():
            filepath = '{0}/{1}'.format(self._root_directory, identifier)
            self.__poll_data[identifier] = self.__modified_time(filepath)
        self.__schedule_poll()

    def __on_timer(self) -> None:
        """Single polling pass: diff disk state against the snapshot and notify."""
        if not POLLING:
            return
        if not self.__is_polling:
            # Polling was stopped; do not reschedule.
            return
        try:
            identifiers = self.get_ids()
            added = [o for o in identifiers if o not in self.__poll_data.keys() and o not in self.__pending_write_ids]
            removed = [o for o in self.__poll_data.keys() if o not in identifiers and o not in self.__pending_delete_ids]
            modified_maybe = [o for o in identifiers if o not in added and o not in removed and o not in self.__pending_write_ids]
            modified = []
            for identifier in modified_maybe:
                filepath = '{0}/{1}'.format(self._root_directory, identifier)
                modified_time = self.__modified_time(filepath)
                if self.__poll_data[identifier] == modified_time:
                    continue
                modified.append(identifier)
                self.__poll_data[identifier] = modified_time
            for identifier in added:
                filepath = '{0}/{1}'.format(self._root_directory, identifier)
                self.__poll_data[identifier] = self.__modified_time(filepath)
            for identifier in removed:
                del self.__poll_data[identifier]
            if len(added) + len(modified) + len(removed) > 0:
                event = ChangeEvent(added=tuple(added), modified=tuple(modified), removed=tuple(removed))
                for callback in self.__poll_subscriptions.values():
                    callback(event)
        except Exception as error:
            print('WARNING: Universal Material Map failed to poll {0} for file changes.\nDetail: {1}'.format(self._root_directory, error))
        self.__schedule_poll()

    def __stop_polling(self) -> None:
        if not POLLING:
            return
        self.__is_polling = False
        try:
            self.__poll_timer.cancel()
        except Exception:
            # Timer may be None or already finished; nothing to cancel.
            pass
        self.__poll_data = dict()

    def can_poll(self) -> bool:
        """True when the optional polling feature is enabled."""
        return bool(POLLING)

    def start_polling(self) -> None:
        if POLLING:
            self.__start_polling()

    def stop_polling(self) -> None:
        if POLLING:
            self.__stop_polling()

    def add_change_subscription(self, callback: typing.Callable[[ChangeEvent], typing.NoReturn]) -> uuid.UUID:
        """Subscribe *callback* to change events; returns the subscription id.

        Registering the same callback twice returns the existing id. Polling
        starts with the first subscription.
        """
        if not POLLING:
            raise NotImplementedError('Polling feature not enabled.')
        for key, value in self.__poll_subscriptions.items():
            if value == callback:
                return key
        key = uuid.uuid4()
        self.__poll_subscriptions[key] = callback
        self.start_polling()
        return key

    def remove_change_subscription(self, subscription_id: uuid.UUID) -> None:
        """Remove a subscription; polling stops when none remain."""
        if not POLLING:
            raise NotImplementedError('Polling feature not enabled.')
        if subscription_id in self.__poll_subscriptions.keys():
            del self.__poll_subscriptions[subscription_id]
        if len(self.__poll_subscriptions.keys()) == 0:
            self.stop_polling()

    def get_ids(self) -> typing.List[str]:
        """Return the .json filenames in the top level of the root directory."""
        identifiers: typing.List[str] = []
        for directory, sub_directories, filenames in os.walk(self._root_directory):
            for filename in filenames:
                if not filename.lower().endswith('.json'):
                    continue
                identifiers.append(filename)
            # Only the top-level directory is scanned.
            break
        return identifiers

    def read(self, identifier: str) -> typing.Union[typing.Dict, typing.NoReturn]:
        """Return the parsed JSON dict for *identifier*, or None when absent.

        Raises:
            Exception: when *identifier* lacks a .json extension, the file
                cannot be read, or its contents are not a JSON object.
        """
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        if not os.path.exists(filepath):
            return None
        try:
            with open(filepath, 'r') as pointer:
                contents = json.load(pointer)
            if not isinstance(contents, dict):
                raise Exception('Not supported: Load of file "{0}" did not resolve to a dictionary. Could be due to reading same file twice too fast.'.format(filepath))
            return contents
        except Exception as error:
            print('Failed to open file "{0}"'.format(filepath))
            raise error

    def write(self, identifier: str, contents: typing.Dict) -> None:
        """Create or replace *identifier* with *contents* serialized as JSON."""
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        if not isinstance(contents, dict):
            raise Exception('Not supported: Argument "contents" is not an instance of dict.')
        if not os.path.exists(self._root_directory):
            os.makedirs(self._root_directory)
        if POLLING:
            if identifier not in self.__pending_write_ids:
                self.__pending_write_ids.append(identifier)
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        with open(filepath, 'w') as pointer:
            json.dump(contents, pointer, indent=4)
        if POLLING:
            # Record the fresh mtime so our own write does not trigger a
            # notification; only external modifications should notify.
            self.__poll_data[identifier] = self.__modified_time(filepath)
            self.__pending_write_ids.remove(identifier)

    def delete(self, identifier: str) -> None:
        """Delete *identifier*'s file if it exists."""
        if not identifier.lower().endswith('.json'):
            raise Exception('Invalid identifier: "{0}" does not end with ".json".'.format(identifier))
        if POLLING:
            if identifier not in self.__pending_delete_ids:
                self.__pending_delete_ids.append(identifier)
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        if os.path.exists(filepath):
            os.remove(filepath)
        if POLLING:
            # Forget the item so our own delete does not trigger a notification.
            if identifier in self.__poll_data.keys():
                del self.__poll_data[identifier]
            self.__pending_delete_ids.remove(identifier)

    def can_show_in_store(self, identifier: str) -> bool:
        """True when *identifier*'s file exists on disk."""
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        return os.path.exists(filepath)

    def show_in_store(self, identifier: str) -> None:
        """Open a file-browser window with the file selected.

        NOTE(review): uses Windows Explorer only; other platforms are not
        handled here even though the mtime logic above accounts for them.
        """
        filepath = '{0}/{1}'.format(self._root_directory, identifier)
        if os.path.exists(filepath):
            subprocess.Popen(r'explorer /select,"{0}"'.format(filepath.replace('/', '\\')))


class FilesystemManifest(Filesystem):
    """Filesystem delegate restricted to the ConversionManifest.json file."""

    def __init__(self, root_directory: str):
        super(FilesystemManifest, self).__init__(root_directory=root_directory)

    def get_ids(self) -> typing.List[str]:
        identifiers: typing.List[str] = []
        for directory, sub_directories, filenames in os.walk(self._root_directory):
            for filename in filenames:
                if not filename.lower() == 'conversionmanifest.json':
                    continue
                identifiers.append(filename)
            break
        return identifiers


class FilesystemSettings(Filesystem):
    """Filesystem delegate restricted to the settings.json file."""

    def __init__(self, root_directory: str):
        super(FilesystemSettings, self).__init__(root_directory=root_directory)

    def get_ids(self) -> typing.List[str]:
        identifiers: typing.List[str] = []
        for directory, sub_directories, filenames in os.walk(self._root_directory):
            for filename in filenames:
                if not filename.lower() == 'settings.json':
                    continue
                identifiers.append(filename)
            break
        return identifiers
10,456
Python
40.995984
176
0.608072
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/core.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import abc
import typing
import uuid


class ChangeEvent(object):
    """Immutable record of the item identifiers that changed in one poll pass."""

    def __init__(self, added: typing.Tuple[str, ...], modified: typing.Tuple[str, ...], removed: typing.Tuple[str, ...]):
        super(ChangeEvent, self).__init__()
        self.__added: typing.Tuple[str, ...] = added
        self.__modified: typing.Tuple[str, ...] = modified
        self.__removed: typing.Tuple[str, ...] = removed

    def __str__(self):
        o = 'omni.universalmaterialmap.core.service.core.ChangeEvent('
        o += '\n\tadded: '
        o += ', '.join(self.__added)
        o += '\n\tmodified: '
        o += ', '.join(self.__modified)
        o += '\n\tremoved: '
        o += ', '.join(self.__removed)
        o += '\n)'
        return o

    @property
    def added(self) -> typing.Tuple[str, ...]:
        # Identifiers that appeared since the previous poll pass.
        return self.__added

    @property
    def modified(self) -> typing.Tuple[str, ...]:
        # Identifiers whose contents changed since the previous poll pass.
        return self.__modified

    @property
    def removed(self) -> typing.Tuple[str, ...]:
        # Identifiers that disappeared since the previous poll pass.
        return self.__removed


class IDelegate(metaclass=abc.ABCMeta):
    """ Interface for an online library database table. """

    @abc.abstractmethod
    def get_ids(self) -> typing.List[str]:
        """ Returns a list of identifiers. """
        raise NotImplementedError

    @abc.abstractmethod
    def read(self, identifier: str) -> typing.Dict:
        """ Returns a JSON dictionary if an item by the given identifier exists - otherwise None """
        raise NotImplementedError

    @abc.abstractmethod
    def write(self, identifier: str, contents: typing.Dict) -> None:
        """ Creates or updates an item by using the JSON contents data. """
        raise NotImplementedError

    @abc.abstractmethod
    def delete(self, identifier: str) -> None:
        """ Deletes an item by the given identifier if it exists. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_show_in_store(self, identifier: str) -> bool:
        """ Returns True if an item by the given identifier can be revealed in the platform's file browser. """
        raise NotImplementedError

    @abc.abstractmethod
    def show_in_store(self, identifier: str) -> None:
        """ Reveals an item by the given identifier in the platform's file browser if it exists. """
        raise NotImplementedError

    @abc.abstractmethod
    def can_poll(self) -> bool:
        """ States if delegate is able to poll file changes and provide subscription to those changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def start_polling(self) -> None:
        """ Starts monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def stop_polling(self) -> None:
        """ Stops monitoring files for changes. """
        raise NotImplementedError

    @abc.abstractmethod
    def add_change_subscription(self, callback: typing.Callable[[ChangeEvent], typing.NoReturn]) -> uuid.UUID:
        """ Creates a subscription for file changes in location managed by delegate. """
        raise NotImplementedError

    @abc.abstractmethod
    def remove_change_subscription(self, subscription_id: uuid.UUID) -> None:
        """ Removes the subscription for file changes in location managed by delegate. """
        raise NotImplementedError
4,024
Python
34.307017
111
0.657306
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/core/service/resources/__init__.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

import os
import shutil
import json
import inspect

from ...data import FileUtility, Target, ConversionGraph, ConversionManifest


def __copy(source_path: str, destination_path: str) -> None:
    """Copy a file, logging and re-raising on failure."""
    try:
        shutil.copy(source_path, destination_path)
    except Exception as error:
        print('Error installing UMM data. Unable to copy source "{0}" to destination "{1}".\n Details: {2}'.format(source_path, destination_path, error))
        raise error


def __install_library(source_root: str, destination_root: str) -> None:
    """Mirror the .json library files under *source_root* into *destination_root*.

    Existing destination files are only overwritten when the source holds a
    newer revision (Target / ConversionGraph) or a newer (major, minor) version
    (ConversionManifest).
    """
    source_root = source_root.replace('\\', '/')
    destination_root = destination_root.replace('\\', '/')
    for directory, sub_directories, filenames in os.walk(source_root):
        directory = directory.replace('\\', '/')
        destination_directory = directory.replace(source_root, destination_root)
        destination_directory_created = os.path.exists(destination_directory)
        for filename in filenames:
            if not filename.lower().endswith('.json'):
                continue
            source_path = '{0}/{1}'.format(directory, filename)
            destination_path = '{0}/{1}'.format(destination_directory, filename)
            if not destination_directory_created:
                try:
                    os.makedirs(destination_directory)
                    destination_directory_created = True
                except Exception as error:
                    print('Universal Material Map error installing data. Unable to create directory "{0}".\n Details: {1}'.format(destination_directory, error))
                    raise error
            if not os.path.exists(destination_path):
                # Nothing installed yet: copy unconditionally.
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed "{0}".'.format(destination_path))
                continue
            try:
                with open(source_path, 'r') as fp:
                    source = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                print('Universal Material Map error installing data. Unable to read source "{0}". \n Details: {1}'.format(source_path, error))
                raise error
            try:
                with open(destination_path, 'r') as fp:
                    destination = FileUtility.FromData(data=json.load(fp)).content
            except Exception as error:
                # An unreadable destination is assumed to be newer; skip it.
                print('Warning: Universal Material Map error installing data. Unable to read destination "{0}". It is assumed that the installed version is more recent than the one attempted to be installed.\n Details: {1}'.format(destination_path, error))
                continue
            if isinstance(source, Target) and isinstance(destination, Target):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionGraph) and isinstance(destination, ConversionGraph):
                if source.revision > destination.revision:
                    __copy(source_path=source_path, destination_path=destination_path)
                    print('Universal Material Map installed the more recent revision #{0} of "{1}".'.format(source.revision, destination_path))
                continue
            if isinstance(source, ConversionManifest) and isinstance(destination, ConversionManifest):
                # BUGFIX: compare (major, minor) as a tuple. The previous check
                # ("minor <= installed minor" after a separate major check)
                # wrongly skipped a newer major version whenever its minor
                # number was not greater (e.g. source 2.0 vs installed 1.5).
                if (source.version_major, source.version_minor) <= (destination.version_major, destination.version_minor):
                    continue
                __copy(source_path=source_path, destination_path=destination_path)
                print('Universal Material Map installed the more recent revision #{0}.{1} of "{2}".'.format(source.version_major, source.version_minor, destination_path))
                continue


def install() -> None:
    """Install each bundled UMM library into ~/Documents/Omniverse/<name>/UMMLibrary."""
    # Sibling directories of this module (excluding __pycache__) are libraries.
    current_path = inspect.getfile(inspect.currentframe()).replace('\\', '/')
    current_path = current_path[:current_path.rfind('/')]
    library_names = []
    for o in os.listdir(current_path):
        path = '{0}/{1}'.format(current_path, o)
        if os.path.isdir(path) and not o == '__pycache__':
            library_names.append(o)
    libraries_directory = os.path.expanduser('~').replace('\\', '/')
    if not libraries_directory.endswith('/Documents'):
        # os.path.expanduser() has different behaviour between 2.7 and 3
        libraries_directory = '{0}/Documents'.format(libraries_directory)
    libraries_directory = '{0}/Omniverse'.format(libraries_directory)
    for library_name in library_names:
        source_root = '{0}/{1}/UMMLibrary'.format(current_path, library_name)
        destination_root = '{0}/{1}/UMMLibrary'.format(libraries_directory, library_name)
        __install_library(source_root=source_root, destination_root=destination_root)
5,935
Python
49.735042
256
0.643134
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/converter.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.

# Blender-side converters for the Universal Material Map (UMM) add-on.
# Translates between Blender materials / shader nodes and UMM conversion-graph
# data (e.g. MDL OmniPBR attribute lists), driven by a ConversionManifest
# loaded from the UMM store.

import typing
import sys
import traceback
import os
import re
import json
import math

import bpy
import bpy_types

from . import get_library, get_value, CORE_MATERIAL_PROPERTIES, create_template, developer_mode, get_template_data_by_shader_node, get_template_data_by_class_name, create_from_template
from ..core.converter.core import ICoreConverter, IObjectConverter, IDataConverter
from ..core.converter import util
from ..core.service import store
from ..core.data import Plug, ConversionManifest, DagNode, ConversionGraph, TargetInstance
from ..core.util import get_extension_from_image_file_format

__initialized: bool = False
# Lazily-populated module-level cache of the conversion manifest.
__manifest: ConversionManifest = None


def _get_manifest() -> ConversionManifest:
    """Return the cached ConversionManifest, loading it from the store on first use."""
    if not getattr(sys.modules[__name__], '__manifest'):
        setattr(sys.modules[__name__], '__manifest', store.get_conversion_manifest(library=get_library()))
        if developer_mode:
            manifest: ConversionManifest = getattr(sys.modules[__name__], '__manifest')
            print('UMM DEBUG: blender.converter._get_manifest(): num entries = "{0}"'.format(len(manifest.conversion_maps)))
            for conversion_map in manifest.conversion_maps:
                print('UMM DEBUG: blender.converter._get_manifest(): Entry: graph_id = "{0}", render_context = "{1}"'.format(conversion_map.conversion_graph_id, conversion_map.render_context))
    return getattr(sys.modules[__name__], '__manifest')


def _get_conversion_graph_impl(source_class: str, render_context: str) -> typing.Union[ConversionGraph, typing.NoReturn]:
    """Find the first ConversionGraph in the manifest matching source_class and render_context, or None."""
    if developer_mode:
        print('UMM DEBUG: blender.converter._get_conversion_graph_impl(source_class="{0}", render_context="{1}")'.format(source_class, render_context))
    for conversion_map in _get_manifest().conversion_maps:
        if not conversion_map.render_context == render_context:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.render_context "{0}" != "{1}")'.format(conversion_map.render_context, render_context))
            continue
        if not conversion_map.conversion_graph:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph "{0}")'.format(conversion_map.conversion_graph))
            continue
        if not conversion_map.conversion_graph.source_node:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.source_node "{0}")'.format(conversion_map.conversion_graph.source_node))
            continue
        if not conversion_map.conversion_graph.source_node.target.root_node.class_name == source_class:
            if developer_mode:
                print('UMM DEBUG: blender.converter._get_conversion_graph_impl: conversion_map.conversion_graph.source_node.target.root_node.class_name "{0}" != "{1}")'.format(conversion_map.conversion_graph.source_node.target.root_node.class_name, source_class))
            continue
        if developer_mode:
            print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found match "{0}")'.format(conversion_map.conversion_graph.filename))
        return conversion_map.conversion_graph
    if developer_mode:
        print('UMM DEBUG: blender.converter._get_conversion_graph_impl: found no match!)')
    return None


def _instance_to_output_entity(graph: ConversionGraph, instance: object) -> TargetInstance:
    """Populate the graph's source-node outputs from `instance` and return the graph's output entity.

    Outputs that cannot be read off the instance fall back to their default value.
    """
    if developer_mode:
        print('_instance_to_output_entity')
    for output in graph.source_node.outputs:
        if output.name == 'node_id_output':
            continue
        if util.can_set_plug_value(instance=instance, plug=output):
            util.set_plug_value(instance=instance, plug=output)
        else:
            print('UMM Warning: Unable to set output plug "{0}"... using default value of "{1}"'.format(output.name, output.default_value))
            output.value = output.default_value
    return graph.get_output_entity()


def _data_to_output_entity(graph: ConversionGraph, data: typing.List[typing.Tuple[str, typing.Any]]) -> TargetInstance:
    """Populate the graph's source-node outputs from (name, value) pairs and return the output entity."""
    for output in graph.source_node.outputs:
        if output.name == 'node_id_output':
            continue
        o = [o for o in data if o[0] == output.name]
        if len(o):
            output.value = o[0][1]
        else:
            output.value = output.default_value
    return graph.get_output_entity()


def _instance_to_data(instance: object, graph: ConversionGraph) -> typing.List[typing.Tuple[str, typing.Any]]:
    """Convert `instance` through `graph` into a list of (attribute_name, value) tuples.

    The first tuple identifies the conversion target class via util.TARGET_CLASS_IDENTIFIER.
    """
    target_instance = _instance_to_output_entity(graph=graph, instance=instance)
    if developer_mode:
        print('_instance_to_data')
        print('\ttarget_instance.target.store_id', target_instance.target.store_id)
    # Compute target attribute values
    attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)]
    for plug in target_instance.inputs:
        if not plug.input:
            continue
        if developer_mode:
            print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid))
        if plug.is_invalid and isinstance(plug.parent, DagNode):
            # Lazily recompute stale plugs before reading their value.
            plug.parent.compute()
        if developer_mode:
            print('\t{} computed value = {}'.format(plug.name, plug.computed_value))
        attribute_data.append((plug.name, plug.computed_value))
    return attribute_data


def _to_convertible_instance(instance: object, material: bpy.types.Material = None) -> object:
    """Resolve `instance` to the shader node that UMM can actually convert.

    Walks from a Material (or arbitrary node) to the surface shader feeding the
    material output; currently only ShaderNodeBsdfPrincipled is accepted (other
    BSDF types are commented out below). Returns None when nothing convertible
    is found, or the original instance when no material context can be resolved.
    """
    if developer_mode:
        print('_to_convertible_instance', type(instance))
    if material is None:
        if isinstance(instance, bpy.types.Material):
            material = instance
        else:
            # Find the material whose node tree contains this node.
            for m in bpy.data.materials:
                if not m.use_nodes:
                    continue
                if not len([o for o in m.node_tree.nodes if o == instance]):
                    continue
                material = m
                break
    if material is None:
        return instance
    if not material.use_nodes:
        return material
    if instance == material:
        # Find the Surface Shader.
        for link in material.node_tree.links:
            if not isinstance(link, bpy.types.NodeLink):
                continue
            if not isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial):
                continue
            if not link.to_socket.name == 'Surface':
                continue
            result = _to_convertible_instance(instance=link.from_node, material=material)
            if result is not None:
                return result
        # No surface shader found - return instance
        return instance
    if isinstance(instance, bpy.types.ShaderNodeAddShader):
        # Recurse into the inputs of an Add Shader to find a supported BSDF.
        for link in material.node_tree.links:
            if not isinstance(link, bpy.types.NodeLink):
                continue
            if not link.to_node == instance:
                continue
            # if not link.to_socket.name == 'Shader':
            #     continue
            result = _to_convertible_instance(instance=link.from_node, material=material)
            if result is not None:
                return result
    # if isinstance(instance, bpy.types.ShaderNodeBsdfGlass):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfGlossy):
    #     return instance
    if isinstance(instance, bpy.types.ShaderNodeBsdfPrincipled):
        return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfRefraction):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfTranslucent):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeBsdfTransparent):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeEeveeSpecular):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeEmission):
    #     return instance
    # if isinstance(instance, bpy.types.ShaderNodeSubsurfaceScattering):
    #     return instance
    return None


class CoreConverter(ICoreConverter):
    """Shared base for Blender UMM converters; exposes the conversion manifest."""

    def __init__(self):
        super(CoreConverter, self).__init__()

    def get_conversion_manifest(self) -> typing.List[typing.Tuple[str, str]]:
        """
        Returns data indicating what source class can be converted to a render context.

        Example: [('lambert', 'MDL'), ('blinn', 'MDL'),]
        """
        output = []
        for conversion_map in _get_manifest().conversion_maps:
            if not conversion_map.render_context:
                continue
            if not conversion_map.conversion_graph:
                continue
            if not conversion_map.conversion_graph.source_node:
                continue
            output.append((conversion_map.conversion_graph.source_node.target.root_node.class_name, conversion_map.render_context))
        return output


class ObjectConverter(CoreConverter, IObjectConverter):
    """Converts between live Blender objects (materials / shader nodes) and UMM attribute data."""

    # Fully-qualified class name accepted by can_create_instance for plain materials.
    MATERIAL_CLASS = 'bpy.types.Material'

    # Shader-node class names this worker recognizes as creatable/convertible sources.
    SHADER_NODES = [
        'bpy.types.ShaderNodeBsdfGlass',
        'bpy.types.ShaderNodeBsdfGlossy',
        'bpy.types.ShaderNodeBsdfPrincipled',
        'bpy.types.ShaderNodeBsdfRefraction',
        'bpy.types.ShaderNodeBsdfTranslucent',
        'bpy.types.ShaderNodeBsdfTransparent',
        'bpy.types.ShaderNodeEeveeSpecular',
        'bpy.types.ShaderNodeEmission',
        'bpy.types.ShaderNodeSubsurfaceScattering',
    ]

    def can_create_instance(self, class_name: str) -> bool:
        """ Returns true if worker can generate an object of the given class name. """
        if class_name == ObjectConverter.MATERIAL_CLASS:
            return True
        return class_name in ObjectConverter.SHADER_NODES

    def create_instance(self, class_name: str, name: str = 'material') -> object:
        """ Creates an object of the given class name. """
        material = bpy.data.materials.new(name=name)
        if class_name in ObjectConverter.SHADER_NODES:
            # NOTE(review): for shader-node class names this enables use_nodes but
            # still returns the Material, not a node instance — confirm intended.
            material.use_nodes = True
        return material

    def can_set_plug_value(self, instance: object, plug: Plug) -> bool:
        """ Returns true if worker can set the plug's value given the instance and its attributes. """
        if plug.input:
            return False
        if isinstance(instance, bpy.types.Material):
            # Only whitelisted core material properties are readable off a Material.
            for o in CORE_MATERIAL_PROPERTIES:
                if o[0] == plug.name:
                    return hasattr(instance, plug.name)
            return False
        if isinstance(instance, bpy_types.ShaderNode):
            # Require exactly one input socket with a matching name.
            return len([o for o in instance.inputs if o.name == plug.name]) == 1
        return False

    def set_plug_value(self, instance: object, plug: Plug) -> typing.NoReturn:
        """ Sets the plug's value given the value of the instance's attribute named the same as the plug. """
        if isinstance(instance, bpy.types.Material):
            plug.value = getattr(instance, plug.name)
            if developer_mode:
                print('set_plug_value')
                print('\tinstance', type(instance))
                print('\tname', plug.name)
                print('\tvalue', plug.value)
            return
        inputs = [o for o in instance.inputs if o.name == plug.name]
        if not len(inputs) == 1:
            return
        plug.value = get_value(socket=inputs[0])
        if developer_mode:
            # print('set_plug_value')
            # print('\tinstance', type(instance))
            # print('\tname', plug.name)
            # print('\tvalue', plug.value)
            print('\tset_plug_value: {} = {}'.format(plug.name, plug.value))

    def can_set_instance_attribute(self, instance: object, name: str):
        """ Resolves if worker can set an attribute by the given name on the instance. """
        return False

    def set_instance_attribute(self, instance: object, name: str, value: typing.Any) -> typing.NoReturn:
        """ Sets the named attribute on the instance to the value. """
        raise NotImplementedError()

    def can_convert_instance(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        return False

    def convert_instance_to_instance(self, instance: object, render_context: str) -> typing.Any:
        """ Converts the instance to another object given the render_context. """
        raise NotImplementedError()

    def can_convert_instance_to_data(self, instance: object, render_context: str) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        node = _to_convertible_instance(instance=instance)
        if node is not None and not node == instance:
            if developer_mode:
                print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node)))
            instance = node
        template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance)
        if template is None:
            # No template match: fall back to a class-name based conversion graph.
            class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__)
            conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
            if not conversion_graph:
                return False
            try:
                destination_target_instance = _instance_to_output_entity(graph=conversion_graph, instance=instance)
            except Exception as error:
                print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error))
                return False
            return destination_target_instance is not None
        else:
            conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context)
            return conversion_graph is not None

    def convert_instance_to_data(self, instance: object, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        node = _to_convertible_instance(instance=instance)
        if node is not None and not node == instance:
            if developer_mode:
                print('Found graph node to use instead of bpy.types.Material: {0}'.format(type(node)))
            instance = node
        template, template_map, template_shader_name, material = get_template_data_by_shader_node(shader_node=instance)
        if template is None:
            # No template: simple per-class conversion via the manifest graph.
            class_name = '{0}.{1}'.format(instance.__class__.__module__, instance.__class__.__name__)
            conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
            return _instance_to_data(instance=instance, graph=conversion_graph)
        else:
            conversion_graph = _get_conversion_graph_impl(source_class=template_shader_name, render_context=render_context)
            if developer_mode:
                print('conversion_graph', conversion_graph.filename)
            # set plug values on conversion_graph.source_node.outputs
            for output in conversion_graph.source_node.outputs:
                if output.name == 'node_id_output':
                    continue
                if developer_mode:
                    print('output', output.name)
                # Locate the internal graph node that owns this output plug.
                internal_node = None
                for a in conversion_graph.source_node.target.nodes:
                    for b in a.outputs:
                        if output.id == b.id:
                            internal_node = a
                            break
                    if internal_node is not None:
                        break
                if internal_node is None:
                    raise NotImplementedError(f"No internal node found for {output.name}")
                # Map the internal node/socket to a concrete Blender node via the template map.
                map_definition = None
                for o in template_map['maps']:
                    if o['blender_node'] == internal_node.id and o['blender_socket'] == output.name:
                        map_definition = o
                        break
                if map_definition is None:
                    raise NotImplementedError(f"No map definition found for {output.name}")
                if developer_mode:
                    print('map_definition', map_definition['blender_node'])
                if map_definition['blender_node'] == '':
                    # Empty mapping means: no backing Blender node; use the default.
                    output.value = output.default_value
                    if developer_mode:
                        print('output.value', output.value)
                    continue
                for shader_node in material.node_tree.nodes:
                    if not shader_node.name == map_definition['blender_node']:
                        continue
                    if isinstance(shader_node, bpy.types.ShaderNodeTexImage):
                        if map_definition['blender_socket'] == 'image':
                            if shader_node.image and (shader_node.image.source == 'FILE' or shader_node.image.source == 'TILED'):
                                print(f'UMM: image.filepath: "{shader_node.image.filepath}"')
                                print(f'UMM: image.source: "{shader_node.image.source}"')
                                print(f'UMM: image.file_format: "{shader_node.image.file_format}"')
                                value = shader_node.image.filepath
                                if (shader_node.image.source == 'TILED'):
                                    # Find all numbers in the path.
                                    numbers = re.findall('[0-9]+', value)
                                    if (len(numbers) > 0):
                                        # Get the string representation of the last number.
                                        num_str = str(numbers[-1])
                                        # Replace the number substring with '<UDIM>'.
                                        split_items = value.rsplit(num_str, 1)
                                        if (len(split_items) == 2):
                                            value = split_items[0] + '<UDIM>' + split_items[1]
                                try:
                                    if value is None or value == '':
                                        # Packed/unsaved image: synthesize a filename from the image name.
                                        file_format = shader_node.image.file_format
                                        file_format = get_extension_from_image_file_format(file_format, shader_node.image.name)
                                        if not shader_node.image.name.endswith(file_format):
                                            value = f'{shader_node.image.name}.{file_format}'
                                        else:
                                            value = shader_node.image.name
                                        output.value = [value, shader_node.image.colorspace_settings.name]
                                    else:
                                        output.value = [os.path.abspath(bpy.path.abspath(value)), shader_node.image.colorspace_settings.name]
                                except Exception as error:
                                    print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". Detail: {1}'.format(shader_node.image.filepath, error))
                                    output.value = ['', 'raw']
                                print(f'UMM: output.value: "{output.value}"')
                            else:
                                if developer_mode:
                                    print('setting default value for output.value')
                                    if not shader_node.image:
                                        print('\tshader_node.image == None')
                                    else:
                                        print('\tshader_node.image.source == {}'.format(shader_node.image.source))
                                output.value = ['', 'raw']
                            if developer_mode:
                                print('output.value', output.value)
                            break
                        # Only the 'image' socket is supported on texture nodes.
                        raise NotImplementedError(f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}")
                    if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled):
                        socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                        output.value = socket.default_value
                        if developer_mode:
                            print('output.value', output.value)
                        break
                    if isinstance(shader_node, bpy.types.ShaderNodeGroup):
                        if map_definition['blender_socket'] not in shader_node.inputs.keys():
                            if developer_mode:
                                print(f'{map_definition["blender_socket"]} not in shader_node.inputs.keys()')
                            break
                        socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                        output.value = socket.default_value
                        if developer_mode:
                            print('output.value', output.value)
                        break
                    if isinstance(shader_node, bpy.types.ShaderNodeMapping):
                        socket: bpy.types.NodeSocketStandard = shader_node.inputs[map_definition['blender_socket']]
                        value = socket.default_value
                        if output.name == 'Rotation':
                            # Blender stores rotation in radians; UMM data uses degrees.
                            value = [
                                math.degrees(value[0]),
                                math.degrees(value[1]),
                                math.degrees(value[2])
                            ]
                        output.value = value
                        if developer_mode:
                            print('output.value', output.value)
                        break
            # compute to target_instance for output
            target_instance = conversion_graph.get_output_entity()
            if developer_mode:
                print('_instance_to_data')
                print('\ttarget_instance.target.store_id', target_instance.target.store_id)
            # Compute target attribute values
            attribute_data = [(util.TARGET_CLASS_IDENTIFIER, target_instance.target.root_node.class_name)]
            for plug in target_instance.inputs:
                if not plug.input:
                    continue
                if developer_mode:
                    print('\t{} is invalid: {}'.format(plug.name, plug.is_invalid))
                if plug.is_invalid and isinstance(plug.parent, DagNode):
                    plug.parent.compute()
                if developer_mode:
                    print('\t{} computed value = {}'.format(plug.name, plug.computed_value))
                value = plug.computed_value
                if plug.internal_value_type == 'bool':
                    # Normalize truthy values to real booleans for bool-typed plugs.
                    value = True if value else False
                attribute_data.append((plug.name, value))
            return attribute_data

    def can_convert_attribute_values(self, instance: object, render_context: str, destination: object) -> bool:
        """ Resolves if the instance's attribute values can be converted and set on the destination object's attributes. """
        raise NotImplementedError()

    def convert_attribute_values(self, instance: object, render_context: str, destination: object) -> typing.NoReturn:
        """ Attribute values are converted and set on the destination object's attributes. """
        raise NotImplementedError()

    def can_apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> bool:
        """ Resolves if worker can convert the instance to another object given the render_context. """
        if developer_mode:
            print('can_apply_data_to_instance()')
        if not isinstance(instance, bpy.types.Material):
            if developer_mode:
                print('can_apply_data_to_instance: FALSE - instance not bpy.types.Material')
            return False
        if not render_context == 'Blender':
            if developer_mode:
                print('can_apply_data_to_instance: FALSE - render_context not "Blender"')
            return False
        conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context)
        if not conversion_graph:
            if developer_mode:
                print('can_apply_data_to_instance: FALSE - conversion_graph is None')
            return False
        if developer_mode:
            print(f'conversion_graph {conversion_graph.filename}')
        try:
            destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
        except Exception as error:
            print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error))
            return False
        if developer_mode:
            if destination_target_instance is None:
                print('destination_target_instance is None')
            # NOTE(review): this branch repeats the condition above; it likely was
            # meant to test `destination_target_instance.target is None` — confirm.
            elif destination_target_instance is None:
                print('destination_target_instance.target is None')
            else:
                print('destination_target_instance.target is not None')
        if destination_target_instance is None or destination_target_instance.target is None:
            return False
        if developer_mode:
            print(f'num destination_target_instance.target.nodes: {len(destination_target_instance.target.nodes)}')
        if len(destination_target_instance.target.nodes) < 2:
            # Simple (non-templated) targets need no template lookup.
            return True
        template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name)
        if developer_mode:
            print(f'return {template is not None}')
        return template is not None

    def apply_data_to_instance(self, source_class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]], instance: object) -> None:
        """
        Implementation requires that `instance` is type `bpy.types.Material`.
        """
        if developer_mode:
            print('apply_data_to_instance()')
        if not isinstance(instance, bpy.types.Material):
            raise Exception('instance type not supported', type(instance))
        if not render_context == 'Blender':
            raise Exception('render_context not supported', render_context)
        conversion_graph = _get_conversion_graph_impl(source_class=source_class_name, render_context=render_context)
        # This only works for Blender import of MDL/USDPreview. Blender export would need to use convert_instance_to_data().
        destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
        material: bpy.types.Material = instance
        # Make sure we're using nodes
        material.use_nodes = True
        # Remove existing nodes - we're starting from scratch - assuming Blender import
        to_delete = [o for o in material.node_tree.nodes]
        while len(to_delete):
            material.node_tree.nodes.remove(to_delete.pop())
        if len(destination_target_instance.target.nodes) < 2:
            # Create base graph
            output_node = material.node_tree.nodes.new('ShaderNodeOutputMaterial')
            output_node.location = [300.0, 300.0]
            bsdf_node = material.node_tree.nodes.new('ShaderNodeBsdfPrincipled')
            bsdf_node.location = [0.0, 300.0]
            material.node_tree.links.new(bsdf_node.outputs[0], output_node.inputs[0])
            # Cache texture nodes by "path|colorspace" so identical maps are reused.
            node_cache = dict()
            node_location = [-500, 300]
            # Create graph if texture value
            for plug in destination_target_instance.inputs:
                if not plug.input:
                    continue
                if isinstance(plug.computed_value, list) or isinstance(plug.computed_value, tuple):
                    # A 2-string pair is treated as (texture path, colorspace name).
                    if len(plug.computed_value) == 2 and isinstance(plug.computed_value[0], str) and isinstance(plug.computed_value[1], str):
                        key = '{0}|{1}'.format(plug.computed_value[0], plug.computed_value[1])
                        if key in node_cache.keys():
                            node = node_cache[key]
                        else:
                            try:
                                path = plug.computed_value[0]
                                if not path == '':
                                    node = material.node_tree.nodes.new('ShaderNodeTexImage')
                                    path = plug.computed_value[0]
                                    if '<UDIM>' in path:
                                        # Expand the <UDIM> token into a tile-number regex and
                                        # load every matching tile from the directory.
                                        pattern = path.replace('\\', '/')
                                        pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]')
                                        directory = pattern[:pattern.rfind('/') + 1]
                                        pattern = pattern.replace(directory, '')
                                        image_set = False
                                        for item in os.listdir(directory):
                                            if re.match(pattern, item):
                                                tile_path = '{}{}'.format(directory, item)
                                                if not os.path.isfile(tile_path):
                                                    continue
                                                if not image_set:
                                                    node.image = bpy.data.images.load(tile_path)
                                                    node.image.source = 'TILED'
                                                    image_set = True
                                                    continue
                                                tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item)
                                                node.image.tiles.new(int(tile_indexes[-1]))
                                    else:
                                        node.image = bpy.data.images.load(path)
                                    node.image.colorspace_settings.name = plug.computed_value[1]
                                else:
                                    continue
                            except Exception as error:
                                print('Warning: UMM failed to properly setup a ShaderNodeTexImage. Details: {0}\n{1}'.format(error, traceback.format_exc()))
                                continue
                            node_cache[key] = node
                            node.location = node_location
                            node_location[1] -= 300
                        bsdf_input = [o for o in bsdf_node.inputs if o.name == plug.name][0]
                        if plug.name == 'Metallic':
                            # Metallic reads the blue channel via a Separate RGB node.
                            separate_node = None
                            for link in material.node_tree.links:
                                if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB':
                                    separate_node = link.to_node
                                    break
                            if separate_node is None:
                                separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB')
                                separate_node.location = [node.location[0] + 250, node.location[1]]
                                material.node_tree.links.new(node.outputs[0], separate_node.inputs[0])
                            material.node_tree.links.new(separate_node.outputs[2], bsdf_input)
                        elif plug.name == 'Roughness':
                            # Roughness reads the green channel via a Separate RGB node.
                            separate_node = None
                            for link in material.node_tree.links:
                                if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeSeparateRGB':
                                    separate_node = link.to_node
                                    break
                            if separate_node is None:
                                separate_node = material.node_tree.nodes.new('ShaderNodeSeparateRGB')
                                separate_node.location = [node.location[0] + 250, node.location[1]]
                                material.node_tree.links.new(node.outputs[0], separate_node.inputs[0])
                            material.node_tree.links.new(separate_node.outputs[1], bsdf_input)
                        elif plug.name == 'Normal':
                            # Normal maps go through a Normal Map node.
                            normal_node = None
                            for link in material.node_tree.links:
                                if link.from_node == node and link.to_node.__class__.__name__ == 'ShaderNodeNormalMap':
                                    normal_node = link.to_node
                                    break
                            if normal_node is None:
                                normal_node = material.node_tree.nodes.new('ShaderNodeNormalMap')
                                normal_node.location = [node.location[0] + 250, node.location[1]]
                                material.node_tree.links.new(node.outputs[0], normal_node.inputs[1])
                            material.node_tree.links.new(normal_node.outputs[0], bsdf_input)
                        else:
                            material.node_tree.links.new(node.outputs[0], bsdf_input)
                        continue
                # Set Value
                blender_inputs = [o for o in bsdf_node.inputs if o.name == plug.name]
                if len(blender_inputs) == 0:
                    # No matching socket: try an RNA property of the same name.
                    for property_name, property_object in bsdf_node.rna_type.properties.items():
                        if not property_name == plug.name:
                            continue
                        if property_object.is_readonly:
                            break
                        try:
                            setattr(bsdf_node, property_name, plug.computed_value)
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, plug.computed_value, error))
                else:
                    if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                        continue
                    try:
                        blender_inputs[0].default_value = plug.computed_value
                    except Exception as error:
                        print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(plug.name, plug.computed_value, error))
            return
        if developer_mode:
            print(f'TEMPLATE CREATION BASED ON {destination_target_instance.target.root_node.class_name}')
        # find template to use
        template, template_map = get_template_data_by_class_name(class_name=destination_target_instance.target.root_node.class_name)
        if developer_mode:
            print(f"TEMPLATE NAME {template['name']}")
        # create graph
        create_from_template(material=material, template=template)
        # set attributes
        use_albedo_map = False
        use_normal_map = False
        use_detail_normal_map = False
        use_emission_map = False
        for input_plug in destination_target_instance.inputs:
            # if developer_mode:
            #     print('input_plug', input_plug.name)
            # Locate the internal graph node that owns this input plug.
            internal_node = None
            for a in destination_target_instance.target.nodes:
                for b in a.inputs:
                    if input_plug.id == b.id:
                        internal_node = a
                        break
                if internal_node is not None:
                    break
            if internal_node is None:
                raise NotImplementedError(f"No internal node found for {input_plug.name}")
            map_definition = None
            for o in template_map['maps']:
                if o['blender_node'] == internal_node.id and o['blender_socket'] == input_plug.name:
                    map_definition = o
                    break
            if map_definition is None:
                raise NotImplementedError(f"No map definition found for {internal_node.id} {input_plug.name}")
            for shader_node in material.node_tree.nodes:
                if not shader_node.name == map_definition['blender_node']:
                    continue
                # if developer_mode:
                #     print(f'node: {shader_node.name}')
                if isinstance(shader_node, bpy.types.ShaderNodeTexImage):
                    if map_definition['blender_socket'] == 'image':
                        # if developer_mode:
                        #     print(f'\tbpy.types.ShaderNodeTexImage: path: {input_plug.computed_value[0]}')
                        #     print(f'\tbpy.types.ShaderNodeTexImage: colorspace: {input_plug.computed_value[1]}')
                        path = input_plug.computed_value[0]
                        if not path == '':
                            if '<UDIM>' in path:
                                # Expand <UDIM> into a tile regex and load each matching tile.
                                pattern = path.replace('\\', '/')
                                pattern = pattern.replace('<UDIM>', '[0-9][0-9][0-9][0-9]')
                                directory = pattern[:pattern.rfind('/') + 1]
                                pattern = pattern.replace(directory, '')
                                image_set = False
                                for item in os.listdir(directory):
                                    if re.match(pattern, item):
                                        tile_path = '{}{}'.format(directory, item)
                                        if not os.path.isfile(tile_path):
                                            continue
                                        if not image_set:
                                            shader_node.image = bpy.data.images.load(tile_path)
                                            shader_node.image.source = 'TILED'
                                            image_set = True
                                            continue
                                        tile_indexes = re.findall('[0-9][0-9][0-9][0-9]', item)
                                        shader_node.image.tiles.new(int(tile_indexes[-1]))
                            else:
                                shader_node.image = bpy.data.images.load(path)
                            # Record which well-known maps were populated for the UX toggles below.
                            if map_definition['blender_node'] == 'Albedo Map':
                                use_albedo_map = True
                            if map_definition['blender_node'] == 'Normal Map':
                                use_normal_map = True
                            if map_definition['blender_node'] == 'Detail Normal Map':
                                use_detail_normal_map = True
                            if map_definition['blender_node'] == 'Emissive Map':
                                use_emission_map = True
                            shader_node.image.colorspace_settings.name = input_plug.computed_value[1]
                        continue
                    raise NotImplementedError(
                        f"No support for bpy.types.ShaderNodeTexImage {map_definition['blender_socket']}")
                if isinstance(shader_node, bpy.types.ShaderNodeBsdfPrincipled):
                    blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                    if len(blender_inputs) == 0:
                        # No matching socket: try an RNA property of the same name.
                        for property_name, property_object in shader_node.rna_type.properties.items():
                            if not property_name == input_plug.name:
                                continue
                            if property_object.is_readonly:
                                break
                            try:
                                setattr(shader_node, property_name, input_plug.computed_value)
                            except Exception as error:
                                print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                    else:
                        if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                            continue
                        try:
                            blender_inputs[0].default_value = input_plug.computed_value
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                    continue
                if isinstance(shader_node, bpy.types.ShaderNodeGroup):
                    blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                    if len(blender_inputs) == 0:
                        for property_name, property_object in shader_node.rna_type.properties.items():
                            if not property_name == input_plug.name:
                                continue
                            if property_object.is_readonly:
                                break
                            try:
                                setattr(shader_node, property_name, input_plug.computed_value)
                            except Exception as error:
                                print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                    else:
                        if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                            continue
                        try:
                            blender_inputs[0].default_value = input_plug.computed_value
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                    continue
                if isinstance(shader_node, bpy.types.ShaderNodeMapping):
                    blender_inputs = [o for o in shader_node.inputs if o.name == input_plug.name]
                    value = input_plug.computed_value
                    if input_plug.name == 'Rotation':
                        # UMM data carries degrees; Blender mapping sockets expect radians.
                        value[0] = math.radians(value[0])
                        value[1] = math.radians(value[1])
                        value[2] = math.radians(value[2])
                    if len(blender_inputs) == 0:
                        for property_name, property_object in shader_node.rna_type.properties.items():
                            if not property_name == input_plug.name:
                                continue
                            if property_object.is_readonly:
                                break
                            try:
                                setattr(shader_node, property_name, value)
                            except Exception as error:
                                print('Warning: Universal Material Map: Unexpected error when setting property "{0}" to value "{1}": "{2}"'.format(property_name, input_plug.computed_value, error))
                    else:
                        if isinstance(blender_inputs[0], bpy.types.NodeSocketShader):
                            continue
                        try:
                            blender_inputs[0].default_value = value
                        except Exception as error:
                            print('Warning: Universal Material Map: Unexpected error when setting input "{0}" to value "{1}": "{2}"'.format(input_plug.name, input_plug.computed_value, error))
                    continue
        # UX assist with special attributes
        for shader_node in material.node_tree.nodes:
            if shader_node.name == 'OmniPBR Compute' and isinstance(shader_node, bpy.types.ShaderNodeGroup):
                shader_node.inputs['Use Albedo Map'].default_value = 1 if use_albedo_map else 0
                shader_node.inputs['Use Normal Map'].default_value = 1 if use_normal_map else 0
                shader_node.inputs['Use Detail Normal Map'].default_value = 1 if use_detail_normal_map else 0
                shader_node.inputs['Use Emission Map'].default_value = 1 if use_emission_map else 0
                break


class DataConverter(CoreConverter, IDataConverter):
    """Converts pure attribute data (no live Blender objects) between render contexts."""

    def can_convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> bool:
        """ Resolves if worker can convert the given class and source_data to another class and target data. """
        conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
        if not conversion_graph:
            return False
        try:
            destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
        except Exception as error:
            print('Warning: Unable to get destination assembly using document "{0}".\nDetails: {1}'.format(conversion_graph.filename, error))
            return False
        return destination_target_instance is not None

    def convert_data_to_data(self, class_name: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> typing.List[typing.Tuple[str, typing.Any]]:
        """
        Returns a list of key value pairs in tuples.
        The first pair is ("umm_target_class", "the_class_name") indicating the conversion target class.
        """
        if developer_mode:
            print('UMM DEBUG: DataConverter.convert_data_to_data()')
            print('\tclass_name="{0}"'.format(class_name))
            print('\trender_context="{0}"'.format(render_context))
            print('\tsource_data=[')
            for o in source_data:
                if o[1] == '':
                    print('\t\t("{0}", ""),'.format(o[0]))
                    continue
                print('\t\t("{0}", {1}),'.format(o[0], o[1]))
            print('\t]')
        conversion_graph = _get_conversion_graph_impl(source_class=class_name, render_context=render_context)
        destination_target_instance = _data_to_output_entity(graph=conversion_graph, data=source_data)
        attribute_data = [(util.TARGET_CLASS_IDENTIFIER, destination_target_instance.target.root_node.class_name)]
        for plug in destination_target_instance.inputs:
            if not plug.input:
                continue
            if plug.is_invalid and isinstance(plug.parent, DagNode):
                plug.parent.compute()
            attribute_data.append((plug.name, plug.computed_value))
        return attribute_data


class OT_InstanceToDataConverter(bpy.types.Operator):
    """Operator: convert the active object's materials to UMM data for MDL and USDPreview."""
    bl_idname = 'universalmaterialmap.instance_to_data_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        print('Conversion Operator: execute')
        # Get object by name: bpy.data.objects['Cube']
        # Get material by name: bpy.data.materials['MyMaterial']
        # node = [o for o in bpy.context.active_object.active_material.node_tree.nodes if o.select][0]
        print('selected_node', bpy.context.active_object, type(bpy.context.active_object))
        # print('\n'.join(dir(bpy.context.active_object)))
        material_slot: bpy.types.MaterialSlot  # https://docs.blender.org/api/current/bpy.types.MaterialSlot.html?highlight=materialslot#bpy.types.MaterialSlot
        for material_slot in bpy.context.active_object.material_slots:
            material: bpy.types.Material = material_slot.material
            if material.node_tree:
                # Node-based material: convert the shader feeding each output's Surface socket.
                for node in material.node_tree.nodes:
                    if isinstance(node, bpy.types.ShaderNodeOutputMaterial):
                        for input in node.inputs:
                            if not input.type == 'SHADER':
                                continue
                            if not input.is_linked:
                                continue
                            for link in input.links:
                                if not isinstance(link, bpy.types.NodeLink):
                                    continue
                                if not link.is_valid:
                                    continue
                                instance = link.from_node
                                for render_context in ['MDL', 'USDPreview']:
                                    if util.can_convert_instance_to_data(instance=instance, render_context=render_context):
                                        util.convert_instance_to_data(instance=instance, render_context=render_context)
                                    else:
                                        print('Information: Universal Material Map: Not able to convert instance "{0}" to data with render context "{1}"'.format(instance, render_context))
            else:
                # Non-node material: convert the material itself.
                instance = material
                for render_context in ['MDL', 'USDPreview']:
                    if util.can_convert_instance_to_data(instance=instance, render_context=render_context):
                        util.convert_instance_to_data(instance=instance, render_context=render_context)
                    else:
                        print('Information: Universal Material Map: Not able to convert instance "{0}" to data with render context "{1}"'.format(instance, render_context))
        return {'FINISHED'}


class OT_DataToInstanceConverter(bpy.types.Operator):
    """Operator: smoke-test converting hard-coded OmniPBR sample data into a Blender instance."""
    bl_idname = 'universalmaterialmap.data_to_instance_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        render_context = 'Blender'
        source_class = 'OmniPBR.mdl|OmniPBR'
        # Representative OmniPBR attribute values used as test input.
        sample_data = [
            ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)),
            ('diffuse_texture', ''),
            ('reflection_roughness_constant', 0.4000000059604645),
            ('reflectionroughness_texture', ''),
            ('metallic_constant', 0.0),
            ('metallic_texture', ''),
            ('specular_level', 0.5),
            ('enable_emission', True),
            ('emissive_color', (0.0, 0.0, 0.0)),
            ('emissive_color_texture', ''),
            ('emissive_intensity', 1.0),
            ('normalmap_texture', ''),
            ('enable_opacity', True),
            ('opacity_constant', 1.0),
        ]
        if util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data):
            converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data)
            destination_class = converted_data[0][1]
            if util.can_create_instance(class_name=destination_class):
                instance = util.create_instance(class_name=destination_class)
                print('instance "{0}".'.format(instance))
                temp = converted_data[:]
                while len(temp):
                    item = temp.pop(0)
                    property_name = item[0]
                    property_value = item[1]
                    if util.can_set_instance_attribute(instance=instance, name=property_name):
                        util.set_instance_attribute(instance=instance, name=property_name, value=property_value)
            else:
                print('Cannot create instance from "{0}".'.format(source_class))
        return {'FINISHED'}


class OT_DataToDataConverter(bpy.types.Operator):
    """Operator: smoke-test converting hard-coded OmniPBR sample data to another data form."""
    bl_idname = 'universalmaterialmap.data_to_data_converter'
    bl_label = 'Universal Material Map Converter Operator'
    bl_description = 'Universal Material Map Converter'

    def execute(self, context):
        render_context = 'Blender'
        source_class = 'OmniPBR.mdl|OmniPBR'
        sample_data = [
            ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)),
            ('diffuse_texture', ''),
            ('reflection_roughness_constant', 0.4000000059604645),
            ('reflectionroughness_texture', ''),
            ('metallic_constant', 0.0),
            ('metallic_texture', ''),
            ('specular_level', 0.5),
            ('enable_emission', True),
('emissive_color', (0.0, 0.0, 0.0)), ('emissive_color_texture', ''), ('emissive_intensity', 1.0), ('normalmap_texture', ''), ('enable_opacity', True), ('opacity_constant', 1.0), ] if util.can_convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data): converted_data = util.convert_data_to_data(class_name=source_class, render_context=render_context, source_data=sample_data) print('converted_data:', converted_data) else: print('UMM Failed to convert data. util.can_convert_data_to_data() returned False') return {'FINISHED'} class OT_ApplyDataToInstance(bpy.types.Operator): bl_idname = 'universalmaterialmap.apply_data_to_instance' bl_label = 'Universal Material Map Apply Data To Instance Operator' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} instance = bpy.context.active_object.active_material render_context = 'Blender' source_class = 'OmniPBR.mdl|OmniPBR' sample_data = [ ('albedo_add', 0.02), # Adds a constant value to the diffuse color ('albedo_desaturation', 0.19999999), # Desaturates the diffuse color ('ao_texture', ('', 'raw')), ('ao_to_diffuse', 1), # Controls the amount of ambient occlusion multiplied into the diffuse color channel ('bump_factor', 10), # Strength of normal map ('diffuse_color_constant', (0.800000011920929, 0.800000011920929, 0.800000011920929)), ('diffuse_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_albedo.png', 'sRGB')), ('diffuse_tint', (0.96202534, 0.8118357, 0.8118357)), # When enabled, this color value is multiplied over the final albedo color ('enable_emission', 0), ('enable_ORM_texture', 1), ('metallic_constant', 1), ('metallic_texture', ('', 'raw')), ('metallic_texture_influence', 1), ('normalmap_texture', 
('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_normal.png', 'raw')), ('ORM_texture', ('D:/Blender_GTC_2021/Marbles/assets/standalone/A_bumper/textures/play_bumper/blue/play_bumperw_orm.png', 'raw')), ('reflection_roughness_constant', 1), # Higher roughness values lead to more blurry reflections ('reflection_roughness_texture_influence', 1), # Blends between the constant value and the lookup of the roughness texture ('reflectionroughness_texture', ('', 'raw')), ('texture_rotate', 45), ('texture_scale', (2, 2)), ('texture_translate', (0.1, 0.9)), ] if util.can_apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance): util.apply_data_to_instance(source_class_name=source_class, render_context=render_context, source_data=sample_data, instance=instance) else: print('UMM Failed to convert data. util.can_convert_data_to_data() returned False') return {'FINISHED'} class OT_CreateTemplateOmniPBR(bpy.types.Operator): bl_idname = 'universalmaterialmap.create_template_omnipbr' bl_label = 'Convert to OmniPBR Graph' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} create_template(source_class='OmniPBR', material=bpy.context.active_object.active_material) return {'FINISHED'} class OT_CreateTemplateOmniGlass(bpy.types.Operator): bl_idname = 'universalmaterialmap.create_template_omniglass' bl_label = 'Convert to OmniGlass Graph' bl_description = 'Universal Material Map Converter' def execute(self, context): if not bpy.context: return {'FINISHED'} if not bpy.context.active_object: return {'FINISHED'} if not bpy.context.active_object.active_material: return {'FINISHED'} create_template(source_class='OmniGlass', material=bpy.context.active_object.active_material) return 
{'FINISHED'} class OT_DescribeShaderGraph(bpy.types.Operator): bl_idname = 'universalmaterialmap.describe_shader_graph' bl_label = 'Universal Material Map Describe Shader Graph Operator' bl_description = 'Universal Material Map' @staticmethod def describe_node(node) -> dict: node_definition = dict() node_definition['name'] = node.name node_definition['label'] = node.label node_definition['location'] = [node.location[0], node.location[1]] node_definition['width'] = node.width node_definition['height'] = node.height node_definition['parent'] = node.parent.name if node.parent else None node_definition['class'] = type(node).__name__ node_definition['inputs'] = [] node_definition['outputs'] = [] node_definition['nodes'] = [] node_definition['links'] = [] node_definition['properties'] = [] node_definition['texts'] = [] if node_definition['class'] == 'NodeFrame': node_definition['properties'].append( { 'name': 'use_custom_color', 'value': node.use_custom_color, } ) node_definition['properties'].append( { 'name': 'color', 'value': [node.color[0], node.color[1], node.color[2]], } ) node_definition['properties'].append( { 'name': 'shrink', 'value': node.shrink, } ) if node.text is not None: text_definition = dict() text_definition['name'] = node.text.name text_definition['contents'] = node.text.as_string() node_definition['texts'].append(text_definition) elif node_definition['class'] == 'ShaderNodeRGB': for index, output in enumerate(node.outputs): definition = dict() definition['index'] = index definition['name'] = output.name definition['class'] = type(output).__name__ if definition['class'] == 'NodeSocketColor': default_value = output.default_value definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['outputs'].append(definition) elif node_definition['class'] == 'ShaderNodeMixRGB': node_definition['properties'].append( { 'name': 'blend_type', 'value': node.blend_type, } ) 
node_definition['properties'].append( { 'name': 'use_clamp', 'value': node.use_clamp, } ) for index, input in enumerate(node.inputs): definition = dict() definition['index'] = index definition['name'] = input.name definition['class'] = type(input).__name__ if definition['class'] == 'NodeSocketFloatFactor': definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketColor': default_value = node.inputs[input.name].default_value definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['inputs'].append(definition) elif node_definition['class'] == 'ShaderNodeGroup': for index, input in enumerate(node.inputs): definition = dict() definition['index'] = index definition['name'] = input.name definition['class'] = type(input).__name__ if definition['class'] == 'NodeSocketFloatFactor': definition['min_value'] = node.node_tree.inputs[input.name].min_value definition['max_value'] = node.node_tree.inputs[input.name].max_value definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketIntFactor': definition['min_value'] = node.node_tree.inputs[input.name].min_value definition['max_value'] = node.node_tree.inputs[input.name].max_value definition['default_value'] = node.inputs[input.name].default_value elif definition['class'] == 'NodeSocketColor': default_value = node.inputs[input.name].default_value definition['default_value'] = [default_value[0], default_value[1], default_value[2], default_value[3]] else: raise NotImplementedError() node_definition['inputs'].append(definition) for index, output in enumerate(node.outputs): definition = dict() definition['index'] = index definition['name'] = output.name definition['class'] = type(output).__name__ node_definition['outputs'].append(definition) for child in node.node_tree.nodes: 
node_definition['nodes'].append(OT_DescribeShaderGraph.describe_node(child)) for link in node.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.is_valid: continue link_definition = dict() link_definition['from_node'] = link.from_node.name link_definition['from_socket'] = link.from_socket.name link_definition['to_node'] = link.to_node.name link_definition['to_socket'] = link.to_socket.name node_definition['links'].append(link_definition) elif node_definition['class'] == 'ShaderNodeUVMap': pass elif node_definition['class'] == 'ShaderNodeTexImage': pass elif node_definition['class'] == 'ShaderNodeOutputMaterial': pass elif node_definition['class'] == 'ShaderNodeBsdfPrincipled': pass elif node_definition['class'] == 'ShaderNodeMapping': pass elif node_definition['class'] == 'ShaderNodeNormalMap': pass elif node_definition['class'] == 'ShaderNodeHueSaturation': pass elif node_definition['class'] == 'ShaderNodeSeparateRGB': pass elif node_definition['class'] == 'NodeGroupInput': pass elif node_definition['class'] == 'NodeGroupOutput': pass elif node_definition['class'] == 'ShaderNodeMath': node_definition['properties'].append( { 'name': 'operation', 'value': node.operation, } ) node_definition['properties'].append( { 'name': 'use_clamp', 'value': node.use_clamp, } ) elif node_definition['class'] == 'ShaderNodeVectorMath': node_definition['properties'].append( { 'name': 'operation', 'value': node.operation, } ) else: raise NotImplementedError(node_definition['class']) return node_definition def execute(self, context): material = bpy.context.active_object.active_material output = dict() output['name'] = 'Principled Omni Glass' output['nodes'] = [] output['links'] = [] for node in material.node_tree.nodes: output['nodes'].append(OT_DescribeShaderGraph.describe_node(node)) for link in material.node_tree.links: if not isinstance(link, bpy.types.NodeLink): continue if not link.is_valid: continue link_definition = dict() 
link_definition['from_node'] = link.from_node.name link_definition['from_socket'] = link.from_socket.name link_definition['to_node'] = link.to_node.name link_definition['to_socket'] = link.to_socket.name output['links'].append(link_definition) print(json.dumps(output, indent=4)) return {'FINISHED'} def initialize(): if getattr(sys.modules[__name__], '__initialized'): return setattr(sys.modules[__name__], '__initialized', True) util.register(converter=DataConverter()) util.register(converter=ObjectConverter()) print('Universal Material Map: Registered Converter classes.') initialize()
67,817
Python
49.724009
263
0.552177
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/material.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import traceback import bpy from ..core.converter import util def apply_data_to_instance(instance_name: str, source_class: str, render_context: str, source_data: typing.List[typing.Tuple[str, typing.Any]]) -> dict: ## bugfix: Extract class correctly from exporters that name the class like a Python function call. real_source_class = source_class.partition("(")[0] try: for material in bpy.data.materials: if not isinstance(material, bpy.types.Material): continue if material.name == instance_name: if util.can_apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material): return util.apply_data_to_instance(source_class_name=real_source_class, render_context=render_context, source_data=source_data, instance=material) print(f'Omniverse UMM: Unable to apply data at import for material "{instance_name}". 
This is not an error - just means that conversion data does not support the material.') result = dict() result['umm_notification'] = 'incomplete_process' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there is no Conversion Graph for that scenario. No changes were applied to "{2}".'.format(real_source_class, render_context, instance_name) return result except Exception as error: print('Warning: Universal Material Map: function "apply_data_to_instance": Unexpected error:') print('\targument "instance_name" = "{0}"'.format(instance_name)) print('\targument "source_class" = "{0}"'.format(real_source_class)) print('\targument "render_context" = "{0}"'.format(render_context)) print('\targument "source_data" = "{0}"'.format(source_data)) print('\terror: {0}'.format(error)) print('\tcallstack: {0}'.format(traceback.format_exc())) result = dict() result['umm_notification'] = 'unexpected_error' result['message'] = 'Not able to convert type "{0}" for render context "{1}" because there was an unexpected error. Some changes may have been applied to "{2}". 
Details: {3}'.format(real_source_class, render_context, instance_name, error) return result def convert_instance_to_data(instance_name: str, render_context: str) -> typing.List[typing.Tuple[str, typing.Any]]: try: for material in bpy.data.materials: if not isinstance(material, bpy.types.Material): continue if material.name == instance_name: if util.can_convert_instance_to_data(instance=material, render_context=render_context): return util.convert_instance_to_data(instance=material, render_context=render_context) result = dict() result['umm_notification'] = 'incomplete_process' result['message'] = 'Not able to convert material "{0}" for render context "{1}" because there is no Conversion Graph for that scenario.'.format(instance_name, render_context) return result except Exception as error: print('Warning: Universal Material Map: function "convert_instance_to_data": Unexpected error:') print('\targument "instance_name" = "{0}"'.format(instance_name)) print('\targument "render_context" = "{0}"'.format(render_context)) print('\terror: {0}'.format(error)) print('\tcallstack: {0}'.format(traceback.format_exc())) result = dict() result['umm_notification'] = 'unexpected_error' result['message'] = 'Not able to convert material "{0}" for render context "{1}" there was an unexpected error. Details: {2}'.format(instance_name, render_context, error) return result result = dict() result['umm_notification'] = 'incomplete_process' result['message'] = 'Not able to convert material "{0}" for render context "{1}" because there is no Conversion Graph for that scenario.'.format(instance_name, render_context) return result
5,004
Python
57.197674
246
0.670464
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/__init__.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import typing import os import re import sys import json import bpy from ..core.data import Library from ..core.feature import POLLING from ..core.service import store from ..core.service import delegate from ..core.util import get_extension_from_image_file_format LIBRARY_ID = '195c69e1-7765-4a16-bb3a-ecaa222876d9' __initialized = False developer_mode: bool = False CORE_MATERIAL_PROPERTIES = [ ('diffuse_color', 'RGBA'), ('metallic', 'VALUE'), ('specular_color', 'STRING'), ('roughness', 'VALUE'), ('use_backface_culling', 'BOOLEAN'), ('blend_method', 'STRING'), ('shadow_method', 'STRING'), ('alpha_threshold', 'VALUE'), ('use_screen_refraction', 'BOOLEAN'), ('refraction_depth', 'VALUE'), ('use_sss_translucency', 'BOOLEAN'), ('pass_index', 'INT'), ] def show_message(message: str = '', title: str = 'Message Box', icon: str = 'INFO'): try: def draw(self, context): self.layout.label(text=message) bpy.context.window_manager.popup_menu(draw, title=title, icon=icon) except: print('{0}\n{1}'.format(title, message)) def initialize(): if getattr(sys.modules[__name__], '__initialized'): return setattr(sys.modules[__name__], '__initialized', True) 
directory = os.path.expanduser('~').replace('\\', '/') if not directory.endswith('/Documents'): directory = '{0}/Documents'.format(directory) directory = '{0}/Omniverse/Blender/UMMLibrary'.format(directory) library = Library.Create( library_id=LIBRARY_ID, name='Blender', manifest=delegate.FilesystemManifest(root_directory='{0}'.format(directory)), conversion_graph=delegate.Filesystem(root_directory='{0}/ConversionGraph'.format(directory)), target=delegate.Filesystem(root_directory='{0}/Target'.format(directory)), ) store.register_library(library=library) from ..blender import converter converter.initialize() from ..blender import generator generator.initialize() if POLLING: # TODO: On application exit > un_initialize() pass def un_initialize(): if POLLING: store.on_shutdown() def get_library(): """ :return: omni.universalmaterialmap.core.data.Library """ initialize() return store.get_library(library_id=LIBRARY_ID) def __get_value_impl(socket: bpy.types.NodeSocketStandard, depth=0, max_depth=100) -> typing.Any: # Local utility function which returns a file extension # corresponding to the given image file format string. # This mimics similar logic used in the Blender USD IO # C++ implementation. debug = False if debug: print('__get_value_impl: depth={0}'.format(depth)) if depth > max_depth: if debug: print('\t reached max_depth ({0}). 
terminating recursion'.format(max_depth)) return None if debug: print('\tsocket.is_linked'.format(socket.is_linked)) if socket.is_linked: for link in socket.links: if not isinstance(link, bpy.types.NodeLink): if debug: print('\t\tlink is not bpy.types.NodeLink: {0}'.format(type(link))) continue if not link.is_valid: if debug: print('\t\tlink is not valid') continue instance = link.from_node if debug: print('\t\tlink.from_node: {0}'.format(type(instance))) if isinstance(instance, bpy.types.ShaderNodeTexImage): print(f'UMM: image.filepath: "{instance.image.filepath}"') print(f'UMM: image.source: "{instance.image.source}"') print(f'UMM: image.file_format: "{instance.image.file_format}"') if debug: print('\t\tinstance.image: {0}'.format(instance.image)) if instance.image: print('\t\tinstance.image.source: {0}'.format(instance.image.source)) if instance.image and (instance.image.source == 'FILE' or instance.image.source == 'TILED'): value = instance.image.filepath if (instance.image.source == 'TILED'): # Find all numbers in the path. numbers = re.findall('[0-9]+', value) if (len(numbers) > 0): # Get the string representation of the last number. num_str = str(numbers[-1]) # Replace the number substring with '<UDIM>'. split_items = value.rsplit(num_str, 1) if (len(split_items)==2): value = split_items[0] + '<UDIM>' + split_items[1] if debug: print('\t\tinstance.image.filepath: {0}'.format(value)) try: if value and instance.image.packed_file: # The image is packed, so ignore the filepath, which is likely # invalid, and return just the base name. value = bpy.path.basename(value) # Make sure the file has a valid extension for # the expected format. file_format = instance.image.file_format file_format = get_extension_from_image_file_format(file_format, base_name=value) value = bpy.path.ensure_ext(value, '.' 
+ file_format) print(f'UMM: packed image data: "{[value, instance.image.colorspace_settings.name]}"') return [value, instance.image.colorspace_settings.name] if value is None or value == '': file_format = instance.image.file_format file_format = get_extension_from_image_file_format(file_format) value = f'{instance.image.name}.{file_format}' if debug: print(f'\t\tvalue: {value}') print(f'UMM: image data: "{[value, instance.image.colorspace_settings.name]}"') return [value, instance.image.colorspace_settings.name] return [os.path.abspath(bpy.path.abspath(value)), instance.image.colorspace_settings.name] except Exception as error: print('Warning: Universal Material Map: Unable to evaluate absolute file path of texture "{0}". Detail: {1}'.format(instance.image.filepath, error)) return None if isinstance(instance, bpy.types.ShaderNodeNormalMap): for o in instance.inputs: if o.name == 'Color': value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth) if value: return value for o in instance.inputs: value = __get_value_impl(socket=o, depth=depth + 1, max_depth=max_depth) if debug: print('\t\tre-entrant: input="{0}", value="{1}"'.format(o.name, value)) if value: return value return None def get_value(socket: bpy.types.NodeSocketStandard) -> typing.Any: debug = False value = __get_value_impl(socket=socket) if debug: print('get_value', value, socket.default_value) return socket.default_value if not value else value def _create_node_from_template(node_tree: bpy.types.NodeTree, node_definition: dict, parent: object = None) -> object: node = node_tree.nodes.new(node_definition['class']) if parent: node.parent = parent node.name = node_definition['name'] node.label = node_definition['label'] node.location = node_definition['location'] if node_definition['class'] == 'NodeFrame': node.width = node_definition['width'] node.height = node_definition['height'] for o in node_definition['properties']: setattr(node, o['name'], o['value']) if node_definition['class'] == 
'NodeFrame': for text_definition in node_definition['texts']: existing = None for o in bpy.data.texts: if o.name == text_definition['name']: existing = o break if existing is None: existing = bpy.data.texts.new(text_definition['name']) existing.write(text_definition['contents']) node.text = existing node.location = node_definition['location'] elif node_definition['class'] == 'ShaderNodeGroup': node.node_tree = bpy.data.node_groups.new('node tree', 'ShaderNodeTree') child_cache = dict() for child_definition in node_definition['nodes']: child_cache[child_definition['name']] = _create_node_from_template(node_tree=node.node_tree, node_definition=child_definition) for input_definition in node_definition['inputs']: node.node_tree.inputs.new(input_definition['class'], input_definition['name']) if input_definition['class'] == 'NodeSocketFloatFactor': node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value'] node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value'] node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketIntFactor': node.node_tree.inputs[input_definition['name']].min_value = input_definition['min_value'] node.node_tree.inputs[input_definition['name']].max_value = input_definition['max_value'] node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketColor': node.node_tree.inputs[input_definition['name']].default_value = input_definition['default_value'] node.inputs[input_definition['name']].default_value = input_definition['default_value'] for output_definition in node_definition['outputs']: node.node_tree.outputs.new(output_definition['class'], 
output_definition['name']) for link_definition in node_definition['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0] node.node_tree.links.new(from_socket, to_socket) node.width = node_definition['width'] node.height = node_definition['height'] node.location = node_definition['location'] elif node_definition['class'] == 'ShaderNodeMixRGB': for input_definition in node_definition['inputs']: if input_definition['class'] == 'NodeSocketFloatFactor': node.inputs[input_definition['name']].default_value = input_definition['default_value'] if input_definition['class'] == 'NodeSocketColor': node.inputs[input_definition['name']].default_value = input_definition['default_value'] elif node_definition['class'] == 'ShaderNodeRGB': for output_definition in node_definition['outputs']: if output_definition['class'] == 'NodeSocketColor': node.outputs[output_definition['name']].default_value = output_definition['default_value'] return node def create_template(source_class: str, material: bpy.types.Material) -> None: template_filepath = '{}'.format(__file__).replace('\\', '/') template_filepath = template_filepath[:template_filepath.rfind('/')] template_filepath = '{}/template/{}.json'.format(template_filepath, source_class.lower()) if not os.path.exists(template_filepath): return with open(template_filepath, 'r') as template_file: template = json.load(template_file) # Make sure we're using nodes. material.use_nodes = True # Remove existing nodes - we're starting from scratch. to_delete = [o for o in material.node_tree.nodes] while len(to_delete): material.node_tree.nodes.remove(to_delete.pop()) # Create nodes according to template. 
child_cache = dict() for node_definition in template['nodes']: if node_definition['parent'] is None: node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition) child_cache[node_definition['name']] = node for node_definition in template['nodes']: if node_definition['parent'] is not None: parent = child_cache[node_definition['parent']] node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition, parent=parent) child_cache[node_definition['name']] = node for link_definition in template['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0] material.node_tree.links.new(from_socket, to_socket) def create_from_template(material: bpy.types.Material, template: dict) -> None: # Make sure we're using nodes. material.use_nodes = True # Create nodes according to template. 
child_cache = dict() for node_definition in template['nodes']: if node_definition['parent'] is None: node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition) child_cache[node_definition['name']] = node for node_definition in template['nodes']: if node_definition['parent'] is not None: parent = child_cache[node_definition['parent']] node = _create_node_from_template(node_tree=material.node_tree, node_definition=node_definition, parent=parent) child_cache[node_definition['name']] = node for link_definition in template['links']: from_node = child_cache[link_definition['from_node']] from_socket = [o for o in from_node.outputs if o.name == link_definition['from_socket']][0] to_node = child_cache[link_definition['to_node']] to_socket = [o for o in to_node.inputs if o.name == link_definition['to_socket']][0] material.node_tree.links.new(from_socket, to_socket) def get_parent_material(shader_node: object) -> bpy.types.Material: for material in bpy.data.materials: if shader_node == material: return material if not material.use_nodes: continue if not material.node_tree or not material.node_tree.nodes: continue for node in material.node_tree.nodes: if shader_node == node: return material return None def get_template_data_by_shader_node(shader_node: object) -> typing.Tuple[typing.Dict, typing.Dict, str, bpy.types.Material]: material: bpy.types.Material = get_parent_material(shader_node=shader_node) if material and material.use_nodes and material.node_tree and material.node_tree.nodes: template_directory = '{}'.format(__file__).replace('\\', '/') template_directory = template_directory[:template_directory.rfind('/')] template_directory = f'{template_directory}/template' for item in os.listdir(template_directory): if item.lower().endswith('_map.json'): continue if not item.lower().endswith('.json'): continue template_filepath = f'{template_directory}/{item}' with open(template_filepath, 'r') as template_file: template = 
json.load(template_file) material_has_all_template_nodes = True for node_definition in template['nodes']: found_node = False for node in material.node_tree.nodes: if node.name == node_definition['name']: found_node = True break if not found_node: material_has_all_template_nodes = False break if not material_has_all_template_nodes: continue template_has_all_material_nodes = True for node in material.node_tree.nodes: found_template = False for node_definition in template['nodes']: if node.name == node_definition['name']: found_template = True break if not found_template: template_has_all_material_nodes = False break if not template_has_all_material_nodes: continue template_shader_name = template['name'] map_filename = '{}_map.json'.format(item[:item.rfind('.')]) template_map_filepath = f'{template_directory}/{map_filename}' with open(template_map_filepath, 'r') as template_map_file: template_map = json.load(template_map_file) return template, template_map, template_shader_name, material return None, None, None, None def get_template_data_by_class_name(class_name: str) -> typing.Tuple[typing.Dict, typing.Dict]: template_directory = '{}'.format(__file__).replace('\\', '/') template_directory = template_directory[:template_directory.rfind('/')] template_directory = f'{template_directory}/template' for item in os.listdir(template_directory): if item.lower().endswith('_map.json'): continue if not item.lower().endswith('.json'): continue template_filepath = f'{template_directory}/{item}' with open(template_filepath, 'r') as template_file: template = json.load(template_file) if not template['name'] == class_name: continue map_filename = '{}_map.json'.format(item[:item.rfind('.')]) template_map_filepath = f'{template_directory}/{map_filename}' with open(template_map_filepath, 'r') as template_map_file: template_map = json.load(template_map_file) return template, template_map return None, None
19,919
Python
43.663677
172
0.599377
NVIDIA-Omniverse/blender_omniverse_addons/omni/universalmaterialmap/blender/menu.py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. import bpy from . import developer_mode class UniversalMaterialMapMenu(bpy.types.Menu): bl_label = "Omniverse" bl_idname = "OBJECT_MT_umm_node_menu" def draw(self, context): layout = self.layout layout.operator('universalmaterialmap.create_template_omnipbr', text='Replace with OmniPBR graph template') layout.operator('universalmaterialmap.create_template_omniglass', text='Replace with OmniGlass graph template') if developer_mode: layout.operator('universalmaterialmap.generator', text='DEV: Generate Targets') layout.operator('universalmaterialmap.instance_to_data_converter', text='DEV: Convert Instance to Data') layout.operator('universalmaterialmap.data_to_instance_converter', text='DEV: Convert Data to Instance') layout.operator('universalmaterialmap.data_to_data_converter', text='DEV: Convert Data to Data') layout.operator('universalmaterialmap.apply_data_to_instance', text='DEV: Apply Data to Instance') layout.operator('universalmaterialmap.describe_shader_graph', text='DEV: Describe Shader Graph')
1,999
Python
45.511627
119
0.724362
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menubar_helper.py
from pathlib import Path import carb import carb.settings import carb.tokens import omni.ui as ui from omni.ui import color as cl ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons") VIEW_MENUBAR_STYLE = { "MenuBar.Window": {"background_color": 0xA0000000}, "MenuBar.Item.Background": { "background_color": 0, }, "Menu.Item.Background": { "background_color": 0, } } VIEWPORT_CAMERA_STYLE = { "Menu.Item.Icon::Expand": {"image_url": f"{ICON_PATH}/caret_s2_right_dark.svg", "color": cl.viewport_menubar_light}, "Menu.Item.Icon::Expand:checked": {"image_url": f"{ICON_PATH}/caret_s2_left_dark.svg"}, } class MenubarHelper: def __init__(self) -> None: self._settings = carb.settings.get_settings() # Set menubar background and style try: from omni.kit.viewport.menubar.core import DEFAULT_MENUBAR_NAME from omni.kit.viewport.menubar.core import get_instance as get_menubar_instance instance = get_menubar_instance() if not instance: # pragma: no cover return default_menubar = instance.get_menubar(DEFAULT_MENUBAR_NAME) default_menubar.background_visible = True default_menubar.style.update(VIEW_MENUBAR_STYLE) default_menubar.show_separator = True except ImportError: # pragma: no cover carb.log_warn("Viewport menubar not found!") try: import omni.kit.viewport.menubar.camera self._camera_menubar_instance = omni.kit.viewport.menubar.camera.get_instance() if not self._camera_menubar_instance: # pragma: no cover return # Change expand button icon self._camera_menubar_instance._camera_menu._style.update(VIEWPORT_CAMERA_STYLE) # New menu item for camera speed self._camera_menubar_instance.register_menu_item(self._create_camera_speed, order=100) # OM-76591 - Removing "Create from view" item - Bob self._camera_menubar_instance.deregister_menu_item(self._camera_menubar_instance._camera_menu._build_create_camera) except ImportError: carb.log_warn("Viewport menubar not found!") self._camera_menubar_instance = None except AttributeError: # pragma: no 
cover self._camera_menubar_instance = None # Hide default render and settings menubar self._settings.set("/persistent/exts/omni.kit.viewport.menubar.render/visible", False) self._settings.set("/persistent/exts/omni.kit.viewport.menubar.settings/visible", False) def destroy(self) -> None: if self._camera_menubar_instance: self._camera_menubar_instance.deregister_menu_item(self._create_camera_speed) def _create_camera_speed(self, _vc, _r: ui.Menu) -> None: from omni.kit.viewport.menubar.core import SettingModel, SliderMenuDelegate ui.MenuItem( "Speed", hide_on_click=False, delegate=SliderMenuDelegate( model=SettingModel("/persistent/app/viewport/camMoveVelocity", draggable=True), min=self._settings.get_as_float("/persistent/app/viewport/camVelocityMin") or 0.01, max=self._settings.get_as_float("/persistent/app/viewport/camVelocityMax"), tooltip="Set the Fly Mode navigation speed", width=0, reserve_status=True, ), )
3,517
Python
42.974999
127
0.642593
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/__init__.py
from .setup import *
21
Python
9.999995
20
0.714286
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/setup.py
import asyncio import weakref from functools import partial import os from pathlib import Path from typing import cast, Optional import omni.client import omni.ext import omni.kit.menu.utils import omni.kit.app import omni.kit.context_menu import omni.kit.ui import omni.usd from omni.kit.quicklayout import QuickLayout from omni.kit.menu.utils import MenuLayout from omni.kit.window.title import get_main_window_title from omni.kit.usd.layers import LayerUtils from omni.kit.viewport.menubar.core import get_instance as get_mb_inst, DEFAULT_MENUBAR_NAME from omni.kit.viewport.menubar.core.viewport_menu_model import ViewportMenuModel from omni.kit.viewport.utility import get_active_viewport, get_active_viewport_window, disable_selection import carb import carb.settings import carb.dictionary import carb.events import carb.tokens import carb.input import omni.kit.imgui as _imgui from pxr import Sdf, Usd from .navigation import Navigation from .menu_helper import MenuHelper from .menubar_helper import MenubarHelper from .stage_template import SunnySkyStage from .ui_state_manager import UIStateManager SETTINGS_PATH_FOCUSED = "/app/workspace/currentFocused" APPLICATION_MODE_PATH = "/app/application_mode" MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active" CURRENT_TOOL_PATH = "/app/viewport/currentTool" ROOT_WINDOW_NAME = "DockSpace" ICON_PATH = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/icons") SETTINGS_STARTUP_EXPAND_VIEWPORT = "/app/startup/expandViewport" VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu" TELEPORT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/visible" async def _load_layout_startup(layout_file: str, keep_windows_open: bool=False) -> None: try: # few frames delay to avoid the conflict with the layout of omni.kit.mainwindow for i in range(3): await omni.kit.app.get_app().next_update_async() # type: ignore QuickLayout.load_file(layout_file, keep_windows_open) # WOR: some 
layout don't happy collectly the first time await omni.kit.app.get_app().next_update_async() # type: ignore QuickLayout.load_file(layout_file, keep_windows_open) except Exception as exc: # pragma: no cover (Can't be tested because a non-existing layout file prints an log_error in QuickLayout and does not throw an exception) carb.log_warn(f"Failed to load layout {layout_file}: {exc}") async def _load_layout(layout_file: str, keep_windows_open:bool=False) -> None: try: # few frames delay to avoid the conflict with the layout of omni.kit.mainwindow for i in range(3): await omni.kit.app.get_app().next_update_async() # type: ignore QuickLayout.load_file(layout_file, keep_windows_open) except Exception as exc: # pragma: no cover (Can't be tested because a non-existing layout file prints an log_error in QuickLayout and does not throw an exception) carb.log_warn(f"Failed to load layout {layout_file}: {exc}") async def _clear_startup_scene_edits() -> None: try: for i in range(50): # This could possibly be a smaller value. I want to ensure this happens after RTX startup await omni.kit.app.get_app().next_update_async() # type: ignore omni.usd.get_context().set_pending_edit(False) except Exception as exc: # pragma: no cover carb.log_warn(f"Failed to clear stage edits on startup: {exc}") # This extension is mostly loading the Layout updating menu class SetupExtension(omni.ext.IExt): # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. 
@property def _app(self): return omni.kit.app.get_app() @property def _settings(self): return carb.settings.get_settings() def on_startup(self, ext_id: str) -> None: self._ext_id = ext_id self._menubar_helper = MenubarHelper() self._menu_helper = MenuHelper() # using imgui directly to adjust some color and Variable imgui = _imgui.acquire_imgui() # match Create overides imgui.push_style_color(_imgui.StyleColor.ScrollbarGrab, carb.Float4(0.4, 0.4, 0.4, 1)) imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabHovered, carb.Float4(0.6, 0.6, 0.6, 1)) imgui.push_style_color(_imgui.StyleColor.ScrollbarGrabActive, carb.Float4(0.8, 0.8, 0.8, 1)) # DockSplitterSize is the variable that drive the size of the Dock Split connection imgui.push_style_var_float(_imgui.StyleVar.DockSplitterSize, 2) # setup the Layout for your app self._layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts") layout_file = Path(self._layouts_path).joinpath(f"{self._settings.get('/app/layout/name')}.json") self.__setup_window_task = asyncio.ensure_future(_load_layout_startup(f"{layout_file}", True)) self.review_layout_path = str(Path(self._layouts_path) / "comment_layout.json") self.default_layout_path = str(Path(self._layouts_path) / "default.json") self.layout_user_path = str(Path(self._layouts_path) / "layout_user.json") # remove the user defined layout so that we always load the default layout when startup if os.path.exists(self.layout_user_path): os.remove(self.layout_user_path) # setup the menu and their layout self._current_layout_priority = 0 self._layout_menu_items = [] self._layout_file_menu() self._menu_layout = [] if self._settings.get_as_bool('/app/view/debug/menus'): self._layout_menu() # setup the Application Title window_title = get_main_window_title() if window_title: window_title.set_app_version(self._settings.get_as_string("/app/titleVersion")) # self._context_menu() self._register_my_menu() self._navigation = Navigation() 
self._navigation.on_startup(ext_id) self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events( APPLICATION_MODE_PATH, weakref.proxy(self)._on_application_mode_changed ) self._set_viewport_menubar_visibility(False) self._test = asyncio.ensure_future(_clear_startup_scene_edits()) # OM-95865: Ensure teleport on by default. self._usd_context = omni.usd.get_context() self._stage_event_sub = self._usd_context.get_stage_event_stream().create_subscription_to_pop( self._on_stage_open_event, name="TeleportDefaultOn" ) if self._settings.get_as_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT): self._set_viewport_fill_on() self._stage_templates = [SunnySkyStage()] disable_selection(get_active_viewport()) self._ui_state_manager = UIStateManager() self._setup_ui_state_changes() omni.kit.menu.utils.add_layout([ MenuLayout.Menu("Window", [ MenuLayout.Item("Viewport", source="Window/Viewport/Viewport 1"), MenuLayout.Item("Playlist", remove=True), MenuLayout.Item("Layout", remove=True), MenuLayout.Item("" if any(v in self._app.get_app_version() for v in ("alpha", "beta")) else "Extensions", remove=True), MenuLayout.Sort(exclude_items=["Extensions"], sort_submenus=True), ]) ]) def show_documentation(*x): import webbrowser webbrowser.open("http://docs.omniverse.nvidia.com/explorer") self._help_menu_items = [ omni.kit.menu.utils.MenuItemDescription(name="Documentation", onclick_fn=show_documentation, appear_after=[omni.kit.menu.utils.MenuItemOrder.FIRST]) ] omni.kit.menu.utils.add_menu_items(self._help_menu_items, name="Help") def _on_stage_open_event(self, event: carb.events.IEvent) -> None: if event.type == int(omni.usd.StageEventType.OPENED): app_mode = self._settings.get_as_string(APPLICATION_MODE_PATH).lower() # exit all tools self._settings.set(CURRENT_TOOL_PATH, "none") # OM-95865, OMFP-1993: Activate Teleport upon scene load ... # OMFP-2743: ... but only when in Review mode. 
if app_mode == "review": asyncio.ensure_future(self._stage_post_open_teleport_toggle()) # toggle RMB viewport context menu based on application mode value = False if app_mode == "review" else True self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, value) # teleport is activated after loading a stage and app is in Review mode async def _stage_post_open_teleport_toggle(self) -> None: await self._app.next_update_async() if hasattr(self, "_usd_context") and self._usd_context is not None and not self._usd_context.is_new_stage(): self._settings.set("/exts/omni.kit.viewport.navigation.core/activeOperation", "teleport") def _set_viewport_fill_on(self) -> None: vp_window = get_active_viewport_window() vp_widget = vp_window.viewport_widget if vp_window else None if vp_widget: vp_widget.expand_viewport = True def _set_viewport_menubar_visibility(self, show: bool) -> None: mb_inst = get_mb_inst() if mb_inst and hasattr(mb_inst, "get_menubar"): main_menubar = mb_inst.get_menubar(DEFAULT_MENUBAR_NAME) if main_menubar.visible_model.as_bool != show: main_menubar.visible_model.set_value(show) ViewportMenuModel()._item_changed(None) # type: ignore def _on_application_mode_changed(self, item: carb.dictionary.Item, _typ: carb.settings.ChangeEventType) -> None: if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "review": omni.usd.get_context().get_selection().clear_selected_prim_paths() disable_selection(get_active_viewport()) current_mode: str = cast(str, item.get_dict()) asyncio.ensure_future(self.defer_load_layout(current_mode)) async def defer_load_layout(self, current_mode: str) -> None: keep_windows = True # Focus Mode Toolbar self._settings.set_bool(SETTINGS_PATH_FOCUSED, True) # current_mode not in ("review", "layout")) # Turn off all tools and modal self._settings.set_string(CURRENT_TOOL_PATH, "none") self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False) if current_mode == "review": # save the current layout for restoring later if switch back 
QuickLayout.save_file(self.layout_user_path) # we don't want to keep any windows except the ones which are visible in self.review_layout_path await _load_layout(self.review_layout_path, False) else: # current_mode == "layout": # check if there is any user modified layout, if yes use that one layout_filename = self.layout_user_path if os.path.exists(self.layout_user_path) else self.default_layout_path await _load_layout(layout_filename, keep_windows) self._set_viewport_menubar_visibility(current_mode == "layout") def _setup_ui_state_changes(self) -> None: windows_to_hide_on_modal = ["Measure", "Section", "Waypoints"] self._ui_state_manager.add_hide_on_modal(window_names=windows_to_hide_on_modal, restore=True) window_titles = ["Markups", "Waypoints"] for window in window_titles: setting_name = f'/exts/omni.usd_explorer.setup/{window}/visible' self._ui_state_manager.add_window_visibility_setting(window, setting_name) # toggle icon visibilites based on window visibility self._ui_state_manager.add_settings_copy_dependency( source_path="/exts/omni.usd_explorer.setup/Markups/visible", target_path="/exts/omni.kit.markup.core/show_icons", ) self._ui_state_manager.add_settings_copy_dependency( source_path="/exts/omni.usd_explorer.setup/Waypoints/visible", target_path="/exts/omni.kit.waypoint.core/show_icons", ) def _custom_quicklayout_menu(self) -> None: # we setup a simple ways to Load custom layout from the exts def add_layout_menu_entry(name, parameter, key): import inspect editor_menu = omni.kit.ui.get_editor_menu() layouts_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/layouts") menu_path = f"Layout/{name}" menu = editor_menu.add_item(menu_path, None, False, self._current_layout_priority) # type: ignore self._current_layout_priority = self._current_layout_priority + 1 if inspect.isfunction(parameter): # pragma: no cover (Never used, see commented out section below regarding quick save/load) menu_action = 
omni.kit.menu.utils.add_action_to_menu( menu_path, lambda *_: asyncio.ensure_future(parameter()), name, (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key), ) else: menu_action = omni.kit.menu.utils.add_action_to_menu( menu_path, lambda *_: asyncio.ensure_future(_load_layout(f"{layouts_path}/{parameter}.json")), name, (carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL, key), ) self._layout_menu_items.append((menu, menu_action)) add_layout_menu_entry("Reset Layout", "default", carb.input.KeyboardInput.KEY_1) add_layout_menu_entry("Viewport Only", "viewport_only", carb.input.KeyboardInput.KEY_2) add_layout_menu_entry("Markup Editor", "markup_editor", carb.input.KeyboardInput.KEY_3) # add_layout_menu_entry("Waypoint Viewer", "waypoint_viewer", carb.input.KeyboardInput.KEY_4) # # you can enable Quick Save and Quick Load here # if False: # # create Quick Load & Quick Save # from omni.kit.quicklayout import QuickLayout # async def quick_save(): # QuickLayout.quick_save(None, None) # async def quick_load(): # QuickLayout.quick_load(None, None) # add_layout_menu_entry("Quick Save", quick_save, carb.input.KeyboardInput.KEY_7) # add_layout_menu_entry("Quick Load", quick_load, carb.input.KeyboardInput.KEY_8) def _register_my_menu(self) -> None: context_menu: Optional[omni.kit.context_menu.ContextMenuExtension] = omni.kit.context_menu.get_instance() if not context_menu: # pragma: no cover return def _layout_file_menu(self) -> None: self._menu_file_layout = [ MenuLayout.Menu( "File", [ MenuLayout.Item("New"), MenuLayout.Item("New From Stage Template"), MenuLayout.Item("Open"), MenuLayout.Item("Open Recent"), MenuLayout.Seperator(), MenuLayout.Item("Re-open with New Edit Layer"), MenuLayout.Seperator(), MenuLayout.Item("Share"), MenuLayout.Seperator(), MenuLayout.Item("Save"), MenuLayout.Item("Save As..."), MenuLayout.Item("Save With Options"), MenuLayout.Item("Save Selected"), MenuLayout.Item("Save Flattened As...", remove=True), MenuLayout.Seperator(), MenuLayout.Item("Collect As..."), 
MenuLayout.Item("Export"), MenuLayout.Seperator(), MenuLayout.Item("Import"), MenuLayout.Item("Add Reference"), MenuLayout.Item("Add Payload"), MenuLayout.Seperator(), MenuLayout.Item("Exit"), ] ) ] omni.kit.menu.utils.add_layout(self._menu_file_layout) def _layout_menu(self) -> None: self._menu_layout = [ MenuLayout.Menu( "Window", [ MenuLayout.SubMenu( "Animation", [ MenuLayout.Item("Timeline"), MenuLayout.Item("Sequencer"), MenuLayout.Item("Curve Editor"), MenuLayout.Item("Retargeting"), MenuLayout.Item("Animation Graph"), MenuLayout.Item("Animation Graph Samples"), ], ), MenuLayout.SubMenu( "Layout", [ MenuLayout.Item("Quick Save", remove=True), MenuLayout.Item("Quick Load", remove=True), ], ), MenuLayout.SubMenu( "Browsers", [ MenuLayout.Item("Content", source="Window/Content"), MenuLayout.Item("Materials"), MenuLayout.Item("Skies"), ], ), MenuLayout.SubMenu( "Rendering", [ MenuLayout.Item("Render Settings"), MenuLayout.Item("Movie Capture"), MenuLayout.Item("MDL Material Graph"), MenuLayout.Item("Tablet XR"), ], ), MenuLayout.SubMenu( "Simulation", [ MenuLayout.Group( "Flow", [ MenuLayout.Item("Presets", source="Window/Flow/Presets"), MenuLayout.Item("Monitor", source="Window/Flow/Monitor"), ], ), MenuLayout.Group( "Blast", [ MenuLayout.Item("Settings", source="Window/Blast/Settings"), MenuLayout.SubMenu( "Documentation", [ MenuLayout.Item("Kit UI", source="Window/Blast/Documentation/Kit UI"), MenuLayout.Item( "Programming", source="Window/Blast/Documentation/Programming" ), MenuLayout.Item( "USD Schemas", source="Window/Blast/Documentation/USD Schemas" ), ], ), ], ), MenuLayout.Item("Debug"), # MenuLayout.Item("Performance"), MenuLayout.Group( "Physics", [ MenuLayout.Item("Demo Scenes"), MenuLayout.Item("Settings", source="Window/Physics/Settings"), MenuLayout.Item("Debug"), MenuLayout.Item("Test Runner"), MenuLayout.Item("Character Controller"), MenuLayout.Item("OmniPVD"), MenuLayout.Item("Physics Helpers"), ], ), ], ), MenuLayout.SubMenu( "Utilities", [ 
MenuLayout.Item("Console"), MenuLayout.Item("Profiler"), MenuLayout.Item("USD Paths"), MenuLayout.Item("Statistics"), MenuLayout.Item("Activity Monitor"), ], ), # Remove 'Viewport 2' entry MenuLayout.SubMenu( "Viewport", [ MenuLayout.Item("Viewport 2", remove=True), ], ), MenuLayout.Sort(exclude_items=["Extensions"]), MenuLayout.Item("New Viewport Window", remove=True), ], ), # that is you enable the Quick Layout Menu MenuLayout.Menu( "Layout", [ MenuLayout.Item("Default", source="Reset Layout"), MenuLayout.Item("Viewport Only"), MenuLayout.Item("Markup Editor"), MenuLayout.Item("Waypoint Viewer"), MenuLayout.Seperator(), MenuLayout.Item("UI Toggle Visibility", source="Window/UI Toggle Visibility"), MenuLayout.Item("Fullscreen Mode", source="Window/Fullscreen Mode"), MenuLayout.Seperator(), MenuLayout.Item("Save Layout", source="Window/Layout/Save Layout..."), MenuLayout.Item("Load Layout", source="Window/Layout/Load Layout..."), # MenuLayout.Seperator(), # MenuLayout.Item("Quick Save", source="Window/Layout/Quick Save"), # MenuLayout.Item("Quick Load", source="Window/Layout/Quick Load"), ], ), MenuLayout.Menu("Tools", [MenuLayout.SubMenu("Animation", remove=True)]), ] omni.kit.menu.utils.add_layout(self._menu_layout) # type: ignore # if you want to support the Quick Layout Menu self._custom_quicklayout_menu() def on_shutdown(self): if self._menu_layout: omni.kit.menu.utils.remove_layout(self._menu_layout) # type: ignore self._menu_layout.clear() self._layout_menu_items.clear() self._navigation.on_shutdown() del self._navigation self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub) del self._application_mode_changed_sub self._stage_event_sub = None # From View setup self._menubar_helper.destroy() if self._menu_helper and hasattr(self._menu_helper, "destroy"): self._menu_helper.destroy() self._menu_helper = None self._stage_templates = []
23,462
Python
45.005882
167
0.557753
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/navigation.py
import asyncio import carb import carb.settings import carb.tokens import carb.dictionary import omni.kit.app import omni.ext import omni.ui as ui import omni.kit.actions.core from omni.kit.viewport.navigation.core import ( NAVIGATION_TOOL_OPERATION_ACTIVE, ViewportNavigationTooltip, get_navigation_bar, ) __all__ = ["Navigation"] CURRENT_TOOL_PATH = "/app/viewport/currentTool" SETTING_NAVIGATION_ROOT = "/exts/omni.kit.tool.navigation/" NAVIGATION_BAR_VISIBLE_PATH = "/exts/omni.kit.viewport.navigation.core/isVisible" APPLICATION_MODE_PATH = "/app/application_mode" WALK_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.walk/visible" CAPTURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.capture/visible" MARKUP_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.markup/visible" MEASURE_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.measure/visible" SECTION_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.section/visible" TELEPORT_SEPARATOR_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.teleport/spvisible" WAYPOINT_VISIBLE_PATH = "/persistent/exts/omni.kit.viewport.navigation.waypoint/visible" VIEWPORT_CONTEXT_MENU_PATH = "/exts/omni.kit.window.viewport/showContextMenu" MENUBAR_APP_MODES_PATH = "/exts/omni.kit.usd_presenter.main.menubar/include_modify_mode" WELCOME_WINDOW_VISIBLE_PATH = "/exts/omni.kit.usd_presenter.window.welcome/visible" ACTIVE_OPERATION_PATH = "/exts/omni.kit.viewport.navigation.core/activeOperation" class Navigation: NAVIGATION_BAR_NAME = None # ext_id is current extension id. It can be used with extension manager to query additional information, like where # this extension is located on filesystem. 
def on_startup(self, ext_id: str) -> None: sections = ext_id.split("-") self._ext_name = sections[0] self._settings = carb.settings.get_settings() self._navigation_bar = get_navigation_bar() self._tool_bar_button = None self._dict = carb.dictionary.get_dictionary() self._panel_visible = True self._navigation_bar.show() self._settings.set(CURRENT_TOOL_PATH, "navigation") self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport") self._viewport_welcome_window_visibility_changed_sub = self._settings.subscribe_to_node_change_events( WELCOME_WINDOW_VISIBLE_PATH, self._on_welcome_window_visibility_change ) # OMFP-1799 Set nav bar visibility defaults. These should remain fixed now. self._settings.set(WALK_VISIBLE_PATH, False) self._settings.set(MARKUP_VISIBLE_PATH, True) self._settings.set(WAYPOINT_VISIBLE_PATH, True) self._settings.set(TELEPORT_SEPARATOR_VISIBLE_PATH, True) self._settings.set(CAPTURE_VISIBLE_PATH, True) self._settings.set(MEASURE_VISIBLE_PATH, True) self._settings.set(SECTION_VISIBLE_PATH, True) self._application_mode_changed_sub = self._settings.subscribe_to_node_change_events( APPLICATION_MODE_PATH, self._on_application_mode_changed ) self._show_tooltips = False self._nav_bar_visibility_sub = self._settings.subscribe_to_node_change_events( NAVIGATION_BAR_VISIBLE_PATH, self._delay_reset_tooltip) _prev_navbar_vis = None _prev_tool = None _prev_operation = None def _on_welcome_window_visibility_change(self, item: carb.dictionary.Item, *_) -> None: if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)): return welcome_window_vis = self._dict.get(item) # preserve the state of the navbar upon closing the Welcome window if the app is in Layout mode if self._settings.get_as_string(APPLICATION_MODE_PATH).lower() == "layout": # preserve the state of the navbar visibility if welcome_window_vis: self._prev_navbar_vis = self._settings.get_as_bool(NAVIGATION_BAR_VISIBLE_PATH) self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis)) 
self._prev_tool = self._settings.get(CURRENT_TOOL_PATH) self._prev_operation = self._settings.get(ACTIVE_OPERATION_PATH) else: # restore the state of the navbar visibility if self._prev_navbar_vis is not None: self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, self._prev_navbar_vis) self._prev_navbar_vis = None if self._prev_tool is not None: self._settings.set(CURRENT_TOOL_PATH, self._prev_tool) if self._prev_operation is not None: self._settings.set(ACTIVE_OPERATION_PATH, self._prev_operation) return else: if welcome_window_vis: self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "none") else: self._settings.set(NAVIGATION_TOOL_OPERATION_ACTIVE, "teleport") self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, not(welcome_window_vis)) def _on_application_mode_changed(self, item: carb.dictionary.Item, *_) -> None: if not isinstance(self._dict, (carb.dictionary.IDictionary, dict)): return current_mode = self._dict.get(item) self._test = asyncio.ensure_future(self._switch_by_mode(current_mode)) async def _switch_by_mode(self, current_mode: str) -> None: await omni.kit.app.get_app().next_update_async() state = True if current_mode == "review" else False self._settings.set(NAVIGATION_BAR_VISIBLE_PATH, state) self._settings.set(VIEWPORT_CONTEXT_MENU_PATH, not(state)) # toggle RMB viewport context menu self._delay_reset_tooltip(None) # OM-92161: Need to reset the tooltip when change the mode def _delay_reset_tooltip(self, *_) -> None: async def delay_set_tooltip() -> None: for _i in range(4): await omni.kit.app.get_app().next_update_async() # type: ignore ViewportNavigationTooltip.set_visible(self._show_tooltips) asyncio.ensure_future(delay_set_tooltip()) def _on_showtips_click(self, *_) -> None: self._show_tooltips = not self._show_tooltips ViewportNavigationTooltip.set_visible(self._show_tooltips) def on_shutdown(self) -> None: self._navigation_bar = None self._viewport_welcome_window_visibility_changed_sub = None 
self._settings.unsubscribe_to_change_events(self._application_mode_changed_sub) # type:ignore self._application_mode_changed_sub = None self._dict = None
6,679
Python
45.713286
119
0.676898
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/ui_state_manager.py
import carb.dictionary
import carb.settings
import omni.ui as ui
from functools import partial
from typing import Any, Dict, List, Tuple, Union

# Setting path that flags an active modal tool; drives window hide/restore.
MODAL_TOOL_ACTIVE_PATH = "/app/tools/modal_tool_active"


class UIStateManager:
    """Keeps UI window visibility and carb settings in sync.

    Three responsibilities:
    - hide registered windows while a modal tool is active, optionally
      restoring their previous visibility afterwards;
    - mirror a window's visibility into one or more setting paths;
    - propagate changes from a source setting to a target setting, either
      copied verbatim or translated through a value map.
    """

    def __init__(self) -> None:
        self._settings = carb.settings.acquire_settings_interface()
        self._modal_changed_sub = self._settings.subscribe_to_node_change_events(
            MODAL_TOOL_ACTIVE_PATH, self._on_modal_setting_changed
        )
        # (window name, restore-after-modal) pairs registered via add_hide_on_modal()
        self._hide_on_modal: List[Tuple[str, bool]] = []
        # window name -> visibility captured when the modal tool started
        self._modal_restore_window_states: Dict[str, bool] = {}
        # (source path, target path) -> value map; None means "copy value as-is".
        # Fix: the annotation previously read Tuple(str, str) — a *call* on
        # typing.Tuple instead of a subscription — which is an invalid type
        # expression (rejected by type checkers and typing.get_type_hints).
        self._settings_dependencies: Dict[Tuple[str, str], Dict[Any, Any]] = {}
        self._settings_changed_subs = {}
        self._window_settings = {}
        self._window_vis_changed_id = ui.Workspace.set_window_visibility_changed_callback(self._on_window_vis_changed)

    def destroy(self) -> None:
        """Release all subscriptions and callbacks; safe to call more than once."""
        if self._settings:
            if self._modal_changed_sub:
                self._settings.unsubscribe_to_change_events(self._modal_changed_sub)
            self._settings = None
        self._hide_on_modal = []
        self._modal_restore_window_states = {}
        self._settings_dependencies = {}
        self._window_settings = {}
        if self._window_vis_changed_id:
            ui.Workspace.remove_window_visibility_changed_callback(self._window_vis_changed_id)
            self._window_vis_changed_id = None

    def __del__(self) -> None:
        self.destroy()

    def add_hide_on_modal(self, window_names: Union[str, List[str]], restore: bool) -> None:
        """Register window(s) to be hidden while a modal tool is active.

        If ``restore`` is True the previous visibility is saved and restored
        when the modal tool deactivates.
        """
        if isinstance(window_names, str):
            window_names = [window_names]
        # Fix: the list stores (name, restore) tuples, so membership must be
        # tested against the stored names — the previous check compared the
        # bare name with the tuples and therefore never matched, appending a
        # duplicate entry on every repeated registration.
        registered_names = {item[0] for item in self._hide_on_modal}
        for window_name in window_names:
            if window_name not in registered_names:
                self._hide_on_modal.append((window_name, restore))
                registered_names.add(window_name)

    def remove_hide_on_modal(self, window_names: Union[str, List[str]]) -> None:
        """Unregister window(s) from modal hiding."""
        if isinstance(window_names, str):
            window_names = [window_names]
        self._hide_on_modal = [item for item in self._hide_on_modal if item[0] not in window_names]

    def add_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        """Mirror ``window_name``'s visibility into ``setting_path``.

        The setting is initialized from the current window state (False when
        the window does not exist yet) and kept up to date via the workspace
        visibility-changed callback.
        """
        window = ui.Workspace.get_window(window_name)
        if window is not None:
            self._settings.set(setting_path, window.visible)
        else:
            # handle the case when the window is created later
            self._settings.set(setting_path, False)
        if window_name not in self._window_settings.keys():
            self._window_settings[window_name] = []
        self._window_settings[window_name].append(setting_path)

    def remove_window_visibility_setting(self, window_name: str, setting_path: str) -> None:
        """Stop mirroring one window/setting pair; drops the window entry when empty."""
        if window_name in self._window_settings.keys():
            setting_list = self._window_settings[window_name]
            if setting_path in setting_list:
                setting_list.remove(setting_path)
                if len(setting_list) == 0:
                    del self._window_settings[window_name]

    def remove_all_window_visibility_settings(self, window_name: str) -> None:
        """Stop mirroring every setting attached to ``window_name``."""
        if window_name in self._window_settings.keys():
            del self._window_settings[window_name]

    def add_settings_dependency(self, source_path: str, target_path: str, value_map: Dict[Any, Any]) -> None:
        """Propagate changes of ``source_path`` into ``target_path``.

        ``value_map`` translates source values to target values; ``None``
        means the value is copied unchanged. Duplicate registrations are
        rejected with an error log.
        """
        key = (source_path, target_path)
        if key in self._settings_dependencies.keys():
            carb.log_error(f'Settings dependency {source_path} -> {target_path} already exists. Ignoring.')
            return
        self._settings_dependencies[key] = value_map
        self._settings_changed_subs[key] = self._settings.subscribe_to_node_change_events(
            source_path, partial(self._on_settings_dependency_changed, source_path)
        )

    def add_settings_copy_dependency(self, source_path: str, target_path: str) -> None:
        """Convenience wrapper: copy the source value to the target unchanged."""
        self.add_settings_dependency(source_path, target_path, None)

    def remove_settings_dependency(self, source_path: str, target_path: str) -> None:
        """Remove a dependency and its change subscription."""
        key = (source_path, target_path)
        if key in self._settings_dependencies.keys():
            del self._settings_dependencies[key]
        if key in self._settings_changed_subs.keys():
            sub = self._settings_changed_subs.pop(key)
            self._settings.unsubscribe_to_change_events(sub)

    def _on_settings_dependency_changed(self, path: str, item, event_type) -> None:
        """Push a changed source setting into every registered target."""
        value = self._settings.get(path)
        # setting does not exist
        if value is None:
            return
        target_settings = [source_target[1]
                           for source_target in self._settings_dependencies.keys()
                           if source_target[0] == path]
        for target_setting in target_settings:
            value_map = self._settings_dependencies[(path, target_setting)]
            # None means copy everything
            if value_map is None:
                self._settings.set(target_setting, value)
            elif value in value_map.keys():
                self._settings.set(target_setting, value_map[value])

    def _on_modal_setting_changed(self, item, event_type) -> None:
        """Hide or restore registered windows when the modal flag flips."""
        modal = self._settings.get_as_bool(MODAL_TOOL_ACTIVE_PATH)
        if modal:
            self._hide_windows()
        else:
            self._restore_windows()

    def _hide_windows(self) -> None:
        """Hide all registered windows, saving visibility for restorable ones."""
        for window_info in self._hide_on_modal:
            window_name, restore_later = window_info[0], window_info[1]
            window = ui.Workspace.get_window(window_name)
            if window is not None:
                if restore_later:
                    self._modal_restore_window_states[window_name] = window.visible
                window.visible = False

    def _restore_windows(self) -> None:
        """Restore visibility captured by _hide_windows for restorable windows."""
        for window_info in self._hide_on_modal:
            window_name, restore_later = window_info[0], window_info[1]
            if restore_later:
                if window_name in self._modal_restore_window_states.keys():
                    old_visibility = self._modal_restore_window_states[window_name]
                    if old_visibility is not None:
                        window = ui.Workspace.get_window(window_name)
                        if window is not None:
                            window.visible = old_visibility
                        # consume the saved state so it is not re-applied
                        self._modal_restore_window_states[window_name] = None

    def _on_window_vis_changed(self, title: str, state: bool) -> None:
        """Workspace callback: mirror a visibility change into bound settings."""
        if title in self._window_settings.keys():
            for setting in self._window_settings[title]:
                self._settings.set_bool(setting, state)
6,634
Python
44.136054
128
0.611999
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/stage_template.py
import carb
import omni.ext
import omni.kit.commands
from omni.kit.stage_templates import register_template, unregister_template
from pxr import Gf, Sdf, Usd, UsdGeom, UsdLux


class SunnySkyStage:
    """Stage template that populates a new stage with a sunny-sky light rig.

    Registers itself under the name "SunnySky" with omni.kit.stage_templates
    on construction and unregisters on garbage collection.
    """

    def __init__(self):
        register_template("SunnySky", self.new_stage)

    def __del__(self):
        unregister_template("SunnySky")

    def new_stage(self, rootname, usd_context_name):
        """Template callback: create /Environment with a dome sky and a distant light.

        ``rootname`` is accepted for the template-callback signature but unused here.
        """
        # Create basic DistantLight
        # NOTE(review): relies on omni.usd being importable via a transitive
        # import (it is not imported at the top of this file) — confirm.
        usd_context = omni.usd.get_context(usd_context_name)
        stage = usd_context.get_stage()

        # get up axis
        up_axis = UsdGeom.GetStageUpAxis(stage)

        # Author everything on the root layer regardless of the current edit target.
        with Usd.EditContext(stage, stage.GetRootLayer()):
            # create Environment
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment",
                prim_type="Xform",
                select_new_prim=False,
                create_default_xform=True,
                context_name=usd_context_name
            )

            # Resolve the HDR texture shipped with this extension.
            texture_path = carb.tokens.get_tokens_interface().resolve("${omni.usd_explorer.setup}/data/light_rigs/HDR/partly_cloudy.hdr")

            # create Sky
            # The hasattr() branch picks the "inputs:"-prefixed UsdLux attribute
            # tokens when available, falling back to the older un-prefixed names.
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment/Sky",
                prim_type="DomeLight",
                select_new_prim=False,
                attributes={
                    UsdLux.Tokens.inputsIntensity: 1000,
                    UsdLux.Tokens.inputsTextureFile: texture_path,
                    UsdLux.Tokens.inputsTextureFormat: UsdLux.Tokens.latlong,
                    UsdLux.Tokens.inputsSpecular: 1,
                    UsdGeom.Tokens.visibility: "inherited",
                } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \
                {
                    UsdLux.Tokens.intensity: 1000,
                    UsdLux.Tokens.textureFile: texture_path,
                    UsdLux.Tokens.textureFormat: UsdLux.Tokens.latlong,
                    UsdGeom.Tokens.visibility: "inherited",
                },
                create_default_xform=True,
                context_name=usd_context_name
            )
            prim = stage.GetPrimAtPath("/Environment/Sky")
            prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1))
            prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0))
            # Orient the dome so its zenith matches the stage up axis.
            if up_axis == "Y":
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(270, 0, 0))
            else:
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 90))
            prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"])

            # create DistantLight
            omni.kit.commands.execute(
                "CreatePrim",
                prim_path="/Environment/DistantLight",
                prim_type="DistantLight",
                select_new_prim=False,
                attributes={
                    UsdLux.Tokens.inputsAngle: 4.3,
                    UsdLux.Tokens.inputsIntensity: 3000,
                    UsdGeom.Tokens.visibility: "inherited",
                } if hasattr(UsdLux.Tokens, 'inputsIntensity') else \
                {
                    UsdLux.Tokens.angle: 4.3,
                    UsdLux.Tokens.intensity: 3000,
                    UsdGeom.Tokens.visibility: "inherited",
                },
                create_default_xform=True,
                context_name=usd_context_name
            )
            prim = stage.GetPrimAtPath("/Environment/DistantLight")
            prim.CreateAttribute("xformOp:scale", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(1, 1, 1))
            prim.CreateAttribute("xformOp:translate", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(0, 0, 0))
            # Sun direction per up axis; rotation values differ because the
            # whole rig is rotated differently for Y-up vs Z-up stages.
            if up_axis == "Y":
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(310.6366313590111, -125.93251524567805, 0.8821359067542289))
            else:
                prim.CreateAttribute("xformOp:rotateXYZ", Sdf.ValueTypeNames.Double3, False).Set(Gf.Vec3d(41.35092544555664, 0.517652153968811, -35.92928695678711))
            prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.String, False).Set(["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"])
4,590
Python
48.902173
166
0.56732
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/menu_helper.py
import asyncio

import carb.settings
import omni.kit.app
import omni.kit.commands
import omni.kit.menu.utils
import omni.renderer_capture
from omni.kit.menu.utils import MenuLayout

# Application mode setting ("review"/"present" vs editing modes).
SETTINGS_APPLICATION_MODE_PATH = "/app/application_mode"


class MenuHelper:
    """Swaps the main-menu layout depending on the application mode.

    In "present"/"review" mode all menus are removed; otherwise only an
    allowlisted subset is kept. Layout changes are deferred to an asyncio
    task because they must not happen inside menu callbacks.
    """

    def __init__(self) -> None:
        self._settings = carb.settings.get_settings()
        self._current_layout = None
        self._pending_layout = None
        self._changing_layout_task: asyncio.Task = None
        self._menu_layout_empty = []
        self._menu_layout_modify = []

        # Hook into menu (re)builds so the layout is recomputed whenever the
        # menu definitions change.
        omni.kit.menu.utils.add_hook(self._menu_hook)
        self._app_mode_sub = self._settings.subscribe_to_node_change_events(
            SETTINGS_APPLICATION_MODE_PATH, self._on_application_mode_changed
        )
        self._menu_hook()

    def destroy(self) -> None:
        """Remove the hook/subscription and any layout this helper installed."""
        omni.kit.menu.utils.remove_hook(self._menu_hook)
        if self._changing_layout_task and not self._changing_layout_task.done():
            self._changing_layout_task.cancel()
        self._changing_layout_task = None
        if self._app_mode_sub:
            self._settings.unsubscribe_to_change_events(self._app_mode_sub)
            self._app_mode_sub = None
        self._app_ready_sub = None
        if self._current_layout:
            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None

    def _menu_hook(self, *args, **kwargs) -> None:
        """Rebuild both candidate layouts from the current menu definitions.

        Skipped entirely when the menu-debug setting is on, so developers see
        the unfiltered menus.
        """
        if self._settings.get_as_bool("/app/view/debug/menus"):
            return

        # Menus NOT in these sets get a remove=True entry in the layout.
        LAYOUT_EMPTY_ALLOWED_MENUS = set()
        LAYOUT_MODIFY_ALLOWED_MENUS = {"File", "Edit", "Window", "Tools", "Help"}

        # make NEW list object instead of clear original
        # the original list may be held by self._current_layout and omni.kit.menu.utils
        self._menu_layout_empty = []
        self._menu_layout_modify = []

        menu_instance = omni.kit.menu.utils.get_instance()
        if not menu_instance:  # pragma: no cover
            return

        # Build new layouts using allowlists
        # NOTE(review): reads menu_instance._menu_defs, a private attribute of
        # omni.kit.menu.utils — fragile across kit versions.
        for key in menu_instance._menu_defs:
            if key.lower().endswith("widget"):
                continue
            if key not in LAYOUT_EMPTY_ALLOWED_MENUS:
                self._menu_layout_empty.append(MenuLayout.Menu(key, remove=True))
            if key not in LAYOUT_MODIFY_ALLOWED_MENUS:
                self._menu_layout_modify.append(MenuLayout.Menu(key, remove=True))

            # Remove 'Viewport 2' entry
            if key == "Window":
                for menu_item_1 in menu_instance._menu_defs[key]:
                    for menu_item_2 in menu_item_1:
                        if menu_item_2.name == "Viewport":
                            menu_item_2.sub_menu = [mi for mi in menu_item_2.sub_menu if mi.name != "Viewport 2"]

        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    def _on_application_mode_changed(self, *args) -> None:
        """Settings callback: schedule a deferred layout change."""
        if self._changing_layout_task is None or self._changing_layout_task.done():
            self._changing_layout_task = asyncio.ensure_future(self._delayed_change_layout())

    async def _delayed_change_layout(self):
        """Apply the layout matching the current application mode."""
        mode = self._settings.get_as_string(SETTINGS_APPLICATION_MODE_PATH)
        if mode in ["present", "review"]:
            pending_layout = self._menu_layout_empty
        else:
            pending_layout = self._menu_layout_modify
        # Don't change layout inside of menu callback _on_application_mode_changed
        # omni.ui throws error
        if self._current_layout:
            # OMFP-2737: Do no rebuild menu (change menu layout) if layout is same
            # Here only check number of layout menu items and name of every of layout menu item same
            same_layout = len(self._current_layout) == len(pending_layout)
            if same_layout:
                for index, item in enumerate(self._current_layout):
                    if item.name != pending_layout[index].name:
                        same_layout = False
            if same_layout:
                # NOTE(review): early return leaves self._changing_layout_task
                # pointing at this (now finished) task — harmless because
                # schedulers check .done(), but worth confirming.
                return

            omni.kit.menu.utils.remove_layout(self._current_layout)
            self._current_layout = None

        omni.kit.menu.utils.add_layout(pending_layout)  # type: ignore
        # Copy so later rebuilds of the pending lists don't mutate what we
        # consider "current".
        self._current_layout = pending_layout.copy()
        self._changing_layout_task = None
4,434
Python
37.565217
113
0.608029
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_release_config.py
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test


class TestConfig(omni.kit.test.AsyncTestCase):
    """Release-configuration sanity checks for the USD Explorer app."""

    async def test_l1_public_release_configuration(self):
        """Checks tied to switching the app version to a release candidate.

        Most individual checks are currently disabled (commented out) but are
        kept as documentation of what a public-release configuration must
        satisfy.
        """
        settings = carb.settings.get_settings()
        app_version = settings.get("/app/version")

        # This test covers a moment in time when we switch version to RC.
        # Following test cases must be satisfied.
        # (Also implicitly verifies /app/version is set — the containment
        # check below fails on None.)
        is_rc = "-rc." in app_version

        # Fix: the commented-out titlebar check below previously appeared
        # twice verbatim (copy-paste duplication); one copy was removed.
        # title_format_string = settings.get("exts/omni.kit.window.modifier.titlebar/titleFormatString")
        # if is_rc:
        # Make sure the title format string doesn't use app version if app version contains rc
        # title_using_app_version = "/app/version" in title_format_string
        # self.assertFalse(is_rc and title_using_app_version, "check failed: title format string contains app version which contains 'rc'")
        # Make sure the title format string has "Beta" in it
        # title_has_beta = "Beta" in title_format_string
        # self.assertTrue(title_has_beta, "check failed: title format string does not have 'Beta ' in it")

        # Make sure we set build to external when going into RC release mode
        # external = settings.get("/privacy/externalBuild") or False
        # self.assertEqual(
        #     external,
        #     is_rc,
        #     "check failed: is this an RC build? %s Is /privacy/externalBuild set to true? %s" % (is_rc, external),
        # )

        # if is_rc:
        #     # Make sure we remove some extensions from public release
        #     EXTENSIONS = [
        #         # "omni.kit.profiler.tracy",
        #         "omni.kit.window.jira",
        #         "omni.kit.testing.services",
        #         "omni.kit.tests.usd_stress",
        #         "omni.kit.tests.basic_validation",
        #         # "omni.kit.extension.reports",
        #     ]
        #     manager = omni.kit.app.get_app().get_extension_manager()
        #     ext_names = {e["name"] for e in manager.get_extensions()}
        #     for ext in EXTENSIONS:
        #         self.assertEqual(
        #             ext in ext_names,
        #             False,
        #             f"looks like {ext} was not removed from public build",
        #         )

    async def test_l1_usd_explorer_and_usd_explorer_full_have_same_version(self):
        """omni.usd_explorer and omni.usd_explorer.full must share one version."""
        manager = omni.kit.app.get_app().get_extension_manager()
        EXTENSIONS = [
            "omni.usd_explorer",
            "omni.usd_explorer.full",
        ]
        # need to find both extensions and they need the same version id
        usd_explorer_exts = [e for e in manager.get_extensions() if e.get("name", "") in EXTENSIONS]
        self.assertEqual(len(usd_explorer_exts), 2)
        self.assertEqual(
            usd_explorer_exts[0]["version"],
            usd_explorer_exts[1]["version"],
            "omni.usd_explorer.kit and omni.usd_explorer.full.kit have different versions",
        )
3,572
Python
43.662499
143
0.594905
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_state_manager.py
## Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import carb.settings
import omni.kit.app
import omni.ui as ui
from omni.kit.test import AsyncTestCase

from ..ui_state_manager import UIStateManager, MODAL_TOOL_ACTIVE_PATH


class TestUIStateManager(AsyncTestCase):
    """Exercises UIStateManager: modal hiding, visibility settings, dependencies."""

    async def setUp(self):
        # Fresh manager per test; shares the global carb settings store.
        self._sm = UIStateManager()
        self._settings = carb.settings.get_settings()

    async def tearDown(self):
        self._sm = None

    async def test_destroy(self):
        """destroy() must be safe after every kind of registration."""
        self._sm.add_hide_on_modal('dummy', False)
        self._sm.add_settings_copy_dependency('a', 'b')
        self._sm.add_settings_dependency('c', 'd', {1: 2})
        self._sm.add_window_visibility_setting('my_window', 'my_setting')
        self._sm.destroy()

    async def test_hide_on_modal(self):
        """Windows hide on modal activation; only restore=True ones come back."""
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)
        self._sm.add_hide_on_modal('NO_RESTORE', False)
        self._sm.add_hide_on_modal(['A_RESTORE', 'B_RESTORE'], True)
        window_no_restore = ui.Window('NO_RESTORE')
        window_restore_1 = ui.Window('A_RESTORE')
        window_restore_2 = ui.Window('B_RESTORE')
        window_no_restore.visible = True
        window_restore_1.visible = True
        window_restore_2.visible = False
        await self._wait()
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True)
        await self._wait()
        # All three hidden while modal is active.
        self.assertFalse(window_no_restore.visible)
        self.assertFalse(window_restore_1.visible)
        self.assertFalse(window_restore_2.visible)
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)
        await self._wait()
        # Only the restore=True window that was visible before returns.
        self.assertFalse(window_no_restore.visible)
        self.assertTrue(window_restore_1.visible)
        self.assertFalse(window_restore_2.visible)
        # After unregistering, the window is left alone by the next modal.
        self._sm.remove_hide_on_modal(window_restore_1.title)
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, True)
        await self._wait()
        self.assertTrue(window_restore_1.visible)
        self._settings.set_bool(MODAL_TOOL_ACTIVE_PATH, False)

    async def test_window_visibility_setting(self):
        """Window visibility is mirrored into every bound setting path."""
        window_name = 'Dummy'
        setting_path = '/apps/dummy'
        setting_path2 = '/apps/dummy2'
        window = ui.Window(window_name)
        window.visible = True
        await self._wait()
        self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path)
        self._sm.add_window_visibility_setting(window_name=window_name, setting_path=setting_path2)
        self.assertIsNotNone(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path2))
        window.visible = False
        self.assertFalse(self._settings.get(setting_path))
        self.assertFalse(self._settings.get(setting_path2))
        window.visible = True
        self.assertTrue(self._settings.get(setting_path))
        self.assertTrue(self._settings.get(setting_path2))
        # Removing one binding leaves the other active.
        self._sm.remove_window_visibility_setting(window_name=window_name, setting_path=setting_path)
        window.visible = False
        self.assertTrue(self._settings.get(setting_path))
        self.assertFalse(self._settings.get(setting_path2))
        self._sm.remove_all_window_visibility_settings(window_name=window_name)
        window.visible = True
        self.assertFalse(self._settings.get(setting_path2))

    async def test_setting_dependency(self):
        """Copy and mapped dependencies propagate one-way, source -> target."""
        setting_path_copy_from = '/app/copy_from'
        setting_path_copy_to = '/ext/copy_to'
        setting_path_map_from = '/ext/map_from'
        setting_path_map_to = '/something/map_to'
        self._sm.add_settings_copy_dependency(setting_path_copy_from, setting_path_copy_to)
        self._settings.set_string(setting_path_copy_from, 'hello_world')
        self.assertEqual(self._settings.get(setting_path_copy_from), self._settings.get(setting_path_copy_to))
        # doesn't work the other way around
        self._settings.set_string(setting_path_copy_to, 'no_copy_back')
        self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world')
        self._sm.add_settings_dependency(setting_path_map_from, setting_path_map_to, {1: 2, 3: 4})
        self._settings.set_int(setting_path_map_from, 1)
        self.assertEqual(self._settings.get(setting_path_map_to), 2)
        self._settings.set_int(setting_path_map_from, 3)
        self.assertEqual(self._settings.get(setting_path_map_to), 4)
        # not in the map
        self._settings.set_int(setting_path_map_from, 42)
        self.assertEqual(self._settings.get(setting_path_map_to), 4)
        self.assertEqual(self._settings.get(setting_path_copy_from), 'hello_world')
        self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back')
        # Removed dependency stops propagating.
        self._sm.remove_settings_dependency(setting_path_copy_from, setting_path_copy_to)
        self._settings.set_string(setting_path_copy_from, 'this_is_not_copied')
        self.assertEqual(self._settings.get(setting_path_copy_to), 'no_copy_back')

    async def _wait(self, frames: int = 5):
        # Let the app pump a few frames so settings/visibility callbacks fire.
        for _ in range(frames):
            await omni.kit.app.get_app().next_update_async()
5,552
Python
42.046511
110
0.67219
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/__init__.py
# run startup tests first from .test_app_startup import * # run all other tests after from .test_extensions import * from .test_release_config import * from .test import * from .test_state_manager import *
206
Python
24.874997
34
0.757282
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test.py
import omni.kit.app
from omni.ui.tests.test_base import OmniUiTest
from omni.kit import ui_test

# Extension under test.
ext_id = 'omni.usd_explorer.setup'


class TestSetupToolExtension(OmniUiTest):
    """Enable/disable round-trips and helper smoke tests for the setup extension."""

    async def test_extension(self):
        """Extension can be disabled and re-enabled after the app settles."""
        manager = omni.kit.app.get_app().get_extension_manager()
        self.assertTrue(ext_id)
        self.assertTrue(manager.is_extension_enabled(ext_id))

        # Give the app time to finish startup work before toggling.
        app = omni.kit.app.get_app()
        for _ in range(500):
            await app.next_update_async()

        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertTrue(not manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

    async def test_menubar_helper_camera_dependency(self):
        """Toggling works with the viewport camera menubar extension enabled."""
        manager = omni.kit.app.get_app().get_extension_manager()
        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled('omni.kit.viewport.menubar.camera', True)
        await ui_test.human_delay()
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

    async def test_menu_helper(self):
        """MenuHelper constructs and destroys cleanly."""
        from ..menu_helper import MenuHelper
        menu_helper = MenuHelper()
        menu_helper.destroy()

    async def test_menubar_helper_menu(self):
        """MenubarHelper constructs, builds camera-speed UI, and destroys."""
        from ..menubar_helper import MenubarHelper
        menubar_helper = MenubarHelper()
        menubar_helper._create_camera_speed(None, None)
        menubar_helper.destroy()

    async def test_menu_helper_debug_setting(self):
        """Extension toggles cleanly with the menu-debug setting on and restored."""
        SETTINGS_VIEW_DEBUG_MENUS = '/app/view/debug/menus'
        import carb.settings
        settings = carb.settings.get_settings()

        manager = omni.kit.app.get_app().get_extension_manager()
        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))

        orig_value = settings.get(SETTINGS_VIEW_DEBUG_MENUS)
        settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, True)
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))
        settings.set_bool(SETTINGS_VIEW_DEBUG_MENUS, orig_value)
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

    async def test_menu_helper_application_mode_change(self):
        """Cycling through application modes must not raise."""
        from ..menu_helper import SETTINGS_APPLICATION_MODE_PATH
        import carb.settings
        settings = carb.settings.get_settings()
        settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
        await ui_test.human_delay()
        settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'welcome')
        await ui_test.human_delay()
        settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
        await ui_test.human_delay()
        settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'comment')
        await ui_test.human_delay()
        settings.set_string(SETTINGS_APPLICATION_MODE_PATH, 'modify')
        await ui_test.human_delay()

    async def test_menu_helper_widget_menu(self):
        """MenuHelper tolerates a registered '*widget' menu (skipped by its hook)."""
        import omni.kit.menu.utils
        omni.kit.menu.utils.add_menu_items([], name='test widget')
        from ..menu_helper import MenuHelper
        menu_helper = MenuHelper()
        menu_helper.destroy()

    async def test_startup_expand_viewport(self):
        """Extension toggles cleanly with the expand-viewport setting on and restored."""
        from ..setup import SETTINGS_STARTUP_EXPAND_VIEWPORT
        import carb.settings
        settings = carb.settings.get_settings()
        orig_value = settings.get(SETTINGS_STARTUP_EXPAND_VIEWPORT)
        settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, True)

        manager = omni.kit.app.get_app().get_extension_manager()
        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

        settings.set_bool(SETTINGS_STARTUP_EXPAND_VIEWPORT, orig_value)
        manager.set_extension_enabled(ext_id, False)
        await ui_test.human_delay()
        self.assertFalse(manager.is_extension_enabled(ext_id))
        manager.set_extension_enabled(ext_id, True)
        await ui_test.human_delay()
        self.assertTrue(manager.is_extension_enabled(ext_id))

    async def test_navigation_invalid_dict(self):
        """Navigation handlers must tolerate a bogus dictionary interface."""
        from ..navigation import Navigation
        navigation = Navigation()
        navigation._show_tooltips = False
        navigation._dict = 42
        navigation._on_application_mode_changed(None, None)
        navigation._on_showtips_click()

    async def test_navigation_current_tool_mode_change(self):
        """Cycling current-tool values across modes must not raise."""
        from ..navigation import CURRENT_TOOL_PATH, APPLICATION_MODE_PATH
        import carb.settings
        settings = carb.settings.get_settings()
        settings.set_string(APPLICATION_MODE_PATH, 'modify')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'markup')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'navigation')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'markup')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'welcome')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'navigation')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'markup')
        await ui_test.human_delay()
        settings.set_string(CURRENT_TOOL_PATH, 'navigation')
        await ui_test.human_delay()

    async def test_setup_clear_startup_scene_edits(self):
        """_clear_startup_scene_edits leaves the context without pending edits."""
        from ..setup import _clear_startup_scene_edits
        await _clear_startup_scene_edits()
        import omni.usd
        self.assertFalse(omni.usd.get_context().has_pending_edit())

    async def test_stage_template(self):
        """The SunnySky stage template can create a new stage."""
        import omni.kit.stage_templates
        omni.kit.stage_templates.new_stage(template='SunnySky')
6,826
Python
34.190721
79
0.665397
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_app_startup.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.app
from omni.kit.test import AsyncTestCase


class TestAppStartup(AsyncTestCase):
    """Best-effort startup telemetry reporters; always pass as tests."""

    async def test_l1_app_startup_time(self):
        """Get startup time - send to nvdf"""
        # Let the app settle before sampling.
        for _ in range(60):
            await omni.kit.app.get_app().next_update_async()

        try:
            from omni.kit.core.tests import app_startup_time
            app_startup_time(self.id())
        # Fix: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; reporting stays best-effort but only for real errors.
        except Exception:
            pass
        self.assertTrue(True)

    async def test_l1_app_startup_warning_count(self):
        """Get the count of warnings during startup - send to nvdf"""
        for _ in range(60):
            await omni.kit.app.get_app().next_update_async()

        try:
            from omni.kit.core.tests import app_startup_warning_count
            app_startup_warning_count(self.id())
        # Fix: narrowed from bare `except:` (see above).
        except Exception:
            pass
        self.assertTrue(True)
1,323
Python
32.948717
77
0.657596
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.usd_explorer.setup/omni/usd_explorer/setup/tests/test_extensions.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import sys

import carb.settings
import omni.kit.app
import omni.kit.actions.core
from omni.kit.core.tests import validate_extensions_load, validate_extensions_tests
from omni.kit.test import AsyncTestCase
from pxr import Usd, UsdGeom, Gf


class TestUSDExplorerExtensions(AsyncTestCase):
    """Extension-wide validation plus regression tests for USD Explorer."""

    async def test_l1_extensions_have_tests(self):
        """Loop all enabled extensions to see if they have at least one (1) unittest"""
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        # This list should be empty or near empty ideally
        EXCLUSION_LIST = [
            # extensions from Kit
            "omni.mdl",
            "omni.ansel.init",
            # extensions from USD Explorer
        ]
        # These extensions only run tests on win32 for now
        if sys.platform != "win32":
            EXCLUSION_LIST.append("omni.hydra.scene_api")
            EXCLUSION_LIST.append("omni.rtx.tests")
        self.assertEqual(validate_extensions_tests(EXCLUSION_LIST), 0)

    async def test_l1_extensions_load(self):
        """Loop all enabled extensions to see if they loaded correctly"""
        self.assertEqual(validate_extensions_load(), 0)

    async def test_regression_omfp_2304(self):
        """Regression test for OMFP-2304"""
        # The collaboration selection-outline extension must be loaded.
        loaded_omni_kit_collaboration_selection_outline = False
        manager = omni.kit.app.get_app().get_extension_manager()
        for ext in manager.get_extensions():
            if ext["name"] == "omni.kit.collaboration.selection_outline":
                loaded_omni_kit_collaboration_selection_outline = True
                break
        self.assertTrue(loaded_omni_kit_collaboration_selection_outline)

    async def _wait(self, frames: int = 10):
        # Pump a fixed number of app updates.
        for _ in range(frames):
            await omni.kit.app.get_app().next_update_async()

    async def wait_stage_loading(self):
        """Block until the USD context reports no files loading, then settle."""
        while True:
            _, files_loaded, total_files = omni.usd.get_context().get_stage_loading_status()
            if files_loaded or total_files:
                await self._wait()
                continue
            break
        await self._wait(100)

    async def _get_1_1_1_rotation(self) -> Gf.Vec3d:
        """Loads a stage and returns the transformation of the (1,1,1) vector by the directional light's rotation"""
        await self._wait()
        omni.kit.actions.core.execute_action("omni.kit.window.file", "new")
        await self.wait_stage_loading()
        context = omni.usd.get_context()
        self.assertIsNotNone(context)
        stage = context.get_stage()
        self.assertIsNotNone(stage)
        prim_path = '/Environment/DistantLight'
        prim = stage.GetPrimAtPath(prim_path)
        self.assertTrue(prim.IsValid())
        # Extract the prim's transformation matrix in world space
        xformAPI = UsdGeom.XformCache()
        transform_matrix_world = xformAPI.GetLocalToWorldTransform(prim)
        unit_point = Gf.Vec3d(1, 1, 1)
        transformed_point = transform_matrix_world.Transform(unit_point)
        return transformed_point

    async def test_regression_omfp_OMFP_3314(self):
        """Regression test for OMFP-3314"""
        # The SunnySky template must orient its light consistently for
        # Y-up and Z-up stages (axes permuted, same world direction).
        settings = carb.settings.get_settings()
        UP_AXIS_PATH = "/persistent/app/stage/upAxis"
        settings.set("/persistent/app/newStage/defaultTemplate", "SunnySky")
        settings.set_string(UP_AXIS_PATH, "Z")
        point_z_up = await self._get_1_1_1_rotation()
        settings.set_string(UP_AXIS_PATH, "Y")
        point_y_up = await self._get_1_1_1_rotation()
        # with the default camera position:
        # in y-up: z points bottom left, x points bottom right, y points up
        # in z-up: x points bottom left, y points bottom right, z points up
        places = 4
        self.assertAlmostEqual(point_y_up[2], point_z_up[0], places=places)
        self.assertAlmostEqual(point_y_up[0], point_z_up[1], places=places)
        self.assertAlmostEqual(point_y_up[1], point_z_up[2], places=places)
4,461
Python
40.314814
116
0.656355
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/extension.py
# Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import omni.ext
import omni.ui as ui


# Public module-level API: other extensions can call this as usual in python,
# e.g. `example.python_ext.some_public_function(x)`.
def some_public_function(x: int):
    """Log the call and return ``x`` raised to the power of itself."""
    print(f"[omni.hello.world] some_public_function was called with {x}")
    return x ** x


class MyExtension(omni.ext.IExt):
    """Minimal example extension showing a window with a click counter.

    Any class derived from `omni.ext.IExt` in the top-level module (declared
    in `python.modules` of `extension.toml`) is instantiated when the
    extension is enabled; `on_startup(ext_id)` runs then and `on_shutdown()`
    runs when it is disabled.
    """

    def on_startup(self, ext_id):
        # `ext_id` identifies this extension and can be passed to the
        # extension manager to query e.g. its filesystem location.
        print("[omni.hello.world] MyExtension startup")

        self._count = 0
        self._window = ui.Window("My Window", width=300, height=300)

        # Build the UI: a label over an Add/Reset button row.
        with self._window.frame, ui.VStack():
            counter_label = ui.Label("")

            def _bump():
                # Increment the counter and refresh the label text.
                self._count += 1
                counter_label.text = f"count: {self._count}"

            def _clear():
                # Zero the counter and show the placeholder text.
                self._count = 0
                counter_label.text = "empty"

            # Start from the reset state so the label is never blank.
            _clear()

            with ui.HStack():
                ui.Button("Add", clicked_fn=_bump)
                ui.Button("Reset", clicked_fn=_clear)

    def on_shutdown(self):
        print("[omni.hello.world] MyExtension shutdown")
2,141
Python
36.578947
119
0.64269
NVIDIA-Omniverse/kit-app-template/source/extensions/omni.hello.world/omni/hello/world/tests/test_hello_world.py
# Copyright 2019-2023 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE:
# omni.kit.test wraps the standard unittest module with async/await support;
# for most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test

# Extension for writing UI tests (simulates UI interaction).
import omni.kit.ui_test as ui_test

# Import the extension module under test with an absolute path, exactly as an
# external user (another extension) would.
import omni.hello.world


# A class derived from omni.kit.test.AsyncTestCase at module root is
# auto-discovered by omni.kit.test.
class Test(omni.kit.test.AsyncTestCase):
    async def setUp(self):
        # No per-test setup needed.
        pass

    async def tearDown(self):
        # No per-test cleanup needed.
        pass

    # Tests are "async" functions so "await" can be used when needed.
    async def test_hello_public_function(self):
        # 4 ** 4 == 256
        value = omni.hello.world.some_public_function(4)
        self.assertEqual(value, 256)

    async def test_window_button(self):
        # Locate the label and the two buttons inside our window.
        count_label = ui_test.find("My Window//Frame/**/Label[*]")
        add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
        reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")

        # Reset first so the counter starts from a known state.
        await reset_button.click()
        self.assertEqual(count_label.widget.text, "empty")

        # Each Add click should advance the displayed count by one.
        await add_button.click()
        self.assertEqual(count_label.widget.text, "count: 1")
        await add_button.click()
        self.assertEqual(count_label.widget.text, "count: 2")
2,253
Python
35.950819
142
0.70395
NVIDIA-Omniverse/IsaacGymEnvs/setup.py
"""Installation script for the 'isaacgymenvs' python package.""" from __future__ import absolute_import from __future__ import print_function from __future__ import division from setuptools import setup, find_packages import os root_dir = os.path.dirname(os.path.realpath(__file__)) # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ # RL "gym==0.23.1", "torch", "omegaconf", "termcolor", "jinja2", "hydra-core>=1.2", "rl-games>=1.6.0", "pyvirtualdisplay", "urdfpy==0.0.22", "pysdf==0.1.9", "warp-lang==0.10.1", "trimesh==3.23.5", ] # Installation operation setup( name="isaacgymenvs", author="NVIDIA", version="1.5.1", description="Benchmark environments for high-speed robot learning in NVIDIA IsaacGym.", keywords=["robotics", "rl"], include_package_data=True, python_requires=">=3.6", install_requires=INSTALL_REQUIRES, packages=find_packages("."), classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.6, 3.7, 3.8"], zip_safe=False, ) # EOF
1,107
Python
21.612244
99
0.644986
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/__init__.py
import hydra
from hydra import compose, initialize
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf

from isaacgymenvs.utils.reformat import omegaconf_to_dict

# Custom OmegaConf resolvers used throughout the bundled config files.
OmegaConf.register_new_resolver('eq', lambda a, b: a.lower() == b.lower())
OmegaConf.register_new_resolver('contains', lambda a, b: a.lower() in b.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)


def make(
    seed: int,
    task: str,
    num_envs: int,
    sim_device: str,
    rl_device: str,
    graphics_device_id: int = -1,
    headless: bool = False,
    multi_gpu: bool = False,
    virtual_screen_capture: bool = False,
    force_render: bool = True,
    cfg: DictConfig = None
):
    """Create an IsaacGym environment for the given task.

    If ``cfg`` is omitted, a hydra config is composed from the package's
    ``cfg`` directory for ``task``; otherwise the supplied config is reused
    as-is.
    """
    from isaacgymenvs.utils.rlgames_utils import get_rlgames_env_creator

    if cfg is not None:
        # Reuse the config handed in by the caller.
        task_cfg = omegaconf_to_dict(cfg.task)
    else:
        # No config passed in: compose one via hydra.
        if HydraConfig.initialized():
            # Hydra was already parsed elsewhere (but not passed in here):
            # reuse its task choice, then reset the global hydra state.
            task = HydraConfig.get().runtime.choices['task']
            hydra.core.global_hydra.GlobalHydra.instance().clear()
        with initialize(config_path="./cfg"):
            composed = compose(config_name="config", overrides=[f"task={task}"])
        task_cfg = omegaconf_to_dict(composed.task)
        task_cfg['env']['numEnvs'] = num_envs

    env_creator = get_rlgames_env_creator(
        seed=seed,
        task_config=task_cfg,
        task_name=task_cfg["name"],
        sim_device=sim_device,
        rl_device=rl_device,
        graphics_device_id=graphics_device_id,
        headless=headless,
        multi_gpu=multi_gpu,
        virtual_screen_capture=virtual_screen_capture,
        force_render=force_render,
    )
    return env_creator()
1,953
Python
33.892857
100
0.656938