yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/schemaHelpers.py
from pxr import Usd, UsdGeom, Sdf, Gf, Vt, PhysxSchema


class PhysxParticleInstancePrototype:
    def __init__(self, mass=0.0, phase=0):
        self.mass = mass
        self.phase = phase


def addPhysxParticleSystem(
    stage,
    particle_system_path,
    contact_offset,
    rest_offset,
    particle_contact_offset,
    solid_rest_offset,
    fluid_rest_offset,
    solver_position_iterations,
    solver_velocity_iterations,
    wind,
    scenePath,
):
    particle_system = PhysxSchema.PhysxParticleSystem.Define(stage, particle_system_path)
    if particle_system:
        particle_system.CreateContactOffsetAttr().Set(contact_offset)
        particle_system.CreateRestOffsetAttr().Set(rest_offset)
        particle_system.CreateParticleContactOffsetAttr().Set(particle_contact_offset)
        particle_system.CreateSolidRestOffsetAttr().Set(solid_rest_offset)
        particle_system.CreateFluidRestOffsetAttr().Set(fluid_rest_offset)
        particle_system.CreateSolverPositionIterationCountAttr().Set(solver_position_iterations)
        particle_system.CreateSolverVelocityIterationCountAttr().Set(solver_velocity_iterations)
        particle_system.CreateWindAttr().Set(wind)

        # Reference simulation owner using PhysxPhysicsAPI
        physics_api = PhysxSchema.PhysxPhysicsAPI.Apply(particle_system.GetPrim())
        physics_api.CreateSimulationOwnerRel().SetTargets([scenePath])
        return particle_system
    else:
        return None


def addPhysxParticlesSimple(stage, path, prototypes, prototype_indices, positions, velocities, particle_system_path):
    prototype_base_path = path.pathString + "/particlePrototype"

    # Create point instancer
    shape_list = UsdGeom.PointInstancer.Define(stage, path)
    mesh_list = shape_list.GetPrototypesRel()

    # Create particle instance prototypes
    for i in range(len(prototypes)):
        prototype_path = prototype_base_path + str(i)
        geom_sphere = UsdGeom.Sphere.Define(stage, Sdf.Path(prototype_path))

        particle_instance_api = PhysxSchema.PhysxParticleAPI.Apply(geom_sphere.GetPrim())
        particle_instance_api.CreateSelfCollisionAttr().Set(prototypes[i].selfCollision)
        particle_instance_api.CreateFluidAttr().Set(prototypes[i].fluid)
        particle_instance_api.CreateParticleGroupAttr().Set(prototypes[i].collisionGroup)
        particle_instance_api.CreateMassAttr().Set(prototypes[i].mass)

        # Reference simulation owner using PhysxPhysicsAPI
        physics_api = PhysxSchema.PhysxPhysicsAPI.Apply(geom_sphere.GetPrim())
        physics_api.CreateSimulationOwnerRel().SetTargets([particle_system_path])

        # add mesh references to point instancer
        mesh_list.AddTarget(Sdf.Path(prototype_path))

    # Set particle instance data
    mesh_indices = []
    for i in range(len(positions)):
        mesh_indices.append(prototype_indices[i])

    orientations = [Gf.Quath(1.0, Gf.Vec3h(0.0, 0.0, 0.0))] * len(positions)
    angular_velocities = [Gf.Vec3f(0.0, 0.0, 0.0)] * len(positions)

    shape_list.GetProtoIndicesAttr().Set(mesh_indices)
    shape_list.GetPositionsAttr().Set(positions)
    shape_list.GetOrientationsAttr().Set(orientations)
    shape_list.GetVelocitiesAttr().Set(velocities)
    shape_list.GetAngularVelocitiesAttr().Set(angular_velocities)


def addPhysxClothWithConstraints(
    stage,
    path,
    positions,
    normals,
    rest_positions,
    velocities,
    inv_masses,
    triangle_indices,
    spring_connections,
    spring_stiffnesses,
    spring_dampings,
    spring_rest_lengths,
    self_collision,
    self_collision_filter,
    inv_gravity,
    particle_group,
    particle_system_path,
):
    mesh = UsdGeom.Mesh.Define(stage, path)
    prim = mesh.GetPrim()

    mesh.CreateDoubleSidedAttr().Set(True)

    vertex_count_attr = mesh.CreateFaceVertexCountsAttr()
    vertex_indices_attr = mesh.CreateFaceVertexIndicesAttr()
    norm_attr = mesh.CreateNormalsAttr()
    point_attr = mesh.CreatePointsAttr()

    # Triangle array's vertex count per face is always 3
    vertex_count = 3
    array_size = int(len(triangle_indices) / 3)
    index_array = Vt.IntArray(array_size, vertex_count)
    vertex_count_attr.Set(index_array)
    vertex_indices_attr.Set(triangle_indices)
    norm_attr.Set(normals)
    point_attr.Set(positions)

    cloth_api = PhysxSchema.PhysxClothAPI.Apply(prim)
    cloth_api.CreateSelfCollisionAttr().Set(self_collision)
    cloth_api.CreateSelfCollisionFilterAttr().Set(self_collision_filter)
    cloth_api.CreateParticleGroupAttr().Set(particle_group)

    # Reference simulation owner using PhysxPhysicsAPI
    physics_api = PhysxSchema.PhysxPhysicsAPI.Apply(prim)
    physics_api.CreateSimulationOwnerRel().SetTargets([particle_system_path])

    # Custom attributes
    prim.CreateAttribute("invGravity", Sdf.ValueTypeNames.Bool).Set(inv_gravity)
    prim.CreateAttribute("springConnections", Sdf.ValueTypeNames.Int2Array).Set(spring_connections)
    prim.CreateAttribute("springStiffnesses", Sdf.ValueTypeNames.FloatArray).Set(spring_stiffnesses)
    prim.CreateAttribute("springDampings", Sdf.ValueTypeNames.FloatArray).Set(spring_dampings)
    prim.CreateAttribute("springRestLengths", Sdf.ValueTypeNames.FloatArray).Set(spring_rest_lengths)
    prim.CreateAttribute("restPositions", Sdf.ValueTypeNames.Point3fArray).Set(rest_positions)
    prim.CreateAttribute("velocities", Sdf.ValueTypeNames.Point3fArray).Set(velocities)
    prim.CreateAttribute("inverseMasses", Sdf.ValueTypeNames.FloatArray).Set(inv_masses)


def addPhysxCloth(
    stage,
    path,
    dynamic_mesh_path,
    initial_velocity,
    initial_mass,
    stretch_stiffness,
    bend_stiffness,
    shear_stiffness,
    self_collision,
    self_collision_filter,
    inv_gravity,
    particle_group,
    particle_system_path,
):
    mesh = UsdGeom.Mesh.Define(stage, path)
    prim = mesh.GetPrim()

    if dynamic_mesh_path:
        prim.GetReferences().AddReference(dynamic_mesh_path)

    cloth_api = PhysxSchema.PhysxClothAPI.Apply(prim)
    cloth_api.CreateDefaultParticleVelocityAttr().Set(initial_velocity)
    cloth_api.CreateDefaultParticleMassAttr().Set(initial_mass)
    cloth_api.CreateStretchStiffnessAttr().Set(stretch_stiffness)
    cloth_api.CreateBendStiffnessAttr().Set(bend_stiffness)
    cloth_api.CreateShearStiffnessAttr().Set(shear_stiffness)
    cloth_api.CreateSelfCollisionAttr().Set(self_collision)
    cloth_api.CreateSelfCollisionFilterAttr().Set(self_collision_filter)
    cloth_api.CreateParticleGroupAttr().Set(particle_group)

    # Reference simulation owner using PhysxPhysicsAPI
    physics_api = PhysxSchema.PhysxPhysicsAPI.Apply(prim)
    physics_api.CreateSimulationOwnerRel().SetTargets([particle_system_path])

    # Custom attributes
    prim.CreateAttribute("invGravity", Sdf.ValueTypeNames.Bool).Set(inv_gravity)


def applyInflatableApi(stage, path, pressure):
    prim = stage.GetPrimAtPath(path)
    # TODO: Add more checks here
    if prim.IsValid():
        inflatable_api = PhysxSchema.PhysxInflatableAPI.Apply(prim)
        inflatable_api.CreatePressureAttr().Set(pressure)


def _get_rigid_attachments(stage, prim: Usd.Prim):
    attachments = []
    rigidAttachmentRel = prim.CreateRelationship("physxRigidAttachments")
    for attachment_path in rigidAttachmentRel.GetTargets():
        attachment = PhysxSchema.PhysxRigidAttachment.Get(stage, attachment_path)
        if attachment:
            attachments.append(attachment)
    return attachments


# def _get_rigid_attachment_target(attachment: PhysxSchema.PhysxRigidAttachment):
#     targets = attachment.GetRigidRel().GetTargets()
#     if len(targets) <= 0:
#         return Sdf.Path()
#     else:
#         return targets[0]


# def _create_rigid_attachment(
#     stage, attachment_path: Sdf.Path, rigidbody_path: Sdf.Path, deformable_path: Sdf.Path
# ) -> PhysxSchema.PhysxRigidAttachment:
#     attachment = PhysxSchema.PhysxRigidAttachment.Define(stage, attachment_path)
#     attachment.GetRigidRel().SetTargets([rigidbody_path])
#     attachment.GetDeformableRel().SetTargets([deformable_path])
#     return attachment


# def add_deformable_to_rigid_body_attachment(
#     stage, target_attachment_path: Sdf.Path, deformable_path: Sdf.Path, rigid_path: Sdf.Path
# ):
#     deformable_prim = stage.GetPrimAtPath(deformable_path)
#     softbody_xformable = UsdGeom.Xformable(deformable_prim)
#     rigidbody_prim = stage.GetPrimAtPath(rigid_path)
#     rigidbody_xformable = UsdGeom.Xformable(rigidbody_prim)
#
#     attachments = _get_rigid_attachments(stage, deformable_prim)
#     if any(_get_rigid_attachment_target(attachment) == rigid_path for attachment in attachments):
#         return False
#
#     # Create new attachment
#     attachment = _create_rigid_attachment(stage, target_attachment_path, rigid_path, deformable_path)
#     attachment_prim = attachment.GetPrim()
#     attachment_prim.CreateAttribute("physxEnableHaloParticleFiltering", Sdf.ValueTypeNames.Bool).Set(True)
#     attachment_prim.CreateAttribute("physxEnableVolumeParticleAttachments", Sdf.ValueTypeNames.Bool).Set(True)
#     attachment_prim.CreateAttribute("physxEnableSurfaceTetraAttachments", Sdf.ValueTypeNames.Bool).Set(False)
#
#     sb_bound = softbody_xformable.ComputeLocalBound(
#         Usd.TimeCode.Default(), purpose1=softbody_xformable.GetPurposeAttr().Get()
#     )
#     sb_size = sb_bound.ComputeAlignedBox().GetSize()
#     avg_dim = (sb_size[0] + sb_size[1] + sb_size[2]) / 3.0
#     default_rad = avg_dim * 0.05
#     attachment_prim.CreateAttribute("physxHaloParticleFilteringRadius", Sdf.ValueTypeNames.Float).Set(default_rad * 4)
#     attachment_prim.CreateAttribute("physxVolumeParticleAttachmentRadius", Sdf.ValueTypeNames.Float).Set(default_rad)
#     attachment_prim.CreateAttribute("physxSurfaceSamplingRadius", Sdf.ValueTypeNames.Float).Set(default_rad)
#
#     # Update soft body relationship
#     attachments.append(attachment)
#     attachment_paths = [attachment.GetPath() for attachment in attachments]
#     deformable_prim.CreateRelationship("physxRigidAttachments").SetTargets(attachment_paths)
#
#     # Store the global xforms
#     globalPose = rigidbody_xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
#     attachment_prim.CreateAttribute("physxRigidBodyXform", Sdf.ValueTypeNames.Matrix4d).Set(globalPose)
#     globalPose = softbody_xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
#     attachment_prim.CreateAttribute("physxDeformableXform", Sdf.ValueTypeNames.Matrix4d).Set(globalPose)
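These helpers wrap the PhysxSchema particle and cloth APIs. A minimal usage sketch for addPhysxParticleSystem, assuming an in-memory stage and a physics scene at /physicsScene; the paths and offset values below are illustrative, not taken from the repository:

# Usage sketch (illustrative values, assumes addPhysxParticleSystem from above is in scope)
from pxr import Gf, Sdf, Usd

stage = Usd.Stage.CreateInMemory()
particle_system = addPhysxParticleSystem(
    stage,
    Sdf.Path("/particleSystem0"),     # hypothetical path
    contact_offset=0.3,
    rest_offset=0.25,
    particle_contact_offset=0.25,
    solid_rest_offset=0.0,
    fluid_rest_offset=0.15,
    solver_position_iterations=10,
    solver_velocity_iterations=1,
    wind=Gf.Vec3f(0, 0, 0),
    scenePath=Sdf.Path("/physicsScene"),  # assumed pre-existing physics scene
)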
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/faucet.py
import carb
import math
from pathlib import Path
from pxr import Usd, UsdLux, UsdGeom, Sdf, Gf, Vt, UsdPhysics, PhysxSchema
import sys

# put schemaHelpers.py into path
from omni.kitchen.asset.layout.fluid.schemaHelpers import PhysxParticleInstancePrototype, addPhysxParticleSystem
import omni.timeline
import omni.usd
import omni.physx
from typing import List
from omni.kitchen.asset.task_check.newJointCheck import JointCheck

ASYNC_SIMULATION = "/persistent/physics/asyncSimRender"


def setGridFilteringPass(gridFilteringFlags: int, passIndex: int, operation: int, numRepetitions: int = 1):
    numRepetitions = max(0, numRepetitions - 1)
    shift = passIndex * 4
    gridFilteringFlags &= ~(3 << shift)
    gridFilteringFlags |= (((operation) << 2) | numRepetitions) << shift
    return gridFilteringFlags


def norm(a):
    square_sum = 0
    for item in a:
        square_sum += item * item
    return math.sqrt(square_sum)


# https://math.stackexchange.com/questions/2346982/slerp-inverse-given-3-quaternions-find-t
def quarternion_slerp_inverse(q0, q1, q):
    q1_inv = q1.GetInverse()
    q0_inv = q0.GetInverse()
    q_inv = q.GetInverse()

    tmp_1 = (q0_inv * q).GetNormalized()
    real = tmp_1.GetReal()
    img = [tmp_1.GetImaginary()[0], tmp_1.GetImaginary()[1], tmp_1.GetImaginary()[2]]
    term21 = [math.acos(real) / norm(img) * item for item in img]
    log_tmp1 = [0, term21[0], term21[1], term21[2]]

    tmp_2 = (q0_inv * q1).GetNormalized()
    real = tmp_2.GetReal()
    img = [tmp_2.GetImaginary()[0], tmp_2.GetImaginary()[1], tmp_2.GetImaginary()[2]]
    term22 = [math.acos(real) / norm(img) * item for item in img]
    log_tmp2 = [0, term22[0], term22[1], term22[2]]

    rates = []
    if abs(term21[0]) < 0.0001 and abs(term22[0]) < 0.0001:
        rates.append(None)
    else:
        t1 = term21[0] / term22[0]
        rates.append(t1)

    if abs(term21[1]) < 0.0001 and abs(term22[1]) < 0.0001:
        rates.append(None)
    else:
        t2 = term21[1] / term22[1]
        rates.append(t2)

    if abs(term21[2]) < 0.0001 and abs(term22[2]) < 0.0001:
        rates.append(None)
    else:
        t3 = term21[2] / term22[2]
        rates.append(t3)

    rates = list(filter(lambda x: x is not None, rates))
    return max(rates)


# https://math.stackexchange.com/questions/167827/compute-angle-between-quaternions-in-matlab
def rotation_diff(q0, q1):
    z = q0.GetNormalized() * q1.GetNormalized().GetConjugate()
    z_real = abs(z.GetReal())
    if z_real > 1:
        z_real = 1
    elif z_real < -1:
        z_real = -1
    angle = math.acos(abs(z_real)) * 2
    return math.degrees(angle)


class Faucet:
    def __init__(
        self,
        particle_params=None,
        iso_surface_params=None,
        liquid_material_path="/World/Looks/OmniSurface_ClearWater",
        inflow_path: str = "/World/faucet/inflow",
        link_paths: List[str] = ["/World/faucet/link_0"],
    ):
        """!
        Faucet class
        @param particle_params: parameters for the particle system
        @param iso_surface_params: parameters for the isosurface
        @param liquid_material_path: path to the liquid material
        @param inflow_path: used to compute the location of water drops
        @param link_paths: used to compute the rotation of the faucet handle and determine
            the speed and size of water drops
        @return an instance of Faucet class
        """
        # particle instance path
        self.particleInstanceStr_tmp = "/particlesInstance"
        self.particle_params = particle_params
        self.iso_surface_params = iso_surface_params
        self.liquid_material_path = liquid_material_path

        # Not sure if the isregistry thing works
        isregistry = carb.settings.acquire_settings_interface()
        self._async_simulation = carb.settings.get_settings().get_as_bool(ASYNC_SIMULATION)
        isregistry.set_bool(ASYNC_SIMULATION, True)
        isregistry.set_int("persistent/simulation/minFrameRate", 30)

        self.stage = omni.usd.get_context().get_stage()
        self.inflow_path = inflow_path
        self.link_paths = link_paths
        self.list_of_point_instancers = []
        self.active_indexes_for_point_instancers = []

        self.rate_checkers = []
        for link in link_paths:
            path = Path(link)
            self.rate_checkers.append(JointCheck(str(path.parent), str(path.name)))

        self.create()

    def point_sphere(self, samples, scale):
        """!
        Create locations for each particle
        @param samples: the number of particles per sphere
        @param scale: the scale (radius) of the water drop
        """
        indices = [x + 0.5 for x in range(0, samples)]
        phi = [math.acos(1 - 2 * x / samples) for x in indices]
        theta = [math.pi * (1 + 5**0.5) * x for x in indices]
        x = [math.cos(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
        y = [math.sin(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
        z = [math.cos(ph) * scale for ph in phi]
        points = [Gf.Vec3f(x, y, z) for (x, y, z) in zip(x, y, z)]
        return points

    def create_ball(self, pos, rate=1):
        """!
        Create a water drop
        @param pos: the center of the water drop
        @param rate: controls the number of particles for each water drop
        """
        # create sphere on points
        points = self.point_sphere(10 + int(90 * rate), 1)
        # basePos = Gf.Vec3f(11.0, 12.0, 35.0) + pos
        basePos = pos
        positions = [Gf.Vec3f(x) + Gf.Vec3f(basePos) for x in points]
        radius = 0.2
        particleSpacing = 2.0 * radius * 0.6

        positions_list = positions
        velocities_list = [Gf.Vec3f(0.0, 0.0, 0.0)] * len(positions)
        protoIndices_list = [0] * len(positions)

        protoIndices = Vt.IntArray(protoIndices_list)
        positions = Vt.Vec3fArray(positions_list)
        velocities = Vt.Vec3fArray(velocities_list)

        # particleInstanceStr = "/particlesInstance" + str(self.it)
        particleInstanceStr = omni.usd.get_stage_next_free_path(self.stage, self.particleInstanceStr_tmp, False)
        particleInstancePath = Sdf.Path(particleInstanceStr)

        # Create point instancer
        pointInstancer = UsdGeom.PointInstancer.Define(self.stage, particleInstancePath)
        prototypeRel = pointInstancer.GetPrototypesRel()

        # Create particle instance prototype
        particlePrototype = PhysxParticleInstancePrototype()
        particlePrototype.selfCollision = True
        particlePrototype.fluid = True
        particlePrototype.collisionGroup = 0
        particlePrototype.mass = 0.5e-5

        prototypePath = particleInstancePath.pathString + "/particlePrototype"
        sphere = UsdGeom.Sphere.Define(self.stage, Sdf.Path(prototypePath))
        spherePrim = sphere.GetPrim()
        sphere.GetRadiusAttr().Set(particleSpacing)
        spherePrim.GetAttribute('visibility').Set('invisible')
        spherePrim.CreateAttribute("enableAnisotropy", Sdf.ValueTypeNames.Bool, True).Set(True)

        particleInstanceApi = PhysxSchema.PhysxParticleAPI.Apply(spherePrim)
        particleInstanceApi.CreateSelfCollisionAttr().Set(particlePrototype.selfCollision)
        particleInstanceApi.CreateFluidAttr().Set(particlePrototype.fluid)
        particleInstanceApi.CreateParticleGroupAttr().Set(particlePrototype.collisionGroup)
        particleInstanceApi.CreateMassAttr().Set(particlePrototype.mass)

        # Reference simulation owner using PhysxPhysicsAPI
        physicsApi = PhysxSchema.PhysxPhysicsAPI.Apply(spherePrim)
        physicsApi.CreateSimulationOwnerRel().SetTargets([self.particleSystemPath])

        # add prototype references to point instancer
        prototypeRel.AddTarget(Sdf.Path(prototypePath))

        # Set active particle indices
        activeIndices = []
        for i in range(len(positions)):
            activeIndices.append(protoIndices[i])

        orientations = [Gf.Quath(1.0, Gf.Vec3h(0.0, 0.0, 0.0))] * len(positions)
        angular_velocities = [Gf.Vec3f(0.0, 0.0, 0.0)] * len(positions)

        pointInstancer.GetProtoIndicesAttr().Set(activeIndices)
        pointInstancer.GetPositionsAttr().Set(positions)
        pointInstancer.GetOrientationsAttr().Set(orientations)
        pointInstancer.GetVelocitiesAttr().Set(velocities)
        pointInstancer.GetAngularVelocitiesAttr().Set(angular_velocities)

        self.list_of_point_instancers.append(pointInstancer)
        self.active_indexes_for_point_instancers.append(activeIndices)

    def create(self):
        """!
        Initialize the related parameters for the faucet:
        create the physics scene, particle system, and isosurface.
        """
        self._setup_callbacks()
        self.it = 0
        self.counter = 10

        # Physics scene
        scenePath = Sdf.Path("/physicsScene")

        # Particle system
        self.particleSystemPath = omni.usd.get_stage_next_free_path(self.stage, "/particleSystem", False)
        # particleSystemPath = Sdf.Path("/particleSystem0")

        _fluidSphereDiameter = 0.24
        _solverPositionIterations = 10
        _solverVelocityIterations = 1
        _particleSystemSchemaParameters = {
            "contact_offset": 0.3,
            "particle_contact_offset": 0.25,
            "rest_offset": 0.25,
            "solid_rest_offset": 0,
            "fluid_rest_offset": 0.5 * _fluidSphereDiameter + 0.03,
            "solver_position_iterations": _solverPositionIterations,
            "solver_velocity_iterations": _solverVelocityIterations,
            "wind": Gf.Vec3f(0, 0, 0),
        }

        addPhysxParticleSystem(
            self.stage,
            self.particleSystemPath,
            **_particleSystemSchemaParameters,
            scenePath=scenePath,
        )

        particleSystem = self.stage.GetPrimAtPath(self.particleSystemPath)

        # particle system settings
        if self.particle_params is not None:
            for key, value in self.particle_params.items():
                if isinstance(value, list):
                    particleSystem.CreateAttribute(key, value[0], value[1]).Set(value[2])
                else:
                    particleSystem.GetAttribute(key).Set(value)

        # apply isosurface params
        if self.iso_surface_params is not None:
            particleSystem.CreateAttribute("enableIsosurface", Sdf.ValueTypeNames.Bool, True).Set(True)
            for key, value in self.iso_surface_params.items():
                if isinstance(value, list):
                    particleSystem.CreateAttribute(key, value[0], value[1]).Set(value[2])
                else:
                    particleSystem.GetAttribute(key).Set(value)

        self.stage.SetInterpolationType(Usd.InterpolationTypeHeld)

    def _setup_callbacks(self):
        """!
        Callbacks registered with timeline and physics steps to drop water
        """
        self._timeline = omni.timeline.get_timeline_interface()
        stream = self._timeline.get_timeline_event_stream()
        self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
        # subscribe to Physics updates:
        self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
            self.on_physics_step
        )
        # events = omni.physx.get_physx_interface().get_simulation_event_stream()
        # self._simulation_event_sub = events.create_subscription_to_pop(self._on_simulation_event)

    def _on_timeline_event(self, e):
        if e.type == int(omni.timeline.TimelineEventType.STOP):
            self.it = 0
            self._physics_update_subscription = None
            self._timeline_subscription = None

    def on_physics_step(self, dt):
        xformCache = UsdGeom.XformCache()

        # compute location to dispense water
        pose = xformCache.GetLocalToWorldTransform(self.stage.GetPrimAtPath(self.inflow_path))
        pos_faucet = Gf.Vec3f(pose.ExtractTranslation())

        # TODO: handle multiple faucet handles
        rate = self.rate_checkers[0].compute_distance() / 100.0
        if rate > 1:
            rate = 1

        if self.it == 0:
            iso2Prim = self.stage.GetPrimAtPath(self.particleSystemPath + "/Isosurface")
            rel = iso2Prim.CreateRelationship("material:binding", False)
            # rel.SetTargets([Sdf.Path(self.liquid_material_path)])
            # rel.SetTargets([Sdf.Path("/World/Looks/OmniSurface_OrangeJuice")])

        # TODO: we could keep the water running, but we should delete particles that are
        # too old and not in containers. This implementation stops after 300 balls.
        if self.it > 300:
            return

        if rate < 0.1:
            return

        # emit a ball based on rate
        if self.counter < 20 - rate:
            self.counter = self.counter + 1
            return

        self.counter = 0
        self.it = self.it + 1

        self.create_ball(pos_faucet, rate)

    def __del__(self):
        self._physics_update_subscription = None
        self._timeline_subscription = None
        # TODO: not sure if this works
        isregistry = carb.settings.acquire_settings_interface()
        isregistry.set_bool(ASYNC_SIMULATION, self._async_simulation)


# if __name__ == '__main__':
from omni.physx import acquire_physx_interface

physx = acquire_physx_interface()
physx.overwrite_gpu_setting(1)
physx.reset_simulation()

particle_params = {
    "cohesion": 0.02,
    "smoothing": 0.8,
    "anisotropyScale": 1.0,
    "anisotropyMin": 0.2,
    "anisotropyMax": 2.0,
    "viscosity": 0.0091,
    "surfaceTension": 0.0074,
    "particleFriction": 0.1,
    "maxParticleNeighborhood": [Sdf.ValueTypeNames.Int, True, 64],
    "maxParticles": 20000,
}

filterSmooth = 1
filtering = 0
passIndex = 0
filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
passIndex = passIndex + 1
filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
passIndex = passIndex + 1

iso_surface_params = {
    "maxIsosurfaceVertices": [Sdf.ValueTypeNames.Int, True, 1024 * 1024],
    "maxIsosurfaceTriangles": [Sdf.ValueTypeNames.Int, True, 2 * 1024 * 1024],
    "maxNumIsosurfaceSubgrids": [Sdf.ValueTypeNames.Int, True, 1024 * 4],
    "isosurfaceGridSpacing": [Sdf.ValueTypeNames.Float, True, 0.2],
    "isosurfaceKernelRadius": [Sdf.ValueTypeNames.Float, True, 0.5],
    "isosurfaceLevel": [Sdf.ValueTypeNames.Float, True, -0.3],
    "isosurfaceGridFilteringFlags": [Sdf.ValueTypeNames.Int, True, filtering],
    "isosurfaceGridSmoothingRadiusRelativeToCellSize": [Sdf.ValueTypeNames.Float, True, 0.3],
    "isosurfaceEnableAnisotropy": [Sdf.ValueTypeNames.Bool, True, False],
    "isosurfaceAnisotropyMin": [Sdf.ValueTypeNames.Float, True, 0.1],
    "isosurfaceAnisotropyMax": [Sdf.ValueTypeNames.Float, True, 2.0],
    "isosurfaceAnisotropyRadius": [Sdf.ValueTypeNames.Float, True, 0.5],
    "numIsosurfaceMeshSmoothingPasses": [Sdf.ValueTypeNames.Int, True, 5],
    "numIsosurfaceMeshNormalSmoothingPasses": [Sdf.ValueTypeNames.Int, True, 5],
    "isosurfaceDoNotCastShadows": [Sdf.ValueTypeNames.Bool, True, True],
}

# fluid_fill = Faucet(particle_params=particle_params, iso_surface_params=iso_surface_params,
#                     liquid_material_path="/World/Looks/OmniSurface_ClearWater",
#                     inflow_path="/World/faucet/inflow",
#                     link_paths=["/World/faucet/link_1/joint_0"])

# fluid_fill = Faucet(particle_params=particle_params, iso_surface_params=iso_surface_params,
#                     liquid_material_path="/World/Looks/OmniSurface_ClearWater",
#                     inflow_path="/World/mobility/inflow",
#                     link_paths=["/World/mobility/link_1/joint_0"])
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/robot_setup/numpy_utils.py
import numpy as np


def orientation_error(desired, current):
    cc = quat_conjugate(current)
    q_r = quat_mul(desired, cc)
    return q_r[:, 0:3] * np.sign(q_r[:, 3])[:, None]


def quat_mul(a, b):
    assert a.shape == b.shape
    shape = a.shape
    a = a.reshape(-1, 4)
    b = b.reshape(-1, 4)

    x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
    x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
    ww = (z1 + x1) * (x2 + y2)
    yy = (w1 - y1) * (w2 + z2)
    zz = (w1 + y1) * (w2 - z2)
    xx = ww + yy + zz
    qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
    w = qq - ww + (z1 - y1) * (y2 - z2)
    x = qq - xx + (x1 + w1) * (x2 + w2)
    y = qq - yy + (w1 - x1) * (y2 + z2)
    z = qq - zz + (z1 + y1) * (w2 - x2)

    quat = np.stack([x, y, z, w], axis=-1).reshape(shape)
    return quat


def normalize(x, eps: float = 1e-9):
    return x / np.clip(np.linalg.norm(x, axis=-1), a_min=eps, a_max=None)[:, None]


def quat_unit(a):
    return normalize(a)


def quat_from_angle_axis(angle, axis):
    theta = (angle / 2)[:, None]
    xyz = normalize(axis) * np.sin(theta)
    w = np.cos(theta)
    return quat_unit(np.concatenate([xyz, w], axis=-1))


def quat_rotate(q, v):
    shape = q.shape
    q_w = q[:, -1]
    q_vec = q[:, :3]
    a = v * (2.0 * q_w**2 - 1.0)[:, None]
    b = np.cross(q_vec, v) * q_w[:, None] * 2.0
    c = q_vec * np.sum(q_vec * v, axis=1).reshape(shape[0], -1) * 2.0
    return a + b + c


def quat_conjugate(a):
    shape = a.shape
    a = a.reshape(-1, 4)
    return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape)


def quat_axis(q, axis=0):
    basis_vec = np.zeros((q.shape[0], 3))
    basis_vec[:, axis] = 1
    return quat_rotate(q, basis_vec)
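These quaternion helpers use (x, y, z, w) component order and batched (N, 4) arrays. A quick runnable sanity check with illustrative values, assuming the functions above are in scope:

# Sanity check: rotating the x-axis 90 degrees about z should give the y-axis.
import numpy as np

angle = np.array([np.pi / 2])
axis = np.array([[0.0, 0.0, 1.0]])
q = quat_from_angle_axis(angle, axis)   # quaternion in (x, y, z, w) order
v = np.array([[1.0, 0.0, 0.0]])
print(quat_rotate(q, v))                # approximately [[0., 1., 0.]]
print(orientation_error(q, q))          # zero error against itself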
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/robot_setup/controller.py
# controller
import carb


class Controller:
    w = False
    s = False
    a = False
    d = False
    q = False
    e = False
    up = False
    down = False
    left = False
    right = False
    # Controller.scale = 0.1
    left_control = False

    def __init__(self) -> None:
        self.user_control = 0.25
        self.network_control = 0.25
        Controller.reset_movement()

    @classmethod
    def reset_movement(cls):
        Controller.w = False
        Controller.s = False
        Controller.a = False
        Controller.d = False
        Controller.q = False
        Controller.e = False
        Controller.up = False
        Controller.down = False
        Controller.left = False
        Controller.right = False
        # Controller.left_control = False

    def handle_keyboard_event(self, event):
        if (
            event.type == carb.input.KeyboardEventType.KEY_PRESS
            or event.type == carb.input.KeyboardEventType.KEY_REPEAT
        ):
            # print("event input", event.input)
            if event.input == carb.input.KeyboardInput.W:
                Controller.w = True
            if event.input == carb.input.KeyboardInput.S:
                Controller.s = True
            if event.input == carb.input.KeyboardInput.A:
                Controller.a = True
            if event.input == carb.input.KeyboardInput.D:
                Controller.d = True
            if event.input == carb.input.KeyboardInput.Q:
                Controller.q = True
            if event.input == carb.input.KeyboardInput.E:
                Controller.e = True
            if event.input == carb.input.KeyboardInput.UP:
                Controller.up = True
            if event.input == carb.input.KeyboardInput.DOWN:
                Controller.down = True
            if event.input == carb.input.KeyboardInput.LEFT:
                Controller.left = True
            if event.input == carb.input.KeyboardInput.RIGHT:
                Controller.right = True
            if event.input == carb.input.KeyboardInput.LEFT_CONTROL:
                Controller.left_control = True

        if event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            # print("event release", event.input)
            if event.input == carb.input.KeyboardInput.W:
                Controller.w = False
            if event.input == carb.input.KeyboardInput.S:
                Controller.s = False
            if event.input == carb.input.KeyboardInput.A:
                Controller.a = False
            if event.input == carb.input.KeyboardInput.D:
                Controller.d = False
            if event.input == carb.input.KeyboardInput.Q:
                Controller.q = False
            if event.input == carb.input.KeyboardInput.E:
                Controller.e = False
            if event.input == carb.input.KeyboardInput.UP:
                Controller.up = False
            if event.input == carb.input.KeyboardInput.DOWN:
                Controller.down = False
            if event.input == carb.input.KeyboardInput.LEFT:
                Controller.left = False
            if event.input == carb.input.KeyboardInput.RIGHT:
                Controller.right = False
            if event.input == carb.input.KeyboardInput.LEFT_CONTROL:
                Controller.left_control = False

    def PoolUserControl(self):
        return self.user_control

    def PoolNetworkControl(self):
        return 0.1 if Controller.w else 0.25

    def QueryMove(self):
        move = [0, 0, 0]
        if Controller.w:
            move[0] += 1
        if Controller.s:
            move[0] -= 1
        if Controller.a:
            move[1] += 1
        if Controller.d:
            move[1] -= 1
        if Controller.q:
            move[2] -= 1
        if Controller.e:
            move[2] += 1
        return move

    def QueryRotation(self):
        rotation = [0, 0]
        if Controller.up:
            rotation[0] += 1
        if Controller.down:
            rotation[0] -= 1
        if Controller.left:
            rotation[1] += 1
        if Controller.right:
            rotation[1] -= 1
        return rotation

    def QueryGripper(self):
        if not Controller.left_control:
            return 1   # open
        else:
            return -1  # close
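The keyboard state polled here maps directly to motion commands. A sketch of that mapping which sets the class flags directly instead of going through carb keyboard events (illustrative only, not how the extension drives it at runtime):

# Illustrative: simulate pressing W and LEFT by setting the class flags, then poll.
controller = Controller()
Controller.w = True
Controller.left = True
print(controller.QueryMove())      # [1, 0, 0] -> move forward along x
print(controller.QueryRotation())  # [0, 1]
print(controller.QueryGripper())   # 1 -> open (LEFT_CONTROL not held)
Controller.reset_movement()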
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/robot_setup/franka_tensor.py
import omni
import omni.usd
import omni.physx
import omni.timeline
import omni.appwindow
import omni.kit.app
import carb
import types
import numpy as np
import importlib
import os
import shutil

from ..param import IS_IN_CREAT, IS_IN_ISAAC_SIM, APP_VERION, SAVE_ROOT
from .controller import Controller
from .numpy_utils import orientation_error
from pxr import Usd, UsdGeom, Gf


class FrankaTensor():
    def __init__(self, save_path, build_HUD=True):
        """
        Franka tensor controller
        ::params:
            save_path: path to save the recordings
            build_HUD: build UI
        """
        carb.log_info("Franka Tensor started (only in Create/Isaac-Sim >= 2022.1.0)")

        self._is_stopped = True
        self._tensor_started = False
        self._tensor_api = None
        self._flatcache_was_enabled = True
        self._tensorapi_was_enabled = True

        # stage
        self.stage = omni.usd.get_context().get_stage()
        self.franka_prim = self.stage.GetPrimAtPath("/World/game/franka")

        # property
        self.is_replay = False
        self.is_record = False

        # counting and index
        self.count_down = 80
        self.button_status = 0
        self.npz_index = 0
        self.is_start = True

        # setup subscriptions:
        self._setup_callbacks()
        self._enable_tensor_api()

        # task info
        self.save_path = save_path
        self.record_lines = []

        # controller
        self.controller = Controller()

    def _enable_tensor_api(self):
        manager = omni.kit.app.get_app().get_extension_manager()
        self._tensorapi_was_enabled = manager.is_extension_enabled("omni.physx.tensors")
        if not self._tensorapi_was_enabled:
            manager.set_extension_enabled_immediate("omni.physx.tensors", True)
        self._tensor_api = importlib.import_module("omni.physics.tensors")

    # "PRIVATE" METHODS #
    def _can_callback_physics_step(self) -> bool:
        if self._is_stopped:
            return False
        if self._tensor_started or self._tensor_api is None:
            return True
        self._tensor_started = True
        self.on_tensor_start(self._tensor_api)
        return True

    def on_tensor_start(self, tensorApi: types.ModuleType):
        """
        This method is called when
        1. the tensor API is enabled, and
        2. when the simulation data is ready for the user to setup views using the tensor API.
        """
        # if IS_IN_CREAT and APP_VERION >= "2022.1.1":
        sim = tensorApi.create_simulation_view("numpy")
        sim.set_subspace_roots("/World/game/*")

        # franka view
        self.frankas = sim.create_articulation_view("/World/game/franka")
        self.franka_indices = np.arange(self.frankas.count, dtype=np.int32)

        # !!!
        # self.default_dof_pos = np.array([0.0, 0.0, 0.0, -0.95, 0.0, 1.12, 0.0, 0.02, 0.02])
        self.default_dof_pos = np.array(
            [1.2024134e-02, -5.6960440e-01, 7.3155526e-05, -2.8114836e+00,
             -4.8544933e-03, 3.0270250e+00, 7.2893953e-01, 3.9919264e+00, 4.0000000e+00]
        )

        # set default dof pos:
        init_dof_pos = np.stack(1 * [np.array(self.default_dof_pos, dtype=np.float32)])
        self.frankas.set_dof_position_targets(init_dof_pos, self.franka_indices)
        self.last_gripper_action = 1  # open as default

        # end effector view
        self.hands = sim.create_rigid_body_view("/World/game/franka/panda_hand")

        # get initial hand transforms
        # init_hand_transforms = self.hands.get_transforms().copy()
        # self.hand_pos = init_hand_transforms[:, :3]
        # self.hand_rot = init_hand_transforms[:, 3:]

        # target
        # self.target_pos = self.default_dof_pos[None, :]
        # self.target_hand_transform = init_hand_transforms

    def _setup_callbacks(self):
        stream = omni.timeline.get_timeline_interface().get_timeline_event_stream()
        self._timeline_sub = stream.create_subscription_to_pop(self._on_timeline_event)
        # subscribe to Physics updates:
        self._physics_update_sub = omni.physx.get_physx_interface().subscribe_physics_step_events(self._on_physics_step)
        events = omni.physx.get_physx_interface().get_simulation_event_stream_v2()
        self._simulation_event_subscription = events.create_subscription_to_pop(self.on_simulation_event)

        # subscribe to keyboard
        self._appwindow = omni.appwindow.get_default_app_window()
        self._input = carb.input.acquire_input_interface()
        self._keyboard = self._appwindow.get_keyboard()
        self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)

    def _sub_keyboard_event(self, event, *args, **kwargs):
        self.controller.handle_keyboard_event(event)

    def _on_timeline_event(self, e):
        if e.type == int(omni.timeline.TimelineEventType.STOP):
            self._is_stopped = True
            self._tensor_started = False

            # !!!
            self._timeline_sub = None
            self._simulation_event_subscription = None
            self._physics_update_sub = None
            self._input.unsubscribe_to_keyboard_events(self._keyboard, self._sub_keyboard)

        if e.type == int(omni.timeline.TimelineEventType.PLAY):
            self._is_stopped = False

        # call user implementation
        # self.on_timeline_event(e)

    def _on_physics_step(self, dt):
        if not self._can_callback_physics_step():
            return
        # call user implementation
        self.on_physics_step(dt)

    def on_simulation_event(self, e):
        """
        This method is called on simulation events. See omni.physx.bindings._physx.SimulationEvent.
        """
        pass

    def on_physics_step(self, dt):
        """
        This method is called on each physics step callback, and the first callback is issued
        after the on_tensor_start method is called if the tensor API is enabled.
        """
        self.count_down -= 1
        # self.dof_pos = self.frankas.get_dof_positions()

        # playing
        if not self.is_replay:
            if self.count_down == 0:
                self.count_down = 6  # TODO: unify count_down in play and replay

                if self.is_record:
                    current_dof_pos = self.frankas.get_dof_positions()
                    with open(os.path.join(self.save_path, 'record.csv'), 'a') as f:
                        f.write(",".join(list([str(e) for e in current_dof_pos[0]] + [str(self.last_gripper_action)])) + '\n')

                # get movement from keyboard
                move_vec = self.controller.QueryMove()
                query_move = move_vec != [0, 0, 0]

                # get rotation from keyboard
                rotation_vec = self.controller.QueryRotation()
                query_rotation = rotation_vec != [0, 0]

                # get gripper
                gripper_val = self.controller.QueryGripper()
                query_gripper = self.last_gripper_action != gripper_val

                # get end effector transforms
                hand_transforms = self.hands.get_transforms().copy()
                current_hand_pos, current_hand_rot = hand_transforms[:, :3], hand_transforms[:, 3:]

                # update record
                if query_move or query_rotation or query_gripper or self.is_start:
                    self.hand_pos = current_hand_pos
                    self.hand_rot = current_hand_rot
                    self.last_gripper_action = gripper_val
                    self.is_start = False

                # if no input
                # if not query_move and not query_rotation and not query_gripper:
                #     return

                # get franka xform mat  # FIXME: time code?
                mat = UsdGeom.Xformable(self.franka_prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())

                move_vec_4d = Gf.Vec4d(move_vec[0], move_vec[1], move_vec[2], 0)
                hand_move = move_vec_4d * mat
                hand_move_np = np.array([[hand_move[0], hand_move[1], hand_move[2]]])

                target_pos = self.hand_pos + hand_move_np
                target_rot = self.hand_rot

                dof_target = self.move_to_target(target_pos, target_rot)

                if query_rotation:
                    dof_target[..., 5] += rotation_vec[0] * 0.1  # slowly but surely
                    dof_target[..., 6] += rotation_vec[1] * 0.2

                dof_target[..., [-2, -1]] = 5 if self.last_gripper_action > 0 else -1
                self.frankas.set_dof_position_targets(dof_target, np.arange(1))

        # replaying
        else:  # self.is_replay:
            if self.count_down == 0:
                self.count_down = 4

                # pause when record does not exist
                if len(self.record_lines) == 0:
                    omni.timeline.get_timeline_interface().pause()
                    return

                # load joint
                record_line = self.record_lines.pop(0)
                self.target_pos = np.array([record_line[:-1]])
                self.last_gripper_action = record_line[-1]

                # load discrete gripper
                self.target_pos[..., [-2, -1]] = 5 if self.last_gripper_action > 0 else -1

                self.frankas.set_dof_position_targets(self.target_pos, self.franka_indices)

    def load_record(self):
        if not os.path.exists(os.path.join(self.save_path, 'record.csv')):
            carb.log_error("please start & record first")
            return

        with open(os.path.join(self.save_path, 'record.csv'), 'r') as f:
            for line in f.readlines():
                self.record_lines.append([float(e) for e in line.split(",")])

    ######################################### robot control #########################################
    def move_to_target(self, goal_pos, goal_rot):
        """
        Move hand to target points
        """
        # get end effector transforms
        hand_transforms = self.hands.get_transforms().copy()
        hand_pos, hand_rot = hand_transforms[:, :3], hand_transforms[:, 3:]
        # hand_rot = hand_rot[:, [1, 2, 3, 0]]  # WXYZ

        # get franka DOF states
        dof_pos = self.frankas.get_dof_positions()

        # compute position and orientation error
        pos_err = goal_pos - hand_pos
        orn_err = orientation_error(goal_rot, hand_rot)
        dpose = np.concatenate([pos_err, orn_err], -1)[:, None].transpose(0, 2, 1)

        jacobians = self.frankas.get_jacobians()

        # jacobian entries corresponding to franka hand
        franka_hand_index = 8  # !!!
        j_eef = jacobians[:, franka_hand_index - 1, :]

        # solve damped least squares
        j_eef_T = np.transpose(j_eef, (0, 2, 1))
        d = 0.05  # damping term
        lmbda = np.eye(6) * (d ** 2)
        u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(1, 9)

        # update position targets
        pos_targets = dof_pos + u  # * 0.3

        return pos_targets
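move_to_target applies the standard damped least-squares (Levenberg-Marquardt) IK update, u = J^T (J J^T + lambda^2 I)^{-1} dpose. A standalone NumPy sketch of just that step, with a random Jacobian standing in for the values the tensor API would supply (shapes match the 9-DOF Franka case above):

# Standalone damped least-squares IK step; the Jacobian here is random
# stand-in data, not values from the simulator.
import numpy as np

rng = np.random.default_rng(0)
j_eef = rng.standard_normal((1, 6, 9))   # batch of 6x9 end-effector Jacobians
dpose = rng.standard_normal((1, 6, 1))   # stacked position + orientation error
d = 0.05                                 # damping term
lmbda = np.eye(6) * (d ** 2)
j_eef_T = np.transpose(j_eef, (0, 2, 1))
u = (j_eef_T @ np.linalg.inv(j_eef @ j_eef_T + lmbda) @ dpose).reshape(1, 9)
print(u.shape)                           # (1, 9) joint position deltas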
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/task_check/__init__.py
from .base_checker import BaseChecker
# from .grasp_checker import GraspChecker
# from .joint_checker import JointChecker
# from .orient_checker import OrientChecker
# from .container_checker import ContainerChecker
# from .water_checker import WaterChecker
# from .tap_water_checker import TapWaterChecker
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/task_check/base_checker.py
import os
import json
from pxr import PhysxSchema, UsdPhysics

# task completion checking
import omni
import omni.usd
import omni.timeline
import omni.physx
import carb
from omni.physx.scripts import physicsUtils

from ..param import DATA_PATH_NEW
from ..layout.randomizer import Randomizer


class BaseChecker():
    SUCCESS_UI = None
    IS_REPLAY = False

    def __init__(self, task_type, task_id, robot_id, mission_id, annotator="Steven", run_time=True) -> None:
        """
        ::params:
            :run_time: is run-time task checker or not
        """
        # property
        self.task_type = task_type
        self.task_id = str(task_id)
        self.mission_id = str(mission_id)
        self.robot_id = str(robot_id)
        self.data_path = DATA_PATH_NEW
        self.annotator = annotator

        # keep the old mission identifier temporarily
        self.old_mission_identifier = self.task_type + " " + self.task_id + " " + self.robot_id + " " + self.mission_id
        self.mission_identifier_prefix = self.task_type + " " + self.task_id + " "  # + self.robot_id + " " + self.mission_id
        self.mission_identifier_suffix = self.mission_id

        # scene
        self.stage = omni.usd.get_context().get_stage()
        self.default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString
        self.timeline = omni.timeline.get_timeline_interface()

        self.current_mission = self.register_mission()
        self.success_steps = 0
        self.success = False
        self.time = 0

        # tasks
        if run_time:
            self.create_task_callback()

        # log
        self.total_step = 0
        self.print_every = 240
        self.checking_interval = 15

        # get time per second
        physicsScenePath = "/World/physicsScene"
        scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath)
        if not scene:
            carb.log_warn("physics scene not found")
        physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
        self.steps_per_second = physxSceneAPI.GetTimeStepsPerSecondAttr().Get()

    def register_mission(self):
        """
        Register mission
        """
        task_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type, str(self.task_id))
        if not os.path.exists(task_folder):
            carb.log_warn(f"Task folder does not exist at {task_folder}")
            raise FileNotFoundError(task_folder)

        self.mission_file_path = os.path.join(task_folder, "missions.json")
        if os.path.exists(self.mission_file_path):
            self.missions = json.load(open(self.mission_file_path))
            carb.log_info(f"Loading missions.json at path {self.mission_file_path}")
        else:
            self.missions = {}
            with open(self.mission_file_path, "w") as f:
                json.dump(self.missions, f, indent=4)
            carb.log_info(f"Saving missions.json at path {self.mission_file_path}")

        for key, value in self.missions.items():
            if key.startswith(self.mission_identifier_prefix) and key.endswith(self.mission_identifier_suffix):
                return self.missions[key]
        else:
            return {}

    def get_diff(self):
        raise NotImplementedError

    def create_task_callback(self):
        stream = self.timeline.get_timeline_event_stream()
        self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
        # subscribe to Physics updates:
        self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
            self._on_physics_step
        )

    def _on_timeline_event(self, e):
        """
        Set up timeline event
        """
        if e.type == int(omni.timeline.TimelineEventType.STOP):
            self.it = 0
            self.time = 0
            self.reset()

    def reset(self):
        """
        Reset event
        """
        self._physics_update_subscription = None
        self._timeline_subscription = None
        # self._setup_callbacks()

    def _on_success_hold(self):
        try:
            if (self.success_steps - 1) % 240 == 0:
                carb.log_info("hold on")
                BaseChecker.SUCCESS_UI.model.set_value("hold on")
        except:
            pass

    def _on_success(self):
        carb.log_info("task success")
        self.success = True
        try:
            BaseChecker.SUCCESS_UI.model.set_value("task success")
            if self.timeline.is_playing() and not BaseChecker.IS_REPLAY:
                self.timeline.pause()
        except:
            pass

    def _on_not_success(self):
        # carb.log_info("task not success")
        self.success_steps = 0
        self.success = False
        try:
            BaseChecker.SUCCESS_UI.model.set_value("")
        except:
            pass

    def _on_physics_step(self, dt):
        """
        Physics event
        """
        if self.time == 0:
            stage = omni.usd.get_context().get_stage()
            prim_list = list(stage.TraverseAll())
            prim_list = [
                item for item in prim_list
                if 'Isosurface' in item.GetPath().pathString and item.GetTypeName() == 'Mesh'
            ]

            from pxr import Sdf
            water_path = Randomizer(None, 1).get_water_material()
            for iso2Prim in prim_list:
                # omni.kit.commands.execute(
                #     "CreateAndBindMdlMaterialFromLibrary",
                #     mdl_name='.../kit/mdl/core/Base/OmniSurfacePresets.mdl',
                #     mtl_name='OmniSurface_ClearWater',
                #     mtl_created_list=None,
                # )
                # water_path = '/World/Looks/OmniSurface_ClearWater'
                rel = iso2Prim.CreateRelationship("material:binding", False)
                rel.SetTargets([Sdf.Path(water_path)])
                # Randomizer.get_water_material(iso2Prim)

        self.time += 1
        self.start_checking()

    def start_checking(self):
        if self.success_steps > self.steps_per_second * 2:
            self._on_success()

    def save_mission(self):
        """
        Save mission
        """
        self.missions[self.old_mission_identifier] = self.current_mission
        with open(self.mission_file_path, "w") as f:
            json.dump(self.missions, f, indent=4)
        carb.log_info(f"Saving missions.json at path {self.mission_file_path}")
isaac-orbit/orbit.ext_template/scripts/rsl_rl/play.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RSL-RL."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Play an RL agent with RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os

import gymnasium as gym
import omni.isaac.contrib_tasks  # noqa: F401
import omni.isaac.orbit_tasks  # noqa: F401
import torch
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlVecEnvWrapper,
    export_policy_as_onnx,
)
from rsl_rl.runners import OnPolicyRunner

# Import extensions to set up environment tasks
import orbit.ext_template.tasks  # noqa: F401  TODO: import orbit.<your_extension_name>


def main():
    """Play with RSL-RL agent."""
    # parse configuration
    env_cfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
    print(f"[INFO]: Loading model checkpoint from: {resume_path}")

    # load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    ppo_runner.load(resume_path)

    # obtain the trained policy for inference
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)

    # export policy to onnx
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_onnx(ppo_runner.alg.actor_critic, export_model_dir, filename="policy.onnx")

    # reset environment
    obs, _ = env.get_observations()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, _, _ = env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
isaac-orbit/orbit.ext_template/scripts/rsl_rl/cli_args.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import argparse
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg


def add_rsl_rl_args(parser: argparse.ArgumentParser):
    """Add RSL-RL arguments to the parser.

    Args:
        parser: The parser to add the arguments to.
    """
    # create a new argument group
    arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
    # -- experiment arguments
    arg_group.add_argument(
        "--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
    )
    arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
    # -- load arguments
    arg_group.add_argument("--resume", type=bool, default=None, help="Whether to resume from a checkpoint.")
    arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
    arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
    # -- logger arguments
    arg_group.add_argument(
        "--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
    )
    arg_group.add_argument(
        "--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
    )


def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
    """Parse configuration for RSL-RL agent based on inputs.

    Args:
        task_name: The name of the environment.
        args_cli: The command line arguments.

    Returns:
        The parsed configuration for RSL-RL agent based on inputs.
    """
    from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry

    # load the default configuration
    rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")

    # override the default configuration with CLI arguments
    if args_cli.seed is not None:
        rslrl_cfg.seed = args_cli.seed
    if args_cli.resume is not None:
        rslrl_cfg.resume = args_cli.resume
    if args_cli.load_run is not None:
        rslrl_cfg.load_run = args_cli.load_run
    if args_cli.checkpoint is not None:
        rslrl_cfg.load_checkpoint = args_cli.checkpoint
    if args_cli.run_name is not None:
        rslrl_cfg.run_name = args_cli.run_name
    if args_cli.logger is not None:
        rslrl_cfg.logger = args_cli.logger
    # set the project name for wandb and neptune
    if rslrl_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
        rslrl_cfg.wandb_project = args_cli.log_project_name
        rslrl_cfg.neptune_project = args_cli.log_project_name

    return rslrl_cfg
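A sketch of how these helpers are wired into a parser, mirroring what train.py and play.py do; the task name below is a placeholder, and parse_rsl_rl_cfg needs the orbit task registry available at runtime:

# Illustrative wiring; "--task" value is a hypothetical registered environment name.
import argparse
import cli_args

parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment.")
cli_args.add_rsl_rl_args(parser)
args_cli = parser.parse_args(["--task", "Isaac-Velocity-Rough-Anymal-C-v0", "--logger", "tensorboard"])
agent_cfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)  # requires the task registry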
isaac-orbit/orbit.ext_template/scripts/rsl_rl/train.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RSL-RL."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse
import os

from omni.isaac.orbit.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# load cheaper kit config in headless
if args_cli.headless:
    app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.gym.headless.kit"
else:
    app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit"

# launch omniverse app
app_launcher = AppLauncher(args_cli, experience=app_experience)
simulation_app = app_launcher.app

"""Rest everything follows."""

import os
from datetime import datetime

import gymnasium as gym
import omni.isaac.orbit_tasks  # noqa: F401
import torch
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.utils.dict import print_dict
from omni.isaac.orbit.utils.io import dump_pickle, dump_yaml
from omni.isaac.orbit_tasks.utils import get_checkpoint_path, parse_env_cfg
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlVecEnvWrapper,
)
from rsl_rl.runners import OnPolicyRunner

# Import extensions to set up environment tasks
import orbit.ext_template.tasks  # noqa: F401  TODO: import orbit.<your_extension_name>

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False


def main():
    """Train with RSL-RL agent."""
    # parse configuration
    env_cfg: RLTaskEnvCfg = parse_env_cfg(args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs)
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)

    # create runner from rsl-rl
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # save resume path before creating a new log_dir
    if agent_cfg.resume:
        # get path to previous checkpoint
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)

    # set seed of the environment
    env.seed(agent_cfg.seed)

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)

    # run training
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main execution
    main()
    # close sim app
    simulation_app.close()
isaac-orbit/orbit.ext_template/orbit/ext_template/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
Python module serving as a project/extension template.
"""

# Register Gym environments.
from .tasks import *

# Register UI extensions.
from .ui_extension_example import *
isaac-orbit/orbit.ext_template/orbit/ext_template/ui_extension_example.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import omni.ext
import omni.ui as ui


# Functions and vars are available to other extensions as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
    print("[orbit.ext_template] some_public_function was called with x: ", x)
    return x**x


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class ExampleExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[orbit.ext_template] startup")

        self._count = 0

        self._window = ui.Window("My Window", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                label = ui.Label("")

                def on_click():
                    self._count += 1
                    label.text = f"count: {self._count}"

                def on_reset():
                    self._count = 0
                    label.text = "empty"

                on_reset()

                with ui.HStack():
                    ui.Button("Add", clicked_fn=on_click)
                    ui.Button("Reset", clicked_fn=on_reset)

    def on_shutdown(self):
        print("[orbit.ext_template] shutdown")
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Package containing task implementations for various robotic environments."""

import os

import toml

# Conveniences to other module directories via relative paths
ORBIT_TASKS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""

ORBIT_TASKS_METADATA = toml.load(os.path.join(ORBIT_TASKS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""

# Configure the module-level variables
__version__ = ORBIT_TASKS_METADATA["package"]["version"]

##
# Register Gym environments.
##

from omni.isaac.orbit_tasks.utils import import_packages

# The blacklist is used to prevent importing configs from sub-packages
_BLACKLIST_PKGS = ["utils"]
# Import all configs in this package
import_packages(__name__, _BLACKLIST_PKGS)
969
Python
29.312499
95
0.744066
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Locomotion environments for legged robots."""

from .velocity import *  # noqa
205
Python
21.888886
56
0.731707
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/velocity_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import math
from dataclasses import MISSING

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RandomizationTermCfg as RandTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise

import orbit.ext_template.tasks.locomotion.velocity.mdp as mdp

##
# Pre-defined configs
##
from omni.isaac.orbit.terrains.config.rough import ROUGH_TERRAINS_CFG  # isort: skip


##
# Scene definition
##


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with a legged robot."""

    # ground terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="generator",
        terrain_generator=ROUGH_TERRAINS_CFG,
        max_init_terrain_level=5,
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="multiply",
            restitution_combine_mode="multiply",
            static_friction=1.0,
            dynamic_friction=1.0,
        ),
        visual_material=sim_utils.MdlFileCfg(
            mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
            project_uvw=True,
        ),
        debug_vis=False,
    )
    # robots
    robot: ArticulationCfg = MISSING
    # sensors
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=False,
        mesh_prim_paths=["/World/ground"],
    )
    contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True)
    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )
    sky_light = AssetBaseCfg(
        prim_path="/World/skyLight",
        spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0),
    )


##
# MDP settings
##


@configclass
class CommandsCfg:
    """Command specifications for the MDP."""

    base_velocity = mdp.UniformVelocityCommandCfg(
        asset_name="robot",
        resampling_time_range=(10.0, 10.0),
        rel_standing_envs=0.02,
        rel_heading_envs=1.0,
        heading_command=True,
        heading_control_stiffness=0.5,
        debug_vis=True,
        ranges=mdp.UniformVelocityCommandCfg.Ranges(
            lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi)
        ),
    )


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
        projected_gravity = ObsTerm(
            func=mdp.projected_gravity,
            noise=Unoise(n_min=-0.05, n_max=0.05),
        )
        velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"})
        joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
        actions = ObsTerm(func=mdp.last_action)
        height_scan = ObsTerm(
            func=mdp.height_scan,
            params={"sensor_cfg": SceneEntityCfg("height_scanner")},
            noise=Unoise(n_min=-0.1, n_max=0.1),
            clip=(-1.0, 1.0),
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class RandomizationCfg:
    """Configuration for randomization."""

    # startup
    physics_material = RandTerm(
        func=mdp.randomize_rigid_body_material,
        mode="startup",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
            "static_friction_range": (0.8, 0.8),
            "dynamic_friction_range": (0.6, 0.6),
            "restitution_range": (0.0, 0.0),
            "num_buckets": 64,
        },
    )

    add_base_mass = RandTerm(
        func=mdp.add_body_mass,
        mode="startup",
        params={"asset_cfg": SceneEntityCfg("robot", body_names="base"), "mass_range": (-5.0, 5.0)},
    )

    # reset
    base_external_force_torque = RandTerm(
        func=mdp.apply_external_force_torque,
        mode="reset",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names="base"),
            "force_range": (0.0, 0.0),
            "torque_range": (-0.0, 0.0),
        },
    )

    reset_base = RandTerm(
        func=mdp.reset_root_state_uniform,
        mode="reset",
        params={
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {
                "x": (-0.5, 0.5),
                "y": (-0.5, 0.5),
                "z": (-0.5, 0.5),
                "roll": (-0.5, 0.5),
                "pitch": (-0.5, 0.5),
                "yaw": (-0.5, 0.5),
            },
        },
    )

    reset_robot_joints = RandTerm(
        func=mdp.reset_joints_by_scale,
        mode="reset",
        params={
            "position_range": (0.5, 1.5),
            "velocity_range": (0.0, 0.0),
        },
    )

    # interval
    push_robot = RandTerm(
        func=mdp.push_by_setting_velocity,
        mode="interval",
        interval_range_s=(10.0, 15.0),
        params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}},
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # -- task
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    track_ang_vel_z_exp = RewTerm(
        func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    # -- penalties
    lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
    ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05)
    dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5)
    dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7)
    action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
    feet_air_time = RewTerm(
        func=mdp.feet_air_time,
        weight=0.125,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
            "command_name": "base_velocity",
            "threshold": 0.5,
        },
    )
    undesired_contacts = RewTerm(
        func=mdp.undesired_contacts,
        weight=-1.0,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
    )
    # -- optional penalties
    flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0)
    dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0)


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    base_contact = DoneTerm(
        func=mdp.illegal_contact,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
    )


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)


##
# Environment configuration
##


@configclass
class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
    """Configuration for the locomotion velocity-tracking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    randomization: RandomizationCfg = RandomizationCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 4
        self.episode_length_s = 20.0
        # simulation settings
        self.sim.dt = 0.005
        self.sim.disable_contact_processing = True
        self.sim.physics_material = self.scene.terrain.physics_material
        # update sensor update periods
        # we tick all the sensors based on the smallest update period (physics update period)
        if self.scene.height_scanner is not None:
            self.scene.height_scanner.update_period = self.decimation * self.sim.dt
        if self.scene.contact_forces is not None:
            self.scene.contact_forces.update_period = self.sim.dt

        # check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator
        # this generates terrains with increasing difficulty and is useful for training
        if getattr(self.curriculum, "terrain_levels", None) is not None:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = True
        else:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = False
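# A minimal usage sketch (hypothetical values; not part of this file). The config is a
# plain configclass, so task variants are usually created by mutating fields after
# construction or in a subclass __post_init__, e.g.:
#
#   cfg = LocomotionVelocityRoughEnvCfg()
#   cfg.scene.num_envs = 64               # smaller scene for debugging
#   cfg.curriculum.terrain_levels = None  # opt out of the terrain curriculum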
10,649
Python
32.596214
118
0.626538
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Locomotion environments with velocity-tracking commands.

These environments are based on the `legged_gym` environments provided by Rudin et al.

Reference:
    https://github.com/leggedrobotics/legged_gym
"""
336
Python
24.923075
86
0.764881
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/mdp/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""This sub-module contains the functions that are specific to the locomotion environments."""

from omni.isaac.orbit.envs.mdp import *  # noqa: F401, F403

from .curriculums import *  # noqa: F401, F403
from .rewards import *  # noqa: F401, F403
370
Python
29.916664
94
0.732432
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/mdp/curriculums.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Common functions that can be used to create curriculum for the learning environment.

The functions can be passed to the :class:`omni.isaac.orbit.managers.CurriculumTermCfg` object to enable
the curriculum introduced by the function.
"""

from __future__ import annotations

from collections.abc import Sequence
from typing import TYPE_CHECKING

import torch
from omni.isaac.orbit.assets import Articulation
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.terrains import TerrainImporter

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def terrain_levels_vel(
    env: RLTaskEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
    """Curriculum based on the distance the robot walked when commanded to move at a desired velocity.

    This term is used to increase the difficulty of the terrain when the robot walks far enough and decrease the
    difficulty when the robot walks less than half of the distance required by the commanded velocity.

    .. note::
        It is only possible to use this term with the terrain type ``generator``. For further information
        on different terrain types, check the :class:`omni.isaac.orbit.terrains.TerrainImporter` class.

    Returns:
        The mean terrain level for the given environment ids.
    """
    # extract the used quantities (to enable type-hinting)
    asset: Articulation = env.scene[asset_cfg.name]
    terrain: TerrainImporter = env.scene.terrain
    command = env.command_manager.get_command("base_velocity")
    # compute the distance the robot walked
    distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1)
    # robots that walked far enough progress to harder terrains
    move_up = distance > terrain.cfg.terrain_generator.size[0] / 2
    # robots that walked less than half of their required distance go to simpler terrains
    move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5
    move_down *= ~move_up
    # update terrain levels
    terrain.update_env_origins(env_ids, move_up, move_down)
    # return the mean terrain level
    return torch.mean(terrain.terrain_levels.float())
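# A minimal wiring sketch (mirrors how velocity_env_cfg.py uses this module): curriculum
# functions are not called directly; they are referenced from a curriculum term config.
#
#   from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
#
#   terrain_levels = CurrTerm(func=terrain_levels_vel)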
2,376
Python
41.446428
112
0.742424
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/mdp/rewards.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

from typing import TYPE_CHECKING

import torch
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sensors import ContactSensor

if TYPE_CHECKING:
    from omni.isaac.orbit.envs import RLTaskEnv


def feet_air_time(env: RLTaskEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> torch.Tensor:
    """Reward long steps taken by the feet using L2-kernel.

    This function rewards the agent for taking steps that are longer than a threshold. This helps ensure
    that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of
    the time for which the feet are in the air.

    If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
    """
    # extract the used quantities (to enable type-hinting)
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # compute the reward
    first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
    last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids]
    reward = torch.sum((last_air_time - threshold) * first_contact, dim=1)
    # no reward for zero command
    reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
    return reward


def feet_air_time_positive_biped(env, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
    """Reward long steps taken by the feet for bipeds.

    This function rewards the agent for taking steps up to a specified threshold and also keeps one foot at
    a time in the air.

    If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
    """
    contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
    # compute the reward
    air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids]
    contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids]
    in_contact = contact_time > 0.0
    in_mode_time = torch.where(in_contact, contact_time, air_time)
    single_stance = torch.sum(in_contact.int(), dim=1) == 1
    reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0]
    reward = torch.clamp(reward, max=threshold)
    # no reward for zero command
    reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
    return reward
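# A minimal wiring sketch (sensor and body names are assumptions taken from the rough
# environment config earlier in this package):
#
#   from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
#
#   feet_air_time_term = RewTerm(
#       func=feet_air_time,
#       weight=0.125,
#       params={
#           "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
#           "command_name": "base_velocity",
#           "threshold": 0.5,
#       },
#   )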
2,595
Python
43.75862
119
0.717148
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Configurations for velocity-based locomotion environments."""

# We leave this file empty since we don't want to expose any configs in this package directly.
# We still need this file to import the "config" module in the parent package.
363
Python
35.399996
94
0.763085
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/rough_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from orbit.ext_template.tasks.locomotion.velocity.velocity_env_cfg import (
    LocomotionVelocityRoughEnvCfg,
)

##
# Pre-defined configs
##
from omni.isaac.orbit_assets.anymal import ANYMAL_D_CFG  # isort: skip


@configclass
class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()
        # switch robot to anymal-d
        self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")


@configclass
class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # spawn the robot randomly in the grid (instead of their terrain levels)
        self.scene.terrain.max_init_terrain_level = None
        # reduce the number of terrains to save memory
        if self.scene.terrain.terrain_generator is not None:
            self.scene.terrain.terrain_generator.num_rows = 5
            self.scene.terrain.terrain_generator.num_cols = 5
            self.scene.terrain.terrain_generator.curriculum = False

        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.randomization.base_external_force_torque = None
        self.randomization.push_robot = None
1,617
Python
32.020408
81
0.683364
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/flat_env_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass

from .rough_env_cfg import AnymalDRoughEnvCfg


@configclass
class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        # override rewards
        self.rewards.flat_orientation_l2.weight = -5.0
        self.rewards.dof_torques_l2.weight = -2.5e-5
        self.rewards.feet_air_time.weight = 0.5
        # change terrain to flat
        self.scene.terrain.terrain_type = "plane"
        self.scene.terrain.terrain_generator = None
        # no height scan
        self.scene.height_scanner = None
        self.observations.policy.height_scan = None
        # no terrain curriculum
        self.curriculum.terrain_levels = None


@configclass
class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg):
    def __post_init__(self) -> None:
        # post init of parent
        super().__post_init__()
        # make a smaller scene for play
        self.scene.num_envs = 50
        self.scene.env_spacing = 2.5
        # disable randomization for play
        self.observations.policy.enable_corruption = False
        # remove random pushing
        self.randomization.base_external_force_torque = None
        self.randomization.push_robot = None
1,382
Python
30.431817
60
0.656295
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import gymnasium as gym

from . import agents, flat_env_cfg, rough_env_cfg

##
# Register Gym environments.
##

gym.register(
    id="Isaac-Velocity-Flat-Anymal-D-Template-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Flat-Anymal-D-Template-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-D-Template-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
    },
)

gym.register(
    id="Isaac-Velocity-Rough-Anymal-D-Template-Play-v0",
    entry_point="omni.isaac.orbit.envs:RLTaskEnv",
    disable_env_checker=True,
    kwargs={
        "env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
        "rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
    },
)
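# A minimal usage sketch (the task id comes from the registrations above; parse_env_cfg
# mirrors its use in the train.py script earlier in this template):
#
#   import gymnasium as gym
#   from omni.isaac.orbit_tasks.utils import parse_env_cfg
#
#   env_cfg = parse_env_cfg("Isaac-Velocity-Flat-Anymal-D-Template-v0", use_gpu=True, num_envs=16)
#   env = gym.make("Isaac-Velocity-Flat-Anymal-D-Template-v0", cfg=env_cfg)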
1,498
Python
27.283018
77
0.688251
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/rsl_rl_cfg.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlPpoActorCriticCfg,
    RslRlPpoAlgorithmCfg,
)


@configclass
class AnymalDRoughPPORunnerCfg(RslRlOnPolicyRunnerCfg):
    num_steps_per_env = 24
    max_iterations = 1500
    save_interval = 50
    experiment_name = "anymal_d_rough"
    empirical_normalization = False
    policy = RslRlPpoActorCriticCfg(
        init_noise_std=1.0,
        actor_hidden_dims=[512, 256, 128],
        critic_hidden_dims=[512, 256, 128],
        activation="elu",
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        clip_param=0.2,
        entropy_coef=0.005,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-3,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )


@configclass
class AnymalDFlatPPORunnerCfg(AnymalDRoughPPORunnerCfg):
    def __post_init__(self):
        super().__post_init__()

        self.max_iterations = 300
        self.experiment_name = "anymal_d_flat"
        self.policy.actor_hidden_dims = [128, 128, 128]
        self.policy.critic_hidden_dims = [128, 128, 128]
1,417
Python
26.26923
58
0.645025
isaac-orbit/orbit.ext_template/orbit/ext_template/tasks/locomotion/velocity/config/anymal_d/agents/__init__.py
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from . import rsl_rl_cfg  # noqa: F401, F403
168
Python
23.142854
56
0.720238
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/NDItools.py
from .eventsystem import EventSystem

import carb.profiler
import logging
from .deps import NDIlib as ndi
import numpy as np
import omni.kit.app
import omni.ui
import threading
import time
from typing import List
import warp as wp


class NDItools():
    def __init__(self):
        self._ndi_ok = False
        self._ndi_find = None

        self._ndi_init()
        self._ndi_find_init()

        self._finder = None
        self._create_finder()

        self._streams = []

        stream = omni.kit.app.get_app().get_update_event_stream()
        self._sub = stream.create_subscription_to_pop(self._on_update, name="update")

    def destroy(self):
        self._sub.unsubscribe()
        self._sub = None

        self._finder.destroy()

        for stream in self._streams:
            stream.destroy()
        self._streams.clear()

        if self._ndi_ok:
            if self._ndi_find is not None:
                ndi.find_destroy(self._ndi_find)
            ndi.destroy()
        self._ndi_ok = False

    def is_ndi_ok(self) -> bool:
        return self._ndi_ok

    def _on_update(self, e):
        to_remove = []
        for stream in self._streams:
            if not stream.is_running():
                to_remove.append(stream)

        for stream in to_remove:
            self._streams.remove(stream)
            EventSystem.send_event(EventSystem.STREAM_STOP_TIMEOUT_EVENT, payload={"dynamic_id": stream.get_id()})
            stream.destroy()

    def _ndi_init(self):
        if not ndi.initialize():
            logger = logging.getLogger(__name__)
            logger.error("Could not initialize NDI®")
            return
        self._ndi_ok = True

    def _ndi_find_init(self):
        self._ndi_find = ndi.find_create_v2()
        if self._ndi_find is None:
            logger = logging.getLogger(__name__)
            logger.error("Could not initialize NDI® find")
            return

    def _create_finder(self):
        if self._ndi_find:
            self._finder = NDIfinder(self)

    def get_ndi_find(self):
        return self._ndi_find

    def get_stream(self, dynamic_id):
        return next((x for x in self._streams if x.get_id() == dynamic_id), None)

    def try_add_stream(self, dynamic_id: str, ndi_source: str, lowbandwidth: bool,
                       update_fps_fn, update_dimensions_fn) -> bool:
        stream: NDIVideoStream = NDIVideoStream(dynamic_id, ndi_source, lowbandwidth, self,
                                                update_fps_fn, update_dimensions_fn)
        if not stream.is_ok:
            logger = logging.getLogger(__name__)
            logger.error(f"Error opening stream: {ndi_source}")
            return False
        self._streams.append(stream)
        return True

    def try_add_stream_proxy(self, dynamic_id: str, ndi_source: str, fps: float, lowbandwidth: bool) -> bool:
        stream: NDIVideoStreamProxy = NDIVideoStreamProxy(dynamic_id, ndi_source, fps, lowbandwidth)
        if not stream.is_ok:
            logger = logging.getLogger(__name__)
            logger.error(f"Error opening stream: {ndi_source}")
            return False
        self._streams.append(stream)
        return True

    def stop_stream(self, dynamic_id: str):
        stream = self.get_stream(dynamic_id)
        if stream is not None:
            self._streams.remove(stream)
            stream.destroy()

    def stop_all_streams(self):
        for stream in self._streams:
            stream.destroy()
        self._streams.clear()


class NDIfinder():
    SLEEP_INTERVAL: float = 2  # seconds

    def __init__(self, tools: NDItools):
        self._tools = tools
        self._previous_sources: List[str] = []

        self._is_running = True
        self._thread = threading.Thread(target=self._search)
        self._thread.start()

    def destroy(self):
        self._is_running = False
        self._thread.join()
        self._thread = None

    def _search(self):
        find = self._tools.get_ndi_find()
        if find:
            while self._is_running:
                sources = ndi.find_get_current_sources(find)
                result = [s.ndi_name for s in sources]
                delta = set(result) ^ set(self._previous_sources)
                if len(delta) > 0:
                    self._previous_sources = result
                    EventSystem.send_event(EventSystem.NDIFINDER_NEW_SOURCES, payload={"sources": result})
                time.sleep(NDIfinder.SLEEP_INTERVAL)
        self._is_running = False


class NDIVideoStream():
    NO_FRAME_TIMEOUT = 5  # seconds

    def __init__(self, dynamic_id: str, ndi_source: str, lowbandwidth: bool, tools: NDItools,
                 update_fps_fn, update_dimensions_fn):
        wp.init()
        self._dynamic_id = dynamic_id
        self._ndi_source = ndi_source
        self._lowbandwidth = lowbandwidth
        self._thread: threading.Thread = None
        self._ndi_recv = None

        self._update_fps_fn = update_fps_fn
        self._fps_current = 0.0
        self._fps_avg_total = 0.0
        self._fps_avg_count = 0
        self._fps_expected = 0.0
        self._update_dimensions_fn = update_dimensions_fn

        self.is_ok = False

        if not tools.is_ndi_ok():
            return

        ndi_find = tools.get_ndi_find()
        source = None
        sources = ndi.find_get_current_sources(ndi_find)
        source_candidates = [s for s in sources if s.ndi_name == self._ndi_source]
        if len(source_candidates) != 0:
            source = source_candidates[0]

        if source is None:
            logger = logging.getLogger(__name__)
            logger.error(f"TIMEOUT: Could not find source at \"{self._ndi_source}\".")
            return

        if lowbandwidth:
            recv_create_desc = self.get_recv_low_bandwidth()
        else:
            recv_create_desc = self.get_recv_high_bandwidth()

        self._ndi_recv = ndi.recv_create_v3(recv_create_desc)
        if self._ndi_recv is None:
            logger = logging.getLogger(__name__)
            logger.error("Could not create NDI® receiver")
            return

        ndi.recv_connect(self._ndi_recv, source)

        self._is_running = True
        self._thread = threading.Thread(target=self._update_texture, args=(self._dynamic_id, ))
        self._thread.start()

        self.is_ok = True

    def _update_fps(self):
        self._update_fps_fn(self._fps_current,
                            self._fps_avg_total / self._fps_avg_count if self._fps_avg_count != 0 else 0,
                            self._fps_expected)

    def destroy(self):
        self._update_fps()
        self._is_running = False
        self._thread.join()
        self._thread = None
        ndi.recv_destroy(self._ndi_recv)

    def get_id(self) -> str:
        return self._dynamic_id

    def is_running(self) -> bool:
        return self._is_running

    def get_recv_high_bandwidth(self):
        recv_create_desc = ndi.RecvCreateV3()
        recv_create_desc.color_format = ndi.RECV_COLOR_FORMAT_RGBX_RGBA
        recv_create_desc.bandwidth = ndi.RECV_BANDWIDTH_HIGHEST
        return recv_create_desc

    def get_recv_low_bandwidth(self):
        recv_create_desc = ndi.RecvCreateV3()
        recv_create_desc.color_format = ndi.RECV_COLOR_FORMAT_RGBX_RGBA
        recv_create_desc.bandwidth = ndi.RECV_BANDWIDTH_LOWEST
        return recv_create_desc

    @carb.profiler.profile
    def _update_texture(self, dynamic_id: str):
        carb.profiler.begin(0, 'Omniverse NDI®::Init')
        dynamic_texture = omni.ui.DynamicTextureProvider(dynamic_id)

        last_read = time.time() - 1  # Make sure we run on the first frame
        fps = 120.0
        no_frame_chances = NDIVideoStream.NO_FRAME_TIMEOUT * fps
        index = 0

        self._fps_avg_total = 0.0
        self._fps_avg_count = 0
        carb.profiler.end(0)

        while self._is_running:
            carb.profiler.begin(1, 'Omniverse NDI®::loop outer')
            now = time.time()
            time_delta = now - last_read
            if (time_delta < 1.0 / fps):
                carb.profiler.end(1)
                continue
            carb.profiler.begin(2, 'Omniverse NDI®::loop inner')
            self._fps_current = 1.0 / time_delta
            last_read = now

            carb.profiler.begin(3, 'Omniverse NDI®::receive frame')
            t, v, _, _ = ndi.recv_capture_v2(self._ndi_recv, 0)
            carb.profiler.end(3)

            if t == ndi.FRAME_TYPE_VIDEO:
                carb.profiler.begin(3, 'Omniverse NDI®::prepare frame')
                fps = v.frame_rate_N / v.frame_rate_D
                self._fps_expected = fps
                if (index == 0):
                    self._fps_current = fps
                color_format = v.FourCC
                frame = v.data
                height, width, channels = frame.shape
                isGPU = height == width
                carb.profiler.end(3)

                if isGPU:
                    carb.profiler.begin(3, 'Omniverse NDI®::begin gpu')
                    with wp.ScopedDevice("cuda"):
                        # CUDA doesn't handle non square textures well, so we would need to resize if the
                        # texture is not square. We are keeping this code in case we find a workaround
                        #
                        # carb.profiler.begin(4, 'Omniverse NDI®::begin cpu resize')
                        # frame = np.resize(frame, (width, width, channels))
                        # carb.profiler.end(4)  # 38 ms

                        carb.profiler.begin(4, 'Omniverse NDI®::gpu uploading')
                        pixels_data = wp.from_numpy(frame, dtype=wp.uint8, device="cuda")
                        carb.profiler.end(4)  # 1 ms

                        carb.profiler.begin(4, 'Omniverse NDI®::create gpu texture')
                        self._update_dimensions_fn(width, height, str(color_format))
                        dynamic_texture.set_bytes_data_from_gpu(pixels_data.ptr, [width, width])
                        carb.profiler.end(4)
                    carb.profiler.end(3)
                else:
                    carb.profiler.begin(3, 'Omniverse NDI®::begin cpu')
                    self._update_dimensions_fn(width, height, str(color_format))
                    dynamic_texture.set_data_array(frame, [width, height, channels])
                    carb.profiler.end(3)

                ndi.recv_free_video_v2(self._ndi_recv, v)
                carb.profiler.end(3)

                self._fps_avg_total += self._fps_current
                self._fps_avg_count += 1
                self._update_fps()

                index += 1

            if t == ndi.FRAME_TYPE_NONE:
                no_frame_chances -= 1
                if (no_frame_chances <= 0):
                    self._is_running = False
            else:
                no_frame_chances = NDIVideoStream.NO_FRAME_TIMEOUT * fps

            carb.profiler.end(2)
            carb.profiler.end(1)


class NDIVideoStreamProxy():
    def __init__(self, dynamic_id: str, ndi_source: str, fps: float, lowbandwidth: bool):
        self._dynamic_id = dynamic_id
        self._ndi_source = ndi_source
        self._fps = fps
        self._lowbandwidth = lowbandwidth
        self._thread: threading.Thread = None

        self.is_ok = False

        denominator = 1
        if lowbandwidth:
            denominator = 3
        w = int(1920 / denominator)  # TODO: dimensions from name like for fps
        h = int(1080 / denominator)

        self._is_running = True
        self._thread = threading.Thread(target=self._update_texture, args=(self._dynamic_id, self._fps, w, h, ))
        self._thread.start()

        self.is_ok = True

    def destroy(self):
        self._is_running = False
        self._thread.join()
        self._thread = None

    def get_id(self) -> str:
        return self._dynamic_id

    def is_running(self) -> bool:
        return self._is_running

    @carb.profiler.profile
    def _update_texture(self, dynamic_id: str, fps: float, width: float, height: float):
        carb.profiler.begin(0, 'Omniverse NDI®::Init')
        color = np.array([255, 0, 0, 255], np.uint8)
        channels = len(color)

        dynamic_texture = omni.ui.DynamicTextureProvider(dynamic_id)
        frame = np.full((height, width, channels), color, dtype=np.uint8)

        last_read = time.time() - 1
        carb.profiler.end(0)
        while self._is_running:
            carb.profiler.begin(1, 'Omniverse NDI®::Proxy loop outer')
            now = time.time()
            time_delta = now - last_read
            if (time_delta < 1.0 / fps):
                carb.profiler.end(1)
                continue
            carb.profiler.begin(2, 'Omniverse NDI®::Proxy loop inner')
            last_read = now

            carb.profiler.begin(3, 'Omniverse NDI®::set_data')
            dynamic_texture.set_data_array(frame, [width, height, channels])
            carb.profiler.end(3)

            carb.profiler.end(2)
            carb.profiler.end(1)
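# A minimal usage sketch (the NDI® source name is hypothetical; callbacks are placeholders):
#
#   tools = NDItools()
#   ok = tools.try_add_stream(
#       dynamic_id="myDynamicMaterial",
#       ndi_source="MY-PC (Test Pattern)",
#       lowbandwidth=False,
#       update_fps_fn=lambda current, average, expected: None,
#       update_dimensions_fn=lambda width, height, color_format: None,
#   )
#   ...
#   tools.stop_all_streams()
#   tools.destroy()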
13,024
Python
33.276316
144
0.552288
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/extension.py
from .window import Window

import asyncio
import omni.ext
import omni.kit.app
import omni.kit.ui


class MFOVNdiExtension(omni.ext.IExt):
    MENU_PATH = f"Window/{Window.WINDOW_NAME}"

    def on_startup(self, _):
        self._menu = None
        self._window: Window = None

        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            self._menu = editor_menu.add_item(
                MFOVNdiExtension.MENU_PATH, self._show_window, toggle=True, value=True
            )
        self._show_window(None, True)

    def on_shutdown(self):
        if self._menu:
            self._menu = None
        if self._window:
            self._destroy_window()

    def _destroy_window(self):
        self._window.destroy()
        self._window = None

    def _set_menu(self, visible):
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(MFOVNdiExtension.MENU_PATH, visible)

    async def _destroy_window_async(self):
        await omni.kit.app.get_app().next_update_async()
        if self._window:
            self._destroy_window()

    def _visibility_changed_fn(self, visible):
        self._set_menu(visible)
        if not visible:
            asyncio.ensure_future(self._destroy_window_async())

    def _show_window(self, _, value):
        if value:
            self._window = Window(width=800, height=275)
            self._window.set_visibility_changed_fn(self._visibility_changed_fn)
        elif self._window:
            self._destroy_window()
1,541
Python
27.036363
86
0.595717
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/model.py
from .bindings import Binding, BindingsModel
from .comboboxModel import ComboboxModel
from .NDItools import NDItools
from .USDtools import DynamicPrim, USDtools

import logging
import re
from typing import List


class Model():
    def __init__(self):
        self._bindings_model: BindingsModel = BindingsModel()
        self._ndi: NDItools = NDItools()

    def destroy(self):
        self._ndi.destroy()
        self._bindings_model.destroy()

# region bindings
    def get_bindings_count(self) -> int:
        return self._bindings_model.count()

    def get_binding_data_from_index(self, index: int):
        return self._bindings_model.get(index)

    def get_ndi_source_list(self) -> List[str]:
        return self._bindings_model.get_source_list()

    def apply_new_binding_source(self, dynamic_id: str, new_source: str):
        self._bindings_model.bind(dynamic_id, new_source)

    def apply_lowbandwidth_value(self, dynamic_id: str, value: bool):
        self._bindings_model.set_low_bandwidth(dynamic_id, value)
# endregion

# region dynamic
    def create_dynamic_material(self, name: str):
        safename = USDtools.make_name_valid(name)
        if name != safename:
            logger = logging.getLogger(__name__)
            logger.warning(f"Name \"{name}\" was not a valid USD identifier, changed it to \"{safename}\"")

        final_name = safename
        index = 1
        while (self._bindings_model.find_binding_from_id(final_name) is not None):
            suffix = str(index) if index >= 10 else "0" + str(index)
            final_name = safename + "_" + suffix  # name, name_01, name_02, ..., name_99, name_100
            index += 1

        USDtools.create_dynamic_material(final_name)
        self.search_for_dynamic_material()

    def search_for_dynamic_material(self):
        result: List[DynamicPrim] = USDtools.find_all_dynamic_sources()
        self._bindings_model.update_dynamic_prims(result)

    def _get_prims_with_id(self, dynamic_id: str) -> List[DynamicPrim]:
        prims: List[DynamicPrim] = self._bindings_model.get_prim_list()
        return [x for x in prims if x.dynamic_id == dynamic_id]

    def set_ndi_source_prim_attr(self, dynamic_id: str, source: str):
        for prim in self._get_prims_with_id(dynamic_id):
            USDtools.set_prim_ndi_attribute(prim.path, source)

    def set_lowbandwidth_prim_attr(self, dynamic_id: str, value: bool):
        for prim in self._get_prims_with_id(dynamic_id):
            USDtools.set_prim_lowbandwidth_attribute(prim.path, value)
# endregion

# region stream
    def try_add_stream(self, binding: Binding, lowbandwidth: bool, update_fps_fn, update_dimensions_fn) -> bool:
        if self._ndi.get_stream(binding.dynamic_id) is not None:
            logger = logging.getLogger(__name__)
            logger.warning(f"There's already a stream running for {binding.dynamic_id}")
            return False

        if binding.ndi_source == ComboboxModel.NONE_VALUE:
            logger = logging.getLogger(__name__)
            logger.warning("Won't create stream without NDI® source")
            return False

        if binding.ndi_source == ComboboxModel.PROXY_VALUE:
            fps = float(re.search(r"\((.*)\)", binding.ndi_source).group(1).split("p")[1])
            success: bool = self._ndi.try_add_stream_proxy(binding.dynamic_id, binding.ndi_source, fps, lowbandwidth)
            return success
        else:
            success: bool = self._ndi.try_add_stream(binding.dynamic_id, binding.ndi_source, lowbandwidth,
                                                     update_fps_fn, update_dimensions_fn)
            return success

    def stop_stream(self, binding: Binding):
        self._ndi.stop_stream(binding.dynamic_id)

    def stop_all_streams(self):
        self._ndi.stop_all_streams()
# endregion
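# A minimal end-to-end sketch (callback bodies are placeholders; assumes a stage is open):
#
#   model = Model()
#   model.create_dynamic_material("myDynamicMaterial")  # also refreshes the bindings
#   binding, prim, ndi_data = model.get_binding_data_from_index(0)
#   model.try_add_stream(binding, lowbandwidth=True,
#                        update_fps_fn=lambda cur, avg, exp: None,
#                        update_dimensions_fn=lambda w, h, fmt: None)
#   model.stop_all_streams()
#   model.destroy()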
3,820
Python
37.989796
118
0.638482
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/eventsystem.py
import carb.events
import omni.kit.app


class EventSystem():
    BINDINGS_CHANGED_EVENT = carb.events.type_from_string("mf.ov.ndi.BINDINGS_CHANGED_EVENT")
    COMBOBOX_CHANGED_EVENT = carb.events.type_from_string("mf.ov.ndi.COMBOBOX_CHANGED_EVENT")
    NDIFINDER_NEW_SOURCES = carb.events.type_from_string("mf.ov.ndi.NDIFINDER_NEW_SOURCES")
    COMBOBOX_SOURCE_CHANGE_EVENT = carb.events.type_from_string("mf.ov.ndi.COMBOBOX_SOURCE_CHANGE_EVENT")
    NDI_STATUS_CHANGE_EVENT = carb.events.type_from_string("mf.ov.ndi.NDI_STATUS_CHANGE_EVENT")
    STREAM_STOP_TIMEOUT_EVENT = carb.events.type_from_string("mf.ov.ndi.STREAM_STOP_TIMEOUT_EVENT")

    def subscribe(event: int, cb: callable) -> carb.events.ISubscription:
        bus = omni.kit.app.get_app().get_message_bus_event_stream()
        return bus.create_subscription_to_push_by_type(event, cb)

    def send_event(event: int, payload: dict = {}):
        bus = omni.kit.app.get_app().get_message_bus_event_stream()
        bus.push(event, payload=payload)
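# A minimal usage sketch (the payload key matches how NDIFINDER_NEW_SOURCES is sent from
# NDItools.py; the handler is a placeholder):
#
#   def _on_new_sources(e: carb.events.IEvent):
#       print(e.payload["sources"])
#
#   sub = EventSystem.subscribe(EventSystem.NDIFINDER_NEW_SOURCES, _on_new_sources)
#   EventSystem.send_event(EventSystem.NDIFINDER_NEW_SOURCES, payload={"sources": ["MY-PC (cam)"]})
#   sub.unsubscribe()  # keep a reference to the subscription; dropping it ends the subscription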
1,015
Python
49.799998
105
0.717241
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/USDtools.py
from .bindings import DynamicPrim

import logging
import numpy as np
import omni.ext
import omni.ui
import omni.usd
from pxr import Usd, UsdGeom, UsdShade, Sdf, UsdLux, Tf
from typing import List
from unidecode import unidecode


class USDtools():
    ATTR_NDI_NAME = 'ndi:source'
    ATTR_BANDWIDTH_NAME = "ndi:lowbandwidth"
    PREFIX = "dynamic://"
    SCOPE_NAME = "NDI_Looks"

    def get_stage() -> Usd.Stage:
        usd_context = omni.usd.get_context()
        return usd_context.get_stage()

    def make_name_valid(name: str) -> str:
        return Tf.MakeValidIdentifier(unidecode(name))

    def create_dynamic_material(safename: str):
        stage = USDtools.get_stage()
        if not stage:
            logger = logging.getLogger(__name__)
            logger.error("Could not get stage")
            return

        scope_path: str = f"{stage.GetDefaultPrim().GetPath()}/{USDtools.SCOPE_NAME}"
        UsdGeom.Scope.Define(stage, scope_path)

        USDtools._create_material_and_shader(stage, scope_path, safename)
        USDtools._fill_dynamic_with_magenta(safename)

    def _create_material_and_shader(stage: Usd.Stage, scope_path: str, safename: str):
        material_path = f"{scope_path}/{safename}"
        material: UsdShade.Material = UsdShade.Material.Define(stage, material_path)
        shader: UsdShade.Shader = UsdShade.Shader.Define(stage, f"{material_path}/Shader")
        shader.SetSourceAsset("OmniPBR.mdl", "mdl")
        shader.SetSourceAssetSubIdentifier("OmniPBR", "mdl")
        shader.CreateIdAttr("OmniPBR")
        shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset).Set(f"{USDtools.PREFIX}{safename}")
        material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")

    def _fill_dynamic_with_magenta(safename: str):
        magenta = np.array([255, 0, 255, 255], np.uint8)
        frame = np.full((1, 1, 4), magenta, dtype=np.uint8)
        height, width, channels = frame.shape
        dynamic_texture = omni.ui.DynamicTextureProvider(safename)
        dynamic_texture.set_data_array(frame, [width, height, channels])

    def find_all_dynamic_sources() -> List[DynamicPrim]:
        stage = USDtools.get_stage()
        if not stage:
            logger = logging.getLogger(__name__)
            logger.warning("Could not get stage")
            return []

        dynamic_sources: List[str] = []
        dynamic_shaders, dynamic_sources = USDtools._find_all_dynamic_shaders(stage, dynamic_sources)
        dynamic_lights, _ = USDtools._find_all_dynamic_lights(stage, dynamic_sources)
        return dynamic_shaders + dynamic_lights

    def _find_all_dynamic_shaders(stage: Usd.Stage, sources: List[str]):
        shaders: List[UsdShade.Shader] = [UsdShade.Shader(x) for x in stage.Traverse() if x.IsA(UsdShade.Shader)]
        result: List[DynamicPrim] = []

        prefix_length: int = len(USDtools.PREFIX)
        for shader in shaders:
            albedo = shader.GetInput("diffuse_texture").Get()
            # roughness = shader.GetInput("reflectionroughness_texture").Get()
            # metallic = shader.GetInput("metallic_texture").Get()
            # orm = shader.GetInput("ORM_texture").Get()
            # ambient_occlusion = shader.GetInput("ao_texture").Get()
            emissive = shader.GetInput("emissive_color_texture").Get()
            # emissive_mask = shader.GetInput("emissive_mask_texture").Get()
            # opacity = shader.GetInput("opacity_texture").Get()
            # normal = shader.GetInput("normalmap_texture").Get()
            # normal_detail = shader.GetInput("detail_normalmap_texture").Get()

            values_set = set([albedo, emissive])
            values_unique = list(values_set)
            for texture_value in values_unique:
                if texture_value:
                    path: str = texture_value.path
                    if len(path) > prefix_length:
                        candidate = path[:prefix_length]
                        if candidate == USDtools.PREFIX:
                            name = path[prefix_length:]
                            if name not in sources:
                                sources.append(name)
                            attr_ndi = shader.GetPrim().GetAttribute(USDtools.ATTR_NDI_NAME)
                            attr_ndi = attr_ndi.Get() if attr_ndi.IsValid() else None
                            attr_low = shader.GetPrim().GetAttribute(USDtools.ATTR_BANDWIDTH_NAME)
                            attr_low = attr_low.Get() if attr_low.IsValid() else False
                            p = DynamicPrim(shader.GetPath().pathString, name, attr_ndi, attr_low)
                            result.append(p)

        return result, sources

    def _find_all_dynamic_lights(stage: Usd.Stage, sources: List[str]):
        rect_lights: List[UsdLux.RectLight] = [UsdLux.RectLight(x) for x in stage.Traverse() if x.IsA(UsdLux.RectLight)]
        result: List[DynamicPrim] = []

        prefix_length: int = len(USDtools.PREFIX)
        for rect_light in rect_lights:
            # TODO: Filter those that have "isProjector" (the attribute doesn't exist)
            attribute = rect_light.GetPrim().GetAttribute("texture:file").Get()
            if attribute:
                path: str = attribute.path
                if len(path) > prefix_length:
                    candidate = path[:prefix_length]
                    if candidate == USDtools.PREFIX:
                        name = path[prefix_length:]
                        if name not in sources:
                            attr_ndi = rect_light.GetPrim().GetAttribute(USDtools.ATTR_NDI_NAME)
                            attr_ndi = attr_ndi.Get() if attr_ndi.IsValid() else None
                            attr_low = rect_light.GetPrim().GetAttribute(USDtools.ATTR_BANDWIDTH_NAME)
                            attr_low = attr_low.Get() if attr_low.IsValid() else False
                            p = DynamicPrim(rect_light.GetPath().pathString, name, attr_ndi, attr_low)
                            result.append(p)

        return result, sources

    def set_prim_ndi_attribute(path: str, value: str):
        stage = USDtools.get_stage()
        if not stage:
            logger = logging.getLogger(__name__)
            logger.error("Could not get stage")
            return

        prim: Usd.Prim = stage.GetPrimAtPath(path)
        if not prim.IsValid():
            logger = logging.getLogger(__name__)
            logger.error(f"Could not set the ndi attribute of prim at {path}")
            return

        prim.CreateAttribute(USDtools.ATTR_NDI_NAME, Sdf.ValueTypeNames.String).Set(value)

    def set_prim_lowbandwidth_attribute(path: str, value: bool):
        stage = USDtools.get_stage()
        if not stage:
            logger = logging.getLogger(__name__)
            logger.error("Could not get stage")
            return

        prim: Usd.Prim = stage.GetPrimAtPath(path)
        if not prim.IsValid():
            logger = logging.getLogger(__name__)
            logger.error(f"Could not set the bandwidth attribute of prim at {path}")
            return

        prim.CreateAttribute(USDtools.ATTR_BANDWIDTH_NAME, Sdf.ValueTypeNames.Bool).Set(value)

# region stage events
    def subscribe_to_stage_events(callback):
        return (
            omni.usd.get_context()
            .get_stage_event_stream()
            .create_subscription_to_pop(callback, name="mf.ov.ndi.STAGE_EVENT")
        )

    def is_StageEventType_OPENED(type) -> bool:
        return type == int(omni.usd.StageEventType.OPENED)

    def is_StageEventType_CLOSE(type) -> bool:
        return type == int(omni.usd.StageEventType.CLOSING) or type == int(omni.usd.StageEventType.CLOSED)
# endregion
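# A minimal usage sketch (run inside Omniverse Kit with a stage open; the material name is
# hypothetical): create an OmniPBR material whose diffuse texture points at a dynamic://
# source, then enumerate every prim bound to such a source.
#
#   safename = USDtools.make_name_valid("my Dynamic Matérial")  # -> "my_Dynamic_Material"
#   USDtools.create_dynamic_material(safename)
#   prims = USDtools.find_all_dynamic_sources()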
7,708
Python
44.081871
120
0.597172
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/comboboxModel.py
from .eventsystem import EventSystem

import omni.ui as ui
from typing import List


class ComboboxItem(ui.AbstractItem):
    def __init__(self, value: str):
        super().__init__()
        self.model = ui.SimpleStringModel(value)

    def value(self):
        return self.model.get_value_as_string()


class ComboboxModel(ui.AbstractItemModel):
    NONE_VALUE = "NONE"
    PROXY_VALUE = "PROXY (1080p30) - RED"

    def __init__(self, items: List[str], selected: str, name: str, index: int):
        super().__init__()

        self._name = name
        self._index = index

        # minimal model implementation
        self._current_index = ui.SimpleIntModel()
        self._current_index.add_value_changed_fn(lambda a: self._current_index_changed_fn())

        self.set_items_and_current(items, selected)

    def _current_index_changed_fn(self):
        self._item_changed(None)
        EventSystem.send_event(EventSystem.COMBOBOX_CHANGED_EVENT,
                               payload={"id": self._name, "index": self._index, "value": self._current_value()})

    def set_items_and_current(self, items: List[str], current: str):
        self._items = [ComboboxItem(text) for text in items]
        self._set_current_from_value(current)

    def _set_current_from_value(self, current: str):
        index = next((i for i, item in enumerate(self._items) if item.value() == current), 0)
        self._current_index.set_value(index)
        self._item_changed(None)

    def _current_value(self) -> str:
        current_item = self._items[self._current_index.get_value_as_int()]
        return current_item.value()

    # minimal model implementation
    def get_item_children(self, item):
        return self._items

    # minimal model implementation
    def get_item_value_model(self, item, _):
        if item is None:
            return self._current_index
        return item.model
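# A minimal usage sketch (names and sources are hypothetical): the model plugs directly
# into an omni.ui.ComboBox, and selection changes are broadcast as COMBOBOX_CHANGED_EVENT.
#
#   model = ComboboxModel(["NONE", "MY-PC (cam)"], selected="NONE",
#                         name="myDynamicMaterial", index=0)
#   combo = ui.ComboBox(model)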
1,893
Python
31.655172
112
0.628632
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/window.py
from .bindings import Binding
from .comboboxModel import ComboboxModel
from .eventsystem import EventSystem
from .model import Model
from .USDtools import USDtools

import asyncio
import carb.events
import omni.ui as ui
import omni.kit.app
import pyperclip
from typing import List


class Window(ui.Window):
    WINDOW_NAME = "NDI®"

    DEFAULT_TEXTURE_NAME = "myDynamicMaterial"
    NEW_TEXTURE_BTN_TXT = "Create Dynamic Texture"
    DISCOVER_TEX_BTN_TXT = "Discover Dynamic Textures"
    STOP_STREAMS_BTN_TXT = "Stop all streams"
    EMPTY_TEXTURE_LIST_TXT = "No dynamic texture found"

    def __init__(self, delegate=None, **kwargs):
        self._model: Model = Model()
        self._bindingPanels: List[BindingPanel] = []
        self._last_material_name = Window.DEFAULT_TEXTURE_NAME

        super().__init__(Window.WINDOW_NAME, **kwargs)

        self.frame.set_build_fn(self._build_fn)
        self._subscribe()
        self._model.search_for_dynamic_material()

    def destroy(self):
        for panel in self._bindingPanels:
            panel.destroy()
        self._model.destroy()
        self._unsubscribe()
        super().destroy()

    def _subscribe(self):
        self._sub: List[carb.events.ISubscription] = []
        self._sub.append(EventSystem.subscribe(EventSystem.BINDINGS_CHANGED_EVENT,
                                               self._bindings_updated_evt_callback))
        self._sub.append(EventSystem.subscribe(EventSystem.COMBOBOX_CHANGED_EVENT,
                                               self._combobox_changed_evt_callback))
        self._sub.append(EventSystem.subscribe(EventSystem.COMBOBOX_SOURCE_CHANGE_EVENT,
                                               self._ndi_sources_changed_evt_callback))
        self._sub.append(EventSystem.subscribe(EventSystem.NDI_STATUS_CHANGE_EVENT,
                                               self._ndi_status_change_evt_callback))
        self._sub.append(EventSystem.subscribe(EventSystem.STREAM_STOP_TIMEOUT_EVENT,
                                               self._stream_stop_timeout_evt_callback))
        self._sub.append(USDtools.subscribe_to_stage_events(self._stage_event_evt_callback))

    def _unsubscribe(self):
        for sub in self._sub:
            sub.unsubscribe()
            sub = None
        self._sub.clear()

    def _build_fn(self):
        with ui.VStack(style={"margin": 3}):
            self._ui_section_header()
            self._ui_section_bindings()

# region events callback
    def _bindings_updated_evt_callback(self, e: carb.events.IEvent):
        self.frame.rebuild()

    def _combobox_changed_evt_callback(self, e: carb.events.IEvent):
        value: str = e.payload["value"]
        dynamic_id = e.payload["id"]
        panel_index = e.payload["index"]
        self._model.apply_new_binding_source(dynamic_id, value)
        self._model.set_ndi_source_prim_attr(dynamic_id, value)

        if (len(self._bindingPanels) > panel_index):
            self._bindingPanels[panel_index].combobox_item_changed()

    def _ndi_sources_changed_evt_callback(self, e: carb.events.IEvent):
        for panel in self._bindingPanels:
            panel.combobox_items_changed(e.payload["sources"])

    def _ndi_status_change_evt_callback(self, e: carb.events.IEvent):
        for panel in self._bindingPanels:
            panel.check_for_ndi_status()

    def _stream_stop_timeout_evt_callback(self, e: carb.events.IEvent):
        panel: BindingPanel = next(x for x in self._bindingPanels if x.get_dynamic_id() == e.payload["dynamic_id"])
        panel.on_stop_stream()

    def _stage_event_evt_callback(self, e: carb.events.IEvent):
        if USDtools.is_StageEventType_OPENED(e.type):
            self._model.search_for_dynamic_material()
            self._model.stop_all_streams()

        if USDtools.is_StageEventType_CLOSE(e.type):
            self._model.stop_all_streams()
# endregion

# region UI
    def _ui_section_header(self):
        button_style = {"Button": {"stack_direction": ui.Direction.LEFT_TO_RIGHT}}
        with ui.HStack(height=0):
            self._dynamic_name = ui.StringField()
            self._dynamic_name.model.set_value(self._last_material_name)
            ui.Button(Window.NEW_TEXTURE_BTN_TXT, image_url="resources/glyphs/menu_plus.svg", image_width=24,
                      style=button_style, clicked_fn=self._on_click_create_dynamic_material)

        with ui.HStack(height=0):
            ui.Button(Window.DISCOVER_TEX_BTN_TXT, image_url="resources/glyphs/menu_refresh.svg", image_width=24,
                      style=button_style, clicked_fn=self._on_click_refresh_materials)
            ui.Button(Window.STOP_STREAMS_BTN_TXT, clicked_fn=self._on_click_stop_all_streams)

    def _ui_section_bindings(self):
        self._bindingPanels = []
        with ui.ScrollingFrame():
            with ui.VStack():
                count: int = self._model.get_bindings_count()
                if count == 0:
                    ui.Label(Window.EMPTY_TEXTURE_LIST_TXT)
                else:
                    for i in range(count):
                        self._bindingPanels.append(BindingPanel(i, self, height=0))
# endregion

# region controls
    def _on_click_create_dynamic_material(self):
        self._stop_all_streams()
        name: str = self._dynamic_name.model.get_value_as_string()
        self._last_material_name = name
        self._model.create_dynamic_material(name)

    def _on_click_refresh_materials(self):
        self._stop_all_streams()
        self._model.search_for_dynamic_material()

    def _on_click_stop_all_streams(self):
        self._stop_all_streams()

    def _stop_all_streams(self):
        self._model.stop_all_streams()
        for panel in self._bindingPanels:
            panel.on_stop_stream()
# endregion

# region BindingPanel Callable
    def get_binding_data_from_index(self, index: int):
        return self._model.get_binding_data_from_index(index)

    def get_choices_for_combobox(self) -> List[str]:
        return self._model.get_ndi_source_list()

    def apply_lowbandwidth_value(self, dynamic_id: str, value: bool):
        self._model.apply_lowbandwidth_value(dynamic_id, value)
        self._model.set_lowbandwidth_prim_attr(dynamic_id, value)

    def try_add_stream(self, binding: Binding, lowbandwidth: bool, update_fps_fn, update_dimensions_fn) -> bool:
        return self._model.try_add_stream(binding, lowbandwidth, update_fps_fn, update_dimensions_fn)

    def stop_stream(self, binding: Binding):
        return self._model.stop_stream(binding)
# endregion


class BindingPanel(ui.CollapsableFrame):
    NDI_COLOR_STOPPED = "#E6E7E8"
    NDI_COLOR_PLAYING = "#78B159"
    NDI_COLOR_WARNING = "#F4900C"
    NDI_COLOR_INACTIVE = "#DD2E45"
    NDI_STATUS = "resources/glyphs/circle.svg"
    PLAY_ICON = "resources/glyphs/timeline_play.svg"
    PAUSE_ICON = "resources/glyphs/toolbar_pause.svg"
    COPY_ICON = "resources/glyphs/copy.svg"
    LOW_BANDWIDTH_ICON = "resources/glyphs/AOV_dark.svg"
    PLAYPAUSE_BTN_NAME = "play_pause_btn"
    BANDWIDTH_BTN_NAME = "low_bandwidth_btn"
    COPYPATH_BTN_NAME = "copy_path_btn"

    RUNNING_LABEL_SUFFIX = " - running"

    def __init__(self, index: int, window: Window, **kwargs):
        self._index = index
        self._window = window

        binding, _, ndi = self._get_data()
        choices = self._get_choices()
        self._dynamic_id = binding.dynamic_id
        self._lowbandwidth_value = binding.lowbandwidth
        self._is_playing = False

        super().__init__(binding.dynamic_id, **kwargs)

        self._info_window = None

        with self:
            with ui.HStack():
                self._status_icon = ui.Image(BindingPanel.NDI_STATUS, width=20,
                                             mouse_released_fn=self._show_info_window)
                self._set_ndi_status_icon(ndi.active)

                self._combobox_alt = ui.Label("")
                self._set_combobox_alt_text(binding.ndi_source)
                self._combobox_alt.visible = False

                self._combobox = ComboboxModel(choices, binding.ndi_source, binding.dynamic_id, self._index)
                self._combobox_ui = ui.ComboBox(self._combobox)

                self.play_pause_toolbutton = ui.Button(text="", image_url=BindingPanel.PLAY_ICON, height=30,
                                                       width=30, clicked_fn=self._on_click_play_pause_ndi,
                                                       name=BindingPanel.PLAYPAUSE_BTN_NAME)
                self._lowbandwidth_toolbutton = ui.ToolButton(image_url=BindingPanel.LOW_BANDWIDTH_ICON, width=30,
                                                              height=30, tooltip="Low bandwidth mode",
                                                              clicked_fn=self._set_low_bandwidth_value,
                                                              name=BindingPanel.BANDWIDTH_BTN_NAME)
                self._lowbandwidth_toolbutton.model.set_value(self._lowbandwidth_value)
                ui.Button("", image_url=BindingPanel.COPY_ICON, width=30, height=30, clicked_fn=self._on_click_copy,
                          tooltip="Copy dynamic texture path(dynamic://*)", name=BindingPanel.COPYPATH_BTN_NAME)

    def destroy(self):
        self._info_window_destroy()

# region Info Window
    def _show_info_window(self, _x, _y, button, _modifier):
        if (button == 0):  # left click
            binding, _, _ = self._get_data()
            if not self._info_window:
                self._info_window = StreamInfoWindow(f"{self._dynamic_id} info", binding.ndi_source,
                                                     width=280, height=200)
                self._info_window.set_visibility_changed_fn(self._info_window_visibility_changed)
            elif self._info_window:
                self._info_window_destroy()

    def _info_window_visibility_changed(self, visible):
        if not visible:
            asyncio.ensure_future(self._info_window_destroy_async())

    def _info_window_destroy(self):
        if self._info_window:
            self._info_window.destroy()
            self._info_window = None

    async def _info_window_destroy_async(self):
        await omni.kit.app.get_app().next_update_async()
        if self._info_window:
            self._info_window_destroy()

    def update_fps(self, fps_current: float, fps_average: float, fps_expected: float):
        if self._info_window:
            self._info_window.set_fps_values(fps_current, fps_average, fps_expected)

    def update_details(self, width: int, height: int, color_format: str):
        if self._info_window:
            self._info_window.set_stream_details(width, height, color_format)
# endregion

    def combobox_items_changed(self, items: List[str]):
        binding, _, _ = self._get_data()
        self._combobox.set_items_and_current(items, binding.ndi_source)

    def check_for_ndi_status(self):
        _, _, ndi = self._get_data()
        self._set_ndi_status_icon(ndi.active)

    def combobox_item_changed(self):
        binding, _, ndi = self._get_data()
        self._set_combobox_alt_text(binding.ndi_source)
        self._set_ndi_status_icon(ndi.active)
        if self._info_window:
            self._info_window.set_stream_name(binding.ndi_source)

    def get_dynamic_id(self) -> str:
        return self._dynamic_id

    def _get_data(self):
        return self._window.get_binding_data_from_index(self._index)

    def _get_choices(self):
        return self._window.get_choices_for_combobox()

    def _on_click_copy(self):
        pyperclip.copy(f"{USDtools.PREFIX}{self._dynamic_id}")

    def _set_low_bandwidth_value(self):
        self._lowbandwidth_value = not self._lowbandwidth_value
        self._window.apply_lowbandwidth_value(self._dynamic_id, self._lowbandwidth_value)

    def _on_play_stream(self):
        self._is_playing = True
        self.play_pause_toolbutton.image_url = BindingPanel.PAUSE_ICON
        self._lowbandwidth_toolbutton.enabled = False
        self._combobox_ui.visible = False
        self._combobox_alt.visible = True
        self.check_for_ndi_status()

    def on_stop_stream(self):
        self._is_playing = False
        self.play_pause_toolbutton.image_url = BindingPanel.PLAY_ICON
        self._lowbandwidth_toolbutton.enabled = True
        self._combobox_ui.visible = True
        self._combobox_alt.visible = False
        self.check_for_ndi_status()

    def _on_click_play_pause_ndi(self):
        binding, _, _ = self._get_data()
        if self._is_playing:
            self._window.stop_stream(binding)
            self.on_stop_stream()
        else:
            if self._window.try_add_stream(binding, self._lowbandwidth_value, self.update_fps, self.update_details):
                self._on_play_stream()

    def _set_combobox_alt_text(self, text: str):
        self._combobox_alt.text = f"{text}{BindingPanel.RUNNING_LABEL_SUFFIX}"

    def _set_ndi_status_icon(self, active: bool):
        if active and self._is_playing:
            self._status_icon.style = {"color": ui.color(BindingPanel.NDI_COLOR_PLAYING)}
        elif active and not self._is_playing:
            self._status_icon.style = {"color": ui.color(BindingPanel.NDI_COLOR_STOPPED)}
        elif not active and self._is_playing:
            self._status_icon.style = {"color": ui.color(BindingPanel.NDI_COLOR_WARNING)}
        else:  # not active and not self._is_playing
            self._status_icon.style = {"color": ui.color(BindingPanel.NDI_COLOR_INACTIVE)}


class StreamInfoWindow(ui.Window):
    def __init__(self, dynamic_id: str, ndi_id: str, delegate=None, **kwargs):
        super().__init__(dynamic_id, **kwargs)
        self.frame.set_build_fn(self._build_fn)
        self._stream_name = ndi_id

    def destroy(self):
        super().destroy()

    def _build_fn(self):
        with ui.VStack(height=0):
            with ui.HStack():
                ui.Label("Stream name:")
                self._stream_name_model = ui.StringField(enabled=False).model
                self._stream_name_model.set_value(self._stream_name)
            with ui.HStack():
                ui.Label("Current fps:")
                self._fps_current_model = ui.FloatField(enabled=False).model
                self._fps_current_model.set_value(0.0)
            with ui.HStack():
                ui.Label("Average fps:")
                self._fps_average_model = ui.FloatField(enabled=False).model
                self._fps_average_model.set_value(0.0)
            with ui.HStack():
                ui.Label("Expected fps:")
                self._fps_expected_model = ui.FloatField(enabled=False).model
                self._fps_expected_model.set_value(0.0)
            with ui.HStack():
                ui.Label("Width:")
                self._dimensions_width_model = ui.IntField(enabled=False).model
                self._dimensions_width_model.set_value(0)
            with ui.HStack():
                ui.Label("Height:")
                self._dimensions_height_model = ui.IntField(enabled=False).model
                self._dimensions_height_model.set_value(0)
            with ui.HStack():
                ui.Label("Color format:")
                self._color_format_model = ui.StringField(enabled=False).model
                self._color_format_model.set_value("")

    def set_fps_values(self, fps_current: float, fps_average: float, fps_expected: float):
        # If this property exists, all the others do as well since it's the last one to be initialized
        if hasattr(self, "_fps_expected_model"):
            self._fps_current_model.set_value(fps_current)
            self._fps_average_model.set_value(fps_average)
            self._fps_expected_model.set_value(fps_expected)

    def set_stream_name(self, name: str):
        # No need to check if attribute exists because no possibility of concurrency between build fn and caller
        self._stream_name_model.set_value(name)
        # Reset other values
        self._fps_current_model.set_value(0.0)
        self._fps_average_model.set_value(0.0)
        self._fps_expected_model.set_value(0.0)
        self._dimensions_width_model.set_value(0)
        self._dimensions_height_model.set_value(0)
        self._color_format_model.set_value("")

    def set_stream_details(self, width: int, height: int, color_format: str):
        if hasattr(self, "_color_format_model"):
            self._dimensions_width_model.set_value(width)
            self._dimensions_height_model.set_value(height)
            # Original format is similar to FourCCVideoType.FOURCC_VIDEO_TYPE_RGBA, we want to display only "RGBA"
            color_format_simple = color_format.split("_")[-1]
            self._color_format_model.set_value(color_format_simple)
16,724
Python
41.128463
120
0.61032
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/bindings.py
from .comboboxModel import ComboboxModel
from .eventsystem import EventSystem

import carb.events
from dataclasses import dataclass
from typing import List


@dataclass
class DynamicPrim:
    path: str
    dynamic_id: str
    ndi_source_attr: str
    lowbandwidth_attr: bool


@dataclass
class Binding():
    dynamic_id: str
    ndi_source: str
    lowbandwidth: bool


@dataclass
class NDIData():
    source: str
    active: bool


class BindingsModel():
    NONE_DATA = NDIData(ComboboxModel.NONE_VALUE, False)

    def __init__(self):
        self._bindings: List[Binding] = []
        self._dynamic_prims: List[DynamicPrim] = []
        self._ndi_sources: List[NDIData] = []

        self._ndi_sources.append(BindingsModel.NONE_DATA)
        self._sub = EventSystem.subscribe(EventSystem.NDIFINDER_NEW_SOURCES, self._ndi_sources_change_evt_callback)

    def destroy(self):
        self._sub.unsubscribe()
        self._sub = None
        self._dynamic_prims = []
        self._bindings = []
        self._ndi_sources = []

    def count(self):
        return len(self._bindings)

    def get(self, index: int) -> Binding:
        binding: Binding = self._bindings[index]
        prim: DynamicPrim = self.find_binding_from_id(binding.dynamic_id)
        ndi: NDIData = self._find_ndi_from_source(binding.ndi_source)
        return binding, prim, ndi

    def get_source_list(self) -> List[str]:
        return [x.source for x in self._ndi_sources]

    def _get_non_static_source_list(self) -> List[NDIData]:
        return self._ndi_sources[1:]  # Excludes NONE_DATA

    def get_prim_list(self) -> List[str]:
        return [x for x in self._dynamic_prims]

    def bind(self, dynamic_id, new_source):
        binding: Binding = self.find_binding_from_id(dynamic_id)
        binding.ndi_source = new_source

    def set_low_bandwidth(self, dynamic_id: str, value: bool):
        binding: Binding = self.find_binding_from_id(dynamic_id)
        binding.lowbandwidth = value

    def find_binding_from_id(self, dynamic_id: str) -> Binding:
        return next((x for x in self._bindings if x.dynamic_id == dynamic_id), None)

    def _find_binding_from_ndi(self, ndi_source: str) -> Binding:
        return next((x for x in self._bindings if x.source == ndi_source), None)

    def _find_ndi_from_source(self, ndi_source: str) -> NDIData:
        if ndi_source is None:
            return self._ndi_sources[0]
        return next((x for x in self._ndi_sources if x.source == ndi_source), None)

    def update_dynamic_prims(self, prims: List[DynamicPrim]):
        self._dynamic_prims = prims
        self._update_ndi_from_prims()
        self._update_bindings_from_prims()
        EventSystem.send_event(EventSystem.BINDINGS_CHANGED_EVENT)

    def _update_ndi_from_prims(self):
        for dynamic_prim in self._dynamic_prims:
            ndi: NDIData = self._find_ndi_from_source(dynamic_prim.ndi_source_attr)
            if ndi is None:
                self._ndi_sources.append(NDIData(dynamic_prim.ndi_source_attr, False))

    def _update_bindings_from_prims(self):
        self._bindings.clear()
        for dynamic_prim in self._dynamic_prims:
            source_attr = dynamic_prim.ndi_source_attr
            source: str = source_attr if source_attr is not None else BindingsModel.NONE_DATA.source
            self._bindings.append(Binding(dynamic_prim.dynamic_id, source, dynamic_prim.lowbandwidth_attr))

    def _ndi_sources_change_evt_callback(self, e: carb.events.IEvent):
        sources = e.payload["sources"]
        self._update_ndi_new_and_active_sources(sources)
        self._update_ndi_inactive_sources(sources)
        EventSystem.send_event(EventSystem.COMBOBOX_SOURCE_CHANGE_EVENT,
                               payload={"sources": [x.source for x in self._ndi_sources]})
        EventSystem.send_event(EventSystem.NDI_STATUS_CHANGE_EVENT)

    def _update_ndi_new_and_active_sources(self, sources: List[str]):
        for source in sources:
            data: NDIData = self._find_ndi_from_source(source)
            if data is None:
                data = NDIData(source, True)
                self._ndi_sources.append(data)
            else:
                data.active = True

    def _update_ndi_inactive_sources(self, sources: List[str]):
        for ndi in self._get_non_static_source_list():
            is_active = next((x for x in sources if x == ndi.source), None)
            if is_active is None:
                ndi.active = False
4,477
Python
33.984375
115
0.635694
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/deps/NDIlib/__init__.py
import os
import sys

if os.name == 'nt' and sys.version_info.major >= 3 and sys.version_info.minor >= 8:
    os.add_dll_directory(os.path.dirname(__file__))

from .NDIlib import *
181
Python
21.749997
83
0.679558
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/tests/__init__.py
from .test_USDtools import *
from .test_ui import *
52
Python
16.666661
28
0.730769
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/tests/test_ui.py
import omni.kit.test

from ..window import Window, BindingPanel
from ..comboboxModel import ComboboxModel
from .test_utils import (make_stage, close_stage, get_window, DYNAMIC_ID1, DYNAMIC_ID2, create_dynamic_material,
                         create_dynamic_rectlight, refresh_dynamic_list, get_dynamic_material_prim,
                         add_proxy_source)


class UITestsHeader(omni.kit.test.AsyncTestCase):
    def setUp(self):
        self._stage = make_stage()
        self._window = get_window()

    def tearDown(self):
        close_stage()

    async def test_create_material_button(self):
        field = self._window.find("**/StringField[*]")
        field.widget.model.set_value(DYNAMIC_ID1)
        self.assertEqual(field.widget.model.get_value_as_string(), DYNAMIC_ID1)

        button = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")
        await button.click()

        prim = get_dynamic_material_prim(DYNAMIC_ID1)
        self.assertTrue(prim.IsValid)

    async def test_texture_discovery(self):
        create_dynamic_material()
        create_dynamic_rectlight()

        await refresh_dynamic_list(self._window)

        panels = self._window.find_all("**/BindingPanel[*]")
        self.assertEqual(len(panels), 2)

        panel1_found = False
        panel2_found = False
        for panel in panels:
            labels = panel.find_all("**/Label[*]")
            for label in labels:
                if label.widget.text == DYNAMIC_ID1:
                    panel1_found = True
                elif label.widget.text == DYNAMIC_ID2:
                    panel2_found = True

        self.assertTrue(panel1_found)
        self.assertTrue(panel2_found)


class UITestsPanel(omni.kit.test.AsyncTestCase):
    def setUp(self):
        self._stage = make_stage()
        self._window = get_window()

    def tearDown(self):
        close_stage()

    async def test_no_panel_on_start(self):
        await refresh_dynamic_list(self._window)

        panel = self._window.find("**/BindingPanel[*]")
        self.assertIsNone(panel)

        label = self._window.find("**/Label[*]")
        self.assertEqual(label.widget.text, Window.EMPTY_TEXTURE_LIST_TXT)

    async def test_combobox_defaults(self):
        await refresh_dynamic_list(self._window)
        add_proxy_source(self._window.widget)

        button = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")
        await button.click()

        combobox = self._window.find("**/ComboBox[*]")
        model = combobox.widget.model
        self.assertEqual(model._current_value(), ComboboxModel.NONE_VALUE)

        model._current_index.set_value(1)
        self.assertNotEquals(model._current_value(), ComboboxModel.NONE_VALUE)

        model._current_index.set_value(0)
        self.assertEqual(model._current_value(), ComboboxModel.NONE_VALUE)

    async def test_low_bandwidth_btn(self):
        await refresh_dynamic_list(self._window)

        button = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")
        await button.click()

        panel = self._window.find("**/BindingPanel[*]")
        binding, _, _ = panel.widget._get_data()
        self.assertFalse(binding.lowbandwidth)

        button = panel.find(f"**/ToolButton[*].name=='{BindingPanel.BANDWIDTH_BTN_NAME}'")
        await button.click()
        binding, _, _ = panel.widget._get_data()
        self.assertTrue(binding.lowbandwidth)

        await button.click()
        binding, _, _ = panel.widget._get_data()
        self.assertFalse(binding.lowbandwidth)

    async def test_low_bandwidth_stream(self):
        await refresh_dynamic_list(self._window)
        add_proxy_source(self._window.widget)

        button = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")
        await button.click()

        combobox = self._window.find("**/ComboBox[*]")
        combobox.widget.model._set_current_from_value(ComboboxModel.PROXY_VALUE)

        panel = self._window.find("**/BindingPanel[*]")
        button_bandwidth = panel.find(f"**/ToolButton[*].name=='{BindingPanel.BANDWIDTH_BTN_NAME}'")
        button_playpause = panel.find(f"**/Button[*].name=='{BindingPanel.PLAYPAUSE_BTN_NAME}'")

        self.assertTrue(panel.widget._lowbandwidth_toolbutton.enabled)

        await button_playpause.click()
        self.assertFalse(self._window.widget._model._ndi._streams[0]._lowbandwidth)
        self.assertFalse(panel.widget._lowbandwidth_toolbutton.enabled)

        await button_playpause.click()
        self.assertTrue(panel.widget._lowbandwidth_toolbutton.enabled)

        await button_bandwidth.click()
        await button_playpause.click()
        self.assertTrue(self._window.widget._model._ndi._streams[0]._lowbandwidth)
        await button_playpause.click()

    async def test_proxy_play_pause(self):
        await refresh_dynamic_list(self._window)
        add_proxy_source(self._window.widget)

        button_create = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")
        await button_create.click()

        combobox = self._window.find("**/ComboBox[*]")
        combobox.widget.model._set_current_from_value(ComboboxModel.PROXY_VALUE)

        panel = self._window.find("**/BindingPanel[*]")
        button_playpause = panel.find(f"**/Button[*].name=='{BindingPanel.PLAYPAUSE_BTN_NAME}'")

        self.assertTrue(panel.widget._combobox_ui.visible)
        self.assertFalse(panel.widget._combobox_alt.visible)

        await button_playpause.click()
        self.assertGreater(len(self._window.widget._model._ndi._streams), 0)
        self.assertFalse(panel.widget._combobox_ui.visible)
        self.assertTrue(panel.widget._combobox_alt.visible)

        await button_playpause.click()
        self.assertEquals(len(self._window.widget._model._ndi._streams), 0)

    async def test_proxy_multiple(self):
        await refresh_dynamic_list(self._window)

        field = self._window.find("**/StringField[*]")
        button_create = self._window.find(f"**/Button[*].text=='{Window.NEW_TEXTURE_BTN_TXT}'")

        field.widget.model.set_value(DYNAMIC_ID1)
        await button_create.click()

        field.widget.model.set_value(DYNAMIC_ID2)
        await button_create.click()

        comboboxes = self._window.find_all("**/ComboBox[*]")
        for combobox in comboboxes:
            combobox.widget.model._set_current_from_value(ComboboxModel.PROXY_VALUE)

        buttons_playpause = self._window.find_all(f"**/Button[*].name=='{BindingPanel.PLAYPAUSE_BTN_NAME}'")
        for button_playpause in buttons_playpause:
            await button_playpause.click()
        self.assertEquals(len(self._window.widget._model._ndi._streams), 2)

        button_stopall = self._window.find(f"**/Button[*].text=='{Window.STOP_STREAMS_BTN_TXT}'")
        await button_stopall.click()
        self.assertEquals(len(self._window.widget._model._ndi._streams), 0)

        panels = self._window.find_all("**/BindingPanel[*]")
        for panel in panels:
            self.assertTrue(panel.widget._combobox_ui.visible)
            self.assertFalse(panel.widget._combobox_alt.visible)
7,140
Python
38.672222
117
0.640196
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/tests/test_utils.py
import omni
import omni.kit.ui_test as ui_test
from pxr import Usd, UsdLux, UsdShade

from ..USDtools import USDtools
from ..window import Window
from ..eventsystem import EventSystem
from ..comboboxModel import ComboboxModel

SOURCE1 = "MY-PC (Test Pattern)"
SOURCE2 = "MY-PC (Test Pattern 2)"
DYNAMIC_ID1 = "myDynamicMaterial1"
DYNAMIC_ID2 = "myDynamicMaterial2"
DUMMY_PATH = "/path/to/dummy"
RECTLIGHT_NAME = "MyRectLight"
DEFAULT_PRIM_NAME = "World"


def make_stage() -> Usd.Stage:
    usd_context = omni.usd.get_context()
    usd_context.new_stage()
    # self._stage = Usd.Stage.CreateInMemory()
    stage = usd_context.get_stage()
    prim = stage.DefinePrim(f"/{DEFAULT_PRIM_NAME}")
    stage.SetDefaultPrim(prim)
    return stage


def get_stage() -> Usd.Stage:
    usd_context = omni.usd.get_context()
    stage = usd_context.get_stage()
    return stage


def close_stage():
    usd_context = omni.usd.get_context()
    assert usd_context.can_close_stage()
    usd_context.close_stage()


def get_window():
    return ui_test.find(Window.WINDOW_NAME)


def create_dynamic_material() -> UsdShade.Material:
    USDtools.create_dynamic_material(DYNAMIC_ID1)
    return get_dynamic_material_prim(DYNAMIC_ID1)


def create_dynamic_rectlight():
    stage = get_stage()
    path: str = f"{stage.GetDefaultPrim().GetPath()}/{RECTLIGHT_NAME}"
    light = UsdLux.RectLight.Define(stage, path)
    light.GetPrim().GetAttribute("texture:file").Set(f"{USDtools.PREFIX}{DYNAMIC_ID2}")


def get_dynamic_material_prim(name: str):
    usd_context = omni.usd.get_context()
    stage = usd_context.get_stage()
    return stage.GetPrimAtPath(f"{stage.GetDefaultPrim().GetPath()}/{USDtools.SCOPE_NAME}/{name}")


async def refresh_dynamic_list(window):
    button = window.find(f"**/Button[*].text=='{Window.DISCOVER_TEX_BTN_TXT}'")
    await button.click()


def add_proxy_source(window):
    EventSystem.send_event(EventSystem.NDIFINDER_NEW_SOURCES, payload={"sources": [ComboboxModel.PROXY_VALUE]})
1,995
Python
27.112676
111
0.707268
MomentFactory/Omniverse-NDI-extension/exts/mf.ov.ndi/mf/ov/ndi/tests/test_USDtools.py
from ..USDtools import USDtools
from .test_utils import make_stage, close_stage, create_dynamic_material, create_dynamic_rectlight, SOURCE1

import omni.kit.test


class USDValidNameUnitTest(omni.kit.test.AsyncTestCase):
    async def test_name_valid(self):
        self.check_name_valid("myDynamicMaterial", "myDynamicMaterial")
        self.check_name_valid("789testing123numbers456", "_89testing123numbers456")
        self.check_name_valid("", "_")
        self.check_name_valid("àâáäãåÀÂÁÃÅÄ", "aaaaaaAAAAAA")
        self.check_name_valid("èêéëÈÊÉË", "eeeeEEEE")
        self.check_name_valid("ìîíïÌÎÍÏ", "iiiiIIII")
        self.check_name_valid("òôóöõøÒÔÓÕÖØ", "ooooooOOOOOO")
        self.check_name_valid("ùûúüÙÛÚÜ", "uuuuUUUU")
        self.check_name_valid("æœÆŒçÇ°ðÐñÑýÝþÞÿß", "aeoeAEOEcCdegdDnNyYthThyss")
        self.check_name_valid("!¡¿@#$%?&*()-_=+/`^~.,'\\<>`;:¤{}[]|\"¦¨«»¬¯±´·¸÷",
                              "___________________________________________________")
        self.check_name_valid("¢£¥§©ªº®¹²³µ¶¼½¾×", "C_PSY_SS_c_ao_r_123uP_1_4_1_2_3_4x")

    def check_name_valid(self, source, expected):
        v: str = USDtools.make_name_valid(source)
        self.assertEqual(v, expected, f"Expected \"{v}\", derived from \"{source}\", to equals \"{expected}\"")


class USDToolsUnitTest(omni.kit.test.AsyncTestCase):
    def setUp(self):
        self._stage = make_stage()

    def tearDown(self):
        close_stage()

    async def test_create_dynamic_material(self):
        material = create_dynamic_material()
        prim = self._stage.GetPrimAtPath(material.GetPath())
        self.assertIsNotNone(prim)

    async def test_find_dynamic_sources(self):
        create_dynamic_material()
        create_dynamic_rectlight()

        sources = USDtools.find_all_dynamic_sources()
        self.assertEqual(len(sources), 2)

    async def test_set_property_ndi(self):
        material = create_dynamic_material()
        path = material.GetPath()
        USDtools.set_prim_ndi_attribute(path, SOURCE1)

        attr = material.GetPrim().GetAttribute(USDtools.ATTR_NDI_NAME)
        self.assertEqual(attr.Get(), SOURCE1)

    async def test_set_property_bandwidth(self):
        material = create_dynamic_material()
        path = material.GetPath()
        USDtools.set_prim_lowbandwidth_attribute(path, True)

        attr = material.GetPrim().GetAttribute(USDtools.ATTR_BANDWIDTH_NAME)
        self.assertTrue(attr.Get())

        USDtools.set_prim_lowbandwidth_attribute(path, False)
        self.assertFalse(attr.Get())
2,554
Python
38.921874
111
0.634299
MomentFactory/Omniverse-MPCDI-converter/bootstrap.py
# Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import io
import packmanapi
import os
import sys

REPO_ROOT = os.path.dirname(os.path.realpath(__file__))
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps", "repo-deps.packman.xml")

if __name__ == "__main__":
    # pull all repo dependencies first
    # and add them to the python path
    with contextlib.redirect_stdout(io.StringIO()):
        deps = packmanapi.pull(REPO_DEPS_FILE)

    for dep_path in deps.values():
        if dep_path not in sys.path:
            sys.path.append(dep_path)

    sys.path.append(REPO_ROOT)

    import omni.repo.usd
    omni.repo.usd.bootstrap(REPO_ROOT)
1,191
Python
31.216215
74
0.717044
MomentFactory/Omniverse-MPCDI-converter/tools/packman/packmanconf.py
# Use this file to bootstrap packman into your Python environment (3.10.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)

import os
import platform
import sys


def init():
    """Call this function to initialize the packman configuration.

    Calls to the packman API will work after successfully calling this function.

    Note:
        This function only needs to be called once during the execution of your
        program. Calling it repeatedly is harmless but wasteful.

    Compatibility with your Python interpreter is checked and upon failure the
    function will report what is required.

    Example:
        >>> import packmanconf
        >>> packmanconf.init()
        >>> import packmanapi
        >>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
    """
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if major != 3 or minor != 10:
        raise RuntimeError(
            f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
        )
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    os.environ["PM_INSTALL_PATH"] = conf_dir
    packages_root = get_packages_root(conf_dir)
    version = get_version(conf_dir)
    module_dir = get_module_dir(conf_dir, packages_root, version)
    sys.path.insert(1, module_dir)


def get_packages_root(conf_dir: str) -> str:
    root = os.getenv("PM_PACKAGES_ROOT")
    if not root:
        platform_name = platform.system()
        if platform_name == "Windows":
            drive, _ = os.path.splitdrive(conf_dir)
            root = os.path.join(drive, "packman-repo")
        elif platform_name == "Darwin":
            # macOS (note: a leading slash on the second component would make os.path.join
            # discard the home directory, so the relative form is used here)
            root = os.path.join(
                os.path.expanduser("~"), "Library/Application Support/packman-cache"
            )
        elif platform_name == "Linux":
            try:
                cache_root = os.environ["XDG_HOME_CACHE"]
            except KeyError:
                cache_root = os.path.join(os.path.expanduser("~"), ".cache")
            return os.path.join(cache_root, "packman")
        else:
            raise RuntimeError(f"Unsupported platform '{platform_name}'")
    # make sure the path exists:
    os.makedirs(root, exist_ok=True)
    return root


def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
    module_dir = os.path.join(packages_root, "packman-common", version)
    if not os.path.exists(module_dir):
        import tempfile

        tf = tempfile.NamedTemporaryFile(delete=False)
        target_name = tf.name
        tf.close()
        url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
        print(f"Downloading '{url}' ...")
        import urllib.request

        urllib.request.urlretrieve(url, target_name)
        from importlib.machinery import SourceFileLoader

        # import module from path provided
        script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
        ip = SourceFileLoader("install_package", script_path).load_module()
        print("Unpacking ...")
        ip.install_package(target_name, module_dir)
        os.unlink(tf.name)
    return module_dir


def get_version(conf_dir: str):
    path = os.path.join(conf_dir, "packman")
    if not os.path.exists(path):  # in dev repo fallback
        path += ".sh"
    with open(path, "rt", encoding="utf8") as launch_file:
        for line in launch_file.readlines():
            if line.startswith("PM_PACKMAN_VERSION"):
                _, value = line.split("=")
                return value.strip()
    raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
3,932
Python
35.416666
95
0.632503
MomentFactory/Omniverse-MPCDI-converter/exts/mf.ov.mpcdi_converter/mf/ov/mpcdi_converter/extension.py
import os
import time
from typing import List

import omni.ext
import omni.client
import carb
import omni.kit.notification_manager as nm
from omni.kit.notification_manager import NotificationStatus
from omni.kit.menu import utils
from omni.kit.tool.asset_importer.file_picker import FilePicker
from omni.kit.tool.asset_importer.filebrowser import FileBrowserMode, FileBrowserSelectionType
import omni.ui as ui
import omni.kit.tool.asset_importer as ai
import omni.kit.window.content_browser as content

from .omni_client_wrapper import OmniClientWrapper
import xml.etree.ElementTree as ET
from pxr import UsdGeom, Sdf, Gf, Tf
import math
import logging


class MPCDIConverterContext:
    usd_reference_path = ""


class MPCDIConverterHelper:
    def __init__(self):
        pass

    def _cleanNameForUSD(self, strIn: str) -> str:
        strOut = strIn
        # Do not allow for a blank name
        if len(strOut) == 0:
            return "Default"
        elif len(strOut) == 1 and strIn.isnumeric():
            # If we have an index as a name, we only need to add _ beforehand.
            return "_" + strIn
        return Tf.MakeValidIdentifier(strIn)

    def _convert_xml_to_usd(self, absolute_path_xml):
        result = 0

        try:
            _, _, content = omni.client.read_file(absolute_path_xml)

            data = memoryview(content).tobytes()

            # Read xml file here
            root = ET.fromstring(data)

            hasLensShifting = False

            stage = omni.usd.get_context().get_stage()

            mpcdiId = "/MPCDI"
            stage.DefinePrim(mpcdiId, "Xform")

            # Create usd content here
            for display in root:
                if display.tag != 'display':
                    continue

                for buffer in display:
                    bufferId = buffer.attrib['id']
                    bufferPath = mpcdiId + '/' + self._cleanNameForUSD(bufferId)
                    stage.DefinePrim(bufferPath, "Scope")

                    # A region is a projector
                    for region in buffer:
                        # Get coordinate frame
                        coordinateFrame = region.find('coordinateFrame')

                        # Get Position
                        posX = float(coordinateFrame.find('posx').text) * 10
                        posY = float(coordinateFrame.find('posy').text) * 10
                        posZ = float(coordinateFrame.find('posz').text) * 10

                        # Get Axis up
                        upX = float(coordinateFrame.find('yawx').text)
                        upY = float(coordinateFrame.find('yawy').text)
                        upZ = float(coordinateFrame.find('yawz').text)

                        # Get Axis right
                        rightX = float(coordinateFrame.find('pitchx').text)
                        rightY = float(coordinateFrame.find('pitchy').text)
                        rightZ = float(coordinateFrame.find('pitchz').text)

                        # Get Axis down
                        forwardX = float(coordinateFrame.find('rollx').text)
                        forwardY = float(coordinateFrame.find('rolly').text)
                        forwardZ = float(coordinateFrame.find('rollz').text)

                        # The "coordinateFrame" provided in the MPCDI comes with three vectors to solve any
                        # coordinate system ambiguity. We need to convert the position from the "source"
                        # coordinate system to the standard MPCDI system, and then convert from the standard
                        # to the Omniverse system.
                        sourceToStandard = Gf.Matrix3f(
                            rightX, rightY, rightZ,
                            upX, upY, upZ,
                            forwardX, forwardY, forwardZ)

                        # Omniverse uses the same axes for Roll/Pitch/Yaw as the standard, so we have a diagonal
                        # matrix, BUT the Y and Z axes point in the opposite direction, so we need to invert them
                        # in the matrix. Here we'll avoid a second matrix product and simply invert Y and Z of the
                        # vector instead.
                        newPos = sourceToStandard * Gf.Vec3f(posX, posY, posZ)
                        newPos[1] = newPos[1] * -1.0
                        newPos[2] = newPos[2] * -1.0

                        frustum = region.find('frustum')
                        yaw = float(frustum.find('yaw').text) * -1
                        pitch = float(frustum.find('pitch').text)
                        roll = float(frustum.find('roll').text)

                        # For the moment we do not support lens shifting, so we simply add the two angles and
                        # assume they are the same on both sides of the angle.
                        fovRight = float(frustum.find('rightAngle').text)
                        fovLeft = float(frustum.find('leftAngle').text)
                        fovTop = float(frustum.find('upAngle').text)
                        fovBottom = float(frustum.find('downAngle').text)

                        focalLength = 10  # We chose a fixed focal length.

                        tanRight = math.tan(math.radians(fovRight))
                        tanLeft = math.tan(math.radians(fovLeft))
                        tanUp = math.tan(math.radians(fovTop))
                        tanDown = math.tan(math.radians(fovBottom))
                        apertureH = (abs(tanRight) + abs(tanLeft)) * focalLength
                        apertureV = (abs(tanUp) + abs(tanDown)) * focalLength
                        lightWidth = abs(tanRight) + abs(tanLeft)
                        lightHeight = abs(tanUp) + abs(tanDown)

                        horizLensShiftAmount = (tanLeft + tanRight) / (tanLeft - tanRight)
                        vertLensShiftAmount = (tanUp + tanDown) / (tanUp - tanDown)
                        horizApertureOffset = horizLensShiftAmount * apertureH / 2.0
                        vertApertureOffset = vertLensShiftAmount * apertureV / 2.0

                        if fovRight != fovLeft or fovTop != fovBottom:
                            hasLensShifting = True

                        regionId = region.attrib['id']
                        primPath = bufferPath + '/' + self._cleanNameForUSD(regionId)
                        prim = stage.DefinePrim(primPath, "Camera")
                        prim.GetAttribute('focalLength').Set(focalLength)
                        prim.GetAttribute('focusDistance').Set(2000.0)
                        prim.GetAttribute('horizontalAperture').Set(apertureH)
                        prim.GetAttribute('horizontalApertureOffset').Set(horizApertureOffset)
                        prim.GetAttribute('verticalAperture').Set(apertureV)
                        prim.GetAttribute('verticalApertureOffset').Set(vertApertureOffset)

                        primXform = UsdGeom.Xformable(prim)

                        # This prevents trying to add another Operation when overwriting nodes.
                        primXform.ClearXformOpOrder()

                        primXform.AddTranslateOp().Set(value=(newPos * 10.0))
                        primXform.AddRotateYOp().Set(value=yaw)
                        primXform.AddRotateXOp().Set(value=pitch)
                        primXform.AddRotateZOp().Set(value=roll)

                        # Create rectLight node
                        rectLightpath = primPath + '/ProjectLight'
                        rectLight = stage.DefinePrim(rectLightpath, 'RectLight')

                        # We need to create those attributes as they are not standard in USD and they are
                        # Omniverse specific. At this point in time Omniverse hasn't added their own attributes.
                        # We simply do it ourselves.
                        rectLight.CreateAttribute('isProjector', Sdf.ValueTypeNames.Bool).Set(True)
                        rectLight.CreateAttribute('intensity', Sdf.ValueTypeNames.Float).Set(15000)
                        rectLight.CreateAttribute('exposure', Sdf.ValueTypeNames.Float).Set(5)
                        rectLight.GetAttribute('inputs:width').Set(lightWidth)
                        rectLight.GetAttribute('inputs:height').Set(lightHeight)

                        # Creating a projector box mesh to simulate the space a projector takes in the scene
                        projectorBoxPath = primPath + '/ProjectorBox'
                        projector = stage.DefinePrim(projectorBoxPath, 'Cube')
                        projectorXform = UsdGeom.Xformable(projector)

                        projectorXform.ClearXformOpOrder()
                        projectorXform.AddTranslateOp().Set(value=(0, 0, 42.0))
                        projectorXform.AddScaleOp().Set(value=(50.0, 15, 40.0))

        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error(f"Failed to parse MPCDI file. Make sure it is not corrupt. {e}")
            return -1

        if hasLensShifting:
            message = "Lens shifting detected in MPCDI. Lens shifting is not supported."
            logger = logging.getLogger(__name__)
            logger.warn(message)
            nm.post_notification(message, status=NotificationStatus.WARNING)

        return result

    def _create_import_task(self, absolute_path, relative_path, export_folder, _):
        stage = omni.usd.get_context().get_stage()
        usd_path = ""

        # If the stage is not saved, save the imported USD next to the original asset.
        if not stage or stage.GetRootLayer().anonymous:
            now = time.localtime()
            ext = time.strftime("_%H%M%S", now)
            basename = relative_path[:relative_path.rfind(".")]
            no_folder_name = absolute_path[:absolute_path.find("/" + relative_path)]
            host_dir = os.path.join(no_folder_name, "convertedAssets", basename + ext).replace("\\", "/")

        # Save the imported USD next to the saved stage.
        path_out = omni.usd.get_context().get_stage_url()

        # If user makes a selection for the output folder use it.
        if export_folder is not None:
            path_out = export_folder

        path_out_index = path_out.rfind("/")

        success = self._convert_xml_to_usd(absolute_path)  # self._hi.convert_cad_file_to_usd(absolute_path, path_out[:path_out_index])
        ext_index = relative_path.rfind(".")
        relative_path = self._cleanNameForUSD(relative_path[:ext_index]) + ".usd"
        usd_path = os.path.join(path_out[:path_out_index], relative_path).replace("\\", "/")

        logger = logging.getLogger(__name__)
        if success == 0:
            message = "Import successful"
            logger.info(message)
            nm.post_notification(message)
            return usd_path
        elif success == -10002:
            # TODO this is when we have a problem reading the file from OV, might need to download it locally
            logger.info("NOT IMPLEMENTED: Failure to load model from omniverse server, please select a file from local disk.")
            nm.post_notification(
                f"Failed to convert file {os.path.basename(absolute_path)}.\n"
                "Please check console for more details.",
                status=nm.NotificationStatus.WARNING,
            )
            return None
        else:
            logger.info("IMPORT FAILED")
            nm.post_notification(
                f"Failed to convert file {os.path.basename(absolute_path)}.\n"
                "Please check console for more details.",
                status=nm.NotificationStatus.WARNING,
            )
            return None

    async def create_import_task(self, absolute_paths, relative_paths, export_folder, hoops_context):
        converted_assets = {}
        for i in range(len(absolute_paths)):
            converted_assets[absolute_paths[i]] = self._create_import_task(absolute_paths[i], relative_paths[i],
                                                                           export_folder, hoops_context)
        return converted_assets


class MPCDIConverterOptions:
    def __init__(self):
        self.cad_converter_context = MPCDIConverterContext()
        self.export_folder: str = None


class MPCDIConverterOptionsBuilder:
    def __init__(self, usd_context):
        super().__init__()
        self._file_picker = None
        self._usd_context = usd_context
        self._export_context = MPCDIConverterOptions()
        self._folder_button = None
        self._refresh_default_folder = False
        self._default_folder = None
        self._clear()

    def _clear(self):
        self._built = False
        self._export_folder_field = None
        if self._folder_button:
            self._folder_button.set_clicked_fn(None)
            self._folder_button = None

    def set_default_target_folder(self, folder: str):
        self._default_folder = folder
        self._refresh_default_folder = True

    def build_pane(self, asset_paths: List[str]):
        self._export_context = self.get_import_options()
        if self._refresh_default_folder:
            self._export_context.export_folder = self._default_folder
            self._default_folder = None
            self._refresh_default_folder = False

        self._built = True

        OPTIONS_STYLE = {
            "Rectangle::hovering": {"background_color": 0x0, "border_radius": 2, "margin": 0, "padding": 0},
            "Rectangle::hovering:hovered": {"background_color": 0xFF9E9E9E},
            "Button.Image::folder": {"image_url": Icons().get("folder")},
            "Button.Image::folder:checked": {"image_url": Icons().get("folder")},
            "Button::folder": {"background_color": 0x0, "margin": 0},
            "Button::folder:checked": {"background_color": 0x0, "margin": 0},
            "Button::folder:pressed": {"background_color": 0x0, "margin": 0},
            "Button::folder:hovered": {"background_color": 0x0, "margin": 0},
        }
        with ui.VStack(height=0, style=OPTIONS_STYLE):
            ui.Spacer(width=0, height=5)
            with ui.HStack(height=0):
                ui.Label("Convert To:", width=0)
                ui.Spacer(width=3)
                with ui.VStack(height=0):
                    ui.Spacer(height=4)
                    self._export_folder_field = ui.StringField(height=20, width=ui.Fraction(1), read_only=False)
                    self._export_folder_field.set_tooltip(
                        "Leaving this empty will export USD to the folder that assets are under."
                    )
                    ui.Spacer(height=4)
                with ui.VStack(height=0, width=0):
                    ui.Spacer(height=4)
                    with ui.ZStack(width=20, height=20):
                        ui.Rectangle(name="hovering")
                        self._folder_button = ui.Button(name="folder", width=24, height=24)
                    self._folder_button.set_tooltip("Choose folder")
                    ui.Spacer(height=4)
                ui.Spacer(width=2)
            self._folder_button.set_clicked_fn(self._show_file_picker)
            ui.Spacer(width=0, height=10)

        if self._export_context.export_folder:
            self._export_folder_field.model.set_value(self._export_context.export_folder)
        else:
            self._export_folder_field.model.set_value("")

    def _select_picked_folder_callback(self, paths):
        if paths:
            self._export_folder_field.model.set_value(paths[0])

    def _cancel_picked_folder_callback(self):
        pass

    def _show_file_picker(self):
        if not self._file_picker:
            mode = FileBrowserMode.OPEN
            file_type = FileBrowserSelectionType.DIRECTORY_ONLY
            filters = [(".*", "All Files (*.*)")]
            self._file_picker = FilePicker("Select Folder", mode=mode, file_type=file_type, filter_options=filters)
            self._file_picker.set_file_selected_fn(self._select_picked_folder_callback)
            self._file_picker.set_cancel_fn(self._cancel_picked_folder_callback)

        folder = self._export_folder_field.model.get_value_as_string()
        if utils.is_folder(folder):
            self._file_picker.show(folder)
        else:
            self._file_picker.show(self._get_current_dir_in_content_window())

    def _get_current_dir_in_content_window(self):
        content_window = content.get_content_window()
        return content_window.get_current_directory()

    def get_import_options(self):
        context = MPCDIConverterOptions()
        # TODO enable this after the filepicker bugfix: OM-47383
        # if self._built:
        #     context.export_folder = str.strip(self._export_folder_field.model.get_value_as_string())
        #     context.export_folder = context.export_folder.replace("\\", "/")
        return context

    def destroy(self):
        self._clear()
        if self._file_picker:
            self._file_picker.destroy()


class MPCDIConverterDelegate(ai.AbstractImporterDelegate):
    def __init__(self, usd_context, name, filters, descriptions):
        super().__init__()
        self._hoops_options_builder = MPCDIConverterOptionsBuilder(usd_context)
        self._hoops_converter = MPCDIConverterHelper()
        self._name = name
        self._filters = filters
        self._descriptions = descriptions

    def destroy(self):
        if self._hoops_converter:
            self._hoops_converter.destroy()
            self._hoops_converter = None

        if self._hoops_options_builder:
            self._hoops_options_builder.destroy()
            self._hoops_options_builder = None

    @property
    def name(self):
        return self._name

    @property
    def filter_regexes(self):
        return self._filters

    @property
    def filter_descriptions(self):
        return self._descriptions

    def build_options(self, paths):
        pass
        # TODO enable this after the filepicker bugfix: OM-47383
        # self._hoops_options_builder.build_pane(paths)

    async def convert_assets(self, paths):
        context = self._hoops_options_builder.get_import_options()
        hoops_context = context.cad_converter_context
        absolute_paths = []
        relative_paths = []

        for file_path in paths:
            if self.is_supported_format(file_path):
                absolute_paths.append(file_path)
                filename = os.path.basename(file_path)
                relative_paths.append(filename)

        converted_assets = await self._hoops_converter.create_import_task(
            absolute_paths, relative_paths, context.export_folder, hoops_context
        )

        return converted_assets


_global_instance = None


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MfMpcdiConverterExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        global _global_instance
        _global_instance = self

        self._usd_context = omni.usd.get_context()

        self.delegate_mpcdi = MPCDIConverterDelegate(
            self._usd_context,
            "MPCDI Converter",
            ["(.*\\.mpcdi\\.xml$)"],
            ["mpcdi XML Files (*.mpdci.xml)"]
        )

        ai.register_importer(self.delegate_mpcdi)

    def on_shutdown(self):
        global _global_instance
        _global_instance = None
        ai.remove_importer(self.delegate_mpcdi)
        self.delegate_mpcdi = None
19,870
Python
43.354911
135
0.573125
MomentFactory/Omniverse-MPCDI-converter/exts/mf.ov.mpcdi_converter/mf/ov/mpcdi_converter/__init__.py
import os
from pxr import Plug

pluginsRoot = os.path.join(os.path.dirname(__file__), '../../../plugin/resources')

Plug.Registry().RegisterPlugins(pluginsRoot)

from .extension import *
191
Python
20.333331
82
0.706806
MomentFactory/Omniverse-MPCDI-converter/exts/mf.ov.mpcdi_converter/mf/ov/mpcdi_converter/omni_client_wrapper.py
import os
import traceback
import asyncio
import carb
import omni.client


def _encode_content(content):
    if type(content) == str:
        payload = bytes(content.encode("utf-8"))
    elif type(content) != type(None):
        payload = bytes(content)
    else:
        payload = bytes()

    return payload


class OmniClientWrapper:
    @staticmethod
    async def exists(path):
        try:
            result, entry = await omni.client.stat_async(path)
            return result == omni.client.Result.OK
        except Exception as e:
            traceback.print_exc()
            carb.log_error(str(e))
            return False

    @staticmethod
    def exists_sync(path):
        try:
            result, entry = omni.client.stat(path)
            return result == omni.client.Result.OK
        except Exception as e:
            traceback.print_exc()
            carb.log_error(str(e))
            return False

    @staticmethod
    async def write(path: str, content):
        carb.log_info(f"Writing {path}...")
        try:
            result = await omni.client.write_file_async(path, _encode_content(content))
            if result != omni.client.Result.OK:
                carb.log_error(f"Cannot write {path}, error code: {result}.")
                return False
        except Exception as e:
            traceback.print_exc()
            carb.log_error(str(e))
            return False
        finally:
            carb.log_info(f"Writing {path} done...")

        return True

    @staticmethod
    async def copy(src_path: str, dest_path: str):
        carb.log_info(f"Copying from {src_path} to {dest_path}...")
        try:
            await omni.client.delete_async(dest_path)
            result = await omni.client.copy_async(src_path, dest_path)
            if result != omni.client.Result.OK:
                carb.log_error(f"Cannot copy from {src_path} to {dest_path}, error code: {result}.")
                return False
            else:
                return True
        except Exception as e:
            traceback.print_exc()
            carb.log_error(str(e))

        return False

    @staticmethod
    async def read(src_path: str):
        carb.log_info(f"Reading {src_path}...")
        try:
            result, version, content = await omni.client.read_file_async(src_path)
            if result == omni.client.Result.OK:
                return memoryview(content).tobytes()
            else:
                carb.log_error(f"Cannot read {src_path}, error code: {result}.")
        except Exception as e:
            traceback.print_exc()
            carb.log_error(str(e))
        finally:
            carb.log_info(f"Reading {src_path} done...")

        return None

    @staticmethod
    async def create_folder(path):
        carb.log_info(f"Creating dir {path}...")
        result = await omni.client.create_folder_async(path)
        return result == omni.client.Result.OK or result == omni.client.Result.ERROR_ALREADY_EXISTS

    @staticmethod
    def create_folder_sync(path):
        carb.log_info(f"Creating dir {path}...")
        result = omni.client.create_folder(path)
        return result == omni.client.Result.OK or result == omni.client.Result.ERROR_ALREADY_EXISTS
3,226
Python
30.637255
100
0.574086
MomentFactory/Omniverse-MVR-GDTF-converter/tools/packman/bootstrap/install_package.py
# Copyright 2019 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable

RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1

logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")


def remove_directory_item(path):
    if os.path.islink(path) or os.path.isfile(path):
        try:
            os.remove(path)
        except PermissionError:
            # make sure we have access and try again:
            os.chmod(path, stat.S_IRWXU)
            os.remove(path)
    else:
        # try first to delete the dir because this will work for folder junctions, otherwise we would follow
        # the junctions and cause destruction!
        clean_out_folder = False
        try:
            # make sure we have access preemptively - this is necessary because recursing into a directory
            # without permissions will only lead to heart ache
            os.chmod(path, stat.S_IRWXU)
            os.rmdir(path)
        except OSError:
            clean_out_folder = True

        if clean_out_folder:
            # we should make sure the directory is empty
            names = os.listdir(path)
            for name in names:
                fullname = os.path.join(path, name)
                remove_directory_item(fullname)
            # now try to again get rid of the folder - and not catch if it raises:
            os.rmdir(path)


class StagingDirectory:
    def __init__(self, staging_path):
        self.staging_path = staging_path
        self.temp_folder_path = None
        os.makedirs(staging_path, exist_ok=True)

    def __enter__(self):
        self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
        return self

    def get_temp_folder_path(self):
        return self.temp_folder_path

    # this function renames the temp staging folder to folder_name, it is required that the parent path exists!
    def promote_and_rename(self, folder_name):
        abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
        os.rename(self.temp_folder_path, abs_dst_folder_name)

    def __exit__(self, type, value, traceback):
        # Remove temp staging folder if it's still there (something went wrong):
        path = self.temp_folder_path
        if os.path.isdir(path):
            remove_directory_item(path)


def rename_folder(staging_dir: StagingDirectory, folder_name: str):
    try:
        staging_dir.promote_and_rename(folder_name)
    except OSError as exc:
        # if we failed to rename because the folder now exists we can assume that another packman process
        # has managed to update the package before us - in all other cases we re-raise the exception
        abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
        if os.path.exists(abs_dst_folder_name):
            logger.warning(
                f"Directory {abs_dst_folder_name} already present, package installation already completed"
            )
        else:
            raise


def call_with_retry(
    op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
    retries_left = retry_count
    while True:
        try:
            return func()
        except (OSError, IOError) as exc:
            logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
            if retries_left:
                retry_str = "retry" if retries_left == 1 else "retries"
                logger.warning(
                    f"Retrying after {retry_delay} seconds"
                    f" ({retries_left} {retry_str} left) ..."
                )
                time.sleep(retry_delay)
            else:
                logger.error("Maximum retries exceeded, giving up")
                raise
        retries_left -= 1


def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
    dst_path = os.path.join(staging_dir.staging_path, folder_name)
    call_with_retry(
        f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
        lambda: rename_folder(staging_dir, folder_name),
        RENAME_RETRY_COUNT,
        RENAME_RETRY_DELAY,
    )


def install_package(package_path, install_path):
    staging_path, version = os.path.split(install_path)
    with StagingDirectory(staging_path) as staging_dir:
        output_folder = staging_dir.get_temp_folder_path()
        with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
            zip_file.extractall(output_folder)

            # attempt the rename operation
            rename_folder_with_retry(staging_dir, version)

    print(f"Package successfully installed to {install_path}")


if __name__ == "__main__":
    executable_paths = os.getenv("PATH")
    paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
    target_path_np = os.path.normpath(sys.argv[2])
    target_path_np_nc = os.path.normcase(target_path_np)
    for exec_path in paths_list:
        if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
            raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
    install_package(sys.argv[1], target_path_np)
5,776
Python
36.270968
145
0.645083
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gltfImporter.py
import logging
import omni.client
import os
import subprocess
import tempfile
from typing import List
import xml.etree.ElementTree as ET
from zipfile import ZipFile

from .filepathUtility import Filepath
from .gdtfUtil import Model


class GLTFImporter:
    TMP_ARCHIVE_EXTRACT_DIR = f"{tempfile.gettempdir()}/MF.OV.GDTF/"

    def convert(root: ET.Element, archive: ZipFile, output_dir: str) -> List[Model]:
        models: List[Model] = GLTFImporter._get_model_nodes(root)
        models_filtered: List[Model] = GLTFImporter._filter_models(models)
        GLTFImporter._extract_gltf_to_tmp(models_filtered, archive)
        GLTFImporter._convert_gltf(models_filtered, output_dir)
        return models

    def _get_model_nodes(root: ET.Element) -> List[Model]:
        node_fixture: ET.Element = root.find("FixtureType")
        node_models: ET.Element = node_fixture.find("Models")
        nodes_model = node_models.findall("Model")
        models: List[Model] = []
        for node_model in nodes_model:
            models.append(Model(node_model))
        return models

    def _filter_models(models: List[Model]) -> List[Model]:
        filters: List[str] = ['pigtail', 'beam']
        filtered_models: List[Model] = []
        for model in models:
            if model.has_file():
                filtered_models.append(model)
            elif model.get_name().lower() not in filters:
                logger = logging.getLogger(__name__)
                logger.info(f"File attribute empty for model node {model.get_name()}, skipping.")
        return filtered_models

    def _extract_gltf_to_tmp(models: List[Model], gdtf_archive: ZipFile):
        namelist = gdtf_archive.namelist()
        to_remove: List[Model] = []

        for model in models:
            filename = model.get_file()
            filepath_glb = f"models/gltf/{filename}.glb"
            filepath_gltf = f"models/gltf/{filename}.gltf"
            filepath_3ds = f"models/3ds/{filename}.3ds"

            if filepath_glb in namelist:
                tmp_export_path = gdtf_archive.extract(filepath_glb, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
                model.set_tmpdir_filepath(Filepath(tmp_export_path))
            elif filepath_gltf in namelist:
                tmp_export_path = gdtf_archive.extract(filepath_gltf, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
                for filepath in namelist:  # Also import .bin, textures, etc.
                    if filepath.startswith(f"models/gltf/{filename}") and filepath != filepath_gltf:
                        gdtf_archive.extract(filepath, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
                model.set_tmpdir_filepath(Filepath(tmp_export_path))
            elif filepath_3ds in namelist:
                tmp_export_path = gdtf_archive.extract(filepath_3ds, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
                temp_export_path_gltf = tmp_export_path[:-4] + ".gltf"
                GLTFImporter._convert_3ds_to_gltf(tmp_export_path, temp_export_path_gltf)
                model.set_tmpdir_filepath(Filepath(temp_export_path_gltf))
                model.set_converted_from_3ds()
                os.remove(tmp_export_path)
            else:
                logger = logging.getLogger(__name__)
                logger.warn(f"No file found for {filename}, skipping.")
                to_remove.append(model)

        for model in to_remove:
            models.remove(model)

    def _convert_3ds_to_gltf(input, output):
        path = __file__
        my_env = os.environ.copy()
        my_env["PATH"] = path + '\\..\\' + os.pathsep + my_env['PATH']
        scriptPath = path + "\\..\\3dsConverterScript.py"
        try:
            result = subprocess.run(["py", "-3.10", scriptPath, input, output], capture_output=True, env=my_env)
            if result.returncode != 0:
                logger = logging.getLogger(__name__)
                logger.error(
                    f"Failed to convert 3ds file to gltf: {input}\n"
                    f"error (Requires python 3.10): {result.stderr.decode('utf-8')}\n"
                    f"error message: {result.stdout.decode('utf-8')}"
                )
        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error(f"Failed to convert 3ds file to gltf: {input}\n{e}")

    def _convert_gltf(models: List[Model], gdtf_output_dir):
        output_dir = gdtf_output_dir + "gltf/"
        _, files_in_output_dir = omni.client.list(output_dir)  # Ignoring omni.client.Result
        relative_paths_in_output_dir = [x.relative_path for x in files_in_output_dir]
        converted_models: List[Model] = []

        for model in models:
            file: Filepath = model.get_tmpdir_filepath()
            if model.get_converted_from_3ds():
                bin_file = file.basename[:-5] + ".bin"
                bin_path = output_dir + bin_file
                if bin_file not in relative_paths_in_output_dir:
                    input_path = file.fullpath[:-5] + ".bin"
                    result = omni.client.copy(input_path, bin_path, omni.client.CopyBehavior.OVERWRITE)
            output_file = file.basename
            output_path = output_dir + output_file
            if output_file not in relative_paths_in_output_dir:
                input_path = file.fullpath
                result = omni.client.copy(input_path, output_path, omni.client.CopyBehavior.OVERWRITE)
                if result == omni.client.Result.OK:
                    model.set_converted_filepath(Filepath(output_path))
                    converted_models.append(model)
                else:
                    logger = logging.getLogger(__name__)
                    logger.error(f"Failure to convert file {input_path}: {result}")
            else:
                model.set_converted_filepath(Filepath(output_path))
                converted_models.append(model)
        return converted_models
5,820
Python
46.325203
187
0.601203
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterContext.py
class ConverterContext:
    usd_reference_path = ""
52
Python
16.666661
27
0.711538
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfUtil.py
import math
import xml.etree.ElementTree as ET

from pxr import Usd, UsdGeom, UsdLux, Sdf

from .filepathUtility import Filepath
from .USDTools import USDTools


def get_attrib_if_exists(node: ET.Element, attr: str):
    return node.attrib[attr] if attr in node.attrib else None


def get_attrib_text_if_exists(node: ET.Element, attr: str):
    return get_attrib_if_exists(node, attr)


def get_attrib_int_if_exists(node: ET.Element, attr: str):
    str_value = get_attrib_if_exists(node, attr)
    if str_value is not None:
        return int(str_value)
    return None


def get_attrib_float_if_exists(node: ET.Element, attr: str):
    str_value = get_attrib_if_exists(node, attr)
    if str_value is not None:
        return float(str_value)
    return None


def set_attribute_text_if_valid(prim: Usd.Prim, name: str, value: str):
    if value is not None:
        USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.String, value)


def set_attribute_int_if_valid(prim: Usd.Prim, name: str, value: str):
    if value is not None:
        USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Int, value)


def set_attribute_float_if_valid(prim: Usd.Prim, name: str, value: str):
    if value is not None:
        USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Float, value)


class Model:
    def __init__(self, node: ET.Element):
        self._name = node.attrib["Name"]
        self._name_usd = USDTools.make_name_valid(self._name)
        self._file = get_attrib_if_exists(node, "File")
        self._primitive_type = node.attrib["PrimitiveType"]
        self._height = float(node.attrib["Height"])
        self._length = float(node.attrib["Length"])
        self._width = float(node.attrib["Width"])
        self._converted_from_3ds = False

    def get_name(self) -> str:
        return self._name

    def get_name_usd(self) -> str:
        return self._name_usd

    def has_file(self) -> bool:
        return self._file is not None and self._file != ""

    def get_file(self) -> str:
        return self._file

    def set_tmpdir_filepath(self, path: Filepath):
        self._tmpdir_filepath = path

    def get_tmpdir_filepath(self) -> Filepath:
        return self._tmpdir_filepath

    def set_converted_from_3ds(self):
        self._converted_from_3ds = True

    def get_converted_from_3ds(self):
        return self._converted_from_3ds

    def set_converted_filepath(self, path: Filepath):
        self._converted_filepath = path

    def get_converted_filepath(self) -> Filepath:
        return self._converted_filepath

    def get_height(self) -> float:
        return self._height

    def get_width(self) -> float:
        return self._width


class Geometry:
    def __init__(self, node: ET.Element):
        self._name: str = node.attrib["Name"]
        self._model_id: str = get_attrib_if_exists(node, "Model")
        self._position_matrix = node.attrib["Position"]
        self._tag = node.tag

    def get_tag(self) -> str:
        return self._tag

    def get_name(self) -> str:
        return self._name

    def get_model_id(self) -> str:
        if self._model_id is not None:
            return self._model_id
        return self._name

    def get_position_matrix(self) -> str:
        return self._position_matrix

    def set_model(self, model: Model):
        self._model = model

    def get_model(self) -> Model:
        return self._model

    def set_stage_path(self, path: str):
        self._stage_path = path

    def get_stage_path(self) -> str:
        return self._stage_path

    def set_depth(self, depth: int):
        self._depth = depth

    def get_depth(self) -> int:
        return self._depth

    def set_xform_model(self, xform: UsdGeom.Xform):
        self._xform_model = xform

    def get_xform_model(self) -> UsdGeom.Xform:
        return self._xform_model

    def set_xform_parent(self, xform: UsdGeom.Xform):
        self._xform_parent = xform

    def get_xform_parent(self) -> UsdGeom.Xform:
        return self._xform_parent


class Beam:
    def __init__(self, geometry: Geometry, node: ET.Element):
        self._radius = float(node.attrib["BeamRadius"])
        self._position_matrix = geometry.get_position_matrix()
        self._stage_path = geometry.get_stage_path()

        # The attributes should always exist as per the standard definition
        self._beam_angle = get_attrib_float_if_exists(node, "BeamAngle")
        self._beam_type = get_attrib_text_if_exists(node, "BeamType")
        self._color_rendering_index = get_attrib_int_if_exists(node, "ColorRenderingIndex")
        self._color_temperature = get_attrib_float_if_exists(node, "ColorTemperature")
        self._field_angle = get_attrib_float_if_exists(node, "FieldAngle")
        self._lamp_type = get_attrib_text_if_exists(node, "LampType")
        self._luminous_flux = get_attrib_float_if_exists(node, "LuminousFlux")
        self._power_consumption = get_attrib_float_if_exists(node, "PowerConsumption")

    def get_radius(self) -> float:
        return self._radius

    def get_position_matrix(self) -> str:
        return self._position_matrix

    def get_stage_path(self) -> str:
        return self._stage_path

    def get_intensity(self) -> float:
        lumens = self._luminous_flux
        radius = self._radius
        if lumens is None:
            return None
        # Approximate conversion from luminous flux (lumens) to intensity,
        # spreading the flux over the surface of a sphere of the given radius
        # (12.566 is roughly 4 * pi).
        candela: float = lumens / 12.566
        numerator = candela * 1000
        denominator = 4 * math.pi * radius * radius
        result = numerator / denominator
        return result

    def apply_attributes_to_prim(self, light: UsdLux):
        prim: Usd.Prim = light.GetPrim()

        set_attribute_float_if_valid(prim, "BeamAngle", self._beam_angle)
        set_attribute_text_if_valid(prim, "BeamType", self._beam_type)
        set_attribute_int_if_valid(prim, "ColorRenderingIndex", self._color_rendering_index)
        set_attribute_float_if_valid(prim, "ColorTemperature", self._color_temperature)
        set_attribute_float_if_valid(prim, "FieldAngle", self._field_angle)
        set_attribute_text_if_valid(prim, "LampType", self._lamp_type)
        set_attribute_float_if_valid(prim, "LuminousFlux", self._luminous_flux)
        set_attribute_float_if_valid(prim, "PowerConsumption", self._power_consumption)
        USDTools.set_light_attributes(light, self._beam_angle, self.get_intensity(), self._color_temperature)


class FixtureAttributes:
    def __init__(self, root: ET.Element):
        self._operating_temperature_high = None
        self._operating_temperature_low = None
        self._weight = None
        self._leg_height = None

        node_fixture: ET.Element = root.find("FixtureType")
        node_physdesc: ET.Element = node_fixture.find("PhysicalDescriptions")
        if node_physdesc is not None:
            node_properties: ET.Element = node_physdesc.find("Properties")
            if node_properties is not None:
                node_operatingtemp: ET.Element = node_properties.find("OperatingTemperature")
                if node_operatingtemp is not None:
                    self._operating_temperature_high = get_attrib_float_if_exists(node_operatingtemp, "High")
                    self._operating_temperature_low = get_attrib_float_if_exists(node_operatingtemp, "Low")

                node_weight: ET.Element = node_properties.find("Weight")
                if node_weight is not None:
                    self._weight = get_attrib_float_if_exists(node_weight, "Value")

                node_legheight: ET.Element = node_properties.find("LegHeight")
                if node_legheight is not None:
                    self._leg_height = get_attrib_float_if_exists(node_legheight, "Value")

    def apply_attributes_to_prim(self, prim: Usd.Prim):
        set_attribute_float_if_valid(prim, "OperatingTemperature:High", self._operating_temperature_high)
        set_attribute_float_if_valid(prim, "OperatingTemperature:Low", self._operating_temperature_low)
        set_attribute_float_if_valid(prim, "Weight", self._weight)
        set_attribute_float_if_valid(prim, "LegHeight", self._leg_height)
8,096
Python
35.147321
109
0.643157
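A quick, self-contained sanity check (not from the repo) of the lumens-to-intensity conversion Beam.get_intensity performs above; the flux and radius values are made up:

import math

lumens = 10000.0   # hypothetical LuminousFlux from a GDTF Beam node
radius = 0.05      # hypothetical BeamRadius, in meters
candela = lumens / 12.566                             # 12.566 ~= 4*pi (isotropic source)
intensity = candela * 1000 / (4 * math.pi * radius * radius)
print(round(intensity))  # same arithmetic as Beam.get_intensity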
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfImporter.py
from io import BytesIO
import logging
from typing import List, Tuple
import xml.etree.ElementTree as ET
from zipfile import ZipFile

from pxr import Gf, Sdf, Usd, UsdGeom

from .filepathUtility import Filepath
from .gdtfUtil import Model, Geometry, Beam, FixtureAttributes
from .gltfImporter import GLTFImporter
from .USDTools import USDTools


class GDTFImporter:
    def convert(file: Filepath, output_dir: str, output_ext: str = ".usd") -> str:
        try:
            with ZipFile(file.fullpath, 'r') as archive:
                gdtf_output_dir = output_dir + file.filename + "_gdtf/"
                url: str = GDTFImporter._convert(archive, gdtf_output_dir, file.filename, output_ext)
                return url
        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error(f"Failed to parse gdtf file at {file.fullpath}. Make sure it is not corrupt. {e}")
            return None

    def convert_from_mvr(spec_name: str, output_dir: str, mvr_archive: ZipFile, output_ext: str = ".usd") -> bool:
        spec_name_with_ext = spec_name + ".gdtf"
        if spec_name_with_ext in mvr_archive.namelist():
            gdtf_data = BytesIO(mvr_archive.read(spec_name_with_ext))
            gdtf_output_dir = output_dir + spec_name + "_gdtf/"
            with ZipFile(gdtf_data) as gdtf_archive:
                GDTFImporter._convert(gdtf_archive, gdtf_output_dir, spec_name, output_ext)
            return True
        else:
            return False

    def _convert(archive: ZipFile, output_dir: str, name: str, output_ext: str) -> str:
        data = archive.read("description.xml")
        root = ET.fromstring(data)
        converted_models: List[Model] = GLTFImporter.convert(root, archive, output_dir)
        url: str = GDTFImporter._convert_gdtf_usd(output_dir, name, output_ext, root, converted_models)
        return url

    def _convert_gdtf_usd(output_dir: str, filename: str, ext: str, root: ET.Element, models: List[Model]) -> str:
        url: str = output_dir + filename + ext
        stage: Usd.Stage = GDTFImporter._get_or_create_gdtf_usd(url)
        geometries, beams = GDTFImporter._get_stage_hierarchy(root, models, stage)
        GDTFImporter._add_gltf_reference(stage, geometries)
        GDTFImporter._apply_gdtf_matrix(stage, geometries)
        GDTFImporter._add_light_to_hierarchy(stage, beams, geometries)
        GDTFImporter._apply_gltf_scale(stage, geometries)
        GDTFImporter._set_general_attributes(stage, root)
        return url

    def _get_or_create_gdtf_usd(url: str) -> Usd.Stage:
        return USDTools.get_or_create_stage(url)

    def _get_stage_hierarchy(root: ET.Element, models: List[Model], stage: Usd.Stage) -> Tuple[List[Geometry], List[Beam]]:
        node_fixture: ET.Element = root.find("FixtureType")
        node_geometries = node_fixture.find("Geometries")
        default_prim_path = stage.GetDefaultPrim().GetPath()
        geometries: List[Geometry] = []
        beams: List[Beam] = []
        GDTFImporter._get_stage_hierarchy_recursive(node_geometries, models, geometries, beams, default_prim_path, 0)
        return geometries, beams

    def _get_stage_hierarchy_recursive(parent_node: ET.Element, models: List[Model], geometries: List[Geometry],
                                       beams: List[Beam], path: str, depth: int):
        geometry_filter: List[str] = ['Geometry', 'Axis', 'Beam', 'Inventory']
        for child_node in list(parent_node):
            if 'Model' in child_node.attrib:
                if child_node.tag not in geometry_filter:
                    # Pass through (might want to add an xform)
                    GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, path, depth + 1)
                else:
                    geometry: Geometry = Geometry(child_node)
                    model_id: str = geometry.get_model_id()
                    model: Model = next((model for model in models if model.get_name() == model_id), None)
                    if model is not None and model.has_file():
                        geometry.set_model(model)
                        stage_path = f"{path}/{model.get_name_usd()}"
                        geometry.set_stage_path(stage_path)
                        geometry.set_depth(depth)
                        geometries.append(geometry)
                        GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, stage_path, depth + 1)
                    else:
                        if model_id.lower() == "pigtail":
                            pass  # Skip pigtail geometry
                        elif model_id.lower() == "beam":
                            stage_path = f"{path}/beam"
                            geometry.set_stage_path(stage_path)
                            beam: Beam = Beam(geometry, child_node)
                            beams.append(beam)
                        elif model is not None and not model.has_file():
                            logger = logging.getLogger(__name__)
                            logger.warning(f"No file found for {model_id}, skipping.")
                        else:
                            # Probably could just be a transform
                            pass
            else:
                # Probably could just be a transform
                pass

    def _add_gltf_reference(stage: Usd.Stage, geometries: List[Geometry]):
        stage_path = Filepath(USDTools.get_stage_directory(stage))
        for geometry in geometries:
            model: Model = geometry.get_model()
            relative_path: str = stage_path.get_relative_from(model.get_converted_filepath())
            xform_parent, xform_model = USDTools.add_reference(stage, relative_path, geometry.get_stage_path(), "/model")
            xform_model.GetPrim().CreateAttribute("mf:gdtf:converter_from_3ds", Sdf.ValueTypeNames.Bool).Set(model.get_converted_from_3ds())
            geometry.set_xform_parent(xform_parent)
            geometry.set_xform_model(xform_model)
        stage.Save()

    def _apply_gltf_scale(stage: Usd.Stage, geometries: List[Geometry]):
        world_xform: UsdGeom.Xform = UsdGeom.Xform(stage.GetDefaultPrim())
        stage_metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
        scale = 1 / stage_metersPerUnit
        USDTools.apply_scale_xform_op(world_xform, scale)

        converted_3ds = False
        for geometry in geometries:
            model = geometry.get_model()
            if model.get_converted_from_3ds():
                converted_3ds = True

        if converted_3ds:
            for geometry in geometries:
                if geometry.get_tag() != 'Beam':
                    xform = geometry.get_xform_model()
                    USDTools.apply_scale_xform_op(xform, UsdGeom.LinearUnits.millimeters)  # force mm

        stage.Save()

    def _apply_gdtf_matrix(stage: Usd.Stage, geometries: List[Geometry]):
        applied_scale = USDTools.compute_applied_scale(stage)
        axis_matrix = USDTools.get_axis_rotation_matrix()

        for geometry in geometries:
            translation, rotation = USDTools.compute_xform_values(geometry.get_position_matrix(), applied_scale, axis_matrix)
            xform: UsdGeom.Xform = geometry.get_xform_parent()
            xform.ClearXformOpOrder()  # Prevent error when overwriting
            xform.AddTranslateOp().Set(translation)
            xform.AddRotateZYXOp().Set(rotation)
            xform.AddScaleOp().Set(Gf.Vec3d(1, 1, 1))

        stage.Save()

    def _add_light_to_hierarchy(stage: Usd.Stage, beams: List[Beam], geometries: List[Geometry]):
        if len(beams) > 0:
            GDTFImporter._add_beam_to_hierarchy(stage, beams)
        else:
            # Some gdtf files only represent brackets and such. They contain only "Inventory" geometry.
            # We don't want to add a light source to those.
            has_not_inventory_geometry = False
            for geometry in geometries:
                if geometry.get_tag() != 'Inventory':
                    has_not_inventory_geometry = True
            if has_not_inventory_geometry:
                GDTFImporter._add_default_light_to_hierarchy(stage, geometries)

    def _add_beam_to_hierarchy(stage: Usd.Stage, beams: List[Beam]):
        for beam in beams:
            light = USDTools.add_beam(stage, beam.get_stage_path(), beam.get_position_matrix(), beam.get_radius())
            beam.apply_attributes_to_prim(light)
        stage.Save()

    def _add_default_light_to_hierarchy(stage: Usd.Stage, geometries: List[Geometry]):
        deepest_geom = geometries[-1]
        max_depth = deepest_geom.get_depth()
        for geom in reversed(geometries):
            depth = geom.get_depth()
            if (depth > max_depth):
                deepest_geom = geom
                max_depth = depth
        light_stage_path = deepest_geom.get_stage_path() + "/Beam"
        model = deepest_geom.get_model()
        USDTools.add_light_default(stage, light_stage_path, model.get_height(), model.get_width())
        stage.Save()

    def _set_general_attributes(stage: Usd.Stage, root: ET.Element):
        fixtureAttr = FixtureAttributes(root)
        prim: Usd.Prim = USDTools.get_default_prim(stage)
        fixtureAttr.apply_attributes_to_prim(prim)
        stage.Save()
9,327
Python
48.617021
140
0.599979
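A minimal driver sketch (hypothetical paths, and it assumes the extension's modules are importable outside Kit) showing how GDTFImporter.convert is meant to be called:

from mf.ov.gdtf.filepathUtility import Filepath
from mf.ov.gdtf.gdtfImporter import GDTFImporter

file = Filepath("/tmp/fixtures/MyFixture.gdtf")  # hypothetical input archive
url = GDTFImporter.convert(file, "/tmp/out/")    # expected: /tmp/out/MyFixture_gdtf/MyFixture.usd
print(url)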
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/3dsConverterScript.py
import sys
import os


def main():
    # Prepend the script's directory to PATH so pyassimp can locate the
    # native assimp library (the original prepended __file__ itself, but
    # PATH lookups operate on directories)
    os.environ["PATH"] = os.path.dirname(__file__) + os.pathsep + os.environ["PATH"]

    if len(sys.argv) <= 2:
        print("Need at least 2 arguments")
        exit(1)

    from pyassimp import load, export

    inputFile = sys.argv[1]
    outputFile = sys.argv[2]
    print("Input 3ds file: " + inputFile)
    print("Output file: " + outputFile)

    with load(inputFile) as scene:
        export(scene, outputFile, "gltf2")


if __name__ == "__main__":
    main()
487
Python
18.519999
67
0.585216
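Usage sketch for the script above (file names are hypothetical):

# From a shell, with assimp's shared library discoverable:
#   python 3dsConverterScript.py input.3ds output.gltf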
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/extension.py
import omni.ext
import omni.kit.tool.asset_importer as ai

from .converterDelegate import ConverterDelegate


class MfOvGdtfExtension(omni.ext.IExt):
    def on_startup(self, _):
        self._delegate_gdtf = ConverterDelegate(
            "GDTF Converter",
            ["(.*\\.gdtf$)"],
            ["GDTF Files (*.gdtf)"]
        )
        ai.register_importer(self._delegate_gdtf)

    def on_shutdown(self):
        ai.remove_importer(self._delegate_gdtf)
        self._delegate_gdtf.destroy()
        self._delegate_gdtf = None
533
Python
25.699999
49
0.617261
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterDelegate.py
import os

import omni.kit.tool.asset_importer as ai

from .converterOptionsBuilder import ConverterOptionsBuilder
from .converterHelper import ConverterHelper


class ConverterDelegate(ai.AbstractImporterDelegate):
    def __init__(self, name, filters, descriptions):
        super().__init__()
        self._hoops_options_builder = ConverterOptionsBuilder()
        self._hoops_converter = ConverterHelper()
        self._name = name
        self._filters = filters
        self._descriptions = descriptions

    def destroy(self):
        if self._hoops_converter:
            # self._hoops_converter.destroy()
            self._hoops_converter = None

        if self._hoops_options_builder:
            self._hoops_options_builder.destroy()
            self._hoops_options_builder = None

    @property
    def name(self):
        return self._name

    @property
    def filter_regexes(self):
        return self._filters

    @property
    def filter_descriptions(self):
        return self._descriptions

    def build_options(self, paths):
        pass
        # TODO enable this after the filepicker bugfix: OM-47383
        # self._hoops_options_builder.build_pane(paths)

    async def convert_assets(self, paths):
        context = self._hoops_options_builder.get_import_options()
        hoops_context = context.cad_converter_context
        absolute_paths = []
        relative_paths = []

        for file_path in paths:
            if self.is_supported_format(file_path):
                absolute_paths.append(file_path)
                filename = os.path.basename(file_path)
                relative_paths.append(filename)

        converted_assets = await self._hoops_converter.create_import_task(
            absolute_paths, context.export_folder, hoops_context
        )

        return converted_assets
1,825
Python
28.934426
74
0.637808
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptionsBuilder.py
from omni.kit.menu import utils
from omni.kit.tool.asset_importer.file_picker import FilePicker
from omni.kit.tool.asset_importer.filebrowser import FileBrowserMode, FileBrowserSelectionType
import omni.kit.window.content_browser as content

from .converterOptions import ConverterOptions


class ConverterOptionsBuilder:
    def __init__(self):
        self._file_picker = None
        self._export_content = ConverterOptions()
        self._folder_button = None
        self._refresh_default_folder = False
        self._default_folder = None
        self._clear()

    def destroy(self):
        self._clear()
        if self._file_picker:
            self._file_picker.destroy()

    def _clear(self):
        self._built = False
        self._export_folder_field = None
        if self._folder_button:
            self._folder_button.set_clicked_fn(None)
            self._folder_button = None

    def set_default_target_folder(self, folder: str):
        self._default_folder = folder
        self._refresh_default_folder = True

    def _select_picked_folder_callback(self, paths):
        if paths:
            self._export_folder_field.model.set_value(paths[0])

    def _cancel_picked_folder_callback(self):
        pass

    def _show_file_picker(self):
        if not self._file_picker:
            mode = FileBrowserMode.OPEN
            file_type = FileBrowserSelectionType.DIRECTORY_ONLY
            filters = [(".*", "All Files (*.*)")]
            self._file_picker = FilePicker("Select Folder", mode=mode, file_type=file_type, filter_options=filters)
            self._file_picker.set_file_selected_fn(self._select_picked_folder_callback)
            self._file_picker.set_cancel_fn(self._cancel_picked_folder_callback)

        folder = self._export_folder_field.model.get_value_as_string()
        if utils.is_folder(folder):
            self._file_picker.show(folder)
        else:
            self._file_picker.show(self._get_current_dir_in_content_window())

    def _get_current_dir_in_content_window(self):
        content_window = content.get_content_window()
        return content_window.get_current_directory()

    def get_import_options(self):
        return ConverterOptions()
2,210
Python
34.66129
115
0.646606
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterHelper.py
import logging
import shutil
from urllib.parse import unquote

import omni.client
import omni.kit.window.content_browser

from .filepathUtility import Filepath
from .gdtfImporter import GDTFImporter
from .gltfImporter import GLTFImporter


class ConverterHelper:
    def _create_import_task(self, absolute_path, export_folder, _):
        absolute_path_unquoted = unquote(absolute_path)
        if absolute_path_unquoted.startswith("file:/"):
            path = absolute_path_unquoted[6:]
        else:
            path = absolute_path_unquoted

        current_nucleus_dir = omni.kit.window.content_browser.get_content_window().get_current_directory()

        file: Filepath = Filepath(path)
        # Fall back to the current content browser directory when no export
        # folder was provided (None or empty string)
        output_dir = export_folder if export_folder else current_nucleus_dir

        # Cannot unzip directly from Nucleus, must download the file beforehand
        if file.is_nucleus_path():
            tmp_path = GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR + file.basename
            result = omni.client.copy(file.fullpath, tmp_path, omni.client.CopyBehavior.OVERWRITE)
            if result == omni.client.Result.OK:
                file = Filepath(tmp_path)
            else:
                logger = logging.getLogger(__name__)
                logger.error(f"Could not import {file.fullpath} directly from Omniverse, try downloading the file instead")
                return

        url: str = GDTFImporter.convert(file, output_dir)
        return url

    async def create_import_task(self, absolute_paths, export_folder, hoops_context):
        converted_assets = {}
        for i in range(len(absolute_paths)):
            converted_assets[absolute_paths[i]] = self._create_import_task(absolute_paths[i], export_folder,
                                                                           hoops_context)
        shutil.rmtree(GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR)
        return converted_assets
1,987
Python
40.416666
123
0.642174
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/USDTools.py
import numpy as np
from typing import List, Tuple
from unidecode import unidecode
from urllib.parse import unquote

import omni.usd
from pxr import Gf, Tf, Sdf, UsdLux, Usd, UsdGeom


class USDTools:
    def make_name_valid(name: str) -> str:
        if name[:1].isdigit():
            name = "_" + name
        return Tf.MakeValidIdentifier(unidecode(name))

    def get_context():
        return omni.usd.get_context()

    def get_stage() -> Usd.Stage:
        context = USDTools.get_context()
        return context.get_stage()

    def get_stage_directory(stage: Usd.Stage = None) -> str:
        if stage is None:
            stage = USDTools.get_stage()
        root_layer = stage.GetRootLayer()
        repository_path = root_layer.realPath
        repository_path_unquoted = unquote(repository_path)
        dir_index = repository_path_unquoted.rfind("/")
        return repository_path_unquoted[:dir_index + 1]

    def get_or_create_stage(url: str) -> Usd.Stage:
        try:  # TODO: Better way to check if stage exists?
            return Usd.Stage.Open(url)
        except:
            stage = Usd.Stage.CreateNew(url)
            UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters)  # TODO get user defaults
            UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)  # TODO get user defaults
            default_prim = stage.DefinePrim("/World", "Xform")
            stage.SetDefaultPrim(default_prim)
            stage.Save()
            return stage

    def get_default_prim(stage: Usd.Stage) -> Usd.Prim:
        return stage.GetDefaultPrim()

    def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str, stage_subpath: str) -> Tuple[
            UsdGeom.Xform, UsdGeom.Xform]:
        xform_parent: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path)
        xform_ref: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path + stage_subpath)
        xform_ref_prim: Usd.Prim = xform_ref.GetPrim()
        path_unquoted = unquote(ref_path_relative)
        references: Usd.References = xform_ref_prim.GetReferences()
        references.AddReference(path_unquoted)
        return xform_parent, xform_ref

    def get_applied_scale(stage: Usd.Stage, scale_factor: float):
        stage_scale = UsdGeom.GetStageMetersPerUnit(stage)
        return scale_factor / stage_scale

    def apply_scale_xform_op(xform: UsdGeom.Xform, scale: float):
        scale_value = Gf.Vec3d(scale, scale, scale)
        xform_ordered_ops: List[UsdGeom.XformOp] = xform.GetOrderedXformOps()
        found_op = False
        for xform_op in xform_ordered_ops:
            if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale:
                xform_op.Set(scale_value)
                found_op = True

        if not found_op:
            xform.AddScaleOp().Set(scale_value)

    def np_matrix_from_gdtf(value: str) -> np.matrix:
        # GDTF Matrix is: 4x4, row-major, Right-Handed, Z-up (Distance Unit not specified, but mm implied)
        # expect form like "{x,y,z,w}{x,y,z,w}{x,y,z,w}{x,y,z,w}" where "x","y","z","w" is similar to 1.000000
        # make source compatible with np.matrix constructor: "x y z w; x y z w; x y z w; x y z w"
        value_alt = value[1:]   # Removes "{" prefix
        value_alt = value_alt[:-1]  # Removes "}" suffix
        value_alt = value_alt.replace("}{", "; ")
        value_alt = value_alt.replace(",", " ")

        np_matrix: np.matrix = np.matrix(value_alt)
        return np_matrix

    def gf_matrix_from_gdtf(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d:
        # Row major matrix
        gf_matrix = Gf.Matrix4d(
            np_matrix.item((0, 0)), np_matrix.item((1, 0)), np_matrix.item((2, 0)), np_matrix.item((3, 0)),
            np_matrix.item((0, 1)), np_matrix.item((1, 1)), np_matrix.item((2, 1)), np_matrix.item((3, 1)),
            np_matrix.item((0, 2)), np_matrix.item((1, 2)), np_matrix.item((2, 2)), np_matrix.item((3, 2)),
            np_matrix.item((0, 3)), np_matrix.item((1, 3)), np_matrix.item((2, 3)), np_matrix.item((3, 3))
        )
        return gf_matrix

    def add_beam(stage: Usd.Stage, path: str, position_matrix: str, radius: float) -> UsdLux.DiskLight:
        applied_scale = USDTools.compute_applied_scale(stage)
        axis_matrix = USDTools.get_axis_rotation_matrix()

        light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path)
        translation, rotation = USDTools.compute_xform_values(position_matrix, applied_scale, axis_matrix)
        rotation += Gf.Vec3d(-90, 0, 0)
        scale = Gf.Vec3d(radius * 2, radius * 2, 1)
        USDTools._set_light_xform(light, translation, rotation, scale)
        USDTools._additional_default_attributes(light)

        return light

    def add_light_default(stage: Usd.Stage, path: str, height: float, diameter: float):
        light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path)
        translation = Gf.Vec3d(0, -height * 0.5, 0)
        rotation = Gf.Vec3d(-90, 0, 0)
        scale = Gf.Vec3d(diameter, diameter, 1)

        USDTools._set_light_xform(light, translation, rotation, scale)
        USDTools._additional_default_attributes(light)

    def _additional_default_attributes(light: UsdLux):
        prim = light.GetPrim()
        prim.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(True)
        light.CreateIntensityAttr().Set(60_000)
        # if UsdLux.ShapingAPI.CanApply(prim):
        UsdLux.ShapingAPI.Apply(prim)

    def _set_light_xform(light: UsdLux.DiskLight, translation: Gf.Vec3d, rotation: Gf.Vec3d, scale: Gf.Vec3d):
        light.ClearXformOpOrder()  # Prevent error when overwriting
        light.AddTranslateOp().Set(translation)
        light.AddRotateZYXOp().Set(rotation)
        light.AddScaleOp().Set(scale)

    def set_light_attributes(light: UsdLux.DiskLight, beamAngle: float, intensity: float, colorTemp: float):
        if colorTemp is not None:
            light.GetEnableColorTemperatureAttr().Set(True)
            light.GetColorTemperatureAttr().Set(colorTemp)
        else:
            light.GetEnableColorTemperatureAttr().Set(False)
            light.GetColorTemperatureAttr().Set(6500)  # default value

        if intensity is not None:
            light.GetIntensityAttr().Set(intensity)

        if beamAngle is not None:
            prim: Usd.Prim = light.GetPrim()
            shapingAPI = UsdLux.ShapingAPI(prim)
            shapingAPI.GetShapingConeAngleAttr().Set(beamAngle)

    def compute_applied_scale(stage: Usd.Stage) -> float:
        gdtf_scale = 1  # GDTF dimensions are in meters
        applied_scale = USDTools.get_applied_scale(stage, gdtf_scale)
        return applied_scale

    def get_axis_rotation_matrix() -> Gf.Matrix3d:
        rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0,
                                              0, 0, 1,
                                              0, -1, 0)
        return rotate_minus90deg_xaxis

    def compute_xform_values(position_matrix: str, scale: float, axis_matrix: Gf.Matrix3d) -> Tuple[Gf.Vec3d, Gf.Vec3d]:
        np_matrix: np.matrix = USDTools.np_matrix_from_gdtf(position_matrix)
        gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_gdtf(np_matrix, scale)

        rotation: Gf.Rotation = gf_matrix.GetTranspose().ExtractRotation()
        euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis())

        translation_value = axis_matrix * gf_matrix.ExtractTranslation()
        rotation_value = axis_matrix * euler

        return translation_value, rotation_value

    def set_prim_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value):
        prim.CreateAttribute(f"mf:gdtf:{attribute_name}", attribute_type).Set(attribute_value)
7,736
Python
45.329341
117
0.633273
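A standalone sketch of the string munging np_matrix_from_gdtf performs; the GDTF matrix literal below is a made-up identity-plus-translation example in the documented "{x,y,z,w}{...}" form:

import numpy as np

value = ("{1.000000,0.000000,0.000000,0.000000}"
         "{0.000000,1.000000,0.000000,0.000000}"
         "{0.000000,0.000000,1.000000,0.000000}"
         "{0.500000,2.000000,0.000000,1.000000}")
value_alt = value[1:-1].replace("}{", "; ").replace(",", " ")
print(np.matrix(value_alt))  # 4x4 row-major matrix, as the comment above describes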
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptions.py
from .converterContext import ConverterContext


class ConverterOptions:
    def __init__(self):
        self.cad_converter_context = ConverterContext()
        self.export_folder: str = None
192
Python
23.124997
55
0.708333
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/filepathUtility.py
import os


class Filepath:
    def __init__(self, filepath: str):
        self._is_none = filepath == ""
        self.fullpath = filepath
        self.directory = os.path.dirname(filepath) + "/"
        self.basename = os.path.basename(filepath)
        self.filename, self.ext = os.path.splitext(self.basename)

    def is_nucleus_path(self) -> bool:
        # TODO: Replace with omni utility method
        return self.directory[:12] == "omniverse://"

    def get_relative_from(self, other) -> str:
        if self._is_none:
            return other.fullpath
        else:
            return "./" + other.fullpath[len(self.directory):]
641
Python
28.181817
65
0.592824
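An illustration (hypothetical paths, assuming the Filepath class above is in scope) of get_relative_from, which only yields a sensible result when other sits under self.directory:

stage = Filepath("/projects/scene/stage.usd")
asset = Filepath("/projects/scene/models/fixture.gltf")
print(stage.get_relative_from(asset))  # ./models/fixture.gltf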
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/material.py
# Dummy value.
#
# No texture, but the value to be used as 'texture semantic'
# (#aiMaterialProperty::mSemantic) for all material properties
# *not* related to textures.
#
aiTextureType_NONE = 0x0

# The texture is combined with the result of the diffuse
# lighting equation.
#
aiTextureType_DIFFUSE = 0x1

# The texture is combined with the result of the specular
# lighting equation.
#
aiTextureType_SPECULAR = 0x2

# The texture is combined with the result of the ambient
# lighting equation.
#
aiTextureType_AMBIENT = 0x3

# The texture is added to the result of the lighting
# calculation. It isn't influenced by incoming light.
#
aiTextureType_EMISSIVE = 0x4

# The texture is a height map.
#
# By convention, higher gray-scale values stand for
# higher elevations from the base height.
#
aiTextureType_HEIGHT = 0x5

# The texture is a (tangent space) normal-map.
#
# Again, there are several conventions for tangent-space
# normal maps. Assimp does (intentionally) not
# distinguish here.
#
aiTextureType_NORMALS = 0x6

# The texture defines the glossiness of the material.
#
# The glossiness is in fact the exponent of the specular
# (phong) lighting equation. Usually there is a conversion
# function defined to map the linear color values in the
# texture to a suitable exponent. Have fun.
#
aiTextureType_SHININESS = 0x7

# The texture defines per-pixel opacity.
#
# Usually 'white' means opaque and 'black' means
# 'transparency'. Or quite the opposite. Have fun.
#
aiTextureType_OPACITY = 0x8

# Displacement texture
#
# The exact purpose and format is application-dependent.
# Higher color values stand for higher vertex displacements.
#
aiTextureType_DISPLACEMENT = 0x9

# Lightmap texture (aka Ambient Occlusion)
#
# Both 'Lightmaps' and dedicated 'ambient occlusion maps' are
# covered by this material property. The texture contains a
# scaling value for the final color value of a pixel. Its
# intensity is not affected by incoming light.
#
aiTextureType_LIGHTMAP = 0xA

# Reflection texture
#
# Contains the color of a perfect mirror reflection.
# Rarely used, almost never for real-time applications.
#
aiTextureType_REFLECTION = 0xB

# Unknown texture
#
# A texture reference that does not match any of the definitions
# above is considered to be 'unknown'. It is still imported
# but is excluded from any further postprocessing.
#
aiTextureType_UNKNOWN = 0xC
2,409
Python
25.777777
65
0.757991
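A small sketch (the dict is mine, not part of the module) of how these semantics are typically used to label texture slots:

TEXTURE_TYPE_NAMES = {
    aiTextureType_DIFFUSE: "diffuse",
    aiTextureType_SPECULAR: "specular",
    aiTextureType_NORMALS: "normals",
    aiTextureType_UNKNOWN: "unknown",
}
print(TEXTURE_TYPE_NAMES.get(aiTextureType_DIFFUSE))  # "diffuse"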
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/__init__.py
from .core import *
20
Python
9.499995
19
0.7
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/core.py
""" PyAssimp This is the main-module of PyAssimp. """ import sys if sys.version_info < (2,6): raise RuntimeError('pyassimp: need python 2.6 or newer') # xrange was renamed range in Python 3 and the original range from Python 2 was removed. # To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up. if sys.version_info >= (3,0): xrange = range try: import numpy except ImportError: numpy = None import logging import ctypes from contextlib import contextmanager logger = logging.getLogger("pyassimp") # attach default null handler to logger so it doesn't complain # even if you don't attach another handler to logger logger.addHandler(logging.NullHandler()) from . import structs from . import helper from . import postprocess from .errors import AssimpError class AssimpLib(object): """ Assimp-Singleton """ load, load_mem, export, export_blob, release, dll = helper.search_library() _assimp_lib = AssimpLib() def make_tuple(ai_obj, type = None): res = None #notes: # ai_obj._fields_ = [ ("attr", c_type), ... ] # getattr(ai_obj, e[0]).__class__ == float if isinstance(ai_obj, structs.Matrix4x4): if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4)) #import pdb;pdb.set_trace() else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] res = [res[i:i+4] for i in xrange(0,16,4)] elif isinstance(ai_obj, structs.Matrix3x3): if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3)) else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] res = [res[i:i+3] for i in xrange(0,9,3)] else: if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]) else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] return res # Returns unicode object for Python 2, and str object for Python 3. def _convert_assimp_string(assimp_string): if sys.version_info >= (3, 0): return str(assimp_string.data, errors='ignore') else: return unicode(assimp_string.data, errors='ignore') # It is faster and more correct to have an init function for each assimp class def _init_face(aiFace): aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)] assimp_struct_inits = { structs.Face : _init_face } def call_init(obj, caller = None): if helper.hasattr_silent(obj,'contents'): #pointer _init(obj.contents, obj, caller) else: _init(obj,parent=caller) def _is_init_type(obj): if obj and helper.hasattr_silent(obj,'contents'): #pointer return _is_init_type(obj[0]) # null-pointer case that arises when we reach a mesh attribute # like mBitangents which use mNumVertices rather than mNumBitangents # so it breaks the 'is iterable' check. # Basically: # FIXME! elif not bool(obj): return False tname = obj.__class__.__name__ return not (tname[:2] == 'c_' or tname == 'Structure' \ or tname == 'POINTER') and not isinstance(obj, (int, str, bytes)) def _init(self, target = None, parent = None): """ Custom initialize() for C structs, adds safely accessible member functionality. :param target: set the object which receive the added methods. Useful when manipulating pointers, to skip the intermediate 'contents' deferencing. """ if not target: target = self dirself = dir(self) for m in dirself: if m.startswith("_"): continue # We should not be accessing `mPrivate` according to structs.Scene. 
if m == 'mPrivate': continue if m.startswith('mNum'): if 'm' + m[4:] in dirself: continue # will be processed later on else: name = m[1:].lower() obj = getattr(self, m) setattr(target, name, obj) continue if m == 'mName': target.name = str(_convert_assimp_string(self.mName)) target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + getattr(x, 'name','') + ")" target.__class__.__str__ = lambda x: getattr(x, 'name', '') continue name = m[1:].lower() obj = getattr(self, m) # Create tuples if isinstance(obj, structs.assimp_structs_as_tuple): setattr(target, name, make_tuple(obj)) logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower()) continue if m.startswith('m') and len(m) > 1 and m[1].upper() == m[1]: if name == "parent": setattr(target, name, parent) logger.debug("Added a parent as self." + name) continue if helper.hasattr_silent(self, 'mNum' + m[1:]): length = getattr(self, 'mNum' + m[1:]) # -> special case: properties are # stored as a dict. if m == 'mProperties': setattr(target, name, _get_properties(obj, length)) continue if not length: # empty! setattr(target, name, []) logger.debug(str(self) + ": " + name + " is an empty list.") continue try: if obj._type_ in structs.assimp_structs_as_tuple: if numpy: setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32)) logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name) else: setattr(target, name, [make_tuple(obj[i]) for i in range(length)]) logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name) else: setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array? logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")") # initialize array elements try: init = assimp_struct_inits[type(obj[0])] except KeyError: if _is_init_type(obj[0]): for e in getattr(target, name): call_init(e, target) else: for e in getattr(target, name): init(e) except IndexError: logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.") sys.exit(1) except ValueError as e: logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.") if "setting an array element with a sequence" in str(e): logger.error("Note that pyassimp does not currently " "support meshes with mixed triangles " "and quads. Try to load your mesh with" " a post-processing to triangulate your" " faces.") raise e else: # starts with 'm' but not iterable setattr(target, name, obj) logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")") if _is_init_type(obj): call_init(obj, target) if isinstance(self, structs.Mesh): _finalize_mesh(self, target) if isinstance(self, structs.Texture): _finalize_texture(self, target) if isinstance(self, structs.Metadata): _finalize_metadata(self, target) return self def pythonize_assimp(type, obj, scene): """ This method modify the Assimp data structures to make them easier to work with in Python. Supported operations: - MESH: replace a list of mesh IDs by reference to these meshes - ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node. 
:param type: the type of modification to operate (cf above) :param obj: the input object to modify :param scene: a reference to the whole scene """ if type == "MESH": meshes = [] for i in obj: meshes.append(scene.meshes[i]) return meshes if type == "ADDTRANSFORMATION": def getnode(node, name): if node.name == name: return node for child in node.children: n = getnode(child, name) if n: return n node = getnode(scene.rootnode, obj.name) if not node: raise AssimpError("Object " + str(obj) + " has no associated node!") setattr(obj, "transformation", node.transformation) def recur_pythonize(node, scene): ''' Recursively call pythonize_assimp on nodes tree to apply several post-processing to pythonize the assimp datastructures. ''' node.meshes = pythonize_assimp("MESH", node.meshes, scene) for mesh in node.meshes: mesh.material = scene.materials[mesh.materialindex] for cam in scene.cameras: pythonize_assimp("ADDTRANSFORMATION", cam, scene) for c in node.children: recur_pythonize(c, scene) def release(scene): ''' Release resources of a loaded scene. ''' _assimp_lib.release(ctypes.pointer(scene)) @contextmanager def load(filename, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Load a model into a scene. On failure throws AssimpError. Arguments --------- filename: Either a filename or a file object to load model from. If a file object is passed, file_type MUST be specified Otherwise Assimp has no idea which importer to use. This is named 'filename' so as to not break legacy code. processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) file_type: string of file extension, such as 'stl' Returns --------- Scene object with model data ''' if hasattr(filename, 'read'): # This is the case where a file object has been passed to load. # It is calling the following function: # const aiScene* aiImportFileFromMemory(const char* pBuffer, # unsigned int pLength, # unsigned int pFlags, # const char* pHint) if file_type is None: raise AssimpError('File type must be specified when passing file objects!') data = filename.read() model = _assimp_lib.load_mem(data, len(data), processing, file_type) else: # a filename string has been passed model = _assimp_lib.load(filename.encode(sys.getfilesystemencoding()), processing) if not model: raise AssimpError('Could not import file!') scene = _init(model.contents) recur_pythonize(scene.rootnode, scene) try: yield scene finally: release(scene) def export(scene, filename, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Export a scene. On failure throws AssimpError. Arguments --------- scene: scene to export. filename: Filename that the scene should be exported to. file_type: string of file exporter to use. For example "collada". processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. 
Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) ''' exportStatus = _assimp_lib.export(ctypes.pointer(scene), file_type.encode("ascii"), filename.encode(sys.getfilesystemencoding()), processing) if exportStatus != 0: raise AssimpError('Could not export scene!') def export_blob(scene, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Export a scene and return a blob in the correct format. On failure throws AssimpError. Arguments --------- scene: scene to export. file_type: string of file exporter to use. For example "collada". processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) Returns --------- Pointer to structs.ExportDataBlob ''' exportBlobPtr = _assimp_lib.export_blob(ctypes.pointer(scene), file_type.encode("ascii"), processing) if exportBlobPtr == 0: raise AssimpError('Could not export scene to blob!') return exportBlobPtr def _finalize_texture(tex, target): setattr(target, "achformathint", tex.achFormatHint) if numpy: data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]) else: data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)] setattr(target, "data", data) def _finalize_mesh(mesh, target): """ Building of meshes is a bit specific. We override here the various datasets that can not be process as regular fields. For instance, the length of the normals array is mNumVertices (no mNumNormals is available) """ nb_vertices = getattr(mesh, "mNumVertices") def fill(name): mAttr = getattr(mesh, name) if numpy: if mAttr: data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32) setattr(target, name[1:].lower(), data) else: setattr(target, name[1:].lower(), numpy.array([], dtype="float32")) else: if mAttr: data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)] setattr(target, name[1:].lower(), data) else: setattr(target, name[1:].lower(), []) def fillarray(name): mAttr = getattr(mesh, name) data = [] for index, mSubAttr in enumerate(mAttr): if mSubAttr: data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)]) if numpy: setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32)) else: setattr(target, name[1:].lower(), data) fill("mNormals") fill("mTangents") fill("mBitangents") fillarray("mColors") fillarray("mTextureCoords") # prepare faces if numpy: faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32) else: faces = [f.indices for f in target.faces] setattr(target, 'faces', faces) def _init_metadata_entry(entry): entry.type = entry.mType if entry.type == structs.MetadataEntry.AI_BOOL: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_bool)).contents.value elif entry.type == structs.MetadataEntry.AI_INT32: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_int32)).contents.value elif entry.type == structs.MetadataEntry.AI_UINT64: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_uint64)).contents.value elif entry.type == structs.MetadataEntry.AI_FLOAT: entry.data = ctypes.cast(entry.mData, 
ctypes.POINTER(ctypes.c_float)).contents.value elif entry.type == structs.MetadataEntry.AI_DOUBLE: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_double)).contents.value elif entry.type == structs.MetadataEntry.AI_AISTRING: assimp_string = ctypes.cast(entry.mData, ctypes.POINTER(structs.String)).contents entry.data = _convert_assimp_string(assimp_string) elif entry.type == structs.MetadataEntry.AI_AIVECTOR3D: assimp_vector = ctypes.cast(entry.mData, ctypes.POINTER(structs.Vector3D)).contents entry.data = make_tuple(assimp_vector) return entry def _finalize_metadata(metadata, target): """ Building the metadata object is a bit specific. Firstly, there are two separate arrays: one with metadata keys and one with metadata values, and there are no corresponding mNum* attributes, so the C arrays are not converted to Python arrays using the generic code in the _init function. Secondly, a metadata entry value has to be cast according to declared metadata entry type. """ length = metadata.mNumProperties setattr(target, 'keys', [str(_convert_assimp_string(metadata.mKeys[i])) for i in range(length)]) setattr(target, 'values', [_init_metadata_entry(metadata.mValues[i]) for i in range(length)]) class PropertyGetter(dict): def __getitem__(self, key): semantic = 0 if isinstance(key, tuple): key, semantic = key return dict.__getitem__(self, (key, semantic)) def keys(self): for k in dict.keys(self): yield k[0] def __iter__(self): return self.keys() def items(self): for k, v in dict.items(self): yield k[0], v def _get_properties(properties, length): """ Convenience Function to get the material properties as a dict and values in a python format. """ result = {} #read all properties for p in [properties[i] for i in range(length)]: #the name p = p.contents key = str(_convert_assimp_string(p.mKey)) key = (key.split('.')[1], p.mSemantic) #the data if p.mType == 1: arr = ctypes.cast(p.mData, ctypes.POINTER(ctypes.c_float * int(p.mDataLength/ctypes.sizeof(ctypes.c_float))) ).contents value = [x for x in arr] elif p.mType == 3: #string can't be an array value = _convert_assimp_string(ctypes.cast(p.mData, ctypes.POINTER(structs.MaterialPropertyString)).contents) elif p.mType == 4: arr = ctypes.cast(p.mData, ctypes.POINTER(ctypes.c_int * int(p.mDataLength/ctypes.sizeof(ctypes.c_int))) ).contents value = [x for x in arr] else: value = p.mData[:p.mDataLength] if len(value) == 1: [value] = value result[key] = value return PropertyGetter(result) def decompose_matrix(matrix): if not isinstance(matrix, structs.Matrix4x4): raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!") scaling = structs.Vector3D() rotation = structs.Quaternion() position = structs.Vector3D() _assimp_lib.dll.aiDecomposeMatrix(ctypes.pointer(matrix), ctypes.byref(scaling), ctypes.byref(rotation), ctypes.byref(position)) return scaling._init(), rotation._init(), position._init()
20,821
Python
36.115864
221
0.58369
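A minimal load-and-inspect sketch using this module (the model path is hypothetical; assimp's shared library must be findable by helper.search_library):

from pyassimp import load

with load("/tmp/models/duck.3ds") as scene:  # hypothetical input file
    print(len(scene.meshes), "meshes")
    for mesh in scene.meshes:
        print(mesh.name, len(mesh.vertices), "vertices")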
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/errors.py
#-*- coding: UTF-8 -*-

"""
All possible errors.
"""


class AssimpError(BaseException):
    """
    If an internal error occurs.
    """
    pass
146
Python
11.249999
33
0.568493
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/structs.py
#-*- coding: utf-8 -*- from ctypes import POINTER, c_void_p, c_uint, c_char, c_float, Structure, c_double, c_ubyte, c_size_t, c_uint32 class Vector2D(Structure): """ See 'vector2.h' for details. """ _fields_ = [ ("x", c_float),("y", c_float), ] class Matrix3x3(Structure): """ See 'matrix3x3.h' for details. """ _fields_ = [ ("a1", c_float),("a2", c_float),("a3", c_float), ("b1", c_float),("b2", c_float),("b3", c_float), ("c1", c_float),("c2", c_float),("c3", c_float), ] class Texel(Structure): """ See 'texture.h' for details. """ _fields_ = [ ("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte), ] class Color4D(Structure): """ See 'color4.h' for details. """ _fields_ = [ # Red, green, blue and alpha color values ("r", c_float),("g", c_float),("b", c_float),("a", c_float), ] class Plane(Structure): """ See 'types.h' for details. """ _fields_ = [ # Plane equation ("a", c_float),("b", c_float),("c", c_float),("d", c_float), ] class Color3D(Structure): """ See 'types.h' for details. """ _fields_ = [ # Red, green and blue color values ("r", c_float),("g", c_float),("b", c_float), ] class String(Structure): """ See 'types.h' for details. """ MAXLEN = 1024 _fields_ = [ # Binary length of the string excluding the terminal 0. This is NOT the # logical length of strings containing UTF-8 multibyte sequences! It's # the number of bytes from the beginning of the string to its end. ("length", c_uint32), # String buffer. Size limit is MAXLEN ("data", c_char*MAXLEN), ] class MaterialPropertyString(Structure): """ See 'MaterialSystem.cpp' for details. The size of length is truncated to 4 bytes on 64-bit platforms when used as a material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details). """ MAXLEN = 1024 _fields_ = [ # Binary length of the string excluding the terminal 0. This is NOT the # logical length of strings containing UTF-8 multibyte sequences! It's # the number of bytes from the beginning of the string to its end. ("length", c_uint32), # String buffer. Size limit is MAXLEN ("data", c_char*MAXLEN), ] class MemoryInfo(Structure): """ See 'types.h' for details. """ _fields_ = [ # Storage allocated for texture data ("textures", c_uint), # Storage allocated for material data ("materials", c_uint), # Storage allocated for mesh data ("meshes", c_uint), # Storage allocated for node data ("nodes", c_uint), # Storage allocated for animation data ("animations", c_uint), # Storage allocated for camera data ("cameras", c_uint), # Storage allocated for light data ("lights", c_uint), # Total storage allocated for the full import. ("total", c_uint), ] class Quaternion(Structure): """ See 'quaternion.h' for details. """ _fields_ = [ # w,x,y,z components of the quaternion ("w", c_float),("x", c_float),("y", c_float),("z", c_float), ] class Face(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # Number of indices defining this face. # The maximum value for this member is #AI_MAX_FACE_INDICES. ("mNumIndices", c_uint), # Pointer to the indices array. Size of the array is given in numIndices. ("mIndices", POINTER(c_uint)), ] class VertexWeight(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # Index of the vertex which is influenced by the bone. ("mVertexId", c_uint), # The strength of the influence in the range (0...1). # The influence from all bones at one vertex amounts to 1. ("mWeight", c_float), ] class Matrix4x4(Structure): """ See 'matrix4x4.h' for details. 
""" _fields_ = [ ("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float), ("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float), ("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float), ("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float), ] class Vector3D(Structure): """ See 'vector3.h' for details. """ _fields_ = [ ("x", c_float),("y", c_float),("z", c_float), ] class MeshKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # Index into the aiMesh::mAnimMeshes array of the # mesh corresponding to the #aiMeshAnim hosting this # key frame. The referenced anim mesh is evaluated # according to the rules defined in the docs for #aiAnimMesh. ("mValue", c_uint), ] class MetadataEntry(Structure): """ See 'metadata.h' for details """ AI_BOOL = 0 AI_INT32 = 1 AI_UINT64 = 2 AI_FLOAT = 3 AI_DOUBLE = 4 AI_AISTRING = 5 AI_AIVECTOR3D = 6 AI_META_MAX = 7 _fields_ = [ # The type field uniquely identifies the underlying type of the data field ("mType", c_uint), ("mData", c_void_p), ] class Metadata(Structure): """ See 'metadata.h' for details """ _fields_ = [ # Length of the mKeys and mValues arrays, respectively ("mNumProperties", c_uint), # Arrays of keys, may not be NULL. Entries in this array may not be NULL # as well. ("mKeys", POINTER(String)), # Arrays of values, may not be NULL. Entries in this array may be NULL # if the corresponding property key has no assigned value. ("mValues", POINTER(MetadataEntry)), ] class Node(Structure): """ See 'scene.h' for details. """ Node._fields_ = [ # The name of the node. # The name might be empty (length of zero) but all nodes which # need to be accessed afterwards by bones or anims are usually named. # Multiple nodes may have the same name, but nodes which are accessed # by bones (see #aiBone and #aiMesh::mBones) *must* be unique. # Cameras and lights are assigned to a specific node name - if there # are multiple nodes with this name, they're assigned to each of them. # <br> # There are no limitations regarding the characters contained in # this text. You should be able to handle stuff like whitespace, tabs, # linefeeds, quotation marks, ampersands, ... . ("mName", String), # The transformation relative to the node's parent. ("mTransformation", Matrix4x4), # Parent node. NULL if this node is the root node. ("mParent", POINTER(Node)), # The number of child nodes of this node. ("mNumChildren", c_uint), # The child nodes of this node. NULL if mNumChildren is 0. ("mChildren", POINTER(POINTER(Node))), # The number of meshes of this node. ("mNumMeshes", c_uint), # The meshes of this node. Each entry is an index into the mesh ("mMeshes", POINTER(c_uint)), # Metadata associated with this node or NULL if there is no metadata. # Whether any metadata is generated depends on the source file format. ("mMetadata", POINTER(Metadata)), ] class Light(Structure): """ See 'light.h' for details. """ _fields_ = [ # The name of the light source. # There must be a node in the scenegraph with the same name. # This node specifies the position of the light in the scene # hierarchy and can be animated. ("mName", String), # The type of the light source. # aiLightSource_UNDEFINED is not a valid value for this member. ("mType", c_uint), # Position of the light source in space. Relative to the # transformation of the node corresponding to the light. # The position is undefined for directional lights. ("mPosition", Vector3D), # Direction of the light source in space. 
Relative to the # transformation of the node corresponding to the light. # The direction is undefined for point lights. The vector # may be normalized, but it needn't. ("mDirection", Vector3D), # Up direction of the light source in space. Relative to the # transformation of the node corresponding to the light. # # The direction is undefined for point lights. The vector # may be normalized, but it needn't. ("mUp", Vector3D), # Constant light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att0 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationConstant", c_float), # Linear light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att1 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationLinear", c_float), # Quadratic light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att2 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationQuadratic", c_float), # Diffuse color of the light source # The diffuse light color is multiplied with the diffuse # material color to obtain the final color that contributes # to the diffuse shading term. ("mColorDiffuse", Color3D), # Specular color of the light source # The specular light color is multiplied with the specular # material color to obtain the final color that contributes # to the specular shading term. ("mColorSpecular", Color3D), # Ambient color of the light source # The ambient light color is multiplied with the ambient # material color to obtain the final color that contributes # to the ambient shading term. Most renderers will ignore # this value it, is just a remaining of the fixed-function pipeline # that is still supported by quite many file formats. ("mColorAmbient", Color3D), # Inner angle of a spot light's light cone. # The spot light has maximum influence on objects inside this # angle. The angle is given in radians. It is 2PI for point # lights and undefined for directional lights. ("mAngleInnerCone", c_float), # Outer angle of a spot light's light cone. # The spot light does not affect objects outside this angle. # The angle is given in radians. It is 2PI for point lights and # undefined for directional lights. The outer angle must be # greater than or equal to the inner angle. # It is assumed that the application uses a smooth # interpolation between the inner and the outer cone of the # spot light. ("mAngleOuterCone", c_float), # Size of area light source. ("mSize", Vector2D), ] class Texture(Structure): """ See 'texture.h' for details. """ _fields_ = [ # Width of the texture, in pixels # If mHeight is zero the texture is compressed in a format # like JPEG. In this case mWidth specifies the size of the # memory area pcData is pointing to, in bytes. ("mWidth", c_uint), # Height of the texture, in pixels # If this value is zero, pcData points to an compressed texture # in any format (e.g. JPEG). ("mHeight", c_uint), # A hint from the loader to make it easier for applications # to determine the type of embedded textures. 
# # If mHeight != 0 this member is show how data is packed. Hint will consist of # two parts: channel order and channel bitness (count of the bits for every # color channel). For simple parsing by the viewer it's better to not omit # absent color channel and just use 0 for bitness. For example: # 1. Image contain RGBA and 8 bit per channel, achFormatHint == "rgba8888"; # 2. Image contain ARGB and 8 bit per channel, achFormatHint == "argb8888"; # 3. Image contain RGB and 5 bit for R and B channels and 6 bit for G channel, # achFormatHint == "rgba5650"; # 4. One color image with B channel and 1 bit for it, achFormatHint == "rgba0010"; # If mHeight == 0 then achFormatHint is set set to '\\0\\0\\0\\0' if the loader has no additional # information about the texture file format used OR the # file extension of the format without a trailing dot. If there # are multiple file extensions for a format, the shortest # extension is chosen (JPEG maps to 'jpg', not to 'jpeg'). # E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case. # The fourth character will always be '\\0'. ("achFormatHint", c_char*9), # Data of the texture. # Points to an array of mWidth # mHeight aiTexel's. # The format of the texture data is always ARGB8888 to # make the implementation for user of the library as easy # as possible. If mHeight = 0 this is a pointer to a memory # buffer of size mWidth containing the compressed texture # data. Good luck, have fun! ("pcData", POINTER(Texel)), # Texture original filename # Used to get the texture reference ("mFilename", String), ] class Ray(Structure): """ See 'types.h' for details. """ _fields_ = [ # Position and direction of the ray ("pos", Vector3D),("dir", Vector3D), ] class UVTransform(Structure): """ See 'material.h' for details. """ _fields_ = [ # Translation on the u and v axes. # The default value is (0|0). ("mTranslation", Vector2D), # Scaling on the u and v axes. # The default value is (1|1). ("mScaling", Vector2D), # Rotation - in counter-clockwise direction. # The rotation angle is specified in radians. The # rotation center is 0.5f|0.5f. The default value # 0.f. ("mRotation", c_float), ] class MaterialProperty(Structure): """ See 'material.h' for details. """ _fields_ = [ # Specifies the name of the property (key) # Keys are generally case insensitive. ("mKey", String), # Textures: Specifies their exact usage semantic. # For non-texture properties, this member is always 0 # (or, better-said, #aiTextureType_NONE). ("mSemantic", c_uint), # Textures: Specifies the index of the texture. # For non-texture properties, this member is always 0. ("mIndex", c_uint), # Size of the buffer mData is pointing to, in bytes. # This value may not be 0. ("mDataLength", c_uint), # Type information for the property. # Defines the data layout inside the data buffer. This is used # by the library internally to perform debug checks and to # utilize proper type conversions. # (It's probably a hacky solution, but it works.) ("mType", c_uint), # Binary buffer to hold the property's value. # The size of the buffer is always mDataLength. ("mData", POINTER(c_char)), ] class Material(Structure): """ See 'material.h' for details. """ _fields_ = [ # List of all material properties loaded. ("mProperties", POINTER(POINTER(MaterialProperty))), # Number of properties in the data base ("mNumProperties", c_uint), # Storage allocated ("mNumAllocated", c_uint), ] class Bone(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # The name of the bone. 
("mName", String), # The number of vertices affected by this bone # The maximum value for this member is #AI_MAX_BONE_WEIGHTS. ("mNumWeights", c_uint), # The vertices affected by this bone ("mWeights", POINTER(VertexWeight)), # Matrix that transforms from mesh space to bone space in bind pose ("mOffsetMatrix", Matrix4x4), ] class AnimMesh(Structure): """ See 'mesh.h' for details. """ AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 _fields_ = [ # Anim Mesh name ("mName", String), # Replacement for aiMesh::mVertices. If this array is non-NULL, # it *must* contain mNumVertices entries. The corresponding # array in the host mesh must be non-NULL as well - animation # meshes may neither add or nor remove vertex components (if # a replacement array is NULL and the corresponding source # array is not, the source data is taken instead) ("mVertices", POINTER(Vector3D)), # Replacement for aiMesh::mNormals. ("mNormals", POINTER(Vector3D)), # Replacement for aiMesh::mTangents. ("mTangents", POINTER(Vector3D)), # Replacement for aiMesh::mBitangents. ("mBitangents", POINTER(Vector3D)), # Replacement for aiMesh::mColors ("mColors", POINTER(Color4D) * AI_MAX_NUMBER_OF_COLOR_SETS), # Replacement for aiMesh::mTextureCoords ("mTextureCoords", POINTER(Vector3D) * AI_MAX_NUMBER_OF_TEXTURECOORDS), # The number of vertices in the aiAnimMesh, and thus the length of all # the member arrays. # # This has always the same value as the mNumVertices property in the # corresponding aiMesh. It is duplicated here merely to make the length # of the member arrays accessible even if the aiMesh is not known, e.g. # from language bindings. ("mNumVertices", c_uint), # Weight of the AnimMesh. ("mWeight", c_float), ] class Mesh(Structure): """ See 'mesh.h' for details. """ AI_MAX_FACE_INDICES = 0x7fff AI_MAX_BONE_WEIGHTS = 0x7fffffff AI_MAX_VERTICES = 0x7fffffff AI_MAX_FACES = 0x7fffffff AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 _fields_ = [ # Bitwise combination of the members of the #aiPrimitiveType enum. # This specifies which types of primitives are present in the mesh. # The "SortByPrimitiveType"-Step can be used to make sure the # output meshes consist of one primitive type each. ("mPrimitiveTypes", c_uint), # The number of vertices in this mesh. # This is also the size of all of the per-vertex data arrays. # The maximum value for this member is #AI_MAX_VERTICES. ("mNumVertices", c_uint), # The number of primitives (triangles, polygons, lines) in this mesh. # This is also the size of the mFaces array. # The maximum value for this member is #AI_MAX_FACES. ("mNumFaces", c_uint), # Vertex positions. # This array is always present in a mesh. The array is # mNumVertices in size. ("mVertices", POINTER(Vector3D)), # Vertex normals. # The array contains normalized vectors, NULL if not present. # The array is mNumVertices in size. Normals are undefined for # point and line primitives. A mesh consisting of points and # lines only may not have normal vectors. Meshes with mixed # primitive types (i.e. lines and triangles) may have normals, # but the normals for vertices that are only referenced by # point or line primitives are undefined and set to QNaN (WARN: # qNaN compares to inequal to *everything*, even to qNaN itself. # Using code like this to check whether a field is qnan is: # @code #define IS_QNAN(f) (f != f) # @endcode # still dangerous because even 1.f == 1.f could evaluate to false! ( # remember the subtleties of IEEE754 artithmetics). 
Use stuff like # @c fpclassify instead. # @note Normal vectors computed by Assimp are always unit-length. # However, this needn't apply for normals that have been taken # directly from the model file. ("mNormals", POINTER(Vector3D)), # Vertex tangents. # The tangent of a vertex points in the direction of the positive # X texture axis. The array contains normalized vectors, NULL if # not present. The array is mNumVertices in size. A mesh consisting # of points and lines only may not have normal vectors. Meshes with # mixed primitive types (i.e. lines and triangles) may have # normals, but the normals for vertices that are only referenced by # point or line primitives are undefined and set to qNaN. See # the #mNormals member for a detailed discussion of qNaNs. # @note If the mesh contains tangents, it automatically also # contains bitangents (the bitangent is just the cross product of # tangent and normal vectors). ("mTangents", POINTER(Vector3D)), # Vertex bitangents. # The bitangent of a vertex points in the direction of the positive # Y texture axis. The array contains normalized vectors, NULL if not # present. The array is mNumVertices in size. # @note If the mesh contains tangents, it automatically also contains # bitangents. ("mBitangents", POINTER(Vector3D)), # Vertex color sets. # A mesh may contain 0 to #AI_MAX_NUMBER_OF_COLOR_SETS vertex # colors per vertex. NULL if not present. Each array is # mNumVertices in size if present. ("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS), # Vertex texture coords, also known as UV channels. # A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per # vertex. NULL if not present. The array is mNumVertices in size. ("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS), # Specifies the number of components for a given UV channel. # Up to three channels are supported (UVW, for accessing volume # or cube maps). If the value is 2 for a given channel n, the # component p.z of mTextureCoords[n][p] is set to 0.0f. # If the value is 1 for a given channel, p.y is set to 0.0f, too. # @note 4D coords are not supported ("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS), # The faces the mesh is constructed from. # Each face refers to a number of vertices by their indices. # This array is always present in a mesh, its size is given # in mNumFaces. If the #AI_SCENE_FLAGS_NON_VERBOSE_FORMAT # is NOT set, each face references a unique set of vertices. ("mFaces", POINTER(Face)), # The number of bones this mesh contains. # Can be 0, in which case the mBones array is NULL. ("mNumBones", c_uint), # The bones of this mesh. # A bone consists of a name by which it can be found in the # frame hierarchy and a set of vertex weights. ("mBones", POINTER(POINTER(Bone))), # The material used by this mesh. # A mesh uses only a single material. If an imported model uses # multiple materials, the import splits up the mesh. Use this value # as an index into the scene's material list. ("mMaterialIndex", c_uint), # Name of the mesh. Meshes can be named, but this is not a # requirement and leaving this field empty is totally fine. # There are mainly three uses for mesh names: # - some formats name nodes and meshes independently. # - importers tend to split meshes up to meet the # one-material-per-mesh requirement. Assigning # the same (dummy) name to each of the result meshes # aids the caller in recovering the original mesh # partitioning. # - Vertex animations refer to meshes by their names. ("mName", String), # The number of attachment meshes.
# Currently known to work with loaders: # - Collada # - gltf ("mNumAnimMeshes", c_uint), # Attachment meshes for this mesh, for vertex-based animation. # Attachment meshes carry replacement data for some of the # mesh's vertex components (usually positions, normals). # Currently known to work with loaders: # - Collada # - gltf ("mAnimMeshes", POINTER(POINTER(AnimMesh))), # Method of morphing when anim meshes are specified. ("mMethod", c_uint), ] class Camera(Structure): """ See 'camera.h' for details. """ _fields_ = [ # The name of the camera. # There must be a node in the scenegraph with the same name. # This node specifies the position of the camera in the scene # hierarchy and can be animated. ("mName", String), # Position of the camera relative to the coordinate space # defined by the corresponding node. # The default value is 0|0|0. ("mPosition", Vector3D), # 'Up' - vector of the camera coordinate system relative to # the coordinate space defined by the corresponding node. # The 'right' vector of the camera coordinate system is # the cross product of the up and lookAt vectors. # The default value is 0|1|0. The vector # may be normalized, but it needn't be. ("mUp", Vector3D), # 'LookAt' - vector of the camera coordinate system relative to # the coordinate space defined by the corresponding node. # This is the viewing direction of the user. # The default value is 0|0|1. The vector # may be normalized, but it needn't be. ("mLookAt", Vector3D), # Half horizontal field of view angle, in radians. # The field of view angle is the angle between the center # line of the screen and the left or right border. # The default value is 1/4PI. ("mHorizontalFOV", c_float), # Distance of the near clipping plane from the camera. # The value may not be 0.f (for arithmetic reasons to prevent # a division by zero). The default value is 0.1f. ("mClipPlaneNear", c_float), # Distance of the far clipping plane from the camera. # The far clipping plane must, of course, be further away than the # near clipping plane. The default value is 1000.f. The ratio # between the near and the far plane should not be too # large (between 1000-10000 should be ok) to avoid floating-point # inaccuracies which could lead to z-fighting. ("mClipPlaneFar", c_float), # Screen aspect ratio. # This is the ratio between the width and the height of the # screen. Typical values are 4/3, 1/2 or 1/1. This value is # 0 if the aspect ratio is not defined in the source file. # 0 is also the default value. ("mAspect", c_float), ] class VectorKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The value of this key ("mValue", Vector3D), ] class QuatKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The value of this key ("mValue", Quaternion), ] class MeshMorphKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The values and weights at the time of this key ("mValues", POINTER(c_uint)), ("mWeights", POINTER(c_double)), # The number of values and weights ("mNumValuesAndWeights", c_uint), ] class NodeAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The name of the node affected by this animation. The node # must exist and it must be unique. ("mNodeName", String), # The number of position keys ("mNumPositionKeys", c_uint), # The position keys of this animation channel. Positions are # specified as 3D vectors. The array is mNumPositionKeys in size.
# If there are position keys, there will also be at least one # scaling and one rotation key. ("mPositionKeys", POINTER(VectorKey)), # The number of rotation keys ("mNumRotationKeys", c_uint), # The rotation keys of this animation channel. Rotations are # given as quaternions, which are 4D vectors. The array is # mNumRotationKeys in size. # If there are rotation keys, there will also be at least one # scaling and one position key. ("mRotationKeys", POINTER(QuatKey)), # The number of scaling keys ("mNumScalingKeys", c_uint), # The scaling keys of this animation channel. Scalings are # specified as 3D vectors. The array is mNumScalingKeys in size. # If there are scaling keys, there will also be at least one # position and one rotation key. ("mScalingKeys", POINTER(VectorKey)), # Defines how the animation behaves before the first # key is encountered. # The default value is aiAnimBehaviour_DEFAULT (the original # transformation matrix of the affected node is used). ("mPreState", c_uint), # Defines how the animation behaves after the last # key was processed. # The default value is aiAnimBehaviour_DEFAULT (the original # transformation matrix of the affected node is taken). ("mPostState", c_uint), ] class MeshAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # Name of the mesh to be animated. An empty string is not allowed, # animated meshes need to be named (not necessarily uniquely, # the name can basically serve as a wildcard to select a group # of meshes with similar animation setup) ("mName", String), # Size of the #mKeys array. Must be at least 1. ("mNumKeys", c_uint), # Key frames of the animation. May not be NULL. ("mKeys", POINTER(MeshKey)), ] class MeshMorphAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # Name of the mesh to be animated. An empty string is not allowed, # animated meshes need to be named (not necessarily uniquely, # the name can basically serve as a wildcard to select a group # of meshes with similar animation setup) ("mName", String), # Size of the #mKeys array. Must be at least 1. ("mNumKeys", c_uint), # Key frames of the animation. May not be NULL. ("mKeys", POINTER(MeshMorphKey)), ] class Animation(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The name of the animation. If the modeling package this data was # exported from supports only a single animation channel, this # name is usually empty (length is zero). ("mName", String), # Duration of the animation in ticks. ("mDuration", c_double), # Ticks per second. 0 if not specified in the imported file ("mTicksPerSecond", c_double), # The number of bone animation channels. Each channel affects # a single node. ("mNumChannels", c_uint), # The node animation channels. Each channel affects a single node. # The array is mNumChannels in size. ("mChannels", POINTER(POINTER(NodeAnim))), # The number of mesh animation channels. Each channel affects # a single mesh and defines vertex-based animation. ("mNumMeshChannels", c_uint), # The mesh animation channels. Each channel affects a single mesh. # The array is mNumMeshChannels in size. ("mMeshChannels", POINTER(POINTER(MeshAnim))), # The number of morph mesh animation channels. Each channel affects # a single mesh and defines morphing animation. ("mNumMorphMeshChannels", c_uint), # The morph mesh animation channels. Each channel affects a single mesh. # The array is mNumMorphMeshChannels in size. ("mMorphMeshChannels", POINTER(POINTER(MeshMorphAnim))), ] class ExportDataBlob(Structure): """ See 'cexport.h' for details.
Note that the '_fields_' definition is outside the class to allow the 'next' field to be recursive """ pass ExportDataBlob._fields_ = [ # Size of the data in bytes ("size", c_size_t), # The data. ("data", c_void_p), # Name of the blob. An empty string always # indicates the first (and primary) blob, # which contains the actual file data. # Any other blobs are auxiliary files produced # by exporters (e.g. material files). Existence # of such files depends on the file format. Most # formats don't split assets across multiple files. # # If used, blob names usually contain the file # extension that should be used when writing # the data to disc. ("name", String), # Pointer to the next blob in the chain or NULL if there is none. ("next", POINTER(ExportDataBlob)), ] class Scene(Structure): """ See 'aiScene.h' for details. """ AI_SCENE_FLAGS_INCOMPLETE = 0x1 AI_SCENE_FLAGS_VALIDATED = 0x2 AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4 AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8 AI_SCENE_FLAGS_TERRAIN = 0x10 AI_SCENE_FLAGS_ALLOW_SHARED = 0x20 _fields_ = [ # Any combination of the AI_SCENE_FLAGS_XXX flags. By default # this value is 0; no flags are set. Most applications will # want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE # bit set. ("mFlags", c_uint), # The root node of the hierarchy. # There will always be at least the root node if the import # was successful (and no special flags have been set). # Presence of further nodes depends on the format and content # of the imported file. ("mRootNode", POINTER(Node)), # The number of meshes in the scene. ("mNumMeshes", c_uint), # The array of meshes. # Use the indices given in the aiNode structure to access # this array. The array is mNumMeshes in size. If the # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always # be at least ONE mesh. ("mMeshes", POINTER(POINTER(Mesh))), # The number of materials in the scene. ("mNumMaterials", c_uint), # The array of materials. # Use the index given in each aiMesh structure to access this # array. The array is mNumMaterials in size. If the # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always # be at least ONE material. ("mMaterials", POINTER(POINTER(Material))), # The number of animations in the scene. ("mNumAnimations", c_uint), # The array of animations. # All animations imported from the given file are listed here. # The array is mNumAnimations in size. ("mAnimations", POINTER(POINTER(Animation))), # The number of textures embedded into the file ("mNumTextures", c_uint), # The array of embedded textures. # Not many file formats embed their textures into the file. # An example is Quake's MDL format (which is also used by # some GameStudio versions). ("mTextures", POINTER(POINTER(Texture))), # The number of light sources in the scene. Light sources # are fully optional, in most cases this attribute will be 0 ("mNumLights", c_uint), # The array of light sources. # All light sources imported from the given file are # listed here. The array is mNumLights in size. ("mLights", POINTER(POINTER(Light))), # The number of cameras in the scene. Cameras # are fully optional, in most cases this attribute will be 0 ("mNumCameras", c_uint), # The array of cameras. # All cameras imported from the given file are listed here. # The array is mNumCameras in size. The first camera in the # array (if existing) is the default camera view into # the scene.
("mCameras", POINTER(POINTER(Camera))), # This data contains global metadata which belongs to the scene like # unit-conversions, versions, vendors or other model-specific data. This # can be used to store format-specific metadata as well. ("mMetadata", POINTER(Metadata)), # Internal data, do not touch ("mPrivate", POINTER(c_char)), ] assimp_structs_as_tuple = (Matrix4x4, Matrix3x3, Vector2D, Vector3D, Color3D, Color4D, Quaternion, Plane, Texel)
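# Usage sketch (illustrative, not from the vendored source): the Structure
# subclasses above mirror assimp's C structs one-to-one, so a loaded scene can
# be walked with plain ctypes indexing. This assumes `scene_ptr` is the
# POINTER(Scene) returned by aiImportFile (bound in helper.py further below);
# all names here are made up for the example.
def _print_mesh_summary(scene_ptr):
    scene = scene_ptr.contents
    for i in range(scene.mNumMeshes):
        mesh = scene.mMeshes[i].contents  # POINTER(POINTER(Mesh)) -> Mesh
        v0 = mesh.mVertices[0] if mesh.mNumVertices else None  # C-style array indexing
        print(i, mesh.mNumVertices, mesh.mNumFaces, (v0.x, v0.y, v0.z) if v0 else None)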
41,444
Python
35.3234
111
0.56136
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/postprocess.py
## <hr>Calculates the tangents and bitangents for the imported meshes. # # Does nothing if a mesh does not have normals. You might want this post # processing step to be executed if you plan to use tangent space calculations # such as normal mapping applied to the meshes. There's a config setting, # <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE</tt>, which allows you to specify # a maximum smoothing angle for the algorithm. However, usually you'll # want to leave it at the default value. # aiProcess_CalcTangentSpace = 0x1 ## <hr>Identifies and joins identical vertex data sets within all # imported meshes. # # After this step is run, each mesh contains unique vertices, # so a vertex may be used by multiple faces. You usually want # to use this post processing step. If your application deals with # indexed geometry, this step is compulsory or you'll just waste rendering # time. <b>If this flag is not specified</b>, no vertices are referenced by # more than one face and <b>no index buffer is required</b> for rendering. # aiProcess_JoinIdenticalVertices = 0x2 ## <hr>Converts all the imported data to a left-handed coordinate space. # # By default the data is returned in a right-handed coordinate space (which # OpenGL prefers). In this space, +X points to the right, # +Z points towards the viewer, and +Y points upwards. In the DirectX # coordinate space +X points to the right, +Y points upwards, and +Z points # away from the viewer. # # You'll probably want to consider this flag if you use Direct3D for # rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this # setting and bundles all conversions typically required for D3D-based # applications. # aiProcess_MakeLeftHanded = 0x4 ## <hr>Triangulates all faces of all meshes. # # By default the imported mesh data might contain faces with more than 3 # indices. For rendering you'll usually want all faces to be triangles. # This post processing step splits up faces with more than 3 indices into # triangles. Line and point primitives are #not# modified! If you want # 'triangles only' with no other kinds of primitives, try the following # solution: # <ul> # <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType</li> # <li>Ignore all point and line meshes when you process assimp's output</li> # </ul> # aiProcess_Triangulate = 0x8 ## <hr>Removes some parts of the data structure (animations, materials, # light sources, cameras, textures, vertex components). # # The components to be removed are specified in a separate # configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS</tt>. This is quite useful # if you don't need all parts of the output structure. Vertex colors # are rarely used today for example... Calling this step to remove unneeded # data from the pipeline as early as possible results in increased # performance and a more optimized output data structure. # This step is also useful if you want to force Assimp to recompute # normals or tangents. The corresponding steps don't recompute them if # they're already there (loaded from the source asset). By using this # step you can make sure they are NOT there. # # This flag is a poor one, mainly because its purpose is usually # misunderstood. Consider the following case: a 3D model has been exported # from a CAD app, and it has per-face vertex colors. Vertex positions can't be # shared, thus the #aiProcess_JoinIdenticalVertices step fails to # optimize the data because of these nasty little vertex colors. # Most apps don't even process them, so it's all for nothing.
By using # this step, unneeded components are excluded as early as possible # thus opening more room for internal optimizations. # aiProcess_RemoveComponent = 0x10 ## <hr>Generates normals for all faces of all meshes. # # This is ignored if normals are already there at the time this flag # is evaluated. Model importers try to load them from the source file, so # they're usually already there. Face normals are shared between all points # of a single face, so a single point can have multiple normals, which # forces the library to duplicate vertices in some cases. # #aiProcess_JoinIdenticalVertices is #senseless# then. # # This flag may not be specified together with #aiProcess_GenSmoothNormals. # aiProcess_GenNormals = 0x20 ## <hr>Generates smooth normals for all vertices in the mesh. # # This is ignored if normals are already there at the time this flag # is evaluated. Model importers try to load them from the source file, so # they're usually already there. # # This flag may not be specified together with # #aiProcess_GenNormals. There's a configuration option, # <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE</tt> which allows you to specify # an angle maximum for the normal smoothing algorithm. Normals exceeding # this limit are not smoothed, resulting in a 'hard' seam between two faces. # Using a decent angle here (e.g. 80 degrees) results in very good visual # appearance. # aiProcess_GenSmoothNormals = 0x40 ## <hr>Splits large meshes into smaller sub-meshes. # # This is quite useful for real-time rendering, where the number of triangles # which can be maximally processed in a single draw-call is limited # by the video driver/hardware. The maximum vertex buffer is usually limited # too. Both requirements can be met with this step: you may specify both a # triangle and vertex limit for a single mesh. # # The split limits can (and should!) be set through the # <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT</tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT</tt> # settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES</tt> and # <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES</tt>. # # Note that splitting is generally a time-consuming task, but only if there's # something to split. The use of this step is recommended for most users. # aiProcess_SplitLargeMeshes = 0x80 ## <hr>Removes the node graph and pre-transforms all vertices with # the local transformation matrices of their nodes. # # The output scene still contains nodes, however there is only a # root node with children, each one referencing only one mesh, # and each mesh referencing one material. For rendering, you can # simply render all meshes in order - you don't need to pay # attention to local transformations and the node hierarchy. # Animations are removed during this step. # This step is intended for applications without a scenegraph. # The step CAN cause some problems: if e.g. a mesh of the asset # contains normals and another, using the same material index, does not, # they will be brought together, but the first mesh's part of # the normal list is zeroed. However, these artifacts are rare. # @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE</tt> configuration property # can be set to normalize the scene's spatial dimension to the -1...1 # range. # aiProcess_PreTransformVertices = 0x100 ## <hr>Limits the number of bones simultaneously affecting a single vertex # to a maximum value.
# # If any vertex is affected by more than the maximum number of bones, the least # important vertex weights are removed and the remaining vertex weights are # renormalized so that the weights still sum up to 1. # The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS</tt> in # config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS</tt> setting to # supply your own limit to the post processing step. # # If you intend to perform the skinning in hardware, this post processing # step might be of interest to you. # aiProcess_LimitBoneWeights = 0x200 ## <hr>Validates the imported scene data structure. # This makes sure that all indices are valid, all animations and # bones are linked correctly, all material references are correct, etc. # # It is recommended that you capture Assimp's log output if you use this flag, # so you can easily find out what's wrong if a file fails the # validation. The validator is quite strict and will find #all# # inconsistencies in the data structure... It is recommended that plugin # developers use it to debug their loaders. There are two types of # validation failures: # <ul> # <li>Error: There's something wrong with the imported data. Further # postprocessing is not possible and the data is not usable at all. # The import fails. #Importer::GetErrorString() or #aiGetErrorString() # carry the error message around.</li> # <li>Warning: There are some minor issues (e.g. 1000000 animation # keyframes with the same time), but further postprocessing and use # of the data structure is still safe. Warning details are written # to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING</tt> is set # in #aiScene::mFlags</li> # </ul> # # This post-processing step is not time-consuming. Its use is not # compulsory, but recommended. # aiProcess_ValidateDataStructure = 0x400 ## <hr>Reorders triangles for better vertex cache locality. # # The step tries to improve the ACMR (average post-transform vertex cache # miss ratio) for all meshes. The implementation runs in O(n) and is # roughly based on the 'tipsify' algorithm (see <a href=" # http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this # paper</a>). # # If you intend to render huge models in hardware, this step might # be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE</tt> config # setting can be used to fine-tune the cache optimization. # aiProcess_ImproveCacheLocality = 0x800 ## <hr>Searches for redundant/unreferenced materials and removes them. # # This is especially useful in combination with the # #aiProcess_PreTransformVertices and #aiProcess_OptimizeMeshes flags. # Both join small meshes with equal characteristics, but they can't do # their work if two meshes have different materials. Because several # material settings are lost during Assimp's import filters # (and because many exporters don't check for redundant materials), huge # models often have materials which are defined several times with # exactly the same settings. # # Several material settings not contributing to the final appearance of # a surface are ignored in all comparisons (e.g. the material name). # So, if you're passing additional information through the # content pipeline (probably using #magic# material names), don't # specify this flag. Alternatively take a look at the # <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST</tt> setting. # aiProcess_RemoveRedundantMaterials = 0x1000 ## <hr>This step tries to determine which meshes have normal vectors # that are facing inwards and inverts them.
# # The algorithm is simple but effective: # the bounding box of all vertices + their normals is compared against # the volume of the bounding box of all vertices without their normals. # This works well for most objects, problems might occur with planar # surfaces. However, the step tries to filter such cases. # The step inverts all in-facing normals. Generally it is recommended # to enable this step, although the result is not always correct. # aiProcess_FixInfacingNormals = 0x2000 ## <hr>This step splits meshes with more than one primitive type into # homogeneous sub-meshes. # # The step is executed after the triangulation step. After the step # returns, just one bit is set in aiMesh::mPrimitiveTypes. This is # especially useful for real-time rendering where point and line # primitives are often ignored or rendered separately. # You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE</tt> option to specify which # primitive types you need. This can be used to easily exclude # lines and points, which are rarely used, from the import. # aiProcess_SortByPType = 0x8000 ## <hr>This step searches all meshes for degenerate primitives and # converts them to proper lines or points. # # A face is 'degenerate' if one or more of its points are identical. # To have the degenerate stuff not only detected and collapsed but # removed, try one of the following procedures: # <br><b>1.</b> (if you support lines and points for rendering but don't # want the degenerates)<br> # <ul> # <li>Specify the #aiProcess_FindDegenerates flag. # </li> # <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE</tt> option to 1. This will # cause the step to remove degenerate triangles from the import # as soon as they're detected. They won't pass any further # pipeline steps. # </li> # </ul> # <br><b>2.</b> (if you don't support lines and points at all)<br> # <ul> # <li>Specify the #aiProcess_FindDegenerates flag. # </li> # <li>Specify the #aiProcess_SortByPType flag. This moves line and # point primitives to separate meshes. # </li> # <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE</tt> option to # @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES # @endcode to cause SortByPType to reject point # and line meshes from the scene. # </li> # </ul> # @note Degenerate polygons are not necessarily evil and that's why # they're not removed by default. There are several file formats which # don't support lines or points, and some exporters bypass the # format specification and write them as degenerate triangles instead. # aiProcess_FindDegenerates = 0x10000 ## <hr>This step searches all meshes for invalid data, such as zeroed # normal vectors or invalid UV coords, and removes/fixes them. This is # intended to get rid of some common exporter errors. # # This is especially useful for normals. If they are invalid, and # the step recognizes this, they will be removed and can later # be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br> # The step will also remove meshes that are infinitely small and reduce # animation tracks consisting of hundreds of redundant keys to a single # key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY</tt> config property decides # the accuracy of the check for duplicate animation tracks. # aiProcess_FindInvalidData = 0x20000 ## <hr>This step converts non-UV mappings (such as spherical or # cylindrical mapping) to proper texture coordinate channels. # # Most applications will support UV mapping only, so you will # probably want to specify this step in every case.
Note that Assimp is not # always able to match the original mapping implementation of the # 3D app which produced a model perfectly. It's always better to let the # modelling app compute the UV channels - 3ds max, Maya, Blender, # LightWave, and Modo do this for example. # # @note If this step is not requested, you'll need to process the # <tt>#AI_MATKEY_MAPPING</tt> material property in order to display all assets # properly. # aiProcess_GenUVCoords = 0x40000 ## <hr>This step applies per-texture UV transformations and bakes # them into stand-alone texture coordinate channels. # # UV transformations are specified per-texture - see the # <tt>#AI_MATKEY_UVTRANSFORM</tt> material key for more information. # This step processes all textures with # transformed input UV coordinates and generates a new (pre-transformed) UV channel # which replaces the old channel. Most applications won't support UV # transformations, so you will probably want to specify this step. # # @note UV transformations are usually implemented in real-time apps by # transforming texture coordinates at vertex shader stage with a 3x3 # (homogeneous) transformation matrix. # aiProcess_TransformUVCoords = 0x80000 ## <hr>This step searches for duplicate meshes and replaces them # with references to the first mesh. # # This step takes a while, so don't use it if speed is a concern. # Its main purpose is to work around the fact that many export # file formats don't support instanced meshes, so exporters need to # duplicate meshes. This step removes the duplicates again. Please # note that Assimp does not currently support per-node material # assignment to meshes, which means that identical meshes with # different materials are currently #not# joined, although this is # planned for future versions. # aiProcess_FindInstances = 0x100000 ## <hr>A postprocessing step to reduce the number of meshes. # # This will, in fact, reduce the number of draw calls. # # This is a very effective optimization and is recommended to be used # together with #aiProcess_OptimizeGraph, if possible. The flag is fully # compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType. # aiProcess_OptimizeMeshes = 0x200000 ## <hr>A postprocessing step to optimize the scene hierarchy. # # Nodes without animations, bones, lights or cameras assigned are # collapsed and joined. # # Node names can be lost during this step. If you use special 'tag nodes' # to pass additional information through your content pipeline, use the # <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST</tt> setting to specify a list of node # names you want to be kept. Nodes matching one of the names in this list won't # be touched or modified. # # Use this flag with caution. Most simple files will be collapsed to a # single node, so complex hierarchies are usually completely lost. This is not # useful for editor environments, but probably a very effective # optimization if you just want to get the model data, convert it to your # own format, and render it as fast as possible. # # This flag is designed to be used with #aiProcess_OptimizeMeshes for best # results. # # @note 'Crappy' scenes with thousands of extremely small meshes packed # in deeply nested nodes exist for almost all file formats. # #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph # usually fixes them all and makes them renderable. # aiProcess_OptimizeGraph = 0x400000 ## <hr>This step flips all UV coordinates along the y-axis and adjusts # material settings and bitangents accordingly.
# # <b>Output UV coordinate system:</b> # @code # 0y|0y ---------- 1x|0y # | | # | | # | | # 0x|1y ---------- 1x|1y # @endcode # # You'll probably want to consider this flag if you use Direct3D for # rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this # setting and bundles all conversions typically required for D3D-based # applications. # aiProcess_FlipUVs = 0x800000 ## <hr>This step adjusts the output face winding order to be CW. # # The default face winding order is counter clockwise (CCW). # # <b>Output face order:</b> # @code # x2 # # x0 # x1 # @endcode # aiProcess_FlipWindingOrder = 0x1000000 ## <hr>This step splits meshes with many bones into sub-meshes so that each # sub-mesh has no more bones than a given limit. # aiProcess_SplitByBoneCount = 0x2000000 ## <hr>This step removes bones losslessly or according to some threshold. # # In some cases (i.e. formats that require it) exporters are forced to # assign dummy bone weights to otherwise static meshes assigned to # animated meshes. Full, weight-based skinning is expensive while # animating nodes is extremely cheap, so this step is offered to clean up # the data in that regard. # # Use <tt>#AI_CONFIG_PP_DB_THRESHOLD</tt> to control this. # Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE</tt> if you want bones removed if and # only if all bones within the scene qualify for removal. # aiProcess_Debone = 0x4000000 aiProcess_GenEntityMeshes = 0x100000 aiProcess_OptimizeAnimations = 0x200000 aiProcess_FixTexturePaths = 0x200000 aiProcess_EmbedTextures = 0x10000000 ## @def aiProcess_ConvertToLeftHanded # @brief Shortcut flag for Direct3D-based applications. # # Supersedes the #aiProcess_MakeLeftHanded, #aiProcess_FlipUVs and # #aiProcess_FlipWindingOrder flags. # The output data matches Direct3D's conventions: left-handed geometry, upper-left # origin for UV coordinates and finally clockwise face order, suitable for CCW culling. # # @deprecated # aiProcess_ConvertToLeftHanded = ( \ aiProcess_MakeLeftHanded | \ aiProcess_FlipUVs | \ aiProcess_FlipWindingOrder | \ 0 ) ## @def aiProcessPreset_TargetRealtime_Fast # @brief Default postprocess configuration optimizing the data for real-time rendering. # # Applications would want to use this preset to load models on end-user PCs, # maybe for direct use in a game. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations # in your application, apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be of # use for you, so it might be better not to specify them. # aiProcessPreset_TargetRealtime_Fast = ( \ aiProcess_CalcTangentSpace | \ aiProcess_GenNormals | \ aiProcess_JoinIdenticalVertices | \ aiProcess_Triangulate | \ aiProcess_GenUVCoords | \ aiProcess_SortByPType | \ 0 ) ## @def aiProcessPreset_TargetRealtime_Quality # @brief Default postprocess configuration optimizing the data for real-time rendering. # # Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration # performs some extra optimizations to improve rendering speed and # to minimize memory usage. It could be a good choice for a level editor # environment where import speed is not so important. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step.
If you don't support UV transformations # in your application, apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be # of use for you, so it might be better not to specify them. # aiProcessPreset_TargetRealtime_Quality = ( \ aiProcess_CalcTangentSpace | \ aiProcess_GenSmoothNormals | \ aiProcess_JoinIdenticalVertices | \ aiProcess_ImproveCacheLocality | \ aiProcess_LimitBoneWeights | \ aiProcess_RemoveRedundantMaterials | \ aiProcess_SplitLargeMeshes | \ aiProcess_Triangulate | \ aiProcess_GenUVCoords | \ aiProcess_SortByPType | \ aiProcess_FindDegenerates | \ aiProcess_FindInvalidData | \ 0 ) ## @def aiProcessPreset_TargetRealtime_MaxQuality # @brief Default postprocess configuration optimizing the data for real-time rendering. # # This preset enables almost every optimization step to achieve perfectly # optimized data. It's your choice for level editor environments where import speed # is not important. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations # in your application, apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be # of use for you, so it might be better not to specify them. # aiProcessPreset_TargetRealtime_MaxQuality = ( \ aiProcessPreset_TargetRealtime_Quality | \ aiProcess_FindInstances | \ aiProcess_ValidateDataStructure | \ aiProcess_OptimizeMeshes | \ 0 )
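# Usage sketch (illustrative, not from the vendored source): the flags above
# are plain ints meant to be OR-ed into one bitmask and handed to assimp at
# import time; upstream pyassimp exposes this as load(..., processing=...),
# which this vendored copy is assumed to mirror.
custom_postprocess = (aiProcess_Triangulate
                      | aiProcess_JoinIdenticalVertices
                      | aiProcess_GenSmoothNormals
                      | aiProcess_SortByPType)
# e.g. scene = pyassimp.load("model.obj", processing=custom_postprocess)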
23,548
Python
43.348399
90
0.741422
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/helper.py
#-*- coding: UTF-8 -*- """ Some fancy helper functions. """ import os import ctypes import operator from distutils.sysconfig import get_python_lib import re import sys try: import numpy except ImportError: numpy = None import logging;logger = logging.getLogger("pyassimp") from .errors import AssimpError additional_dirs, ext_whitelist = [],[] # populate search directories and lists of allowed file extensions # depending on the platform we're running on. if os.name=='posix': additional_dirs.append('./') additional_dirs.append('/usr/lib/') additional_dirs.append('/usr/lib/x86_64-linux-gnu/') additional_dirs.append('/usr/lib/aarch64-linux-gnu/') additional_dirs.append('/usr/local/lib/') if 'LD_LIBRARY_PATH' in os.environ: additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item]) # check if running from anaconda. anaconda_keywords = ("conda", "continuum") if any(k in sys.version.lower() for k in anaconda_keywords): cur_path = get_python_lib() pattern = re.compile('.*\/lib\/') conda_lib = pattern.match(cur_path).group() logger.info("Adding Anaconda lib path:"+ conda_lib) additional_dirs.append(conda_lib) # note - this won't catch libassimp.so.N.n, but # currently there's always a symlink called # libassimp.so in /usr/local/lib. ext_whitelist.append('.so') # libassimp.dylib in /usr/local/lib ext_whitelist.append('.dylib') elif os.name=='nt': ext_whitelist.append('.dll') path_dirs = os.environ['PATH'].split(';') additional_dirs.extend(path_dirs) def vec2tuple(x): """ Converts a VECTOR3D to a Tuple """ return (x.x, x.y, x.z) def transform(vector3, matrix4x4): """ Apply a transformation matrix on a 3D vector. :param vector3: array with 3 elements :param matrix4x4: 4x4 matrix """ if numpy: return numpy.dot(matrix4x4, numpy.append(vector3, 1.)) else: m0,m1,m2,m3 = matrix4x4; x,y,z = vector3 return [ m0[0]*x + m0[1]*y + m0[2]*z + m0[3], m1[0]*x + m1[1]*y + m1[2]*z + m1[3], m2[0]*x + m2[1]*y + m2[2]*z + m2[3], m3[0]*x + m3[1]*y + m3[2]*z + m3[3] ] def _inv(matrix4x4): m0,m1,m2,m3 = matrix4x4 det = m0[3]*m1[2]*m2[1]*m3[0] - m0[2]*m1[3]*m2[1]*m3[0] - \ m0[3]*m1[1]*m2[2]*m3[0] + m0[1]*m1[3]*m2[2]*m3[0] + \ m0[2]*m1[1]*m2[3]*m3[0] - m0[1]*m1[2]*m2[3]*m3[0] - \ m0[3]*m1[2]*m2[0]*m3[1] + m0[2]*m1[3]*m2[0]*m3[1] + \ m0[3]*m1[0]*m2[2]*m3[1] - m0[0]*m1[3]*m2[2]*m3[1] - \ m0[2]*m1[0]*m2[3]*m3[1] + m0[0]*m1[2]*m2[3]*m3[1] + \ m0[3]*m1[1]*m2[0]*m3[2] - m0[1]*m1[3]*m2[0]*m3[2] - \ m0[3]*m1[0]*m2[1]*m3[2] + m0[0]*m1[3]*m2[1]*m3[2] + \ m0[1]*m1[0]*m2[3]*m3[2] - m0[0]*m1[1]*m2[3]*m3[2] - \ m0[2]*m1[1]*m2[0]*m3[3] + m0[1]*m1[2]*m2[0]*m3[3] + \ m0[2]*m1[0]*m2[1]*m3[3] - m0[0]*m1[2]*m2[1]*m3[3] - \ m0[1]*m1[0]*m2[2]*m3[3] + m0[0]*m1[1]*m2[2]*m3[3] return[[( m1[2]*m2[3]*m3[1] - m1[3]*m2[2]*m3[1] + m1[3]*m2[1]*m3[2] - m1[1]*m2[3]*m3[2] - m1[2]*m2[1]*m3[3] + m1[1]*m2[2]*m3[3]) /det, ( m0[3]*m2[2]*m3[1] - m0[2]*m2[3]*m3[1] - m0[3]*m2[1]*m3[2] + m0[1]*m2[3]*m3[2] + m0[2]*m2[1]*m3[3] - m0[1]*m2[2]*m3[3]) /det, ( m0[2]*m1[3]*m3[1] - m0[3]*m1[2]*m3[1] + m0[3]*m1[1]*m3[2] - m0[1]*m1[3]*m3[2] - m0[2]*m1[1]*m3[3] + m0[1]*m1[2]*m3[3]) /det, ( m0[3]*m1[2]*m2[1] - m0[2]*m1[3]*m2[1] - m0[3]*m1[1]*m2[2] + m0[1]*m1[3]*m2[2] + m0[2]*m1[1]*m2[3] - m0[1]*m1[2]*m2[3]) /det], [( m1[3]*m2[2]*m3[0] - m1[2]*m2[3]*m3[0] - m1[3]*m2[0]*m3[2] + m1[0]*m2[3]*m3[2] + m1[2]*m2[0]*m3[3] - m1[0]*m2[2]*m3[3]) /det, ( m0[2]*m2[3]*m3[0] - m0[3]*m2[2]*m3[0] + m0[3]*m2[0]*m3[2] - m0[0]*m2[3]*m3[2] - m0[2]*m2[0]*m3[3] + m0[0]*m2[2]*m3[3]) /det, ( m0[3]*m1[2]*m3[0] - m0[2]*m1[3]*m3[0] - m0[3]*m1[0]*m3[2] + m0[0]*m1[3]*m3[2] + 
m0[2]*m1[0]*m3[3] - m0[0]*m1[2]*m3[3]) /det, ( m0[2]*m1[3]*m2[0] - m0[3]*m1[2]*m2[0] + m0[3]*m1[0]*m2[2] - m0[0]*m1[3]*m2[2] - m0[2]*m1[0]*m2[3] + m0[0]*m1[2]*m2[3]) /det], [( m1[1]*m2[3]*m3[0] - m1[3]*m2[1]*m3[0] + m1[3]*m2[0]*m3[1] - m1[0]*m2[3]*m3[1] - m1[1]*m2[0]*m3[3] + m1[0]*m2[1]*m3[3]) /det, ( m0[3]*m2[1]*m3[0] - m0[1]*m2[3]*m3[0] - m0[3]*m2[0]*m3[1] + m0[0]*m2[3]*m3[1] + m0[1]*m2[0]*m3[3] - m0[0]*m2[1]*m3[3]) /det, ( m0[1]*m1[3]*m3[0] - m0[3]*m1[1]*m3[0] + m0[3]*m1[0]*m3[1] - m0[0]*m1[3]*m3[1] - m0[1]*m1[0]*m3[3] + m0[0]*m1[1]*m3[3]) /det, ( m0[3]*m1[1]*m2[0] - m0[1]*m1[3]*m2[0] - m0[3]*m1[0]*m2[1] + m0[0]*m1[3]*m2[1] + m0[1]*m1[0]*m2[3] - m0[0]*m1[1]*m2[3]) /det], [( m1[2]*m2[1]*m3[0] - m1[1]*m2[2]*m3[0] - m1[2]*m2[0]*m3[1] + m1[0]*m2[2]*m3[1] + m1[1]*m2[0]*m3[2] - m1[0]*m2[1]*m3[2]) /det, ( m0[1]*m2[2]*m3[0] - m0[2]*m2[1]*m3[0] + m0[2]*m2[0]*m3[1] - m0[0]*m2[2]*m3[1] - m0[1]*m2[0]*m3[2] + m0[0]*m2[1]*m3[2]) /det, ( m0[2]*m1[1]*m3[0] - m0[1]*m1[2]*m3[0] - m0[2]*m1[0]*m3[1] + m0[0]*m1[2]*m3[1] + m0[1]*m1[0]*m3[2] - m0[0]*m1[1]*m3[2]) /det, ( m0[1]*m1[2]*m2[0] - m0[2]*m1[1]*m2[0] + m0[2]*m1[0]*m2[1] - m0[0]*m1[2]*m2[1] - m0[1]*m1[0]*m2[2] + m0[0]*m1[1]*m2[2]) /det]] def get_bounding_box(scene): bb_min = [1e10, 1e10, 1e10] # x,y,z bb_max = [-1e10, -1e10, -1e10] # x,y,z inv = numpy.linalg.inv if numpy else _inv return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, inv(scene.rootnode.transformation)) def get_bounding_box_for_node(node, bb_min, bb_max, transformation): if numpy: transformation = numpy.dot(transformation, node.transformation) else: t0,t1,t2,t3 = transformation T0,T1,T2,T3 = node.transformation transformation = [ [ t0[0]*T0[0] + t0[1]*T1[0] + t0[2]*T2[0] + t0[3]*T3[0], t0[0]*T0[1] + t0[1]*T1[1] + t0[2]*T2[1] + t0[3]*T3[1], t0[0]*T0[2] + t0[1]*T1[2] + t0[2]*T2[2] + t0[3]*T3[2], t0[0]*T0[3] + t0[1]*T1[3] + t0[2]*T2[3] + t0[3]*T3[3] ],[ t1[0]*T0[0] + t1[1]*T1[0] + t1[2]*T2[0] + t1[3]*T3[0], t1[0]*T0[1] + t1[1]*T1[1] + t1[2]*T2[1] + t1[3]*T3[1], t1[0]*T0[2] + t1[1]*T1[2] + t1[2]*T2[2] + t1[3]*T3[2], t1[0]*T0[3] + t1[1]*T1[3] + t1[2]*T2[3] + t1[3]*T3[3] ],[ t2[0]*T0[0] + t2[1]*T1[0] + t2[2]*T2[0] + t2[3]*T3[0], t2[0]*T0[1] + t2[1]*T1[1] + t2[2]*T2[1] + t2[3]*T3[1], t2[0]*T0[2] + t2[1]*T1[2] + t2[2]*T2[2] + t2[3]*T3[2], t2[0]*T0[3] + t2[1]*T1[3] + t2[2]*T2[3] + t2[3]*T3[3] ],[ t3[0]*T0[0] + t3[1]*T1[0] + t3[2]*T2[0] + t3[3]*T3[0], t3[0]*T0[1] + t3[1]*T1[1] + t3[2]*T2[1] + t3[3]*T3[1], t3[0]*T0[2] + t3[1]*T1[2] + t3[2]*T2[2] + t3[3]*T3[2], t3[0]*T0[3] + t3[1]*T1[3] + t3[2]*T2[3] + t3[3]*T3[3] ] ] for mesh in node.meshes: for v in mesh.vertices: v = transform(v, transformation) bb_min[0] = min(bb_min[0], v[0]) bb_min[1] = min(bb_min[1], v[1]) bb_min[2] = min(bb_min[2], v[2]) bb_max[0] = max(bb_max[0], v[0]) bb_max[1] = max(bb_max[1], v[1]) bb_max[2] = max(bb_max[2], v[2]) for child in node.children: bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation) return bb_min, bb_max def try_load_functions(library_path, dll): ''' Try to bind to aiImportFile and aiReleaseImport Arguments --------- library_path: path to current lib dll: ctypes handle to library Returns --------- If unsuccessful: None If successful: Tuple containing (library_path, load from filename function, load from memory function, export to filename function, export to blob function, release function, ctypes handle to assimp library) ''' try: load = dll.aiImportFile release = dll.aiReleaseImport load_mem = dll.aiImportFileFromMemory export = dll.aiExportScene export2blob = 
dll.aiExportSceneToBlob except AttributeError: #OK, this is a library, but it doesn't have the functions we need return None # library found! from .structs import Scene, ExportDataBlob load.restype = ctypes.POINTER(Scene) load_mem.restype = ctypes.POINTER(Scene) export2blob.restype = ctypes.POINTER(ExportDataBlob) return (library_path, load, load_mem, export, export2blob, release, dll) def search_library(): ''' Loads the assimp library. Throws exception AssimpError if no library_path is found Returns: tuple, (load from filename function, load from memory function, export to filename function, export to blob function, release function, dll) ''' #this path folder = os.path.dirname(__file__) # silence 'DLL not found' message boxes on win try: ctypes.windll.kernel32.SetErrorMode(0x8007) except AttributeError: pass candidates = [] # test every file for curfolder in [folder]+additional_dirs: if os.path.isdir(curfolder): for filename in os.listdir(curfolder): # our minimum requirement for candidates is that # they should contain 'assimp' somewhere in # their name if filename.lower().find('assimp')==-1 : continue is_out=1 for et in ext_whitelist: if et in filename.lower(): is_out=0 break if is_out: continue library_path = os.path.join(curfolder, filename) logger.debug('Try ' + library_path) try: dll = ctypes.cdll.LoadLibrary(library_path) except Exception as e: logger.warning(str(e)) # OK, this except is evil. But different OSs will throw different # errors. So just ignore any errors. continue # see if the functions we need are in the dll loaded = try_load_functions(library_path, dll) if loaded: candidates.append(loaded) if not candidates: # no library found raise AssimpError("assimp library not found") else: # get the newest library_path candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates) res = max(candidates, key=operator.itemgetter(0))[1] logger.debug('Using assimp library located at ' + res[0]) # XXX: if there are 1000 dll/so files containing 'assimp' # in their name, do we have all of them in our address # space now until gc kicks in? # XXX: take version postfix of the .so on linux? return res[1:] def hasattr_silent(object, name): """ Calls hasattr() with the given parameters and preserves the legacy (pre-Python 3.2) functionality of silently catching exceptions. Returns the result of hasattr() or False if an exception was raised. """ try: if not object: return False return hasattr(object, name) except AttributeError: return False
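# Usage sketch (illustrative, not from the vendored source): `transform` above
# treats matrix4x4 as four row tuples and applies the full affine transform to
# a 3-vector with an implicit homogeneous w=1; it returns a length-4 numpy
# array when numpy is available, otherwise a 4-element list.
_identity = [(1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)]
assert list(transform((1.0, 2.0, 3.0), _identity))[:3] == [1.0, 2.0, 3.0]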
11,799
Python
40.549296
139
0.507755
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrImporter.py
import logging import numpy as np from typing import List, Tuple import xml.etree.ElementTree as ET from zipfile import ZipFile from pxr import Gf, Usd, UsdGeom from mf.ov.gdtf import gdtfImporter as gdtf from .filepathUtility import Filepath from .mvrUtil import Layer, Fixture from .USDTools import USDTools class MVRImporter: def convert(file: Filepath, mvr_output_dir: str, output_ext: str = ".usd") -> str: # TODO: change output_ext to bool use_usda try: with ZipFile(file.fullpath, 'r') as archive: output_dir = mvr_output_dir + file.filename + "_mvr/" data = archive.read("GeneralSceneDescription.xml") root = ET.fromstring(data) MVRImporter._warn_for_version(root) url: str = MVRImporter.convert_mvr_usd(output_dir, file.filename, output_ext, root, archive) return url except Exception as e: logger = logging.getLogger(__name__) logger.error(f"Failed to parse mvr file at {file.fullpath}. Make sure it is not corrupt. {e}") return None def _warn_for_version(root): v_major = root.attrib["verMajor"] v_minor = root.attrib["verMinor"] if v_major != "1" or v_minor != "5": logger = logging.getLogger(__name__) logger.warning(f"This extension is tested with MVR v1.5; this file's version is {v_major}.{v_minor}") def convert_mvr_usd(output_dir: str, filename: str, ext: str, root: ET.Element, archive: ZipFile) -> str: scene: ET.Element = root.find("Scene") layers: List[Layer] = MVRImporter._get_layers(scene) for layer in layers: layer.find_fixtures() stage, url = MVRImporter._make_mvr_stage(output_dir, filename, ext, layers) MVRImporter._convert_gdtf(stage, layers, output_dir, archive, ext) stage.Save() return url def _get_layers(scene: ET.Element) -> List[Layer]: layersNode: ET.Element = scene.find("Layers") layerNodes: ET.Element = layersNode.findall("Layer") layers: List[Layer] = [] for layerNode in layerNodes: layer: Layer = Layer(layerNode) layers.append(layer) return layers def _make_mvr_stage(output_dir: str, filename: str, ext: str, layers: List[Layer]) -> Tuple[Usd.Stage, str]: url: str = output_dir + filename + ext stage: Usd.Stage = USDTools.get_or_create_stage(url) MVRImporter._add_fixture_xform(stage, layers) return stage, url def _add_fixture_xform(stage: Usd.Stage, layers: List[Layer]): rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0, 0, 0, 1, 0, -1, 0) mvr_scale = UsdGeom.LinearUnits.millimeters # MVR dimensions are in millimeters applied_scale: float = USDTools.get_applied_scale(stage, mvr_scale) for layer in layers: if layer.fixtures_len() > 0: scope: UsdGeom.Scope = USDTools.add_scope(stage, layer.get_name_usd()) for fixture in layer.get_fixtures(): xform: UsdGeom.Xform = USDTools.add_fixture_xform(stage, scope, fixture.get_unique_name_usd()) fixture.set_stage_path(xform.GetPrim().GetPath()) np_matrix: np.matrix = USDTools.np_matrix_from_mvr(fixture.get_matrix()) gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_mvr(np_matrix, applied_scale) rotation: Gf.Rotation = gf_matrix.ExtractRotation() euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis()) # Z-up to Y-up # TODO: Validate with stage up axis translation = rotate_minus90deg_xaxis * gf_matrix.ExtractTranslation() rotate = rotate_minus90deg_xaxis * euler xform.ClearXformOpOrder() # Prevent error when overwriting xform.AddTranslateOp().Set(translation) xform.AddRotateZYXOp().Set(rotate) # Scale Op is added in _add_gdtf_reference fixture.apply_attributes_to_prim(xform.GetPrim()) stage.Save() def _convert_gdtf(stage: Usd.Stage, layers: List[Layer], mvr_output_dir: str, archive: ZipFile, ext: str):
gdtf_spec_uniq: List[str] = MVRImporter._get_gdtf_to_import(layers) gdtf_output_dir = mvr_output_dir for gdtf_spec in gdtf_spec_uniq: gdtf.GDTFImporter.convert_from_mvr(gdtf_spec, gdtf_output_dir, archive) MVRImporter._add_gdtf_reference(layers, stage, ext) def _get_gdtf_to_import(layers: List[Layer]) -> List[str]: result: List[str] = [] for layer in layers: if layer.fixtures_len() > 0: current_fixture_names = [x.get_spec_name() for x in layer.get_fixtures()] current_fixture_names_set = set(current_fixture_names) current_fixture_names_uniq = list(current_fixture_names_set) for current_fixture_name_uniq in current_fixture_names_uniq: result.append(current_fixture_name_uniq) return result def _add_gdtf_reference(layers: List[Layer], stage: Usd.Stage, ext: str): for layer in layers: if layer.fixtures_len() > 0: for fixture in layer.get_fixtures(): spec = fixture.get_spec_name() relative_path = f"./{spec}_gdtf/{spec}{ext}" stage_path = fixture.get_stage_path() USDTools.add_reference(stage, relative_path, stage_path) USDTools.copy_gdtf_scale(stage, stage_path, relative_path)
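# Usage sketch (illustrative, not from the original source): the converter is
# driven by handing `convert` a Filepath wrapper and an output directory; the
# paths below are made up, and the Filepath constructor is assumed to take the
# full path as in filepathUtility.
# from .filepathUtility import Filepath
# url = MVRImporter.convert(Filepath("C:/shows/rig.mvr"), "C:/shows/out/", ".usda")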
5,751
Python
46.147541
114
0.603026
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/extension.py
import omni.ext import omni.kit.tool.asset_importer as ai from .converterDelegate import ConverterDelegate class MfOvMvrExtension(omni.ext.IExt): def on_startup(self, _): self._delegate_mvr = ConverterDelegate( "MVR Converter", ["(.*\\.mvr$)"], ["MVR Files (*.mvr)"] ) ai.register_importer(self._delegate_mvr) def on_shutdown(self): ai.remove_importer(self._delegate_mvr) self._delegate_mvr.destroy() self._delegate_mvr = None
522
Python
26.526314
48
0.611111
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/USDTools.py
import numpy as np from typing import List from unidecode import unidecode from urllib.parse import unquote from pxr import Gf, Tf, Sdf, Usd, UsdGeom class USDTools: def make_name_valid(name: str) -> str: if name[:1].isdigit(): name = "_" + name return Tf.MakeValidIdentifier(unidecode(name)) def get_or_create_stage(url: str) -> Usd.Stage: try: # TODO: Better way to check if stage exists? return Usd.Stage.Open(url) except: stage = Usd.Stage.CreateNew(url) UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters) # TODO get user defaults UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # TODO get user defaults default_prim = stage.DefinePrim("/World", "Xform") stage.SetDefaultPrim(default_prim) stage.Save() return stage def add_scope(stage: Usd.Stage, name: str) -> UsdGeom.Scope: default_prim_path: Sdf.Path = stage.GetDefaultPrim().GetPrimPath() scope_path: Sdf.Path = default_prim_path.AppendPath(name) scope: UsdGeom.Scope = UsdGeom.Scope.Define(stage, scope_path) return scope def add_fixture_xform(stage: Usd.Stage, scope: UsdGeom.Scope, name: str) -> UsdGeom.Xform: path = scope.GetPath().AppendPath(name) xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, path) return xform def get_applied_scale(stage: Usd.Stage, scale_factor: float) -> float: stage_scale = UsdGeom.GetStageMetersPerUnit(stage) return scale_factor / stage_scale def np_matrix_from_mvr(value: str) -> np.matrix: # MVR Matrix is: 4x3, Right-handed, Z-up, 1 Distance Unit equals 1mm # expect form like "<Matrix>{x,y,z}{x,y,z}{x,y,z}{x,y,z}</Matrix>" where "x","y","z" is similar to 1.000000 # make source compatible with np.matrix constructor: "x y z; x y z; x y z; x y z" value_alt = value[1:] # Removes "{" prefix value_alt = value_alt[:-1] # Removes "}" suffix value_alt = value_alt.replace("}{", "; ") value_alt = value_alt.replace(",", " ") np_matrix: np.matrix = np.matrix(value_alt) return np_matrix def gf_matrix_from_mvr(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d: # Column major matrix gf_matrix = Gf.Matrix4d( np_matrix.item((0, 0)), np_matrix.item((0, 1)), np_matrix.item((0, 2)), 0, np_matrix.item((1, 0)), np_matrix.item((1, 1)), np_matrix.item((1, 2)), 0, np_matrix.item((2, 0)), np_matrix.item((2, 1)), np_matrix.item((2, 2)), 0, np_matrix.item((3, 0)) * scale, np_matrix.item((3, 1)) * scale, np_matrix.item((3, 2)) * scale, 1 ) return gf_matrix def set_fixture_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value): prim.CreateAttribute(f"mf:mvr:{attribute_name}", attribute_type).Set(attribute_value) def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str): xform_ref: Usd.Prim = stage.GetPrimAtPath(stage_path) path_unquoted = unquote(ref_path_relative) references: Usd.References = xform_ref.GetReferences() references.AddReference(path_unquoted) stage.Save() def copy_gdtf_scale(mvr_stage: Usd.Stage, stage_prim_path: str, relative_path: str): # Copy a referenced default prim's scale op value to a referencing xform in another stage curr_root_layer = mvr_stage.GetRootLayer() curr_stage_url: str = curr_root_layer.realPath curr_stage_url_formatted: str = curr_stage_url.replace('\\', '/') curr_stage_dir_index: int = curr_stage_url_formatted.rindex("/") curr_stage_dir = curr_stage_url_formatted[:curr_stage_dir_index] mvr_xform_target = UsdGeom.Xform(mvr_stage.GetPrimAtPath(stage_prim_path)) gdtf_stage_filename: str = relative_path[1:] gdtf_stage_path: str = curr_stage_dir + gdtf_stage_filename gdtf_stage: Usd.Stage = Usd.Stage.Open(gdtf_stage_path)
gdtf_default_prim = UsdGeom.Xform(gdtf_stage.GetDefaultPrim()) stage_scale = UsdGeom.GetStageMetersPerUnit(mvr_stage) scale_factor = 1 / stage_scale scale_value = Gf.Vec3d(scale_factor, scale_factor, scale_factor) xform_ordered_ops: List[UsdGeom.XformOp] = gdtf_default_prim.GetOrderedXformOps() for xform_op in xform_ordered_ops: if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale: scale_value = xform_op.Get() mvr_xform_target.AddScaleOp().Set(scale_value) mvr_stage.Save()
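# Usage sketch (illustrative, not from the original source): a round-trip
# through the MVR matrix helpers above. The matrix string is a made-up fixture
# transform (identity rotation plus a translation row in millimeters), and 0.1
# is the factor get_applied_scale would return for a centimeters stage.
def _demo_mvr_matrix():
    m = USDTools.np_matrix_from_mvr("{1,0,0}{0,1,0}{0,0,1}{100,200,300}")
    gf = USDTools.gf_matrix_from_mvr(m, 0.1)
    return gf.ExtractTranslation()  # Gf.Vec3d(10.0, 20.0, 30.0)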
4,672
Python
46.683673
120
0.634632
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrUtil.py
from typing import List import xml.etree.ElementTree as ET from pxr import Usd, Sdf from .USDTools import USDTools class Fixture: def __init__(self, node: ET.Element): self._root = node self._name = node.attrib["name"] self._uuid = node.attrib["uuid"] self._matrix = self._get_value_text_if_exists("Matrix") self._gdtf_spec = self._get_value_text_if_exists("GDTFSpec") self._gdtf_mode = self._get_value_text_if_exists("GDTFMode") self._custom_commands = self._get_custom_commands_values() self._classing = self._get_value_text_if_exists("Classing") self._addresses = self._get_addresses_values() self._fixture_id = self._get_value_int_if_exists("fixtureID") self._unit_number = self._get_value_int_if_exists("UnitNumber") self._fixture_type_id = self._get_value_int_if_exists("FixtureTypeId") self._custom_id = self._get_value_int_if_exists("CustomId") self._cie_color = self._get_color_values() self._cast_shadow = self._get_value_bool_if_exists("CastShadow") def get_unique_name_usd(self) -> str: return USDTools.make_name_valid(self._name + "_" + self._uuid) def get_matrix(self) -> str: return self._matrix def set_stage_path(self, path: str): self._stage_path = path def get_stage_path(self) -> str: return self._stage_path def get_spec_name(self) -> str: spec_name = self._gdtf_spec if self._gdtf_spec[-5:] == ".gdtf": spec_name = self._gdtf_spec[:-5] return spec_name def _get_value_text_if_exists(self, name: str) -> str: node = self._get_child_node(name) if node is not None: text = node.text if text is not None: return node.text return None def _get_value_int_if_exists(self, name: str) -> int: txt = self._get_value_text_if_exists(name) if txt is None: return None return int(txt) def _get_value_bool_if_exists(self, name: str) -> bool: txt = self._get_value_text_if_exists(name) if txt is None: return None return txt.lower() in ("true", "1") # bool(txt) would be True for the string "false" def _get_child_node(self, node: str): return self._root.find(node) def _get_custom_commands_values(self) -> List[str]: values: List[str] = [] node = self._get_child_node("CustomCommands") if node is not None: subnodes = node.findall("CustomCommand") if subnodes is not None and len(subnodes) > 0: values = [x.text for x in subnodes] return values def _get_addresses_values(self) -> List[int]: values: List[int] = [] node = self._get_child_node("Addresses") if node is not None: subnodes = node.findall("Address") if subnodes is not None and len(subnodes) > 0: values = [int(x.text) for x in subnodes] return values def _get_color_values(self) -> List[float]: colors: List[float] = [] node = self._get_child_node("Color") if node is not None: colors = [float(x) for x in node.text.split(",")] return colors def apply_attributes_to_prim(self, prim: Usd.Prim): self._set_attribute_text_if_valid(prim, "name", self._name) self._set_attribute_text_if_valid(prim, "uuid", self._uuid) self._set_attribute_text_if_valid(prim, "GDTFSpec", self._gdtf_spec) self._set_attribute_text_if_valid(prim, "GDTFMode", self._gdtf_mode) self._set_attribute_textarray_if_valid(prim, "CustomCommands", self._custom_commands) self._set_attribute_text_if_valid(prim, "Classing", self._classing) self._set_attribute_intarray_if_valid(prim, "Addresses", self._addresses) self._set_attribute_int_if_valid(prim, "FixtureID", self._fixture_id) self._set_attribute_int_if_valid(prim, "UnitNumber", self._unit_number) self._set_attribute_int_if_valid(prim, "FixtureTypeId", self._fixture_type_id) self._set_attribute_int_if_valid(prim, "CustomId", self._custom_id) self._set_attribute_floatarray_if_valid(prim, "CIEColor", self._cie_color)
self._set_attribute_bool_if_valid(prim, "CastShadow", self._cast_shadow) def _set_attribute_text_if_valid(self, prim: Usd.Prim, name: str, value: str): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.String, value) def _set_attribute_int_if_valid(self, prim: Usd.Prim, name: str, value: int): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Int, value) def _set_attribute_bool_if_valid(self, prim: Usd.Prim, name: str, value: bool): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Bool, value) def _set_attribute_textarray_if_valid(self, prim: Usd.Prim, name: str, value: List[str]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.StringArray, value) def _set_attribute_intarray_if_valid(self, prim: Usd.Prim, name: str, value: List[int]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.IntArray, value) def _set_attribute_floatarray_if_valid(self, prim: Usd.Prim, name: str, value: List[float]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.FloatArray, value) class Layer: def __init__(self, node: ET.Element): self._name = node.attrib["name"] self._uuid = node.attrib["uuid"] self._node = node self._fixtures = [] def get_name_usd(self) -> str: return USDTools.make_name_valid(self._name) def find_fixtures(self): childlist = self._node.find("ChildList") fixtures = childlist.findall("Fixture") self._fixtures = [Fixture(x) for x in fixtures] def fixtures_len(self) -> int: return len(self._fixtures) def get_fixtures(self) -> List[Fixture]: return self._fixtures
6,238
Python
39.777778
96
0.617987
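Note on the boolean parsing fixed above: Python's bool() treats any non-empty string as True, so XML text such as "false" must be compared against the literal strings the format uses. A minimal sketch of the pitfall (the helper name is illustrative, not part of the extension):

def parse_xml_bool(text):
    # XML/MVR booleans arrive as text; compare against the literal strings
    if text is None:
        return None
    return text.strip().lower() in ("true", "1")

assert parse_xml_bool("false") is False
assert parse_xml_bool("true") is True
assert bool("false") is True  # the behaviour the helper avoids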
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/repoman/repoman.py
import os import sys import packmanapi REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..") REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml") def bootstrap(): """ Bootstrap all omni.repo modules. Pull with packman from repo-deps.packman.xml and add them all to Python's sys.path to enable importing. """ deps = packmanapi.pull(REPO_DEPS_FILE) for dep_path in deps.values(): if dep_path not in sys.path: sys.path.append(dep_path) if __name__ == "__main__": bootstrap() import omni.repo.man omni.repo.man.main(REPO_ROOT)
703
Python
23.275861
100
0.661451
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/mf/ov/lidar_live_synth/__init__.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## # This file is needed so tests don't fail.
480
Python
42.727269
77
0.785417
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/dataset.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import gdown import os ## FCN Dataset url = 'https://drive.google.com/uc?id=1mSN6eLqPYEo9d9pBjSGzQ-ocLd8itP0P&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/fourcastnet/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN Pre-trained url = 'https://drive.google.com/uc?id=1oSkK69LGP3DfU2tlH5iaejOh94VNsMDu&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/../jupyter_notebook/FourCastNet/pre_trained.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## NS Data url = 'https://drive.google.com/uc?id=1IXEGbM3NOO6Dig1sxG1stHubwb09-D2N&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/navier_stokes/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN for Omniverse-P1 url = 'https://drive.google.com/uc?id=16YqSnstqoSJdgBzerbzYIkYagwS12lK3&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/FCN.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN for Omniverse-P2 url = 'https://drive.google.com/uc?id=1lSSx8eKfqCcHAbDvXTeUMoZGHfVQe-HG&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/FCN/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## Download and Install Omniverse url = 'https://drive.google.com/uc?id=1DugS2IbHhBPyCE-EuZczLHBZnlnFViIm&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+'/ov.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output)
2,958
Python
46.725806
110
0.772481
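The script above repeats the same download-extract-remove pattern six times; a small helper makes the intent clearer. This is a refactor sketch using the same gdown calls as the script (the helper name is illustrative, not part of the repo):

import os
import gdown

def fetch_and_extract(url: str, output: str) -> None:
    # cached_download skips the transfer when the archive already exists,
    # and extractall unpacks it next to the zip before the zip is removed
    gdown.cached_download(url, output, quiet=False, proxy=None, postprocess=gdown.extractall)
    os.remove(output)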
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_solver.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np from sympy import Symbol, Eq import modulus from modulus.sym.hydra import ModulusConfig, instantiate_arch from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Point1D from modulus.sym.geometry import Parameterization from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.key import Key from modulus.sym.node import Node from spring_mass_ode import SpringMass @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: # make list of nodes to unroll graph on sm = SpringMass(k=(2, 1, 1, 2), m=(1, 1, 1)) sm_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x1"), Key("x2"), Key("x3")], cfg=cfg.arch.fully_connected, ) nodes = sm.make_nodes() + [ sm_net.make_node(name="spring_mass_network", jit=cfg.jit) ] # add constraints to solver # make geometry geo = Point1D(0) t_max = 10.0 t_symbol = Symbol("t") time_range = {t_symbol: (0, t_max)} # make domain domain = Domain() # initial conditions IC = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0}, batch_size=cfg.batch_size.IC, lambda_weighting={ "x1": 1.0, "x2": 1.0, "x3": 1.0, "x1__t": 1.0, "x2__t": 1.0, "x3__t": 1.0, }, parameterization=Parameterization({t_symbol: 0}), ) domain.add_constraint(IC, name="IC") # solve over given time period interior = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"ode_x1": 0.0, "ode_x2": 0.0, "ode_x3": 0.0}, batch_size=cfg.batch_size.interior, parameterization=Parameterization(time_range), ) domain.add_constraint(interior, "interior") # add validation data deltaT = 0.001 t = np.arange(0, t_max, deltaT) t = np.expand_dims(t, axis=-1) invar_numpy = {"t": t} outvar_numpy = { "x1": (1 / 6) * np.cos(t) + (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), "x2": (2 / 6) * np.cos(t) + (0 / 2) * np.cos(np.sqrt(3) * t) - (1 / 3) * np.cos(2 * t), "x3": (1 / 6) * np.cos(t) - (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), } validator = PointwiseValidator( nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=1024 ) domain.add_validator(validator) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
4,033
Python
31.532258
81
0.631044
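For reference, the validation targets in outvar_numpy above are the closed-form solution of the three-mass system for k=(2, 1, 1, 2), m=(1, 1, 1) with initial conditions x(0)=(1, 0, 0) and zero initial velocities:

\begin{aligned}
x_1(t) &= \tfrac{1}{6}\cos t + \tfrac{1}{2}\cos(\sqrt{3}\,t) + \tfrac{1}{3}\cos(2t),\\
x_2(t) &= \tfrac{1}{3}\cos t - \tfrac{1}{3}\cos(2t),\\
x_3(t) &= \tfrac{1}{6}\cos t - \tfrac{1}{2}\cos(\sqrt{3}\,t) + \tfrac{1}{3}\cos(2t).
\end{aligned}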
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_ode.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from sympy import Symbol, Function, Number from modulus.sym.eq.pde import PDE class SpringMass(PDE): name = "SpringMass" def __init__(self, k=(2, 1, 1, 2), m=(1, 1, 1)): self.k = k self.m = m k1 = k[0] k2 = k[1] k3 = k[2] k4 = k[3] m1 = m[0] m2 = m[1] m3 = m[2] t = Symbol("t") input_variables = {"t": t} x1 = Function("x1")(*input_variables) x2 = Function("x2")(*input_variables) x3 = Function("x3")(*input_variables) if type(k1) is str: k1 = Function(k1)(*input_variables) elif type(k1) in [float, int]: k1 = Number(k1) if type(k2) is str: k2 = Function(k2)(*input_variables) elif type(k2) in [float, int]: k2 = Number(k2) if type(k3) is str: k3 = Function(k3)(*input_variables) elif type(k3) in [float, int]: k3 = Number(k3) if type(k4) is str: k4 = Function(k4)(*input_variables) elif type(k4) in [float, int]: k4 = Number(k4) if type(m1) is str: m1 = Function(m1)(*input_variables) elif type(m1) in [float, int]: m1 = Number(m1) if type(m2) is str: m2 = Function(m2)(*input_variables) elif type(m2) in [float, int]: m2 = Number(m2) if type(m3) is str: m3 = Function(m3)(*input_variables) elif type(m3) in [float, int]: m3 = Number(m3) self.equations = {} self.equations["ode_x1"] = m1 * (x1.diff(t)).diff(t) + k1 * x1 - k2 * (x2 - x1) self.equations["ode_x2"] = ( m2 * (x2.diff(t)).diff(t) + k2 * (x2 - x1) - k3 * (x3 - x2) ) self.equations["ode_x3"] = m3 * (x3.diff(t)).diff(t) + k3 * (x3 - x2) + k4 * x3
2,999
Python
34.294117
87
0.585195
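Written out, the residuals defined in self.equations above correspond to the coupled second-order system

\begin{aligned}
m_1 \ddot{x}_1 + k_1 x_1 - k_2 (x_2 - x_1) &= 0,\\
m_2 \ddot{x}_2 + k_2 (x_2 - x_1) - k_3 (x_3 - x_2) &= 0,\\
m_3 \ddot{x}_3 + k_3 (x_3 - x_2) + k_4 x_3 &= 0,
\end{aligned}

i.e. three masses in series connected by four springs, with the outer springs k_1 and k_4 anchored.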
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_inverse.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from sympy import Symbol, Eq import modulus from modulus.sym.hydra import ModulusConfig, instantiate_arch from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Point1D from modulus.sym.geometry import Parameterization from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.domain.monitor import PointwiseMonitor from modulus.sym.key import Key from modulus.sym.node import Node from spring_mass_ode import SpringMass @modulus.sym.main(config_path="conf", config_name="config_inverse") def run(cfg: ModulusConfig) -> None: # prepare data t_max = 10.0 deltaT = 0.01 t = np.arange(0, t_max, deltaT) t = np.expand_dims(t, axis=-1) invar_numpy = {"t": t} outvar_numpy = { "x1": (1 / 6) * np.cos(t) + (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), "x2": (2 / 6) * np.cos(t) + (0 / 2) * np.cos(np.sqrt(3) * t) - (1 / 3) * np.cos(2 * t), "x3": (1 / 6) * np.cos(t) - (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), } outvar_numpy.update({"ode_x1": np.full_like(invar_numpy["t"], 0)}) outvar_numpy.update({"ode_x2": np.full_like(invar_numpy["t"], 0)}) outvar_numpy.update({"ode_x3": np.full_like(invar_numpy["t"], 0)}) # make list of nodes to unroll graph on sm = SpringMass(k=(2, 1, 1, "k4"), m=("m1", 1, 1)) sm_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x1"), Key("x2"), Key("x3")], cfg=cfg.arch.fully_connected, ) invert_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("m1"), Key("k4")], cfg=cfg.arch.fully_connected, ) nodes = ( sm.make_nodes( detach_names=[ "x1", "x1__t", "x1__t__t", "x2", "x2__t", "x2__t__t", "x3", "x3__t", "x3__t__t", ] ) + [sm_net.make_node(name="spring_mass_network", jit=cfg.jit)] + [invert_net.make_node(name="invert_network", jit=cfg.jit)] ) # add constraints to solver # make geometry geo = Point1D(0) t_symbol = Symbol("t") x = Symbol("x") time_range = {t_symbol: (0, t_max)} # make domain domain = Domain() # initial conditions IC = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0}, batch_size=cfg.batch_size.IC, lambda_weighting={ "x1": 1.0, "x2": 1.0, "x3": 1.0, "x1__t": 1.0, "x2__t": 1.0, "x3__t": 1.0, }, 
parameterization=Parameterization({t_symbol: 0}), ) domain.add_constraint(IC, name="IC") # data and pdes data = PointwiseConstraint.from_numpy( nodes=nodes, invar=invar_numpy, outvar=outvar_numpy, batch_size=cfg.batch_size.data, ) domain.add_constraint(data, name="Data") # add monitors monitor = PointwiseMonitor( invar_numpy, output_names=["m1"], metrics={"mean_m1": lambda var: torch.mean(var["m1"])}, nodes=nodes, ) domain.add_monitor(monitor) monitor = PointwiseMonitor( invar_numpy, output_names=["k4"], metrics={"mean_k4": lambda var: torch.mean(var["k4"])}, nodes=nodes, ) domain.add_monitor(monitor) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
4,988
Python
29.796296
81
0.591419
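The detach_names list above is what turns this into an inverse problem: the displacement predictions enter the ODE residuals as data, so those residual terms only update the network that outputs m1 and k4. A minimal PyTorch sketch of the same gradient-stopping idea (tensor names are illustrative):

import torch

x = torch.tensor([1.0], requires_grad=True)   # stands in for a detached state prediction
m1 = torch.tensor([0.5], requires_grad=True)  # stands in for an inferred parameter
residual = m1 * x.detach() - 2.0              # x contributes no gradient here
residual.pow(2).sum().backward()
assert x.grad is None and m1.grad is not None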
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/projectile.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np from sympy import Symbol, sin, cos, pi, Eq import torch import modulus from modulus.sym.hydra import instantiate_arch, ModulusConfig from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Line1D,Point1D from modulus.sym.geometry.primitives_2d import Rectangle from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseInteriorConstraint, ) from modulus.sym.domain.inferencer import PointwiseInferencer from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.key import Key from modulus.sym.node import Node from projectile_eqn import ProjectileEquation from modulus.sym.utils.io import ( csv_to_dict, ValidatorPlotter, InferencerPlotter, ) @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: #Creating Nodes and Domain pe = ProjectileEquation() projectile_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x"),Key("y")], cfg=cfg.arch.fully_connected, ) nodes = pe.make_nodes() + [projectile_net.make_node(name="projectile_network")] x, y, t = Symbol("x"), Symbol("y"), Symbol("t") #Creating Geometry and adding constraint geo = Point1D(0) #make domain projectile_domain = Domain() #add constraint to solver v_o = 40.0 theta = np.pi/3 time_range = {t :(0.0,5.0)} #initial condition # Set boundary to be only left boundary IC = PointwiseBoundaryConstraint( nodes = nodes, geometry = geo, outvar = {"x": 0.0,"y":0.0, "x__t":v_o*cos(theta), "y__t":v_o*sin(theta)}, batch_size = cfg.batch_size.initial_x, parameterization = {t:0.0} ) projectile_domain.add_constraint(IC,"IC") #interior interior = PointwiseBoundaryConstraint( nodes = nodes, geometry = geo, outvar = {"ode_x":0.0,"ode_y":-9.81}, batch_size = cfg.batch_size.interior, parameterization = time_range, ) projectile_domain.add_constraint(interior,"interior") # Setup validator delta_T = 0.01 t_val = np.arange(0.,5.,delta_T) T_val = np.expand_dims(t_val.flatten(), axis = -1) X_val = v_o*np.cos(theta)*T_val Y_val = v_o*np.sin(theta)*T_val - 0.5*9.81*(T_val**2) invar_numpy = {"t": T_val} outvar_numpy = {"x":X_val, "y": Y_val} validator = PointwiseValidator( nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=128, plotter = ValidatorPlotter(), ) projectile_domain.add_validator(validator) # Setup Inferencer t_infe = np.arange(0,8,0.001) T_infe = 
np.expand_dims(t_infe.flatten(), axis = -1) invar_infe = {"t":T_infe} grid_inference = PointwiseInferencer( nodes=nodes, invar=invar_infe, output_names=["x","y"], batch_size=128, plotter=InferencerPlotter(), ) projectile_domain.add_inferencer(grid_inference, "inferencer_data") #make solver slv = Solver(cfg, projectile_domain) #start solve slv.solve() if __name__ == "__main__": run()
4,482
Python
25.370588
86
0.657073
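The physics and the validator above encode standard projectile kinematics with v_0 = 40, \theta = \pi/3 and g = 9.81:

x(t) = v_0 \cos\theta \, t, \qquad y(t) = v_0 \sin\theta \, t - \tfrac{1}{2} g t^2,

which is exactly the closed form used to build X_val and Y_val.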
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/fourcastnet.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Script to train Fourcastnet on ERA5 # Ref: https://arxiv.org/abs/2202.11214 import modulus from modulus.sym.hydra.config import ModulusConfig from modulus.sym.key import Key from modulus.sym.domain import Domain from modulus.sym.domain.constraint import SupervisedGridConstraint from modulus.sym.domain.validator import GridValidator from modulus.sym.solver import Solver from modulus.sym.utils.io import GridValidatorPlotter from src.dataset import ERA5HDF5GridDataset from src.fourcastnet import FourcastNetArch from src.loss import LpLoss @modulus.sym.main(config_path="conf", config_name="config_FCN") def run(cfg: ModulusConfig) -> None: # load training/ test data channels = list(range(cfg.custom.n_channels)) train_dataset = ERA5HDF5GridDataset( cfg.custom.training_data_path, chans=channels, tstep=cfg.custom.tstep, n_tsteps=cfg.custom.n_tsteps, patch_size=cfg.arch.afno.patch_size, ) test_dataset = ERA5HDF5GridDataset( cfg.custom.test_data_path, chans=channels, tstep=cfg.custom.tstep, n_tsteps=cfg.custom.n_tsteps, patch_size=cfg.arch.afno.patch_size, n_samples_per_year=20, ) # define input/output keys input_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.invar_keys] output_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.outvar_keys] # make list of nodes to unroll graph on model = FourcastNetArch( input_keys=input_keys, output_keys=output_keys, img_shape=test_dataset.img_shape, patch_size=cfg.arch.afno.patch_size, embed_dim=cfg.arch.afno.embed_dim, depth=cfg.arch.afno.depth, num_blocks=cfg.arch.afno.num_blocks, ) nodes = [model.make_node(name="FCN")] # make domain domain = Domain() # add constraints to domain supervised = SupervisedGridConstraint( nodes=nodes, dataset=train_dataset, batch_size=cfg.batch_size.grid, loss=LpLoss(), num_workers=cfg.custom.num_workers.grid, ) domain.add_constraint(supervised, "supervised") # add validator val = GridValidator( nodes, dataset=test_dataset, batch_size=cfg.batch_size.validation, plotter=GridValidatorPlotter(n_examples=5), num_workers=cfg.custom.num_workers.validation, ) domain.add_validator(val, "test") # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
3,688
Python
33.157407
88
0.706345
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/inferencer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #"Script to carry out Fourcastnet inference" import omegaconf import torch import logging import numpy as np from torch.utils.data import DataLoader, Sampler from modulus.sym.hydra import to_absolute_path from modulus.sym.key import Key from modulus.sym.distributed.manager import DistributedManager from src.dataset import ERA5HDF5GridDataset from src.fourcastnet import FourcastNetArch from src.metrics import Metrics logging.basicConfig(format="[%(levelname)s] - %(message)s", level=logging.INFO) var_key_dict = { 0: "u10", 1: "v10", 2: "t2m", 3: "sp", 4: "msl", 5: "t850", 6: "u1000", 7: "v1000", 8: "z1000", 9: "u850", 10: "v850", 11: "z850", 12: "u500", 13: "v500", 14: "z500", 15: "t500", 16: "z50", 17: "r500", 18: "r850", 19: "tcwv", } def to_device(tensor_dict): return { key: torch.as_tensor(value, dtype=torch.float32, device=device) for key, value in tensor_dict.items() } class SubsetSequentialBatchSampler(Sampler): """Custom subset sequential batch sampler for inferencer""" def __init__(self, subset): self.subset = subset def __iter__(self): for i in self.subset: yield [i] # batch size of 1 def __len__(self): return len(self.subset) # load configuration cfg = omegaconf.OmegaConf.load("conf/config_FCN.yaml") model_path = to_absolute_path("fcn_era5.pth") # get device device = DistributedManager().device # load test data test_dataset = ERA5HDF5GridDataset( cfg.custom.test_data_path, # Test data location e.g. 
/era5/20var/test chans=list(range(cfg.custom.n_channels)), tstep=cfg.custom.tstep, n_tsteps=1, # set to one for inference patch_size=cfg.arch.afno.patch_size, ) m = Metrics( test_dataset.img_shape, clim_mean_path="/data/stats/time_means.npy", # Path to climate mean device=device ) # define input/output keys input_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.invar_keys] output_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.outvar_keys] # create model model = FourcastNetArch( input_keys=input_keys, output_keys=output_keys, img_shape=test_dataset.img_shape, patch_size=cfg.arch.afno.patch_size, embed_dim=cfg.arch.afno.embed_dim, depth=cfg.arch.afno.depth, num_blocks=cfg.arch.afno.num_blocks, ) # load parameters model.load_state_dict(torch.load(model_path)) model.to(device) logging.info(f"Loaded model {model_path}") # define subsets of dataset to run inference nics = 180 # Number of 2 day correl time samples nsteps = 25 last = len(test_dataset) - 1 - nsteps * cfg.custom.tstep # Variable dictionary acc_recursive = {key: [] for key in var_key_dict.values()} rmse_recursive = {key: [] for key in var_key_dict.values()} # Normalization stats mu = torch.tensor(test_dataset.mu[0]).to(device) # shape [C, 1, 1] sd = torch.tensor(test_dataset.sd[0]).to(device) # shape [C, 1, 1] # run inference with torch.no_grad(): for ic in range(0, min([8 * nics + 1, last])): subset = cfg.custom.tstep * np.arange(nsteps) + ic if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0: logging.info(f"Running IC at step {ic}") # get dataloader dataloader = DataLoader( dataset=test_dataset, batch_sampler=SubsetSequentialBatchSampler(subset), pin_memory=True, num_workers=1, worker_init_fn=test_dataset.worker_init_fn, ) acc_error = torch.zeros(nsteps, test_dataset.nchans) rmse_error = torch.zeros(nsteps, test_dataset.nchans) for tstep, (invar, true_outvar, _) in enumerate(dataloader): if tstep % 10 == 0: logging.info(f"ic: {ic} tstep: {tstep}/{nsteps}") # place tensors on device invar = to_device(invar) true_outvar = to_device(true_outvar) # 1. single step inference pred_outvar_single = model(invar) pred_single = sd * pred_outvar_single["x_t1"][0] # 2. recursive inference if tstep == 0: pred_outvar_recursive = model(invar) else: pred_outvar_recursive = model( {"x_t0": pred_outvar_recursive["x_t1"]} ) # get unormalised target / prediction true = sd * true_outvar["x_t1"][0] pred_recursive = sd * pred_outvar_recursive["x_t1"][0] # Calc metrics rmse_error[tstep] = m.weighted_rmse(pred_recursive, true).detach().cpu() acc_error[tstep] = m.weighted_acc(pred_recursive, true).detach().cpu() # Save fields into dictionary if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0: for i, fld in var_key_dict.items(): # Fields with 9 day (36) dc time if fld == "z500" or fld == "t2m" or fld == "t850": if (ic + 1) % 36 == 0 or ic == 0: acc_recursive[fld].append(acc_error[:, i].numpy()) rmse_recursive[fld].append(rmse_error[:, i].numpy()) # Rest have regular 2 day (8) dc time else: if (ic + 1) % 8 == 0 or ic == 0: acc_recursive[fld].append(acc_error[:, i].numpy()) rmse_recursive[fld].append(rmse_error[:, i].numpy()) # Field stacking for var_dict in [acc_recursive, rmse_recursive]: for key, value in var_dict.items(): print(f"{len(value)} samples for field {key}") var_dict[key] = np.stack(value, axis=0) np.save("rmse_recursive", rmse_recursive) np.save("acc_recursive", acc_recursive)
7,069
Python
33.827586
88
0.610553
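The core of the inferencer above is its two modes: single-step prediction from ground truth, and a recursive rollout in which each prediction is fed back as the next input. A stripped-down sketch of the rollout loop (key names follow the script's x_t0/x_t1 convention, but this is illustrative only):

def rollout(model, invar, nsteps):
    preds = []
    state = invar                      # first step consumes real data
    for _ in range(nsteps):
        out = model(state)
        preds.append(out["x_t1"])
        state = {"x_t0": out["x_t1"]}  # feed the prediction back in
    return preds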
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/fourcastnet.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Defines the FCN architecture""" import logging import torch from torch import Tensor from typing import List, Tuple, Dict from modulus.sym.models.afno.afno import AFNONet from modulus.sym.models.arch import Arch from modulus.sym.key import Key class FourcastNetArch(Arch): "Defines the FourcastNet architecture" def __init__( self, input_keys: List[Key], output_keys: List[Key], img_shape: Tuple[int, int], detach_keys: List[Key] = [], patch_size: int = 16, embed_dim: int = 256, depth: int = 4, num_blocks: int = 4, ) -> None: """Fourcastnet model. This is a simple wrapper for Modulus' AFNO model. The only difference is that FourcastNet needs multi-step training. This class allows the model to auto-regressively predict multiple timesteps Parameters (Same as AFNO) ---------- input_keys : List[Key] Input key list. The key dimension size should equal the variables channel dim. output_keys : List[Key] Output key list. The key dimension size should equal the variables channel dim. 
img_shape : Tuple[int, int] Input image dimensions (height, width) detach_keys : List[Key], optional List of keys to detach gradients, by default [] patch_size : int, optional Size of image patchs, by default 16 embed_dim : int, optional Embedded channel size, by default 256 depth : int, optional Number of AFNO layers, by default 4 num_blocks : int, optional Number of blocks in the frequency weight matrices, by default 4 """ super().__init__( input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys, ) # get number of timesteps steps to unroll assert ( len(self.input_keys) == 1 ), "Error, FourcastNet only accepts one input variable (x_t0)" self.n_tsteps = len(self.output_keys) logging.info(f"Unrolling FourcastNet over {self.n_tsteps} timesteps") # get number of input/output channels in_channels = self.input_keys[0].size out_channels = self.output_keys[0].size # intialise AFNO kernel self._impl = AFNONet( in_channels=in_channels, out_channels=out_channels, patch_size=(patch_size, patch_size), img_size=img_shape, embed_dim=embed_dim, depth=depth, num_blocks=num_blocks, ) def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]: # prepare input tensor x = self.prepare_input( input_variables=in_vars, mask=self.input_key_dict.keys(), detach_dict=self.detach_key_dict, dim=1, input_scales=self.input_scales, ) # unroll model over multiple timesteps ys = [] for t in range(self.n_tsteps): x = self._impl(x) ys.append(x) y = torch.cat(ys, dim=1) # prepare output dict return self.prepare_output( output_tensor=y, output_var=self.output_key_dict, dim=1, output_scales=self.output_scales, )
4,496
Python
35.560975
91
0.630338
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/metrics.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from typing import Tuple class Metrics: """Class used for computing performance related metrics. Expects predictions / targets to be of shape [C, H, W] where H is latitude dimension and W is longitude dimension. Metrics are computed for each channel separately. Parameters ---------- img_shape : Tuple[int] Shape of input image (resolution for fourcastnet) clim_mean_path : str, optional Path to total climate mean data, needed for ACC. By default "/era5/stats/time_means.npy" device : torch.device, optional Pytorch device model is on, by default 'cpu' """ def __init__( self, img_shape: Tuple[int], clim_mean_path: str = "/era5/stats/time_means.npy", device: torch.device = "cpu", ): self.img_shape = tuple(img_shape) self.device = device # Load climate mean value self.clim_mean = torch.as_tensor(np.load(clim_mean_path)) # compute latitude weighting nlat = img_shape[0] lat = torch.linspace(90, -90, nlat) lat_weight = torch.cos(torch.pi * (lat / 180)) lat_weight = nlat * lat_weight / lat_weight.sum() self.lat_weight = lat_weight.view(1, nlat, 1) # place on device if self.device is not None: self.lat_weight = self.lat_weight.to(self.device) self.clim_mean = self.clim_mean.to(self.device) def _check_shape(self, *args): # checks for shape [C, H, W] for x in args: assert x.ndim == 3 assert tuple(x.shape[1:]) == self.img_shape def weighted_acc(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """Computes the anomaly correlation coefficient (ACC). The ACC calculation is weighted based on the latitude. 
Parameters ---------- pred : torch.Tensor [C, H, W] Predicted tensor target : torch.Tensor [C, H, W] Target tensor Returns ------- torch.Tensor [C] ACC values for each channel """ self._check_shape(pred, target) # subtract climate means (n_chans, img_x, img_y) = pred.shape clim_mean = self.clim_mean[0, 0:n_chans, 0:img_x] pred_hat = pred - clim_mean target_hat = target - clim_mean # Weighted mean pred_bar = torch.sum( self.lat_weight * pred_hat, dim=(1, 2), keepdim=True ) / torch.sum( self.lat_weight * torch.ones_like(pred_hat), dim=(1, 2), keepdim=True ) target_bar = torch.sum( self.lat_weight * target_hat, dim=(1, 2), keepdim=True ) / torch.sum( self.lat_weight * torch.ones_like(target_hat), dim=(1, 2), keepdim=True ) pred_diff = pred_hat - pred_bar target_diff = target_hat - target_bar # compute weighted acc # Ref: https://www.atmos.albany.edu/daes/atmclasses/atm401/spring_2016/ppts_pdfs/ECMWF_ACC_definition.pdf p1 = torch.sum(self.lat_weight * pred_diff * target_diff, dim=(1, 2)) p2 = torch.sum(self.lat_weight * pred_diff * pred_diff, dim=(1, 2)) p3 = torch.sum(self.lat_weight * target_diff * target_diff, dim=(1, 2)) m = p1 / torch.sqrt(p2 * p3) return m def weighted_rmse(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """Computes RMSE weighted based on latitude Parameters ---------- pred : torch.Tensor [C, H, W] Predicted tensor target : torch.Tensor [C, H, W] Target tensor Returns ------- torch.Tensor [C] Weighted RSME values for each channel """ self._check_shape(pred, target) # compute weighted rmse m = torch.sqrt(torch.mean(self.lat_weight * (pred - target) ** 2, dim=(1, 2))) return m
5,098
Python
34.657342
113
0.616712
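In symbols, with latitude weights w_i = H\cos\varphi_i / \sum_k \cos\varphi_k (normalised so the weights average to 1, matching the lat_weight tensor above) and anomalies \hat{p}, \hat{t} taken about the climate mean and its weighted spatial average, the two metrics are

\mathrm{ACC} = \frac{\sum_{ij} w_i \,\hat{p}_{ij}\,\hat{t}_{ij}}{\sqrt{\left(\sum_{ij} w_i \,\hat{p}_{ij}^{2}\right)\left(\sum_{ij} w_i \,\hat{t}_{ij}^{2}\right)}}, \qquad \mathrm{RMSE} = \sqrt{\frac{1}{HW}\sum_{ij} w_i \,(p_{ij} - t_{ij})^{2}},

computed per channel, exactly as in weighted_acc and weighted_rmse.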
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/dataset.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import h5py import logging import numpy as np from typing import List from pathlib import Path from modulus.sym.hydra import to_absolute_path from modulus.sym.dataset import Dataset class ERA5HDF5GridDataset(Dataset): """Lazy-loading ERA5 dataset. Parameters ---------- data_dir : str Directory where ERA5 data is stored chans : List[int] Defines which ERA5 variables to load tstep : int Defines the size of the timestep between the input and output variables n_tsteps : int, optional Defines how many timesteps are included in the output variables Default is 1 patch_size : int, optional If specified, crops input and output variables so image dimensions are divisible by patch_size Default is None n_samples_per_year : int, optional If specified, randomly selects n_samples_per_year samples from each year rather than all of the samples per year Default is None stats_dir : str, optional Directory to test data statistic numpy files that have the global mean and variance """ def __init__( self, data_dir: str, chans: List[int], tstep: int = 1, n_tsteps: int = 1, patch_size: int = None, n_samples_per_year: int = None, stats_dir: str = None, ): self.data_dir = Path(to_absolute_path(data_dir)) print(self.data_dir) self.chans = chans self.nchans = len(self.chans) self.tstep = tstep self.n_tsteps = n_tsteps self.patch_size = patch_size self.n_samples_per_year = n_samples_per_year if stats_dir is None: self.stats_dir = self.data_dir.parent / "stats" # check root directory exists assert ( self.data_dir.is_dir() ), f"Error, data directory {self.data_dir} does not exist" assert ( self.stats_dir.is_dir() ), f"Error, stats directory {self.stats_dir} does not exist" # get all input data files self.data_paths = sorted(self.data_dir.glob("??????.h5")) for data_path in self.data_paths: logging.info(f"ERA5 file found: {data_path}") self.n_years = len(self.data_paths) logging.info(f"Number of months: {self.n_years}") # get total number of examples and image shape from the first file, # assuming other files have exactly the same format. 
logging.info(f"Getting file stats from {self.data_paths[0]}") with h5py.File(self.data_paths[0], "r") as f: self.n_samples_per_year_all = f["fields"].shape[0] self.img_shape = f["fields"].shape[2:] logging.info(f"Number of channels available: {f['fields'].shape[1]}") # get example indices to use if self.n_samples_per_year is None: self.n_samples_per_year = self.n_samples_per_year_all self.samples = [ np.arange(self.n_samples_per_year) for _ in range(self.n_years) ] else: if self.n_samples_per_year > self.n_samples_per_year_all: raise ValueError( f"n_samples_per_year ({self.n_samples_per_year}) > number of samples available ({self.n_samples_per_year_all})!" ) self.samples = [ np.random.choice( np.arange(self.n_samples_per_year_all), self.n_samples_per_year, replace=False, ) for _ in range(self.n_years) ] logging.info(f"Number of samples/month: {self.n_samples_per_year}") # get total length self.length = self.n_years * self.n_samples_per_year # adjust image shape if patch_size defined if self.patch_size is not None: self.img_shape = [s - s % self.patch_size for s in self.img_shape] logging.info(f"Input image shape: {self.img_shape}") # load normalisation values # has shape [1, C, 1, 1] self.mu = np.load(self.stats_dir / "global_means.npy")[:, self.chans] # has shape [1, C, 1, 1] self.sd = np.load(self.stats_dir / "global_stds.npy")[:, self.chans] assert ( self.mu.shape == self.sd.shape == (1, self.nchans, 1, 1) ), "Error, normalisation arrays have wrong shape" def worker_init_fn(self, iworker): super().worker_init_fn(iworker) # open all year files at once on worker thread self.data_files = [h5py.File(path, "r") for path in self.data_paths] @property def invar_keys(self): return ["x_t0"] @property def outvar_keys(self): return [f"x_t{(i+1)*self.tstep}" for i in range(self.n_tsteps)] def __getitem__(self, idx): # get local indices from global index year_idx = int(idx / self.n_samples_per_year) local_idx = int(idx % self.n_samples_per_year) in_idx = self.samples[year_idx][local_idx] # get output indices out_idxs = [] for i in range(self.n_tsteps): out_idx = in_idx + (i + 1) * self.tstep # if at end of dataset, just learn identity instead if out_idx > (self.n_samples_per_year_all - 1): out_idx = in_idx out_idxs.append(out_idx) # get data xs = [] for idx in [in_idx] + out_idxs: # get array # has shape [C, H, W] x = self.data_files[year_idx]["fields"][idx, self.chans] assert x.ndim == 3, f"Expected 3 dimensions, but got {x.shape}" # apply input / output normalisation (broadcasted operation) x = (x - self.mu[0]) / self.sd[0] # crop data if needed if self.patch_size is not None: x = x[..., : self.img_shape[0], : self.img_shape[1]] xs.append(x) # convert to tensor dicts invar = {"x_t0": xs[0]} outvar = {f"x_t{(i+1)*self.tstep}": x for i, x in enumerate(xs[1:])} invar = Dataset._to_tensor_dict(invar) outvar = Dataset._to_tensor_dict(outvar) # TODO: get rid to lambda weighting lambda_weighting = Dataset._to_tensor_dict( {k: np.ones_like(v) for k, v in outvar.items()} ) # lambda_weighting = Dataset._to_tensor_dict( # {k: np.array([1]) for k, v in outvar.items()} # ) return invar, outvar, lambda_weighting def __len__(self): return self.length
7,719
Python
36.294686
132
0.598523
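The __getitem__ above maps a flat dataset index to a (file, sample) pair with simple integer arithmetic; a tiny illustration of the mapping:

def split_index(idx, n_samples_per_year):
    # file (year/month) index, then sample index within that file
    return idx // n_samples_per_year, idx % n_samples_per_year

assert split_index(7, 4) == (1, 3)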
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/loss.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch from typing import Dict Tensor = torch.Tensor class LpLoss(torch.nn.Module): def __init__( self, d: float = 2.0, p: float = 2.0, ): """Relative Lp loss normalized separately in the batch dimension. Expects inputs of the shape [B, C, ...] Parameters ---------- d : float, optional Spatial dimension, by default 2.0 (not used by this relative norm) p : float, optional Norm power, by default 2.0 """ super(LpLoss, self).__init__() # Dimension and Lp-norm type are positive assert p > 0.0 self.p = p def _rel(self, x: torch.Tensor, y: torch.Tensor) -> float: num_examples = x.size()[0] xv = x.reshape(num_examples, -1) yv = y.reshape(num_examples, -1) diff_norms = torch.linalg.norm(xv - yv, ord=self.p, dim=1) y_norms = torch.linalg.norm(yv, ord=self.p, dim=1) return torch.mean(diff_norms / y_norms) def forward( self, invar: Dict[str, Tensor], pred_outvar: Dict[str, Tensor], true_outvar: Dict[str, Tensor], lambda_weighting: Dict[str, Tensor], step: int, ) -> Dict[str, float]: losses = {} for key in pred_outvar: losses[key] = self._rel(pred_outvar[key], true_outvar[key]) return losses
2,433
Python
33.28169
73
0.648993
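The _rel method above implements the batch-averaged relative Lp error,

L_p(x, y) = \frac{1}{B} \sum_{b=1}^{B} \frac{\lVert x_b - y_b \rVert_p}{\lVert y_b \rVert_p},

where each example b is flattened before the norm is taken; normalising by \lVert y_b \rVert_p keeps channels with very different magnitudes comparable.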
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/plot_results.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np import matplotlib.pyplot as plt network_dir = "./outputs/diffusion_bar/validators/" data_1 = np.load(network_dir + "Val1.npz", allow_pickle=True) data_2 = np.load(network_dir + "Val2.npz", allow_pickle=True) data_1 = np.atleast_1d(data_1.f.arr_0)[0] data_2 = np.atleast_1d(data_2.f.arr_0)[0] plt.plot(data_1["x"][:, 0], data_1["pred_u_1"][:, 0], "--", label="u_1_pred") plt.plot(data_2["x"][:, 0], data_2["pred_u_2"][:, 0], "--", label="u_2_pred") plt.plot(data_1["x"][:, 0], data_1["true_u_1"][:, 0], label="u_1_true") plt.plot(data_2["x"][:, 0], data_2["true_u_2"][:, 0], label="u_2_true") plt.legend() plt.savefig("image_diffusion_problem_bootcamp")
1,801
Python
46.421051
77
0.716824
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/diffusion_bar.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from sympy import Symbol, Eq, Function, Number import modulus from modulus.sym.hydra import instantiate_arch , ModulusConfig from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Line1D from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseInteriorConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.domain.monitor import PointwiseMonitor from modulus.sym.key import Key from modulus.sym.node import Node from modulus.sym.eq.pde import PDE # params for domain L1 = Line1D(0, 1) L2 = Line1D(1, 2) D1 = 1e1 D2 = 1e-1 Tc = 100 Ta = 0 Tb = (Tc + (D1 / D2) * Ta) / (1 + (D1 / D2)) print(Ta) print(Tb) print(Tc) class Diffusion(PDE): name = "Diffusion" def __init__(self, T="T", D="D", Q=0, dim=3, time=True): # set params self.T = T self.dim = dim self.time = time # coordinates x, y, z = Symbol("x"), Symbol("y"), Symbol("z") # time t = Symbol("t") # make input variables input_variables = {"x": x, "y": y, "z": z, "t": t} if self.dim == 1: input_variables.pop("y") input_variables.pop("z") elif self.dim == 2: input_variables.pop("z") if not self.time: input_variables.pop("t") # Temperature assert type(T) == str, "T needs to be string" T = Function(T)(*input_variables) # Diffusivity if type(D) is str: D = Function(D)(*input_variables) elif type(D) in [float, int]: D = Number(D) # Source if type(Q) is str: Q = Function(Q)(*input_variables) elif type(Q) in [float, int]: Q = Number(Q) # set equations self.equations = {} self.equations["diffusion_" + self.T] = ( T.diff(t) - (D * T.diff(x)).diff(x) - (D * T.diff(y)).diff(y) - (D * T.diff(z)).diff(z) - Q ) class DiffusionInterface(PDE): name = "DiffusionInterface" def __init__(self, T_1, T_2, D_1, D_2, dim=3, time=True): # set params self.T_1 = T_1 self.T_2 = T_2 self.dim = dim self.time = time # coordinates x, y, z = Symbol("x"), Symbol("y"), Symbol("z") normal_x, normal_y, normal_z = ( Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z"), ) # time t = Symbol("t") # make input variables input_variables = {"x": x, "y": y, "z": z, "t": t} if self.dim == 1: input_variables.pop("y") input_variables.pop("z") elif self.dim == 2: input_variables.pop("z") if not self.time: input_variables.pop("t") # Diffusivity if type(D_1) is str: D_1 = Function(D_1)(*input_variables) elif type(D_1) in [float, int]: 
D_1 = Number(D_1) if type(D_2) is str: D_2 = Function(D_2)(*input_variables) elif type(D_2) in [float, int]: D_2 = Number(D_2) # variables to match the boundary conditions (example Temperature) T_1 = Function(T_1)(*input_variables) T_2 = Function(T_2)(*input_variables) # set equations self.equations = {} self.equations["diffusion_interface_dirichlet_" + self.T_1 + "_" + self.T_2] = ( T_1 - T_2 ) flux_1 = D_1 * ( normal_x * T_1.diff(x) + normal_y * T_1.diff(y) + normal_z * T_1.diff(z) ) flux_2 = D_2 * ( normal_x * T_2.diff(x) + normal_y * T_2.diff(y) + normal_z * T_2.diff(z) ) self.equations["diffusion_interface_neumann_" + self.T_1 + "_" + self.T_2] = ( flux_1 - flux_2 ) @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: # make list of nodes to unroll graph on diff_u1 = Diffusion(T="u_1", D=D1, dim=1, time=False) diff_u2 = Diffusion(T="u_2", D=D2, dim=1, time=False) diff_in = DiffusionInterface("u_1", "u_2", D1, D2, dim=1, time=False) diff_net_u_1 = instantiate_arch( input_keys=[Key("x")], output_keys=[Key("u_1")], cfg=cfg.arch.fully_connected, ) diff_net_u_2 = instantiate_arch( input_keys=[Key("x")], output_keys=[Key("u_2")], cfg=cfg.arch.fully_connected, ) nodes = ( diff_u1.make_nodes() + diff_u2.make_nodes() + diff_in.make_nodes() + [diff_net_u_1.make_node(name="u1_network", jit=cfg.jit)] + [diff_net_u_2.make_node(name="u2_network", jit=cfg.jit)] ) # make domain add constraints to the solver domain = Domain() # sympy variables x = Symbol("x") # right hand side (x = 2) Pt c rhs = PointwiseBoundaryConstraint( nodes=nodes, geometry=L2, outvar={"u_2": Tc}, batch_size=cfg.batch_size.rhs, criteria=Eq(x, 2), ) domain.add_constraint(rhs, "right_hand_side") # left hand side (x = 0) Pt a lhs = PointwiseBoundaryConstraint( nodes=nodes, geometry=L1, outvar={"u_1": Ta}, batch_size=cfg.batch_size.lhs, criteria=Eq(x, 0), ) domain.add_constraint(lhs, "left_hand_side") # interface 1-2 interface = PointwiseBoundaryConstraint( nodes=nodes, geometry=L1, outvar={ "diffusion_interface_dirichlet_u_1_u_2": 0, "diffusion_interface_neumann_u_1_u_2": 0, }, batch_size=cfg.batch_size.interface, criteria=Eq(x, 1), ) domain.add_constraint(interface, "interface") # interior 1 interior_u1 = PointwiseInteriorConstraint( nodes=nodes, geometry=L1, outvar={"diffusion_u_1": 0}, bounds={x: (0, 1)}, batch_size=cfg.batch_size.interior_u1, ) domain.add_constraint(interior_u1, "interior_u1") # interior 2 interior_u2 = PointwiseInteriorConstraint( nodes=nodes, geometry=L2, outvar={"diffusion_u_2": 0}, bounds={x: (1, 2)}, batch_size=cfg.batch_size.interior_u2, ) domain.add_constraint(interior_u2, "interior_u2") # validation data x = np.expand_dims(np.linspace(0, 1, 100), axis=-1) u_1 = x * Tb + (1 - x) * Ta invar_numpy = {"x": x} outvar_numpy = {"u_1": u_1} val = PointwiseValidator(nodes=nodes,invar=invar_numpy, true_outvar=outvar_numpy) domain.add_validator(val, name="Val1") # make validation data line 2 x = np.expand_dims(np.linspace(1, 2, 100), axis=-1) u_2 = (x - 1) * Tc + (2 - x) * Tb invar_numpy = {"x": x} outvar_numpy = {"u_2": u_2} val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy) domain.add_validator(val, name="Val2") # make monitors invar_numpy = {"x": [[1.0]]} monitor = PointwiseMonitor( invar_numpy, output_names=["u_1__x"], metrics={"flux_u1": lambda var: torch.mean(var["u_1__x"])}, nodes=nodes, requires_grad=True, ) domain.add_monitor(monitor) monitor = PointwiseMonitor( invar_numpy, output_names=["u_2__x"], metrics={"flux_u2": lambda var: 
torch.mean(var["u_2__x"])}, nodes=nodes, requires_grad=True, ) domain.add_monitor(monitor) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
8,835
Python
28.065789
88
0.572835
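As a cross-check on the validators above: the two linear reference profiles only solve the coupled problem when the interface temperature Tb satisfies flux continuity, D1*(Tb - Ta) = D2*(Tc - Tb), i.e. Tb = (D1*Ta + D2*Tc)/(D1 + D2). A minimal standalone sanity check (not part of the repo; the D1, D2, Ta, Tc values here are illustrative stand-ins for the constants the script defines earlier):

import numpy as np

D1, D2 = 10.0, 0.1   # assumed diffusivities of bar 1 ([0,1]) and bar 2 ([1,2])
Ta, Tc = 0.0, 100.0  # assumed end temperatures at x=0 and x=2
Tb = (D1 * Ta + D2 * Tc) / (D1 + D2)  # interface temperature from flux continuity

x1 = np.linspace(0, 1, 5)
x2 = np.linspace(1, 2, 5)
u_1 = x1 * Tb + (1 - x1) * Ta
u_2 = (x2 - 1) * Tc + (2 - x2) * Tb

assert np.isclose(u_1[-1], u_2[0])                 # Dirichlet interface: u_1(1) == u_2(1)
assert np.isclose(D1 * (Tb - Ta), D2 * (Tc - Tb))  # Neumann interface: flux continuity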
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/darcy_FNO_lazy.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import modulus
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import HDF5GridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter

from utilities import download_FNO_dataset


@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
    # load training/test data
    input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
    output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]

    download_FNO_dataset("Darcy_241", outdir="datasets/")
    train_path = to_absolute_path(
        "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5"
    )
    test_path = to_absolute_path(
        "datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5"
    )

    # make datasets
    train_dataset = HDF5GridDataset(
        train_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=1000
    )
    test_dataset = HDF5GridDataset(
        test_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=100
    )

    # make list of nodes to unroll graph on
    decoder_net = instantiate_arch(
        cfg=cfg.arch.decoder,
        output_keys=output_keys,
    )
    fno = instantiate_arch(
        cfg=cfg.arch.fno,
        input_keys=input_keys,
        decoder_net=decoder_net,
    )
    nodes = [fno.make_node('fno')]

    # make domain
    domain = Domain()

    # add constraints to domain
    supervised = SupervisedGridConstraint(
        nodes=nodes,
        dataset=train_dataset,
        batch_size=cfg.batch_size.grid,
        num_workers=4,  # number of parallel data loaders
    )
    domain.add_constraint(supervised, "supervised")

    # add validator
    val = GridValidator(
        nodes,
        dataset=test_dataset,
        batch_size=cfg.batch_size.validation,
        plotter=GridValidatorPlotter(n_examples=5),
    )
    domain.add_validator(val, "test")

    # make solver
    slv = Solver(cfg, domain)

    # start solver
    slv.solve()


if __name__ == "__main__":
    run()
3,392
Python
32.264706
79
0.704009
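For orientation, the two HDF5 files referenced above store arrays in N, C, H, W layout (the preprocessing in utilities.py below adds the channel axis). A small, hypothetical inspection snippet, assuming the dataset has already been downloaded to datasets/:

import h5py

path = "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5"
with h5py.File(path, "r") as f:
    for k in f.keys():
        print(k, f[k].shape)  # e.g. "coeff" / "sol" with shape (N, 1, 241, 241)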
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/utilities.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import os
import zipfile

try:
    import gdown
except ImportError:
    gdown = None

import scipy.io
import numpy as np
import h5py

from modulus.sym.hydra import to_absolute_path

# list of FNO dataset url ids on drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt-
_FNO_datatsets_ids = {
    "Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV",
    "Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf",
}
_FNO_dataset_names = {
    "Darcy_241": (
        "piececonst_r241_N1024_smooth1.hdf5",
        "piececonst_r241_N1024_smooth2.hdf5",
    ),
    "Darcy_421": (
        "piececonst_r421_N1024_smooth1.hdf5",
        "piececonst_r421_N1024_smooth2.hdf5",
    ),
}


def load_FNO_dataset(path, input_keys, output_keys, n_examples=None):
    "Loads a FNO dataset"

    if not path.endswith(".hdf5"):
        raise Exception(
            ".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
        )

    # load data
    path = to_absolute_path(path)
    data = h5py.File(path, "r")
    _ks = [k for k in data.keys() if not k.startswith("__")]
    print(f"loaded: {path}\navailable keys: {_ks}")

    # parse data
    invar, outvar = dict(), dict()
    for d, keys in [(invar, input_keys), (outvar, output_keys)]:
        for k in keys:
            # get data
            x = data[k]  # N, C, H, W

            # cut examples out
            if n_examples is not None:
                x = x[:n_examples]

            # print out normalisation values
            print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")

            d[k] = x
    del data

    return (invar, outvar)


def download_FNO_dataset(name, outdir="datasets/"):
    "Tries to download FNO dataset from drive"

    if name not in _FNO_datatsets_ids:
        raise Exception(
            f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_datatsets_ids.keys())}"
        )

    id = _FNO_datatsets_ids[name]
    outdir = to_absolute_path(outdir) + "/"
    namedir = f"{outdir}{name}/"

    # skip if already exists
    exists = True
    for file_name in _FNO_dataset_names[name]:
        if not os.path.isfile(namedir + file_name):
            exists = False
            break
    if exists:
        return
    print(f"FNO dataset {name} not detected, downloading dataset")

    # Make sure we have gdown installed
    if gdown is None:
        raise ModuleNotFoundError("gdown package is required to download the dataset!")

    # get output directory
    os.makedirs(namedir, exist_ok=True)

    # download dataset
    zippath = f"{outdir}{name}.zip"
    _download_file_from_google_drive(id, zippath)

    # unzip
    with zipfile.ZipFile(zippath, "r") as f:
        f.extractall(namedir)
    os.remove(zippath)

    # preprocess files
    for file in os.listdir(namedir):
        if file.endswith(".mat"):
            matpath = f"{namedir}{file}"
            preprocess_FNO_mat(matpath)
            os.remove(matpath)


def _download_file_from_google_drive(id, path):
    "Downloads a file from google drive"

    # use gdown library to download file
    gdown.download(id=id, output=path)


def preprocess_FNO_mat(path):
    "Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays"

    assert path.endswith(".mat")
    data = scipy.io.loadmat(path)
    ks = [k for k in data.keys() if not k.startswith("__")]
    with h5py.File(path[:-4] + ".hdf5", "w") as f:
        for k in ks:
            x = np.expand_dims(data[k], axis=1)  # N, C, H, W
            f.create_dataset(
                k, data=x, dtype="float32"
            )  # note h5 files larger than .mat because no compression used
4,794
Python
30.339869
112
0.646016
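A hypothetical end-to-end use of the two helpers above, assuming Modulus is installed and a raw .mat file is already on disk (the filename is illustrative):

from utilities import preprocess_FNO_mat, load_FNO_dataset

# convert .mat -> .hdf5 (written next to the input, with a channel axis added)
preprocess_FNO_mat("datasets/Darcy_241/piececonst_r241_N1024_smooth1.mat")

# load a few examples and print their normalisation statistics
invar, outvar = load_FNO_dataset(
    "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
    input_keys=["coeff"],
    output_keys=["sol"],
    n_examples=8,
)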
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/ops.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch.nn.functional as F


def dx(inpt, dx, channel, dim, order=1, padding="zeros"):
    "Compute first order numerical derivatives of input tensor"

    var = inpt[:, channel : channel + 1, :, :]

    # get filter
    if order == 1:
        ddx1D = torch.Tensor(
            [
                -0.5,
                0.0,
                0.5,
            ]
        ).to(inpt.device)
    elif order == 3:
        ddx1D = torch.Tensor(
            [
                -1.0 / 60.0,
                3.0 / 20.0,
                -3.0 / 4.0,
                0.0,
                3.0 / 4.0,
                -3.0 / 20.0,
                1.0 / 60.0,
            ]
        ).to(inpt.device)
    ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])

    # apply convolution
    if padding == "zeros":
        var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
    elif padding == "replication":
        var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
    output = F.conv2d(var, ddx3D, padding="valid")
    output = (1.0 / dx) * output
    if dim == 0:
        output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
    elif dim == 1:
        output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]

    return output


def ddx(inpt, dx, channel, dim, order=1, padding="zeros"):
    "Compute second order numerical derivatives of input tensor"

    var = inpt[:, channel : channel + 1, :, :]

    # get filter
    if order == 1:
        ddx1D = torch.Tensor(
            [
                1.0,
                -2.0,
                1.0,
            ]
        ).to(inpt.device)
    elif order == 3:
        ddx1D = torch.Tensor(
            [
                1.0 / 90.0,
                -3.0 / 20.0,
                3.0 / 2.0,
                -49.0 / 18.0,
                3.0 / 2.0,
                -3.0 / 20.0,
                1.0 / 90.0,
            ]
        ).to(inpt.device)
    ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])

    # apply convolution
    if padding == "zeros":
        var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
    elif padding == "replication":
        var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
    output = F.conv2d(var, ddx3D, padding="valid")
    output = (1.0 / dx ** 2) * output
    if dim == 0:
        output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
    elif dim == 1:
        output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]

    return output
3,754
Python
33.136363
88
0.531167
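A quick numerical check of the dx stencil above (assuming this file is importable as ops): differentiate sin along the W axis and compare with cos. The grid size and the interior slice are illustrative; replication padding is only accurate away from the borders.

import math
import torch
from ops import dx as d_dx

n = 64
h = 2 * math.pi / n
x = torch.arange(n, dtype=torch.float32) * h
f = torch.sin(x).repeat(n, 1)[None, None]  # [1, 1, H, W], varies along W only

dfdx = d_dx(f, h, channel=0, dim=1, order=3, padding="replication")
err = (dfdx[0, 0, 32, 10:-10] - torch.cos(x)[10:-10]).abs().max()
print(f"max interior error: {err:.2e}")  # small away from the replicated borders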
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/navier_stokes/navier_stokes.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import numpy as np
import os
from sympy import Symbol, Eq, Abs, sin, cos

import modulus
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.geometry.primitives_2d import Rectangle as rect
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
    PointwiseConstraint,
    PointwiseInteriorConstraint,
)
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.utils.io import (
    VTKUniformGrid,
)


def read_wf_data(velocity_scale, pressure_scale):
    path = "/workspace/python/source_code/navier_stokes/data_lat.npy"
    print(path)
    ic = np.load(path).astype(np.float32)
    Pa_to_kgm3 = 0.10197
    mesh_y, mesh_x = np.meshgrid(
        np.linspace(-0.720, 0.719, ic[0].shape[0]),
        np.linspace(-0.720, 0.719, ic[0].shape[1]),
        indexing="ij",
    )
    invar = {}
    invar["x"] = np.expand_dims(mesh_x.astype(np.float32).flatten(), axis=-1)
    invar["y"] = np.expand_dims(mesh_y.astype(np.float32).flatten(), axis=-1)
    invar["t"] = np.full_like(invar["x"], 0)
    outvar = {}
    outvar["u"] = np.expand_dims((ic[0] / velocity_scale).flatten(), axis=-1)
    outvar["v"] = np.expand_dims((ic[1] / velocity_scale).flatten(), axis=-1)
    outvar["p"] = np.expand_dims((ic[2] * Pa_to_kgm3 / pressure_scale).flatten(), axis=-1)
    return invar, outvar


@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
    # define sympy variables to parametrize domain curves
    x, y = Symbol("x"), Symbol("y")

    # make geometry for problem
    length = (-0.720, 0.720)
    height = (-0.720, 0.720)
    box_bounds = {x: length, y: height}

    # define geometry
    rec = rect((length[0], height[0]), (length[1], height[1]))

    # Scaling and Nondimensionalizing the Problem

    #############
    # Real Params
    #############
    fluid_kinematic_viscosity = 1.655e-5  # m**2/s
    fluid_density = 1.1614  # kg/m**3
    fluid_specific_heat = 1005  # J/(kg K)
    fluid_conductivity = 0.0261  # W/(m K)

    ################
    # Non dim params for normalisation
    ################
    # Diameter of Earth : 12742000 m over range of 1.440
    length_scale = 12742000 / 1.440
    # 60 hrs to 1 timestep - every inference frame is a 6 hour prediction (s)
    time_scale = 60 * 60 * 60
    # Calculate velocity & pressure scale
    velocity_scale = length_scale / time_scale  # m/s
    pressure_scale = fluid_density * ((length_scale / time_scale) ** 2)  # kg / (m s**2)
    # Density scale
    density_scale = 1.1614  # kg/m3

    ##############################
    # Nondimensionalization Params for NavierStokes fn
    ##############################
    # fluid params
    nd_fluid_kinematic_viscosity = fluid_kinematic_viscosity / (
        length_scale ** 2 / time_scale
    )
    nd_fluid_density = fluid_density / density_scale

    # time window parameters
    time_window_size = 1.0
    t_symbol = Symbol("t")
    time_range = {t_symbol: (0, time_window_size)}

    # make navier stokes equations
    ns = NavierStokes(nu=nd_fluid_kinematic_viscosity, rho=nd_fluid_density, dim=2, time=True)

    # make network
    flow_net = FullyConnectedArch(
        input_keys=[Key("x"), Key("y"), Key("t")],
        output_keys=[Key("u"), Key("v"), Key("p")],
        periodicity={"x": length, "y": height},
        layer_size=256,
    )

    # make nodes to unroll graph on
    nodes = ns.make_nodes() + [flow_net.make_node(name="flow_net")]

    # make initial condition domain
    navier = Domain("navier_stokes")

    # make initial condition
    ic_invar, ic_outvar = read_wf_data(velocity_scale, pressure_scale)

    ic = PointwiseConstraint.from_numpy(
        nodes,
        ic_invar,
        ic_outvar,
        batch_size=cfg.batch_size.initial_condition,
    )
    navier.add_constraint(ic, name="ic")

    # make interior constraint
    interior = PointwiseInteriorConstraint(
        nodes=nodes,
        geometry=rec,
        outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
        bounds=box_bounds,
        batch_size=cfg.batch_size.interior,
        parameterization=time_range,
    )
    navier.add_constraint(interior, name="interior")

    # add inference data for time slices
    for i, specific_time in enumerate(np.linspace(0, time_window_size, 10)):
        vtk_obj = VTKUniformGrid(
            bounds=[(-0.720, 0.720), (-0.360, 0.360)],
            npoints=[1440, 720],
            export_map={"u": ["u", "v"], "p": ["p"]},
        )
        grid_inference = PointVTKInferencer(
            vtk_obj=vtk_obj,
            nodes=nodes,
            input_vtk_map={"x": "x", "y": "y"},
            output_names=["u", "v", "p"],
            requires_grad=False,
            invar={"t": np.full([720 * 1440, 1], specific_time)},
            batch_size=100000,
        )
        navier.add_inferencer(grid_inference, name="time_slice_" + str(i).zfill(4))

    slv = Solver(cfg, navier)

    # start solver
    slv.solve()


if __name__ == "__main__":
    run()
6,473
Python
33.43617
94
0.631083
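The nondimensionalization above is easy to sanity-check by hand; this standalone arithmetic (not part of the script) reproduces the scales it computes:

length_scale = 12742000 / 1.440  # m per non-dimensional unit, ~8.85e6
time_scale = 60 * 60 * 60        # 60 h in seconds = 216000
velocity_scale = length_scale / time_scale
pressure_scale = 1.1614 * velocity_scale ** 2
print(f"{velocity_scale:.2f} m/s, {pressure_scale:.1f} kg/(m s^2)")  # ~40.97 m/s, ~1949 kg/(m s^2)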
eliabntt/animated_human_SMPL_to_USD/generate_sequence.py
import json
import os
import humangenerator
import bpy
import humangenerator as hgen
import argparse
import ipdb
import sys
import yaml

parser = argparse.ArgumentParser()
parser.add_argument("--dataset", help="Dataset from which you want to generate data")
parser.add_argument("--output_dir", help="Path to where the data should be saved")
parser.add_argument("--samples_dir", help="Paths where the data is stored")
parser.add_argument("--last_sample",
                    help="Last sample processed, this must be the FULL name of the folder (e.g. 00001). This WILL be processed",
                    default="")
parser.add_argument("--parent_path", help="Path containing the subfolders for the datasets (with the pkl models)",
                    default="")
parser.add_argument("--sample_id", help="ID of the sample, if empty process all", default="all")
parser.add_argument("--with_cache", help="Write \"False\" if generating blendshapes", default="True")
parser.add_argument("--suppress_out", help="Write \"False\" if output in console", default="False")
parser.add_argument("--write_verts", help="Write \"True\" if you want to write verts info in the pkl", default="False")
parser.add_argument("--frame", help="The n-th frame to generate. Default all", default="all")
parser.add_argument("--config_file", help="json file containing the configuration", default="")
parser.add_argument("--exp_name",
                    help="The name of the \"experiment\" of the dataset. By default the name of the samples_dir folder",
                    default="")

# structure should be `parent_path/[surreal/datageneration/smpl_data,body_models/{smplh,dmpls}]`
args = parser.parse_args()

with open(os.path.join("humangenerator", "avail_datasets.yaml"), 'r') as stream:
    data_loaded = yaml.safe_load(stream)
avail_datasets = data_loaded["datasets"]

processor = None
if avail_datasets == [] or args.dataset not in avail_datasets:
    if not avail_datasets:
        print("No avail dataset. Check file")
    else:
        print(f"Sought dataset is not yet avail. The avail ones are {avail_datasets}")
    exit(-1)
else:
    print(f"Processing {args.dataset} data")

found = (args.last_sample == "")

try:
    WITH_CACHE = (False if args.with_cache == "False" else True)
    parent_path = args.parent_path

    smpl_body_list = []
    # Init SMPL models
    smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data")
    smpl_models = {
        'f': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')),
        'm': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_m_lbs_10_207_0_v1.0.0.pkl')),
    }

    if args.frame != "all":
        try:
            frame = int(args.frame)
        except:
            print("Error converting frame to int, considering the WHOLE sequence")
            frame = None
    else:
        frame = None
        print("Whole sequence considered")
        print("This will export only the whole sequence")

    hgen.init()

    # Parse args
    PATH_SAMPLES = args.samples_dir
    if args.exp_name == "":
        exp_name = os.path.split(PATH_SAMPLES)[-1]
    else:
        exp_name = args.exp_name
    PATH_OUT = os.path.join(args.output_dir, exp_name)
    if not os.path.exists(PATH_OUT):
        os.makedirs(PATH_OUT)

    if args.config_file == "":
        config = {}
    else:
        if os.path.exists(args.config_file):
            with open(args.config_file, "r") as f:
                config = json.load(f)
        else:
            raise Exception("The config file could not be found: {}".format(args.config_file))

    processor, PATH_SAMPLES = hgen.get_processor(args.dataset, parent_path, WITH_CACHE, PATH_OUT, PATH_SAMPLES,
                                                 smpl_models, args.write_verts.lower() == "false", config)

    sample_id = args.sample_id
    if sample_id != "all":
        print("Processing single sample")
        # Check if sample exists
        if not os.path.isdir(os.path.join(PATH_SAMPLES, sample_id)):
            print("Specified sample does not exist")
            exit(-1)
        else:
            sample_id = [sample_id]
    else:
        print("Processing all samples")
        sample_id = os.listdir(PATH_SAMPLES)
        if not sample_id:
            print("No subfolder found")
            exit(-1)

    if len(smpl_body_list) == 0:
        smpl_body_list = processor.generator.load_SMPLs_objects()

    found = (args.last_sample == "")
    sample_id.sort()
    clean_cnt = 1

    for sample in sample_id:
        if not found:
            if sample == args.last_sample:
                found = True
            else:
                continue
        if clean_cnt % 100 == 0:
            clean_cnt = 0
            hgen.init()
            smpl_body_list = processor.generator.load_SMPLs_objects()
        clean_cnt += 1

        print("------------------------------")
        print(f"Processing {sample}")
        isdone = False
        count = 0
        while (not isdone and count <= 5):
            hgen.deselect()
            if len(sample_id) > 1:
                hgen.clean_mesh_and_textures(
                    exclude=['Material_0', 'Material_1', 'Armature_0', 'Armature_1', 'body_0', 'body_1'])
                print("Scene cleaned!\n\n")
            count += 1
            path_sample = os.path.join(PATH_OUT, sample + ('_with_cache' if WITH_CACHE else ''))
            if not os.path.exists(path_sample):
                os.makedirs(path_sample)

            with open(os.path.join(path_sample, f"out_{count}.txt"), "w") as file_out, open(
                    os.path.join(path_sample, f"err_{count}.txt"), "w") as file_err:
                # file logging
                try:
                    if args.suppress_out == "True":
                        sys.stdout = file_out
                        sys.stderr = file_err
                    res = processor.process_sample(sample, frame, smpl_body_list)
                    if res:
                        print("Exported!")
                    else:
                        raise Exception("Unknown error")
                    isdone = True
                except:
                    import traceback
                    sys.stderr.write('error\n')
                    sys.stderr.write(traceback.format_exc())
                    print(f"Failed -- going with try {count}\n\n")
                finally:
                    sys.stderr.flush()
                    sys.stdout.flush()
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__
except:
    import traceback
    sys.stderr.write('error\n')
    sys.stderr.write(traceback.format_exc())
    sys.stdout.flush()
    sys.stderr.flush()
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    print('error')
    print(traceback.format_exc())
    extype, value, tb = sys.exc_info()
    ipdb.post_mortem(tb)
6,955
Python
35.610526
128
0.570669
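The --config_file JSON is consumed by the AMASS processor (amass_gen.py, further down in this dump, reads exactly these keys in its __init__). A minimal example file with illustrative values:

import json

config = {
    "sub_dataset_id": "CMU",   # key into taxonomy.json's supported datasets (illustrative)
    "num_betas": 10,           # body shape coefficients
    "num_dmpls": 8,            # soft-tissue dynamics coefficients
    "subject_ids": "01 05",    # whitespace-separated; split() in amass.__init__
}
with open("amass_config.json", "w") as f:
    json.dump(config, f, indent=2)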
eliabntt/animated_human_SMPL_to_USD/start_blend_debug.py
import bpy
import sys
import ipdb
import os
from pathlib import Path

from bl_ui.space_text import TEXT_MT_editor_menus

repo_root_directory = os.path.join(os.path.dirname(__file__), ".")
sys.path.append(repo_root_directory)

argv = sys.argv[sys.argv.index("--") + 1:]

bpy.context.window.workspace = bpy.data.workspaces["Scripting"]
bpy.context.view_layer.update()

if argv[0].endswith(".py"):
    print(f"Loading: {os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0])}")
    text = bpy.data.texts.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0]))
    sys.argv = argv[:]
    print(f"New argv: {sys.argv}")
else:
    print("First argument should be the script file")
    exit(-1)


# Declare operator that runs the blender proc script
class RunHumanGeneratorOperator(bpy.types.Operator):
    bl_idname = "wm.run_humangenerator"
    bl_label = "Run Human Generator"
    bl_description = "This operator runs the loaded HumanGenerator script and also makes sure to unload all modules before starting."
    bl_options = {"REGISTER"}

    def execute(self, context):
        # Delete all loaded models inside src/, as they are cached inside blender
        for module in list(sys.modules.keys()):
            if module.startswith("humangenerator"):
                del sys.modules[module]
        # Make sure the parent of the humangenerator folder is in sys.path
        import_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "."))
        if import_path not in sys.path:
            sys.path.append(import_path)
        # Run the script
        try:
            bpy.ops.text.run_script()
        except RuntimeError:
            # Skip irrelevant error messages (The relevant stacktrace+error has already been printed at this point)
            pass
        return {"FINISHED"}


bpy.utils.register_class(RunHumanGeneratorOperator)


def draw(self, context):
    layout = self.layout

    st = context.space_data
    text = st.text
    is_syntax_highlight_supported = st.is_syntax_highlight_supported()
    layout.template_header()

    TEXT_MT_editor_menus.draw_collapsible(context, layout)

    if text and text.is_modified:
        row = layout.row(align=True)
        row.alert = True
        row.operator("text.resolve_conflict", text="", icon='HELP')

    layout.separator_spacer()

    row = layout.row(align=True)
    row.template_ID(st, "text", new="text.new", unlink="text.unlink", open="text.open")

    if text:
        is_osl = text.name.endswith((".osl", ".oso"))
        if is_osl:
            row.operator("node.shader_script_update", text="", icon='FILE_REFRESH')
        else:
            row = layout.row()
            row.active = is_syntax_highlight_supported
            # The following line has changed compared to the original code, it starts our operator instead of text.run_script
            row.operator("wm.run_humangenerator", text="Run")

    layout.separator_spacer()

    row = layout.row(align=True)
    row.prop(st, "show_line_numbers", text="")
    row.prop(st, "show_word_wrap", text="")

    syntax = row.row(align=True)
    syntax.active = is_syntax_highlight_supported
    syntax.prop(st, "show_syntax_highlight", text="")


# Set our draw function as the default draw function for text area headers
bpy.types.TEXT_HT_header.draw = draw

# Put text into scripting tool
for area in bpy.data.workspaces["Scripting"].screens[0].areas.values():
    if area.type == 'TEXT_EDITOR':
        area.spaces.active.text = text
3,540
Python
34.767676
133
0.652825
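Because the generator scripts parse sys.argv with argparse, they are meant to be launched through this wrapper, which trims everything before the "--" separator and rewrites sys.argv for the loaded script. A hypothetical launch (paths illustrative): blender --python start_blend_debug.py -- generate_sequence.py --dataset cloth3d --parent_path /data --samples_dir /data/cloth3d --output_dir /data/out. Blender then opens with the script loaded in the Scripting workspace, and the patched Run button executes it with the trimmed argv.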
eliabntt/animated_human_SMPL_to_USD/convert_fbx.py
import json
import os
import humangenerator
import bpy
import humangenerator as hgen
import argparse
import ipdb
import sys
import yaml

parser = argparse.ArgumentParser()
parser.add_argument("--fbx", help="Path to the fbx file")
parser.add_argument("--output_dir", help="Path to where the data should be saved")
parser.add_argument("--temp_dir", help="Path to where the data should be temporary saved")
parser.add_argument("--usd", help="True if export usd is necessary, default to false", default="False")

args = parser.parse_args()

out_dir = args.output_dir
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

fbx = args.fbx

for o in bpy.context.scene.objects:
    o.select_set(True)

# Call the operator only once
bpy.ops.object.delete()

with open(os.path.join(out_dir, f"out.txt"), "w") as file_out, open(
        os.path.join(out_dir, f"err.txt"), "w") as file_err:
    try:
        sys.stdout = file_out
        sys.stderr = file_err
        bpy.ops.import_scene.fbx(filepath=fbx)

        filepath = os.path.join(out_dir, os.path.basename(fbx[:-4]) + ".usd")
        temp_filepath = os.path.join(args.temp_dir, os.path.basename(fbx[:-4]) + ".usd")
        hgen.export_data(temp_filepath, out_dir, os.path.basename(fbx[:-4]), False, None, {}, {}, False,
                         args.usd.lower() == "true")

        bpy.ops.object.select_all(action='SELECT')
        bpy.ops.object.delete()
        succeed = True
    except:
        import traceback
        sys.stderr.write('error\n')
        sys.stderr.write(traceback.format_exc())
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
1,653
Python
30.807692
128
0.655777
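In the same spirit, a hypothetical conversion run through the wrapper (paths illustrative): blender --python start_blend_debug.py -- convert_fbx.py --fbx /data/anim.fbx --output_dir /data/out --temp_dir /tmp/usd --usd True. Note that stdout/stderr are redirected to out.txt/err.txt in the output directory for the duration of the conversion.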
eliabntt/animated_human_SMPL_to_USD/data_folder/smpl/smpl_np.py
import sys
import numpy as np
import pickle


class SMPLModel():
    def __init__(self, model_path):
        """
        SMPL model.

        Parameter:
        ---------
        model_path: Path to the SMPL model parameters, pre-processed by
        `preprocess.py`.

        """
        with open(model_path, 'rb') as f:
            if sys.version_info[0] == 2:
                params = pickle.load(f)  # Python 2.x
            elif sys.version_info[0] == 3:
                params = pickle.load(f, encoding='latin1')  # Python 3.x

            self.J_regressor = params['J_regressor']
            self.weights = params['weights']
            self.posedirs = params['posedirs']
            self.v_template = params['v_template']
            self.shapedirs = params['shapedirs']
            self.faces = params['f']
            self.kintree_table = params['kintree_table']

        id_to_col = {
            self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1])
        }
        self.parent = {
            i: id_to_col[self.kintree_table[0, i]]
            for i in range(1, self.kintree_table.shape[1])
        }

        self.pose_shape = [24, 3]
        self.beta_shape = [10]
        self.trans_shape = [3]

        self.pose = np.zeros(self.pose_shape)
        self.beta = np.zeros(self.beta_shape)
        self.trans = np.zeros(self.trans_shape)

        self.verts = None
        self.J = None
        self.R = None

        self.update()

    def set_params(self, pose=None, beta=None, trans=None):
        """
        Set pose, shape, and/or translation parameters of SMPL model. Vertices of the
        model will be updated and returned.

        Parameters:
        ---------
        pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
        relative to parent joint. For root joint it's global orientation.
        Represented in an axis-angle format.

        beta: Parameter for model shape. A vector of shape [10]. Coefficients for
        PCA components. Only 10 components were released by MPI.

        trans: Global translation of shape [3].

        Return:
        ------
        Updated vertices.

        """
        if pose is not None:
            self.pose = pose
        if beta is not None:
            self.beta = beta
        if trans is not None:
            self.trans = trans
        self.update()
        return self.verts, self.J

    def update(self):
        """
        Called automatically when parameters are updated.

        """
        # how beta affect body shape
        v_shaped = self.shapedirs.dot(self.beta) + self.v_template
        # joints location
        self.J = self.J_regressor.dot(v_shaped)
        pose_cube = self.pose.reshape((-1, 1, 3))
        # rotation matrix for each joint
        self.R = self.rodrigues(pose_cube)
        I_cube = np.broadcast_to(
            np.expand_dims(np.eye(3), axis=0),
            (self.R.shape[0]-1, 3, 3)
        )
        lrotmin = (self.R[1:] - I_cube).ravel()
        # how pose affect body shape in zero pose
        v_posed = v_shaped + self.posedirs.dot(lrotmin)
        # world transformation of each joint
        G = np.empty((self.kintree_table.shape[1], 4, 4))
        G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
        for i in range(1, self.kintree_table.shape[1]):
            G[i] = G[self.parent[i]].dot(
                self.with_zeros(
                    np.hstack(
                        [self.R[i], ((self.J[i, :]-self.J[self.parent[i], :]).reshape([3, 1]))]
                    )
                )
            )
        G = G - self.pack(
            np.matmul(
                G,
                np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])
            )
        )
        # transformation of each vertex
        T = np.tensordot(self.weights, G, axes=[[1], [0]])
        rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
        v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3]
        self.verts = v + self.trans.reshape([1, 3])

    def rodrigues(self, r):
        """
        Rodrigues' rotation formula that turns axis-angle vector into rotation
        matrix in a batched manner.

        Parameter:
        ----------
        r: Axis-angle rotation vector of shape [batch_size, 1, 3].

        Return:
        -------
        Rotation matrix of shape [batch_size, 3, 3].

        """
        theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
        # avoid zero divide
        theta = np.maximum(theta, np.finfo(np.float64).tiny)
        r_hat = r / theta
        cos = np.cos(theta)
        z_stick = np.zeros(theta.shape[0])
        m = np.dstack([
            z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
            r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
            -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick]
        ).reshape([-1, 3, 3])
        i_cube = np.broadcast_to(
            np.expand_dims(np.eye(3), axis=0),
            [theta.shape[0], 3, 3]
        )
        A = np.transpose(r_hat, axes=[0, 2, 1])
        B = r_hat
        dot = np.matmul(A, B)
        R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
        return R

    def with_zeros(self, x):
        """
        Append a [0, 0, 0, 1] vector to a [3, 4] matrix.

        Parameter:
        ---------
        x: Matrix to be appended.

        Return:
        ------
        Matrix after appending of shape [4,4]

        """
        return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))

    def pack(self, x):
        """
        Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
        manner.

        Parameter:
        ----------
        x: Matrices to be appended of shape [batch_size, 4, 1]

        Return:
        ------
        Matrix of shape [batch_size, 4, 4] after appending.

        """
        return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
5,242
Python
27.037433
80
0.571347
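A hypothetical quick use of SMPLModel above (the .pkl path is an assumption; it must be a model file pre-processed as the class docstring describes):

import numpy as np

model = SMPLModel("model_f.pkl")                 # hypothetical path to a pre-processed SMPL pickle
pose = 0.2 * (np.random.rand(24, 3) - 0.5)       # small random axis-angle rotations per joint
beta = 0.05 * np.random.rand(10)                 # mild shape variation
trans = np.zeros(3)

verts, joints = model.set_params(pose=pose, beta=beta, trans=trans)
print(verts.shape, joints.shape)                 # (6890, 3), (24, 3)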
eliabntt/animated_human_SMPL_to_USD/humangenerator/cloth3d_gen.py
from humangenerator.util.blender_util import *
import bpy
from .util.cloth3d_util import loadInfo, bodyCache, loadGarment
import humangenerator as hgen
from pathlib import Path


class cloth3d:
    def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts):
        from humangenerator.generator import generator

        # temporary usd export path, we cannot directly write in mounted network drives sometimes
        temp_path = os.path.join(parent_path, 'usd_exports')
        # surreal path for textures
        smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data")
        self.generator = generator(smpl_path)
        self.with_cache = with_cache
        self.path_out = path_out
        self.path_samples = path_samples
        self.smpl = smpl_models
        self.temp_path = temp_path
        self.write_verts = (write_verts == "True")

    def animateSMPL(self, sample, smpl_ob, info, j):
        if self.with_cache:
            bodyCache(self.path_cache, sample, info, smpl_ob.ob, self.smpl)
        # generate blendshapes + trans
        s = info['shape']
        smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"])
        if len(info['poses'].shape) > 1:
            N = info['poses'].shape[1]
        else:
            sys.stderr.write('Error animation is ONLY ONE FRAME \n')
            N = 1
        for i in range(N):
            if N > 1:
                p = info['poses'][:, i]
                t = info['trans'][:, i].reshape((3,)) - j[0]
            else:
                p = info['poses'][:]
                t = info['trans'][:].reshape((3,)) - j[0]
            bpy.data.scenes["Scene"].frame_set(i)
            smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache)

    def generate_SMPLbody_animation(self, sample, info, gender, index):
        print("Generate Animation..")
        if len(info['poses'].shape) > 1:
            p = info['poses'][:, 0].reshape((24, 3))
            t = info['trans'][:, 0].reshape((3,))
        else:
            p = info['poses'][:].reshape((24, 3))
            t = info['trans'][:].reshape((3,))
        s = info['shape']
        v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t)

        cloth_img_name = self.generator.pick_skin_texture(gender=gender, clothing_option="grey")
        img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"]
        img.image = bpy.data.images.load(cloth_img_name)
        material = bpy.data.materials[f'Material_{index}']

        self.smpl_body_list[index].refine_SMPL(material, j, info['zrot'])
        self.animateSMPL(sample, self.smpl_body_list[index], info, j)
        # Smooth
        bpy.ops.object.shade_smooth()

    def loadCloth3DSequence(self, sample: str, info: dict, frame: int = None):
        if len(info['poses'].shape) > 1:
            bpy.context.scene.frame_end = info['poses'].shape[-1] - 1
        else:
            bpy.context.scene.frame_end = 1
        bpy.ops.object.select_all(action='DESELECT')

        # delete current garments
        for obj in bpy.data.objects.values():
            if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower():
                obj.select_set(True)
        bpy.ops.object.delete()

        # Load new garments
        for garment in info['outfit']:
            loadGarment(self.path_samples, self.path_cache, sample, garment, info)

        for obj in bpy.data.objects.values():
            obj.select_set(False)

        gender = 'm' if info['gender'] else 'f'
        index = 0 if info['gender'] else 1

        self.generate_SMPLbody_animation(sample, info, gender, index)

        bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}']
        arm_obj = bpy.data.objects[f'Armature_{index}']
        bpy.context.scene.frame_current = bpy.context.scene.frame_start

        for obj in bpy.data.objects.values():
            if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower():
                obj.select_set(True)
                obj.parent = arm_obj
                obj.rotation_euler = [0, 0, 0]
                obj.select_set(False)

        for obj in bpy.data.objects.values():
            if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower():
                obj.select_set(True)
            else:
                if str(index) in obj.name:
                    obj.select_set(True)

        if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end:
            bpy.context.scene.frame_current = frame

    def process_sample(self, sample: str, frame: int, smpl_body_list):
        # load info
        info = loadInfo(os.path.join(self.path_samples, sample, 'info.mat'))
        self.smpl_body_list = smpl_body_list

        subfolder_name = Path(sample).stem + ('_with_cache' if self.with_cache else '')
        self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache)

        if frame is None:
            self.loadCloth3DSequence(sample, info)
        else:
            self.loadCloth3DSequence(sample, info, frame)

        bpy.ops.wm.save_as_mainfile(filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend"))
        return hgen.export_data(self.temp_path, self.path_out, Path(sample).stem, self.with_cache, frame, info,
                                info['zrot'], self.write_verts)
5,429
Python
42.095238
143
0.589796
eliabntt/animated_human_SMPL_to_USD/humangenerator/__init__.py
import os
import sys

# check the python version, only python 3.X is allowed:
if sys.version_info.major < 3:
    raise Exception("HumanGenerator requires at least python 3.X to run.")

sys.path.remove(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

from .util.blender_util import *
from data_folder.smpl.smpl_np import SMPLModel
from .generator import *
370
Python
29.916664
79
0.737838
eliabntt/animated_human_SMPL_to_USD/humangenerator/generator.py
import os
from random import choice

import bpy

from .util.smplutils import SMPL_Body, rotate_vector
from .cloth3d_gen import *
from .amass_gen import *
from .util.blender_util import export_stl_data, write_pkl_data, write_usd

# import amass_gen


def get_processor(dataset, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config={}):
    if dataset == "cloth3d":
        return cloth3d(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts), path_samples
    if dataset == "amass":
        # todo fixme
        tmp_obj = amass(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config)
        return tmp_obj, path_samples
    raise Exception("NOT A VALID DATASET")


def export_data(temp_path, path_out, sample, with_cache, frame, info, orient, write_verts, usd=True):
    try:
        if usd:
            write_usd(temp_path, path_out, sample + ('_with_cache' if with_cache else ''), with_cache,
                      True if frame == None else False, 0 if frame == None else frame)
        for obj in bpy.data.objects.values():
            if "body" in obj.name.lower() and obj.select_get():
                ob = obj
            elif "armature" in obj.name.lower() and obj.select_get():
                arm_ob = obj
        export_stl_data(path_out, sample + ('_with_cache' if with_cache else ''),
                        [ob for ob in bpy.data.objects if ob.select_get()], orient)
        write_pkl_data(path_out, sample + ('_with_cache' if with_cache else ''), arm_ob, ob, info,
                       write_verts=write_verts)
    except:
        return False
    return True


def create_outfolder_structure(path_out, subfolder_name, with_cache):
    # the view_cache folder layout is the same with or without blendshape caching
    path_cache = os.path.join(path_out, subfolder_name, 'view_cache')
    if not os.path.exists(path_cache):
        os.makedirs(path_cache)
    return path_cache


class generator:
    def __init__(self, smpl_path, write_verts=False):
        self.SMPL_PATH = smpl_path

    def pick_skin_texture(self, split_name='all', clothing_option="grey", gender="m"):
        if gender == "f":
            with open(
                    os.path.join(self.SMPL_PATH, "textures", "female_{}.txt".format(split_name))
            ) as f:
                txt_paths = f.read().splitlines()
        else:
            with open(
                    os.path.join(self.SMPL_PATH, "textures", "male_{}.txt".format(split_name))
            ) as f:
                txt_paths = f.read().splitlines()

        # if using only one source of clothing
        if clothing_option == "nongrey":
            txt_paths = [k for k in txt_paths if "nongrey" in k]
        elif clothing_option == "grey":
            txt_paths = [k for k in txt_paths if "nongrey" not in k]
        elif clothing_option == "same":
            # Orig
            txt_paths = ["textures/male/nongrey_male_0244.jpg"]
        elif clothing_option == "all":
            txt_paths = [k for k in txt_paths]

        # random clothing texture
        cloth_img_name = choice(txt_paths)
        cloth_img_name = os.path.join(self.SMPL_PATH, cloth_img_name)
        print("Picked skin texture: {}".format(cloth_img_name))
        return cloth_img_name

    def create_material_SMPL(self, gender="m", person_no=0, clothing_option="grey", split_name="all"):
        print("Creating SMPL texture material")
        cloth_img_name = self.pick_skin_texture(split_name, clothing_option, gender)

        material = bpy.data.materials.new(name=f"Material_{person_no}")
        material.use_nodes = True

        # Add nodes
        tree = material.node_tree
        nodes = tree.nodes

        # Principled BSDf
        bsdf = nodes['Principled BSDF']

        # Image
        img = nodes.new('ShaderNodeTexImage')
        img.image = bpy.data.images.load(cloth_img_name)

        # Links
        tree.links.new(img.outputs[0], bsdf.inputs[0])

        return material

    def load_SMPLs_objects(self):
        # create the material for SMPL
        material = self.create_material_SMPL("m", 0)
        print("Male Material Created")
        smpl_body_list = []
        # create the SMPL_Body object
        smpl_body_list.append(
            SMPL_Body(self.SMPL_PATH, material, 0, "male", person_no=0)
        )
        print("Male created")
        material = self.create_material_SMPL("f", 1)
        print("Female material created")
        smpl_body_list.append(
            SMPL_Body(self.SMPL_PATH, material, 0, "female", person_no=1)
        )
        print("Female created")
        return smpl_body_list
4,735
Python
38.140496
123
0.597043
eliabntt/animated_human_SMPL_to_USD/humangenerator/amass_gen.py
from pathlib import Path

from humangenerator.util.blender_util import *
import bpy
from .util.amass_util import loadInfo, bodyCache, _load_parametric_body_model, _get_supported_mocap_datasets, \
    _get_sequence_path
import humangenerator as hgen


class amass:
    def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config):
        # temporary usd export path, we cannot directly write in mounted network drives sometimes
        temp_path = os.path.join(parent_path, 'usd_exports')
        # surreal path for textures
        smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data")
        from humangenerator.generator import generator
        self.generator = generator(smpl_path)
        self.with_cache = with_cache
        self.path_out = path_out
        self.path_samples = path_samples
        self.smpl = smpl_models
        self.sub_dataset_id = config['sub_dataset_id']
        self.num_betas = config['num_betas']
        self.num_dmpls = config['num_dmpls']
        self.subject_ids = config['subject_ids'].split()
        self.write_verts = (write_verts == "True")
        self.temp_path = temp_path
        self.body_model_m, self.faces_m = _load_parametric_body_model(parent_path, "male", self.num_betas,
                                                                      self.num_dmpls)
        self.body_model_f, self.faces_f = _load_parametric_body_model(parent_path, "female", self.num_betas,
                                                                      self.num_dmpls)
        taxonomy_file_path = os.path.join(parent_path, "taxonomy.json")
        self.supported_datasets = _get_supported_mocap_datasets(taxonomy_file_path, path_samples)

    def animateSMPL(self, sample, smpl_ob, info, body_model):
        if self.with_cache:
            bodyCache(self.path_cache, sample, info, smpl_ob.ob, body_model, self.num_betas, self.num_dmpls)
        # generate blendshapes + trans
        s = info['betas'][:10]
        smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"])
        for i in range(info['poses'].shape[0]):
            p = np.append(info['poses'][i][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0)
            t = info['trans'][i].reshape((3,))
            bpy.data.scenes["Scene"].frame_set(i)
            smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache)

    def generate_SMPLbody_animation(self, sample, info, gender, index, body_model):
        print("Generate Animation..")
        orient = info['poses'][0, :3][2]
        p = np.append(info['poses'][0][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0)
        t = info['trans'][0].reshape((3,))
        s = info['betas'][:10]
        v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t)

        cloth_img_name = self.generator.pick_skin_texture(gender=gender, clothing_option="all")
        img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"]
        img.image = bpy.data.images.load(cloth_img_name)
        material = bpy.data.materials[f'Material_{index}']

        self.smpl_body_list[index].refine_SMPL(material, j, orient)  # info['zrot']
        self.animateSMPL(sample, self.smpl_body_list[index], info, body_model)
        # Smooth
        bpy.ops.object.shade_smooth()

    def loadAmassSequence(self, sample: str, info: dict, body_model, frame: int = None):
        bpy.context.scene.frame_end = info['poses'].shape[0] - 1
        bpy.ops.object.select_all(action='DESELECT')

        # delete current garments
        for obj in bpy.data.objects.values():
            if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower():
                obj.select_set(True)
        bpy.ops.object.delete()

        for obj in bpy.data.objects.values():
            obj.select_set(False)

        gender = 'm' if info['gender'] == 'male' else 'f'
        index = 0 if info['gender'] == 'male' else 1

        self.generate_SMPLbody_animation(sample, info, gender, index, body_model)

        bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}']
        arm_obj = bpy.data.objects[f'Armature_{index}']
        bpy.context.scene.frame_current = bpy.context.scene.frame_start

        for obj in bpy.data.objects.values():
            if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower():
                obj.select_set(True)
                obj.parent = arm_obj
                obj.rotation_euler = [0, 0, 0]
                obj.select_set(False)

        for obj in bpy.data.objects.values():
            if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower():
                obj.select_set(True)
            else:
                if str(index) in obj.name:
                    obj.select_set(True)

        if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end:
            bpy.context.scene.frame_current = frame

    def process_sample(self, sample: str, frame: int, smpl_body_list):
        # load info
        if sample in self.subject_ids:
            for subject_id in os.listdir(os.path.join(self.path_samples, sample)):
                sequence_path, main_path = _get_sequence_path(self.supported_datasets, self.sub_dataset_id, sample,
                                                              subject_id)
                info = loadInfo(sequence_path)
                self.smpl_body_list = smpl_body_list

                subfolder_name = Path(subject_id).stem + ('_with_cache' if self.with_cache else '')
                self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache)

                if frame is None:
                    self.loadAmassSequence(sample, info,
                                           self.body_model_m if info["gender"] == "male" else self.body_model_f)
                else:
                    self.loadAmassSequence(sample, info,
                                           self.body_model_m if info["gender"] == "male" else self.body_model_f,
                                           frame)

                bpy.ops.wm.save_as_mainfile(
                    filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend"))

                my_l = list(info.keys())
                new_info = {}
                for i in my_l:
                    new_info[i] = info[i]
                hgen.export_data(self.temp_path, self.path_out, Path(subject_id).stem, self.with_cache, frame,
                                 new_info, info['poses'][0, :3][2], self.write_verts)
        return True
6,545
Python
47.488889
126
0.579221
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/amass_util.py
import numpy as np
import glob
import os
import random

from .IO import readPC2, writePC2
import bpy, sys, torch
from .blender_util import mesh_cache
from typing import Tuple


def bodyCache(path_cache, sample, info, ob, body_model, num_betas, num_dmpls):
    print("Processing Body Cache")
    pc2_path = os.path.join(path_cache, sample + '.pc2')
    V = np.zeros((info['poses'].shape[1], 6890, 3), np.float32)
    bdata = info
    time_length = len(bdata['trans'])
    comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    body_params = {
        'root_orient': torch.Tensor(bdata['poses'][:, :3]).to(comp_device),  # controls the global root orientation
        'pose_body': torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device),  # controls the body
        'pose_hand': torch.Tensor(bdata['poses'][:, 66:]).to(comp_device),  # controls the finger articulation
        'trans': torch.Tensor(bdata['trans']).to(comp_device),  # controls the global body position
        'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)).to(
            comp_device),  # controls the body shape. Body shape is static
        'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]).to(comp_device)  # controls soft tissue dynamics
    }
    body_trans_root = body_model(
        **{k: v for k, v in body_params.items() if k in ['pose_body', 'betas', 'pose_hand', 'dmpls', 'trans',
                                                         'root_orient']})
    if not os.path.isfile(pc2_path):
        V = body_trans_root.v.data.cpu().numpy()
        print("Writing PC2 file...")
        writePC2(pc2_path, V)
    else:
        V = readPC2(pc2_path)['V']

    if V.shape[1] != len(ob.data.vertices):
        sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!")
        sys.stderr.flush()

    mesh_cache(ob, pc2_path)
    bpy.ops.object.shade_smooth()
    return body_trans_root


def loadInfo(sequence_path):
    if os.path.exists(sequence_path):
        # load AMASS dataset sequence file which contains the coefficients for the whole motion sequence
        sequence_body_data = np.load(sequence_path)
        # get the number of supported frames
        return sequence_body_data
    else:
        raise Exception(
            "Invalid sequence/subject category identifiers, please choose a "
            "valid one. Used path: {}".format(sequence_path))


def _get_sequence_path(supported_mocap_datasets: dict, used_sub_dataset_id: str, used_subject_id: str,
                       used_sequence_id: str) -> Tuple[str, str]:
    """ Build the path of the sequence file whose pose and shape parameters will be processed by the
    parametric model.

    :param supported_mocap_datasets: A dict which maps sub dataset names to their paths.
    :param used_sub_dataset_id: Identifier for the sub dataset, the dataset which the human pose object
                                should be extracted from.
    :param used_subject_id: Type of motion from which the pose should be extracted; this is a
                            dataset-dependent parameter.
    :param used_sequence_id: Sequence id in the dataset; sequences are the motions recorded to represent
                             a certain action.
    :return: tuple of (sequence path, subject path). Type: tuple of strings
    """
    # check if the sub_dataset is supported
    if used_sub_dataset_id in supported_mocap_datasets:
        # get path from dictionary
        sub_dataset_path = supported_mocap_datasets[used_sub_dataset_id]
        # concatenate path to specific subject
        if not used_subject_id:
            # if none was selected, pick one at random
            possible_subject_ids = glob.glob(os.path.join(sub_dataset_path, "*"))
            possible_subject_ids.sort()
            if len(possible_subject_ids) > 0:
                used_subject_id_str = os.path.basename(random.choice(possible_subject_ids))
            else:
                raise Exception("No subjects found in folder: {}".format(sub_dataset_path))
        else:
            try:
                used_subject_id_str = "{:02d}".format(int(used_subject_id))
            except:
                used_subject_id_str = used_subject_id

        subject_path = os.path.join(sub_dataset_path, used_subject_id_str)
        sequence_path = os.path.join(subject_path, used_sequence_id)
        return sequence_path, subject_path
    else:
        raise Exception(
            "The requested mocap dataset is not yet supported, please choose another one from the following "
            "supported datasets: {}".format([key for key, value in supported_mocap_datasets.items()]))


def _load_parametric_body_model(data_path: str, used_body_model_gender: str, num_betas: int,
                                num_dmpls: int) -> Tuple["BodyModel", np.array]:
    """ Loads the parametric model that is used to generate the mesh object.

    :return: parametric model and its faces. Type: tuple.
    """
    import torch
    from human_body_prior.body_model.body_model import BodyModel

    bm_path = os.path.join(data_path, 'body_models', 'smplh', used_body_model_gender, 'model.npz')  # body model
    dmpl_path = os.path.join(data_path, 'body_models', 'dmpls', used_body_model_gender, 'model.npz')  # deformation model
    if not os.path.exists(bm_path) or not os.path.exists(dmpl_path):
        raise Exception("Parametric Body model doesn't exist, please follow download instructions section in AMASS Example")
    comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    body_model = BodyModel(bm_path=bm_path, num_betas=num_betas, num_dmpls=num_dmpls, path_dmpl=dmpl_path).to(comp_device)
    faces = body_model.f.detach().cpu().numpy()
    return body_model, faces


def _get_supported_mocap_datasets(taxonomy_file_path: str, data_path: str) -> dict:
    """ Get the latest list of supported mocap datasets from the taxonomy json file and build a dict that
    maps each sub dataset name to its path.

    :param taxonomy_file_path: path to the taxonomy.json file which contains the supported datasets and
                               their respective paths. Type: string.
    :param data_path: path to the AMASS dataset root folder. Type: string.
    """
    import json

    # dictionary contains mocap dataset name and path to its sub folder within the main dataset, dictionary will
    # be filled from taxonomy.json file which indicates the supported datasets
    supported_mocap_datasets = {}
    if os.path.exists(taxonomy_file_path):
        with open(taxonomy_file_path, "r") as f:
            loaded_data = json.load(f)
            for block in loaded_data:
                if "sub_data_id" in block:
                    sub_dataset_id = block["sub_data_id"]
                    supported_mocap_datasets[sub_dataset_id] = os.path.join(data_path, block["path"])
    else:
        raise Exception("The taxonomy file could not be found: {}".format(taxonomy_file_path))
    return supported_mocap_datasets
6,996
Python
50.448529
163
0.646512
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/cloth3d_util.py
import numpy as np
import scipy.io as sio
from math import cos, sin
from .blender_util import readOBJ, createBPYObj, setMaterial, mesh_cache, convert_meshcache
import os, sys
from .IO import readPC2, writePC2
import bpy


def loadInfo(path: str):
    '''
    this function should be called instead of direct sio.loadmat
    as it cures the problem of not properly recovering python dictionaries
    from mat files. It calls the function check keys to cure all entries
    which are still mat-objects
    '''
    data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
    del data['__globals__']
    del data['__header__']
    del data['__version__']
    return _check_keys(data)


def _check_keys(dict):
    '''
    checks if entries in dictionary are mat-objects. If yes
    todict is called to change them to nested dictionaries
    '''
    for key in dict:
        if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):
            dict[key] = _todict(dict[key])
    return dict


def _todict(matobj):
    '''
    A recursive function which constructs from matobjects nested dictionaries
    '''
    dict = {}
    for strg in matobj._fieldnames:
        elem = matobj.__dict__[strg]
        if isinstance(elem, sio.matlab.mio5_params.mat_struct):
            dict[strg] = _todict(elem)
        elif isinstance(elem, np.ndarray) and np.any([isinstance(item, sio.matlab.mio5_params.mat_struct) for item in elem]):
            dict[strg] = [None] * len(elem)
            for i, item in enumerate(elem):
                if isinstance(item, sio.matlab.mio5_params.mat_struct):
                    dict[strg][i] = _todict(item)
                else:
                    dict[strg][i] = item
        else:
            dict[strg] = elem
    return dict


# Computes matrix of rotation around z-axis for 'zrot' radians
def zRotMatrix(zrot):
    c, s = cos(zrot), sin(zrot)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]], np.float32)


""" CAMERA """


def intrinsic():
    RES_X = 640
    RES_Y = 480
    f_mm = 50  # blender default
    sensor_w_mm = 36  # blender default
    sensor_h_mm = sensor_w_mm * RES_Y / RES_X

    fx_px = f_mm * RES_X / sensor_w_mm
    fy_px = f_mm * RES_Y / sensor_h_mm

    u = RES_X / 2
    v = RES_Y / 2

    return np.array([[fx_px, 0, u],
                     [0, fy_px, v],
                     [0, 0, 1]], np.float32)


def extrinsic(camLoc):
    R_w2bc = np.array([[0, 1, 0],
                       [0, 0, 1],
                       [1, 0, 0]], np.float32)

    T_w2bc = -1 * R_w2bc.dot(camLoc)

    R_bc2cv = np.array([[1, 0, 0],
                        [0, -1, 0],
                        [0, 0, -1]], np.float32)

    R_w2cv = R_bc2cv.dot(R_w2bc)
    T_w2cv = R_bc2cv.dot(T_w2bc)

    return np.concatenate((R_w2cv, T_w2cv[:, None]), axis=1)


def proj(camLoc):
    return intrinsic().dot(extrinsic(camLoc))


"""
Mesh to UV map
Computes correspondences between 3D mesh and UV map
NOTE: 3D mesh vertices can have multiple correspondences with UV vertices
"""


def mesh2UV(F, Ft):
    m2uv = {v: set() for f in F for v in f}
    for f, ft in zip(F, Ft):
        for v, vt in zip(f, ft):
            m2uv[v].add(vt)
    # m2uv = {k:list(v) for k,v in m2uv.items()}
    return m2uv


# Maps UV coordinates to texture space (pixel)
IMG_SIZE = 2048  # all image textures have this squared size


def uv_to_pixel(vt):
    px = vt * IMG_SIZE  # scale to image plane
    px %= IMG_SIZE  # wrap to [0, IMG_SIZE]
    # Note that Blender graphic engines invert vertical axis
    return int(px[0]), int(IMG_SIZE - px[1])  # texel X, texel Y


def loadGarment(path_sample, path_cache, sample, garment, info):
    print("Processing Garment Cache")
    print(f"Loading {garment}")
    texture = info['outfit'][garment]['texture']
    # Read OBJ file and create BPY object
    V, F, Vt, Ft = readOBJ(os.path.join(path_sample, sample, garment + '.obj'))
    ob = createBPYObj(V, F, Vt, Ft, name=sample + '_' + garment)

    # z-rot
    ob.rotation_euler[2] = info['zrot']

    # Convert cache PC16 to PC2
    pc2_path = os.path.join(path_cache, sample + '_' + garment + '.pc2')

    if not os.path.isfile(pc2_path):
        # Convert PC16 to PC2 (and move to view_cache folder)
        # Add trans to vertex locations
        pc16_path = os.path.join(path_sample, sample, garment + '.pc16')
        V = readPC2(pc16_path, True)['V']
        for i in range(V.shape[0]):
            sys.stdout.write('\r' + str(i + 1) + '/' + str(V.shape[0]))
            sys.stdout.flush()
            if V.shape[0] > 1:
                V[i] += info['trans'][:, i][None]
            else:
                V[i] += info['trans'][:][None]
        writePC2(pc2_path, V)
    else:
        V = readPC2(pc2_path)['V']

    if V.shape[1] != len(ob.data.vertices):
        sys.stderr.write("ERROR IN THE VERTEX COUNT!!!!!")
        sys.stderr.flush()

    mesh_cache(ob, pc2_path)
    # necessary to have this in the old version of the code with the old omni-blender
    # convert_meshcache(bpy.ops.object)

    # Set material
    setMaterial(path_sample, ob, sample, garment, texture)

    # Smooth
    bpy.ops.object.shade_smooth()
    print(f"\nLoaded {garment}.\n")


def bodyCache(path_cache, sample, info, ob, smpl):
    print("Processing Body Cache")
    pc2_path = os.path.join(path_cache, sample + '.pc2')
    if not os.path.isfile(pc2_path):
        # Compute body sequence
        print("Computing body sequence...")
        print("")
        gender = 'm' if info['gender'] else 'f'
        if len(info['poses'].shape) > 1:
            N = info['poses'].shape[1]
        else:
            N = 1
        V = np.zeros((N, 6890, 3), np.float32)
        for i in range(N):
            sys.stdout.write('\r' + str(i + 1) + '/' + str(N))
            sys.stdout.flush()
            s = info['shape']
            if N == 1:
                p = info['poses'][:].reshape((24, 3))
                t = info['trans'][:].reshape((3,))
            else:
                p = info['poses'][:, i].reshape((24, 3))
                t = info['trans'][:, i].reshape((3,))
            v, j = smpl[gender].set_params(pose=p, beta=s, trans=t)
            V[i] = v - j[0:1]
        print("")
        print("Writing PC2 file...")
        writePC2(pc2_path, V)
    else:
        V = readPC2(pc2_path)['V']

    if V.shape[1] != len(ob.data.vertices):
        sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!")
        sys.stderr.flush()

    mesh_cache(ob, pc2_path)
    bpy.ops.object.shade_smooth()
6,626
Python
32.469697
125
0.551313
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/smplutils.py
import bpy
from bpy_extras.object_utils import world_to_camera_view
from mathutils import Matrix  # mathutils.Quaternion would be shadowed by the pyquaternion import below
import numpy as np
import pickle as pkl
import os
import math
from pyquaternion import Quaternion


# computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
def Rodrigues(rotvec):
    theta = np.linalg.norm(rotvec)
    r = (rotvec / theta).reshape(3, 1) if theta > 0.0 else rotvec
    cost = np.cos(theta)
    mat = np.asarray([[0, -r[2], r[1]],
                      [r[2], 0, -r[0]],
                      [-r[1], r[0], 0]])
    return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * mat


# transformation between pose and blendshapes
def rodrigues2bshapes(pose):
    rod_rots = np.asarray(pose).reshape(24, 3)
    mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]
    bshapes = np.concatenate(
        [(mat_rot - np.eye(3)).ravel() for mat_rot in mat_rots[1:]]
    )
    return mat_rots, bshapes


def rotate_vector(vector, axis, angle):
    """
    Rotate a vector around an axis by an angle.
    """
    q = Quaternion(axis=axis, angle=angle)
    return q.rotate(vector)


class SMPL_Body:
    def __init__(self, smpl_data_folder, material, j, gender="female", person_no=0, zrot=0):
        # load fbx model
        bpy.ops.import_scene.fbx(
            filepath=os.path.join(
                smpl_data_folder,
                "basicModel_{}_lbs_10_207_0_v1.0.2.fbx".format(gender[0]),
            ),
            axis_forward="Y",
            axis_up="Z",
            global_scale=100,
        )
        J_regressors = pkl.load(
            open(os.path.join(smpl_data_folder, "joint_regressors.pkl"), "rb")
        )
        # 24 x 6890 regressor from vertices to joints
        self.joint_regressor = J_regressors["J_regressor_{}".format(gender)]
        self.j = j
        armature_name = "Armature_{}".format(person_no)
        bpy.context.active_object.name = armature_name
        self.gender_name = "{}_avg".format(gender[0])

        self.obj_name = "body_{:d}".format(person_no)
        bpy.data.objects[armature_name].children[0].name = self.obj_name
        # not the default self.gender_name because each time the fbx is loaded it adds some suffix
        self.ob = bpy.data.objects[self.obj_name]
        # Rename the armature
        self.ob.data.use_auto_smooth = False  # autosmooth creates artifacts
        # assign the existing spherical harmonics material
        self.ob.active_material = bpy.data.materials["Material_{}".format(person_no)]
        bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN')
        # clear existing animation data
        # self.ob.shape_key_clear()
        self.ob.data.shape_keys.animation_data_clear()
        self.arm_ob = bpy.data.objects[armature_name]
        self.arm_ob.animation_data_clear()
        self.setState0()
        # self.ob.select = True  # blender < 2.8x
        self.ob.select_set(True)
        # bpy.context.scene.objects.active = self.ob  # blender < 2.8x
        bpy.context.view_layer.objects.active = self.ob
        self.smpl_data_folder = smpl_data_folder
        self.materials = self.create_segmentation(material, smpl_data_folder)
        # unblocking both the pose and the blendshape limits
        for k in self.ob.data.shape_keys.key_blocks.keys():
            self.ob.data.shape_keys.key_blocks[k].slider_min = -100
            self.ob.data.shape_keys.key_blocks[k].slider_max = 100
        # bpy.context.scene.objects.active = self.arm_ob  # blender < 2.8x
        bpy.context.view_layer.objects.active = self.arm_ob
        # order
        self.part_match = {
            "root": "root",
            "bone_00": "Pelvis",
            "bone_01": "L_Hip",
            "bone_02": "R_Hip",
            "bone_03": "Spine1",
            "bone_04": "L_Knee",
            "bone_05": "R_Knee",
            "bone_06": "Spine2",
            "bone_07": "L_Ankle",
            "bone_08": "R_Ankle",
            "bone_09": "Spine3",
            "bone_10": "L_Foot",
            "bone_11": "R_Foot",
            "bone_12": "Neck",
            "bone_13": "L_Collar",
            "bone_14": "R_Collar",
            "bone_15": "Head",
            "bone_16": "L_Shoulder",
            "bone_17": "R_Shoulder",
            "bone_18": "L_Elbow",
            "bone_19": "R_Elbow",
            "bone_20": "L_Wrist",
            "bone_21": "R_Wrist",
            "bone_22": "L_Hand",
            "bone_23": "R_Hand",
        }

    def refine_SMPL(self, material, j, zrot):
        self.j = j
        self.arm_ob.rotation_euler = [0, 0, zrot]
        self.ob.data.shape_keys.animation_data_clear()
        self.arm_ob.animation_data_clear()
        self.ob.select_set(True)
        bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN')
        # bpy.context.scene.objects.active = self.ob  # blender < 2.8x
        bpy.context.view_layer.objects.active = self.ob
        self.materials = self.create_segmentation(material, self.smpl_data_folder)
        for k in self.ob.data.shape_keys.key_blocks.keys():
            self.ob.data.shape_keys.key_blocks[k].slider_min = -10
            self.ob.data.shape_keys.key_blocks[k].slider_max = 10
        # bpy.context.scene.objects.active = self.arm_ob  # blender < 2.8x
        bpy.context.view_layer.objects.active = self.arm_ob

    def setState0(self):
        for ob in bpy.data.objects.values():
            # ob.select = False  # blender < 2.8x
            ob.select_set(False)
        # bpy.context.scene.objects.active = None  # blender < 2.8x
        bpy.context.view_layer.objects.active = None

    # create one material per part as defined in a pickle with the segmentation
    # this is useful to render the segmentation in a material pass
    def create_segmentation(self, material, smpl_path):
        print("Creating materials segmentation")
        sorted_parts = [
            "hips", "leftUpLeg", "rightUpLeg", "spine", "leftLeg", "rightLeg",
            "spine1", "leftFoot", "rightFoot", "spine2", "leftToeBase",
            "rightToeBase", "neck", "leftShoulder", "rightShoulder", "head",
            "leftArm", "rightArm", "leftForeArm", "rightForeArm", "leftHand",
            "rightHand", "leftHandIndex1", "rightHandIndex1",
        ]
        part2num = {part: (ipart + 1) for ipart, part in enumerate(sorted_parts)}
        materials = {}
        vgroups = {}
        with open(os.path.join(smpl_path, "segm_per_v_overlap.pkl"), "rb") as f:
            vsegm = pkl.load(f)
        if len(self.ob.material_slots) <= 1:
            bpy.ops.object.material_slot_remove()
        parts = sorted(vsegm.keys())
        existing = False
        cnt = 0
        for part in parts:
            vs = vsegm[part]
            # vgroups[part] = self.ob.vertex_groups.new(part)  # blender < 2.8x
            if part not in self.ob.vertex_groups:
                vgroups[part] = self.ob.vertex_groups.new(name=part)
                vgroups[part].add(vs, 1.0, "ADD")
            else:
                existing = True
                bpy.ops.object.vertex_group_set_active(group=part)
            materials[part] = material.copy()
            materials[part].pass_index = part2num[part]
            if not existing:
                bpy.ops.object.material_slot_add()
                self.ob.material_slots[-1].material = materials[part]
                bpy.ops.object.mode_set(mode="EDIT")
                bpy.ops.mesh.select_all(action="DESELECT")
                bpy.ops.object.vertex_group_select()
                bpy.ops.object.material_slot_assign()
                bpy.ops.object.mode_set(mode="OBJECT")
            else:
                self.ob.material_slots[cnt].material = materials[part]
            cnt += 1
        for scene_material in bpy.data.materials:
            if not scene_material.users and len(scene_material.name) != len(material.name):
                bpy.data.materials.remove(scene_material)
        return materials

    def quaternion_multiply(self, quaternion1, quaternion0):
        w0, x0, y0, z0 = quaternion0
        w1, x1, y1, z1 = quaternion1
        return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
                         x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
                         -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
                         x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)

    def euler_from_quaternion(self, quat):
        """
        Convert a quaternion into euler angles (roll, pitch, yaw), returned in degrees.
        roll is rotation around x (counterclockwise)
        pitch is rotation around y (counterclockwise)
        yaw is rotation around z (counterclockwise)
        """
        w, x, y, z = quat
        t0 = +2.0 * (w * x + y * z)
        t1 = +1.0 - 2.0 * (x * x + y * y)
        roll_x = math.atan2(t0, t1)

        t2 = +2.0 * (w * y - z * x)
        t2 = +1.0 if t2 > +1.0 else t2
        t2 = -1.0 if t2 < -1.0 else t2
        pitch_y = math.asin(t2)

        t3 = +2.0 * (w * z + x * y)
        t4 = +1.0 - 2.0 * (y * y + z * z)
        yaw_z = math.atan2(t3, t4)

        return roll_x * 180 / math.pi, pitch_y * 180 / math.pi, yaw_z * 180 / math.pi

    def apply_trans_pose_shape(self, trans, pose, shape, frame=None, with_blendshapes=True):
        """
        Apply trans pose and shape to character
        """
        # transform pose into rotation matrices (for pose) and pose blendshapes
        mrots, bsh = rodrigues2bshapes(pose)
        # set the location of the first bone to the translation parameter
        mytrans = [0, 0, 0]
        mytrans[2] = trans[2]
        mytrans[1] = trans[1]
        mytrans[0] = trans[0]
        self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location = mytrans
        if frame is not None:
            self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert(
                "location", frame=frame
            )
            self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert(
                "rotation_quaternion", frame=frame
            )
        # set the pose of each bone to the quaternion specified by pose
        for ibone, mrot in enumerate(mrots):
            bone = self.arm_ob.pose.bones[
                self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)]
            ]
            bone.rotation_quaternion = Matrix(mrot).to_quaternion()
            if frame is not None:
                bone.keyframe_insert("rotation_quaternion", frame=frame)
                bone.keyframe_insert("location", frame=frame)
        if with_blendshapes:
            # apply pose blendshapes
            for ibshape, bshape in enumerate(bsh):
                self.ob.data.shape_keys.key_blocks[
                    "Pose{:03d}".format(ibshape)
                ].value = bshape
                if frame is not None:
                    self.ob.data.shape_keys.key_blocks[
                        "Pose{:03d}".format(ibshape)
                    ].keyframe_insert("value", index=-1, frame=frame)
            # apply shape blendshapes
            for ibshape, shape_elem in enumerate(shape):
                self.ob.data.shape_keys.key_blocks[
                    "Shape{:03d}".format(ibshape)
                ].value = shape_elem
                if frame is not None:
                    self.ob.data.shape_keys.key_blocks[
                        "Shape{:03d}".format(ibshape)
                    ].keyframe_insert("value", index=-1, frame=frame)
        else:
            mod = self.ob.modifiers.get('Armature')
            if mod is not None:
                self.ob.modifiers.remove(mod)

    def reset_joint_positions(self, shape, scene):
        orig_trans = np.asarray(
            self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location
        ).copy()
        # zero the pose and trans to obtain joint positions in zero pose
        self.apply_trans_pose_shape(orig_trans, np.zeros(72), shape)
        bpy.ops.wm.memory_statistics()
        depsgraph = bpy.context.evaluated_depsgraph_get()
        me = self.ob.evaluated_get(depsgraph).to_mesh()
        num_vertices = len(me.vertices)  # 6890
        reg_vs = np.empty((num_vertices, 3))
        for iiv in range(num_vertices):
            reg_vs[iiv] = me.vertices[iiv].co
        # bpy.data.meshes.remove(me)  # blender < 2.8x
        self.ob.evaluated_get(depsgraph).to_mesh_clear()
        # regress joint positions in rest pose
        joint_xyz = self.j
        # adapt joint positions in rest pose
        # self.arm_ob.hide = False  # Added this line
        # bpy.context.scene.objects.active = self.arm_ob  # blender < 2.8x
        bpy.context.view_layer.objects.active = self.arm_ob
        bpy.ops.object.mode_set(mode="EDIT")
        # self.arm_ob.hide = True
        for ibone in range(24):
            bb = self.arm_ob.data.edit_bones[
                self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)]
            ]
            bboffset = bb.tail - bb.head
            bb.head = joint_xyz[ibone]
            bb.tail = bb.head + bboffset
        bpy.ops.object.mode_set(mode="OBJECT")
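# --- Illustrative usage sketch (not part of the original file) ---
# The pure-math helpers above can be exercised without Blender. The axis-angle
# pose below is made up: 24 joints x 3 parameters, with a 45-degree rotation on
# the left hip; the 23 non-root joints yield 23 * 9 = 207 pose blendshapes.
#
# pose = np.zeros(72)
# pose[3:6] = [0.0, 0.0, math.pi / 4]       # bone_01 (L_Hip), axis-angle about z
# mat_rots, bshapes = rodrigues2bshapes(pose)
# roll, pitch, yaw = SMPL_Body.euler_from_quaternion(None, (1.0, 0.0, 0.0, 0.0))
# print(roll, pitch, yaw)                   # identity quaternion -> (0, 0, 0) degrees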
13,308
Python
37.915205
94
0.550646
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/blender_util.py
import os
import bpy
from humangenerator.util.IO import readOBJ, readPC2, writePC2
import numpy as np
import bmesh
import sys
import pickle as pkl
import shutil
import random

PI = 3.14159

"""
Scene
"""


def init():
    clean()
    # scene
    return scene()


def clean():
    for collection in dir(bpy.data):
        data_structure = getattr(bpy.data, collection)
        # Check that it is a data collection
        if isinstance(data_structure, bpy.types.bpy_prop_collection) and hasattr(data_structure, "remove") \
                and collection not in ["texts"]:
            # Go over all entities in that collection
            for block in data_structure:
                # Remove everything besides the default scene
                if not isinstance(block, bpy.types.Scene) or block.name != "Scene":
                    data_structure.remove(block)


def clean_mesh_and_textures(exclude=[]):
    # ensure everything is lowered
    exclude = [i.lower() for i in exclude]
    for block in bpy.data.objects:
        if block.users == 0 or block.name.lower() not in exclude:
            bpy.data.objects.remove(block)
    for block in bpy.data.meshes:
        if block.users == 0:
            bpy.data.meshes.remove(block)
    for block in bpy.data.materials:
        if block.users == 0 and block.name.lower() not in exclude:
            bpy.data.materials.remove(block)
    for block in bpy.data.textures:
        if block.users == 0:
            bpy.data.textures.remove(block)
    for block in bpy.data.images:
        bpy.data.images.remove(block)
    for block in bpy.data.shape_keys:
        if block.users == 0:
            # shape keys have no dedicated remove(); use the generic batch removal
            bpy.data.batch_remove([block])
    for block in bpy.data.actions:
        if block.users == 0:
            bpy.data.actions.remove(block)


def scene():
    scene = bpy.data.scenes["Scene"]
    scene.render.engine = "CYCLES"
    # bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True
    scene.render.film_transparent = True
    scene.frame_current = 0
    scene.render.fps = 30
    scene.render.resolution_x = 640
    scene.render.resolution_y = 480
    return scene


"""
BPY obj manipulation
"""


def select(ob, only=True):
    if type(ob) is str:
        ob = bpy.data.objects[ob]
    if only:
        deselect()
    ob.select_set(True)
    bpy.context.view_layer.objects.active = ob
    return ob


def deselect():
    for obj in bpy.data.objects.values():
        obj.select_set(False)
    bpy.context.view_layer.objects.active = None


def delete(ob):
    select(ob)
    bpy.ops.object.delete()


def createBPYObj(V, F, Vt=None, Ft=None, name='new_obj'):
    # Create obj
    mesh = bpy.data.meshes.new('mesh')
    ob = bpy.data.objects.new(name, mesh)
    # Add to collection
    bpy.context.collection.objects.link(ob)
    select(ob)
    mesh = bpy.context.object.data
    bm = bmesh.new()
    # Vertices
    for v in V:
        bm.verts.new(v)
    bm.verts.ensure_lookup_table()
    # Faces
    for f in F:
        v = [bm.verts[i] for i in f]
        bm.faces.new(v)
    bm.to_mesh(mesh)
    bm.free()
    # UV Map
    if not Vt is None:
        # Create UV layer
        ob.data.uv_layers.new()
        # Assign UV coords
        iloop = 0
        for f in Ft:
            for i in f:
                ob.data.uv_layers['UVMap'].data[iloop].uv = Vt[i]
                iloop += 1
    return ob


def convert_meshcache(ob: bpy.ops.object, offset=0):
    # Converts a MeshCache or Cloth modifier to ShapeKeys
    bpy.context.scene.frame_current = bpy.context.scene.frame_start
    for frame in range(bpy.context.scene.frame_end + 1):
        bpy.context.scene.frame_current = frame
        # for alembic files converted to PC2 and loaded as MeshCache
        bpy.ops.object.modifier_apply_as_shapekey(keep_modifier=True, modifier="MeshCache")
    # loop through shapekeys and add as keyframe per frame
    # https://blender.stackexchange.com/q/149045/87258
    bpy.context.scene.frame_current = bpy.context.scene.frame_start
    for frame in range(bpy.context.scene.frame_end + 1):
        bpy.context.scene.frame_current = frame
        shapekey = bpy.data.shape_keys[-1]
        for i, keyblock in enumerate(shapekey.key_blocks):
            if keyblock.name != "Basis":
                curr = i - 1
                if curr != frame:
                    keyblock.value = 0
                    keyblock.keyframe_insert("value", frame=frame)
                else:
                    keyblock.value = 1
                    keyblock.keyframe_insert("value", frame=frame)
    bpy.ops.object.modifier_remove(modifier="MeshCache")


def setMaterial(path_sample, ob, sample, garment, texture):
    mat = bpy.data.materials.new(name=sample + '_' + garment + '_Material')
    mat.use_nodes = True
    ob.data.materials.append(mat)
    if texture['type'] == 'color':
        mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = texture['data'].tolist() + [1]
    elif texture['type'] == 'pattern':
        # Read pattern
        img_path = os.path.join(path_sample, sample, garment + '.png')
        # Add nodes
        tree = mat.node_tree
        nodes = tree.nodes
        # Principled BSDF
        bsdf = nodes['Principled BSDF']
        # Image
        img = nodes.new('ShaderNodeTexImage')
        try:
            img.image = bpy.data.images.load(img_path)
            # Links
            tree.links.new(img.outputs[0], bsdf.inputs[0])
        except Exception:
            # fall back to a random base color if the texture cannot be loaded
            mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = [random.random(), random.random(),
                                                                              random.random(), 1]


"""
Modifiers
"""


def mesh_cache(ob, cache, scale=1):
    ob = select(ob)
    bpy.ops.object.modifier_add(type='MESH_CACHE')
    ob.modifiers['MeshCache'].cache_format = 'PC2'
    ob.modifiers['MeshCache'].filepath = cache
    ob.modifiers['MeshCache'].frame_scale = scale


def write_usd(temppath, filepath, filename, with_cache, export_animation=True, sf=0, ef=-1, frame_step=1):
    outpath = os.path.join(filepath, filename)
    filepath = os.path.join(filepath, filename, filename + ".usd")
    if ef == -1:
        ef = bpy.context.scene.frame_end
    print(f"\nExporting usd to {filepath}\n")
    print(f"With blendshapes = {not with_cache}")
    bpy.ops.wm.usd_export(filepath=os.path.join(temppath, filename + ".usd"), filemode=8, display_type='DEFAULT',
                          sort_method='DEFAULT', selected_objects_only=True, visible_objects_only=True,
                          export_animation=export_animation, export_hair=True, export_vertices=True,
                          export_vertex_colors=True, export_vertex_groups=True, export_face_maps=True,
                          export_uvmaps=True, export_normals=True, export_transforms=True, export_materials=True,
                          export_meshes=True, export_lights=True, export_cameras=False,
                          export_blendshapes=(not with_cache), export_curves=True, export_particles=True,
                          export_armatures=True, use_instancing=False, evaluation_mode='VIEWPORT',
                          default_prim_path=f"/body_{filename}", root_prim_path=f"/body_{filename}",
                          material_prim_path=f"/body_{filename}/materials", generate_cycles_shaders=False,
                          generate_preview_surface=True, generate_mdl=True, convert_uv_to_st=True,
                          convert_orientation=True, convert_to_cm=True, export_global_forward_selection='Y',
                          export_global_up_selection='Z', export_child_particles=False, export_as_overs=False,
                          merge_transform_and_shape=False, export_custom_properties=True,
                          add_properties_namespace=False, export_identity_transforms=False, apply_subdiv=True,
                          author_blender_name=True, vertex_data_as_face_varying=False, frame_step=frame_step,
                          start=sf, end=ef, override_shutter=False, init_scene_frame_range=True,
                          export_textures=True, relative_paths=True, light_intensity_scale=1,
                          convert_light_to_nits=True, scale_light_radius=True, convert_world_material=True,
                          fix_skel_root=True, xform_op_mode='SRT')
    shutil.move(os.path.join(temppath, filename + ".usd"), filepath)
    shutil.move(os.path.join(temppath, "textures"), os.path.join(outpath, "textures"))


def export_stl_data(filepath, filename, lobs, zrot):
    context = bpy.context
    dg = context.evaluated_depsgraph_get()
    scene = context.scene
    coll = context.collection
    step = 5
    for ob in lobs:
        if ob.type != 'MESH':
            print(ob.name)
            print(ob.type)
            ob.select_set(False)
            continue
        bpy.context.view_layer.objects.active = ob
        rings = []
        me = ob.data
        nverts = len(me.vertices)
        nedges = len(me.edges)
        bm = bmesh.new()
        f = scene.frame_start
        while f <= scene.frame_end:
            scene.frame_set(f)
            bm.from_object(ob, dg, cage=True)
            bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.02)
            # bmesh.ops.transform(bm, verts=bm.verts[:], matrix=ob.matrix_world)
            f += step
            rings.append(bm.edges[:])
        print("Frames processed, going to do rings")
        # build from rings
        next = rings.pop()
        while rings:
            ring = rings.pop()
            bmesh.ops.bridge_loops(bm, edges=ring + next)
            next = ring
        rme = bpy.data.meshes.new("Rib")
        bm.to_mesh(rme)
        copy = bpy.data.objects.new("Rib", rme)
        coll.objects.link(copy)
        print("DONE " + ob.name)
    for ob in bpy.data.objects:
        if 'Rib' in ob.name:
            ob.select_set(True)
            bpy.context.view_layer.objects.active = ob
        else:
            ob.select_set(False)
    bpy.ops.object.join()
    ob = bpy.context.view_layer.objects.active
    ob.select_set(True)
    ob.rotation_euler = [0, 0, zrot]
    bpy.ops.export_mesh.stl(filepath=os.path.join(filepath, filename, filename + ".stl"),
                            check_existing=True, use_selection=True, global_scale=1, ascii=False,
                            use_mesh_modifiers=False, batch_mode='OFF', axis_forward='Y', axis_up='Z')
    bpy.ops.object.delete()


def write_pkl_data(filepath, filename, arm_ob, ob, info, frame_step=1, write_verts=False):
    bpy.context.scene.frame_current = bpy.context.scene.frame_start
    N = int((bpy.context.scene.frame_end - bpy.context.scene.frame_start + 1) / frame_step)
    n_bones = len(arm_ob.pose.bones) - 1
    n_verts = len(ob.data.vertices)
    if write_verts:
        d = {
            'frame': [],
            'bones': np.zeros((N, n_bones, 3), np.float32),
            'info': info,
            'verts': np.zeros((N, n_verts, 3), np.float32),
            'sf': bpy.context.scene.frame_start,
            'ef': bpy.context.scene.frame_end + 1,
            'nframes': frame_step
        }
    else:
        d = {
            'frame': [],
            'bones': np.zeros((N, n_bones, 3), np.float32),
            'info': info,
            'sf': bpy.context.scene.frame_start,
            'ef': bpy.context.scene.frame_end + 1,
            'nframes': frame_step
        }
    select(ob)
    dg = bpy.context.evaluated_depsgraph_get()
    cnt = 0
    # advance by frame_step so cnt stays within the preallocated N-row arrays
    for f in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end + 1, frame_step):
        sys.stdout.write('\r' + str(f) + '/' + str(N * frame_step))
        sys.stdout.flush()
        bpy.context.scene.frame_current = f
        bpy.context.view_layer.update()
        d['frame'].append(f)
        select(ob)
        tmp = ob.evaluated_get(dg)
        me = tmp.to_mesh()
        if write_verts:
            d['verts'][cnt] = np.reshape([ob.matrix_world @ v.co for v in me.vertices], (n_verts, 3))
        select(arm_ob)
        d['bones'][cnt] = np.reshape([arm_ob.matrix_world @ bone.head for bone in arm_ob.pose.bones[1:]],
                                     (n_bones, 3))
        cnt += 1
    if not os.path.exists(os.path.join(filepath, filename)):
        os.makedirs(os.path.join(filepath, filename))
    filepath = os.path.join(filepath, filename, filename + ".pkl")
    out = open(filepath, 'wb')
    pkl.dump(d, out)
    out.close()
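# --- Illustrative usage sketch (not part of the original file) ---
# Typical flow when this module is run inside Blender; the OBJ and PC2 paths
# below are hypothetical.
#
# scn = init()                                    # reset the scene, CYCLES at 640x480
# V, F, Vt, Ft = readOBJ('/data/garment.obj')
# ob = createBPYObj(V, F, Vt, Ft, name='garment')
# mesh_cache(ob, '/data/garment.pc2')             # drive the vertices from a PC2 cache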
12,339
Python
35.081871
120
0.593322
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/IO.py
import os
import numpy as np
from struct import pack, unpack

"""
Reads OBJ files
Only handles vertices, faces and UV maps
Input:
- file: path to .obj file
Outputs:
- V: 3D vertices
- F: 3D faces
- Vt: UV vertices
- Ft: UV faces
Correspondence between mesh and UV map is implicit in F to Ft correspondences
If no UV map data in .obj file, it shall return Vt=None and Ft=None
"""


def readOBJ(file):
    V, Vt, F, Ft = [], [], [], []
    with open(file, 'r') as f:
        T = f.readlines()
    for t in T:
        # 3D vertex
        if t.startswith('v '):
            v = [float(n) for n in t.replace('v ', '').split(' ')]
            V += [v]
        # UV vertex
        elif t.startswith('vt '):
            v = [float(n) for n in t.replace('vt ', '').split(' ')]
            Vt += [v]
        # Face
        elif t.startswith('f '):
            idx = [n.split('/') for n in t.replace('f ', '').split(' ')]
            f = [int(n[0]) - 1 for n in idx]
            F += [f]
            # UV face
            if '/' in t:
                f = [int(n[1]) - 1 for n in idx]
                Ft += [f]
    V = np.array(V, np.float32)
    Vt = np.array(Vt, np.float32)
    if Ft:
        assert len(F) == len(Ft), 'Inconsistent .obj file, mesh and UV map do not have the same number of faces'
    else:
        Vt, Ft = None, None
    return V, F, Vt, Ft


"""
Writes OBJ files
Only handles vertices, faces and UV maps
Inputs:
- file: path to .obj file (overwrites if exists)
- V: 3D vertices
- F: 3D faces
- Vt: UV vertices
- Ft: UV faces
Correspondence between mesh and UV map is implicit in F to Ft correspondences
If no UV map data as input, it will write only 3D data in .obj file
"""


def writeOBJ(file, V, F, Vt=None, Ft=None):
    if not Vt is None:
        assert len(F) == len(Ft), 'Inconsistent data, mesh and UV map do not have the same number of faces'
    with open(file, 'w') as file:
        # Vertices
        for v in V:
            line = 'v ' + ' '.join([str(_) for _ in v]) + '\n'
            file.write(line)
        # UV verts
        if not Vt is None:
            for v in Vt:
                line = 'vt ' + ' '.join([str(_) for _ in v]) + '\n'
                file.write(line)
        # 3D Faces / UV faces
        if Ft:
            F = [[str(i + 1) + '/' + str(j + 1) for i, j in zip(f, ft)] for f, ft in zip(F, Ft)]
        else:
            F = [[str(i + 1) for i in f] for f in F]
        for f in F:
            line = 'f ' + ' '.join(f) + '\n'
            file.write(line)


"""
Reads PC2 files, and proposed format PC16 files
Inputs:
- file: path to .pc2/.pc16 file
- float16: False for PC2 files, True for PC16
Output:
- data: dictionary with .pc2/.pc16 file data
NOTE: 16-bit floats lose precision with high values (positive or negative),
we do not recommend using this format for data outside range [-2, 2]
"""


def readPC2(file, float16=False):
    # assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format'
    data = {}
    bytes = 2 if float16 else 4
    dtype = np.float16 if float16 else np.float32
    with open(file, 'rb') as f:
        # Header
        data['sign'] = f.read(12)
        # data['version'] = int.from_bytes(f.read(4), 'little')
        data['version'] = unpack('<i', f.read(4))[0]
        # Num points
        # data['nPoints'] = int.from_bytes(f.read(4), 'little')
        data['nPoints'] = unpack('<i', f.read(4))[0]
        # Start frame (unpack returns a tuple; keep the scalar)
        data['startFrame'] = unpack('f', f.read(4))[0]
        # Sample rate
        data['sampleRate'] = unpack('f', f.read(4))[0]
        # Number of samples
        # data['nSamples'] = int.from_bytes(f.read(4), 'little')
        data['nSamples'] = unpack('<i', f.read(4))[0]
        # Animation data
        size = data['nPoints'] * data['nSamples'] * 3 * bytes
        data['V'] = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32)
        data['V'] = data['V'].reshape(data['nSamples'], data['nPoints'], 3)
    return data


"""
Reads a specific frame of PC2/PC16 files
Inputs:
- file: path to .pc2/.pc16 file
- frame: number of the frame to read
- float16: False for PC2 files, True for PC16
Output:
- T: mesh vertex data at specified frame
"""


def readPC2Frame(file, frame, float16=False):
    assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format'
    assert frame >= 0 and isinstance(frame, int), 'Frame must be a positive integer'
    bytes = 2 if float16 else 4
    dtype = np.float16 if float16 else np.float32
    with open(file, 'rb') as f:
        # Num points
        f.seek(16)
        # nPoints = int.from_bytes(f.read(4), 'little')
        nPoints = unpack('<i', f.read(4))[0]
        # Number of samples
        f.seek(28)
        # nSamples = int.from_bytes(f.read(4), 'little')
        nSamples = unpack('<i', f.read(4))[0]
        # frames are 0-indexed, so the last valid index is nSamples - 1
        if frame >= nSamples:
            print("Frame index outside size")
            print("\tN. frame: " + str(frame))
            print("\tN. samples: " + str(nSamples))
            return
        # Read frame
        size = nPoints * 3 * bytes
        f.seek(size * frame, 1)  # offset from current '1'
        T = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32)
    return T.reshape(nPoints, 3)


"""
Writes PC2 and PC16 files
Inputs:
- file: path to file (overwrites if exists)
- V: 3D animation data as a three dimensional array (N. Frames x N. Vertices x 3)
- float16: False for writing as PC2 file, True for PC16
This function assumes 'startFrame' to be 0 and 'sampleRate' to be 1
NOTE: 16-bit floats lose precision with high values (positive or negative),
we do not recommend using this format for data outside range [-2, 2]
"""


def writePC2(file, V, float16=False):
    assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format'
    if float16:
        V = V.astype(np.float16)
    else:
        V = V.astype(np.float32)
    with open(file, 'wb') as f:
        # Create the header
        headerFormat = '<12siiffi'
        headerStr = pack(headerFormat, b'POINTCACHE2\0', 1, V.shape[1], 0, 1, V.shape[0])
        f.write(headerStr)
        # Write vertices
        f.write(V.tobytes())


"""
Reads proposed compressed file format for mesh topology.
Inputs:
- fname: name of the file to read
Outputs:
- F: faces of the mesh, as triangles
"""


def readFaceBIN(fname):
    if '.' in os.path.basename(fname) and not fname.endswith('.bin'):
        print("File name extension should be '.bin'")
        return
    elif not '.' in os.path.basename(fname):
        fname += '.bin'
    with open(fname, 'rb') as f:
        F = np.frombuffer(f.read(), dtype=np.uint16).astype(np.int32)
    return F.reshape((-1, 3))


"""
Compress mesh topology into uint16 (Note that this imposes a maximum of 65,536 vertices).
Writes this data into the specified file.
Inputs:
- fname: name of the file to be created (provide NO extension)
- F: faces. MUST be an Nx3 array
"""


def writeFaceBIN(fname, F):
    assert type(F) is np.ndarray, "Make sure faces is an Nx3 NumPy array"
    assert len(F.shape) == 2 and F.shape[1] == 3, "Faces have the wrong shape (should be Nx3)"
    if '.' in os.path.basename(fname) and not fname.endswith('.bin'):
        print("File name extension should be '.bin'")
        return
    elif not '.' in os.path.basename(fname):
        fname += '.bin'
    F = F.astype(np.uint16)
    with open(fname, 'wb') as f:
        f.write(F.tobytes())
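# --- Illustrative usage sketch (not part of the original file) ---
# Round-trip a small random animation through the PC2 writer/reader defined
# above; the temp path is hypothetical. PC2 stores float32, so the values
# survive the round trip exactly.
#
# V = np.random.uniform(-1, 1, (10, 100, 3)).astype(np.float32)  # 10 frames, 100 verts
# writePC2('/tmp/demo.pc2', V)
# back = readPC2('/tmp/demo.pc2')
# assert back['nSamples'] == 10 and back['nPoints'] == 100
# assert np.allclose(back['V'], V)
# frame3 = readPC2Frame('/tmp/demo.pc2', 3)                      # single (100, 3) frame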
6,824
Python
31.971014
143
0.652989
eliabntt/GRADE-RR/additional_scripts/average_rosbag.py
""" This is the code used to get the average acc speed and dynamic frames for the GRADE paper. You need some experiment folders. This code will use the bags files in those folder. Please change the folders as desired (first loop in the code, first two lines). We also suppose that you have the instance images to compute the percentage of dynamic frames. """ import rosbag import sys import numpy as np import os # loop through all the bags in the folder folders = [] folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/d94ecc9f-10f6-4f6d-b49f-1ed841f86772") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/d8c14dd6-d794-46d5-aa59-01d3552828c7") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/b13a4874-00a4-49a5-aa2d-e22d7d864b56") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/75bf66e8-acb0-4f27-842d-1945ad42f9de") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/53bfe530-122d-42cb-a1f4-453e6a2a617f") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/23aae785-c0bc-4645-9e64-fdea78c42e2d") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/b0a9c3c3-d470-45ea-82c6-ac529b6882ea") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/12e463c1-4993-4ea8-9cbf-54ba9403e5f8") names = ["d94ecc9f-10f6-4f6d-b49f-1ed841f86772","d8c14dd6-d794-46d5-aa59-01d3552828c7","b13a4874-00a4-49a5-aa2d-e22d7d864b56","75bf66e8-acb0-4f27-842d-1945ad42f9de","53bfe530-122d-42cb-a1f4-453e6a2a617f","23aae785-c0bc-4645-9e64-fdea78c42e2d","b0a9c3c3-d470-45ea-82c6-ac529b6882ea","12e463c1-4993-4ea8-9cbf-54ba9403e5f8"] import pandas as pd df = pd.DataFrame(columns=['name','speed','acc','dynamic_frames','dynamic_frames_avg_coverage']) for folder in folders: bag_folder = os.path.join(folder, "reindex_bags") bags = [] for bag in os.listdir(bag_folder): if bag.endswith(".bag"): for n in names: if n in bag: bags.append(bag) break # sort bags according to the number bags = sorted(bags, key=lambda x: int(x.split("_")[1].split(".")[0])) avg_speed = [] # avg absolute speed per axis avg_acc = [] # avg absolute acc per axis for bagname in bags: print(bagname) # open the bag bag = rosbag.Bag(os.path.join(bag_folder, bagname)) old_t = None # loop through all the topics for topic, msg, t in bag.read_messages(topics=['/my_robot_0/odom']): # if the topic is the one we want if topic == "/my_robot_0/odom": # get the data data_lin = np.array([msg.twist.twist.linear.x, msg.twist.twist.linear.y, msg.twist.twist.linear.z]) data_ang = np.array([msg.twist.twist.angular.x, msg.twist.twist.angular.y, msg.twist.twist.angular.z]) # get the speed avg_speed.append([np.abs(data_lin[0]), np.abs(data_lin[1]), np.abs(data_lin[2]), np.abs(data_ang[0]), np.abs(data_ang[1]), np.abs(data_ang[2])]) # get the acceleration by using the difference between the current and the previous time if old_t is None: old_speed = [data_lin[0], data_lin[1], data_lin[2], data_ang[0], data_ang[1], data_ang[2]] old_t = t else: # get the difference between the current and the previous time dt = (t - old_t).to_sec() # get the acceleration avg_acc.append(np.abs(np.array( [(data_lin[0] - old_speed[0]) / dt, (data_lin[1] - old_speed[1]) / dt, (data_lin[2] - old_speed[2]) / dt, (data_ang[0] - old_speed[3]) / dt, (data_ang[1] - old_speed[4]) / dt, (data_ang[2] - old_speed[5]) / dt]))) # update the old speed and time old_speed = [data_lin[0], data_lin[1], data_lin[2], data_ang[0], data_ang[1], data_ang[2]] old_t = t bag.close() df = pd.concat([df, pd.DataFrame([[bagname[:-6], np.round(np.mean(avg_speed, axis=0),3), 
np.round(np.mean(avg_acc, axis=0),3), 0, 0]], columns=df.columns)]) folders = [] folders.append("/ps/project/irotate/DE_few_obs_cam0_horiz/d94ecc9f-10f6-4f6d-b49f-1ed841f86772") folders.append("/ps/project/irotate/DE_few_obs_cam0_horiz/d8c14dd6-d794-46d5-aa59-01d3552828c7") folders.append("/ps/project/irotate/DE_cam0_horiz/b13a4874-00a4-49a5-aa2d-e22d7d864b56") folders.append("/ps/project/irotate/DE_cam1/75bf66e8-acb0-4f27-842d-1945ad42f9de") folders.append("/ps/project/irotate/DE_few_obs_cam1/53bfe530-122d-42cb-a1f4-453e6a2a617f") folders.append("/ps/project/irotate/DE_lot_obs_cam0/23aae785-c0bc-4645-9e64-fdea78c42e2d") import cv2 for folder in folders: dynamic_images = 0 dynamic_coverage = 0 masks = os.path.join(folder, "Viewport0_occluded/instance") for mask in os.listdir(masks): if mask.endswith(".npy"): f = np.load(os.path.join(masks, mask), allow_pickle=True) classes = [] for item in f[1]: if item[3] == "human" or item[3] == "google" or item[3] == "shapenet": classes.append(item[0]) """ opencv reshape f[0] to (640, 480) """ img = cv2.resize(f[0].astype(np.uint16), (640, 480), interpolation=cv2.INTER_NEAREST) out = np.isin(img, classes) """count the number of elements of img that are equal to an element of classes""" if len(out[out==True]) > 0: dynamic_coverage += len(out[out==True]) / img.size dynamic_images += 1 df.loc[df["name"] == folder.split("/")[-1], "dynamic_frames"] = dynamic_images df.loc[df["name"] == folder.split("/")[-1], "dynamic_frames_avg_coverage"] = round(dynamic_coverage / dynamic_images*100,2) # print dataframe as latex table print(df.to_latex(index=False)) df.to_pickle("dynamic_frames.pkl")
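# --- Illustrative sketch (not part of the original script) ---
# The per-axis statistics above reduce to this: given per-message speeds, the
# acceleration is a finite difference over the message timestamps, and the
# table reports the mean of the absolute values. The numbers below are made up.
#
# ts = [0.0, 0.1, 0.2]                    # message times [s]
# vx = [0.0, 0.5, 0.8]                    # linear x speed [m/s]
# acc = [abs((vx[i] - vx[i - 1]) / (ts[i] - ts[i - 1])) for i in range(1, len(ts))]
# print(np.round(np.mean(acc), 3))        # mean |a_x|, as reported per axis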
5,411
Python
46.060869
321
0.697653