file_path
stringlengths
21
202
content
stringlengths
12
1.02M
size
int64
12
1.02M
lang
stringclasses
9 values
avg_line_length
float64
3.33
100
max_line_length
int64
10
993
alphanum_fraction
float64
0.27
0.93
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gltfImporter.py
import logging import omni.client import os import subprocess import tempfile from typing import List import xml.etree.ElementTree as ET from zipfile import ZipFile from .filepathUtility import Filepath from .gdtfUtil import Model class GLTFImporter: TMP_ARCHIVE_EXTRACT_DIR = f"{tempfile.gettempdir()}/MF.OV.GDTF/" def convert(root: ET.Element, archive: ZipFile, output_dir: str) -> List[Model]: models: List[Model] = GLTFImporter._get_model_nodes(root) models_filtered: List[Model] = GLTFImporter._filter_models(models) GLTFImporter._extract_gltf_to_tmp(models_filtered, archive) GLTFImporter._convert_gltf(models_filtered, output_dir) return models def _get_model_nodes(root: ET.Element) -> List[Model]: node_fixture: ET.Element = root.find("FixtureType") node_models: ET.Element = node_fixture.find("Models") nodes_model = node_models.findall("Model") models: List[Model] = [] for node_model in nodes_model: models.append(Model(node_model)) return models def _filter_models(models: List[Model]) -> List[Model]: filters: List[str] = ['pigtail', 'beam'] filtered_models: List[Model] = [] for model in models: if model.has_file(): filtered_models.append(model) elif model.get_name().lower() not in filters: logger = logging.getLogger(__name__) logger.info(f"File attribute empty for model node {model.get_name()}, skipping.") return filtered_models def _extract_gltf_to_tmp(models: List[Model], gdtf_archive: ZipFile): namelist = gdtf_archive.namelist() to_remove: List[Model] = [] for model in models: filename = model.get_file() filepath_glb = f"models/gltf/{filename}.glb" filepath_gltf = f"models/gltf/{filename}.gltf" filepath_3ds = f"models/3ds/{filename}.3ds" if filepath_glb in namelist: tmp_export_path = gdtf_archive.extract(filepath_glb, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR) model.set_tmpdir_filepath(Filepath(tmp_export_path)) elif filepath_gltf in namelist: tmp_export_path = gdtf_archive.extract(filepath_gltf, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR) for filepath in namelist: # Also import .bin, textures, etc. 
if filepath.startswith(f"models/gltf/{filename}") and filepath != filepath_gltf: gdtf_archive.extract(filepath, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR) model.set_tmpdir_filepath(Filepath(tmp_export_path)) elif filepath_3ds in namelist: tmp_export_path = gdtf_archive.extract(filepath_3ds, GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR) temp_export_path_gltf = tmp_export_path[:-4] + ".gltf" GLTFImporter._convert_3ds_to_gltf(tmp_export_path, temp_export_path_gltf) model.set_tmpdir_filepath(Filepath(temp_export_path_gltf)) model.set_converted_from_3ds() os.remove(tmp_export_path) else: logger = logging.getLogger(__name__) logger.warn(f"No file found for {filename}, skipping.") to_remove.append(model) for model in to_remove: models.remove(model) def _convert_3ds_to_gltf(input, output): path = __file__ my_env = os.environ.copy() my_env["PATH"] = path + '\\..\\' + os.pathsep + my_env['PATH'] scriptPath = path + "\\..\\3dsConverterScript.py" try: result = subprocess.run(["py", "-3.10", scriptPath, input, output], capture_output=True, env=my_env) if result.returncode != 0: logger = logging.getLogger(__name__) logger.error(f"Failed to convert 3ds file to gltf: {input}\nerror (Requires python 3.10): {result.stderr.decode('utf-8')}\nerror message: {result.stdout.decode('utf-8')}") except Exception as e: logger = logging.getLogger(__name__) logger.error(f"Failed to convert 3ds file to gltf: {input}\n{e}") def _convert_gltf(models: List[Model], gdtf_output_dir): output_dir = gdtf_output_dir + "gltf/" _, files_in_output_dir = omni.client.list(output_dir) # Ignoring omni.client.Result relative_paths_in_output_dir = [x.relative_path for x in files_in_output_dir] converted_models: List[Model] = [] for model in models: file: Filepath = model.get_tmpdir_filepath() if model.get_converted_from_3ds(): bin_file = file.basename[:-5] + ".bin" bin_path = output_dir + bin_file if bin_file not in relative_paths_in_output_dir: input_path = file.fullpath[:-5] + ".bin" result = result = omni.client.copy(input_path, bin_path, omni.client.CopyBehavior.OVERWRITE) output_file = file.basename output_path = output_dir + output_file if output_file not in relative_paths_in_output_dir: input_path = file.fullpath result = omni.client.copy(input_path, output_path, omni.client.CopyBehavior.OVERWRITE) if result == omni.client.Result.OK: model.set_converted_filepath(Filepath(output_path)) converted_models.append(model) else: logger = logging.getLogger(__name__) logger.error(f"Failure to convert file {input_path}: {result}") else: model.set_converted_filepath(Filepath(output_path)) converted_models.append(model) return converted_models
5,820
Python
46.325203
187
0.601203
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterContext.py
class ConverterContext: usd_reference_path = ""
52
Python
16.666661
27
0.711538
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfUtil.py
import math import xml.etree.ElementTree as ET from pxr import Usd, UsdGeom, UsdLux, Sdf from .filepathUtility import Filepath from .USDTools import USDTools def get_attrib_if_exists(node: ET.Element, attr: str): return node.attrib[attr] if attr in node.attrib else None def get_attrib_text_if_exists(node: ET.Element, attr: str): return get_attrib_if_exists(node, attr) def get_attrib_int_if_exists(node: ET.Element, attr: str): str_value = get_attrib_if_exists(node, attr) if str_value is not None: return int(str_value) return None def get_attrib_float_if_exists(node: ET.Element, attr: str): str_value = get_attrib_if_exists(node, attr) if str_value is not None: return float(str_value) return None def set_attribute_text_if_valid(prim: Usd.Prim, name: str, value: str): if value is not None: USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.String, value) def set_attribute_int_if_valid(prim: Usd.Prim, name: str, value: str): if value is not None: USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Int, value) def set_attribute_float_if_valid(prim: Usd.Prim, name: str, value: str): if value is not None: USDTools.set_prim_attribute(prim, name, Sdf.ValueTypeNames.Float, value) class Model: def __init__(self, node: ET.Element): self._name = node.attrib["Name"] self._name_usd = USDTools.make_name_valid(self._name) self._file = get_attrib_if_exists(node, "File") self._primitive_type = node.attrib["PrimitiveType"] self._height = float(node.attrib["Height"]) self._length = float(node.attrib["Length"]) self._width = float(node.attrib["Width"]) self._converted_from_3ds = False def get_name(self) -> str: return self._name def get_name_usd(self) -> str: return self._name_usd def has_file(self) -> bool: return self._file is not None and self._file != "" def get_file(self) -> str: return self._file def set_tmpdir_filepath(self, path: Filepath): self._tmpdir_filepath = path def get_tmpdir_filepath(self) -> Filepath: return self._tmpdir_filepath def set_converted_from_3ds(self): self._converted_from_3ds = True def get_converted_from_3ds(self): return self._converted_from_3ds def set_converted_filepath(self, path: Filepath): self._converted_filepath = path def get_converted_filepath(self) -> Filepath: return self._converted_filepath def get_height(self) -> float: return self._height def get_width(self) -> float: return self._width class Geometry: def __init__(self, node: ET.Element): self._name: str = node.attrib["Name"] self._model_id: str = get_attrib_if_exists(node, "Model") self._position_matrix = node.attrib["Position"] self._tag = node.tag def get_tag(self) -> str: return self._tag def get_name(self) -> str: return self._name def get_model_id(self) -> str: if self._model_id is not None: return self._model_id return self._name def get_position_matrix(self) -> str: return self._position_matrix def set_model(self, model: Model): self._model = model def get_model(self) -> Model: return self._model def set_stage_path(self, path: str): self._stage_path = path def get_stage_path(self) -> str: return self._stage_path def set_depth(self, depth: int): self._depth = depth def get_depth(self) -> int: return self._depth def set_xform_model(self, xform: UsdGeom.Xform): self._xform_model = xform def get_xform_model(self) -> UsdGeom.Xform: return self._xform_model def set_xform_parent(self, xform: UsdGeom.Xform): self._xform_parent = xform def get_xform_parent(self) -> UsdGeom.Xform: return self._xform_parent class Beam: def __init__(self, geometry: Geometry, node: ET.Element): self._radius = 
float(node.attrib["BeamRadius"]) self._position_matrix = geometry.get_position_matrix() self._stage_path = geometry.get_stage_path() # The attributes should always exists as per standard definition self._beam_angle = get_attrib_float_if_exists(node, "BeamAngle") self._beam_type = get_attrib_text_if_exists(node, "BeamType") self._color_rendering_index = get_attrib_int_if_exists(node, "ColorRenderingIndex") self._color_temperature = get_attrib_float_if_exists(node, "ColorTemperature") self._field_angle = get_attrib_float_if_exists(node, "FieldAngle") self._lamp_type = get_attrib_text_if_exists(node, "LampType") self._luminous_flux = get_attrib_float_if_exists(node, "LuminousFlux") self._power_consumption = get_attrib_float_if_exists(node, "PowerConsumption") def get_radius(self) -> float: return self._radius def get_position_matrix(self) -> str: return self._position_matrix def get_stage_path(self) -> str: return self._stage_path def get_intensity(self) -> float: lumens = self._luminous_flux radius = self._radius if lumens is None: return None candela: float = lumens / 12.566 numerator = candela * 1000 denominator = 4 * math.pi * radius * radius result = numerator / denominator return result def apply_attributes_to_prim(self, light: UsdLux): prim: Usd.Prim = light.GetPrim() set_attribute_float_if_valid(prim, "BeamAngle", self._beam_angle) set_attribute_text_if_valid(prim, "BeamType", self._beam_type) set_attribute_int_if_valid(prim, "ColorRenderingIndex", self._color_rendering_index) set_attribute_float_if_valid(prim, "ColorTemperature", self._color_temperature) set_attribute_float_if_valid(prim, "FieldAngle", self._field_angle) set_attribute_text_if_valid(prim, "LampType", self._lamp_type) set_attribute_float_if_valid(prim, "LuminousFlux", self._luminous_flux) set_attribute_float_if_valid(prim, "PowerConsumption", self._power_consumption) USDTools.set_light_attributes(light, self._beam_angle, self.get_intensity(), self._color_temperature) class FixtureAttributes: def __init__(self, root: ET.Element): self._operating_temperature_high = None self._operating_temperature_low = None self._weight = None self._leg_height = None node_fixture: ET.Element = root.find("FixtureType") node_physdesc: ET.Element = node_fixture.find("PhysicalDescriptions") if node_physdesc is not None: node_properties: ET.Element = node_physdesc.find("Properties") if node_properties is not None: node_operatingtemp: ET.Element = node_properties.find("OperatingTemperature") if node_operatingtemp is not None: self._operating_temperature_high = get_attrib_float_if_exists(node_operatingtemp, "High") self._operating_temperature_low = get_attrib_float_if_exists(node_operatingtemp, "Low") node_weight: ET.Element = node_properties.find("Weight") if node_weight is not None: self._weight = get_attrib_float_if_exists(node_weight, "Value") node_legheight: ET.Element = node_properties.find("LegHeight") if node_legheight is not None: self._leg_height = get_attrib_float_if_exists(node_legheight, "Value") def apply_attributes_to_prim(self, prim: Usd.Prim): set_attribute_float_if_valid(prim, "OperatingTemperature:High", self._operating_temperature_high) set_attribute_float_if_valid(prim, "OperatingTemperature:Low", self._operating_temperature_low) set_attribute_float_if_valid(prim, "Weight", self._weight) set_attribute_float_if_valid(prim, "LegHeight", self._leg_height)
8,096
Python
35.147321
109
0.643157
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/gdtfImporter.py
from io import BytesIO import logging from typing import List import xml.etree.ElementTree as ET from zipfile import ZipFile from pxr import Gf, Sdf, Usd, UsdGeom from .filepathUtility import Filepath from .gdtfUtil import Model, Geometry, Beam, FixtureAttributes from .gltfImporter import GLTFImporter from .USDTools import USDTools class GDTFImporter: def convert(file: Filepath, output_dir: str, output_ext: str = ".usd") -> str: try: with ZipFile(file.fullpath, 'r') as archive: gdtf_output_dir = output_dir + file.filename + "_gdtf/" url: str = GDTFImporter._convert(archive, gdtf_output_dir, file.filename, output_ext) return url except Exception as e: logger = logging.getLogger(__name__) logger.error(f"Failed to parse gdtf file at {file.fullpath}. Make sure it is not corrupt. {e}") return None def convert_from_mvr(spec_name: str, output_dir: str, mvr_archive: ZipFile, output_ext: str = ".usd") -> bool: spec_name_with_ext = spec_name + ".gdtf" if spec_name_with_ext in mvr_archive.namelist(): gdtf_data = BytesIO(mvr_archive.read(spec_name_with_ext)) gdtf_output_dir = output_dir + spec_name + "_gdtf/" with ZipFile(gdtf_data) as gdtf_archive: GDTFImporter._convert(gdtf_archive, gdtf_output_dir, spec_name, output_ext) return True else: return False def _convert(archive: ZipFile, output_dir: str, name: str, output_ext: str) -> str: data = archive.read("description.xml") root = ET.fromstring(data) converted_models: List[Model] = GLTFImporter.convert(root, archive, output_dir) url: str = GDTFImporter._convert_gdtf_usd(output_dir, name, output_ext, root, converted_models) return url def _convert_gdtf_usd(output_dir: str, filename: str, ext: str, root: ET.Element, models: List[Model]) -> str: url: str = output_dir + filename + ext stage: Usd.Stage = GDTFImporter._get_or_create_gdtf_usd(url) geometries, beams = GDTFImporter._get_stage_hierarchy(root, models, stage) GDTFImporter._add_gltf_reference(stage, geometries) GDTFImporter._apply_gdtf_matrix(stage, geometries) GDTFImporter._add_light_to_hierarchy(stage, beams, geometries) GDTFImporter._apply_gltf_scale(stage, geometries) GDTFImporter._set_general_attributes(stage, root) return url def _get_or_create_gdtf_usd(url: str) -> Usd.Stage: return USDTools.get_or_create_stage(url) def _get_stage_hierarchy(root: ET.Element, models: List[Model], stage: Usd.Stage) -> (List[Geometry], List[Beam]): node_fixture: ET.Element = root.find("FixtureType") node_geometries = node_fixture.find("Geometries") default_prim_path = stage.GetDefaultPrim().GetPath() geometries: List[Geometry] = [] beams: List[Beam] = [] GDTFImporter._get_stage_hierarchy_recursive(node_geometries, models, geometries, beams, default_prim_path, 0) return geometries, beams def _get_stage_hierarchy_recursive(parent_node: ET.Element, models: List[Model], geometries: List[Geometry], beams: List[Beam], path: str, depth: int): geometry_filter: List[str] = ['Geometry', 'Axis', 'Beam', 'Inventory'] for child_node in list(parent_node): if 'Model' in child_node.attrib: if child_node.tag not in geometry_filter: # Pass through (might want to add an xform) GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, path, depth + 1) else: geometry: Geometry = Geometry(child_node) model_id: str = geometry.get_model_id() model: Model = next((model for model in models if model.get_name() == model_id), None) if model is not None and model.has_file(): geometry.set_model(model) stage_path = f"{path}/{model.get_name_usd()}" geometry.set_stage_path(stage_path) geometry.set_depth(depth) 
geometries.append(geometry) GDTFImporter._get_stage_hierarchy_recursive(child_node, models, geometries, beams, stage_path, depth + 1) else: if model_id.lower() == "pigtail": pass # Skip pigtail geometry elif model_id.lower() == "beam": stage_path = f"{path}/beam" geometry.set_stage_path(stage_path) beam: Beam = Beam(geometry, child_node) beams.append(beam) elif model is not None and not model.has_file(): logger = logging.getLogger(__name__) logger.warn(f"No file found for {model_id}, skipping.") else: # Probably could just be a transform pass else: # Probably could just be a transform pass def _add_gltf_reference(stage: Usd.Stage, geometries: List[Geometry]): stage_path = Filepath(USDTools.get_stage_directory(stage)) for geometry in geometries: model: Model = geometry.get_model() relative_path: str = stage_path.get_relative_from(model.get_converted_filepath()) xform_parent, xform_model = USDTools.add_reference(stage, relative_path, geometry.get_stage_path(), "/model") xform_model.GetPrim().CreateAttribute("mf:gdtf:converter_from_3ds", Sdf.ValueTypeNames.Bool).Set(model.get_converted_from_3ds()) geometry.set_xform_parent(xform_parent) geometry.set_xform_model(xform_model) stage.Save() def _apply_gltf_scale(stage: Usd.Stage, geometries: List[Geometry]): world_xform: UsdGeom.Xform = UsdGeom.Xform(stage.GetDefaultPrim()) stage_metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage) scale = 1 / stage_metersPerUnit USDTools.apply_scale_xform_op(world_xform, scale) converted_3ds = False for geometry in geometries: model = geometry.get_model() if model.get_converted_from_3ds(): converted_3ds = True if converted_3ds: for geometry in geometries: if geometry.get_tag() != 'Beam': xform = geometry.get_xform_model() USDTools.apply_scale_xform_op(xform, UsdGeom.LinearUnits.millimeters) # force mm stage.Save() def _apply_gdtf_matrix(stage: Usd.Stage, geometries: List[Geometry]): applied_scale = USDTools.compute_applied_scale(stage) axis_matrix = USDTools.get_axis_rotation_matrix() for geometry in geometries: translation, rotation = USDTools.compute_xform_values(geometry.get_position_matrix(), applied_scale, axis_matrix) xform: UsdGeom.Xform = geometry.get_xform_parent() xform.ClearXformOpOrder() # Prevent error when overwritting xform.AddTranslateOp().Set(translation) xform.AddRotateZYXOp().Set(rotation) xform.AddScaleOp().Set(Gf.Vec3d(1, 1, 1)) stage.Save() def _add_light_to_hierarchy(stage: Usd.Stage, beams: List[Beam], geometries: List[Geometry]): if len(beams) > 0: GDTFImporter._add_beam_to_hierarchy(stage, beams) else: # Some gdtf files only represents brackets and such. They contain only "Inventory" geometry. # We don't want to add a light source to those. 
has_not_inventory_geometry = False for geometry in geometries: if geometry.get_tag() != 'Inventory': has_not_inventory_geometry = True if has_not_inventory_geometry: GDTFImporter._add_default_light_to_hierarchy(stage, geometries) def _add_beam_to_hierarchy(stage: Usd.Stage, beams: List[Beam]): for beam in beams: light = USDTools.add_beam(stage, beam.get_stage_path(), beam.get_position_matrix(), beam.get_radius()) beam.apply_attributes_to_prim(light) stage.Save() def _add_default_light_to_hierarchy(stage: Usd.Stage, geometries: List[Geometry]): deepest_geom = geometries[-1] max_depth = deepest_geom.get_depth() for geom in reversed(geometries): depth = geom.get_depth() if (depth > max_depth): deepest_geom = geom max_depth = depth light_stage_path = deepest_geom.get_stage_path() + "/Beam" model = deepest_geom.get_model() USDTools.add_light_default(stage, light_stage_path, model.get_height(), model.get_width()) stage.Save() def _set_general_attributes(stage: Usd.Stage, root: ET.Element): fixtureAttr = FixtureAttributes(root) prim: Usd.Prim = USDTools.get_default_prim(stage) fixtureAttr.apply_attributes_to_prim(prim) stage.Save()
9,327
Python
48.617021
140
0.599979
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/3dsConverterScript.py
import sys import os def main(): os.environ["PATH"] = __file__ + os.pathsep + os.environ["PATH"] if len(sys.argv) <= 2: print("Need at least 2 arguments") exit(1) from pyassimp import load, export inputFile = sys.argv[1] outputFile = sys.argv[2] print("Input 3ds file:" + inputFile) print("output file: " + outputFile) with load(inputFile) as scene: export(scene, outputFile, "gltf2") if __name__ == "__main__": main()
487
Python
18.519999
67
0.585216
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/extension.py
import omni.ext import omni.kit.tool.asset_importer as ai from .converterDelegate import ConverterDelegate class MfOvGdtfExtension(omni.ext.IExt): def on_startup(self, _): self._delegate_gdtf = ConverterDelegate( "GDTF Converter", ["(.*\\.gdtf$)"], ["GDTF Files (*.gdtf)"] ) ai.register_importer(self._delegate_gdtf) def on_shutdown(self): ai.remove_importer(self._delegate_gdtf) self._delegate_gdtf.destroy() self._delegate_gdtf = None
533
Python
25.699999
49
0.617261
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterDelegate.py
import os import omni.kit.tool.asset_importer as ai from .converterOptionsBuilder import ConverterOptionsBuilder from .converterHelper import ConverterHelper class ConverterDelegate(ai.AbstractImporterDelegate): def __init__(self, name, filters, descriptions): super().__init__() self._hoops_options_builder = ConverterOptionsBuilder() self._hoops_converter = ConverterHelper() self._name = name self._filters = filters self._descriptions = descriptions def destroy(self): if self._hoops_converter: # self._hoops_converter.destroy() self._hoops_converter = None if self._hoops_options_builder: self._hoops_options_builder.destroy() self._hoops_options_builder = None @property def name(self): return self._name @property def filter_regexes(self): return self._filters @property def filter_descriptions(self): return self._descriptions def build_options(self, paths): pass # TODO enable this after the filepicker bugfix: OM-47383 # self._hoops_options_builder.build_pane(paths) async def convert_assets(self, paths): context = self._hoops_options_builder.get_import_options() hoops_context = context.cad_converter_context absolute_paths = [] relative_paths = [] for file_path in paths: if self.is_supported_format(file_path): absolute_paths.append(file_path) filename = os.path.basename(file_path) relative_paths.append(filename) converted_assets = await self._hoops_converter.create_import_task( absolute_paths, context.export_folder, hoops_context ) return converted_assets
1,825
Python
28.934426
74
0.637808
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptionsBuilder.py
from omni.kit.menu import utils from omni.kit.tool.asset_importer.file_picker import FilePicker from omni.kit.tool.asset_importer.filebrowser import FileBrowserMode, FileBrowserSelectionType import omni.kit.window.content_browser as content from .converterOptions import ConverterOptions class ConverterOptionsBuilder: def __init__(self): self._file_picker = None self._export_content = ConverterOptions() self._folder_button = None self._refresh_default_folder = False self._default_folder = None self._clear() def destroy(self): self._clear() if self._file_picker: self._file_picker.destroy() def _clear(self): self._built = False self._export_folder_field = None if self._folder_button: self._folder_button.set_clicked_fn(None) self._folder_button = None def set_default_target_folder(self, folder: str): self._default_folder = folder self._refresh_default_folder = True def _select_picked_folder_callback(self, paths): if paths: self._export_folder_field.model.set_value(paths[0]) def _cancel_picked_folder_callback(self): pass def _show_file_picker(self): if not self._file_picker: mode = FileBrowserMode.OPEN file_type = FileBrowserSelectionType.DIRECTORY_ONLY filters = [(".*", "All Files (*.*)")] self._file_picker = FilePicker("Select Folder", mode=mode, file_type=file_type, filter_options=filters) self._file_picker.set_file_selected_fn(self._select_picked_folder_callback) self._file_picker.set_cancel_fn(self._cancel_picked_folder_callback) folder = self._export_folder_field.model.get_value_as_string() if utils.is_folder(folder): self._file_picker.show(folder) else: self._file_picker.show(self._get_current_dir_in_content_window()) def _get_current_dir_in_content_window(self): content_window = content.get_content_window() return content_window.get_current_directory() def get_import_options(self): return ConverterOptions()
2,210
Python
34.66129
115
0.646606
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterHelper.py
import logging import shutil from urllib.parse import unquote import omni.kit.window.content_browser from .filepathUtility import Filepath from .gdtfImporter import GDTFImporter from .gltfImporter import GLTFImporter class ConverterHelper: def _create_import_task(self, absolute_path, export_folder, _): absolute_path_unquoted = unquote(absolute_path) if absolute_path_unquoted.startswith("file:/"): path = absolute_path_unquoted[6:] else: path = absolute_path_unquoted current_nucleus_dir = omni.kit.window.content_browser.get_content_window().get_current_directory() file: Filepath = Filepath(path) output_dir = current_nucleus_dir if export_folder is None else export_folder if export_folder is not None and export_folder != "": output_dir = export_folder # Cannot Unzip directly from Nucleus, must download file beforehand if file.is_nucleus_path(): tmp_path = GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR + file.basename result = omni.client.copy(file.fullpath, tmp_path, omni.client.CopyBehavior.OVERWRITE) if result == omni.client.Result.OK: file = Filepath(tmp_path) else: logger = logging.getLogger(__name__) logger.error(f"Could not import {file.fullpath} directly from Omniverse, try downloading the file instead") return url: str = GDTFImporter.convert(file, output_dir) return url async def create_import_task(self, absolute_paths, export_folder, hoops_context): converted_assets = {} for i in range(len(absolute_paths)): converted_assets[absolute_paths[i]] = self._create_import_task(absolute_paths[i], export_folder, hoops_context) shutil.rmtree(GLTFImporter.TMP_ARCHIVE_EXTRACT_DIR) return converted_assets
1,987
Python
40.416666
123
0.642174
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/USDTools.py
import numpy as np from typing import List, Tuple from unidecode import unidecode from urllib.parse import unquote import omni.usd from pxr import Gf, Tf, Sdf, UsdLux, Usd, UsdGeom class USDTools: def make_name_valid(name: str) -> str: if name[:1].isdigit(): name = "_" + name return Tf.MakeValidIdentifier(unidecode(name)) def get_context(): return omni.usd.get_context() def get_stage() -> Usd.Stage: context = USDTools.get_context() return context.get_stage() def get_stage_directory(stage: Usd.Stage = None) -> str: if stage is None: stage = USDTools.get_stage() root_layer = stage.GetRootLayer() repository_path = root_layer.realPath repository_path_unquoted = unquote(repository_path) dir_index = repository_path_unquoted.rfind("/") return repository_path_unquoted[:dir_index + 1] def get_or_create_stage(url: str) -> Usd.Stage: try: # TODO: Better way to check if stage exists? return Usd.Stage.Open(url) except: stage = Usd.Stage.CreateNew(url) UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters) # TODO get user defaults UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # TODO get user defaults default_prim = stage.DefinePrim("/World", "Xform") stage.SetDefaultPrim(default_prim) stage.Save() return stage def get_default_prim(stage: Usd.Stage) -> Usd.Prim: return stage.GetDefaultPrim() def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str, stage_subpath: str) -> Tuple[ UsdGeom.Xform, UsdGeom.Xform]: xform_parent: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path) xform_ref: UsdGeom.Xform = UsdGeom.Xform.Define(stage, stage_path + stage_subpath) xform_ref_prim: Usd.Prim = xform_ref.GetPrim() path_unquoted = unquote(ref_path_relative) references: Usd.References = xform_ref_prim.GetReferences() references.AddReference(path_unquoted) return xform_parent, xform_ref def get_applied_scale(stage: Usd.Stage, scale_factor: float): stage_scale = UsdGeom.GetStageMetersPerUnit(stage) return scale_factor / stage_scale def apply_scale_xform_op(xform: UsdGeom.Xform, scale: float): scale_value = Gf.Vec3d(scale, scale, scale) xform_ordered_ops: List[UsdGeom.XformOp] = xform.GetOrderedXformOps() found_op = False for xform_op in xform_ordered_ops: if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale: xform_op.Set(scale_value) found_op = True if not found_op: xform.AddScaleOp().Set(scale_value) def np_matrix_from_gdtf(value: str) -> np.matrix: # GDTF Matrix is: 4x4, row-major, Right-Handed, Z-up (Distance Unit not specified, but mm implied) # expect form like "{x,y,z,w}{x,y,z,w}{x,y,z,w}{x,y,z,w}" where "x","y","z", "w" is similar to 1.000000 # make source compatible with np.matrix constructor: "x y z; x y z; x y z; x y z" value_alt = value[1:] # Removes "{" prefix value_alt = value_alt[:-1] # Removes "}" suffix value_alt = value_alt.replace("}{", "; ") value_alt = value_alt.replace(",", " ") np_matrix: np.matrix = np.matrix(value_alt) return np_matrix def gf_matrix_from_gdtf(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d: # Row major matrix gf_matrix = Gf.Matrix4d( np_matrix.item((0, 0)), np_matrix.item((1, 0)), np_matrix.item((2, 0)), np_matrix.item((3, 0)), np_matrix.item((0, 1)), np_matrix.item((1, 1)), np_matrix.item((2, 1)), np_matrix.item((3, 1)), np_matrix.item((0, 2)), np_matrix.item((1, 2)), np_matrix.item((2, 2)), np_matrix.item((3, 2)), np_matrix.item((0, 3)), np_matrix.item((1, 3)), np_matrix.item((2, 3)), np_matrix.item((3, 3)) ) return gf_matrix def add_beam(stage: Usd.Stage, path: str, position_matrix: str, radius: float) -> UsdLux: applied_scale = 
USDTools.compute_applied_scale(stage) axis_matrix = USDTools.get_axis_rotation_matrix() light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path) translation, rotation = USDTools.compute_xform_values(position_matrix, applied_scale, axis_matrix) rotation += Gf.Vec3d(-90, 0, 0) scale = Gf.Vec3d(radius * 2, radius * 2, 1) USDTools._set_light_xform(light, translation, rotation, scale) USDTools._additional_default_attributes(light) return light def add_light_default(stage: Usd.Stage, path: str, height: float, diameter: float): light: UsdLux.DiskLight = UsdLux.DiskLight.Define(stage, path) translation = Gf.Vec3d(0, -height * 0.5, 0) rotation = Gf.Vec3d(-90, 0, 0) scale = Gf.Vec3d(diameter, diameter, 1) USDTools._set_light_xform(light, translation, rotation, scale) USDTools._additional_default_attributes(light) def _additional_default_attributes(light: UsdLux): prim = light.GetPrim() prim.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(True) light.CreateIntensityAttr().Set(60_000) # if UsdLux.ShapingAPI.CanApply(prim): UsdLux.ShapingAPI.Apply(prim) def _set_light_xform(light: UsdLux.DiskLight, translation: Gf.Vec3d, rotation: Gf.Vec3d, scale: Gf.Vec3d): light.ClearXformOpOrder() # Prevent error when overwritting light.AddTranslateOp().Set(translation) light.AddRotateZYXOp().Set(rotation) light.AddScaleOp().Set(scale) def set_light_attributes(light: UsdLux.DiskLight, beamAngle: float, intensity: float, colorTemp: float): if colorTemp is not None: light.GetEnableColorTemperatureAttr().Set(True) light.GetColorTemperatureAttr().Set(colorTemp) else: light.GetEnableColorTemperatureAttr().Set(False) light.GetColorTemperatureAttr().Set(6500) # default value if intensity is not None: light.GetIntensityAttr().Set(intensity) if beamAngle is not None: prim: Usd.Prim = light.GetPrim() shapingAPI = UsdLux.ShapingAPI(prim) shapingAPI.GetShapingConeAngleAttr().Set(beamAngle) def compute_applied_scale(stage: Usd.Stage) -> float: gdtf_scale = 1 # GDTF dimensions are in meters applied_scale = USDTools.get_applied_scale(stage, gdtf_scale) return applied_scale def get_axis_rotation_matrix() -> Gf.Matrix3d: rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0, 0, 0, 1, 0, -1, 0) return rotate_minus90deg_xaxis def compute_xform_values(position_matrix: str, scale: float, axis_matrix: Gf.Matrix3d) -> (Gf.Vec3d, Gf.Vec3d): np_matrix: np.matrix = USDTools.np_matrix_from_gdtf(position_matrix) gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_gdtf(np_matrix, scale) rotation: Gf.Rotation = gf_matrix.GetTranspose().ExtractRotation() euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis()) translation_value = axis_matrix * gf_matrix.ExtractTranslation() rotation_value = axis_matrix * euler return translation_value, rotation_value def set_prim_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value): prim.CreateAttribute(f"mf:gdtf:{attribute_name}", attribute_type).Set(attribute_value)
7,736
Python
45.329341
117
0.633273
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/converterOptions.py
from .converterContext import ConverterContext class ConverterOptions: def __init__(self): self.cad_converter_context = ConverterContext() self.export_folder: str = None
192
Python
23.124997
55
0.708333
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/filepathUtility.py
import os class Filepath: def __init__(self, filepath: str): self._is_none = filepath == "" self.fullpath = filepath self.directory = os.path.dirname(filepath) + "/" self.basename = os.path.basename(filepath) self.filename, self.ext = os.path.splitext(self.basename) def is_nucleus_path(self) -> bool: # TODO: Replace with omni utility method return self.directory[:12] == "omniverse://" def get_relative_from(self, other) -> str: if self._is_none: return other.fullpath else: return "./" + other.fullpath[len(self.directory):]
641
Python
28.181817
65
0.592824
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/material.py
# Dummy value. # # No texture, but the value to be used as 'texture semantic' # (#aiMaterialProperty::mSemantic) for all material properties # # not* related to textures. # aiTextureType_NONE = 0x0 # The texture is combined with the result of the diffuse # lighting equation. # aiTextureType_DIFFUSE = 0x1 # The texture is combined with the result of the specular # lighting equation. # aiTextureType_SPECULAR = 0x2 # The texture is combined with the result of the ambient # lighting equation. # aiTextureType_AMBIENT = 0x3 # The texture is added to the result of the lighting # calculation. It isn't influenced by incoming light. # aiTextureType_EMISSIVE = 0x4 # The texture is a height map. # # By convention, higher gray-scale values stand for # higher elevations from the base height. # aiTextureType_HEIGHT = 0x5 # The texture is a (tangent space) normal-map. # # Again, there are several conventions for tangent-space # normal maps. Assimp does (intentionally) not # distinguish here. # aiTextureType_NORMALS = 0x6 # The texture defines the glossiness of the material. # # The glossiness is in fact the exponent of the specular # (phong) lighting equation. Usually there is a conversion # function defined to map the linear color values in the # texture to a suitable exponent. Have fun. # aiTextureType_SHININESS = 0x7 # The texture defines per-pixel opacity. # # Usually 'white' means opaque and 'black' means # 'transparency'. Or quite the opposite. Have fun. # aiTextureType_OPACITY = 0x8 # Displacement texture # # The exact purpose and format is application-dependent. # Higher color values stand for higher vertex displacements. # aiTextureType_DISPLACEMENT = 0x9 # Lightmap texture (aka Ambient Occlusion) # # Both 'Lightmaps' and dedicated 'ambient occlusion maps' are # covered by this material property. The texture contains a # scaling value for the final color value of a pixel. Its # intensity is not affected by incoming light. # aiTextureType_LIGHTMAP = 0xA # Reflection texture # # Contains the color of a perfect mirror reflection. # Rarely used, almost never for real-time applications. # aiTextureType_REFLECTION = 0xB # Unknown texture # # A texture reference that does not match any of the definitions # above is considered to be 'unknown'. It is still imported # but is excluded from any further postprocessing. # aiTextureType_UNKNOWN = 0xC
2,409
Python
25.777777
65
0.757991
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/__init__.py
from .core import *
20
Python
9.499995
19
0.7
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/core.py
""" PyAssimp This is the main-module of PyAssimp. """ import sys if sys.version_info < (2,6): raise RuntimeError('pyassimp: need python 2.6 or newer') # xrange was renamed range in Python 3 and the original range from Python 2 was removed. # To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up. if sys.version_info >= (3,0): xrange = range try: import numpy except ImportError: numpy = None import logging import ctypes from contextlib import contextmanager logger = logging.getLogger("pyassimp") # attach default null handler to logger so it doesn't complain # even if you don't attach another handler to logger logger.addHandler(logging.NullHandler()) from . import structs from . import helper from . import postprocess from .errors import AssimpError class AssimpLib(object): """ Assimp-Singleton """ load, load_mem, export, export_blob, release, dll = helper.search_library() _assimp_lib = AssimpLib() def make_tuple(ai_obj, type = None): res = None #notes: # ai_obj._fields_ = [ ("attr", c_type), ... ] # getattr(ai_obj, e[0]).__class__ == float if isinstance(ai_obj, structs.Matrix4x4): if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4)) #import pdb;pdb.set_trace() else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] res = [res[i:i+4] for i in xrange(0,16,4)] elif isinstance(ai_obj, structs.Matrix3x3): if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3)) else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] res = [res[i:i+3] for i in xrange(0,9,3)] else: if numpy: res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]) else: res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] return res # Returns unicode object for Python 2, and str object for Python 3. def _convert_assimp_string(assimp_string): if sys.version_info >= (3, 0): return str(assimp_string.data, errors='ignore') else: return unicode(assimp_string.data, errors='ignore') # It is faster and more correct to have an init function for each assimp class def _init_face(aiFace): aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)] assimp_struct_inits = { structs.Face : _init_face } def call_init(obj, caller = None): if helper.hasattr_silent(obj,'contents'): #pointer _init(obj.contents, obj, caller) else: _init(obj,parent=caller) def _is_init_type(obj): if obj and helper.hasattr_silent(obj,'contents'): #pointer return _is_init_type(obj[0]) # null-pointer case that arises when we reach a mesh attribute # like mBitangents which use mNumVertices rather than mNumBitangents # so it breaks the 'is iterable' check. # Basically: # FIXME! elif not bool(obj): return False tname = obj.__class__.__name__ return not (tname[:2] == 'c_' or tname == 'Structure' \ or tname == 'POINTER') and not isinstance(obj, (int, str, bytes)) def _init(self, target = None, parent = None): """ Custom initialize() for C structs, adds safely accessible member functionality. :param target: set the object which receive the added methods. Useful when manipulating pointers, to skip the intermediate 'contents' deferencing. """ if not target: target = self dirself = dir(self) for m in dirself: if m.startswith("_"): continue # We should not be accessing `mPrivate` according to structs.Scene. 
if m == 'mPrivate': continue if m.startswith('mNum'): if 'm' + m[4:] in dirself: continue # will be processed later on else: name = m[1:].lower() obj = getattr(self, m) setattr(target, name, obj) continue if m == 'mName': target.name = str(_convert_assimp_string(self.mName)) target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + getattr(x, 'name','') + ")" target.__class__.__str__ = lambda x: getattr(x, 'name', '') continue name = m[1:].lower() obj = getattr(self, m) # Create tuples if isinstance(obj, structs.assimp_structs_as_tuple): setattr(target, name, make_tuple(obj)) logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower()) continue if m.startswith('m') and len(m) > 1 and m[1].upper() == m[1]: if name == "parent": setattr(target, name, parent) logger.debug("Added a parent as self." + name) continue if helper.hasattr_silent(self, 'mNum' + m[1:]): length = getattr(self, 'mNum' + m[1:]) # -> special case: properties are # stored as a dict. if m == 'mProperties': setattr(target, name, _get_properties(obj, length)) continue if not length: # empty! setattr(target, name, []) logger.debug(str(self) + ": " + name + " is an empty list.") continue try: if obj._type_ in structs.assimp_structs_as_tuple: if numpy: setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32)) logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name) else: setattr(target, name, [make_tuple(obj[i]) for i in range(length)]) logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name) else: setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array? logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")") # initialize array elements try: init = assimp_struct_inits[type(obj[0])] except KeyError: if _is_init_type(obj[0]): for e in getattr(target, name): call_init(e, target) else: for e in getattr(target, name): init(e) except IndexError: logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.") sys.exit(1) except ValueError as e: logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.") if "setting an array element with a sequence" in str(e): logger.error("Note that pyassimp does not currently " "support meshes with mixed triangles " "and quads. Try to load your mesh with" " a post-processing to triangulate your" " faces.") raise e else: # starts with 'm' but not iterable setattr(target, name, obj) logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")") if _is_init_type(obj): call_init(obj, target) if isinstance(self, structs.Mesh): _finalize_mesh(self, target) if isinstance(self, structs.Texture): _finalize_texture(self, target) if isinstance(self, structs.Metadata): _finalize_metadata(self, target) return self def pythonize_assimp(type, obj, scene): """ This method modify the Assimp data structures to make them easier to work with in Python. Supported operations: - MESH: replace a list of mesh IDs by reference to these meshes - ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node. 
:param type: the type of modification to operate (cf above) :param obj: the input object to modify :param scene: a reference to the whole scene """ if type == "MESH": meshes = [] for i in obj: meshes.append(scene.meshes[i]) return meshes if type == "ADDTRANSFORMATION": def getnode(node, name): if node.name == name: return node for child in node.children: n = getnode(child, name) if n: return n node = getnode(scene.rootnode, obj.name) if not node: raise AssimpError("Object " + str(obj) + " has no associated node!") setattr(obj, "transformation", node.transformation) def recur_pythonize(node, scene): ''' Recursively call pythonize_assimp on nodes tree to apply several post-processing to pythonize the assimp datastructures. ''' node.meshes = pythonize_assimp("MESH", node.meshes, scene) for mesh in node.meshes: mesh.material = scene.materials[mesh.materialindex] for cam in scene.cameras: pythonize_assimp("ADDTRANSFORMATION", cam, scene) for c in node.children: recur_pythonize(c, scene) def release(scene): ''' Release resources of a loaded scene. ''' _assimp_lib.release(ctypes.pointer(scene)) @contextmanager def load(filename, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Load a model into a scene. On failure throws AssimpError. Arguments --------- filename: Either a filename or a file object to load model from. If a file object is passed, file_type MUST be specified Otherwise Assimp has no idea which importer to use. This is named 'filename' so as to not break legacy code. processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) file_type: string of file extension, such as 'stl' Returns --------- Scene object with model data ''' if hasattr(filename, 'read'): # This is the case where a file object has been passed to load. # It is calling the following function: # const aiScene* aiImportFileFromMemory(const char* pBuffer, # unsigned int pLength, # unsigned int pFlags, # const char* pHint) if file_type is None: raise AssimpError('File type must be specified when passing file objects!') data = filename.read() model = _assimp_lib.load_mem(data, len(data), processing, file_type) else: # a filename string has been passed model = _assimp_lib.load(filename.encode(sys.getfilesystemencoding()), processing) if not model: raise AssimpError('Could not import file!') scene = _init(model.contents) recur_pythonize(scene.rootnode, scene) try: yield scene finally: release(scene) def export(scene, filename, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Export a scene. On failure throws AssimpError. Arguments --------- scene: scene to export. filename: Filename that the scene should be exported to. file_type: string of file exporter to use. For example "collada". processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. 
Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) ''' exportStatus = _assimp_lib.export(ctypes.pointer(scene), file_type.encode("ascii"), filename.encode(sys.getfilesystemencoding()), processing) if exportStatus != 0: raise AssimpError('Could not export scene!') def export_blob(scene, file_type = None, processing = postprocess.aiProcess_Triangulate): ''' Export a scene and return a blob in the correct format. On failure throws AssimpError. Arguments --------- scene: scene to export. file_type: string of file exporter to use. For example "collada". processing: assimp postprocessing parameters. Verbose keywords are imported from postprocessing, and the parameters can be combined bitwise to generate the final processing value. Note that the default value will triangulate quad faces. Example of generating other possible values: processing = (pyassimp.postprocess.aiProcess_Triangulate | pyassimp.postprocess.aiProcess_OptimizeMeshes) Returns --------- Pointer to structs.ExportDataBlob ''' exportBlobPtr = _assimp_lib.export_blob(ctypes.pointer(scene), file_type.encode("ascii"), processing) if exportBlobPtr == 0: raise AssimpError('Could not export scene to blob!') return exportBlobPtr def _finalize_texture(tex, target): setattr(target, "achformathint", tex.achFormatHint) if numpy: data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]) else: data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)] setattr(target, "data", data) def _finalize_mesh(mesh, target): """ Building of meshes is a bit specific. We override here the various datasets that can not be process as regular fields. For instance, the length of the normals array is mNumVertices (no mNumNormals is available) """ nb_vertices = getattr(mesh, "mNumVertices") def fill(name): mAttr = getattr(mesh, name) if numpy: if mAttr: data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32) setattr(target, name[1:].lower(), data) else: setattr(target, name[1:].lower(), numpy.array([], dtype="float32")) else: if mAttr: data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)] setattr(target, name[1:].lower(), data) else: setattr(target, name[1:].lower(), []) def fillarray(name): mAttr = getattr(mesh, name) data = [] for index, mSubAttr in enumerate(mAttr): if mSubAttr: data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)]) if numpy: setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32)) else: setattr(target, name[1:].lower(), data) fill("mNormals") fill("mTangents") fill("mBitangents") fillarray("mColors") fillarray("mTextureCoords") # prepare faces if numpy: faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32) else: faces = [f.indices for f in target.faces] setattr(target, 'faces', faces) def _init_metadata_entry(entry): entry.type = entry.mType if entry.type == structs.MetadataEntry.AI_BOOL: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_bool)).contents.value elif entry.type == structs.MetadataEntry.AI_INT32: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_int32)).contents.value elif entry.type == structs.MetadataEntry.AI_UINT64: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_uint64)).contents.value elif entry.type == structs.MetadataEntry.AI_FLOAT: entry.data = ctypes.cast(entry.mData, 
ctypes.POINTER(ctypes.c_float)).contents.value elif entry.type == structs.MetadataEntry.AI_DOUBLE: entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_double)).contents.value elif entry.type == structs.MetadataEntry.AI_AISTRING: assimp_string = ctypes.cast(entry.mData, ctypes.POINTER(structs.String)).contents entry.data = _convert_assimp_string(assimp_string) elif entry.type == structs.MetadataEntry.AI_AIVECTOR3D: assimp_vector = ctypes.cast(entry.mData, ctypes.POINTER(structs.Vector3D)).contents entry.data = make_tuple(assimp_vector) return entry def _finalize_metadata(metadata, target): """ Building the metadata object is a bit specific. Firstly, there are two separate arrays: one with metadata keys and one with metadata values, and there are no corresponding mNum* attributes, so the C arrays are not converted to Python arrays using the generic code in the _init function. Secondly, a metadata entry value has to be cast according to declared metadata entry type. """ length = metadata.mNumProperties setattr(target, 'keys', [str(_convert_assimp_string(metadata.mKeys[i])) for i in range(length)]) setattr(target, 'values', [_init_metadata_entry(metadata.mValues[i]) for i in range(length)]) class PropertyGetter(dict): def __getitem__(self, key): semantic = 0 if isinstance(key, tuple): key, semantic = key return dict.__getitem__(self, (key, semantic)) def keys(self): for k in dict.keys(self): yield k[0] def __iter__(self): return self.keys() def items(self): for k, v in dict.items(self): yield k[0], v def _get_properties(properties, length): """ Convenience Function to get the material properties as a dict and values in a python format. """ result = {} #read all properties for p in [properties[i] for i in range(length)]: #the name p = p.contents key = str(_convert_assimp_string(p.mKey)) key = (key.split('.')[1], p.mSemantic) #the data if p.mType == 1: arr = ctypes.cast(p.mData, ctypes.POINTER(ctypes.c_float * int(p.mDataLength/ctypes.sizeof(ctypes.c_float))) ).contents value = [x for x in arr] elif p.mType == 3: #string can't be an array value = _convert_assimp_string(ctypes.cast(p.mData, ctypes.POINTER(structs.MaterialPropertyString)).contents) elif p.mType == 4: arr = ctypes.cast(p.mData, ctypes.POINTER(ctypes.c_int * int(p.mDataLength/ctypes.sizeof(ctypes.c_int))) ).contents value = [x for x in arr] else: value = p.mData[:p.mDataLength] if len(value) == 1: [value] = value result[key] = value return PropertyGetter(result) def decompose_matrix(matrix): if not isinstance(matrix, structs.Matrix4x4): raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!") scaling = structs.Vector3D() rotation = structs.Quaternion() position = structs.Vector3D() _assimp_lib.dll.aiDecomposeMatrix(ctypes.pointer(matrix), ctypes.byref(scaling), ctypes.byref(rotation), ctypes.byref(position)) return scaling._init(), rotation._init(), position._init()
20,821
Python
36.115864
221
0.58369
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/errors.py
#-*- coding: UTF-8 -*- """ All possible errors. """ class AssimpError(BaseException): """ If an internal error occurs. """ pass
146
Python
11.249999
33
0.568493
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/structs.py
#-*- coding: utf-8 -*- from ctypes import POINTER, c_void_p, c_uint, c_char, c_float, Structure, c_double, c_ubyte, c_size_t, c_uint32 class Vector2D(Structure): """ See 'vector2.h' for details. """ _fields_ = [ ("x", c_float),("y", c_float), ] class Matrix3x3(Structure): """ See 'matrix3x3.h' for details. """ _fields_ = [ ("a1", c_float),("a2", c_float),("a3", c_float), ("b1", c_float),("b2", c_float),("b3", c_float), ("c1", c_float),("c2", c_float),("c3", c_float), ] class Texel(Structure): """ See 'texture.h' for details. """ _fields_ = [ ("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte), ] class Color4D(Structure): """ See 'color4.h' for details. """ _fields_ = [ # Red, green, blue and alpha color values ("r", c_float),("g", c_float),("b", c_float),("a", c_float), ] class Plane(Structure): """ See 'types.h' for details. """ _fields_ = [ # Plane equation ("a", c_float),("b", c_float),("c", c_float),("d", c_float), ] class Color3D(Structure): """ See 'types.h' for details. """ _fields_ = [ # Red, green and blue color values ("r", c_float),("g", c_float),("b", c_float), ] class String(Structure): """ See 'types.h' for details. """ MAXLEN = 1024 _fields_ = [ # Binary length of the string excluding the terminal 0. This is NOT the # logical length of strings containing UTF-8 multibyte sequences! It's # the number of bytes from the beginning of the string to its end. ("length", c_uint32), # String buffer. Size limit is MAXLEN ("data", c_char*MAXLEN), ] class MaterialPropertyString(Structure): """ See 'MaterialSystem.cpp' for details. The size of length is truncated to 4 bytes on 64-bit platforms when used as a material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details). """ MAXLEN = 1024 _fields_ = [ # Binary length of the string excluding the terminal 0. This is NOT the # logical length of strings containing UTF-8 multibyte sequences! It's # the number of bytes from the beginning of the string to its end. ("length", c_uint32), # String buffer. Size limit is MAXLEN ("data", c_char*MAXLEN), ] class MemoryInfo(Structure): """ See 'types.h' for details. """ _fields_ = [ # Storage allocated for texture data ("textures", c_uint), # Storage allocated for material data ("materials", c_uint), # Storage allocated for mesh data ("meshes", c_uint), # Storage allocated for node data ("nodes", c_uint), # Storage allocated for animation data ("animations", c_uint), # Storage allocated for camera data ("cameras", c_uint), # Storage allocated for light data ("lights", c_uint), # Total storage allocated for the full import. ("total", c_uint), ] class Quaternion(Structure): """ See 'quaternion.h' for details. """ _fields_ = [ # w,x,y,z components of the quaternion ("w", c_float),("x", c_float),("y", c_float),("z", c_float), ] class Face(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # Number of indices defining this face. # The maximum value for this member is #AI_MAX_FACE_INDICES. ("mNumIndices", c_uint), # Pointer to the indices array. Size of the array is given in numIndices. ("mIndices", POINTER(c_uint)), ] class VertexWeight(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # Index of the vertex which is influenced by the bone. ("mVertexId", c_uint), # The strength of the influence in the range (0...1). # The influence from all bones at one vertex amounts to 1. ("mWeight", c_float), ] class Matrix4x4(Structure): """ See 'matrix4x4.h' for details. 
""" _fields_ = [ ("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float), ("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float), ("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float), ("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float), ] class Vector3D(Structure): """ See 'vector3.h' for details. """ _fields_ = [ ("x", c_float),("y", c_float),("z", c_float), ] class MeshKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # Index into the aiMesh::mAnimMeshes array of the # mesh corresponding to the #aiMeshAnim hosting this # key frame. The referenced anim mesh is evaluated # according to the rules defined in the docs for #aiAnimMesh. ("mValue", c_uint), ] class MetadataEntry(Structure): """ See 'metadata.h' for details """ AI_BOOL = 0 AI_INT32 = 1 AI_UINT64 = 2 AI_FLOAT = 3 AI_DOUBLE = 4 AI_AISTRING = 5 AI_AIVECTOR3D = 6 AI_META_MAX = 7 _fields_ = [ # The type field uniquely identifies the underlying type of the data field ("mType", c_uint), ("mData", c_void_p), ] class Metadata(Structure): """ See 'metadata.h' for details """ _fields_ = [ # Length of the mKeys and mValues arrays, respectively ("mNumProperties", c_uint), # Arrays of keys, may not be NULL. Entries in this array may not be NULL # as well. ("mKeys", POINTER(String)), # Arrays of values, may not be NULL. Entries in this array may be NULL # if the corresponding property key has no assigned value. ("mValues", POINTER(MetadataEntry)), ] class Node(Structure): """ See 'scene.h' for details. """ Node._fields_ = [ # The name of the node. # The name might be empty (length of zero) but all nodes which # need to be accessed afterwards by bones or anims are usually named. # Multiple nodes may have the same name, but nodes which are accessed # by bones (see #aiBone and #aiMesh::mBones) *must* be unique. # Cameras and lights are assigned to a specific node name - if there # are multiple nodes with this name, they're assigned to each of them. # <br> # There are no limitations regarding the characters contained in # this text. You should be able to handle stuff like whitespace, tabs, # linefeeds, quotation marks, ampersands, ... . ("mName", String), # The transformation relative to the node's parent. ("mTransformation", Matrix4x4), # Parent node. NULL if this node is the root node. ("mParent", POINTER(Node)), # The number of child nodes of this node. ("mNumChildren", c_uint), # The child nodes of this node. NULL if mNumChildren is 0. ("mChildren", POINTER(POINTER(Node))), # The number of meshes of this node. ("mNumMeshes", c_uint), # The meshes of this node. Each entry is an index into the mesh ("mMeshes", POINTER(c_uint)), # Metadata associated with this node or NULL if there is no metadata. # Whether any metadata is generated depends on the source file format. ("mMetadata", POINTER(Metadata)), ] class Light(Structure): """ See 'light.h' for details. """ _fields_ = [ # The name of the light source. # There must be a node in the scenegraph with the same name. # This node specifies the position of the light in the scene # hierarchy and can be animated. ("mName", String), # The type of the light source. # aiLightSource_UNDEFINED is not a valid value for this member. ("mType", c_uint), # Position of the light source in space. Relative to the # transformation of the node corresponding to the light. # The position is undefined for directional lights. ("mPosition", Vector3D), # Direction of the light source in space. 
Relative to the # transformation of the node corresponding to the light. # The direction is undefined for point lights. The vector # may be normalized, but it needn't. ("mDirection", Vector3D), # Up direction of the light source in space. Relative to the # transformation of the node corresponding to the light. # # The direction is undefined for point lights. The vector # may be normalized, but it needn't. ("mUp", Vector3D), # Constant light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att0 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationConstant", c_float), # Linear light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att1 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationLinear", c_float), # Quadratic light attenuation factor. # The intensity of the light source at a given distance 'd' from # the light's position is # @code # Atten = 1/( att0 + att1 # d + att2 # d*d) # @endcode # This member corresponds to the att2 variable in the equation. # Naturally undefined for directional lights. ("mAttenuationQuadratic", c_float), # Diffuse color of the light source # The diffuse light color is multiplied with the diffuse # material color to obtain the final color that contributes # to the diffuse shading term. ("mColorDiffuse", Color3D), # Specular color of the light source # The specular light color is multiplied with the specular # material color to obtain the final color that contributes # to the specular shading term. ("mColorSpecular", Color3D), # Ambient color of the light source # The ambient light color is multiplied with the ambient # material color to obtain the final color that contributes # to the ambient shading term. Most renderers will ignore # this value it, is just a remaining of the fixed-function pipeline # that is still supported by quite many file formats. ("mColorAmbient", Color3D), # Inner angle of a spot light's light cone. # The spot light has maximum influence on objects inside this # angle. The angle is given in radians. It is 2PI for point # lights and undefined for directional lights. ("mAngleInnerCone", c_float), # Outer angle of a spot light's light cone. # The spot light does not affect objects outside this angle. # The angle is given in radians. It is 2PI for point lights and # undefined for directional lights. The outer angle must be # greater than or equal to the inner angle. # It is assumed that the application uses a smooth # interpolation between the inner and the outer cone of the # spot light. ("mAngleOuterCone", c_float), # Size of area light source. ("mSize", Vector2D), ] class Texture(Structure): """ See 'texture.h' for details. """ _fields_ = [ # Width of the texture, in pixels # If mHeight is zero the texture is compressed in a format # like JPEG. In this case mWidth specifies the size of the # memory area pcData is pointing to, in bytes. ("mWidth", c_uint), # Height of the texture, in pixels # If this value is zero, pcData points to an compressed texture # in any format (e.g. JPEG). ("mHeight", c_uint), # A hint from the loader to make it easier for applications # to determine the type of embedded textures. 
# # If mHeight != 0 this member is show how data is packed. Hint will consist of # two parts: channel order and channel bitness (count of the bits for every # color channel). For simple parsing by the viewer it's better to not omit # absent color channel and just use 0 for bitness. For example: # 1. Image contain RGBA and 8 bit per channel, achFormatHint == "rgba8888"; # 2. Image contain ARGB and 8 bit per channel, achFormatHint == "argb8888"; # 3. Image contain RGB and 5 bit for R and B channels and 6 bit for G channel, # achFormatHint == "rgba5650"; # 4. One color image with B channel and 1 bit for it, achFormatHint == "rgba0010"; # If mHeight == 0 then achFormatHint is set set to '\\0\\0\\0\\0' if the loader has no additional # information about the texture file format used OR the # file extension of the format without a trailing dot. If there # are multiple file extensions for a format, the shortest # extension is chosen (JPEG maps to 'jpg', not to 'jpeg'). # E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case. # The fourth character will always be '\\0'. ("achFormatHint", c_char*9), # Data of the texture. # Points to an array of mWidth # mHeight aiTexel's. # The format of the texture data is always ARGB8888 to # make the implementation for user of the library as easy # as possible. If mHeight = 0 this is a pointer to a memory # buffer of size mWidth containing the compressed texture # data. Good luck, have fun! ("pcData", POINTER(Texel)), # Texture original filename # Used to get the texture reference ("mFilename", String), ] class Ray(Structure): """ See 'types.h' for details. """ _fields_ = [ # Position and direction of the ray ("pos", Vector3D),("dir", Vector3D), ] class UVTransform(Structure): """ See 'material.h' for details. """ _fields_ = [ # Translation on the u and v axes. # The default value is (0|0). ("mTranslation", Vector2D), # Scaling on the u and v axes. # The default value is (1|1). ("mScaling", Vector2D), # Rotation - in counter-clockwise direction. # The rotation angle is specified in radians. The # rotation center is 0.5f|0.5f. The default value # 0.f. ("mRotation", c_float), ] class MaterialProperty(Structure): """ See 'material.h' for details. """ _fields_ = [ # Specifies the name of the property (key) # Keys are generally case insensitive. ("mKey", String), # Textures: Specifies their exact usage semantic. # For non-texture properties, this member is always 0 # (or, better-said, #aiTextureType_NONE). ("mSemantic", c_uint), # Textures: Specifies the index of the texture. # For non-texture properties, this member is always 0. ("mIndex", c_uint), # Size of the buffer mData is pointing to, in bytes. # This value may not be 0. ("mDataLength", c_uint), # Type information for the property. # Defines the data layout inside the data buffer. This is used # by the library internally to perform debug checks and to # utilize proper type conversions. # (It's probably a hacky solution, but it works.) ("mType", c_uint), # Binary buffer to hold the property's value. # The size of the buffer is always mDataLength. ("mData", POINTER(c_char)), ] class Material(Structure): """ See 'material.h' for details. """ _fields_ = [ # List of all material properties loaded. ("mProperties", POINTER(POINTER(MaterialProperty))), # Number of properties in the data base ("mNumProperties", c_uint), # Storage allocated ("mNumAllocated", c_uint), ] class Bone(Structure): """ See 'mesh.h' for details. """ _fields_ = [ # The name of the bone. 
("mName", String), # The number of vertices affected by this bone # The maximum value for this member is #AI_MAX_BONE_WEIGHTS. ("mNumWeights", c_uint), # The vertices affected by this bone ("mWeights", POINTER(VertexWeight)), # Matrix that transforms from mesh space to bone space in bind pose ("mOffsetMatrix", Matrix4x4), ] class AnimMesh(Structure): """ See 'mesh.h' for details. """ AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 _fields_ = [ # Anim Mesh name ("mName", String), # Replacement for aiMesh::mVertices. If this array is non-NULL, # it *must* contain mNumVertices entries. The corresponding # array in the host mesh must be non-NULL as well - animation # meshes may neither add or nor remove vertex components (if # a replacement array is NULL and the corresponding source # array is not, the source data is taken instead) ("mVertices", POINTER(Vector3D)), # Replacement for aiMesh::mNormals. ("mNormals", POINTER(Vector3D)), # Replacement for aiMesh::mTangents. ("mTangents", POINTER(Vector3D)), # Replacement for aiMesh::mBitangents. ("mBitangents", POINTER(Vector3D)), # Replacement for aiMesh::mColors ("mColors", POINTER(Color4D) * AI_MAX_NUMBER_OF_COLOR_SETS), # Replacement for aiMesh::mTextureCoords ("mTextureCoords", POINTER(Vector3D) * AI_MAX_NUMBER_OF_TEXTURECOORDS), # The number of vertices in the aiAnimMesh, and thus the length of all # the member arrays. # # This has always the same value as the mNumVertices property in the # corresponding aiMesh. It is duplicated here merely to make the length # of the member arrays accessible even if the aiMesh is not known, e.g. # from language bindings. ("mNumVertices", c_uint), # Weight of the AnimMesh. ("mWeight", c_float), ] class Mesh(Structure): """ See 'mesh.h' for details. """ AI_MAX_FACE_INDICES = 0x7fff AI_MAX_BONE_WEIGHTS = 0x7fffffff AI_MAX_VERTICES = 0x7fffffff AI_MAX_FACES = 0x7fffffff AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 _fields_ = [ # Bitwise combination of the members of the #aiPrimitiveType enum. # This specifies which types of primitives are present in the mesh. # The "SortByPrimitiveType"-Step can be used to make sure the # output meshes consist of one primitive type each. ("mPrimitiveTypes", c_uint), # The number of vertices in this mesh. # This is also the size of all of the per-vertex data arrays. # The maximum value for this member is #AI_MAX_VERTICES. ("mNumVertices", c_uint), # The number of primitives (triangles, polygons, lines) in this mesh. # This is also the size of the mFaces array. # The maximum value for this member is #AI_MAX_FACES. ("mNumFaces", c_uint), # Vertex positions. # This array is always present in a mesh. The array is # mNumVertices in size. ("mVertices", POINTER(Vector3D)), # Vertex normals. # The array contains normalized vectors, NULL if not present. # The array is mNumVertices in size. Normals are undefined for # point and line primitives. A mesh consisting of points and # lines only may not have normal vectors. Meshes with mixed # primitive types (i.e. lines and triangles) may have normals, # but the normals for vertices that are only referenced by # point or line primitives are undefined and set to QNaN (WARN: # qNaN compares to inequal to *everything*, even to qNaN itself. # Using code like this to check whether a field is qnan is: # @code #define IS_QNAN(f) (f != f) # @endcode # still dangerous because even 1.f == 1.f could evaluate to false! ( # remember the subtleties of IEEE754 artithmetics). 
Use stuff like # @c fpclassify instead. # @note Normal vectors computed by Assimp are always unit-length. # However, this needn't apply for normals that have been taken # directly from the model file. ("mNormals", POINTER(Vector3D)), # Vertex tangents. # The tangent of a vertex points in the direction of the positive # X texture axis. The array contains normalized vectors, NULL if # not present. The array is mNumVertices in size. A mesh consisting # of points and lines only may not have normal vectors. Meshes with # mixed primitive types (i.e. lines and triangles) may have # normals, but the normals for vertices that are only referenced by # point or line primitives are undefined and set to qNaN. See # the #mNormals member for a detailed discussion of qNaNs. # @note If the mesh contains tangents, it automatically also # contains bitangents (the bitangent is just the cross product of # tangent and normal vectors). ("mTangents", POINTER(Vector3D)), # Vertex bitangents. # The bitangent of a vertex points in the direction of the positive # Y texture axis. The array contains normalized vectors, NULL if not # present. The array is mNumVertices in size. # @note If the mesh contains tangents, it automatically also contains # bitangents. ("mBitangents", POINTER(Vector3D)), # Vertex color sets. # A mesh may contain 0 to #AI_MAX_NUMBER_OF_COLOR_SETS vertex # colors per vertex. NULL if not present. Each array is # mNumVertices in size if present. ("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS), # Vertex texture coords, also known as UV channels. # A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per # vertex. NULL if not present. The array is mNumVertices in size. ("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS), # Specifies the number of components for a given UV channel. # Up to three channels are supported (UVW, for accessing volume # or cube maps). If the value is 2 for a given channel n, the # component p.z of mTextureCoords[n][p] is set to 0.0f. # If the value is 1 for a given channel, p.y is set to 0.0f, too. # @note 4D coords are not supported ("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS), # The faces the mesh is constructed from. # Each face refers to a number of vertices by their indices. # This array is always present in a mesh, its size is given # in mNumFaces. If the #AI_SCENE_FLAGS_NON_VERBOSE_FORMAT # is NOT set each face references an unique set of vertices. ("mFaces", POINTER(Face)), # The number of bones this mesh contains. # Can be 0, in which case the mBones array is NULL. ("mNumBones", c_uint), # The bones of this mesh. # A bone consists of a name by which it can be found in the # frame hierarchy and a set of vertex weights. ("mBones", POINTER(POINTER(Bone))), # The material used by this mesh. # A mesh does use only a single material. If an imported model uses # multiple materials, the import splits up the mesh. Use this value # as index into the scene's material list. ("mMaterialIndex", c_uint), # Name of the mesh. Meshes can be named, but this is not a # requirement and leaving this field empty is totally fine. # There are mainly three uses for mesh names: # - some formats name nodes and meshes independently. # - importers tend to split meshes up to meet the # one-material-per-mesh requirement. Assigning # the same (dummy) name to each of the result meshes # aids the caller at recovering the original mesh # partitioning. # - Vertex animations refer to meshes by their names. ("mName", String), # The number of attachment meshes. 
# Currently known to work with loaders: # - Collada # - gltf ("mNumAnimMeshes", c_uint), # Attachment meshes for this mesh, for vertex-based animation. # Attachment meshes carry replacement data for some of the # mesh'es vertex components (usually positions, normals). # Currently known to work with loaders: # - Collada # - gltf ("mAnimMeshes", POINTER(POINTER(AnimMesh))), # Method of morphing when animeshes are specified. ("mMethod", c_uint), ] class Camera(Structure): """ See 'camera.h' for details. """ _fields_ = [ # The name of the camera. # There must be a node in the scenegraph with the same name. # This node specifies the position of the camera in the scene # hierarchy and can be animated. ("mName", String), # Position of the camera relative to the coordinate space # defined by the corresponding node. # The default value is 0|0|0. ("mPosition", Vector3D), # 'Up' - vector of the camera coordinate system relative to # the coordinate space defined by the corresponding node. # The 'right' vector of the camera coordinate system is # the cross product of the up and lookAt vectors. # The default value is 0|1|0. The vector # may be normalized, but it needn't. ("mUp", Vector3D), # 'LookAt' - vector of the camera coordinate system relative to # the coordinate space defined by the corresponding node. # This is the viewing direction of the user. # The default value is 0|0|1. The vector # may be normalized, but it needn't. ("mLookAt", Vector3D), # Half horizontal field of view angle, in radians. # The field of view angle is the angle between the center # line of the screen and the left or right border. # The default value is 1/4PI. ("mHorizontalFOV", c_float), # Distance of the near clipping plane from the camera. # The value may not be 0.f (for arithmetic reasons to prevent # a division through zero). The default value is 0.1f. ("mClipPlaneNear", c_float), # Distance of the far clipping plane from the camera. # The far clipping plane must, of course, be further away than the # near clipping plane. The default value is 1000.f. The ratio # between the near and the far plane should not be too # large (between 1000-10000 should be ok) to avoid floating-point # inaccuracies which could lead to z-fighting. ("mClipPlaneFar", c_float), # Screen aspect ratio. # This is the ration between the width and the height of the # screen. Typical values are 4/3, 1/2 or 1/1. This value is # 0 if the aspect ratio is not defined in the source file. # 0 is also the default value. ("mAspect", c_float), ] class VectorKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The value of this key ("mValue", Vector3D), ] class QuatKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The value of this key ("mValue", Quaternion), ] class MeshMorphKey(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The time of this key ("mTime", c_double), # The values and weights at the time of this key ("mValues", POINTER(c_uint)), ("mWeights", POINTER(c_double)), # The number of values and weights ("mNumValuesAndWeights", c_uint), ] class NodeAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The name of the node affected by this animation. The node # must exist and it must be unique. ("mNodeName", String), # The number of position keys ("mNumPositionKeys", c_uint), # The position keys of this animation channel. Positions are # specified as 3D vector. The array is mNumPositionKeys in size. 
# If there are position keys, there will also be at least one # scaling and one rotation key. ("mPositionKeys", POINTER(VectorKey)), # The number of rotation keys ("mNumRotationKeys", c_uint), # The rotation keys of this animation channel. Rotations are # given as quaternions, which are 4D vectors. The array is # mNumRotationKeys in size. # If there are rotation keys, there will also be at least one # scaling and one position key. ("mRotationKeys", POINTER(QuatKey)), # The number of scaling keys ("mNumScalingKeys", c_uint), # The scaling keys of this animation channel. Scalings are # specified as 3D vector. The array is mNumScalingKeys in size. # If there are scaling keys, there will also be at least one # position and one rotation key. ("mScalingKeys", POINTER(VectorKey)), # Defines how the animation behaves before the first # key is encountered. # The default value is aiAnimBehaviour_DEFAULT (the original # transformation matrix of the affected node is used). ("mPreState", c_uint), # Defines how the animation behaves after the last # key was processed. # The default value is aiAnimBehaviour_DEFAULT (the original # transformation matrix of the affected node is taken). ("mPostState", c_uint), ] class MeshAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # Name of the mesh to be animated. An empty string is not allowed, # animated meshes need to be named (not necessarily uniquely, # the name can basically serve as wild-card to select a group # of meshes with similar animation setup) ("mName", String), # Size of the #mKeys array. Must be 1, at least. ("mNumKeys", c_uint), # Key frames of the animation. May not be NULL. ("mKeys", POINTER(MeshKey)), ] class MeshMorphAnim(Structure): """ See 'anim.h' for details. """ _fields_ = [ # Name of the mesh to be animated. An empty string is not allowed, # animated meshes need to be named (not necessarily uniquely, # the name can basically serve as wildcard to select a group # of meshes with similar animation setup) ("mName", String), # Size of the #mKeys array. Must be 1, at least. ("mNumKeys", c_uint), # Key frames of the animation. May not be NULL. ("mKeys", POINTER(MeshMorphKey)), ] class Animation(Structure): """ See 'anim.h' for details. """ _fields_ = [ # The name of the animation. If the modeling package this data was # exported from does support only a single animation channel, this # name is usually empty (length is zero). ("mName", String), # Duration of the animation in ticks. ("mDuration", c_double), # Ticks per second. 0 if not specified in the imported file ("mTicksPerSecond", c_double), # The number of bone animation channels. Each channel affects # a single node. ("mNumChannels", c_uint), # The node animation channels. Each channel affects a single node. # The array is mNumChannels in size. ("mChannels", POINTER(POINTER(NodeAnim))), # The number of mesh animation channels. Each channel affects # a single mesh and defines vertex-based animation. ("mNumMeshChannels", c_uint), # The mesh animation channels. Each channel affects a single mesh. # The array is mNumMeshChannels in size. ("mMeshChannels", POINTER(POINTER(MeshAnim))), # The number of mesh animation channels. Each channel affects # a single mesh and defines morphing animation. ("mNumMorphMeshChannels", c_uint), # The morph mesh animation channels. Each channel affects a single mesh. # The array is mNumMorphMeshChannels in size. ("mMorphMeshChannels", POINTER(POINTER(MeshMorphAnim))), ] class ExportDataBlob(Structure): """ See 'cexport.h' for details. 
Note that the '_fields_' definition is outside the class to allow the 'next' field to be recursive """ pass ExportDataBlob._fields_ = [ # Size of the data in bytes ("size", c_size_t), # The data. ("data", c_void_p), # Name of the blob. An empty string always # indicates the first (and primary) blob, # which contains the actual file data. # Any other blobs are auxiliary files produced # by exporters (i.e. material files). Existence # of such files depends on the file format. Most # formats don't split assets across multiple files. # # If used, blob names usually contain the file # extension that should be used when writing # the data to disc. ("name", String), # Pointer to the next blob in the chain or NULL if there is none. ("next", POINTER(ExportDataBlob)), ] class Scene(Structure): """ See 'aiScene.h' for details. """ AI_SCENE_FLAGS_INCOMPLETE = 0x1 AI_SCENE_FLAGS_VALIDATED = 0x2 AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4 AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8 AI_SCENE_FLAGS_TERRAIN = 0x10 AI_SCENE_FLAGS_ALLOW_SHARED = 0x20 _fields_ = [ # Any combination of the AI_SCENE_FLAGS_XXX flags. By default # this value is 0, no flags are set. Most applications will # want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE # bit set. ("mFlags", c_uint), # The root node of the hierarchy. # There will always be at least the root node if the import # was successful (and no special flags have been set). # Presence of further nodes depends on the format and content # of the imported file. ("mRootNode", POINTER(Node)), # The number of meshes in the scene. ("mNumMeshes", c_uint), # The array of meshes. # Use the indices given in the aiNode structure to access # this array. The array is mNumMeshes in size. If the # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always # be at least ONE material. ("mMeshes", POINTER(POINTER(Mesh))), # The number of materials in the scene. ("mNumMaterials", c_uint), # The array of materials. # Use the index given in each aiMesh structure to access this # array. The array is mNumMaterials in size. If the # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always # be at least ONE material. ("mMaterials", POINTER(POINTER(Material))), # The number of animations in the scene. ("mNumAnimations", c_uint), # The array of animations. # All animations imported from the given file are listed here. # The array is mNumAnimations in size. ("mAnimations", POINTER(POINTER(Animation))), # The number of textures embedded into the file ("mNumTextures", c_uint), # The array of embedded textures. # Not many file formats embed their textures into the file. # An example is Quake's MDL format (which is also used by # some GameStudio versions) ("mTextures", POINTER(POINTER(Texture))), # The number of light sources in the scene. Light sources # are fully optional, in most cases this attribute will be 0 ("mNumLights", c_uint), # The array of light sources. # All light sources imported from the given file are # listed here. The array is mNumLights in size. ("mLights", POINTER(POINTER(Light))), # The number of cameras in the scene. Cameras # are fully optional, in most cases this attribute will be 0 ("mNumCameras", c_uint), # The array of cameras. # All cameras imported from the given file are listed here. # The array is mNumCameras in size. The first camera in the # array (if existing) is the default camera view into # the scene. 
("mCameras", POINTER(POINTER(Camera))), # This data contains global metadata which belongs to the scene like # unit-conversions, versions, vendors or other model-specific data. This # can be used to store format-specific metadata as well. ("mMetadata", POINTER(Metadata)), # Internal data, do not touch ("mPrivate", POINTER(c_char)), ] assimp_structs_as_tuple = (Matrix4x4, Matrix3x3, Vector2D, Vector3D, Color3D, Color4D, Quaternion, Plane, Texel)
41,444
Python
35.3234
111
0.56136
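The structs above mirror assimp's C scene graph one field at a time, which is why nearly every member is a POINTER that has to be dereferenced through .contents. As a rough sketch only (it assumes a POINTER(Scene) already obtained from the aiImportFile binding that helper.py resolves; the count_vertices helper itself is hypothetical and not part of this repository), traversal looks like this:

def count_vertices(scene_ptr) -> int:
    """Sum mNumVertices over every mesh referenced by a ctypes POINTER(Scene)."""
    scene = scene_ptr.contents                # dereference POINTER(Scene)
    total = 0
    for i in range(scene.mNumMeshes):
        mesh = scene.mMeshes[i].contents      # POINTER(POINTER(Mesh)) -> Mesh
        total += mesh.mNumVertices
    return total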
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/postprocess.py
# <hr>Calculates the tangents and bitangents for the imported meshes. # # Does nothing if a mesh does not have normals. You might want this post # processing step to be executed if you plan to use tangent space calculations # such as normal mapping applied to the meshes. There's a config setting, # <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify # a maximum smoothing angle for the algorithm. However, usually you'll # want to leave it at the default value. # aiProcess_CalcTangentSpace = 0x1 ## <hr>Identifies and joins identical vertex data sets within all # imported meshes. # # After this step is run, each mesh contains unique vertices, # so a vertex may be used by multiple faces. You usually want # to use this post processing step. If your application deals with # indexed geometry, this step is compulsory or you'll just waste rendering # time. <b>If this flag is not specified<b>, no vertices are referenced by # more than one face and <b>no index buffer is required<b> for rendering. # aiProcess_JoinIdenticalVertices = 0x2 ## <hr>Converts all the imported data to a left-handed coordinate space. # # By default the data is returned in a right-handed coordinate space (which # OpenGL prefers). In this space, +X points to the right, # +Z points towards the viewer, and +Y points upwards. In the DirectX # coordinate space +X points to the right, +Y points upwards, and +Z points # away from the viewer. # # You'll probably want to consider this flag if you use Direct3D for # rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this # setting and bundles all conversions typically required for D3D-based # applications. # aiProcess_MakeLeftHanded = 0x4 ## <hr>Triangulates all faces of all meshes. # # By default the imported mesh data might contain faces with more than 3 # indices. For rendering you'll usually want all faces to be triangles. # This post processing step splits up faces with more than 3 indices into # triangles. Line and point primitives are #not# modified! If you want # 'triangles only' with no other kinds of primitives, try the following # solution: # <ul> # <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li> # <li>Ignore all point and line meshes when you process assimp's output<li> # <ul> # aiProcess_Triangulate = 0x8 ## <hr>Removes some parts of the data structure (animations, materials, # light sources, cameras, textures, vertex components). # # The components to be removed are specified in a separate # configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful # if you don't need all parts of the output structure. Vertex colors # are rarely used today for example... Calling this step to remove unneeded # data from the pipeline as early as possible results in increased # performance and a more optimized output data structure. # This step is also useful if you want to force Assimp to recompute # normals or tangents. The corresponding steps don't recompute them if # they're already there (loaded from the source asset). By using this # step you can make sure they are NOT there. # # This flag is a poor one, mainly because its purpose is usually # misunderstood. Consider the following case: a 3D model has been exported # from a CAD app, and it has per-face vertex colors. Vertex positions can't be # shared, thus the #aiProcess_JoinIdenticalVertices step fails to # optimize the data because of these nasty little vertex colors. # Most apps don't even process them, so it's all for nothing. 
By using # this step, unneeded components are excluded as early as possible # thus opening more room for internal optimizations. # aiProcess_RemoveComponent = 0x10 ## <hr>Generates normals for all faces of all meshes. # # This is ignored if normals are already there at the time this flag # is evaluated. Model importers try to load them from the source file, so # they're usually already there. Face normals are shared between all points # of a single face, so a single point can have multiple normals, which # forces the library to duplicate vertices in some cases. # #aiProcess_JoinIdenticalVertices is #senseless# then. # # This flag may not be specified together with #aiProcess_GenSmoothNormals. # aiProcess_GenNormals = 0x20 ## <hr>Generates smooth normals for all vertices in the mesh. # # This is ignored if normals are already there at the time this flag # is evaluated. Model importers try to load them from the source file, so # they're usually already there. # # This flag may not be specified together with # #aiProcess_GenNormals. There's a configuration option, # <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify # an angle maximum for the normal smoothing algorithm. Normals exceeding # this limit are not smoothed, resulting in a 'hard' seam between two faces. # Using a decent angle here (e.g. 80 degrees) results in very good visual # appearance. # aiProcess_GenSmoothNormals = 0x40 ## <hr>Splits large meshes into smaller sub-meshes. # # This is quite useful for real-time rendering, where the number of triangles # which can be maximally processed in a single draw-call is limited # by the video driverhardware. The maximum vertex buffer is usually limited # too. Both requirements can be met with this step: you may specify both a # triangle and vertex limit for a single mesh. # # The split limits can (and should!) be set through the # <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt> # settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and # <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>. # # Note that splitting is generally a time-consuming task, but only if there's # something to split. The use of this step is recommended for most users. # aiProcess_SplitLargeMeshes = 0x80 ## <hr>Removes the node graph and pre-transforms all vertices with # the local transformation matrices of their nodes. # # The output scene still contains nodes, however there is only a # root node with children, each one referencing only one mesh, # and each mesh referencing one material. For rendering, you can # simply render all meshes in order - you don't need to pay # attention to local transformations and the node hierarchy. # Animations are removed during this step. # This step is intended for applications without a scenegraph. # The step CAN cause some problems: if e.g. a mesh of the asset # contains normals and another, using the same material index, does not, # they will be brought together, but the first meshes's part of # the normal list is zeroed. However, these artifacts are rare. # @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property # can be set to normalize the scene's spatial dimension to the -1...1 # range. # aiProcess_PreTransformVertices = 0x100 ## <hr>Limits the number of bones simultaneously affecting a single vertex # to a maximum value. 
# # If any vertex is affected by more than the maximum number of bones, the least # important vertex weights are removed and the remaining vertex weights are # renormalized so that the weights still sum up to 1. # The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in # config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to # supply your own limit to the post processing step. # # If you intend to perform the skinning in hardware, this post processing # step might be of interest to you. # aiProcess_LimitBoneWeights = 0x200 ## <hr>Validates the imported scene data structure. # This makes sure that all indices are valid, all animations and # bones are linked correctly, all material references are correct .. etc. # # It is recommended that you capture Assimp's log output if you use this flag, # so you can easily find out what's wrong if a file fails the # validation. The validator is quite strict and will find #all# # inconsistencies in the data structure... It is recommended that plugin # developers use it to debug their loaders. There are two types of # validation failures: # <ul> # <li>Error: There's something wrong with the imported data. Further # postprocessing is not possible and the data is not usable at all. # The import fails. #Importer::GetErrorString() or #aiGetErrorString() # carry the error message around.<li> # <li>Warning: There are some minor issues (e.g. 1000000 animation # keyframes with the same time), but further postprocessing and use # of the data structure is still safe. Warning details are written # to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING<tt> is set # in #aiScene::mFlags<li> # <ul> # # This post-processing step is not time-consuming. Its use is not # compulsory, but recommended. # aiProcess_ValidateDataStructure = 0x400 ## <hr>Reorders triangles for better vertex cache locality. # # The step tries to improve the ACMR (average post-transform vertex cache # miss ratio) for all meshes. The implementation runs in O(n) and is # roughly based on the 'tipsify' algorithm (see <a href=" # http:www.cs.princeton.edugfxpubsSander_2007_%3ETRtipsy.pdf">this # paper<a>). # # If you intend to render huge models in hardware, this step might # be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE<tt>config # setting can be used to fine-tune the cache optimization. # aiProcess_ImproveCacheLocality = 0x800 ## <hr>Searches for redundantunreferenced materials and removes them. # # This is especially useful in combination with the # #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags. # Both join small meshes with equal characteristics, but they can't do # their work if two meshes have different materials. Because several # material settings are lost during Assimp's import filters, # (and because many exporters don't check for redundant materials), huge # models often have materials which are are defined several times with # exactly the same settings. # # Several material settings not contributing to the final appearance of # a surface are ignored in all comparisons (e.g. the material name). # So, if you're passing additional information through the # content pipeline (probably using #magic# material names), don't # specify this flag. Alternatively take a look at the # <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST<tt> setting. # aiProcess_RemoveRedundantMaterials = 0x1000 ## <hr>This step tries to determine which meshes have normal vectors # that are facing inwards and inverts them. 
# # The algorithm is simple but effective: # the bounding box of all vertices + their normals is compared against # the volume of the bounding box of all vertices without their normals. # This works well for most objects, problems might occur with planar # surfaces. However, the step tries to filter such cases. # The step inverts all in-facing normals. Generally it is recommended # to enable this step, although the result is not always correct. # aiProcess_FixInfacingNormals = 0x2000 ## <hr>This step splits meshes with more than one primitive type in # homogeneous sub-meshes. # # The step is executed after the triangulation step. After the step # returns, just one bit is set in aiMesh::mPrimitiveTypes. This is # especially useful for real-time rendering where point and line # primitives are often ignored or rendered separately. # You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE<tt> option to specify which # primitive types you need. This can be used to easily exclude # lines and points, which are rarely used, from the import. # aiProcess_SortByPType = 0x8000 ## <hr>This step searches all meshes for degenerate primitives and # converts them to proper lines or points. # # A face is 'degenerate' if one or more of its points are identical. # To have the degenerate stuff not only detected and collapsed but # removed, try one of the following procedures: # <br><b>1.<b> (if you support lines and points for rendering but don't # want the degenerates)<br> # <ul> # <li>Specify the #aiProcess_FindDegenerates flag. # <li> # <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will # cause the step to remove degenerate triangles from the import # as soon as they're detected. They won't pass any further # pipeline steps. # <li> # <ul> # <br><b>2.<b>(if you don't support lines and points at all)<br> # <ul> # <li>Specify the #aiProcess_FindDegenerates flag. # <li> # <li>Specify the #aiProcess_SortByPType flag. This moves line and # point primitives to separate meshes. # <li> # <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to # @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES # @endcode to cause SortByPType to reject point # and line meshes from the scene. # <li> # <ul> # @note Degenerate polygons are not necessarily evil and that's why # they're not removed by default. There are several file formats which # don't support lines or points, and some exporters bypass the # format specification and write them as degenerate triangles instead. # aiProcess_FindDegenerates = 0x10000 ## <hr>This step searches all meshes for invalid data, such as zeroed # normal vectors or invalid UV coords and removesfixes them. This is # intended to get rid of some common exporter errors. # # This is especially useful for normals. If they are invalid, and # the step recognizes this, they will be removed and can later # be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br> # The step will also remove meshes that are infinitely small and reduce # animation tracks consisting of hundreds if redundant keys to a single # key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides # the accuracy of the check for duplicate animation tracks. # aiProcess_FindInvalidData = 0x20000 ## <hr>This step converts non-UV mappings (such as spherical or # cylindrical mapping) to proper texture coordinate channels. # # Most applications will support UV mapping only, so you will # probably want to specify this step in every case. 
Note that Assimp is not # always able to match the original mapping implementation of the # 3D app which produced a model perfectly. It's always better to let the # modelling app compute the UV channels - 3ds max, Maya, Blender, # LightWave, and Modo do this for example. # # @note If this step is not requested, you'll need to process the # <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets # properly. # aiProcess_GenUVCoords = 0x40000 ## <hr>This step applies per-texture UV transformations and bakes # them into stand-alone vtexture coordinate channels. # # UV transformations are specified per-texture - see the # <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information. # This step processes all textures with # transformed input UV coordinates and generates a new (pre-transformed) UV channel # which replaces the old channel. Most applications won't support UV # transformations, so you will probably want to specify this step. # # @note UV transformations are usually implemented in real-time apps by # transforming texture coordinates at vertex shader stage with a 3x3 # (homogenous) transformation matrix. # aiProcess_TransformUVCoords = 0x80000 ## <hr>This step searches for duplicate meshes and replaces them # with references to the first mesh. # # This step takes a while, so don't use it if speed is a concern. # Its main purpose is to workaround the fact that many export # file formats don't support instanced meshes, so exporters need to # duplicate meshes. This step removes the duplicates again. Please # note that Assimp does not currently support per-node material # assignment to meshes, which means that identical meshes with # different materials are currently #not# joined, although this is # planned for future versions. # aiProcess_FindInstances = 0x100000 ## <hr>A postprocessing step to reduce the number of meshes. # # This will, in fact, reduce the number of draw calls. # # This is a very effective optimization and is recommended to be used # together with #aiProcess_OptimizeGraph, if possible. The flag is fully # compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType. # aiProcess_OptimizeMeshes = 0x200000 ## <hr>A postprocessing step to optimize the scene hierarchy. # # Nodes without animations, bones, lights or cameras assigned are # collapsed and joined. # # Node names can be lost during this step. If you use special 'tag nodes' # to pass additional information through your content pipeline, use the # <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node # names you want to be kept. Nodes matching one of the names in this list won't # be touched or modified. # # Use this flag with caution. Most simple files will be collapsed to a # single node, so complex hierarchies are usually completely lost. This is not # useful for editor environments, but probably a very effective # optimization if you just want to get the model data, convert it to your # own format, and render it as fast as possible. # # This flag is designed to be used with #aiProcess_OptimizeMeshes for best # results. # # @note 'Crappy' scenes with thousands of extremely small meshes packed # in deeply nested nodes exist for almost all file formats. # #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph # usually fixes them all and makes them renderable. # aiProcess_OptimizeGraph = 0x400000 ## <hr>This step flips all UV coordinates along the y-axis and adjusts # material settings and bitangents accordingly. 
# # <b>Output UV coordinate system:<b> # @code # 0y|0y ---------- 1x|0y # | | # | | # | | # 0x|1y ---------- 1x|1y # @endcode # # You'll probably want to consider this flag if you use Direct3D for # rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this # setting and bundles all conversions typically required for D3D-based # applications. # aiProcess_FlipUVs = 0x800000 ## <hr>This step adjusts the output face winding order to be CW. # # The default face winding order is counter clockwise (CCW). # # <b>Output face order:<b> # @code # x2 # # x0 # x1 # @endcode # aiProcess_FlipWindingOrder = 0x1000000 ## <hr>This step splits meshes with many bones into sub-meshes so that each # su-bmesh has fewer or as many bones as a given limit. # aiProcess_SplitByBoneCount = 0x2000000 ## <hr>This step removes bones losslessly or according to some threshold. # # In some cases (i.e. formats that require it) exporters are forced to # assign dummy bone weights to otherwise static meshes assigned to # animated meshes. Full, weight-based skinning is expensive while # animating nodes is extremely cheap, so this step is offered to clean up # the data in that regard. # # Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this. # Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and # only if all bones within the scene qualify for removal. # aiProcess_Debone = 0x4000000 aiProcess_GenEntityMeshes = 0x100000 aiProcess_OptimizeAnimations = 0x200000 aiProcess_FixTexturePaths = 0x200000 aiProcess_EmbedTextures = 0x10000000, ## @def aiProcess_ConvertToLeftHanded # @brief Shortcut flag for Direct3D-based applications. # # Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and # #aiProcess_FlipWindingOrder flags. # The output data matches Direct3D's conventions: left-handed geometry, upper-left # origin for UV coordinates and finally clockwise face order, suitable for CCW culling. # # @deprecated # aiProcess_ConvertToLeftHanded = ( \ aiProcess_MakeLeftHanded | \ aiProcess_FlipUVs | \ aiProcess_FlipWindingOrder | \ 0 ) ## @def aiProcessPreset_TargetRealtimeUse_Fast # @brief Default postprocess configuration optimizing the data for real-time rendering. # # Applications would want to use this preset to load models on end-user PCs, # maybe for direct use in game. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations # in your application apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be of # use for you so it might be better to not specify them. # aiProcessPreset_TargetRealtime_Fast = ( \ aiProcess_CalcTangentSpace | \ aiProcess_GenNormals | \ aiProcess_JoinIdenticalVertices | \ aiProcess_Triangulate | \ aiProcess_GenUVCoords | \ aiProcess_SortByPType | \ 0 ) ## @def aiProcessPreset_TargetRealtime_Quality # @brief Default postprocess configuration optimizing the data for real-time rendering. # # Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration # performs some extra optimizations to improve rendering speed and # to minimize memory usage. It could be a good choice for a level editor # environment where import speed is not so important. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step. 
If you don't support UV transformations # in your application apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be # of use for you so it might be better to not specify them. # aiProcessPreset_TargetRealtime_Quality = ( \ aiProcess_CalcTangentSpace | \ aiProcess_GenSmoothNormals | \ aiProcess_JoinIdenticalVertices | \ aiProcess_ImproveCacheLocality | \ aiProcess_LimitBoneWeights | \ aiProcess_RemoveRedundantMaterials | \ aiProcess_SplitLargeMeshes | \ aiProcess_Triangulate | \ aiProcess_GenUVCoords | \ aiProcess_SortByPType | \ aiProcess_FindDegenerates | \ aiProcess_FindInvalidData | \ 0 ) ## @def aiProcessPreset_TargetRealtime_MaxQuality # @brief Default postprocess configuration optimizing the data for real-time rendering. # # This preset enables almost every optimization step to achieve perfectly # optimized data. It's your choice for level editor environments where import speed # is not important. # # If you're using DirectX, don't forget to combine this value with # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations # in your application, apply the #aiProcess_TransformUVCoords step, too. # @note Please take the time to read the docs for the steps enabled by this preset. # Some of them offer further configurable properties, while some of them might not be # of use for you so it might be better to not specify them. # aiProcessPreset_TargetRealtime_MaxQuality = ( \ aiProcessPreset_TargetRealtime_Quality | \ aiProcess_FindInstances | \ aiProcess_ValidateDataStructure | \ aiProcess_OptimizeMeshes | \ 0 )
23,548
Python
43.348399
90
0.741422
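These post-process flags are plain integers meant to be OR-ed together and passed to the importer. A hedged sketch of typical pyassimp usage follows (the file name is a placeholder, and the top-level "pyassimp" import path is an assumption; inside this extension the vendored copy lives under mf.ov.gdtf.pyassimp):

from pyassimp import load, release
from pyassimp import postprocess as pp

flags = (pp.aiProcess_Triangulate
         | pp.aiProcess_JoinIdenticalVertices
         | pp.aiProcess_GenSmoothNormals)

scene = load("fixture_body.3ds", processing=flags)   # placeholder file name
try:
    print(len(scene.meshes), "meshes after post-processing")
finally:
    release(scene)                                    # free the C-side scene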
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/mf/ov/gdtf/pyassimp/helper.py
#-*- coding: UTF-8 -*- """ Some fancy helper functions. """ import os import ctypes import operator from distutils.sysconfig import get_python_lib import re import sys try: import numpy except ImportError: numpy = None import logging;logger = logging.getLogger("pyassimp") from .errors import AssimpError additional_dirs, ext_whitelist = [],[] # populate search directories and lists of allowed file extensions # depending on the platform we're running on. if os.name=='posix': additional_dirs.append('./') additional_dirs.append('/usr/lib/') additional_dirs.append('/usr/lib/x86_64-linux-gnu/') additional_dirs.append('/usr/lib/aarch64-linux-gnu/') additional_dirs.append('/usr/local/lib/') if 'LD_LIBRARY_PATH' in os.environ: additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item]) # check if running from anaconda. anaconda_keywords = ("conda", "continuum") if any(k in sys.version.lower() for k in anaconda_keywords): cur_path = get_python_lib() pattern = re.compile('.*\/lib\/') conda_lib = pattern.match(cur_path).group() logger.info("Adding Anaconda lib path:"+ conda_lib) additional_dirs.append(conda_lib) # note - this won't catch libassimp.so.N.n, but # currently there's always a symlink called # libassimp.so in /usr/local/lib. ext_whitelist.append('.so') # libassimp.dylib in /usr/local/lib ext_whitelist.append('.dylib') elif os.name=='nt': ext_whitelist.append('.dll') path_dirs = os.environ['PATH'].split(';') additional_dirs.extend(path_dirs) def vec2tuple(x): """ Converts a VECTOR3D to a Tuple """ return (x.x, x.y, x.z) def transform(vector3, matrix4x4): """ Apply a transformation matrix on a 3D vector. :param vector3: array with 3 elements :param matrix4x4: 4x4 matrix """ if numpy: return numpy.dot(matrix4x4, numpy.append(vector3, 1.)) else: m0,m1,m2,m3 = matrix4x4; x,y,z = vector3 return [ m0[0]*x + m0[1]*y + m0[2]*z + m0[3], m1[0]*x + m1[1]*y + m1[2]*z + m1[3], m2[0]*x + m2[1]*y + m2[2]*z + m2[3], m3[0]*x + m3[1]*y + m3[2]*z + m3[3] ] def _inv(matrix4x4): m0,m1,m2,m3 = matrix4x4 det = m0[3]*m1[2]*m2[1]*m3[0] - m0[2]*m1[3]*m2[1]*m3[0] - \ m0[3]*m1[1]*m2[2]*m3[0] + m0[1]*m1[3]*m2[2]*m3[0] + \ m0[2]*m1[1]*m2[3]*m3[0] - m0[1]*m1[2]*m2[3]*m3[0] - \ m0[3]*m1[2]*m2[0]*m3[1] + m0[2]*m1[3]*m2[0]*m3[1] + \ m0[3]*m1[0]*m2[2]*m3[1] - m0[0]*m1[3]*m2[2]*m3[1] - \ m0[2]*m1[0]*m2[3]*m3[1] + m0[0]*m1[2]*m2[3]*m3[1] + \ m0[3]*m1[1]*m2[0]*m3[2] - m0[1]*m1[3]*m2[0]*m3[2] - \ m0[3]*m1[0]*m2[1]*m3[2] + m0[0]*m1[3]*m2[1]*m3[2] + \ m0[1]*m1[0]*m2[3]*m3[2] - m0[0]*m1[1]*m2[3]*m3[2] - \ m0[2]*m1[1]*m2[0]*m3[3] + m0[1]*m1[2]*m2[0]*m3[3] + \ m0[2]*m1[0]*m2[1]*m3[3] - m0[0]*m1[2]*m2[1]*m3[3] - \ m0[1]*m1[0]*m2[2]*m3[3] + m0[0]*m1[1]*m2[2]*m3[3] return[[( m1[2]*m2[3]*m3[1] - m1[3]*m2[2]*m3[1] + m1[3]*m2[1]*m3[2] - m1[1]*m2[3]*m3[2] - m1[2]*m2[1]*m3[3] + m1[1]*m2[2]*m3[3]) /det, ( m0[3]*m2[2]*m3[1] - m0[2]*m2[3]*m3[1] - m0[3]*m2[1]*m3[2] + m0[1]*m2[3]*m3[2] + m0[2]*m2[1]*m3[3] - m0[1]*m2[2]*m3[3]) /det, ( m0[2]*m1[3]*m3[1] - m0[3]*m1[2]*m3[1] + m0[3]*m1[1]*m3[2] - m0[1]*m1[3]*m3[2] - m0[2]*m1[1]*m3[3] + m0[1]*m1[2]*m3[3]) /det, ( m0[3]*m1[2]*m2[1] - m0[2]*m1[3]*m2[1] - m0[3]*m1[1]*m2[2] + m0[1]*m1[3]*m2[2] + m0[2]*m1[1]*m2[3] - m0[1]*m1[2]*m2[3]) /det], [( m1[3]*m2[2]*m3[0] - m1[2]*m2[3]*m3[0] - m1[3]*m2[0]*m3[2] + m1[0]*m2[3]*m3[2] + m1[2]*m2[0]*m3[3] - m1[0]*m2[2]*m3[3]) /det, ( m0[2]*m2[3]*m3[0] - m0[3]*m2[2]*m3[0] + m0[3]*m2[0]*m3[2] - m0[0]*m2[3]*m3[2] - m0[2]*m2[0]*m3[3] + m0[0]*m2[2]*m3[3]) /det, ( m0[3]*m1[2]*m3[0] - m0[2]*m1[3]*m3[0] - m0[3]*m1[0]*m3[2] + m0[0]*m1[3]*m3[2] + 
m0[2]*m1[0]*m3[3] - m0[0]*m1[2]*m3[3]) /det, ( m0[2]*m1[3]*m2[0] - m0[3]*m1[2]*m2[0] + m0[3]*m1[0]*m2[2] - m0[0]*m1[3]*m2[2] - m0[2]*m1[0]*m2[3] + m0[0]*m1[2]*m2[3]) /det], [( m1[1]*m2[3]*m3[0] - m1[3]*m2[1]*m3[0] + m1[3]*m2[0]*m3[1] - m1[0]*m2[3]*m3[1] - m1[1]*m2[0]*m3[3] + m1[0]*m2[1]*m3[3]) /det, ( m0[3]*m2[1]*m3[0] - m0[1]*m2[3]*m3[0] - m0[3]*m2[0]*m3[1] + m0[0]*m2[3]*m3[1] + m0[1]*m2[0]*m3[3] - m0[0]*m2[1]*m3[3]) /det, ( m0[1]*m1[3]*m3[0] - m0[3]*m1[1]*m3[0] + m0[3]*m1[0]*m3[1] - m0[0]*m1[3]*m3[1] - m0[1]*m1[0]*m3[3] + m0[0]*m1[1]*m3[3]) /det, ( m0[3]*m1[1]*m2[0] - m0[1]*m1[3]*m2[0] - m0[3]*m1[0]*m2[1] + m0[0]*m1[3]*m2[1] + m0[1]*m1[0]*m2[3] - m0[0]*m1[1]*m2[3]) /det], [( m1[2]*m2[1]*m3[0] - m1[1]*m2[2]*m3[0] - m1[2]*m2[0]*m3[1] + m1[0]*m2[2]*m3[1] + m1[1]*m2[0]*m3[2] - m1[0]*m2[1]*m3[2]) /det, ( m0[1]*m2[2]*m3[0] - m0[2]*m2[1]*m3[0] + m0[2]*m2[0]*m3[1] - m0[0]*m2[2]*m3[1] - m0[1]*m2[0]*m3[2] + m0[0]*m2[1]*m3[2]) /det, ( m0[2]*m1[1]*m3[0] - m0[1]*m1[2]*m3[0] - m0[2]*m1[0]*m3[1] + m0[0]*m1[2]*m3[1] + m0[1]*m1[0]*m3[2] - m0[0]*m1[1]*m3[2]) /det, ( m0[1]*m1[2]*m2[0] - m0[2]*m1[1]*m2[0] + m0[2]*m1[0]*m2[1] - m0[0]*m1[2]*m2[1] - m0[1]*m1[0]*m2[2] + m0[0]*m1[1]*m2[2]) /det]] def get_bounding_box(scene): bb_min = [1e10, 1e10, 1e10] # x,y,z bb_max = [-1e10, -1e10, -1e10] # x,y,z inv = numpy.linalg.inv if numpy else _inv return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, inv(scene.rootnode.transformation)) def get_bounding_box_for_node(node, bb_min, bb_max, transformation): if numpy: transformation = numpy.dot(transformation, node.transformation) else: t0,t1,t2,t3 = transformation T0,T1,T2,T3 = node.transformation transformation = [ [ t0[0]*T0[0] + t0[1]*T1[0] + t0[2]*T2[0] + t0[3]*T3[0], t0[0]*T0[1] + t0[1]*T1[1] + t0[2]*T2[1] + t0[3]*T3[1], t0[0]*T0[2] + t0[1]*T1[2] + t0[2]*T2[2] + t0[3]*T3[2], t0[0]*T0[3] + t0[1]*T1[3] + t0[2]*T2[3] + t0[3]*T3[3] ],[ t1[0]*T0[0] + t1[1]*T1[0] + t1[2]*T2[0] + t1[3]*T3[0], t1[0]*T0[1] + t1[1]*T1[1] + t1[2]*T2[1] + t1[3]*T3[1], t1[0]*T0[2] + t1[1]*T1[2] + t1[2]*T2[2] + t1[3]*T3[2], t1[0]*T0[3] + t1[1]*T1[3] + t1[2]*T2[3] + t1[3]*T3[3] ],[ t2[0]*T0[0] + t2[1]*T1[0] + t2[2]*T2[0] + t2[3]*T3[0], t2[0]*T0[1] + t2[1]*T1[1] + t2[2]*T2[1] + t2[3]*T3[1], t2[0]*T0[2] + t2[1]*T1[2] + t2[2]*T2[2] + t2[3]*T3[2], t2[0]*T0[3] + t2[1]*T1[3] + t2[2]*T2[3] + t2[3]*T3[3] ],[ t3[0]*T0[0] + t3[1]*T1[0] + t3[2]*T2[0] + t3[3]*T3[0], t3[0]*T0[1] + t3[1]*T1[1] + t3[2]*T2[1] + t3[3]*T3[1], t3[0]*T0[2] + t3[1]*T1[2] + t3[2]*T2[2] + t3[3]*T3[2], t3[0]*T0[3] + t3[1]*T1[3] + t3[2]*T2[3] + t3[3]*T3[3] ] ] for mesh in node.meshes: for v in mesh.vertices: v = transform(v, transformation) bb_min[0] = min(bb_min[0], v[0]) bb_min[1] = min(bb_min[1], v[1]) bb_min[2] = min(bb_min[2], v[2]) bb_max[0] = max(bb_max[0], v[0]) bb_max[1] = max(bb_max[1], v[1]) bb_max[2] = max(bb_max[2], v[2]) for child in node.children: bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation) return bb_min, bb_max def try_load_functions(library_path, dll): ''' Try to bind to aiImportFile and aiReleaseImport Arguments --------- library_path: path to current lib dll: ctypes handle to library Returns --------- If unsuccessful: None If successful: Tuple containing (library_path, load from filename function, load from memory function, export to filename function, export to blob function, release function, ctypes handle to assimp library) ''' try: load = dll.aiImportFile release = dll.aiReleaseImport load_mem = dll.aiImportFileFromMemory export = dll.aiExportScene export2blob = 
dll.aiExportSceneToBlob except AttributeError: #OK, this is a library, but it doesn't have the functions we need return None # library found! from .structs import Scene, ExportDataBlob load.restype = ctypes.POINTER(Scene) load_mem.restype = ctypes.POINTER(Scene) export2blob.restype = ctypes.POINTER(ExportDataBlob) return (library_path, load, load_mem, export, export2blob, release, dll) def search_library(): ''' Loads the assimp library. Throws exception AssimpError if no library_path is found Returns: tuple, (load from filename function, load from memory function, export to filename function, export to blob function, release function, dll) ''' #this path folder = os.path.dirname(__file__) # silence 'DLL not found' message boxes on win try: ctypes.windll.kernel32.SetErrorMode(0x8007) except AttributeError: pass candidates = [] # test every file for curfolder in [folder]+additional_dirs: if os.path.isdir(curfolder): for filename in os.listdir(curfolder): # our minimum requirement for candidates is that # they should contain 'assimp' somewhere in # their name if filename.lower().find('assimp')==-1 : continue is_out=1 for et in ext_whitelist: if et in filename.lower(): is_out=0 break if is_out: continue library_path = os.path.join(curfolder, filename) logger.debug('Try ' + library_path) try: dll = ctypes.cdll.LoadLibrary(library_path) except Exception as e: logger.warning(str(e)) # OK, this except is evil. But different OSs will throw different # errors. So just ignore any errors. continue # see if the functions we need are in the dll loaded = try_load_functions(library_path, dll) if loaded: candidates.append(loaded) if not candidates: # no library found raise AssimpError("assimp library not found") else: # get the newest library_path candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates) res = max(candidates, key=operator.itemgetter(0))[1] logger.debug('Using assimp library located at ' + res[0]) # XXX: if there are 1000 dll/so files containing 'assimp' # in their name, do we have all of them in our address # space now until gc kicks in? # XXX: take version postfix of the .so on linux? return res[1:] def hasattr_silent(object, name): """ Calls hasttr() with the given parameters and preserves the legacy (pre-Python 3.2) functionality of silently catching exceptions. Returns the result of hasatter() or False if an exception was raised. """ try: if not object: return False return hasattr(object, name) except AttributeError: return False
11,799
Python
40.549296
139
0.507755
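These helpers expose `get_bounding_box(scene)`, which composes node transforms (via numpy when available, otherwise the pure-Python fallback above) and folds every mesh vertex into a world-space min/max pair. A minimal usage sketch, assuming the helpers are importable as `pyassimp.helpers` and that a mesh exists at the hypothetical path `model.3ds`:

```python
# Usage sketch (assumptions: pyassimp is installed, these helpers live in
# pyassimp.helpers, and "model.3ds" is a placeholder input file).
import pyassimp
from pyassimp.helpers import get_bounding_box

scene = pyassimp.load("model.3ds")            # parse the file through assimp
try:
    bb_min, bb_max = get_bounding_box(scene)  # world-space AABB over all meshes
    print("min:", bb_min, "max:", bb_max)
finally:
    pyassimp.release(scene)                   # free the C-side scene data
```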
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/config/extension.toml
[package] version = "1.0.0" title = "MF GDTF converter" description = "Support of GDTF (General Device Type Format) files in USD." authors = ["Moment Factory", "Frederic Lestage", "Antoine Pilote"] readme = "docs/README.md" changelog = "docs/CHANGELOG.md" repository = "https://github.com/MomentFactory/Omniverse-MVR-GDTF-converter" category = "Rendering" keywords = ["MVR", "GDTF","Audiovisual","Lighting","Fixture"] preview_image = "data/preview.png" icon = "data/icon.png" toggleable = false [core] reloadable = false # Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded) order = -100 [dependencies] "omni.kit.uiapp" = {} "omni.kit.tool.asset_importer" = {} [[python.module]] name = "mf.ov.gdtf" [python.pipapi] requirements = [ "unidecode" ] use_online_index = true [package.target] kit = ["105.1"] [package.writeTarget] kit = true python = false
909
TOML
20.16279
96
0.70187
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/docs/CHANGELOG.md
# Changelog ## [1.0.0] - 2024-01-24 - Added native OpenUSD file format plugin for payload support. - Fixed orientation and scale issues - Some light parameters are now applied to USD light (cone, color temp, intensity) - Deprecated kit 104 and 105.0 - Added Sample files for USDView ## [0.4.0] - 2023-10-02 ### Added - Sample file ### Fixed - Enabled importing from Omniverse - Importing within the same repository as the source file fixed (for filesystem and Omniverse) ### Changed - The name of the folder (the one created during importation that contains the files converted to usd) won't include the file extension ("myGDTFFile.gdtf/" will now be "myGDTFFile_gdtf/") - Properly remove the temporary directory created for archive extraction at the end of importation ## [0.3.0] - 2023-09-01 ### Added - Support for node type "Inventory" - Use "Beam" node when present for light xform ### Fixed - Global scale and rotation rework - Fix relative links issue with path and character escaping ## [0.2.0] - 2023-08-17 ### Fixed - Better support for 3ds files ### Changed - When making name valid for usd, add underscore if starts with number ## [0.1.0] - 2023-07-21 ### Added - Initial version of the extension - Support import of GDTF files
1,243
Markdown
26.043478
187
0.738536
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.gdtf/docs/README.md
# GDTF extension for Omniverse [mf.ov.gdtf] Copyright 2023 Moment Factory Studios Inc. An Omniverse extension for [GDTF (General Device Type Format)](https://github.com/mvrdevelopment/spec/blob/main/gdtf-spec.md) files. Supports GDTF to OpenUSD conversion as well as references to GDTF files through a native OpenUSD FileFormat Plugin.
336
Markdown
66.399987
247
0.803571
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrImporter.py
import logging import numpy as np from typing import List, Tuple import xml.etree.ElementTree as ET from zipfile import ZipFile from pxr import Gf, Usd, UsdGeom from mf.ov.gdtf import gdtfImporter as gdtf from .filepathUtility import Filepath from .mvrUtil import Layer, Fixture from .USDTools import USDTools class MVRImporter: def convert(file: Filepath, mvr_output_dir: str, output_ext: str = ".usd") -> str: # TODO: change output_ext to bool use_usda try: with ZipFile(file.fullpath, 'r') as archive: output_dir = mvr_output_dir + file.filename + "_mvr/" data = archive.read("GeneralSceneDescription.xml") root = ET.fromstring(data) MVRImporter._warn_for_version(root) url: str = MVRImporter.convert_mvr_usd(output_dir, file.filename, output_ext, root, archive) return url except Exception as e: logger = logging.getLogger(__name__) logger.error(f"Failed to parse mvr file at {file.fullpath}. Make sure it is not corrupt. {e}") return None def _warn_for_version(root): v_major = root.attrib["verMajor"] v_minor = root.attrib["verMinor"] if v_major != "1" or v_minor != "5": logger = logging.getLogger(__name__) logger.warn(f"This extension is tested with mvr v1.5, this file version is {v_major}.{v_minor}") def convert_mvr_usd(output_dir: str, filename: str, ext: str, root: ET.Element, archive: ZipFile) -> str: scene: ET.Element = root.find("Scene") layers: List[Layer] = MVRImporter._get_layers(scene) for layer in layers: layer.find_fixtures() stage, url = MVRImporter._make_mvr_stage(output_dir, filename, ext, layers) MVRImporter._convert_gdtf(stage, layers, output_dir, archive, ext) stage.Save() return url def _get_layers(scene: ET.Element) -> List[Layer]: layersNode: ET.Element = scene.find("Layers") layerNodes: ET.Element = layersNode.findall("Layer") layers: List[Layer] = [] for layerNode in layerNodes: layer: Layer = Layer(layerNode) layers.append(layer) return layers def _make_mvr_stage(output_dir: str, filename: str, ext: str, layers: List[Layer]) -> Tuple[Usd.Stage, str]: url: str = output_dir + filename + ext stage: Usd.Stage = USDTools.get_or_create_stage(url) MVRImporter._add_fixture_xform(stage, layers) return stage, url def _add_fixture_xform(stage: Usd.Stage, layers: List[Layer]): rotate_minus90deg_xaxis = Gf.Matrix3d(1, 0, 0, 0, 0, 1, 0, -1, 0) mvr_scale = UsdGeom.LinearUnits.millimeters # MVR dimensions are in millimeters applied_scale: float = USDTools.get_applied_scale(stage, mvr_scale) for layer in layers: if layer.fixtures_len() > 0: scope: UsdGeom.Scope = USDTools.add_scope(stage, layer.get_name_usd()) for fixture in layer.get_fixtures(): xform: UsdGeom.Xform = USDTools.add_fixture_xform(stage, scope, fixture.get_unique_name_usd()) fixture.set_stage_path(xform.GetPrim().GetPath()) np_matrix: np.matrix = USDTools.np_matrix_from_mvr(fixture.get_matrix()) gf_matrix: Gf.Matrix4d = USDTools.gf_matrix_from_mvr(np_matrix, applied_scale) rotation: Gf.Rotation = gf_matrix.ExtractRotation() euler: Gf.Vec3d = rotation.Decompose(Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis()) # Z-up to Y-up # TODO: Validate with stage up axis translation = rotate_minus90deg_xaxis * gf_matrix.ExtractTranslation() rotate = rotate_minus90deg_xaxis * euler xform.ClearXformOpOrder() # Prevent error when overwritting xform.AddTranslateOp().Set(translation) xform.AddRotateZYXOp().Set(rotate) # Scale Op is added in _add_gdtf_reference fixture.apply_attributes_to_prim(xform.GetPrim()) stage.Save() def _convert_gdtf(stage: Usd.Stage, layers: List[Layer], mvr_output_dir: str, archive: ZipFile, ext: str): 
gdtf_spec_uniq: List[str] = MVRImporter._get_gdtf_to_import(layers) gdtf_output_dir = mvr_output_dir for gdtf_spec in gdtf_spec_uniq: gdtf.GDTFImporter.convert_from_mvr(gdtf_spec, gdtf_output_dir, archive) MVRImporter._add_gdtf_reference(layers, stage, ext) def _get_gdtf_to_import(layers: List[Layer]) -> List[str]: result: List[str] = [] for layer in layers: if layer.fixtures_len() > 0: current_fixture_names = [x.get_spec_name() for x in layer.get_fixtures()] current_fixture_names_set = set(current_fixture_names) current_fixture_names_uniq = list(current_fixture_names_set) for current_fixture_name_uniq in current_fixture_names_uniq: result.append(current_fixture_name_uniq) return result def _add_gdtf_reference(layers: List[Layer], stage: Usd.Stage, ext: str): for layer in layers: if layer.fixtures_len() > 0: for fixture in layer.get_fixtures(): spec = fixture.get_spec_name() relative_path = f"./{spec}_gdtf/{spec}{ext}" stage_path = fixture.get_stage_path() USDTools.add_reference(stage, relative_path, stage_path) USDTools.copy_gdtf_scale(stage, stage_path, relative_path)
5,751
Python
46.147541
114
0.603026
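The importer above works in three steps: read `GeneralSceneDescription.xml` out of the MVR archive, check the declared version, then collect `Layer` and `Fixture` nodes before emitting USD. The stdlib-only sketch below reproduces just that first parsing step so the structure can be inspected outside Omniverse; `my_rig.mvr` is a hypothetical input file, and the element names are taken from the code above.

```python
# Minimal sketch of the first step the importer performs: pull the scene
# description out of an .mvr archive and list its layers and fixtures.
import xml.etree.ElementTree as ET
from zipfile import ZipFile

with ZipFile("my_rig.mvr", "r") as archive:  # "my_rig.mvr" is a placeholder path
    root = ET.fromstring(archive.read("GeneralSceneDescription.xml"))

print("MVR version:", root.attrib["verMajor"], root.attrib["verMinor"])
for layer in root.find("Scene").find("Layers").findall("Layer"):
    fixtures = layer.find("ChildList").findall("Fixture")
    print(layer.attrib["name"], "->", len(fixtures), "fixture(s)")
```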
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/extension.py
import omni.ext import omni.kit.tool.asset_importer as ai from .converterDelegate import ConverterDelegate class MfOvMvrExtension(omni.ext.IExt): def on_startup(self, _): self._delegate_mvr = ConverterDelegate( "MVR Converter", ["(.*\\.mvr$)"], ["MVR Files (*.mvr)"] ) ai.register_importer(self._delegate_mvr) def on_shutdown(self): ai.remove_importer(self._delegate_mvr) self._delegate_mvr.destroy() self._delegate_mvr = None
522
Python
26.526314
48
0.611111
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/USDTools.py
import numpy as np from typing import List from unidecode import unidecode from urllib.parse import unquote from pxr import Gf, Tf, Sdf, Usd, UsdGeom class USDTools: def make_name_valid(name: str) -> str: if name[:1].isdigit(): name = "_" + name return Tf.MakeValidIdentifier(unidecode(name)) def get_or_create_stage(url: str) -> Usd.Stage: try: # TODO: Better way to check if stage exists? return Usd.Stage.Open(url) except: stage = Usd.Stage.CreateNew(url) UsdGeom.SetStageMetersPerUnit(stage, UsdGeom.LinearUnits.centimeters) # TODO get user defaults UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # TODO get user defaults default_prim = stage.DefinePrim("/World", "Xform") stage.SetDefaultPrim(default_prim) stage.Save() return stage def add_scope(stage: Usd.Stage, name: str) -> UsdGeom.Scope: default_prim_path: Sdf.Path = stage.GetDefaultPrim().GetPrimPath() scope_path: Sdf.Path = default_prim_path.AppendPath(name) scope: UsdGeom.Scope = UsdGeom.Scope.Define(stage, scope_path) return scope def add_fixture_xform(stage: Usd.Stage, scope: UsdGeom.Scope, name: str) -> UsdGeom.Xform: path = scope.GetPath().AppendPath(name) xform: UsdGeom.Xform = UsdGeom.Xform.Define(stage, path) return xform def get_applied_scale(stage: Usd.Stage, scale_factor: float) -> float: stage_scale = UsdGeom.GetStageMetersPerUnit(stage) return scale_factor / stage_scale def np_matrix_from_mvr(value: str) -> np.matrix: # MVR Matrix is: 4x3, Right-handed, Z-up, 1 Distance Unit equals 1mm # expect form like "<Matrix>{x,y,z}{x,y,z}{x,y,z}{x,y,z}</Matrix>" where "x","y","z" is similar to 1.000000 # make source compatible with np.matrix constructor: "x y z; x y z; x y z; x y z" value_alt = value[1:] # Removes "{" prefix value_alt = value_alt[:-1] # Removes "}" suffix value_alt = value_alt.replace("}{", "; ") value_alt = value_alt.replace(",", " ") np_matrix: np.matrix = np.matrix(value_alt) return np_matrix def gf_matrix_from_mvr(np_matrix: np.matrix, scale: float) -> Gf.Matrix4d: # Column major matrix gf_matrix = Gf.Matrix4d( np_matrix.item((0, 0)), np_matrix.item((0, 1)), np_matrix.item((0, 2)), 0, np_matrix.item((1, 0)), np_matrix.item((1, 1)), np_matrix.item((1, 2)), 0, np_matrix.item((2, 0)), np_matrix.item((2, 1)), np_matrix.item((2, 2)), 0, np_matrix.item((3, 0)) * scale, np_matrix.item((3, 1)) * scale, np_matrix.item((3, 2)) * scale, 1 ) return gf_matrix def set_fixture_attribute(prim: Usd.Prim, attribute_name: str, attribute_type: Sdf.ValueTypeNames, attribute_value): prim.CreateAttribute(f"mf:mvr:{attribute_name}", attribute_type).Set(attribute_value) def add_reference(stage: Usd.Stage, ref_path_relative: str, stage_path: str): xform_ref: UsdGeom.Xform = stage.GetPrimAtPath(stage_path) path_unquoted = unquote(ref_path_relative) references: Usd.References = xform_ref.GetReferences() references.AddReference(path_unquoted) stage.Save() def copy_gdtf_scale(mvr_stage: Usd.Stage, stage_prim_path: str, relative_path: str): # Copy a reference default prim scale op value to a referencing xform in an other stage curr_root_layer = mvr_stage.GetRootLayer() curr_stage_url: str = curr_root_layer.realPath curr_stage_url_formatted: str = curr_stage_url.replace('\\', '/') curr_stage_dir_index: str = curr_stage_url_formatted.rindex("/") curr_stage_dir = curr_stage_url_formatted[:curr_stage_dir_index] mvr_xform_target = UsdGeom.Xform(mvr_stage.GetPrimAtPath(stage_prim_path)) gdtf_stage_filename: str = relative_path[1:] gdtf_stage_path: str = curr_stage_dir + gdtf_stage_filename gdtf_stage: Usd.Stage = Usd.Stage.Open(gdtf_stage_path) 
gdtf_default_prim = UsdGeom.Xform(gdtf_stage.GetDefaultPrim()) stage_scale = UsdGeom.GetStageMetersPerUnit(mvr_stage) scale_factor = 1 / stage_scale scale_value = Gf.Vec3d(scale_factor, scale_factor, scale_factor) xform_ordered_ops: List[UsdGeom.XformOp] = gdtf_default_prim.GetOrderedXformOps() for xform_op in xform_ordered_ops: if xform_op.GetOpType() == UsdGeom.XformOp.TypeScale: scale_value = xform_op.Get() mvr_xform_target.AddScaleOp().Set(scale_value) mvr_stage.Save()
4,672
Python
46.683673
120
0.634632
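`USDTools.np_matrix_from_mvr` turns the MVR `<Matrix>` string (4x3, right-handed, Z-up, millimetres) into an `np.matrix` by rewriting the braces into numpy's row syntax. A small worked example of that string manipulation, using a made-up identity rotation with a 100/200/300 mm translation:

```python
# Worked example of the parsing done in USDTools.np_matrix_from_mvr.
# The sample string is illustrative, not from a real MVR file.
import numpy as np

value = "{1.000000,0.000000,0.000000}{0.000000,1.000000,0.000000}" \
        "{0.000000,0.000000,1.000000}{100.000000,200.000000,300.000000}"

value_alt = value[1:-1].replace("}{", "; ").replace(",", " ")
np_matrix = np.matrix(value_alt)
print(np_matrix.shape)         # (4, 3) -> last row is the translation in mm
print(np_matrix.item((3, 0)))  # 100.0, later multiplied by the stage scale factor
```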
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/mf/ov/mvr/mvrUtil.py
from typing import List import xml.etree.ElementTree as ET from pxr import Usd, Sdf from .USDTools import USDTools class Fixture: def __init__(self, node: ET.Element): self._root = node self._name = node.attrib["name"] self._uuid = node.attrib["uuid"] self._matrix = self._get_value_text_if_exists("Matrix") self._gdtf_spec = self._get_value_text_if_exists("GDTFSpec") self._gdtf_mode = self._get_value_text_if_exists("GDTFMode") self._custom_commands = self._get_custom_commands_values() self._classing = self._get_value_text_if_exists("Classing") self._addresses = self._get_addresses_values() self._fixture_id = self._get_value_int_if_exists("fixtureID") self._unit_number = self._get_value_int_if_exists("UnitNumber") self._fixture_type_id = self._get_value_int_if_exists("FixtureTypeId") self._custom_id = self._get_value_int_if_exists("CustomId") self._cie_color = self._get_color_values() self._cast_shadow = self._get_value_bool_if_exists("CastShadow") def get_unique_name_usd(self) -> str: return USDTools.make_name_valid(self._name + "_" + self._uuid) def get_matrix(self) -> str: return self._matrix def set_stage_path(self, path: str): self._stage_path = path def get_stage_path(self) -> str: return self._stage_path def get_spec_name(self) -> str: spec_name = self._gdtf_spec if self._gdtf_spec[-5:] == ".gdtf": spec_name = self._gdtf_spec[:-5] return spec_name def _get_value_text_if_exists(self, name: str) -> str: node = self._get_child_node(name) if node is not None: text = node.text if text is not None: return node.text return None def _get_value_int_if_exists(self, name: str) -> int: txt = self._get_value_text_if_exists(name) if txt is None: return None return int(txt) def _get_value_bool_if_exists(self, name: str) -> bool: txt = self._get_value_text_if_exists(name) if txt is None: return None return bool(txt) def _get_child_node(self, node: str): return self._root.find(node) def _get_custom_commands_values(self) -> List[str]: values: List[str] = [] node = self._get_child_node("CustomCommands") if node is not None: subnodes = node.findall("CustomCommand") if subnodes is not None and len(subnodes) > 0: values = [x.text for x in subnodes] return values def _get_addresses_values(self) -> List[str]: values: List[str] = [] node = self._get_child_node("Addresses") if node is not None: subnodes = node.findall("Address") if subnodes is not None and len(subnodes): values = [int(x.text) for x in subnodes] return values def _get_color_values(self) -> List[float]: colors: List[float] = [] node = self._get_child_node("Color") if node is not None: colors = [float(x) for x in node.text.split(",")] return colors def apply_attributes_to_prim(self, prim: Usd.Prim): self._set_attribute_text_if_valid(prim, "name", self._name) self._set_attribute_text_if_valid(prim, "uuid", self._uuid) self._set_attribute_text_if_valid(prim, "GDTFSpec", self._gdtf_spec) self._set_attribute_text_if_valid(prim, "GDTFMode", self._gdtf_mode) self._set_attribute_textarray_if_valid(prim, "CustomCommands", self._custom_commands) self._set_attribute_text_if_valid(prim, "Classing", self._classing) self._set_attribute_intarray_if_valid(prim, "Addresses", self._addresses) self._set_attribute_int_if_valid(prim, "FixtureID", self._fixture_id) self._set_attribute_int_if_valid(prim, "UnitNumber", self._unit_number) self._set_attribute_int_if_valid(prim, "FixtureTypeId", self._fixture_type_id) self._set_attribute_int_if_valid(prim, "CustomId", self._custom_id) self._set_attribute_floatarray_if_valid(prim, "CIEColor", self._cie_color) 
self._set_attribute_bool_if_value(prim, "CastShadow", self._cast_shadow) def _set_attribute_text_if_valid(self, prim: Usd.Prim, name: str, value: str): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.String, value) def _set_attribute_int_if_valid(self, prim: Usd.Prim, name: str, value: int): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Int, value) def _set_attribute_bool_if_value(self, prim: Usd.Prim, name: str, value: bool): if value is not None: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.Bool, value) def _set_attribute_textarray_if_valid(self, prim: Usd.Prim, name: str, value: List[str]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.StringArray, value) def _set_attribute_intarray_if_valid(self, prim: Usd.Prim, name: str, value: List[int]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.IntArray, value) def _set_attribute_floatarray_if_valid(self, prim: Usd.Prim, name: str, value: List[float]): if value is not None and len(value) > 0: USDTools.set_fixture_attribute(prim, name, Sdf.ValueTypeNames.FloatArray, value) class Layer: def __init__(self, node: ET.Element): self._name = node.attrib["name"] self._uuid = node.attrib["uuid"] self._node = node self._fixtures = [] def get_name_usd(self) -> str: return USDTools.make_name_valid(self._name) def find_fixtures(self): childlist = self._node.find("ChildList") fixtures = childlist.findall("Fixture") self._fixtures = [Fixture(x) for x in fixtures] def fixtures_len(self) -> int: return len(self._fixtures) def get_fixtures(self) -> List[Fixture]: return self._fixtures
6,238
Python
39.777778
96
0.617987
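For reference, a minimal `Fixture` element of the shape the parser above expects; the attribute and child names come from the code, while the values are invented for illustration. `Fixture(node)` from this file reads `name`, `uuid`, `Matrix`, `GDTFSpec` and the other optional children in exactly this form.

```python
# Illustrative Fixture element matching what mvrUtil.Fixture parses
# (values are made up; element names come from the code above).
import xml.etree.ElementTree as ET

fixture_xml = """
<Fixture name="Spot 01" uuid="3d5b2b3e-0000-0000-0000-000000000001">
  <Matrix>{1,0,0}{0,1,0}{0,0,1}{0,0,1000}</Matrix>
  <GDTFSpec>MyFixture.gdtf</GDTFSpec>
  <GDTFMode>Default</GDTFMode>
  <Addresses><Address>1</Address></Addresses>
  <FixtureTypeId>0</FixtureTypeId>
</Fixture>
"""
node = ET.fromstring(fixture_xml)
# These are the fields Fixture(node) would pick up:
print(node.attrib["name"], node.find("GDTFSpec").text, node.find("Matrix").text)
```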
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/docs/CHANGELOG.md
# Changelog ## [1.0.0] - 2024-01-24 - Added native OpenUSD file format plugin for payload support. - Fixed orientation and scale issues - Some light parameters are now applied to USD light (cone, color temp, intensity) - Deprecated kit 104 and 105.0 - Added Sample files for USDView ## [0.4.0] - 2023-10-02 ### Added - Sample file ### Fixed - Enabled importing from Omniverse - Importing within the same repository as the source file fixed (for filesystem and Omniverse) ### Changed - The name of the folder (the one created during importation that contains the files converted to usd) won't include the file extension ("myMVRFile.mvr/" will now be "myMVRFile_mvr/") - GDTF attributes populated by MVR now better reflect naming convention of the specs ("fixture_id" becomes "FixtureID") - Properly remove the temporary directory created for archive extraction at the end of importation ## [0.3.0] - 2023-09-01 ### Fixed - Global scale rework - Fix relative link issue with character escaping ## [0.2.0] - 2023-08-17 ### Added - Support for multiple layers - Layers reflected as Scope in usd ### Changed - When making name valid for usd, add underscore if starts with number ## [0.1.0] - 2023-07-21 ### Added - Initial version of the extension - Support import of MVR files
1,273
Markdown
27.954545
183
0.745483
MomentFactory/Omniverse-MVR-GDTF-converter/exts/mf.ov.mvr/docs/README.md
# MVR extension for Omniverse [mf.ov.mvr] Copyright 2023 Moment Factory Studios Inc. An Omniverse extension for [MVR (My Virtual Rig)](https://github.com/mvrdevelopment/spec/blob/main/mvr-spec.md) files. Supports MVR to OpenUSD conversion as well as references to MVR files through a native USD FileFormat Plugin. Requires the mf.ov.gdtf extension to work fully. MVR (My Virtual Rig) is a scene format that describes a complete rig of lights, using GDTF assets at its core while adding the ability to define groups, layers, DMX addresses and more, allowing lighting designers to build virtual replicas of their lighting rigs and enforce a single file format from show design to previz to operation.
705
Markdown
87.249989
336
0.8
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/repo.toml
######################################################################################################################## # Repo tool base settings ######################################################################################################################## [repo] # Use the Kit Template repo configuration as a base. Only override things specific to the repo. import_configs = ["${root}/_repo/deps/repo_kit_tools/kit-template/repo.toml"] # Repository Name name = "omniverse-lidar-live-synthetic-data" [repo_build] msbuild.vs_version = "vs2019" post_build.commands = [] [repo_docs] name = "MF Lidar live synthetic data" project = "omniverse-lidar-live-synthetic-data" api_output_directory = "api" use_fast_doxygen_conversion=false sphinx_version = "4.5.0.2-py3.10-${platform}" sphinx_exclude_patterns = [ "_build", "tools", "VERSION.md", "source/extensions/*/docs/Overview.md", "source/extensions/*/docs/CHANGELOG.md", ] [repo_docs.kit] extensions = [ "mf.ov.lidar_live_synth" ] [repo_package.packages."platform:windows-x86_64".docs] windows_max_path_length = 0
1,103
TOML
28.052631
120
0.553944
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/README.md
# MF Lidar live synthetic data [mf.ov.lidar_live_synth] Adds an Action Graph Node ("Generic/Beam to Ouster UDP") to send Isaac beam data via the Ouster(tm) UDP protocol. This allows any third-party software implementing Ouster(tm) lidars to be connected to simulated sensors instead of physical sensors. Developed for kit 105.1 and currently working only in Isaac Sim. This extension provides pre-built binaries for Windows and Linux x86_64. You may want to compile from the [source code](https://github.com/MomentFactory/Omniverse-Lidar-Live-Synthetic-Data). ## Requirements - kit 105 based - Isaac Sim > 2023.1.0 - Linux or Windows platforms ### Supported Lidars Currently, only Ouster™ sensors are supported. The Lidar must have 16, 32, 64 or 128 rows to be supported by the protocol. Lidar FOVs and resolutions are not transmitted in the protocol and therefore should match those of an actual Ouster(tm) model (22.5, 45 or 90 degrees FOV) for an accurate reconstruction by the receiving software. JSON config files that describe the angles of the beams for an external application are included in the 'data' folder (example: [OusterJsonConfigOmniverse-OS0-16.json](source/extensions/mf.ov.lidar_live_synth/data/OusterJsonConfigOmniverse-OS0-16.json)). These files can be used in Cirrus as the Ouster(tm) Json Config file to properly reconstruct the data with the correct beam angles. OS0 are 90 degrees FOV, OS1 are 45 and OS2 are 22.5. ## Build ### Windows - Run `./build.bat` ### Linux - Install Docker - Run `./build.sh` ## Using the extension Requires Isaac Sim as well as third-party software that can receive and parse Ouster Lidar sensor frames. You can use the [isaac_lidar_sample_moving_cube.usd](source/extensions/mf.ov.lidar_live_synth/samples/isaac_lidar_sample_moving_cube.usd), or [isaac_lidar_ouster_sample.usd](source/extensions/mf.ov.lidar_live_synth/samples//isaac_lidar_ouster_sample.usd), or create your own following the instructions below. ### Enable the extension In Isaac Sim: - Windows > Extensions. - Switch to the THIRD PARTY tab. - Install and enable the extension. ### In Isaac Sim: 1. Open or create a scene - Meshes require a Rigid Body to intercept Lidar raycasts - Right-click a mesh, then select `Add / Physics / Rigid Body` 2. Add a Lidar to the scene if not present - `Create / Isaac / Sensors / Lidar / Generic` - Unfold Raw USD Properties - Check `drawPoints` and/or `drawLines` if you want to see the point cloud - Check the `enabled` property - Use `horizontalFov`, `horizontalResolution`, `maxRange`, `minRange`, `verticalFov`, and `verticalResolution` to define the Lidar raycast zone - Set `rotationRate` to `0` if you want continuous raycasting 3. Create an action graph - Right-click the Stage, then select `Create / Visual Scripting / Action Graph` - Right-click the Action Graph, then select "Open Graph" - Add an `Event / On Playback Tick` node - Add an `Isaac Range Sensor / Isaac Read Lidar Beam Node` - Connect the "Tick" output to the "Exec In" input - Add a `Generic / Beam to Ouster UDP` node - Connect the "Exec Out" output to the "Exec In" input - Connect the outputs of `Isaac Read Lidar Beam Node` to the matching `Beam to Ouster UDP` inputs - `Azimuth Range` - `Horizontal Resolution` - `Linear Depth Data` - `Num Cols` - `Num Rows` 4. Press the play icon (SPACE) to begin the simulation #### Beam to Ouster UDP fields - `IP Address` (string): The IP address to send the data to. - `Port` (int): The port to send the data to. 
- `Broadcast` (bool): Check to indicate the IP Address is a broadcast address. ## Developer notes As the extension is written in C++ for performance reasons, developers need to build it before using it. Most of it works in the same way as the official [Omniverse C++ examples](https://github.com/NVIDIA-Omniverse/kit-extension-template-cpp). The first step is to run the `build.bat` file at the root of the repo. It will generate the actual extension files usable by Omniverse, as well as the Visual Studio files. It is recommended to work in Visual Studio (2019 and above) for C++, although VSCode should also work. The `build.bat` script generates the VS2019 `.sln` files in `_compiler\vs2019\kit-extension-template-cpp.sln`. It should work as-is. Do not upgrade the compiler and Windows SDK versions if asked to do so, and install the correct Windows SDK from the VS Installer if it is missing on your machine. Unlike the samples, we do not recommend running the project by launching it via Visual Studio, since the extension is made specifically for Isaac Sim, and Visual Studio doesn't launch it within an Isaac Sim environment. It is recommended to run Isaac and attach the VS debugger to it by going to Debug -> Attach to Process and selecting the kit.exe corresponding to Isaac. Make sure to attach to Native Code. If you have the "Python - Profiling" extension, it might try to attach to Python code instead. One thing to note is that the symbols for the extension will only be loaded if the extension is enabled after attaching. If the extension is already enabled, disabling then enabling it will also work. Also, to update the extension in Isaac after making changes and rebuilding, it needs to be disabled and enabled again (the extension will probably fail to build while it is in use, as the DLL cannot be overwritten anyway). To add the extension to Isaac, simply add the built plugin folder (`c:/git/omniverse/omniverse-lidar-synthetic-data/_build/windows-x86_64/release/exts` or `c:/git/omniverse/omniverse-lidar-synthetic-data/_build/windows-x86_64/debug/exts` for a debug build) to the extension manager paths. ## Resources - Inspired by: [NVIDIA's kit-extension-template-cpp](https://github.com/NVIDIA-Omniverse/kit-extension-template-cpp)
5,882
Markdown
62.258064
933
0.753655
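On the receiving side, the "Beam to Ouster UDP" node simply emits UDP datagrams to the configured `IP Address` and `Port`. A minimal stdlib listener is enough to confirm packets are arriving before pointing a full lidar consumer at the stream; the port below is only illustrative and should match whatever value you set on the node.

```python
# Minimal check that "Beam to Ouster UDP" packets are arriving: bind to the
# port configured on the node and print datagram sizes.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", 7502))  # 7502 is an illustrative port, not a required value
print("Waiting for lidar packets ...")
while True:
    data, sender = sock.recvfrom(65535)  # one Ouster-style data packet per datagram
    print(f"received {len(data)} bytes from {sender[0]}:{sender[1]}")
```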
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/index.rst
MF Lidar live synthetic data ############################ .. mdinclude:: README.md Example Extensions ################## * `mf.ov.lidar_live_synth <../../mf.ov.lidar_live_synth/1.0.0/index.html>`_
198
reStructuredText
18.899998
75
0.535354
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/repo-deps.packman.xml
<project toolsVersion="5.0"> <dependency name="repo_build" linkPath="../_repo/deps/repo_build"> <package name="repo_build" version="0.44.6" checksum="11858f3d45b15d83f0279fa96e2813232bfd65755d0cf45861f5fdd28a5a39b6" /> </dependency> <dependency name="repo_changelog" linkPath="../_repo/deps/repo_changelog"> <package name="repo_changelog" version="0.3.2" checksum="fbe4bc4257d5aec1c964f2616257043095a9dfac8a10e027ac96aa89340f1423" /> </dependency> <dependency name="repo_docs" linkPath="../_repo/deps/repo_docs"> <package name="repo_docs" version="0.37.3" checksum="78bd6488c1cd7295ab6728d9cd0b79fac3684598bcaebefad710fc79e3a7b8ea" /> </dependency> <dependency name="repo_kit_tools" linkPath="../_repo/deps/repo_kit_tools"> <package name="repo_kit_tools" version="0.11.8" checksum="8d6e1ade8b75b40f880505ba62308958d87a88e52db6a3b932be3da387a8a571" /> </dependency> <dependency name="repo_licensing" linkPath="../_repo/deps/repo_licensing"> <package name="repo_licensing" version="1.12.0" checksum="2fa002302a776f1104896f39c8822a8c9516ef6c0ce251548b2b915979666b9d" /> </dependency> <dependency name="repo_man" linkPath="../_repo/deps/repo_man"> <package name="repo_man" version="1.36.1" checksum="aba22f72ec46b7d2761c5fe2eee397bcb6958dda9b4a8aaca947eb69b97f6089" /> </dependency> <dependency name="repo_package" linkPath="../_repo/deps/repo_package"> <package name="repo_package" version="5.8.8" checksum="b8279d841f7201b44d9b232b934960d9a302367be59ee64e976345854b741fec" /> </dependency> <dependency name="repo_format" linkPath="../_repo/deps/repo_format"> <package name="repo_format" version="2.7.0" checksum="8083eb423043de585dfdfd3cf7637d7e50ba2a297abb8bebcaef4307b80503bb" /> </dependency> <dependency name="repo_source" linkPath="../_repo/deps/repo_source"> <package name="repo_source" version="0.4.2" checksum="05776a984978d84611cb8becd5ed9c26137434e0abff6e3076f36ab354313423" /> </dependency> <dependency name="repo_test" linkPath="../_repo/deps/repo_test"> <package name="repo_test" version="2.9.3" checksum="1903a2a1c998ca4adc87bc20520e91a9af21bf18a6a48a8e05467fe29d674931" /> </dependency> </project>
2,191
XML
65.42424
130
0.760383
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/kit-sdk.packman.xml
<project toolsVersion="5.0"> <!-- We always depend on the release kit-sdk package, regardless of config --> <dependency name="kit_sdk_${config}" linkPath="../_build/${platform}/${config}/kit" tags="${config} non-redist"> <package name="kit-sdk" version="105.1+release.127680.dd92291b.tc.windows-x86_64.release" platforms="windows-x86_64" checksum="78b6054c730a44b97e6551eae9e17f45384621f244d4babde5264a1d6df3038f" /> <package name="kit-sdk" version="105.1+release.127680.dd92291b.tc.linux-x86_64.release" platforms="linux-x86_64" checksum="2f8357eda2de9232c0b4cb345eb6c4d3c3aa8c4c9685ed45d4bfe749af57b0b8" /> </dependency> </project>
648
XML
80.12499
199
0.759259
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/host-deps.packman.xml
<project toolsVersion="5.0"> <dependency name="premake" linkPath="../_build/host-deps/premake"> <package name="premake" version="5.0.0-alpha15.dev+pipeline3388156.1f299ea4-windows-x86_64" checksum="b1e5dcef9acf47b0c86a4630afa4fadc9485b878e25e4321ac5afbb826bbdf93" platforms="windows-x86_64" /> <package name="premake" version="5.0.0-alpha15.dev+pipeline3388156.1f299ea4-linux-x86_64" checksum="ae15e63cf6d53571fa3bdfa33ddcec8a3be90675cdd155590a26bcd75d04d73f" platforms="linux-x86_64" /> </dependency> <dependency name="msvc" linkPath="../_build/host-deps/msvc"> <package name="msvc" version="2019-16.7.6-license" platforms="windows-x86_64" checksum="0e37c0f29899fe10dcbef6756bcd69c2c4422a3ca1101206df272dc3d295b92d" /> </dependency> <dependency name="winsdk" linkPath="../_build/host-deps/winsdk"> <package name="winsdk" version="10.0.18362.0-license" platforms="windows-x86_64" checksum="2db7aeb2278b79c6c9fbca8f5d72b16090b3554f52b1f3e5f1c8739c5132a3d6" /> </dependency> </project>
1,012
XML
76.923071
201
0.778656
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/kit-sdk-deps.packman.xml
<project toolsVersion="5.0"> <!-- Import dependencies from Kit SDK to ensure we're using the same versions. --> <import path="../_build/${platform}/${config}/kit/dev/all-deps.packman.xml"> <filter include="carb_sdk_plugins"/> <filter include="cuda"/> <filter include="doctest"/> <filter include="pybind11"/> <filter include="python"/> </import> <!-- Override the link paths to point to the correct locations. --> <dependency name="carb_sdk_plugins" linkPath="../_build/target-deps/carb_sdk_plugins"/> <dependency name="cuda" linkPath="../_build/target-deps/cuda"/> <dependency name="doctest" linkPath="../_build/target-deps/doctest"/> <dependency name="pybind11" linkPath="../_build/target-deps/pybind11"/> <dependency name="python" linkPath="../_build/target-deps/python"/> </project>
826
XML
42.526314
89
0.679177
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/deps/ext-deps.packman.xml
<project toolsVersion="5.0"> <!-- Import dependencies from Kit SDK to ensure we're using the same versions. --> <import path="../_build/${platform}/${config}/kit/dev/all-deps.packman.xml"> <filter include="boost_preprocessor"/> <filter include="imgui"/> <filter include="nv_usd_py310_release"/> </import> <!-- Override the link paths to point to the correct locations. --> <dependency name="boost_preprocessor" linkPath="../_build/target-deps/boost-preprocessor"/> <dependency name="imgui" linkPath="../_build/target-deps/imgui"/> <dependency name="nv_usd_py310_release" linkPath="../_build/target-deps/nv_usd/release"/> <!-- Because we always use the release kit-sdk we have to explicitly refer to the debug usd package. --> <dependency name="nv_usd_py310_debug" linkPath="../_build/target-deps/nv_usd/debug"> <package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-win64_py310_debug-dev_omniverse" platforms="windows-x86_64" checksum="02f7c3477830eb17699cc91774438edd8651f3ec0031582c67093ae3276f360b" /> <package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-linux64_py310-centos_debug-dev_omniverse" platforms="linux-x86_64" checksum="2ac18e0470d05b251a2f36691a1dc1b28da340da92b19175d890addb762adb0f"/> <package name="nv-usd" version="22.11.nv.0.2.1058.7d2f59ad-linux-aarch64_py310_debug-dev_omniverse" platforms="linux-aarch64" checksum="904ede636008fb011b5f3d66c1a7c2969dfba291dcf1a227fa7503a714f1f18d" /> </dependency> </project>
1,497
XML
67.090906
210
0.739479
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/tools/repoman/repoman.py
import os import sys import io import contextlib import packmanapi REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..") REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml") def bootstrap(): """ Bootstrap all omni.repo modules. Pull with packman from repo.packman.xml and add them all to python sys.path to enable importing. """ #with contextlib.redirect_stdout(io.StringIO()): deps = packmanapi.pull(REPO_DEPS_FILE) for dep_path in deps.values(): if dep_path not in sys.path: sys.path.append(dep_path) if __name__ == "__main__": bootstrap() import omni.repo.man omni.repo.man.main(REPO_ROOT)
703
Python
23.275861
100
0.661451
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/mf/ov/lidar_live_synth/__init__.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. ## # This file is needed so tests don't fail.
480
Python
42.727269
77
0.785417
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/config/extension.toml
[package] version = "0.2.0" title = "MF Lidar live synthetic data" description = "Send real-time Lidar synthetic point cloud data from Omniverse to third party software." category = "Graph" keywords = ["lidar", "UDP", "omnigraph", "Graph", "Node", "OmniGraph", "synthetic", "realtime"] preview_image = "data/preview.png" icon = "data/icon.png" changelog = "docs/CHANGELOG.md" readme = "docs/README.md" authors = ["Moment Factory","Frederic Lestage","Steven Beliveau"] repository = "https://github.com/MomentFactory/Omniverse-Lidar-extension" [dependencies] "omni.graph" = {} [[python.module]] name = "mf.ov.lidar_live_synth" [[native.plugin]] path = "bin/*.plugin" [documentation] pages = [ "docs/README.md", "docs/CHANGELOG.md", ] [package.target] kit = ["105.1"] [package.writeTarget] kit = true python = false
829
TOML
22.055555
103
0.694813
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/plugins/mf.ov.lidar_live_synth/LidarLiveSyntheticDataExtension.cpp
#define CARB_EXPORTS #include <carb/PluginUtils.h> #include <omni/ext/IExt.h> #include <omni/graph/core/IGraphRegistry.h> #include <omni/graph/core/ogn/Database.h> #include <omni/graph/core/ogn/Registration.h> // Standard plugin definitions required by Carbonite. const struct carb::PluginImplDesc pluginImplDesc = { "mf.ov.lidar_live_synth.plugin", "MF Lidar live synthetic data.", "MF", carb::PluginHotReload::eEnabled, "dev" }; // These interface dependencies are required by all OmniGraph node types CARB_PLUGIN_IMPL_DEPS(omni::graph::core::IGraphRegistry, omni::fabric::IPath, omni::fabric::IToken) // This macro sets up the information required to register your node type definitions with OmniGraph DECLARE_OGN_NODES() namespace mf { namespace ov { namespace lidar_live_synth { class LidarLiveSyntheticDataExtension : public omni::ext::IExt { public: void onStartup(const char* extId) override { // This macro walks the list of pending node type definitions and registers them with OmniGraph INITIALIZE_OGN_NODES() } void onShutdown() override { // This macro walks the list of registered node type definitions and deregisters all of them. This is required // for hot reload to work. RELEASE_OGN_NODES() } private: }; } } } CARB_PLUGIN_IMPL(pluginImplDesc, mf::ov::lidar_live_synth::LidarLiveSyntheticDataExtension) void fillInterface(mf::ov::lidar_live_synth::LidarLiveSyntheticDataExtension& iface) { }
1,622
C++
26.982758
118
0.676326
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/plugins/nodes/OgnBeamToOusterUDPNode.cpp
#include <OgnBeamToOusterUDPNodeDatabase.h> #include <chrono> #define WIN32_LEAN_AND_MEAN #define _WINSOCK_DEPRECATED_NO_WARNINGS #ifdef _WIN32 #include <Winsock2.h> #else #include <arpa/inet.h> #include <netdb.h> #include <netinet/in.h> #include <sys/select.h> #include <sys/socket.h> #include <sys/types.h> #define SOCKET int #define INVALID_SOCKET (SOCKET)(~0) #define SOCKET_ERROR (-1) #define closesocket close #define SOCKADDR sockaddr #endif namespace mf { namespace ov { namespace lidar_live_synth { static const int kColumnsPerPacket = 16; static const float kPi = 3.14159265359f; static const float kTwoPi = kPi * 2.0f; static const float kDegToRad = kTwoPi / 360.0f; static const int kOusterNumRotAngles = 90112; static const float kOusterNumRotAnglesOverTwoPi = kOusterNumRotAngles / kTwoPi; class OgnBeamToOusterUDPNode { int m_frameId{ 0 }; #pragma pack(push,4) // Force packing in 4-byte packs (Words) struct OusterChannelDataBlock { unsigned int rangemm; unsigned short reflectivity; unsigned short signal_photons; unsigned short noise_photons; unsigned short unused; OusterChannelDataBlock() : rangemm(0) , reflectivity(0) , signal_photons(0) , noise_photons(0) , unused(0) {} }; template <int NUMROWS> struct OusterAzimuthBlock { unsigned long long timeStamp; // Word 0,1 unsigned short measurementId; // Word 2[0:15] unsigned short frameId; // Word 2[16:31] unsigned int encoderCount; // Word 3 OusterChannelDataBlock channelDataBlock[NUMROWS]; // Word [4:195] in groups of 3 unsigned int azimuthDataBlockStatus; // word 196 OusterAzimuthBlock() : timeStamp(0) , measurementId(0) , frameId(0) , encoderCount(0) , channelDataBlock{} , azimuthDataBlockStatus(0) {} }; template <int NUMROWS> struct OusterDataPacket { OusterAzimuthBlock<NUMROWS> block[16]; // Each packet consists of 16 azimuth blocks OusterDataPacket() :block{} {} }; #pragma pack(pop) class OgnBeamToOusterUDPNodeSocket { public: OgnBeamToOusterUDPNodeSocket() : SendSocket(INVALID_SOCKET) , isBroadcastSocket(false) {} virtual ~OgnBeamToOusterUDPNodeSocket() { if (SendSocket != INVALID_SOCKET) { closesocket(SendSocket); } } bool prepare(OgnBeamToOusterUDPNodeDatabase& db) { if (isBroadcastSocket != db.inputs.broadcast()) { closesocket(SendSocket); SendSocket = INVALID_SOCKET; } if (SendSocket == INVALID_SOCKET) { SendSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); if (SendSocket == INVALID_SOCKET) { db.logError("Error in OgnBeamToOusterUDPNode opening socket : %d", SendSocket); return false; } if (db.inputs.broadcast()) { char broadcast = 1; int iResult = setsockopt(SendSocket, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof(broadcast)); if (!iResult) { closesocket(SendSocket); SendSocket = INVALID_SOCKET; db.logError("Error in OgnBeamToOusterUDPNode setting socket options : %d", iResult); return false; } } isBroadcastSocket = db.inputs.broadcast(); } RecvAddr.sin_family = AF_INET; RecvAddr.sin_port = htons(db.inputs.port()); std::string ipAddress = db.inputs.ip_address(); RecvAddr.sin_addr.s_addr = inet_addr(ipAddress.data()); return true; } template <int NUMROWS> bool send(const OusterDataPacket<NUMROWS>& packet, OgnBeamToOusterUDPNodeDatabase& db) { int iResult = sendto(SendSocket, reinterpret_cast<const char*>(&packet), sizeof(packet), 0, (SOCKADDR*)&RecvAddr, sizeof(RecvAddr)); if (iResult == SOCKET_ERROR) { db.logError("Error in OgnBeamToOusterUDPNode sending data on socket : %d", iResult); return false; } return true; } private: SOCKET SendSocket; sockaddr_in RecvAddr; bool isBroadcastSocket; }; OgnBeamToOusterUDPNodeSocket 
m_ognBeamToOusterUDPNodeSocket; template<int NUMROWS> static bool computeForSize(OgnBeamToOusterUDPNodeDatabase& db) { auto& state = db.internalState<OgnBeamToOusterUDPNode>(); const auto& linearDepthData = db.inputs.linearDepthData(); const int& numCols = db.inputs.numCols(); const float& azimuthStart = db.inputs.azimuthRange()[0] + kTwoPi + kTwoPi; const float& horizontalStepInRads = -1.0f * db.inputs.horizontalResolution() * kDegToRad; const int& frameId = state.m_frameId % 65536; try { if (!state.m_ognBeamToOusterUDPNodeSocket.prepare(db)) { return false; } int measurementId = 0; OusterDataPacket<NUMROWS> packet; int currentChunkColumn = 0; // We need to send data in ascending angle (encoder_count) order // Data is in right-to-left order, we need to iterate left-to-right // We also need to start at the middle (center) of the data which is encoderCount 0 int colEndIndex = (numCols - 1) / 2; int colStartIndex = colEndIndex + numCols; for (int tempColIndex = colStartIndex; tempColIndex > colEndIndex; tempColIndex--) { int colIndex = tempColIndex % numCols; // This assumes consistent input data across azimuthRange, horizontalResolution, numCols, numRows and linearDepthData size int currentEncoderCount = int((azimuthStart + horizontalStepInRads * tempColIndex) * kOusterNumRotAnglesOverTwoPi); if (currentEncoderCount < 0 || currentEncoderCount >= kOusterNumRotAngles) { db.logError("currentEncoderCount must be between 0 and %d, not %d", kOusterNumRotAngles, currentEncoderCount); return false; } // If previous chunk is complete, start new one if (currentChunkColumn == kColumnsPerPacket) { state.m_ognBeamToOusterUDPNodeSocket.send<NUMROWS>(packet, db); packet = OusterDataPacket<NUMROWS>(); currentChunkColumn = 0; } packet.block[currentChunkColumn].timeStamp = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); packet.block[currentChunkColumn].measurementId = measurementId; packet.block[currentChunkColumn].frameId = frameId; packet.block[currentChunkColumn].encoderCount = currentEncoderCount; measurementId = (measurementId + 1) % 65536; int colIndexStart = colIndex * NUMROWS; for (int rowIndex = 0; rowIndex < NUMROWS; rowIndex++) { packet.block[currentChunkColumn].channelDataBlock[rowIndex].rangemm = (int)(linearDepthData[colIndexStart + rowIndex] * 1000.0f); packet.block[currentChunkColumn].channelDataBlock[rowIndex].signal_photons = 0xFFFF; //0xFFFF means valid } packet.block[currentChunkColumn].azimuthDataBlockStatus = 0xFFFFFFFF; //0xFFFFFFFF means valid currentChunkColumn++; } if (currentChunkColumn != 0) { for (int extraColumnIndex = currentChunkColumn; extraColumnIndex < kColumnsPerPacket; extraColumnIndex++) { packet.block[extraColumnIndex].timeStamp = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); packet.block[extraColumnIndex].measurementId = measurementId; packet.block[extraColumnIndex].frameId = frameId; packet.block[extraColumnIndex].encoderCount = kOusterNumRotAngles; } state.m_ognBeamToOusterUDPNodeSocket.send<NUMROWS>(packet, db); } } catch (...) 
{ db.logError("Error in OgnBeamToOusterUDPNode::compute"); return false; } state.m_frameId++; // Always enable the output execution db.outputs.execOut() = omni::graph::core::ExecutionAttributeState::kExecutionAttributeStateEnabled; // Even if inputs were edge cases like empty arrays, correct outputs mean success return true; } public: static bool compute(OgnBeamToOusterUDPNodeDatabase& db) { // TODO: why is state declared here // auto& state = db.internalState<OgnBeamToOusterUDPNode>(); const int& numRows = db.inputs.numRows(); switch (numRows) { case 16: return computeForSize<16>(db); break; case 32: return computeForSize<32>(db); break; case 64: return computeForSize<64>(db); break; case 128: return computeForSize<128>(db); break; } db.logError("Row count must be either 16, 32, 64 or 128, not %d", numRows); return false; } }; // This macro provides the information necessary to OmniGraph that lets it automatically register and deregister // your node type definition. REGISTER_OGN_NODE() } } }
10,237
C++
32.348534
149
0.58142
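Because the packet layout is fixed by the `#pragma pack(4)` structs above (a 16-byte azimuth-block header, `NUMROWS` 12-byte channel blocks, then a 4-byte status word, 16 blocks per packet), a receiver can unpack it with nothing but `struct`. The sketch below assumes little-endian byte order, which matches the x86_64 hosts the extension targets; ranges are in millimetres, as written by the node.

```python
# Receiver-side sketch that unpacks azimuth blocks exactly as laid out by
# OgnBeamToOusterUDPNode.cpp (little-endian assumed).
import struct

HEADER_FMT = "<QHHI"    # timeStamp, measurementId, frameId, encoderCount (16 bytes)
CHANNEL_FMT = "<IHHHH"  # rangemm, reflectivity, signal_photons, noise_photons, unused (12 bytes)
BLOCKS_PER_PACKET = 16
HEADER_SIZE = struct.calcsize(HEADER_FMT)
CHANNEL_SIZE = struct.calcsize(CHANNEL_FMT)

def parse_packet(data: bytes, num_rows: int):
    """Yield (frame_id, measurement_id, encoder_count, ranges_mm) per azimuth block."""
    block_size = HEADER_SIZE + num_rows * CHANNEL_SIZE + 4  # +4: azimuthDataBlockStatus
    for b in range(BLOCKS_PER_PACKET):
        base = b * block_size
        _ts, measurement_id, frame_id, encoder_count = struct.unpack_from(HEADER_FMT, data, base)
        ranges_mm = [
            struct.unpack_from(CHANNEL_FMT, data, base + HEADER_SIZE + r * CHANNEL_SIZE)[0]
            for r in range(num_rows)
        ]
        yield frame_id, measurement_id, encoder_count, ranges_mm
```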
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/docs/CHANGELOG.md
# Changelog ## [0.2.0] - 2023-12-20 ### Modified - Adapted for compatibility with kit 105 - Enhanced documentation ### Added - Linux support thanks to [@Samahu](https://github.com/Samahu)'s PR on Github ## [0.1.3] - 2023-08-30 ### Changed - Version bump for registry publishing ## [0.1.2] - 2023-08-30 ### Added - New example with more Lidars ### Modified - Now comes as C++ for maximum performance. ## [0.1.1] - 2023-05-09 ### Added - Documentation ### Modified - Name of the Node - Icon ## [0.1.0] - 2023-05-09 ### Added - Action Graph Node that sends Isaac Lidar Point Cloud data in UDP
603
Markdown
14.894736
77
0.658375
MomentFactory/Omniverse-Lidar-Live-Synthetic-Data/source/extensions/mf.ov.lidar_live_synth/docs/README.md
# MF Lidar live synthetic data [mf.ov.lidar_live_synth] Adds an Action Graph Node ("Generic/Beam to Ouster UDP") to send Isaac beam data via the Ouster(tm) UDP protocol. This allows any third-party software implementing Ouster(tm) lidars to be connected to simulated sensors instead of physical sensors. Developed for kit 105.1 and currently working only in Isaac Sim. This extension provides pre-built binaries for Windows and Linux x86_64. You may want to compile from the [source code](https://github.com/MomentFactory/Omniverse-Lidar-Live-Synthetic-Data).
562
Markdown
69.374991
133
0.798932
openhackathons-org/End-to-End-AI-for-Science/CONTRIBUTING.md
Contributing ------------ Please use the following guidelines when contributing to this project. Before contributing significant changes, please begin a discussion of the desired changes via a GitHub Issue to prevent doing unnecessary or overlapping work. ## License The preferred license for source code contributed to this project is the Apache License 2.0 (https://www.apache.org/licenses/LICENSE-2.0) and for documentation, including Jupyter notebooks and text documentation, is the Creative Commons Attribution 4.0 International (CC BY 4.0) (https://creativecommons.org/licenses/by/4.0/). Contributions under other, compatible licenses will be considered on a case-by-case basis. ## Styling Please use the following style guidelines when making contributions. ### Source Code * Tab indentation, no spaces * To the extent possible, variable names should be descriptive * Code should be documented with details on what each function does and returns, making the code readable. Each file should also begin with the proper license header. * The following file extensions should be used appropriately: * Python = .py ### Jupyter Notebooks & Markdown * When they appear inline with the text, directive names, clauses, function or subroutine names, variable names, file names, commands and command-line arguments should appear between two backticks. * Code blocks should begin with three backticks and either 'python' or 'yaml' to enable appropriate source formatting and end with three backticks. * Leave an empty line before and after the codeblock. Emphasis, including quotes made for emphasis and the introduction of new terms, should be highlighted between a single pair of asterisks. * A level 1 heading should appear at the top of the notebook as the title of the notebook. * A horizontal rule should appear between sections that begin with a level 2 heading. Please refer to the following template for Jupyter notebook styling in the GitHub repository: misc/jupyter_lab_template ## Contributing Labs/Modules ### Directory structure for GitHub Before starting to work on a new lab, it is important to follow the recommended git structure as shown below to avoid reformatting. Each lab will have the following files/directories consisting of training material for the lab. * jupyter_notebook folder: Consists of Jupyter notebooks and their corresponding images. * source_code folder: Source code is stored in a separate directory because some clusters may not support Jupyter notebooks. During such bootcamps, we should be able to use the source code directly from this directory. The source_code folder may optionally contain a Makefile, especially for HPC labs. * presentations: Consists of presentations for the labs (PDF format is preferred) * Dockerfile and Singularity: Each lab should have both Docker and Singularity recipes. The lab may optionally add a custom license in case of any deviation from the top-level directory license (Apache 2.0). The base of the module contains individual subdirectories with versions of the module for each programming language (C/C++/Fortran…). Each of these directories should contain a directory for each language translation provided (English, for instance). Each lab translation and programming language combination should have a solutions directory containing correct solutions. Additionally, there are two folders, "experimental" and "archived", for labs covering features that are in an early-access phase (not stable) or deprecated, respectively. 
### Git Branching Adding a new feature/lab follows a forking workflow, which means feature branch development happens on a forked repo that later gets merged into our original project (OpenHackathons.org) repository. ![Git Branching Workflow](workspace/python/jupyter_notebook/images/git_branching.jpg) The 5 main steps depicted in the image above are as follows: 1. Fork: To create a new lab/feature the repository must be forked. A fork creates a snapshot of the repository at the time it was forked. Any new feature/lab that will be developed should be based on the develop branch of the repository. 2. Clone: The developer can then clone this new repository to a local machine. 3. Create Feature Branch: Create a new branch with a feature name in which your changes will be done. The recommended naming convention for the feature branch is: <feature_name>. The new changes that the developer makes can be added, committed and pushed. 4. Push: After the changes are committed, the developer pushes the changes to the remote branch. The push command uploads the local changes to the GitHub repository. 5. Pull: Submit a pull request. Upon receiving the pull request, a Hackathon team reviewer/owner will review the changes and, upon accepting them, merge them into the develop branch of OpenHackathons.org. Git branch details are as follows: * main branch: Consists of the stable branch. * origin/main is the main branch, where the source code of HEAD always reflects a production-ready state * Merge request is possible through: develop branch * develop branch: branched from the main branch * Must branch from: main branch * Must merge back into: main branch * It is the main development branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. * When the source code in the develop branch reaches a stable point and is ready to be released, all of the changes should be merged back into main and then tagged with a release number * All feature development should happen by forking and branching from the develop branch only.
5,650
Markdown
76.410958
502
0.80354
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/dataset.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import gdown import os ## FCN Dataset url = 'https://drive.google.com/uc?id=1mSN6eLqPYEo9d9pBjSGzQ-ocLd8itP0P&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/fourcastnet/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN Pre-trained url = 'https://drive.google.com/uc?id=1oSkK69LGP3DfU2tlH5iaejOh94VNsMDu&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/../jupyter_notebook/FourCastNet/pre_trained.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## NS Data url = 'https://drive.google.com/uc?id=1IXEGbM3NOO6Dig1sxG1stHubwb09-D2N&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/navier_stokes/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN for Omniverse-P1 url = 'https://drive.google.com/uc?id=16YqSnstqoSJdgBzerbzYIkYagwS12lK3&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/FCN.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## FCN for Omniverse-P2 url = 'https://drive.google.com/uc?id=1lSSx8eKfqCcHAbDvXTeUMoZGHfVQe-HG&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+ '/FCN/dataset.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output) ## Download and Install Omniverse url = 'https://drive.google.com/uc?id=1DugS2IbHhBPyCE-EuZczLHBZnlnFViIm&export=download' output = str(os.path.realpath(os.path.dirname(__file__)))+'/ov.zip' gdown.cached_download(url, output, quiet=False,proxy=None,postprocess=gdown.extractall) os.remove(output)
2,958
Python
46.725806
110
0.772481
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_solver.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np from sympy import Symbol, Eq import modulus from modulus.sym.hydra import ModulusConfig, instantiate_arch from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Point1D from modulus.sym.geometry import Parameterization from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseBoundaryConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.key import Key from modulus.sym.node import Node from spring_mass_ode import SpringMass @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: # make list of nodes to unroll graph on sm = SpringMass(k=(2, 1, 1, 2), m=(1, 1, 1)) sm_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x1"), Key("x2"), Key("x3")], cfg=cfg.arch.fully_connected, ) nodes = sm.make_nodes() + [ sm_net.make_node(name="spring_mass_network", jit=cfg.jit) ] # add constraints to solver # make geometry geo = Point1D(0) t_max = 10.0 t_symbol = Symbol("t") x = Symbol("x") time_range = {t_symbol: (0, t_max)} # make domain domain = Domain() # initial conditions IC = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0}, batch_size=cfg.batch_size.IC, lambda_weighting={ "x1": 1.0, "x2": 1.0, "x3": 1.0, "x1__t": 1.0, "x2__t": 1.0, "x3__t": 1.0, }, parameterization=Parameterization({t_symbol: 0}), ) domain.add_constraint(IC, name="IC") # solve over given time period interior = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"ode_x1": 0.0, "ode_x2": 0.0, "ode_x3": 0.0}, batch_size=cfg.batch_size.interior, parameterization=Parameterization(time_range), ) domain.add_constraint(interior, "interior") # add validation data deltaT = 0.001 t = np.arange(0, t_max, deltaT) t = np.expand_dims(t, axis=-1) invar_numpy = {"t": t} outvar_numpy = { "x1": (1 / 6) * np.cos(t) + (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), "x2": (2 / 6) * np.cos(t) + (0 / 2) * np.cos(np.sqrt(3) * t) - (1 / 3) * np.cos(2 * t), "x3": (1 / 6) * np.cos(t) - (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), } validator = PointwiseValidator( nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=1024 ) domain.add_validator(validator) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ 
== "__main__": run()
4,033
Python
31.532258
81
0.631044
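The spring-mass solver above trains a fully connected network on the ODE residuals and validates it against a closed-form solution. A minimal post-processing sketch in the spirit of plot_results.py later in this listing: load the validator's .npz output and plot predicted versus true displacements. The run directory, the default file name "validator.npz", and the pred_/true_ key naming are assumptions based on that plotting script, so adjust them to the actual Hydra output folder.

import numpy as np
import matplotlib.pyplot as plt

# Assumed location: outputs/<script name>/validators/<validator name>.npz
val_path = "./outputs/spring_mass_solver/validators/validator.npz"
data = np.atleast_1d(np.load(val_path, allow_pickle=True).f.arr_0)[0]

for var in ("x1", "x2", "x3"):
    plt.plot(data["t"][:, 0], data[f"pred_{var}"][:, 0], "--", label=f"{var} pred")
    plt.plot(data["t"][:, 0], data[f"true_{var}"][:, 0], label=f"{var} true")
plt.legend()
plt.savefig("spring_mass_validation.png")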
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_ode.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from sympy import Symbol, Function, Number from modulus.sym.eq.pde import PDE class SpringMass(PDE): name = "SpringMass" def __init__(self, k=(2, 1, 1, 2), m=(1, 1, 1)): self.k = k self.m = m k1 = k[0] k2 = k[1] k3 = k[2] k4 = k[3] m1 = m[0] m2 = m[1] m3 = m[2] t = Symbol("t") input_variables = {"t": t} x1 = Function("x1")(*input_variables) x2 = Function("x2")(*input_variables) x3 = Function("x3")(*input_variables) if type(k1) is str: k1 = Function(k1)(*input_variables) elif type(k1) in [float, int]: k1 = Number(k1) if type(k2) is str: k2 = Function(k2)(*input_variables) elif type(k2) in [float, int]: k2 = Number(k2) if type(k3) is str: k3 = Function(k3)(*input_variables) elif type(k3) in [float, int]: k3 = Number(k3) if type(k4) is str: k4 = Function(k4)(*input_variables) elif type(k4) in [float, int]: k4 = Number(k4) if type(m1) is str: m1 = Function(m1)(*input_variables) elif type(m1) in [float, int]: m1 = Number(m1) if type(m2) is str: m2 = Function(m2)(*input_variables) elif type(m2) in [float, int]: m2 = Number(m2) if type(m3) is str: m3 = Function(m3)(*input_variables) elif type(m3) in [float, int]: m3 = Number(m3) self.equations = {} self.equations["ode_x1"] = m1 * (x1.diff(t)).diff(t) + k1 * x1 - k2 * (x2 - x1) self.equations["ode_x2"] = ( m2 * (x2.diff(t)).diff(t) + k2 * (x2 - x1) - k3 * (x3 - x2) ) self.equations["ode_x3"] = m3 * (x3.diff(t)).diff(t) + k3 * (x3 - x2) + k4 * x3
2,999
Python
34.294117
87
0.585195
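The residuals defined in spring_mass_ode.py can be checked without Modulus. A short SymPy sketch verifying that the closed-form solution used by the solver's validator satisfies all three equations for k = (2, 1, 1, 2), m = (1, 1, 1), along with the initial conditions x1(0) = 1, x2(0) = x3(0) = 0:

import sympy as sp

t = sp.symbols("t")
x1 = sp.Rational(1, 6) * sp.cos(t) + sp.Rational(1, 2) * sp.cos(sp.sqrt(3) * t) + sp.Rational(1, 3) * sp.cos(2 * t)
x2 = sp.Rational(2, 6) * sp.cos(t) - sp.Rational(1, 3) * sp.cos(2 * t)
x3 = sp.Rational(1, 6) * sp.cos(t) - sp.Rational(1, 2) * sp.cos(sp.sqrt(3) * t) + sp.Rational(1, 3) * sp.cos(2 * t)

k1, k2, k3, k4 = 2, 1, 1, 2
m1, m2, m3 = 1, 1, 1
ode_x1 = m1 * x1.diff(t, 2) + k1 * x1 - k2 * (x2 - x1)
ode_x2 = m2 * x2.diff(t, 2) + k2 * (x2 - x1) - k3 * (x3 - x2)
ode_x3 = m3 * x3.diff(t, 2) + k3 * (x3 - x2) + k4 * x3

print(sp.simplify(ode_x1), sp.simplify(ode_x2), sp.simplify(ode_x3))  # 0 0 0
print(x1.subs(t, 0), x2.subs(t, 0), x3.subs(t, 0))                    # 1 0 0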
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/spring_mass_inverse.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from sympy import Symbol, Eq import modulus from modulus.sym.hydra import ModulusConfig, instantiate_arch from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Point1D from modulus.sym.geometry import Parameterization from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.domain.monitor import PointwiseMonitor from modulus.sym.key import Key from modulus.sym.node import Node from spring_mass_ode import SpringMass @modulus.sym.main(config_path="conf", config_name="config_inverse") def run(cfg: ModulusConfig) -> None: # prepare data t_max = 10.0 deltaT = 0.01 t = np.arange(0, t_max, deltaT) t = np.expand_dims(t, axis=-1) invar_numpy = {"t": t} outvar_numpy = { "x1": (1 / 6) * np.cos(t) + (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), "x2": (2 / 6) * np.cos(t) + (0 / 2) * np.cos(np.sqrt(3) * t) - (1 / 3) * np.cos(2 * t), "x3": (1 / 6) * np.cos(t) - (1 / 2) * np.cos(np.sqrt(3) * t) + (1 / 3) * np.cos(2 * t), } outvar_numpy.update({"ode_x1": np.full_like(invar_numpy["t"], 0)}) outvar_numpy.update({"ode_x2": np.full_like(invar_numpy["t"], 0)}) outvar_numpy.update({"ode_x3": np.full_like(invar_numpy["t"], 0)}) # make list of nodes to unroll graph on sm = SpringMass(k=(2, 1, 1, "k4"), m=("m1", 1, 1)) sm_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x1"), Key("x2"), Key("x3")], cfg=cfg.arch.fully_connected, ) invert_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("m1"), Key("k4")], cfg=cfg.arch.fully_connected, ) nodes = ( sm.make_nodes( detach_names=[ "x1", "x1__t", "x1__t__t", "x2", "x2__t", "x2__t__t", "x3", "x3__t", "x3__t__t", ] ) + [sm_net.make_node(name="spring_mass_network", jit=cfg.jit)] + [invert_net.make_node(name="invert_network", jit=cfg.jit)] ) # add constraints to solver # make geometry geo = Point1D(0) t_symbol = Symbol("t") x = Symbol("x") time_range = {t_symbol: (0, t_max)} # make domain domain = Domain() # initial conditions IC = PointwiseBoundaryConstraint( nodes=nodes, geometry=geo, outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0}, batch_size=cfg.batch_size.IC, lambda_weighting={ "x1": 1.0, "x2": 1.0, "x3": 1.0, "x1__t": 1.0, "x2__t": 1.0, "x3__t": 1.0, }, 
parameterization=Parameterization({t_symbol: 0}), ) domain.add_constraint(IC, name="IC") # data and pdes data = PointwiseConstraint.from_numpy( nodes=nodes, invar=invar_numpy, outvar=outvar_numpy, batch_size=cfg.batch_size.data, ) domain.add_constraint(data, name="Data") # add monitors monitor = PointwiseMonitor( invar_numpy, output_names=["m1"], metrics={"mean_m1": lambda var: torch.mean(var["m1"])}, nodes=nodes, ) domain.add_monitor(monitor) monitor = PointwiseMonitor( invar_numpy, output_names=["k4"], metrics={"mean_k4": lambda var: torch.mean(var["k4"])}, nodes=nodes, ) domain.add_monitor(monitor) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
4,988
Python
29.796296
81
0.591419
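spring_mass_inverse.py fits data generated from k = (2, 1, 1, 2), m = (1, 1, 1), so the two monitors should converge towards m1 = 1 and k4 = 2. A small sketch for checking that after training; the monitor CSV location and column layout below are assumptions about Modulus' default output structure, not something defined in this repo:

import numpy as np

for metric, target in [("mean_m1", 1.0), ("mean_k4", 2.0)]:
    # assumed layout: one CSV per monitor metric with a header row
    path = f"./outputs/spring_mass_inverse/monitors/{metric}.csv"
    steps, values = np.loadtxt(path, delimiter=",", skiprows=1, unpack=True)
    print(f"{metric}: final value {values[-1]:.4f} (expected ~{target})")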
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/spring_mass/conf/config_inverse.yaml
defaults :
  - modulus_default
  - arch:
      - fully_connected
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

arch:
  fully_connected:
    layer_size: 256

save_filetypes : "vtk,npz"

scheduler:
  decay_rate: 0.95
  decay_steps: 100

training:
  rec_results_freq: 1000
  max_steps : 10000

batch_size:
  IC: 10
  data: 1000
364
YAML
12.518518
32
0.634615
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/projectile.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np from sympy import Symbol, sin, cos, pi, Eq import torch import modulus from modulus.sym.hydra import instantiate_arch, ModulusConfig from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Line1D,Point1D from modulus.sym.geometry.primitives_2d import Rectangle from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseInteriorConstraint, ) from modulus.sym.domain.inferencer import PointwiseInferencer from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.key import Key from modulus.sym.node import Node from projectile_eqn import ProjectileEquation from modulus.sym.utils.io import ( csv_to_dict, ValidatorPlotter, InferencerPlotter, ) @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: #Creating Nodes and Domain pe = ProjectileEquation() projectile_net = instantiate_arch( input_keys=[Key("t")], output_keys=[Key("x"),Key("y")], cfg=cfg.arch.fully_connected, ) nodes = pe.make_nodes() + [projectile_net.make_node(name="projectile_network")] x, y, t = Symbol("x"), Symbol("y"), Symbol("t") #Creating Geometry and adding constraint geo = Point1D(0) #make domain projectile_domain = Domain() #add constraint to solver v_o = 40.0 theta = np.pi/3 time_range = {t :(0.0,5.0)} #initial condition # Set boundary to be only left boundary IC = PointwiseBoundaryConstraint( nodes = nodes, geometry = geo, outvar = {"x": 0.0,"y":0.0, "x__t":v_o*cos(theta), "y__t":v_o*sin(theta)}, batch_size = cfg.batch_size.initial_x, parameterization = {t:0.0} ) projectile_domain.add_constraint(IC,"IC") #interior interior = PointwiseBoundaryConstraint( nodes = nodes, geometry = geo, outvar = {"ode_x":0.0,"ode_y":-9.81}, batch_size = cfg.batch_size.interior, parameterization = time_range, ) projectile_domain.add_constraint(interior,"interior") # Setup validator delta_T = 0.01 t_val = np.arange(0.,5.,delta_T) T_val = np.expand_dims(t_val.flatten(), axis = -1) X_val = v_o*np.cos(theta)*T_val Y_val = v_o*np.sin(theta)*T_val - 0.5*9.81*(T_val**2) invar_numpy = {"t": T_val} outvar_numpy = {"x":X_val, "y": Y_val} validator = PointwiseValidator( nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=128, plotter = ValidatorPlotter(), ) projectile_domain.add_validator(validator) # Setup Inferencer t_infe = np.arange(0,8,0.001) T_infe = 
np.expand_dims(t_infe.flatten(), axis = -1) invar_infe = {"t":T_infe} grid_inference = PointwiseInferencer( nodes=nodes, invar=invar_infe, output_names=["x","y"], batch_size=128, plotter=InferencerPlotter(), ) projectile_domain.add_inferencer(grid_inference, "inferencer_data") #make solver slv = Solver(cfg, projectile_domain) #start solve slv.solve() if __name__ == "__main__": run()
4,482
Python
25.370588
86
0.657073
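projectile.py validates against the analytic trajectory x(t) = v_o*cos(theta)*t, y(t) = v_o*sin(theta)*t - 0.5*9.81*t**2. A quick NumPy check of the key numbers for v_o = 40, theta = pi/3, which also shows that the 0 to 8 s inferencer window covers the whole flight:

import numpy as np

v_o, theta, g = 40.0, np.pi / 3, 9.81
t_flight = 2 * v_o * np.sin(theta) / g          # time until y returns to zero
x_range = v_o * np.cos(theta) * t_flight        # horizontal range
y_apex = (v_o * np.sin(theta)) ** 2 / (2 * g)   # maximum height

print(f"flight time ~ {t_flight:.2f} s")   # ~7.06 s, inside the 0-8 s inference window
print(f"range       ~ {x_range:.1f} m")    # ~141.2 m
print(f"apex height ~ {y_apex:.1f} m")     # ~61.2 m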
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/projectile/conf/config.yaml
defaults :
  - modulus_default
  - arch:
      - fully_connected
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

save_filetypes : "vtk,npz"

scheduler:
  decay_rate: 0.95
  decay_steps: 100

training:
  rec_validation_freq: 1000
  rec_inference_freq: 2000
  rec_monitor_freq: 1000
  rec_constraint_freq: 2000
  max_steps : 5000

batch_size:
  initial_x: 100
  interior: 1000

graph:
  func_arch: true

cuda_graphs: True
cuda_graph_warmup: 20
479
YAML
14
32
0.670146
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/fourcastnet.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Script to train Fourcastnet on ERA5 # Ref: https://arxiv.org/abs/2202.11214 import modulus from modulus.sym.hydra.config import ModulusConfig from modulus.sym.key import Key from modulus.sym.domain import Domain from modulus.sym.domain.constraint import SupervisedGridConstraint from modulus.sym.domain.validator import GridValidator from modulus.sym.solver import Solver from modulus.sym.utils.io import GridValidatorPlotter from src.dataset import ERA5HDF5GridDataset from src.fourcastnet import FourcastNetArch from src.loss import LpLoss @modulus.sym.main(config_path="conf", config_name="config_FCN") def run(cfg: ModulusConfig) -> None: # load training/ test data channels = list(range(cfg.custom.n_channels)) train_dataset = ERA5HDF5GridDataset( cfg.custom.training_data_path, chans=channels, tstep=cfg.custom.tstep, n_tsteps=cfg.custom.n_tsteps, patch_size=cfg.arch.afno.patch_size, ) test_dataset = ERA5HDF5GridDataset( cfg.custom.test_data_path, chans=channels, tstep=cfg.custom.tstep, n_tsteps=cfg.custom.n_tsteps, patch_size=cfg.arch.afno.patch_size, n_samples_per_year=20, ) # define input/output keys input_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.invar_keys] output_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.outvar_keys] # make list of nodes to unroll graph on model = FourcastNetArch( input_keys=input_keys, output_keys=output_keys, img_shape=test_dataset.img_shape, patch_size=cfg.arch.afno.patch_size, embed_dim=cfg.arch.afno.embed_dim, depth=cfg.arch.afno.depth, num_blocks=cfg.arch.afno.num_blocks, ) nodes = [model.make_node(name="FCN")] # make domain domain = Domain() # add constraints to domain supervised = SupervisedGridConstraint( nodes=nodes, dataset=train_dataset, batch_size=cfg.batch_size.grid, loss=LpLoss(), num_workers=cfg.custom.num_workers.grid, ) domain.add_constraint(supervised, "supervised") # add validator val = GridValidator( nodes, dataset=test_dataset, batch_size=cfg.batch_size.validation, plotter=GridValidatorPlotter(n_examples=5), num_workers=cfg.custom.num_workers.validation, ) domain.add_validator(val, "test") # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
3,688
Python
33.157407
88
0.706345
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/inferencer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #"Script to carry out Fourcastnet inference" import omegaconf import torch import logging import numpy as np from torch.utils.data import DataLoader, Sampler from modulus.sym.hydra import to_absolute_path from modulus.sym.key import Key from modulus.sym.distributed.manager import DistributedManager from src.dataset import ERA5HDF5GridDataset from src.fourcastnet import FourcastNetArch from src.metrics import Metrics logging.basicConfig(format="[%(levelname)s] - %(message)s", level=logging.INFO) var_key_dict = { 0: "u10", 1: "v10", 2: "t2m", 3: "sp", 4: "msl", 5: "t850", 6: "u1000", 7: "v1000", 8: "z1000", 9: "u850", 10: "v850", 11: "z850", 12: "u500", 13: "v500", 14: "z500", 15: "t500", 16: "z50", 17: "r500", 18: "r850", 19: "tcwv", } def to_device(tensor_dict): return { key: torch.as_tensor(value, dtype=torch.float32, device=device) for key, value in tensor_dict.items() } class SubsetSequentialBatchSampler(Sampler): """Custom subset sequential batch sampler for inferencer""" def __init__(self, subset): self.subset = subset def __iter__(self): for i in self.subset: yield [i] # batch size of 1 def __len__(self): return len(self.subset) # load configuration cfg = omegaconf.OmegaConf.load("conf/config_FCN.yaml") model_path = to_absolute_path("fcn_era5.pth") # get device device = DistributedManager().device # load test data test_dataset = ERA5HDF5GridDataset( cfg.custom.test_data_path, # Test data location e.g. 
/era5/20var/test chans=list(range(cfg.custom.n_channels)), tstep=cfg.custom.tstep, n_tsteps=1, # set to one for inference patch_size=cfg.arch.afno.patch_size, ) m = Metrics( test_dataset.img_shape, clim_mean_path="/data/stats/time_means.npy", # Path to climate mean device=device ) # define input/output keys input_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.invar_keys] output_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.outvar_keys] # create model model = FourcastNetArch( input_keys=input_keys, output_keys=output_keys, img_shape=test_dataset.img_shape, patch_size=cfg.arch.afno.patch_size, embed_dim=cfg.arch.afno.embed_dim, depth=cfg.arch.afno.depth, num_blocks=cfg.arch.afno.num_blocks, ) # load parameters model.load_state_dict(torch.load(model_path)) model.to(device) logging.info(f"Loaded model {model_path}") # define subsets of dataset to run inference nics = 180 # Number of 2 day correl time samples nsteps = 25 last = len(test_dataset) - 1 - nsteps * cfg.custom.tstep # Variable dictionary acc_recursive = {key: [] for key in var_key_dict.values()} rmse_recursive = {key: [] for key in var_key_dict.values()} # Normalization stats mu = torch.tensor(test_dataset.mu[0]).to(device) # shape [C, 1, 1] sd = torch.tensor(test_dataset.sd[0]).to(device) # shape [C, 1, 1] # run inference with torch.no_grad(): for ic in range(0, min([8 * nics + 1, last])): subset = cfg.custom.tstep * np.arange(nsteps) + ic if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0: logging.info(f"Running IC at step {ic}") # get dataloader dataloader = DataLoader( dataset=test_dataset, batch_sampler=SubsetSequentialBatchSampler(subset), pin_memory=True, num_workers=1, worker_init_fn=test_dataset.worker_init_fn, ) acc_error = torch.zeros(nsteps, test_dataset.nchans) rmse_error = torch.zeros(nsteps, test_dataset.nchans) for tstep, (invar, true_outvar, _) in enumerate(dataloader): if tstep % 10 == 0: logging.info(f"ic: {ic} tstep: {tstep}/{nsteps}") # place tensors on device invar = to_device(invar) true_outvar = to_device(true_outvar) # 1. single step inference pred_outvar_single = model(invar) pred_single = sd * pred_outvar_single["x_t1"][0] # 2. recursive inference if tstep == 0: pred_outvar_recursive = model(invar) else: pred_outvar_recursive = model( {"x_t0": pred_outvar_recursive["x_t1"]} ) # get unormalised target / prediction true = sd * true_outvar["x_t1"][0] pred_recursive = sd * pred_outvar_recursive["x_t1"][0] # Calc metrics rmse_error[tstep] = m.weighted_rmse(pred_recursive, true).detach().cpu() acc_error[tstep] = m.weighted_acc(pred_recursive, true).detach().cpu() # Save fields into dictionary if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0: for i, fld in var_key_dict.items(): # Fields with 9 day (36) dc time if fld == "z500" or fld == "t2m" or fld == "t850": if (ic + 1) % 36 == 0 or ic == 0: acc_recursive[fld].append(acc_error[:, i].numpy()) rmse_recursive[fld].append(rmse_error[:, i].numpy()) # Rest have regular 2 day (8) dc time else: if (ic + 1) % 8 == 0 or ic == 0: acc_recursive[fld].append(acc_error[:, i].numpy()) rmse_recursive[fld].append(rmse_error[:, i].numpy()) # Field stacking for var_dict in [acc_recursive, rmse_recursive]: for key, value in var_dict.items(): print(f"{len(value)} samples for field {key}") var_dict[key] = np.stack(value, axis=0) np.save("rmse_recursive", rmse_recursive) np.save("acc_recursive", acc_recursive)
7,069
Python
33.827586
88
0.610553
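The inference script ends with np.save on two Python dicts, which NumPy stores as 0-d object arrays. A short sketch for reading them back and summarising the recursive-forecast skill per channel:

import numpy as np

acc = np.load("acc_recursive.npy", allow_pickle=True).item()
rmse = np.load("rmse_recursive.npy", allow_pickle=True).item()

for field in ("z500", "t2m", "t850"):
    # each entry has shape [n_initial_conditions, nsteps]
    print(field, acc[field].shape, "final-step mean ACC:", acc[field][:, -1].mean())
print("RMSE fields:", list(rmse.keys()))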
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/fourcastnet.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Defines the FCN architecture""" import logging import torch from torch import Tensor from typing import List, Tuple, Dict from modulus.sym.models.afno.afno import AFNONet from modulus.sym.models.arch import Arch from modulus.sym.key import Key class FourcastNetArch(Arch): "Defines the FourcastNet architecture" def __init__( self, input_keys: List[Key], output_keys: List[Key], img_shape: Tuple[int, int], detach_keys: List[Key] = [], patch_size: int = 16, embed_dim: int = 256, depth: int = 4, num_blocks: int = 4, ) -> None: """Fourcastnet model. This is a simple wrapper for Modulus' AFNO model. The only difference is that FourcastNet needs multi-step training. This class allows the model to auto-regressively predict multiple timesteps Parameters (Same as AFNO) ---------- input_keys : List[Key] Input key list. The key dimension size should equal the variables channel dim. output_keys : List[Key] Output key list. The key dimension size should equal the variables channel dim. 
img_shape : Tuple[int, int] Input image dimensions (height, width) detach_keys : List[Key], optional List of keys to detach gradients, by default [] patch_size : int, optional Size of image patchs, by default 16 embed_dim : int, optional Embedded channel size, by default 256 depth : int, optional Number of AFNO layers, by default 4 num_blocks : int, optional Number of blocks in the frequency weight matrices, by default 4 """ super().__init__( input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys, ) # get number of timesteps steps to unroll assert ( len(self.input_keys) == 1 ), "Error, FourcastNet only accepts one input variable (x_t0)" self.n_tsteps = len(self.output_keys) logging.info(f"Unrolling FourcastNet over {self.n_tsteps} timesteps") # get number of input/output channels in_channels = self.input_keys[0].size out_channels = self.output_keys[0].size # intialise AFNO kernel self._impl = AFNONet( in_channels=in_channels, out_channels=out_channels, patch_size=(patch_size, patch_size), img_size=img_shape, embed_dim=embed_dim, depth=depth, num_blocks=num_blocks, ) def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]: # prepare input tensor x = self.prepare_input( input_variables=in_vars, mask=self.input_key_dict.keys(), detach_dict=self.detach_key_dict, dim=1, input_scales=self.input_scales, ) # unroll model over multiple timesteps ys = [] for t in range(self.n_tsteps): x = self._impl(x) ys.append(x) y = torch.cat(ys, dim=1) # prepare output dict return self.prepare_output( output_tensor=y, output_var=self.output_key_dict, dim=1, output_scales=self.output_scales, )
4,496
Python
35.560975
91
0.630338
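FourcastNetArch.forward applies the same AFNO kernel n_tsteps times and concatenates the intermediate states along the channel dimension. A toy stand-in in plain PyTorch that shows the same unrolling pattern; the Conv2d is a placeholder for AFNONet, used purely for illustration:

import torch

class Unroll(torch.nn.Module):
    def __init__(self, step: torch.nn.Module, n_tsteps: int):
        super().__init__()
        self.step, self.n_tsteps = step, n_tsteps

    def forward(self, x):              # x: [B, C, H, W] at time t0
        ys = []
        for _ in range(self.n_tsteps):
            x = self.step(x)           # one model evaluation = one time step
            ys.append(x)
        return torch.cat(ys, dim=1)    # [B, n_tsteps * C, H, W]

step = torch.nn.Conv2d(20, 20, kernel_size=3, padding=1)  # placeholder for AFNONet
y = Unroll(step, n_tsteps=2)(torch.randn(1, 20, 32, 64))
print(y.shape)  # torch.Size([1, 40, 32, 64])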
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/metrics.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from typing import Tuple class Metrics: """Class used for computing performance related metrics. Expects predictions / targets to be of shape [C, H, W] where H is latitude dimension and W is longitude dimension. Metrics are computed for each channel separately. Parameters ---------- img_shape : Tuple[int] Shape of input image (resolution for fourcastnet) clim_mean_path : str, optional Path to total climate mean data, needed for ACC. By default "/era5/stats/time_means.npy" device : torch.device, optional Pytorch device model is on, by default 'cpu' """ def __init__( self, img_shape: Tuple[int], clim_mean_path: str = "/era5/stats/time_means.npy", device: torch.device = "cpu", ): self.img_shape = tuple(img_shape) self.device = device # Load climate mean value self.clim_mean = torch.as_tensor(np.load(clim_mean_path)) # compute latitude weighting nlat = img_shape[0] lat = torch.linspace(90, -90, nlat) lat_weight = torch.cos(torch.pi * (lat / 180)) lat_weight = nlat * lat_weight / lat_weight.sum() self.lat_weight = lat_weight.view(1, nlat, 1) # place on device if self.device is not None: self.lat_weight = self.lat_weight.to(self.device) self.clim_mean = self.clim_mean.to(self.device) def _check_shape(self, *args): # checks for shape [C, H, W] for x in args: assert x.ndim == 3 assert tuple(x.shape[1:]) == self.img_shape def weighted_acc(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """Computes the anomaly correlation coefficient (ACC). The ACC calculation is weighted based on the latitude. 
Parameters ---------- pred : torch.Tensor [C, H, W] Predicted tensor target : torch.Tensor [C, H, W] Target tensor Returns ------- torch.Tensor [C] ACC values for each channel """ self._check_shape(pred, target) # subtract climate means (n_chans, img_x, img_y) = pred.shape clim_mean = self.clim_mean[0, 0:n_chans, 0:img_x] pred_hat = pred - clim_mean target_hat = target - clim_mean # Weighted mean pred_bar = torch.sum( self.lat_weight * pred_hat, dim=(1, 2), keepdim=True ) / torch.sum( self.lat_weight * torch.ones_like(pred_hat), dim=(1, 2), keepdim=True ) target_bar = torch.sum( self.lat_weight * target_hat, dim=(1, 2), keepdim=True ) / torch.sum( self.lat_weight * torch.ones_like(target_hat), dim=(1, 2), keepdim=True ) pred_diff = pred_hat - pred_bar target_diff = target_hat - target_bar # compute weighted acc # Ref: https://www.atmos.albany.edu/daes/atmclasses/atm401/spring_2016/ppts_pdfs/ECMWF_ACC_definition.pdf p1 = torch.sum(self.lat_weight * pred_diff * target_diff, dim=(1, 2)) p2 = torch.sum(self.lat_weight * pred_diff * pred_diff, dim=(1, 2)) p3 = torch.sum(self.lat_weight * target_diff * target_diff, dim=(1, 2)) m = p1 / torch.sqrt(p2 * p3) return m def weighted_rmse(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """Computes RMSE weighted based on latitude Parameters ---------- pred : torch.Tensor [C, H, W] Predicted tensor target : torch.Tensor [C, H, W] Target tensor Returns ------- torch.Tensor [C] Weighted RSME values for each channel """ self._check_shape(pred, target) # compute weighted rmse m = torch.sqrt(torch.mean(self.lat_weight * (pred - target) ** 2, dim=(1, 2))) return m
5,098
Python
34.657342
113
0.616712
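Metrics weights every grid point by cos(latitude), normalised so the mean weight is 1, before averaging over the grid. A standalone illustration of that weighting and the weighted RMSE on random tensors (the shapes are arbitrary toy values):

import torch

nchan, nlat, nlon = 3, 46, 90
lat = torch.linspace(90, -90, nlat)
w = torch.cos(torch.pi * lat / 180)
w = nlat * w / w.sum()                 # normalised so the mean weight is ~1
w = w.view(1, nlat, 1)

pred = torch.randn(nchan, nlat, nlon)
target = torch.randn(nchan, nlat, nlon)
wrmse = torch.sqrt(torch.mean(w * (pred - target) ** 2, dim=(1, 2)))
print(float(w.mean()), wrmse.shape)    # ~1.0, torch.Size([3])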
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/dataset.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import h5py import logging import numpy as np from typing import List from pathlib import Path from modulus.sym.hydra import to_absolute_path from modulus.sym.dataset import Dataset class ERA5HDF5GridDataset(Dataset): """Lazy-loading ERA5 dataset. Parameters ---------- data_dir : str Directory where ERA5 data is stored chans : List[int] Defines which ERA5 variables to load tstep : int Defines the size of the timestep between the input and output variables n_tsteps : int, optional Defines how many timesteps are included in the output variables Default is 1 patch_size : int, optional If specified, crops input and output variables so image dimensions are divisible by patch_size Default is None n_samples_per_year : int, optional If specified, randomly selects n_samples_per_year samples from each year rather than all of the samples per year Default is None stats_dir : str, optional Directory to test data statistic numpy files that have the global mean and variance """ def __init__( self, data_dir: str, chans: List[int], tstep: int = 1, n_tsteps: int = 1, patch_size: int = None, n_samples_per_year: int = None, stats_dir: str = None, ): self.data_dir = Path(to_absolute_path(data_dir)) print(self.data_dir) self.chans = chans self.nchans = len(self.chans) self.tstep = tstep self.n_tsteps = n_tsteps self.patch_size = patch_size self.n_samples_per_year = n_samples_per_year if stats_dir is None: self.stats_dir = self.data_dir.parent / "stats" # check root directory exists assert ( self.data_dir.is_dir() ), f"Error, data directory {self.data_dir} does not exist" assert ( self.stats_dir.is_dir() ), f"Error, stats directory {self.stats_dir} does not exist" # get all input data files self.data_paths = sorted(self.data_dir.glob("??????.h5")) for data_path in self.data_paths: logging.info(f"ERA5 file found: {data_path}") self.n_years = len(self.data_paths) logging.info(f"Number of months: {self.n_years}") # get total number of examples and image shape from the first file, # assuming other files have exactly the same format. 
logging.info(f"Getting file stats from {self.data_paths[0]}") with h5py.File(self.data_paths[0], "r") as f: self.n_samples_per_year_all = f["fields"].shape[0] self.img_shape = f["fields"].shape[2:] logging.info(f"Number of channels available: {f['fields'].shape[1]}") # get example indices to use if self.n_samples_per_year is None: self.n_samples_per_year = self.n_samples_per_year_all self.samples = [ np.arange(self.n_samples_per_year) for _ in range(self.n_years) ] else: if self.n_samples_per_year > self.n_samples_per_year_all: raise ValueError( f"n_samples_per_year ({self.n_samples_per_year}) > number of samples available ({self.n_samples_per_year_all})!" ) self.samples = [ np.random.choice( np.arange(self.n_samples_per_year_all), self.n_samples_per_year, replace=False, ) for _ in range(self.n_years) ] logging.info(f"Number of samples/month: {self.n_samples_per_year}") # get total length self.length = self.n_years * self.n_samples_per_year # adjust image shape if patch_size defined if self.patch_size is not None: self.img_shape = [s - s % self.patch_size for s in self.img_shape] logging.info(f"Input image shape: {self.img_shape}") # load normalisation values # has shape [1, C, 1, 1] self.mu = np.load(self.stats_dir / "global_means.npy")[:, self.chans] # has shape [1, C, 1, 1] self.sd = np.load(self.stats_dir / "global_stds.npy")[:, self.chans] assert ( self.mu.shape == self.sd.shape == (1, self.nchans, 1, 1) ), "Error, normalisation arrays have wrong shape" def worker_init_fn(self, iworker): super().worker_init_fn(iworker) # open all year files at once on worker thread self.data_files = [h5py.File(path, "r") for path in self.data_paths] @property def invar_keys(self): return ["x_t0"] @property def outvar_keys(self): return [f"x_t{(i+1)*self.tstep}" for i in range(self.n_tsteps)] def __getitem__(self, idx): # get local indices from global index year_idx = int(idx / self.n_samples_per_year) local_idx = int(idx % self.n_samples_per_year) in_idx = self.samples[year_idx][local_idx] # get output indices out_idxs = [] for i in range(self.n_tsteps): out_idx = in_idx + (i + 1) * self.tstep # if at end of dataset, just learn identity instead if out_idx > (self.n_samples_per_year_all - 1): out_idx = in_idx out_idxs.append(out_idx) # get data xs = [] for idx in [in_idx] + out_idxs: # get array # has shape [C, H, W] x = self.data_files[year_idx]["fields"][idx, self.chans] assert x.ndim == 3, f"Expected 3 dimensions, but got {x.shape}" # apply input / output normalisation (broadcasted operation) x = (x - self.mu[0]) / self.sd[0] # crop data if needed if self.patch_size is not None: x = x[..., : self.img_shape[0], : self.img_shape[1]] xs.append(x) # convert to tensor dicts invar = {"x_t0": xs[0]} outvar = {f"x_t{(i+1)*self.tstep}": x for i, x in enumerate(xs[1:])} invar = Dataset._to_tensor_dict(invar) outvar = Dataset._to_tensor_dict(outvar) # TODO: get rid to lambda weighting lambda_weighting = Dataset._to_tensor_dict( {k: np.ones_like(v) for k, v in outvar.items()} ) # lambda_weighting = Dataset._to_tensor_dict( # {k: np.array([1]) for k, v in outvar.items()} # ) return invar, outvar, lambda_weighting def __len__(self): return self.length
7,719
Python
36.294686
132
0.598523
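ERA5HDF5GridDataset expects monthly files matching ??????.h5, each holding an HDF5 dataset named "fields" with shape [N, C, H, W], plus global_means.npy and global_stds.npy of shape [1, C, 1, 1] in a sibling stats directory. A sketch that creates a toy layout matching those expectations (random data, purely for a smoke test):

import h5py
import numpy as np
from pathlib import Path

root = Path("toy_era5")
(root / "train").mkdir(parents=True, exist_ok=True)
(root / "stats").mkdir(exist_ok=True)

n, c, h, w = 8, 20, 32, 64
with h5py.File(root / "train" / "200001.h5", "w") as f:   # name matches the ??????.h5 glob
    f.create_dataset("fields", data=np.random.rand(n, c, h, w).astype("float32"))

np.save(root / "stats" / "global_means.npy", np.zeros((1, c, 1, 1), dtype="float32"))
np.save(root / "stats" / "global_stds.npy", np.ones((1, c, 1, 1), dtype="float32"))
# ERA5HDF5GridDataset("toy_era5/train", chans=list(range(c))) should now construct.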
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/src/loss.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch from typing import Dict Tensor = torch.Tensor class LpLoss(torch.nn.Module): def __init__( self, d: float = 2.0, p: float = 2.0, ): """Relative Lp loss normalized seperately in the batch dimension. Expects inputs of the shape [B, C, ...] Parameters ---------- p : float, optional Norm power, by default 2.0 """ super(LpLoss, self).__init__() # Dimension and Lp-norm type are postive assert p > 0.0 self.p = p def _rel(self, x: torch.Tensor, y: torch.Tensor) -> float: num_examples = x.size()[0] xv = x.reshape(num_examples, -1) yv = y.reshape(num_examples, -1) diff_norms = torch.linalg.norm(xv - yv, ord=self.p, dim=1) y_norms = torch.linalg.norm(yv, ord=self.p, dim=1) return torch.mean(diff_norms / y_norms) def forward( self, invar: Dict[str, Tensor], pred_outvar: Dict[str, Tensor], true_outvar: Dict[str, Tensor], lambda_weighting: Dict[str, Tensor], step: int, ) -> Dict[str, float]: losses = {} for key, value in pred_outvar.items(): losses[key] = self._rel(pred_outvar[key], true_outvar[key]) return losses
2,433
Python
33.28169
73
0.648993
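LpLoss._rel flattens each sample and returns the batch mean of ||pred - true||_p / ||true||_p. A quick standalone check of the same formula for p = 2:

import torch

x = torch.randn(4, 20, 32, 64)        # prediction
y = x + 0.1 * torch.randn_like(x)     # target = prediction plus 10% noise

xv, yv = x.flatten(1), y.flatten(1)
rel_l2 = torch.mean(torch.linalg.norm(xv - yv, ord=2, dim=1) / torch.linalg.norm(yv, ord=2, dim=1))
print(float(rel_l2))  # roughly 0.1 for this noise level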
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/fourcastnet/conf/config_FCN.yaml
defaults :
  - modulus_default
  - arch:
      - afno
  - scheduler: cosine_annealing
  - optimizer: adam
  - loss: sum
  - _self_

arch:
  afno:
    patch_size: 8
    embed_dim: 512
    depth: 10
    num_blocks: 8

optimizer:
  lr: 0.0005

scheduler:
  T_max: 80000

custom:
  n_channels: 20
  tstep: 1
  n_tsteps: 1
  training_data_path: "/workspace/python/source_code/fourcastnet/data/train" # Training dataset path here
  test_data_path: "/workspace/python/source_code/fourcastnet/data/test" # Test dataset path here
  num_workers:
    grid: 4
    validation: 4
  tag:

batch_size:
  grid: 1
  validation: 1

training:
  amp: true
  rec_constraint_freq: 10000
  rec_results_freq : 1000
  save_network_freq: 1000
  print_stats_freq: 100
  summary_freq: 1000
  max_steps : 71000
787
YAML
15.765957
105
0.662008
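The FourCastNet inference script shown earlier reads this file directly with OmegaConf rather than through Hydra; the same pattern is handy for a quick look at the settings:

from omegaconf import OmegaConf

cfg = OmegaConf.load("conf/config_FCN.yaml")
print(cfg.custom.n_channels)      # 20
print(cfg.arch.afno.patch_size)   # 8
print(cfg.custom.training_data_path)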
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/plot_results.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np import matplotlib.pyplot as plt network_dir = "./outputs/diffusion_bar/validators/" data_1 = np.load(network_dir + "Val1.npz", allow_pickle=True) data_2 = np.load(network_dir + "Val2.npz", allow_pickle=True) data_1 = np.atleast_1d(data_1.f.arr_0)[0] data_2 = np.atleast_1d(data_2.f.arr_0)[0] plt.plot(data_1["x"][:, 0], data_1["pred_u_1"][:, 0], "--", label="u_1_pred") plt.plot(data_2["x"][:, 0], data_2["pred_u_2"][:, 0], "--", label="u_2_pred") plt.plot(data_1["x"][:, 0], data_1["true_u_1"][:, 0], label="u_1_true") plt.plot(data_2["x"][:, 0], data_2["true_u_2"][:, 0], label="u_2_true") plt.legend() plt.savefig("image_diffusion_problem_bootcamp")
1,801
Python
46.421051
77
0.716824
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/diffusion_bar.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import numpy as np from sympy import Symbol, Eq, Function, Number import modulus from modulus.sym.hydra import instantiate_arch , ModulusConfig from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.geometry.primitives_1d import Line1D from modulus.sym.domain.constraint import ( PointwiseBoundaryConstraint, PointwiseInteriorConstraint, ) from modulus.sym.domain.validator import PointwiseValidator from modulus.sym.domain.monitor import PointwiseMonitor from modulus.sym.key import Key from modulus.sym.node import Node from modulus.sym.eq.pde import PDE # params for domain L1 = Line1D(0, 1) L2 = Line1D(1, 2) D1 = 1e1 D2 = 1e-1 Tc = 100 Ta = 0 Tb = (Tc + (D1 / D2) * Ta) / (1 + (D1 / D2)) print(Ta) print(Tb) print(Tc) class Diffusion(PDE): name = "Diffusion" def __init__(self, T="T", D="D", Q=0, dim=3, time=True): # set params self.T = T self.dim = dim self.time = time # coordinates x, y, z = Symbol("x"), Symbol("y"), Symbol("z") # time t = Symbol("t") # make input variables input_variables = {"x": x, "y": y, "z": z, "t": t} if self.dim == 1: input_variables.pop("y") input_variables.pop("z") elif self.dim == 2: input_variables.pop("z") if not self.time: input_variables.pop("t") # Temperature assert type(T) == str, "T needs to be string" T = Function(T)(*input_variables) # Diffusivity if type(D) is str: D = Function(D)(*input_variables) elif type(D) in [float, int]: D = Number(D) # Source if type(Q) is str: Q = Function(Q)(*input_variables) elif type(Q) in [float, int]: Q = Number(Q) # set equations self.equations = {} self.equations["diffusion_" + self.T] = ( T.diff(t) - (D * T.diff(x)).diff(x) - (D * T.diff(y)).diff(y) - (D * T.diff(z)).diff(z) - Q ) class DiffusionInterface(PDE): name = "DiffusionInterface" def __init__(self, T_1, T_2, D_1, D_2, dim=3, time=True): # set params self.T_1 = T_1 self.T_2 = T_2 self.dim = dim self.time = time # coordinates x, y, z = Symbol("x"), Symbol("y"), Symbol("z") normal_x, normal_y, normal_z = ( Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z"), ) # time t = Symbol("t") # make input variables input_variables = {"x": x, "y": y, "z": z, "t": t} if self.dim == 1: input_variables.pop("y") input_variables.pop("z") elif self.dim == 2: input_variables.pop("z") if not self.time: input_variables.pop("t") # Diffusivity if type(D_1) is str: D_1 = Function(D_1)(*input_variables) elif type(D_1) in [float, int]: 
D_1 = Number(D_1) if type(D_2) is str: D_2 = Function(D_2)(*input_variables) elif type(D_2) in [float, int]: D_2 = Number(D_2) # variables to match the boundary conditions (example Temperature) T_1 = Function(T_1)(*input_variables) T_2 = Function(T_2)(*input_variables) # set equations self.equations = {} self.equations["diffusion_interface_dirichlet_" + self.T_1 + "_" + self.T_2] = ( T_1 - T_2 ) flux_1 = D_1 * ( normal_x * T_1.diff(x) + normal_y * T_1.diff(y) + normal_z * T_1.diff(z) ) flux_2 = D_2 * ( normal_x * T_2.diff(x) + normal_y * T_2.diff(y) + normal_z * T_2.diff(z) ) self.equations["diffusion_interface_neumann_" + self.T_1 + "_" + self.T_2] = ( flux_1 - flux_2 ) @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: # make list of nodes to unroll graph on diff_u1 = Diffusion(T="u_1", D=D1, dim=1, time=False) diff_u2 = Diffusion(T="u_2", D=D2, dim=1, time=False) diff_in = DiffusionInterface("u_1", "u_2", D1, D2, dim=1, time=False) diff_net_u_1 = instantiate_arch( input_keys=[Key("x")], output_keys=[Key("u_1")], cfg=cfg.arch.fully_connected, ) diff_net_u_2 = instantiate_arch( input_keys=[Key("x")], output_keys=[Key("u_2")], cfg=cfg.arch.fully_connected, ) nodes = ( diff_u1.make_nodes() + diff_u2.make_nodes() + diff_in.make_nodes() + [diff_net_u_1.make_node(name="u1_network", jit=cfg.jit)] + [diff_net_u_2.make_node(name="u2_network", jit=cfg.jit)] ) # make domain add constraints to the solver domain = Domain() # sympy variables x = Symbol("x") # right hand side (x = 2) Pt c rhs = PointwiseBoundaryConstraint( nodes=nodes, geometry=L2, outvar={"u_2": Tc}, batch_size=cfg.batch_size.rhs, criteria=Eq(x, 2), ) domain.add_constraint(rhs, "right_hand_side") # left hand side (x = 0) Pt a lhs = PointwiseBoundaryConstraint( nodes=nodes, geometry=L1, outvar={"u_1": Ta}, batch_size=cfg.batch_size.lhs, criteria=Eq(x, 0), ) domain.add_constraint(lhs, "left_hand_side") # interface 1-2 interface = PointwiseBoundaryConstraint( nodes=nodes, geometry=L1, outvar={ "diffusion_interface_dirichlet_u_1_u_2": 0, "diffusion_interface_neumann_u_1_u_2": 0, }, batch_size=cfg.batch_size.interface, criteria=Eq(x, 1), ) domain.add_constraint(interface, "interface") # interior 1 interior_u1 = PointwiseInteriorConstraint( nodes=nodes, geometry=L1, outvar={"diffusion_u_1": 0}, bounds={x: (0, 1)}, batch_size=cfg.batch_size.interior_u1, ) domain.add_constraint(interior_u1, "interior_u1") # interior 2 interior_u2 = PointwiseInteriorConstraint( nodes=nodes, geometry=L2, outvar={"diffusion_u_2": 0}, bounds={x: (1, 2)}, batch_size=cfg.batch_size.interior_u2, ) domain.add_constraint(interior_u2, "interior_u2") # validation data x = np.expand_dims(np.linspace(0, 1, 100), axis=-1) u_1 = x * Tb + (1 - x) * Ta invar_numpy = {"x": x} outvar_numpy = {"u_1": u_1} val = PointwiseValidator(nodes=nodes,invar=invar_numpy, true_outvar=outvar_numpy) domain.add_validator(val, name="Val1") # make validation data line 2 x = np.expand_dims(np.linspace(1, 2, 100), axis=-1) u_2 = (x - 1) * Tc + (2 - x) * Tb invar_numpy = {"x": x} outvar_numpy = {"u_2": u_2} val = PointwiseValidator(nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy) domain.add_validator(val, name="Val2") # make monitors invar_numpy = {"x": [[1.0]]} monitor = PointwiseMonitor( invar_numpy, output_names=["u_1__x"], metrics={"flux_u1": lambda var: torch.mean(var["u_1__x"])}, nodes=nodes, requires_grad=True, ) domain.add_monitor(monitor) monitor = PointwiseMonitor( invar_numpy, output_names=["u_2__x"], metrics={"flux_u2": lambda var: 
torch.mean(var["u_2__x"])}, nodes=nodes, requires_grad=True, ) domain.add_monitor(monitor) # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
8,835
Python
28.065789
88
0.572835
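diffusion_bar.py validates against piecewise-linear steady-state profiles on the two bars. A plain NumPy check that the interface value Tb makes the temperature continuous at x = 1 and the diffusive flux D*du/dx equal on both sides:

import numpy as np

D1, D2, Ta, Tc = 1e1, 1e-1, 0.0, 100.0
Tb = (Tc + (D1 / D2) * Ta) / (1 + (D1 / D2))

# bar 1 on [0, 1]: u_1 = x * Tb + (1 - x) * Ta  -> slope (Tb - Ta)
# bar 2 on [1, 2]: u_2 = (x - 1) * Tc + (2 - x) * Tb -> slope (Tc - Tb)
flux_1 = D1 * (Tb - Ta)
flux_2 = D2 * (Tc - Tb)
print(Tb, flux_1, flux_2)  # Tb ~ 0.990, both fluxes ~ 9.90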
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/diffusion_1d/conf/config.yaml
defaults :
  - modulus_default
  - arch:
      - fully_connected
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

arch:
  fully_connected:
    layer_size: 256

save_filetypes : "vtk,npz"

scheduler:
  decay_rate: 0.95
  decay_steps: 100

optimizer:
  lr : 1e-4

training:
  rec_results_freq: 1000
  max_steps : 5000

batch_size:
  rhs: 2
  lhs: 2
  interface: 2
  interior_u1: 200
  interior_u2: 200
437
YAML
12.272727
32
0.631579
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/darcy_FNO_lazy.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import modulus from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig from modulus.sym.key import Key from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.domain.constraint import SupervisedGridConstraint from modulus.sym.domain.validator import GridValidator from modulus.sym.dataset import HDF5GridDataset from modulus.sym.utils.io.plotter import GridValidatorPlotter from utilities import download_FNO_dataset @modulus.sym.main(config_path="conf", config_name="config_FNO") def run(cfg: ModulusConfig) -> None: # load training/ test data input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))] output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))] download_FNO_dataset("Darcy_241", outdir="datasets/") train_path = to_absolute_path( "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5" ) test_path = to_absolute_path( "datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5" ) # make datasets train_dataset = HDF5GridDataset( train_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=1000 ) test_dataset = HDF5GridDataset( test_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=100 ) # make list of nodes to unroll graph on decoder_net = instantiate_arch( cfg=cfg.arch.decoder, output_keys=output_keys, ) fno = instantiate_arch( cfg=cfg.arch.fno, input_keys=input_keys, decoder_net=decoder_net, ) nodes = [fno.make_node('fno')] # make domain domain = Domain() # add constraints to domain supervised = SupervisedGridConstraint( nodes=nodes, dataset=train_dataset, batch_size=cfg.batch_size.grid, num_workers=4, # number of parallel data loaders ) domain.add_constraint(supervised, "supervised") # add validator val = GridValidator( nodes, dataset=test_dataset, batch_size=cfg.batch_size.validation, plotter=GridValidatorPlotter(n_examples=5), ) domain.add_validator(val, "test") # make solver slv = Solver(cfg, domain) # start solver slv.solve() if __name__ == "__main__": run()
3,392
Python
32.264706
79
0.704009
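After download_FNO_dataset and the .mat to .hdf5 conversion, the training file should contain the "coeff" and "sol" arrays referenced above. A small inspection sketch, assuming it is run from the darcy example directory once the dataset exists locally:

import h5py

path = "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5"
with h5py.File(path, "r") as f:
    for k in f.keys():
        print(k, f[k].shape, f[k].dtype)   # expect "coeff" and "sol" with shape [N, C, H, W]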
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/utilities.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import os import zipfile try: import gdown except: gdown = None import scipy.io import numpy as np import h5py from modulus.sym.hydra import to_absolute_path # list of FNO dataset url ids on drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt- _FNO_datatsets_ids = { "Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV", "Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf", } _FNO_dataset_names = { "Darcy_241": ( "piececonst_r241_N1024_smooth1.hdf5", "piececonst_r241_N1024_smooth2.hdf5", ), "Darcy_421": ( "piececonst_r421_N1024_smooth1.hdf5", "piececonst_r421_N1024_smooth2.hdf5", ), } def load_FNO_dataset(path, input_keys, output_keys, n_examples=None): "Loads a FNO dataset" if not path.endswith(".hdf5"): raise Exception( ".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file" ) # load data path = to_absolute_path(path) data = h5py.File(path, "r") _ks = [k for k in data.keys() if not k.startswith("__")] print(f"loaded: {path}\navaliable keys: {_ks}") # parse data invar, outvar = dict(), dict() for d, keys in [(invar, input_keys), (outvar, output_keys)]: for k in keys: # get data x = data[k] # N, C, H, W # cut examples out if n_examples is not None: x = x[:n_examples] # print out normalisation values print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}") d[k] = x del data return (invar, outvar) def download_FNO_dataset(name, outdir="datasets/"): "Tries to download FNO dataset from drive" if name not in _FNO_datatsets_ids: raise Exception( f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_datatsets_ids.keys())}" ) id = _FNO_datatsets_ids[name] outdir = to_absolute_path(outdir) + "/" namedir = f"{outdir}{name}/" # skip if already exists exists = True for file_name in _FNO_dataset_names[name]: if not os.path.isfile(namedir + file_name): exists = False break if exists: return print(f"FNO dataset {name} not detected, downloading dataset") # Make sure we have gdown installed if gdown is None: raise ModuleNotFoundError("gdown package is required to download the dataset!") # get output directory os.makedirs(namedir, exist_ok=True) # download dataset zippath = f"{outdir}{name}.zip" _download_file_from_google_drive(id, zippath) # unzip with zipfile.ZipFile(zippath, "r") as f: f.extractall(namedir) os.remove(zippath) # preprocess files for file in os.listdir(namedir): if file.endswith(".mat"): matpath = f"{namedir}{file}" 
preprocess_FNO_mat(matpath) os.remove(matpath) def _download_file_from_google_drive(id, path): "Downloads a file from google drive" # use gdown library to download file gdown.download(id=id, output=path) def preprocess_FNO_mat(path): "Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays" assert path.endswith(".mat") data = scipy.io.loadmat(path) ks = [k for k in data.keys() if not k.startswith("__")] with h5py.File(path[:-4] + ".hdf5", "w") as f: for k in ks: x = np.expand_dims(data[k], axis=1) # N, C, H, W f.create_dataset( k, data=x, dtype="float32" ) # note h5 files larger than .mat because no compression used
4,794
Python
30.339869
112
0.646016
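A minimal sketch of how the helpers in the record above are typically chained; it assumes the module is importable as `utilities`, and the `coeff`/`sol` key names follow the FNO Darcy convention used in the configs later in this corpus:

```python
# Hypothetical driver; assumes the file above is importable as `utilities`.
from utilities import download_FNO_dataset, load_FNO_dataset

# Fetch the Darcy data; .mat files are converted to .hdf5 during download.
download_FNO_dataset("Darcy_241", outdir="datasets/")

# Load the first training file; key names are assumptions based on the FNO configs.
invar, outvar = load_FNO_dataset(
    "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
    input_keys=["coeff"],
    output_keys=["sol"],
    n_examples=100,
)
print(invar["coeff"].shape)  # e.g. (100, 1, 241, 241)
```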
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/ops.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import torch import torch.nn.functional as F def dx(inpt, dx, channel, dim, order=1, padding="zeros"): "Compute first order numerical derivatives of input tensor" var = inpt[:, channel : channel + 1, :, :] # get filter if order == 1: ddx1D = torch.Tensor( [ -0.5, 0.0, 0.5, ] ).to(inpt.device) elif order == 3: ddx1D = torch.Tensor( [ -1.0 / 60.0, 3.0 / 20.0, -3.0 / 4.0, 0.0, 3.0 / 4.0, -3.0 / 20.0, 1.0 / 60.0, ] ).to(inpt.device) ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1]) # apply convolution if padding == "zeros": var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0) elif padding == "replication": var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate") output = F.conv2d(var, ddx3D, padding="valid") output = (1.0 / dx) * output if dim == 0: output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2] elif dim == 1: output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :] return output def ddx(inpt, dx, channel, dim, order=1, padding="zeros"): "Compute second order numerical derivatives of input tensor" var = inpt[:, channel : channel + 1, :, :] # get filter if order == 1: ddx1D = torch.Tensor( [ 1.0, -2.0, 1.0, ] ).to(inpt.device) elif order == 3: ddx1D = torch.Tensor( [ 1.0 / 90.0, -3.0 / 20.0, 3.0 / 2.0, -49.0 / 18.0, 3.0 / 2.0, -3.0 / 20.0, 1.0 / 90.0, ] ).to(inpt.device) ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1]) # apply convolution if padding == "zeros": var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0) elif padding == "replication": var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate") output = F.conv2d(var, ddx3D, padding="valid") output = (1.0 / dx ** 2) * output if dim == 0: output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2] elif dim == 1: output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :] return output
3,754
Python
33.136363
88
0.531167
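A small usage check of the finite-difference helpers above; the grid, test field, and print indices are illustrative (assumptions), and `dim=1` differentiates along the last (W) axis as implied by the kernel reshape in the file:

```python
import torch
from ops import dx, ddx  # assumed import path for the file above; inputs are [N, C, H, W]

h = 0.01
x = torch.arange(0.0, 1.0, h)
y = torch.arange(0.0, 1.0, h)
yy, xx = torch.meshgrid(y, x, indexing="ij")   # xx varies along W, yy along H
u = (xx ** 2).unsqueeze(0).unsqueeze(0)        # u(x, y) = x**2, shape [1, 1, H, W]

du_dx = dx(u, h, channel=0, dim=1, order=1, padding="replication")     # ~ 2*x in the interior
d2u_dx2 = ddx(u, h, channel=0, dim=1, order=1, padding="replication")  # ~ 2 in the interior

print(du_dx[0, 0, 50, 50].item(), d2u_dx2[0, 0, 50, 50].item())        # ~1.0 and ~2.0
```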
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_FNO-Backup.yaml
defaults :
  - modulus_default
  - arch:
      - fno
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

jit: false

arch:
  fno:
    dimension: 2
    nr_fno_layers: 4
    fno_layer_size: 32
    fno_modes: 12
    padding: 9
    output_fc_layer_sizes:
      - 128

scheduler:
  decay_rate: 0.95
  decay_steps: 1000

training:
  rec_results_freq : 1000
  max_steps : 10000

batch_size:
  grid: 32
  validation: 32
443
YAML
12.875
32
0.604966
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_AFNO.yaml
defaults :
  - modulus_default
  - arch:
      - afno
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

arch:
  afno:
    patch_size: 16
    embed_dim: 256
    depth: 4
    num_blocks: 8

scheduler:
  decay_rate: 0.95
  decay_steps: 1000

training:
  rec_results_freq : 1000
  max_steps : 10000

batch_size:
  grid: 32
  validation: 32
369
YAML
12.214285
32
0.609756
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/darcy/conf/config_FNO.yaml
defaults :
  - modulus_default
  - /arch/[email protected]
  - /arch/[email protected]
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

arch:
  decoder:
    input_keys: [z, 32]
    output_keys: sol
    nr_layers: 1
    layer_size: 32

  fno:
    input_keys: coeff
    dimension: 2
    nr_fno_layers: 4
    fno_modes: 12
    padding: 9

scheduler:
  decay_rate: 0.95
  decay_steps: 1000

training:
  rec_results_freq : 1000
  max_steps : 10000

batch_size:
  grid: 32
  validation: 32
532
YAML
14.67647
47
0.633459
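For reference, the nesting above can be checked by loading the file as plain YAML; a sketch only, the relative path is an assumption and Hydra would normally compose the `defaults` entries at runtime:

```python
import yaml

with open("conf/config_FNO.yaml") as f:  # assumed working directory
    cfg = yaml.safe_load(f)

print(cfg["arch"]["fno"]["fno_modes"])       # 12
print(cfg["arch"]["decoder"]["input_keys"])  # ['z', 32]
print(cfg["training"]["max_steps"])          # 10000
```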
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/navier_stokes/navier_stokes.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import numpy as np import os from sympy import Symbol, Eq, Abs, sin, cos import modulus from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig from modulus.sym.eq.pdes.navier_stokes import NavierStokes from modulus.sym.geometry.primitives_2d import Rectangle as rect from modulus.sym.models.fully_connected import FullyConnectedArch from modulus.sym.key import Key from modulus.sym.node import Node from modulus.sym.solver import Solver from modulus.sym.domain import Domain from modulus.sym.domain.constraint import ( PointwiseConstraint, PointwiseInteriorConstraint, ) from modulus.sym.domain.inferencer import PointVTKInferencer from modulus.sym.utils.io import ( VTKUniformGrid, ) def read_wf_data(velocity_scale,pressure_scale): path = "/workspace/python/source_code/navier_stokes/data_lat.npy" print(path) ic = np.load(path).astype(np.float32) Pa_to_kgm3 = 0.10197 mesh_y, mesh_x = np.meshgrid( np.linspace(-0.720, 0.719, ic[0].shape[0]), np.linspace(-0.720, 0.719, ic[0].shape[1]), indexing="ij", ) invar = {} invar["x"] = np.expand_dims(mesh_x.astype(np.float32).flatten(),axis=-1) invar["y"] = np.expand_dims(mesh_y.astype(np.float32).flatten(),axis=-1) invar["t"] = np.full_like(invar["x"], 0) outvar = {} outvar["u"] = np.expand_dims((ic[0]/velocity_scale).flatten(),axis=-1) outvar["v"] = np.expand_dims((ic[1]/velocity_scale).flatten(),axis=-1) outvar["p"] = np.expand_dims((ic[2]*Pa_to_kgm3/pressure_scale).flatten(),axis=-1) return invar, outvar @modulus.sym.main(config_path="conf", config_name="config") def run(cfg: ModulusConfig) -> None: # define sympy variables to parametrize domain curves x, y = Symbol("x"), Symbol("y") # make geometry for problem length = (-0.720, 0.720) height = (-0.720, 0.720) box_bounds = {x: length, y: height} # define geometry rec = rect( (length[0], height[0]), (length[1], height[1]) ) # Scaling and Nondimensionalizing the Problem ############# # Real Params ############# fluid_kinematic_viscosity = 1.655e-5 # m**2/s fluid_density = 1.1614 # kg/m**3 fluid_specific_heat = 1005 # J/(kg K) fluid_conductivity = 0.0261 # W/(m K) ################ # Non dim params for normalisation ################ # Diameter of Earth : 12742000 m over range of 1.440 length_scale = 12742000/1.440 # 60 hrs to 1 timestep- every inference frame is a 6 hour prediction (s) time_scale = 60*60*60 # Calcuale velocity & pressure scale velocity_scale = length_scale / time_scale 
# m/s pressure_scale = fluid_density * ((length_scale / time_scale) ** 2) # kg / (m s**2) # Density scale density_scale = 1.1614 # kg/m3 ############################## # Nondimensionalization Params for NavierStokes fn ############################## # fluid params nd_fluid_kinematic_viscosity = fluid_kinematic_viscosity / ( length_scale ** 2 / time_scale ) nd_fluid_density = fluid_density / density_scale # time window parameters time_window_size = 1.0 t_symbol = Symbol("t") time_range = {t_symbol: (0, time_window_size)} # make navier stokes equations ns = NavierStokes(nu=nd_fluid_kinematic_viscosity, rho=nd_fluid_density, dim=2, time=True) # make network flow_net = FullyConnectedArch( input_keys=[Key("x"), Key("y"), Key("t")], output_keys=[Key("u"), Key("v"), Key("p")], periodicity={"x": length, "y" : height}, layer_size=256, ) # make nodes to unroll graph on nodes = ns.make_nodes() + [flow_net.make_node(name="flow_net")] # make initial condition domain navier = Domain("navier_stokes") # make initial condition ic_invar,ic_outvar = read_wf_data(velocity_scale,pressure_scale) ic = PointwiseConstraint.from_numpy( nodes, ic_invar, ic_outvar, batch_size=cfg.batch_size.initial_condition, ) navier.add_constraint(ic, name="ic") # make interior constraint interior = PointwiseInteriorConstraint( nodes=nodes, geometry=rec, outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0}, bounds=box_bounds, batch_size=cfg.batch_size.interior, parameterization=time_range, ) navier.add_constraint(interior, name="interior") # add inference data for time slices for i, specific_time in enumerate(np.linspace(0, time_window_size, 10)): vtk_obj = VTKUniformGrid( bounds=[(-0.720, 0.720), (-0.360, 0.360)], npoints=[1440,720], export_map={"u": ["u", "v"], "p": ["p"]}, ) grid_inference = PointVTKInferencer( vtk_obj=vtk_obj, nodes=nodes, input_vtk_map={"x": "x", "y": "y"}, output_names=["u", "v", "p"], requires_grad=False, invar={"t": np.full([720 *1440, 1], specific_time)}, batch_size=100000, ) navier.add_inferencer(grid_inference, name="time_slice_" + str(i).zfill(4)) slv = Solver(cfg, navier) # start solver slv.solve() if __name__ == "__main__": run()
6,473
Python
33.43617
94
0.631083
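The nondimensionalization constants in the script above can be recomputed in isolation; this small sketch simply restates the arithmetic from `run()`:

```python
# Values copied from navier_stokes.py above.
fluid_density = 1.1614               # kg/m**3
length_scale = 12742000 / 1.440      # Earth diameter over the domain range, ~8.85e6 m
time_scale = 60 * 60 * 60            # 60 hours in seconds

velocity_scale = length_scale / time_scale             # ~41 m/s
pressure_scale = fluid_density * velocity_scale ** 2   # kg/(m s**2), i.e. Pa, ~1.95e3

print(f"{velocity_scale:.2f} m/s, {pressure_scale:.1f} Pa")
```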
openhackathons-org/End-to-End-AI-for-Science/workspace/python/source_code/navier_stokes/conf/config.yaml
defaults :
  - modulus_default
  - arch:
      - fully_connected
  - scheduler: tf_exponential_lr
  - optimizer: adam
  - loss: sum
  - _self_

save_filetypes : "vtk,npz"

scheduler:
  decay_rate: 0.95
  decay_steps: 3000

training:
  rec_results_freq : 1000
  rec_constraint_freq: 5000
  max_steps : 110000

batch_size:
  initial_condition: 2048
  interior: 2048
364
YAML
14.208333
32
0.67033
metaiintw/build-an-avatar-with-ASR-TTS-Transformer-Omniverse-Audio2Face/README.md
# Build an avatar with ASR, Sentence-transformer, Similarity Search, TTS and Omniverse Audio2Face

## Project Description

I'll show you how I used several Python packages and NVIDIA's Omniverse Audio2Face to quickly implement an avatar that can answer questions defined in a knowledge set or FAQ.

## Demo

[![IMAGE ALT TEXT](https://user-images.githubusercontent.com/104120636/166487700-9f14813c-13b9-42bc-b477-c0f8aff7db5d.png)](http://www.youtube.com/watch?v=G_c94cGIKgs "Video Title")

## How It Works

![](https://i.imgur.com/BZIBUAt.png)

#### ***Automatic Speech Recognition, ASR***

Upon receiving the user's request, the [SpeechRecognition API](https://pypi.org/project/SpeechRecognition/) records the frequencies and sound waves of the user's voice and translates them into text.

#### ***Language Understanding***

[Sentence-Transformer](https://www.sbert.net/) provides state-of-the-art sentence, text and image embeddings that can encode input questions into feature vectors. The feature vectors represent entire sentences and their semantic information; this helps the machine understand the context, intention, and other nuances of the entire text. We'll conduct a similarity search, comparing a user's input question to a list of FAQs and returning the most likely answers with [Facebook's Similarity Search API](https://ai.facebook.com/tools/faiss/).

#### ***Text To Speech***

The avatar's voice is fully synthesized by the [Gtts API](https://pypi.org/project/gTTS/), which turns text into natural-sounding speech. The synthesized voice is also used to drive the avatar's facial animation.

#### ***Omniverse Audio2Face***

![](https://i.imgur.com/7ioYQHj.png)

Omniverse Audio2Face is an application that brings our avatars to life. With [Omniverse Audio2Face](https://www.nvidia.com/en-us/omniverse/apps/audio2face/), anyone can create realistic facial expressions and emotions to match any voice-over track. The technology feeds the audio input into a pre-trained deep neural network from NVIDIA, and the output of the network drives the facial animation of 3D characters in real time.

## System Requirements

| Element | Minimum Specifications |
| ---- | ---- |
| OS Supported | Windows 10 64-bit (Version 1909 and above) |
| CPU | Intel I7, AMD Ryzen 2.5GHz or greater |
| CPU Cores | 4 or higher |
| RAM | 16 GB or higher |
| Storage | 500 Gb SSD or higher |
| GPU | Any RTX GPU |
| VRAM | 6 GB or higher |
| Min. Video Driver Version | See latest drivers [here](https://developer.nvidia.com/omniverse/driver) |

## How to Install and Run the Project

Before you begin, you'll need to clone the repository with the template code used in this repo. Open your Terminal app and find a directory where you'd like to store the code. Run this command to clone the GitHub App template repository:

```
$ git clone https://github.com/metaiintw/build-an-avatar-with-ASR-TTS-Transformer-Omniverse-Audio2Face.git
```

#### Creating an environment from an environment.yml file

Make sure Anaconda is installed on your local machine. Use the following command to install the packages included in requirements.yml:

```
$ conda env create -f /path/to/requirements.yml
```

#### Download and Install Omniverse Launcher

[NVIDIA Omniverse](https://docs.omniverse.nvidia.com/prod_install-guide/prod_install-guide.html) is a development platform for 3D simulation and design collaboration. It is free for individuals, and you can download Omniverse Launcher [here](https://www.nvidia.com/en-us/omniverse/download/).
I also recommend watching this [video tutorial](https://www.youtube.com/watch?v=Ol-bCNBgyFw), which guides you through the installation process.

| ![](https://i.imgur.com/4imNFt1.jpg) |
|:--:|
| *Omniverse Launcher* |

#### Install Omniverse Audio2Face

| ![](https://i.imgur.com/6kbTCRW.jpg) |
|:--:|
| *Omniverse apps* |

Once you have Omniverse Launcher installed, you get immediate access to all the apps, including [Omniverse Audio2Face](https://www.nvidia.com/en-us/omniverse/apps/audio2face/). Next, simply install Omniverse Audio2Face and you're good to go.

| ![](https://i.imgur.com/N94KDTc.png) |
|:--:|
| *Omniverse Audio2Face* |

#### Omniverse Audio2Face setup

To let our Python program interact with Omniverse Audio2Face, you should use the streaming audio player, which allows developers to stream audio data from an external source or application via the gRPC protocol.

| ![](https://i.imgur.com/qZUQVS0.png) |
|:--:|
| *The streaming audio player allows developers to stream audio data from an external source* |

This [tutorial](https://www.youtube.com/watch?v=qKhPwdcOG_w&t=17s) showcases how to create an audio player and connect it to the Audio2Face instance using the OmniGraph editor.

#### Bring Your Avatar to life

Now we're ready to bring our avatar to life; simply enter the following commands into your terminal.

```
$ cd path_to_the_project_folder
$ conda activate avatar
$ jupyter lab
```

Execute the .ipynb notebook file named ***1.Creating_a_simple_avatar.ipynb*** and start building your first avatar!

| ![](https://i.imgur.com/YTSdxJT.png) |
|:--:|
| *1.Creating_a_simple_avatar.ipynb* |

## Creators

**Renton Hsu**
- [Linkedin](https://www.linkedin.com/in/renton-hsu-bba5a0102)
- [Facebook](https://www.facebook.com/renton.hsu/)
5,296
Markdown
53.608247
429
0.749056
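The ASR, embedding, similarity-search, and TTS loop described in the README above can be outlined in a few lines. This is a hedged sketch, not the repository's notebook: the model name `all-MiniLM-L6-v2`, the FAQ strings, and the output file name are assumptions.

```python
import faiss
import numpy as np
import speech_recognition as sr
from gtts import gTTS
from sentence_transformers import SentenceTransformer

# Toy knowledge set (assumed for illustration).
faq = ["What are your opening hours?", "Where is the store located?"]
answers = ["We are open 9am to 6pm.", "We are at 100 Main Street."]

model = SentenceTransformer("all-MiniLM-L6-v2")            # assumed embedding model
faq_vecs = model.encode(faq).astype(np.float32)
index = faiss.IndexFlatL2(faq_vecs.shape[1])               # FAISS similarity index
index.add(faq_vecs)

# 1) ASR: capture a question from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
    question = r.recognize_google(r.listen(source))

# 2) Similarity search: find the closest FAQ entry
q_vec = model.encode([question]).astype(np.float32)
_, idx = index.search(q_vec, 1)

# 3) TTS: synthesize the matched answer; this audio would then be streamed to Audio2Face
gTTS(answers[idx[0][0]]).save("answer.mp3")
```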
metaiintw/build-an-avatar-with-ASR-TTS-Transformer-Omniverse-Audio2Face/1.Create_A_Simple_Avatar/requirements.yml
name: avatar666
channels:
  - conda-forge
  - pytorch-nightly
dependencies:
  - argon2-cffi=21.3.0
  - argon2-cffi-bindings=21.2.0
  - ca-certificates=2020.10.14
  - charset-normalizer=2.0.4
  - huggingface_hub=0.5.1
  - importlib-metadata=4.11.3
  - intel-openmp=2021.4.0
  - libfaiss-avx2=1.7.2
  - lz4-c=1.9.3=h2bbff1b_1
  - m2w64-gcc-libgfortran=5.3.0
  - m2w64-gcc-libs=5.3.0=7
  - m2w64-gcc-libs-core=5.3.0=7
  - m2w64-gmp=6.1.0
  - m2w64-libwinpthread-git=5.0.0.4634.697f757=2
  - matplotlib-inline=0.1.2
  - mkl-service=2.4.0
  - msys2-conda-epoch=20160418
  - nest-asyncio=1.5.5
  - numpy-base=1.21.5
  - prompt-toolkit=3.0.20
  - python-dateutil=2.8.2
  - python-fastjsonschema=2.15.1
  - pytorch-mutex=1.0
  - scikit-learn=1.0.2
  - sentence-transformers=2.2.0
  - typing-extensions=4.1.1
  - websocket-client=0.58.0
  - ffmpeg-python=0.2.0
  - pyaudio=0.2.11
  - faiss=1.7.2
  - pip:
      # Packages that are only on PyPI
      - jupyter
      - notebook
      - jupyterlab
      - grpcio
      - pandas
      - faiss
      - pydub
      - soundfile
      - google
      - protobuf==3.20.0
      - gtts
      - librosa==0.7.2
      - numba==0.48
      - SpeechRecognition
1,184
YAML
22.235294
48
0.614865
eliabntt/animated_human_SMPL_to_USD/generate_sequence.py
import json import os import humangenerator import bpy import humangenerator as hgen import argparse import ipdb import sys import yaml parser = argparse.ArgumentParser() parser.add_argument("--dataset", help="Dataset from which you want to generate data") parser.add_argument("--output_dir", help="Path to where the data should be saved") parser.add_argument("--samples_dir", help="Paths where the data is stored") parser.add_argument("--last_sample", help="Last sample processed, this must be the FULL name of the folder (e.g. 00001). This WILL be processed", default="") parser.add_argument("--parent_path", help="Path containing the subfolders for the datasets (with the pkl models)", default="") parser.add_argument("--sample_id", help="ID of the sample, if emtpy process all", default="all") parser.add_argument("--with_cache", help="Write \"False\" if generating blendshapes", default="True") parser.add_argument("--suppress_out", help="Write \"False\" if output in console", default="False") parser.add_argument("--write_verts", help="Write \"True\" if you want to write verts info in the pkl", default="False") parser.add_argument("--frame", help="The n-th frame to generate. Default all", default="all") parser.add_argument("--config_file", help="json file containing the configuration", default="") parser.add_argument("--exp_name", help="The name of the \"experiment\" of the dataset. By default the name of the samples_dir folder", default="") # structure should be `parent_path/[surreal/datageneration/smpl_data,body_models/{smplh,dmpls}]` args = parser.parse_args() with open(os.path.join("humangenerator", "avail_datasets.yaml"), 'r') as stream: data_loaded = yaml.safe_load(stream) avail_datasets = data_loaded["datasets"] processor = None if avail_datasets == [] or args.dataset not in avail_datasets: if not avail_datasets: print("No avail dataset. Check file") else: print(f"Sought dataset is not yet avail. 
The avail ones are {avail_datasets}") exit(-1) else: print(f"Processing {args.dataset} data") found = (args.last_sample == "") try: WITH_CACHE = (False if args.with_cache == "False" else True) parent_path = args.parent_path smpl_body_list = [] # Init SMPL models smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") smpl_models = { 'f': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')), 'm': hgen.SMPLModel(os.path.join(smpl_path, 'smpl', 'models', 'basicModel_m_lbs_10_207_0_v1.0.0.pkl')), } if args.frame != "all": try: frame = int(args.frame) except: print("Error converting frame to int, considering the WHOLE sequence") frame = None else: frame = None print("Whole sequence considered") print("This will export only the whole sequence") hgen.init() # Parse args PATH_SAMPLES = args.samples_dir if args.exp_name == "": exp_name = os.path.split(PATH_SAMPLES)[-1] else: exp_name = args.exp_name PATH_OUT = os.path.join(args.output_dir, exp_name) if not os.path.exists(PATH_OUT): os.makedirs(PATH_OUT) if args.config_file == "": config = {} else: if os.path.exists(args.config_file): with open(args.config_file, "r") as f: config = json.load(f) else: raise Exception("The taxonomy file could not be found: {}".format(args.config_file)) processor, PATH_SAMPLES = hgen.get_processor(args.dataset, parent_path, WITH_CACHE, PATH_OUT, PATH_SAMPLES, smpl_models, args.write_verts.lower() == "false", config) sample_id = args.sample_id if sample_id != "all": print("Processing single sample") # Check if sample exists if not os.path.isdir(os.path.join(PATH_SAMPLES, sample_id)): print("Specified sample does not exist") exit(-1) else: sample_id = [sample_id] else: print("Processing all samples") sample_id = os.listdir(PATH_SAMPLES) if not sample_id: print("No subfolder found") exit(-1) if len(smpl_body_list) == 0: smpl_body_list = processor.generator.load_SMPLs_objects() found = (args.last_sample == "") sample_id.sort() clean_cnt = 1 for sample in sample_id: if not found: if sample == args.last_sample: found = True else: continue if clean_cnt % 100 == 0: clean_cnt = 0 hgen.init() smpl_body_list = processor.generator.load_SMPLs_objects() clean_cnt += 1 print("------------------------------") print(f"Processing {sample}") isdone = False count = 0 while (not isdone and count <= 5): hgen.deselect() if len(sample_id) > 1: hgen.clean_mesh_and_textures( exclude=['Material_0', 'Material_1', 'Armature_0', 'Armature_1', 'body_0', 'body_1']) print("Scene cleaned!\n\n") count += 1 path_sample = os.path.join(PATH_OUT, sample + ('_with_cache' if WITH_CACHE else '')) if not os.path.exists(path_sample): os.makedirs(path_sample) with open(os.path.join(path_sample, f"out_{count}.txt"), "w") as file_out, open( os.path.join(path_sample, f"err_{count}.txt"), "w") as file_err: # file logging try: if args.suppress_out == "True": sys.stdout = file_out sys.stderr = file_err res = processor.process_sample(sample, frame, smpl_body_list) if res: print("Exported!") else: raise Exception("Unknown error") isdone = True except: import traceback sys.stderr.write('error\n') sys.stderr.write(traceback.format_exc()) print(f"Failed -- going with try {count}\n\n") finally: sys.stderr.flush() sys.stdout.flush() sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ except: import traceback sys.stderr.write('error\n') sys.stderr.write(traceback.format_exc()) sys.stdout.flush() sys.stderr.flush() sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ print('error') 
print(traceback.format_exc()) extype, value, tb = sys.exc_info() ipdb.post_mortem(tb)
6,955
Python
35.610526
128
0.570669
eliabntt/animated_human_SMPL_to_USD/start_blend_debug.py
import bpy import sys import ipdb import os from pathlib import Path from bl_ui.space_text import TEXT_MT_editor_menus repo_root_directory = os.path.join(os.path.dirname(__file__), ".") sys.path.append(repo_root_directory) argv = sys.argv[sys.argv.index("--") + 1:] bpy.context.window.workspace = bpy.data.workspaces["Scripting"] bpy.context.view_layer.update() if argv[0].endswith(".py"): print(f"Loading: {os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0])}") text = bpy.data.texts.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), argv[0])) sys.argv = argv[:] print(f"New argv: {sys.argv}") else: print("First argument should be the script file") exit(-1) # Declare operator that runs the blender proc script class RunHumanGeneratorOperator(bpy.types.Operator): bl_idname = "wm.run_humangenerator" bl_label = "Run Human Generator" bl_description = "This operator runs the loaded HumanGenerator script and also makes sure to unload all modules before starting." bl_options = {"REGISTER"} def execute(self, context): # Delete all loaded models inside src/, as they are cached inside blender for module in list(sys.modules.keys()): if module.startswith("humangenerator"): del sys.modules[module] # Make sure the parent of the humangenerator folder is in sys.path import_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".")) if import_path not in sys.path: sys.path.append(import_path) # Run the script try: bpy.ops.text.run_script() except RuntimeError: # Skip irrelevant error messages (The relevant stacktrace+error has already been printed at this point) pass return {"FINISHED"} bpy.utils.register_class(RunHumanGeneratorOperator) def draw(self, context): layout = self.layout st = context.space_data text = st.text is_syntax_highlight_supported = st.is_syntax_highlight_supported() layout.template_header() TEXT_MT_editor_menus.draw_collapsible(context, layout) if text and text.is_modified: row = layout.row(align=True) row.alert = True row.operator("text.resolve_conflict", text="", icon='HELP') layout.separator_spacer() row = layout.row(align=True) row.template_ID(st, "text", new="text.new", unlink="text.unlink", open="text.open") if text: is_osl = text.name.endswith((".osl", ".osl")) if is_osl: row.operator("node.shader_script_update", text="", icon='FILE_REFRESH') else: row = layout.row() row.active = is_syntax_highlight_supported # The following line has changed compared to the orignal code, it starts our operator instead of text.run_script row.operator("wm.run_humangenerator", text="Run") layout.separator_spacer() row = layout.row(align=True) row.prop(st, "show_line_numbers", text="") row.prop(st, "show_word_wrap", text="") syntax = row.row(align=True) syntax.active = is_syntax_highlight_supported syntax.prop(st, "show_syntax_highlight", text="") # Set our draw function as the default draw function for text area headers bpy.types.TEXT_HT_header.draw = draw # Put text into scripting tool for area in bpy.data.workspaces["Scripting"].screens[0].areas.values(): if area.type == 'TEXT_EDITOR': area.spaces.active.text = text
3,540
Python
34.767676
133
0.652825
eliabntt/animated_human_SMPL_to_USD/convert_fbx.py
import json
import os
import humangenerator
import bpy
import humangenerator as hgen
import argparse
import ipdb
import sys
import yaml

parser = argparse.ArgumentParser()
parser.add_argument("--fbx", help="Path to the fbx file")
parser.add_argument("--output_dir", help="Path to where the data should be saved")
parser.add_argument("--temp_dir", help="Path to where the data should be temporary saved")
parser.add_argument("--usd", help="True if export usd is necessary, default to false", default="False")

args = parser.parse_args()

out_dir = args.output_dir
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

fbx = args.fbx

# Select every object in the default scene and delete them with a single operator call
for o in bpy.context.scene.objects:
    o.select_set(True)
bpy.ops.object.delete()

with open(os.path.join(out_dir, f"out.txt"), "w") as file_out, open(
        os.path.join(out_dir, f"err.txt"), "w") as file_err:
    try:
        # redirect stdout/stderr of the export to log files
        sys.stdout = file_out
        sys.stderr = file_err

        bpy.ops.import_scene.fbx(filepath=fbx)

        filepath = os.path.join(out_dir, os.path.basename(fbx[:-4]) + ".usd")
        temp_filepath = os.path.join(args.temp_dir, os.path.basename(fbx[:-4]) + ".usd")
        # export_data expects the temporary *directory* as its first argument
        hgen.export_data(args.temp_dir, out_dir, os.path.basename(fbx[:-4]), False, None, {}, {}, False,
                         args.usd.lower() == "true")

        bpy.ops.object.select_all(action='SELECT')
        bpy.ops.object.delete()
        succeed = True
    except:
        import traceback
        sys.stderr.write('error\n')
        sys.stderr.write(traceback.format_exc())
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
1,653
Python
30.807692
128
0.655777
eliabntt/animated_human_SMPL_to_USD/notes.md
Installation instructions

From the `generate_people` folder

```
mkdir data_folder
cd data_folder
git clone https://github.com/gulvarol/surreact surreal
```

- Download the following two fbx files for SMPL for Maya from https://smpl.is.tue.mpg.de/ using your credentials. Please comply with their license. The files are `basicModel_f_lbs_10_207_0_v1.0.2.fbx` and `basicModel_m_lbs_10_207_0_v1.0.2.fbx` and can be downloaded with this [link](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_maya.zip). Place them in `.../surreal/datageneration/smpl_data`.
- Download this [pkl](https://raw.githubusercontent.com/gulvarol/surreal/master/datageneration/pkl/segm_per_v_overlap.pkl) and place it in `.../surreal/datageneration/smpl_data`
- Get [SMPL_python_v.1.0.0](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_python_v.1.0.0.zip). Extract the basicModel\_[m,f]\_lbs\_10\_207\_0\_v1.0.0.pkl. Place those two files in `.../surreal/datageneration/smpl_data/smpl/models/basicModel_{f,m}_lbs_10_207_0_v1.0.0.pkl`. Run `mv basicmodel_m_lbs_10_207_0_v1.0.0.pkl basicModel_m_lbs_10_207_0_v1.0.0.pkl`
- `cp .../surreal/datageneration/misc/prepare_smpl_data/extract_J_regressors.py .../surreal/datageneration/smpl_data/smpl/`
- Run `python3 extract_J_regressor.py`

## Surreal Textures

- Accept the surreal terms and get an account (you will need the username and password to download the textures)
- Get the download script https://github.com/gulvarol/surreal/blob/master/download/download_smpl_data.sh and place it somewhere you like; let's call this location "loc"
- Download this file https://github.com/gulvarol/surreal/blob/master/download/files/files_smpl_data.txt and place it in "loc/files/files_smpl_data.txt" (alongside the fbx models); essentially you have ./loc/{script,files/files_smpl_data.txt}
- Call the download script with `./download_smpl_data.sh /yourpath/surreal/datageneration/smpl_data username_surreal pw_surreal`

_____

At this point you should have

smpl_data/basicModel_{f,m}_lbs_10_207_0_v1.0.2.fbx
smpl_data/smpl/models/basicModel_{f,m}_lbs_10_207_0_v1.0.0.pkl
smpl_data/segm_per_v_overlap.pkl
smpl_data/joint_regressors.pkl

_____

## For AMASS

- Create a `body_models` folder in `data_folder`
- Create `smplh` and `dmpls` folders inside it
- Download [dmpls](https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=dmpls.tar.xz) (DMPLs compatible with SMPL) and [smplh](https://mano.is.tue.mpg.de/download.php), selecting `Extended SMPLH model for AMASS` (accepting the respective licenses), and place them there.

NOTE: If exporting WITH cache, the hand movement will be complete; if exporting WITHOUT cache it will not, as the basic model for blendshapes is the SMPL model WITHOUT hands. It shouldn't be too difficult to adapt the code to your needs eventually.

TESTED ONLY WITH CMU DATA
2,822
Markdown
56.612244
414
0.759036
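A small hedged helper to verify the layout that the notes above expect; the file names mirror the "At this point you should have" list, and the root argument is an assumption:

```python
import os

# Check that the smpl_data folder matches the layout listed in the notes above.
def check_smpl_data(root):
    expected = [
        "basicModel_f_lbs_10_207_0_v1.0.2.fbx",
        "basicModel_m_lbs_10_207_0_v1.0.2.fbx",
        "smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl",
        "smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl",
        "segm_per_v_overlap.pkl",
        "joint_regressors.pkl",
    ]
    return [p for p in expected if not os.path.isfile(os.path.join(root, p))]

# assumed root path
print(check_smpl_data("data_folder/surreal/datageneration/smpl_data"))
```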
eliabntt/animated_human_SMPL_to_USD/README.md
# Human animations to USD ## This repository is part of the [GRADE](https://eliabntt.github.io/GRADE-RR/home) project ### This was tested on Windows, using Omniverse suggested Drivers and CUDA version. The goal of this code is to show how you can convert any SMPL-based animation to a USD-based animation. The script is capable of managing mesh caches and skeletal animations. It can export point-sequence based animations and skeletal-based animations. ### Installation instructions Install blender connector from the Omniverse launcher. This code was tested with versions 3.4.0-usd.101.0 (main branch). For the paper work we used 3.1.0-usd.100.1.10. Some limitations of 3.1.0-usd.100.1.10: - you might need to use the mesh cache modifier instead of the blendshape. There is a _minimal_ difference that arise when loading the animation in Omniverse's products. - keep textures with absolute paths. You can replace them whenever you want afterwards with our tool [USD_text_replace](https://github.com/eliabntt/GRADE-RR/tree/main/scripts/process_paths) Install the necessary *dependencies*. Locate the blender installation path and run `python.exe -m pip install ipdb pyquaternion scipy torch pyyaml chumpy`. e.g. In my case `C:\User\ebonetto\AppData\Local\ov\pkg\blender-3.4.0-usd.101.0\Release\3.4\python\bin\python.exe -m pip install ipdb pyquaternion scipy torch pyyaml chumpy` Additionally, you need to follow [this]() to fill up the installation missing files that we cannot redistribute because of licensing. ### Already Supported datasets and HowTo expand We are already supporting two datasets. [Cloth3D](https://chalearnlap.cvc.uab.cat/dataset/38/description/) and [AMASS](https://amass.is.tue.mpg.de/). If you want to add a different dataset for AMASS you need to add it to the `data_folder/taxonomy.json` file ### Run the code *From the cloned repository main folder* `\AppData\Local\ov\pkg\blender-3.4.0-usd.101.0\Release\blender.exe --python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset ... --output_dir ... --samples_dir ... --last_sample ... --parent_path ... --sample_id ...` The parameters are explained in the code or self-explaining. `dataset` can be either `[cloth3d, amass]`. With `amass` a necessary configuration file needs to be included (e.g. `--config_file this_repo\humangenerator\amass.json`). We provide a sample config [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/amass.json). Note that AMASS will process the folder directly (by querying subfolders) differently than Cloth3D for which you need to give the main parent folder (eg. `cloth3d/train_t1`). `sample_id` if is an ID it will process that ID otherwise you can set it to all or leave it empty and it will process the whole set of data. `last_sample` is used in case `sample_id` is empty and will be used to signal where to restart the processing. If running multiple generations the code will automatically periodically _clean_ the whole simulation environment including textures and materials to avoid crashing. 
- Cloth3D single sample example `--python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset cloth3d --output_dir outdir --samples_dir cloth3d\train --last_sample 01056 --parent_path D:\generate_people\data_folder\ --sample_id 01056` - AMASS `--python-use-system-env --python-exit-code 0 --python start_blend_debug.py -- generate_sequence.py --dataset amass --output_dir D:\cloth3d\exported_usd --samples_dir D:\AMASS\CMU\ --parent_path D:\Cloth3D_to_usd\data_folder\ --config_file D:\Cloth3D_to_usd\humangenerator\amass.json` ### How does it work The texture of the person is random. In the Cloth3D case the chosen ones are the ones with underwears, with AMASS the ones with clothes. You have the possibility of exporting the SMPL information, the vertex info, the USD file, the STL trace of the animation and much more. You can also suppress the output from the shell. However, the exporter in USD forcibly write directly to stdout. I have found no redirect strategy that works. The system will replicate the input folder structure in the output folder. You can also select a single frame. You are encouraged to extend this and create pull requests. Cloth3D clothes are loaded and exported as MeshCaches. For the human animations you can chose. ### How to edit You can create your own processor by creating a new class [here](https://github.com/eliabntt/generate_people/tree/main/humangenerator), adding your dataset name [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/avail_datasets.yaml) and write the else [here](https://github.com/eliabntt/generate_people/blob/main/humangenerator/generator.py#L17). In practice you need to write your own python `dataset_gen.py`. That file needs to have a `process_sample` method which will be then called by the main script. Within `process_sample` you want to take care either of the sample (CLOTH3D) or of the whole folder (AMASS). Your choice. We see the processing from the loading of the animation to writing data. In the main script then there is a call to `get_processor` that returns `processor, PATH_SAMPLES`, `processor` is the instance of the class you just created. Few lines below you find `res = processor.process_sample(sample, frame, smpl_body_list)`. ### Some notes The exported USDs will have 24 fps as default. We did not investigate this much. You can change this by using the usd converter to text and change the 4th line to 30 fps. This value will influence how the mesh will be loaded into the simulation by the scripts used in GRADE. In our work we did NOT change this value. _______ ### LICENSING For licensing information, please refer to the main repository located [here](https://github.com/eliabntt/GRADE-RR/). __________ ### CITATION If you find this work useful please cite our work based on [this](https://github.com/eliabntt/GRADE-RR#citation) information __________ ### Acknowledgment Code based on - [blenderproc](https://github.com/DLR-RM/BlenderProc/) - [amass](https://amass.is.tue.mpg.de/) - [Cloth3D starter kit](http://158.109.8.102/CLOTH3D/StarterKit.zip) - [surreact](https://github.com/gulvarol/surreact) and [surreal](https://github.com/gulvarol/surreal)
6,354
Markdown
58.95283
371
0.765974
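Following the "How to edit" section of the README above, a new dataset processor only needs the constructor arguments passed by `get_processor` and a `process_sample` method. A hedged skeleton (the class and file names are placeholders, not part of the repository):

```python
# humangenerator/mydataset_gen.py -- illustrative skeleton only
import os


class mydataset:
    def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config={}):
        from humangenerator.generator import generator
        # texture picker, as in the cloth3d/amass processors
        self.generator = generator(os.path.join(parent_path, "surreal", "datageneration", "smpl_data"))
        self.with_cache = with_cache
        self.path_out = path_out
        self.path_samples = path_samples
        self.smpl = smpl_models

    def process_sample(self, sample, frame, smpl_body_list):
        # 1) load the animation for `sample`, 2) drive the SMPL bodies,
        # 3) export via hgen.export_data(...) as the cloth3d/amass processors do.
        return True
```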
eliabntt/animated_human_SMPL_to_USD/LICENSE.md
For licensing information, please refer to the main repository of the project, located [here](https://github.com/eliabntt/GRADE-RR/). The same terms and conditions apply.
169
Markdown
83.999958
168
0.798817
eliabntt/animated_human_SMPL_to_USD/data_folder/smpl/smpl_np.py
import sys import numpy as np import pickle class SMPLModel(): def __init__(self, model_path): """ SMPL model. Parameter: --------- model_path: Path to the SMPL model parameters, pre-processed by `preprocess.py`. """ with open(model_path, 'rb') as f: if sys.version_info[0] == 2: params = pickle.load(f) # Python 2.x elif sys.version_info[0] == 3: params = pickle.load(f, encoding='latin1') # Python 3.x self.J_regressor = params['J_regressor'] self.weights = params['weights'] self.posedirs = params['posedirs'] self.v_template = params['v_template'] self.shapedirs = params['shapedirs'] self.faces = params['f'] self.kintree_table = params['kintree_table'] id_to_col = { self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1]) } self.parent = { i: id_to_col[self.kintree_table[0, i]] for i in range(1, self.kintree_table.shape[1]) } self.pose_shape = [24, 3] self.beta_shape = [10] self.trans_shape = [3] self.pose = np.zeros(self.pose_shape) self.beta = np.zeros(self.beta_shape) self.trans = np.zeros(self.trans_shape) self.verts = None self.J = None self.R = None self.update() def set_params(self, pose=None, beta=None, trans=None): """ Set pose, shape, and/or translation parameters of SMPL model. Verices of the model will be updated and returned. Parameters: --------- pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation relative to parent joint. For root joint it's global orientation. Represented in a axis-angle format. beta: Parameter for model shape. A vector of shape [10]. Coefficients for PCA component. Only 10 components were released by MPI. trans: Global translation of shape [3]. Return: ------ Updated vertices. """ if pose is not None: self.pose = pose if beta is not None: self.beta = beta if trans is not None: self.trans = trans self.update() return self.verts, self.J def update(self): """ Called automatically when parameters are updated. """ # how beta affect body shape v_shaped = self.shapedirs.dot(self.beta) + self.v_template # joints location self.J = self.J_regressor.dot(v_shaped) pose_cube = self.pose.reshape((-1, 1, 3)) # rotation matrix for each joint self.R = self.rodrigues(pose_cube) I_cube = np.broadcast_to( np.expand_dims(np.eye(3), axis=0), (self.R.shape[0]-1, 3, 3) ) lrotmin = (self.R[1:] - I_cube).ravel() # how pose affect body shape in zero pose v_posed = v_shaped + self.posedirs.dot(lrotmin) # world transformation of each joint G = np.empty((self.kintree_table.shape[1], 4, 4)) G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1])))) for i in range(1, self.kintree_table.shape[1]): G[i] = G[self.parent[i]].dot( self.with_zeros( np.hstack( [self.R[i],((self.J[i, :]-self.J[self.parent[i],:]).reshape([3,1]))] ) ) ) G = G - self.pack( np.matmul( G, np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1]) ) ) # transformation of each vertex T = np.tensordot(self.weights, G, axes=[[1], [0]]) rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1]))) v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3] self.verts = v + self.trans.reshape([1, 3]) def rodrigues(self, r): """ Rodrigues' rotation formula that turns axis-angle vector into rotation matrix in a batch-ed manner. Parameter: ---------- r: Axis-angle rotation vector of shape [batch_size, 1, 3]. Return: ------- Rotation matrix of shape [batch_size, 3, 3]. 
""" theta = np.linalg.norm(r, axis=(1, 2), keepdims=True) # avoid zero divide theta = np.maximum(theta, np.finfo(np.float64).tiny) r_hat = r / theta cos = np.cos(theta) z_stick = np.zeros(theta.shape[0]) m = np.dstack([ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick] ).reshape([-1, 3, 3]) i_cube = np.broadcast_to( np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3] ) A = np.transpose(r_hat, axes=[0, 2, 1]) B = r_hat dot = np.matmul(A, B) R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m return R def with_zeros(self, x): """ Append a [0, 0, 0, 1] vector to a [3, 4] matrix. Parameter: --------- x: Matrix to be appended. Return: ------ Matrix after appending of shape [4,4] """ return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]]))) def pack(self, x): """ Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched manner. Parameter: ---------- x: Matrices to be appended of shape [batch_size, 4, 1] Return: ------ Matrix of shape [batch_size, 4, 4] after appending. """ return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
5,242
Python
27.037433
80
0.571347
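A minimal usage sketch of the class above; the model path is an assumption and must point at one of the preprocessed SMPL .pkl files:

```python
import numpy as np
from data_folder.smpl.smpl_np import SMPLModel

# Pose the template with a mild random pose, neutral shape, and no translation.
smpl = SMPLModel("surreal/datageneration/smpl_data/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl")  # assumed path
pose = 0.2 * (np.random.rand(24, 3) - 0.5)   # axis-angle per joint
beta = np.zeros(10)                          # shape coefficients
trans = np.zeros(3)                          # global translation

verts, joints = smpl.set_params(pose=pose, beta=beta, trans=trans)
print(verts.shape, joints.shape)             # (6890, 3) and (24, 3)
```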
eliabntt/animated_human_SMPL_to_USD/humangenerator/cloth3d_gen.py
from humangenerator.util.blender_util import * import bpy from .util.cloth3d_util import loadInfo, bodyCache, loadGarment import humangenerator as hgen from pathlib import Path class cloth3d: def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts): from humangenerator.generator import generator # temporary usd export path, we cannot directly write in mounted network drives sometimes temp_path = os.path.join(parent_path, 'usd_exports') # surreal path for textures smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") self.generator = generator(smpl_path) self.with_cache = with_cache self.path_out = path_out self.path_samples = path_samples self.smpl = smpl_models self.temp_path = temp_path self.write_verts = (write_verts == "True") def animateSMPL(self, sample, smpl_ob, info, j): if self.with_cache: bodyCache(self.path_cache, sample, info, smpl_ob.ob, self.smpl) # generate blendshapes + trans s = info['shape'] smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"]) if len(info['poses'].shape) > 1: N = info['poses'].shape[1] else: sys.stderr.write('Error animation is ONLY ONE FRAME \n') N = 1 for i in range(N): if N > 1: p = info['poses'][:, i] t = info['trans'][:, i].reshape((3,)) - j[0] else: p = info['poses'][:] t = info['trans'][:].reshape((3,)) - j[0] bpy.data.scenes["Scene"].frame_set(i) smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache) def generate_SMPLbody_animation(self, sample, info, gender, index): print("Generate Animation..") if len(info['poses'].shape) > 1: p = info['poses'][:, 0].reshape((24, 3)) t = info['trans'][:, 0].reshape((3,)) else: p = info['poses'][:].reshape((24, 3)) t = info['trans'][:].reshape((3,)) s = info['shape'] v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t) cloth_img_name = self.generator.pick_skin_texture(gender=gender, clothing_option="grey") img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"] img.image = bpy.data.images.load(cloth_img_name) material = bpy.data.materials[f'Material_{index}'] self.smpl_body_list[index].refine_SMPL(material, j, info['zrot']) self.animateSMPL(sample, self.smpl_body_list[index], info, j) # Smooth bpy.ops.object.shade_smooth() def loadCloth3DSequence(self, sample: str, info: dict, frame: int = None): if len(info['poses'].shape) > 1: bpy.context.scene.frame_end = info['poses'].shape[-1] - 1 else: bpy.context.scene.frame_end = 1 bpy.ops.object.select_all(action='DESELECT') # delete current garments for obj in bpy.data.objects.values(): if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): obj.select_set(True) bpy.ops.object.delete() # Load new garments for garment in info['outfit']: loadGarment(self.path_samples, self.path_cache, sample, garment, info) for obj in bpy.data.objects.values(): obj.select_set(False) gender = 'm' if info['gender'] else 'f' index = 0 if info['gender'] else 1 self.generate_SMPLbody_animation(sample, info, gender, index) bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}'] arm_obj = bpy.data.objects[f'Armature_{index}'] bpy.context.scene.frame_current = bpy.context.scene.frame_start for obj in bpy.data.objects.values(): if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): obj.select_set(True) obj.parent = arm_obj obj.rotation_euler = [0, 0, 0] obj.select_set(False) for obj in bpy.data.objects.values(): if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower(): obj.select_set(True) else: if 
str(index) in obj.name: obj.select_set(True) if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end: bpy.context.scene.frame_current = frame def process_sample(self, sample: str, frame: int, smpl_body_list): # load info info = loadInfo(os.path.join(self.path_samples, sample, 'info.mat')) self.smpl_body_list = smpl_body_list subfolder_name = Path(sample).stem + ('_with_cache' if self.with_cache else '') self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache) if frame is None: self.loadCloth3DSequence(sample, info) else: self.loadCloth3DSequence(sample, info, frame) bpy.ops.wm.save_as_mainfile(filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend")) return hgen.export_data(self.temp_path, self.path_out, Path(sample).stem, self.with_cache, frame, info, info['zrot'], self.write_verts)
5,429
Python
42.095238
143
0.589796
eliabntt/animated_human_SMPL_to_USD/humangenerator/__init__.py
import os import sys # check the python version, only python 3.X is allowed: if sys.version_info.major < 3: raise Exception("HumanGenerator requires at least python 3.X to run.") sys.path.remove(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from .util.blender_util import * from data_folder.smpl.smpl_np import SMPLModel from .generator import *
370
Python
29.916664
79
0.737838
eliabntt/animated_human_SMPL_to_USD/humangenerator/generator.py
import os from random import choice import bpy from .util.smplutils import SMPL_Body, rotate_vector from .cloth3d_gen import * from .amass_gen import * from .util.blender_util import export_stl_data, write_pkl_data, write_usd # import amass_gen def get_processor(dataset, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config={}): if dataset == "cloth3d": return cloth3d(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts), path_samples if dataset == "amass": # todo fixme tmp_obj = amass(parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config) return tmp_obj, path_samples raise Exception("NOT A VALID DATASET") def export_data(temp_path, path_out, sample, with_cache, frame, info, orient, write_verts, usd=True): try: if usd: write_usd(temp_path, path_out, sample + ('_with_cache' if with_cache else ''), with_cache, True if frame == None else False, 0 if frame == None else frame) for obj in bpy.data.objects.values(): if "body" in obj.name.lower() and obj.select_get(): ob = obj elif "armature" in obj.name.lower() and obj.select_get(): arm_ob = obj export_stl_data(path_out, sample + ('_with_cache' if with_cache else ''), [ob for ob in bpy.data.objects if ob.select_get()], orient) write_pkl_data(path_out, sample + ('_with_cache' if with_cache else ''), arm_ob, ob, info, write_verts=write_verts) except: return False return True def create_outfolder_structure(path_out, subfolder_name, with_cache): if (with_cache): path_cache = os.path.join(path_out, subfolder_name, 'view_cache') if not os.path.exists(path_cache): os.makedirs(path_cache) else: path_cache = os.path.join(path_out, subfolder_name, 'view_cache') if not os.path.exists(path_cache): os.makedirs(path_cache) return path_cache class generator: def __init__(self, smpl_path, write_verts=False): self.SMPL_PATH = smpl_path def pick_skin_texture(self, split_name='all', clothing_option="grey", gender="m"): if gender == "f": with open( os.path.join(self.SMPL_PATH, "textures", "female_{}.txt".format(split_name)) ) as f: txt_paths = f.read().splitlines() else: with open( os.path.join(self.SMPL_PATH, "textures", "male_{}.txt".format(split_name)) ) as f: txt_paths = f.read().splitlines() # if using only one source of clothing if clothing_option == "nongrey": txt_paths = [k for k in txt_paths if "nongrey" in k] elif clothing_option == "grey": txt_paths = [k for k in txt_paths if "nongrey" not in k] elif clothing_option == "same": # Orig txt_paths = ["textures/male/nongrey_male_0244.jpg"] elif clothing_option == "all": txt_paths = [k for k in txt_paths] # random clothing texture cloth_img_name = choice(txt_paths) cloth_img_name = os.path.join(self.SMPL_PATH, cloth_img_name) print("Picked skin texture: {}".format(cloth_img_name)) return cloth_img_name def create_material_SMPL(self, gender="m", person_no=0, clothing_option="grey", split_name="all"): print("Creating SMPL texture material") cloth_img_name = self.pick_skin_texture(split_name, clothing_option, gender) material = bpy.data.materials.new(name=f"Material_{person_no}") material.use_nodes = True # Add nodes tree = material.node_tree nodes = tree.nodes # Principled BSDf bsdf = nodes['Principled BSDF'] # Image img = nodes.new('ShaderNodeTexImage') img.image = bpy.data.images.load(cloth_img_name) # Links tree.links.new(img.outputs[0], bsdf.inputs[0]) return material def load_SMPLs_objects(self): # create the material for SMPL material = self.create_material_SMPL("m", 0) print("Male Material Created") smpl_body_list = [] # create 
the SMPL_Body object smpl_body_list.append( SMPL_Body(self.SMPL_PATH, material, 0, "male", person_no=0) ) print("Male created") material = self.create_material_SMPL("f", 1) print("Female material created") smpl_body_list.append( SMPL_Body(self.SMPL_PATH, material, 0, "female", person_no=1) ) print("Female created") return smpl_body_list
4,735
Python
38.140496
123
0.597043
eliabntt/animated_human_SMPL_to_USD/humangenerator/amass_gen.py
from pathlib import Path from humangenerator.util.blender_util import * import bpy from .util.amass_util import loadInfo, bodyCache, _load_parametric_body_model, _get_supported_mocap_datasets, \ _get_sequence_path import humangenerator as hgen class amass: def __init__(self, parent_path, with_cache, path_out, path_samples, smpl_models, write_verts, config): # temporary usd export path, we cannot directly write in mounted network drives sometimes temp_path = os.path.join(parent_path, 'usd_exports') # surreal path for textures smpl_path = os.path.join(parent_path, "surreal", "datageneration", "smpl_data") from humangenerator.generator import generator self.generator = generator(smpl_path) self.with_cache = with_cache self.path_out = path_out self.path_samples = path_samples self.smpl = smpl_models self.sub_dataset_id = config['sub_dataset_id'] self.num_betas = config['num_betas'] self.num_dmpls = config['num_dmpls'] self.subject_ids = config['subject_ids'].split() self.write_verts = (write_verts == "True") self.temp_path = temp_path self.body_model_m, self.faces_m = _load_parametric_body_model(parent_path, "male", self.num_betas, self.num_dmpls) self.body_model_f, self.faces_f = _load_parametric_body_model(parent_path, "female", self.num_betas, self.num_dmpls) taxonomy_file_path = os.path.join(parent_path, "taxonomy.json") self.supported_datasets = _get_supported_mocap_datasets(taxonomy_file_path, path_samples) def animateSMPL(self, sample, smpl_ob, info, body_model): if self.with_cache: bodyCache(self.path_cache, sample, info, smpl_ob.ob, body_model, self.num_betas, self.num_dmpls) # generate blendshapes + trans s = info['betas'][:10] smpl_ob.reset_joint_positions(s, bpy.data.scenes["Scene"]) for i in range(info['poses'].shape[0]): p = np.append(info['poses'][i][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0) t = info['trans'][i].reshape((3,)) bpy.data.scenes["Scene"].frame_set(i) smpl_ob.apply_trans_pose_shape(t, p, s, i, with_blendshapes=not self.with_cache) def generate_SMPLbody_animation(self, sample, info, gender, index, body_model): print("Generate Animation..") orient = info['poses'][0, :3][2] p = np.append(info['poses'][0][:66].reshape(-1, 3), [[0, 0, 0], [0, 0, 0]], 0) t = info['trans'][0].reshape((3,)) s = info['betas'][:10] v, j = self.smpl[gender].set_params(pose=p, beta=s, trans=t) cloth_img_name = self.generator.pick_skin_texture(gender=gender, clothing_option="all") img = bpy.data.materials[f'Material_{index}'].node_tree.nodes["Image Texture"] img.image = bpy.data.images.load(cloth_img_name) material = bpy.data.materials[f'Material_{index}'] self.smpl_body_list[index].refine_SMPL(material, j, orient) # info['zrot'] self.animateSMPL(sample, self.smpl_body_list[index], info, body_model) # Smooth bpy.ops.object.shade_smooth() def loadAmassSequence(self, sample: str, info: dict, body_model, frame: int = None): bpy.context.scene.frame_end = info['poses'].shape[0] - 1 bpy.ops.object.select_all(action='DESELECT') # delete current garments for obj in bpy.data.objects.values(): if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): obj.select_set(True) bpy.ops.object.delete() for obj in bpy.data.objects.values(): obj.select_set(False) gender = 'm' if info['gender'] == 'male' else 'f' index = 0 if info['gender'] == 'male' else 1 self.generate_SMPLbody_animation(sample, info, gender, index, body_model) bpy.context.view_layer.objects.active = bpy.data.objects[f'Armature_{index}'] arm_obj = bpy.data.objects[f'Armature_{index}'] bpy.context.scene.frame_current = 
bpy.context.scene.frame_start for obj in bpy.data.objects.values(): if 'body' not in obj.name.lower() and 'armature' not in obj.name.lower(): obj.select_set(True) obj.parent = arm_obj obj.rotation_euler = [0, 0, 0] obj.select_set(False) for obj in bpy.data.objects.values(): if 'armature' not in obj.name.lower() and 'body' not in obj.name.lower(): obj.select_set(True) else: if str(index) in obj.name: obj.select_set(True) if frame != None and frame >= 0 and frame <= bpy.context.scene.frame_end: bpy.context.scene.frame_current = frame def process_sample(self, sample: str, frame: int, smpl_body_list): # load info if sample in self.subject_ids: for subject_id in os.listdir(os.path.join(self.path_samples, sample)): sequence_path, main_path = _get_sequence_path(self.supported_datasets, self.sub_dataset_id, sample, subject_id) info = loadInfo(sequence_path) self.smpl_body_list = smpl_body_list subfolder_name = Path(subject_id).stem + ('_with_cache' if self.with_cache else '') self.path_cache = hgen.create_outfolder_structure(self.path_out, subfolder_name, self.with_cache) if frame is None: self.loadAmassSequence(sample, info, self.body_model_m if info["gender"] == "male" else self.body_model_f) else: self.loadAmassSequence(sample, info, self.body_model_m if info["gender"] == "male" else self.body_model_f, frame) bpy.ops.wm.save_as_mainfile(filepath=os.path.join(self.path_out, subfolder_name, subfolder_name + ".blend")) my_l = list(info.keys()) new_info = {} for i in my_l: new_info[i] = info[i] hgen.export_data(self.temp_path, self.path_out, Path(subject_id).stem, self.with_cache, frame, new_info, info['poses'][0, :3][2], self.write_verts) return True
6,545
Python
47.488889
126
0.579221
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/amass_util.py
import numpy as np import glob import os import random from .IO import readPC2, writePC2 import bpy, sys, torch from .blender_util import mesh_cache from typing import Tuple def bodyCache(path_cache, sample, info, ob, body_model, num_betas, num_dmpls): print("Processing Body Cache") pc2_path = os.path.join(path_cache, sample + '.pc2') V = np.zeros((info['poses'].shape[1], 6890, 3), np.float32) bdata = info time_length = len(bdata['trans']) comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu") body_params = { 'root_orient': torch.Tensor(bdata['poses'][:, :3]).to(comp_device), # controls the global root orientation 'pose_body': torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device), # controls the body 'pose_hand': torch.Tensor(bdata['poses'][:, 66:]).to(comp_device), # controls the finger articulation 'trans': torch.Tensor(bdata['trans']).to(comp_device), # controls the global body position 'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)).to( comp_device), # controls the body shape. Body shape is static 'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]).to(comp_device) # controls soft tissue dynamics } body_trans_root = body_model( **{k: v for k, v in body_params.items() if k in ['pose_body', 'betas', 'pose_hand', 'dmpls', 'trans', 'root_orient']}) if not os.path.isfile(pc2_path): V = body_trans_root.v.data.cpu().numpy() print("Writing PC2 file...") writePC2(pc2_path, V) else: V = readPC2(pc2_path)['V'] if V.shape[1] != len(ob.data.vertices): sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!") sys.stderr.flush() mesh_cache(ob, pc2_path) bpy.ops.object.shade_smooth() return body_trans_root def loadInfo(sequence_path): if os.path.exists(sequence_path): # load AMASS dataset sequence file which contains the coefficients for the whole motion sequence sequence_body_data = np.load(sequence_path) # get the number of supported frames return sequence_body_data else: raise Exception( "Invalid sequence/subject category identifiers, please choose a " "valid one. Used path: {}".format(sequence_path)) def _get_sequence_path(supported_mocap_datasets: dict, used_sub_dataset_id: str, used_subject_id: str, used_sequence_id: str) -> [str, str]: """ Extract pose and shape parameters corresponding to the requested pose from the database to be processed by the parametric model :param supported_mocap_datasets: A dict which maps sub dataset names to their paths. :param used_sub_dataset_id: Identifier for the sub dataset, the dataset which the human pose object should be extracted from. :param used_subject_id: Type of motion from which the pose should be extracted, this is dataset dependent parameter. :param used_sequence_id: Sequence id in the dataset, sequences are the motion recorded to represent certain action. :return: tuple of arrays contains the parameters. 
Type: tuple """ # check if the sub_dataset is supported if used_sub_dataset_id in supported_mocap_datasets: # get path from dictionary sub_dataset_path = supported_mocap_datasets[used_sub_dataset_id] # concatenate path to specific if not used_subject_id: # if none was selected possible_subject_ids = glob.glob(os.path.join(sub_dataset_path, "*")) possible_subject_ids.sort() if len(possible_subject_ids) > 0: used_subject_id_str = os.path.basename(random.choice(possible_subject_ids)) else: raise Exception("No subjects found in folder: {}".format(sub_dataset_path)) else: try: used_subject_id_str = "{:02d}".format(int(used_subject_id)) except: used_subject_id_str = used_subject_id subject_path = os.path.join(sub_dataset_path, used_subject_id_str) sequence_path = os.path.join(subject_path, used_sequence_id) return sequence_path, subject_path else: raise Exception( "The requested mocap dataset is not yest supported, please choose anothe one from the following " "supported datasets: {}".format([key for key, value in supported_mocap_datasets.items()])) def _load_parametric_body_model(data_path: str, used_body_model_gender: str, num_betas: int, num_dmpls: int) -> Tuple["BodyModel", np.array]: """ loads the parametric model that is used to generate the mesh object :return: parametric model. Type: tuple. """ import torch from human_body_prior.body_model.body_model import BodyModel bm_path = os.path.join(data_path, 'body_models', 'smplh', used_body_model_gender, 'model.npz') # body model dmpl_path = os.path.join(data_path, 'body_models', 'dmpls', used_body_model_gender, 'model.npz') # deformation model if not os.path.exists(bm_path) or not os.path.exists(dmpl_path): raise Exception("Parametric Body model doesn't exist, please follow download instructions section in AMASS Example") comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu") body_model = BodyModel(bm_path=bm_path, num_betas=num_betas, num_dmpls=num_dmpls, path_dmpl=dmpl_path).to(comp_device) faces = body_model.f.detach().cpu().numpy() return body_model, faces def _get_supported_mocap_datasets(taxonomy_file_path: str, data_path: str) -> dict: """ get latest updated list from taxonomoy json file about the supported mocap datasets supported in the loader module and update.supported_mocap_datasets list :param taxonomy_file_path: path to taxomomy.json file which contains the supported datasets and their respective paths. Type: string. :param data_path: path to the AMASS dataset root folder. Type: string. """ import json # dictionary contains mocap dataset name and path to its sub folder within the main dataset, dictionary will # be filled from taxonomy.json file which indicates the supported datastests supported_mocap_datasets = {} if os.path.exists(taxonomy_file_path): with open(taxonomy_file_path, "r") as f: loaded_data = json.load(f) for block in loaded_data: if "sub_data_id" in block: sub_dataset_id = block["sub_data_id"] supported_mocap_datasets[sub_dataset_id] = os.path.join(data_path, block["path"]) else: raise Exception("The taxonomy file could not be found: {}".format(taxonomy_file_path)) return supported_mocap_datasets
6,996
Python
50.448529
163
0.646512
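`_get_supported_mocap_datasets` in the file above builds its dataset map from a `taxonomy.json` file whose blocks carry a `sub_data_id` and a relative `path`. A self-contained sketch of just that parsing step; the dataset names and the temporary root folder below are illustrative:

```
# Minimal sketch of the taxonomy.json parsing done by _get_supported_mocap_datasets():
# blocks containing a "sub_data_id" are mapped to sub-folder paths under the AMASS root.
import json
import os
import tempfile

taxonomy = [
    {"sub_data_id": "CMU", "path": "CMU"},
    {"sub_data_id": "KIT", "path": "KIT"},
    {"name": "not-a-dataset-block"},  # blocks without "sub_data_id" are skipped
]

with tempfile.TemporaryDirectory() as data_path:
    taxonomy_file = os.path.join(data_path, "taxonomy.json")
    with open(taxonomy_file, "w") as f:
        json.dump(taxonomy, f)

    supported = {}
    with open(taxonomy_file, "r") as f:
        for block in json.load(f):
            if "sub_data_id" in block:
                supported[block["sub_data_id"]] = os.path.join(data_path, block["path"])

    print(supported)  # e.g. {'CMU': '<tmp>/CMU', 'KIT': '<tmp>/KIT'}
```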
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/cloth3d_util.py
import numpy as np import scipy.io as sio from math import cos, sin from .blender_util import readOBJ, createBPYObj, setMaterial, mesh_cache, convert_meshcache import os, sys from .IO import readPC2, writePC2 import bpy def loadInfo(path: str): ''' this function should be called instead of direct sio.loadmat as it cures the problem of not properly recovering python dictionaries from mat files. It calls the function check keys to cure all entries which are still mat-objects ''' data = sio.loadmat(path, struct_as_record=False, squeeze_me=True) del data['__globals__'] del data['__header__'] del data['__version__'] return _check_keys(data) def _check_keys(dict): ''' checks if entries in dictionary are mat-objects. If yes todict is called to change them to nested dictionaries ''' for key in dict: if isinstance(dict[key], sio.matlab.mio5_params.mat_struct): dict[key] = _todict(dict[key]) return dict def _todict(matobj): ''' A recursive function which constructs from matobjects nested dictionaries ''' dict = {} for strg in matobj._fieldnames: elem = matobj.__dict__[strg] if isinstance(elem, sio.matlab.mio5_params.mat_struct): dict[strg] = _todict(elem) elif isinstance(elem, np.ndarray) and np.any([isinstance(item, sio.matlab.mio5_params.mat_struct) for item in elem]): dict[strg] = [None] * len(elem) for i,item in enumerate(elem): if isinstance(item, sio.matlab.mio5_params.mat_struct): dict[strg][i] = _todict(item) else: dict[strg][i] = item else: dict[strg] = elem return dict # Computes matrix of rotation around z-axis for 'zrot' radians def zRotMatrix(zrot): c, s = cos(zrot), sin(zrot) return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], np.float32) """ CAMERA """ def intrinsic(): RES_X = 640 RES_Y = 480 f_mm = 50 # blender default sensor_w_mm = 36 # blender default sensor_h_mm = sensor_w_mm * RES_Y / RES_X fx_px = f_mm * RES_X / sensor_w_mm; fy_px = f_mm * RES_Y / sensor_h_mm; u = RES_X / 2; v = RES_Y / 2; return np.array([[fx_px, 0, u], [0, fy_px, v], [0, 0, 1]], np.float32) def extrinsic(camLoc): R_w2bc = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]], np.float32) T_w2bc = -1 * R_w2bc.dot(camLoc) R_bc2cv = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], np.float32) R_w2cv = R_bc2cv.dot(R_w2bc) T_w2cv = R_bc2cv.dot(T_w2bc) return np.concatenate((R_w2cv, T_w2cv[:,None]), axis=1) def proj(camLoc): return intrinsic().dot(extrinsic(camLoc)) """ Mesh to UV map Computes correspondences between 3D mesh and UV map NOTE: 3D mesh vertices can have multiple correspondences with UV vertices """ def mesh2UV(F, Ft): m2uv = {v: set() for f in F for v in f} for f, ft in zip(F, Ft): for v, vt in zip(f, ft): m2uv[v].add(vt) # m2uv = {k:list(v) for k,v in m2uv.items()} return m2uv # Maps UV coordinates to texture space (pixel) IMG_SIZE = 2048 # all image textures have this squared size def uv_to_pixel(vt): px = vt * IMG_SIZE # scale to image plane px %= IMG_SIZE # wrap to [0, IMG_SIZE] # Note that Blender graphic engines invert vertical axis return int(px[0]), int(IMG_SIZE - px[1]) # texel X, texel Y def loadGarment(path_sample, path_cache, sample, garment, info): print("Processing Garment Cache") print(f"Loading {garment}") texture = info['outfit'][garment]['texture'] # Read OBJ file and create BPY object V, F, Vt, Ft = readOBJ(os.path.join(path_sample, sample, garment + '.obj')) ob = createBPYObj(V, F, Vt, Ft, name=sample + '_' + garment) # z-rot ob.rotation_euler[2] = info['zrot'] # Convert cache PC16 to PC2 pc2_path = os.path.join(path_cache, sample + '_' + garment + '.pc2' ) if not os.path.isfile(pc2_path): # 
Convert PC16 to PC2 (and move to view_cache folder) # Add trans to vertex locations pc16_path = os.path.join(path_sample, sample, garment + '.pc16') V = readPC2(pc16_path, True)['V'] for i in range(V.shape[0]): sys.stdout.write('\r' + str(i + 1) + '/' + str(V.shape[0])) sys.stdout.flush() if V.shape[0] > 1: V[i] += info['trans'][:, i][None] else: V[i] += info['trans'][:][None] writePC2(pc2_path, V) else: V = readPC2(pc2_path)['V'] if V.shape[1] != len(ob.data.vertices): sys.stderr.write("ERROR IN THE VERTEX COUNT!!!!!") sys.stderr.flush() mesh_cache(ob, pc2_path) # necessary to have this in the old version of the code with the old omni-blender # convert_meshcache(bpy.ops.object) # Set material setMaterial(path_sample, ob, sample, garment, texture) # Smooth bpy.ops.object.shade_smooth() print(f"\nLoaded {garment}.\n") def bodyCache(path_cache, sample, info, ob, smpl): print("Processing Body Cache") pc2_path = os.path.join(path_cache, sample + '.pc2') if not os.path.isfile(pc2_path): # Compute body sequence print("Computing body sequence...") print("") gender = 'm' if info['gender'] else 'f' if len(info['poses'].shape)>1: N = info['poses'].shape[1] else: N = 1 V = np.zeros((N, 6890, 3), np.float32) for i in range(N): sys.stdout.write('\r' + str(i + 1) + '/' + str(N)) sys.stdout.flush() s = info['shape'] if N == 1: p = info['poses'][:].reshape((24, 3)) t = info['trans'][:].reshape((3,)) else: p = info['poses'][:, i].reshape((24, 3)) t = info['trans'][:, i].reshape((3,)) v, j = smpl[gender].set_params(pose=p, beta=s, trans=t) V[i] = v - j[0:1] print("") print("Writing PC2 file...") writePC2(pc2_path, V) else: V = readPC2(pc2_path)['V'] if V.shape[1] != len(ob.data.vertices): sys.stderr.write("ERROR IN THE VERTEX COUNT FOR THE BODY!!!!!") sys.stderr.flush() mesh_cache(ob, pc2_path) bpy.ops.object.shade_smooth()
6,626
Python
32.469697
125
0.551313
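The camera helpers in the file above compose a 3x4 projection as `proj(camLoc) = intrinsic() @ extrinsic(camLoc)`. The following standalone numpy sketch rebuilds the same matrices from the constants used above and projects one homogeneous world point; the camera location and the point are illustrative:

```
import numpy as np

def intrinsic_K():
    # Same constants as intrinsic() above: 640x480 render, 50 mm lens, 36 mm sensor width.
    RES_X, RES_Y, f_mm, sensor_w = 640, 480, 50.0, 36.0
    sensor_h = sensor_w * RES_Y / RES_X
    fx, fy = f_mm * RES_X / sensor_w, f_mm * RES_Y / sensor_h
    return np.array([[fx, 0, RES_X / 2], [0, fy, RES_Y / 2], [0, 0, 1]], np.float32)

def extrinsic_Rt(cam_loc):
    # World -> Blender camera -> OpenCV camera, as in extrinsic() above.
    R_w2bc = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]], np.float32)
    T_w2bc = -R_w2bc.dot(cam_loc)
    R_bc2cv = np.diag([1.0, -1.0, -1.0]).astype(np.float32)
    return np.concatenate((R_bc2cv @ R_w2bc, (R_bc2cv @ T_w2bc)[:, None]), axis=1)

cam_loc = np.array([4.0, 0.0, 1.0], np.float32)   # illustrative camera position
P = intrinsic_K() @ extrinsic_Rt(cam_loc)          # 3x4 projection, like proj()
X = np.append(np.array([0.0, 0.0, 1.0]), 1.0)      # homogeneous world point
u, v, w = P @ X
print(u / w, v / w)                                # -> 320.0 240.0 (image center)
```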
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/smplutils.py
import bpy from bpy_extras.object_utils import world_to_camera_view from mathutils import Matrix, Quaternion import numpy as np import pickle as pkl import os import math from pyquaternion import Quaternion # computes rotation matrix through Rodrigues formula as in cv2.Rodrigues def Rodrigues(rotvec): theta = np.linalg.norm(rotvec) r = (rotvec / theta).reshape(3, 1) if theta > 0.0 else rotvec cost = np.cos(theta) mat = np.asarray([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]]) return cost * np.eye(3) + (1 - cost) * r.dot(r.T) + np.sin(theta) * mat # transformation between pose and blendshapes def rodrigues2bshapes(pose): rod_rots = np.asarray(pose).reshape(24, 3) mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots] bshapes = np.concatenate( [(mat_rot - np.eye(3)).ravel() for mat_rot in mat_rots[1:]] ) return mat_rots, bshapes def rotate_vector(vector, axis, angle): """ Rotate a vector around an axis by an angle. """ q = Quaternion(axis=axis, angle=angle) return q.rotate(vector) class SMPL_Body: def __init__(self, smpl_data_folder, material, j, gender="female", person_no=0, zrot=0): # load fbx model bpy.ops.import_scene.fbx( filepath=os.path.join( smpl_data_folder, "basicModel_{}_lbs_10_207_0_v1.0.2.fbx".format(gender[0]), ), axis_forward="Y", axis_up="Z", global_scale=100, ) J_regressors = pkl.load( open(os.path.join(smpl_data_folder, "joint_regressors.pkl"), "rb") ) # 24 x 6890 regressor from vertices to joints self.joint_regressor = J_regressors["J_regressor_{}".format(gender)] self.j = j armature_name = "Armature_{}".format(person_no) bpy.context.active_object.name = armature_name self.gender_name = "{}_avg".format(gender[0]) self.obj_name = "body_{:d}".format(person_no) bpy.data.objects[armature_name].children[0].name = self.obj_name # not the default self.gender_name because each time fbx is loaded it adds some suffix self.ob = bpy.data.objects[self.obj_name] # Rename the armature self.ob.data.use_auto_smooth = False # autosmooth creates artifacts # assign the existing spherical harmonics material self.ob.active_material = bpy.data.materials["Material_{}".format(person_no)] bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN') # clear existing animation data # self.ob.shape_key_clear() self.ob.data.shape_keys.animation_data_clear() self.arm_ob = bpy.data.objects[armature_name] self.arm_ob.animation_data_clear() self.setState0() # self.ob.select = True # blender < 2.8x self.ob.select_set(True) # bpy.context.scene.objects.active = self.ob # blender < 2.8x bpy.context.view_layer.objects.active = self.ob self.smpl_data_folder = smpl_data_folder self.materials = self.create_segmentation(material, smpl_data_folder) # unblocking both the pose and the blendshape limits for k in self.ob.data.shape_keys.key_blocks.keys(): self.ob.data.shape_keys.key_blocks[k].slider_min = -100 self.ob.data.shape_keys.key_blocks[k].slider_max = 100 # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x bpy.context.view_layer.objects.active = self.arm_ob # order self.part_match = { "root": "root", "bone_00": "Pelvis", "bone_01": "L_Hip", "bone_02": "R_Hip", "bone_03": "Spine1", "bone_04": "L_Knee", "bone_05": "R_Knee", "bone_06": "Spine2", "bone_07": "L_Ankle", "bone_08": "R_Ankle", "bone_09": "Spine3", "bone_10": "L_Foot", "bone_11": "R_Foot", "bone_12": "Neck", "bone_13": "L_Collar", "bone_14": "R_Collar", "bone_15": "Head", "bone_16": "L_Shoulder", "bone_17": "R_Shoulder", "bone_18": "L_Elbow", "bone_19": "R_Elbow", "bone_20": "L_Wrist", "bone_21": "R_Wrist", "bone_22": 
"L_Hand", "bone_23": "R_Hand", } def refine_SMPL(self, material, j, zrot): self.j = j self.arm_ob.rotation_euler = [0, 0, zrot] self.ob.data.shape_keys.animation_data_clear() self.arm_ob.animation_data_clear() self.ob.select_set(True) bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN') # bpy.context.scene.objects.active = self.ob # blender < 2.8x bpy.context.view_layer.objects.active = self.ob self.materials = self.create_segmentation(material, self.smpl_data_folder) for k in self.ob.data.shape_keys.key_blocks.keys(): self.ob.data.shape_keys.key_blocks[k].slider_min = -10 self.ob.data.shape_keys.key_blocks[k].slider_max = 10 # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x bpy.context.view_layer.objects.active = self.arm_ob def setState0(self): for ob in bpy.data.objects.values(): # ob.select = False # blender < 2.8x ob.select_set(False) # bpy.context.scene.objects.active = None # blender < 2.8x bpy.context.view_layer.objects.active = None # create one material per part as defined in a pickle with the segmentation # this is useful to render the segmentation in a material pass def create_segmentation(self, material, smpl_path): print("Creating materials segmentation") sorted_parts = [ "hips", "leftUpLeg", "rightUpLeg", "spine", "leftLeg", "rightLeg", "spine1", "leftFoot", "rightFoot", "spine2", "leftToeBase", "rightToeBase", "neck", "leftShoulder", "rightShoulder", "head", "leftArm", "rightArm", "leftForeArm", "rightForeArm", "leftHand", "rightHand", "leftHandIndex1", "rightHandIndex1", ] part2num = {part: (ipart + 1) for ipart, part in enumerate(sorted_parts)} materials = {} vgroups = {} with open(os.path.join(smpl_path,"segm_per_v_overlap.pkl"), "rb") as f: vsegm = pkl.load(f) if len(self.ob.material_slots) <= 1: bpy.ops.object.material_slot_remove() parts = sorted(vsegm.keys()) existing = False cnt = 0 for part in parts: vs = vsegm[part] # vgroups[part] = self.ob.vertex_groups.new(part) # blender < 2.8x if part not in self.ob.vertex_groups: vgroups[part] = self.ob.vertex_groups.new(name=part) vgroups[part].add(vs, 1.0, "ADD") else: existing = True bpy.ops.object.vertex_group_set_active(group=part) materials[part] = material.copy() materials[part].pass_index = part2num[part] if not existing: bpy.ops.object.material_slot_add() self.ob.material_slots[-1].material = materials[part] bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_all(action="DESELECT") bpy.ops.object.vertex_group_select() bpy.ops.object.material_slot_assign() bpy.ops.object.mode_set(mode="OBJECT") else: self.ob.material_slots[cnt].material = materials[part] cnt += 1 for scene_material in bpy.data.materials: if not scene_material.users and len(scene_material.name) != len(material.name): bpy.data.materials.remove(scene_material) return materials def quaternion_multiply(self, quaternion1, quaternion0): w0, x0, y0, z0 = quaternion0 w1, x1, y1, z1 = quaternion1 return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64) def euler_from_quaternion(self, quat): """ Convert a quaternion into euler angles (roll, pitch, yaw) roll is rotation around x in radians (counterclockwise) pitch is rotation around y in radians (counterclockwise) yaw is rotation around z in radians (counterclockwise) """ w,x,y,z = quat t0 = +2.0 * (w * x + y * z) t1 = +1.0 - 2.0 * (x * x + y * y) roll_x = math.atan2(t0, t1) t2 = +2.0 * (w * y - z * x) t2 = +1.0 if t2 > +1.0 else t2 t2 = -1.0 
if t2 < -1.0 else t2 pitch_y = math.asin(t2) t3 = +2.0 * (w * z + x * y) t4 = +1.0 - 2.0 * (y * y + z * z) yaw_z = math.atan2(t3, t4) return roll_x*180/3.1415, pitch_y*180/3.1415, yaw_z*180/3.1415 # in radians def apply_trans_pose_shape(self, trans, pose, shape, frame=None, with_blendshapes = True): """ Apply trans pose and shape to character """ # transform pose into rotation matrices (for pose) and pose blendshapes mrots, bsh = rodrigues2bshapes(pose) # set the location of the first bone to the translation parameter mytrans = [0,0,0] mytrans[2] = trans[2] mytrans[1] = trans[1] mytrans[0] = trans[0] self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location = mytrans if frame is not None: self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert( "location", frame=frame ) self.arm_ob.pose.bones[self.gender_name + "_root"].keyframe_insert( "rotation_quaternion", frame=frame ) # set the pose of each bone to the quaternion specified by pose for ibone, mrot in enumerate(mrots): bone = self.arm_ob.pose.bones[ self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)] ] bone.rotation_quaternion = Matrix(mrot).to_quaternion() if frame is not None: bone.keyframe_insert("rotation_quaternion", frame=frame) bone.keyframe_insert("location", frame=frame) # apply pose blendshapes if with_blendshapes: for ibshape, bshape in enumerate(bsh): self.ob.data.shape_keys.key_blocks[ "Pose{:03d}".format(ibshape) ].value = bshape if frame is not None: self.ob.data.shape_keys.key_blocks[ "Pose{:03d}".format(ibshape) ].keyframe_insert("value", index=-1, frame=frame) # apply shape blendshapes for ibshape, shape_elem in enumerate(shape): self.ob.data.shape_keys.key_blocks[ "Shape{:03d}".format(ibshape) ].value = shape_elem if frame is not None: self.ob.data.shape_keys.key_blocks[ "Shape{:03d}".format(ibshape) ].keyframe_insert("value", index=-1, frame=frame) else: mod = self.ob.modifiers.get('Armature') if mod is not None: self.ob.modifiers.remove(mod) def reset_joint_positions(self, shape, scene): orig_trans = np.asarray( self.arm_ob.pose.bones[self.gender_name + "_Pelvis"].location ).copy() # zero the pose and trans to obtain joint positions in zero pose self.apply_trans_pose_shape(orig_trans, np.zeros(72), shape) bpy.ops.wm.memory_statistics() depsgraph = bpy.context.evaluated_depsgraph_get() me = self.ob.evaluated_get(depsgraph).to_mesh() num_vertices = len(me.vertices) # 6890 reg_vs = np.empty((num_vertices, 3)) for iiv in range(num_vertices): reg_vs[iiv] = me.vertices[iiv].co # bpy.data.meshes.remove(me) # blender < 2.8x self.ob.evaluated_get(depsgraph).to_mesh_clear() # regress joint positions in rest pose joint_xyz = self.j # adapt joint positions in rest pose # self.arm_ob.hide = False # Added this line # bpy.context.scene.objects.active = self.arm_ob # blender < 2.8x bpy.context.view_layer.objects.active = self.arm_ob bpy.ops.object.mode_set(mode="EDIT") # self.arm_ob.hide = True for ibone in range(24): bb = self.arm_ob.data.edit_bones[ self.gender_name + "_" + self.part_match["bone_{:02d}".format(ibone)] ] bboffset = bb.tail - bb.head bb.head = joint_xyz[ibone] bb.tail = bb.head + bboffset bpy.ops.object.mode_set(mode="OBJECT")
13,308
Python
37.915205
94
0.550646
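`rodrigues2bshapes` in the file above relies on the axis-angle (Rodrigues) rotation to turn each of the 24 pose vectors into a rotation matrix. A quick, self-contained sanity check of that formula, re-implemented here with scalar components as an independent sketch rather than the module's own helper: the axis-angle vector `[0, 0, pi/2]` should map the x axis onto the y axis.

```
import numpy as np

def rodrigues(rotvec):
    # Rodrigues rotation formula: R = I + sin(theta) K + (1 - cos(theta)) K^2,
    # an equivalent form of cos(theta) I + (1 - cos(theta)) r r^T + sin(theta) K.
    theta = np.linalg.norm(rotvec)
    if theta == 0.0:
        return np.eye(3)
    kx, ky, kz = rotvec / theta
    K = np.array([[0, -kz, ky],
                  [kz, 0, -kx],
                  [-ky, kx, 0]])
    return np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * (K @ K)

R = rodrigues(np.array([0.0, 0.0, np.pi / 2]))
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # -> [0. 1. 0.]
```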
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/blender_util.py
import os import bpy from humangenerator.util.IO import readOBJ, readPC2, writePC2 import numpy as np import bmesh import sys import pickle as pkl import shutil import random PI = 3.14159 """ Scene """ def init(): clean() # scene return scene() def clean(): for collection in dir(bpy.data): data_structure = getattr(bpy.data, collection) # Check that it is a data collection if isinstance(data_structure, bpy.types.bpy_prop_collection) and hasattr(data_structure, "remove") and collection not in [ "texts"]: # Go over all entities in that collection for block in data_structure: # Remove everything besides the default scene if not isinstance(block, bpy.types.Scene) or block.name != "Scene": data_structure.remove(block) def clean_mesh_and_textures(exclude=[]): # ensure everything is lowered exclude = [i.lower() for i in exclude] for block in bpy.data.objects: if block.users == 0 or block.name.lower() not in exclude: bpy.data.objects.remove(block) for block in bpy.data.meshes: if block.users == 0: bpy.data.meshes.remove(block) for block in bpy.data.materials: if block.users == 0 and block.name.lower() not in exclude: bpy.data.materials.remove(block) for block in bpy.data.textures: if block.users == 0: bpy.data.textures.remove(block) for block in bpy.data.images: bpy.data.images.remove(block) for block in bpy.data.shape_keys: if block.users == 0: bpy.data.textures.remove(block) for block in bpy.data.actions: if block.users == 0: bpy.data.actions.remove(block) def scene(): scene = bpy.data.scenes["Scene"] scene.render.engine = "CYCLES" # bpy.data.materials['Material'].use_nodes = True scene.cycles.shading_system = True scene.use_nodes = True scene.render.film_transparent = True scene.frame_current = 0 scene.render.fps = 30 scene.render.resolution_x = 640 scene.render.resolution_y = 480 return scene """ BPY obj manipulation """ def select(ob, only=True): if type(ob) is str: ob = bpy.data.objects[ob] if only: deselect() ob.select_set(True) bpy.context.view_layer.objects.active = ob return ob def deselect(): for obj in bpy.data.objects.values(): obj.select_set(False) bpy.context.view_layer.objects.active = None def delete(ob): select(ob) bpy.ops.object.delete() def createBPYObj(V, F, Vt=None, Ft=None, name='new_obj'): # Create obj mesh = bpy.data.meshes.new('mesh') ob = bpy.data.objects.new(name, mesh) # Add to collection bpy.context.collection.objects.link(ob) select(ob) mesh = bpy.context.object.data bm = bmesh.new() # Vertices for v in V: bm.verts.new(v) bm.verts.ensure_lookup_table() # Faces for f in F: v = [bm.verts[i] for i in f] bm.faces.new(v) bm.to_mesh(mesh) bm.free() # UV Map if not Vt is None: # Create UV layer ob.data.uv_layers.new() # Assign UV coords iloop = 0 for f in Ft: for i in f: ob.data.uv_layers['UVMap'].data[iloop].uv = Vt[i] iloop += 1 return ob def convert_meshcache(ob: bpy.ops.object, offset=0): # Converts a MeshCache or Cloth modifiers to ShapeKeys bpy.context.scene.frame_current = bpy.context.scene.frame_start for frame in range(bpy.context.scene.frame_end + 1): bpy.context.scene.frame_current = frame # for alembic files converted to PC2 and loaded as MeshCache bpy.ops.object.modifier_apply_as_shapekey(keep_modifier=True, modifier="MeshCache") # loop through shapekeys and add as keyframe per frame # https://blender.stackexchange.com/q/149045/87258 bpy.context.scene.frame_current = bpy.context.scene.frame_start for frame in range(bpy.context.scene.frame_end + 1): bpy.context.scene.frame_current = frame shapekey = bpy.data.shape_keys[-1] for i, keyblock in 
enumerate(shapekey.key_blocks): if keyblock.name != "Basis": curr = i - 1 if curr != frame: keyblock.value = 0 keyblock.keyframe_insert("value", frame=frame) else: keyblock.value = 1 keyblock.keyframe_insert("value", frame=frame) bpy.ops.object.modifier_remove(modifier="MeshCache") def setMaterial(path_sample, ob, sample, garment, texture): mat = bpy.data.materials.new(name=sample + '_' + garment + '_Material') mat.use_nodes = True ob.data.materials.append(mat) if texture['type'] == 'color': mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = texture['data'].tolist() + [1] elif texture['type'] == 'pattern': # Read pattern img_path = os.path.join(path_sample, sample, garment + '.png') # Add nodes tree = mat.node_tree nodes = tree.nodes # Principled BSDf bsdf = nodes['Principled BSDF'] # Image img = nodes.new('ShaderNodeTexImage') try: img.image = bpy.data.images.load(img_path) # Links tree.links.new(img.outputs[0], bsdf.inputs[0]) except: mat.node_tree.nodes['Principled BSDF'].inputs[0].default_value = [random.random(), random.random(), random.random(), 1] """ Modifiers """ def mesh_cache(ob, cache, scale=1): ob = select(ob) bpy.ops.object.modifier_add(type='MESH_CACHE') ob.modifiers['MeshCache'].cache_format = 'PC2' ob.modifiers['MeshCache'].filepath = cache ob.modifiers['MeshCache'].frame_scale = scale def write_usd(temppath, filepath, filename, with_cache, export_animation=True, sf=0, ef=-1, frame_step=1): outpath = os.path.join(filepath, filename) filepath = os.path.join(filepath, filename, filename + ".usd") if ef == -1: ef = bpy.context.scene.frame_end print(f"\nExporting usd to {filepath}\n") print(f"With blendshapes = {not with_cache}") bpy.ops.wm.usd_export(filepath=os.path.join(temppath, filename + ".usd"), filemode=8, display_type='DEFAULT', sort_method='DEFAULT', selected_objects_only=True, visible_objects_only=True, export_animation=export_animation, export_hair=True, export_vertices=True, export_vertex_colors=True, export_vertex_groups=True, export_face_maps=True, export_uvmaps=True, export_normals=True, export_transforms=True, export_materials=True, export_meshes=True, export_lights=True, export_cameras=False, export_blendshapes=(not with_cache), export_curves=True, export_particles=True, export_armatures=True, use_instancing=False, evaluation_mode='VIEWPORT', default_prim_path=f"/body_{filename}", root_prim_path=f"/body_{filename}", material_prim_path=f"/body_{filename}/materials", generate_cycles_shaders=False, generate_preview_surface=True, generate_mdl=True, convert_uv_to_st=True, convert_orientation=True, convert_to_cm=True, export_global_forward_selection='Y', export_global_up_selection='Z', export_child_particles=False, export_as_overs=False, merge_transform_and_shape=False, export_custom_properties=True, add_properties_namespace=False, export_identity_transforms=False, apply_subdiv=True, author_blender_name=True, vertex_data_as_face_varying=False, frame_step=frame_step, start=sf, end=ef, override_shutter=False, init_scene_frame_range=True, export_textures=True, relative_paths=True, light_intensity_scale=1, convert_light_to_nits=True, scale_light_radius=True, convert_world_material=True, fix_skel_root=True, xform_op_mode='SRT') shutil.move(os.path.join(temppath, filename + ".usd"), filepath) shutil.move(os.path.join(temppath, "textures"), os.path.join(outpath, "textures")) def export_stl_data(filepath, filename, lobs, zrot): context = bpy.context dg = context.evaluated_depsgraph_get() scene = context.scene coll = context.collection step = 5 for ob in lobs: 
if ob.type != 'MESH': print(ob.name) print(ob.type) ob.select_set(False) continue bpy.context.view_layer.objects.active = ob rings = [] me = ob.data nverts = len(me.vertices) nedges = len(me.edges) bm = bmesh.new() f = scene.frame_start while f <= scene.frame_end: scene.frame_set(f) bm.from_object(ob, dg, cage=True) bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.02) # bmesh.ops.transform(bm, verts=bm.verts[:], matrix=ob.matrix_world) f += step rings.append(bm.edges[:]) print("Frames processeds, going to do rings") # build from rings next = rings.pop() while rings: ring = rings.pop() bmesh.ops.bridge_loops(bm, edges=ring + next) next = ring rme = bpy.data.meshes.new("Rib") bm.to_mesh(rme) copy = bpy.data.objects.new("Rib", rme) coll.objects.link(copy) print("DONE" + ob.name) for ob in bpy.data.objects: if 'Rib' in ob.name: ob.select_set(True) bpy.context.view_layer.objects.active = ob else: ob.select_set(False) bpy.ops.object.join() ob = bpy.context.view_layer.objects.active ob.select_set(True) ob.rotation_euler = [0, 0, zrot] bpy.ops.export_mesh.stl(filepath=os.path.join(filepath, filename, filename + ".stl"), check_existing=True, use_selection=True, global_scale=1, ascii=False, use_mesh_modifiers=False, batch_mode='OFF', axis_forward='Y', axis_up='Z') bpy.ops.object.delete() def write_pkl_data(filepath, filename, arm_ob, ob, info, frame_step=1, write_verts=False): bpy.context.scene.frame_current = bpy.context.scene.frame_start N = int((bpy.context.scene.frame_end - bpy.context.scene.frame_start + 1) / frame_step) n_bones = len(arm_ob.pose.bones) - 1 n_verts = len(ob.data.vertices) if write_verts: d = { 'frame': [], 'bones': np.zeros((N, n_bones, 3), np.float32), 'info': info, 'verts': np.zeros((N, n_verts, 3), np.float32), 'sf': bpy.context.scene.frame_start, 'ef': bpy.context.scene.frame_end + 1, 'nframes': frame_step } else: d = { 'frame': [], 'bones': np.zeros((N, n_bones, 3), np.float32), 'info': info, 'sf': bpy.context.scene.frame_start, 'ef': bpy.context.scene.frame_end + 1, 'nframes': frame_step } select(ob) dg = bpy.context.evaluated_depsgraph_get() cnt = 0 for f in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end + 1): sys.stdout.write('\r' + str(f) + '/' + str(N * frame_step)) sys.stdout.flush() bpy.context.scene.frame_current = f bpy.context.view_layer.update() d['frame'].append(f) select(ob) tmp = ob.evaluated_get(dg) me = tmp.to_mesh() if write_verts: d['verts'][cnt] = np.reshape([ob.matrix_world @ v.co for v in me.vertices], (n_verts, 3)) select(arm_ob) d['bones'][cnt] = np.reshape([arm_ob.matrix_world @ bone.head for bone in arm_ob.pose.bones[1:]], (n_bones, 3)) cnt += 1 if not os.path.exists(os.path.join(filepath, filename)): os.makedirs(os.path.join(filepath, filename)) filepath = os.path.join(filepath, filename, filename + ".pkl") out = open(filepath, 'wb') pkl.dump(d, out) out.close()
12,339
Python
35.081871
120
0.593322
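`write_pkl_data` in the file above serializes a dictionary with the exported frame indices, per-frame bone positions, optional vertices, and the sequence metadata. A small sketch of reading such a file back and inspecting it; the path below is hypothetical:

```
# Read back the dictionary written by write_pkl_data() and inspect its contents.
import pickle as pkl
import numpy as np

with open("/path/to/out/subject/subject.pkl", "rb") as f:  # hypothetical path
    d = pkl.load(f)

print(d.keys())                       # frame, bones, info, (verts,) sf, ef, nframes
print(len(d["frame"]))                # number of exported frames
print(np.asarray(d["bones"]).shape)   # (N frames, n_bones, 3) world-space bone heads
if "verts" in d:
    print(np.asarray(d["verts"]).shape)  # (N frames, n_verts, 3) if write_verts=True
```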
eliabntt/animated_human_SMPL_to_USD/humangenerator/util/IO.py
import os import numpy as np from struct import pack, unpack """ Reads OBJ files Only handles vertices, faces and UV maps Input: - file: path to .obj file Outputs: - V: 3D vertices - F: 3D faces - Vt: UV vertices - Ft: UV faces Correspondence between mesh and UV map is implicit in F to Ft correspondences If no UV map data in .obj file, it shall return Vt=None and Ft=None """ def readOBJ(file): V, Vt, F, Ft = [], [], [], [] with open(file, 'r') as f: T = f.readlines() for t in T: # 3D vertex if t.startswith('v '): v = [float(n) for n in t.replace('v ','').split(' ')] V += [v] # UV vertex elif t.startswith('vt '): v = [float(n) for n in t.replace('vt ','').split(' ')] Vt += [v] # Face elif t.startswith('f '): idx = [n.split('/') for n in t.replace('f ','').split(' ')] f = [int(n[0]) - 1 for n in idx] F += [f] # UV face if '/' in t: f = [int(n[1]) - 1 for n in idx] Ft += [f] V = np.array(V, np.float32) Vt = np.array(Vt, np.float32) if Ft: assert len(F) == len(Ft), 'Inconsistent .obj file, mesh and UV map do not have the same number of faces' else: Vt, Ft = None, None return V, F, Vt, Ft """ Writes OBJ files Only handles vertices, faces and UV maps Inputs: - file: path to .obj file (overwrites if exists) - V: 3D vertices - F: 3D faces - Vt: UV vertices - Ft: UV faces Correspondence between mesh and UV map is implicit in F to Ft correspondences If no UV map data as input, it will write only 3D data in .obj file """ def writeOBJ(file, V, F, Vt=None, Ft=None): if not Vt is None: assert len(F) == len(Ft), 'Inconsistent data, mesh and UV map do not have the same number of faces' with open(file, 'w') as file: # Vertices for v in V: line = 'v ' + ' '.join([str(_) for _ in v]) + '\n' file.write(line) # UV verts if not Vt is None: for v in Vt: line = 'vt ' + ' '.join([str(_) for _ in v]) + '\n' file.write(line) # 3D Faces / UV faces if Ft: F = [[str(i+1)+'/'+str(j+1) for i,j in zip(f,ft)] for f,ft in zip(F,Ft)] else: F = [[str(i + 1) for i in f] for f in F] for f in F: line = 'f ' + ' '.join(f) + '\n' file.write(line) """ Reads PC2 files, and proposed format PC16 files Inputs: - file: path to .pc2/.pc16 file - float16: False for PC2 files, True for PC16 Output: - data: dictionary with .pc2/.pc16 file data NOTE: 16-bit floats lose precision with high values (positive or negative), we do not recommend using this format for data outside range [-2, 2] """ def readPC2(file, float16=False): # assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' data = {} bytes = 2 if float16 else 4 dtype = np.float16 if float16 else np.float32 with open(file, 'rb') as f: # Header data['sign'] = f.read(12) # data['version'] = int.from_bytes(f.read(4), 'little') data['version'] = unpack('<i', f.read(4))[0] # Num points # data['nPoints'] = int.from_bytes(f.read(4), 'little') data['nPoints'] = unpack('<i', f.read(4))[0] # Start frame data['startFrame'] = unpack('f', f.read(4)) # Sample rate data['sampleRate'] = unpack('f', f.read(4)) # Number of samples # data['nSamples'] = int.from_bytes(f.read(4), 'little') data['nSamples'] = unpack('<i', f.read(4))[0] # Animation data size = data['nPoints']*data['nSamples']*3*bytes data['V'] = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32) data['V'] = data['V'].reshape(data['nSamples'], data['nPoints'], 3) return data """ Reads an specific frame of PC2/PC16 files Inputs: - file: path to .pc2/.pc16 file - frame: number of the frame to read - float16: False for PC2 files, True for PC16 Output: - 
T: mesh vertex data at specified frame """ def readPC2Frame(file, frame, float16=False): assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' assert frame >= 0 and isinstance(frame,int), 'Frame must be a positive integer' bytes = 2 if float16 else 4 dtype = np.float16 if float16 else np.float32 with open(file,'rb') as f: # Num points f.seek(16) # nPoints = int.from_bytes(f.read(4), 'little') nPoints = unpack('<i', f.read(4))[0] # Number of samples f.seek(28) # nSamples = int.from_bytes(f.read(4), 'little') nSamples = unpack('<i', f.read(4))[0] if frame > nSamples: print("Frame index outside size") print("\tN. frame: " + str(frame)) print("\tN. samples: " + str(nSamples)) return # Read frame size = nPoints * 3 * bytes f.seek(size * frame, 1) # offset from current '1' T = np.frombuffer(f.read(size), dtype=dtype).astype(np.float32) return T.reshape(nPoints, 3) """ Writes PC2 and PC16 files Inputs: - file: path to file (overwrites if exists) - V: 3D animation data as a three dimensional array (N. Frames x N. Vertices x 3) - float16: False for writing as PC2 file, True for PC16 This function assumes 'startFrame' to be 0 and 'sampleRate' to be 1 NOTE: 16-bit floats lose precision with high values (positive or negative), we do not recommend using this format for data outside range [-2, 2] """ def writePC2(file, V, float16=False): assert file.endswith('.pc2') and not float16 or file.endswith('.pc16') and float16, 'File format not consistent with specified input format' if float16: V = V.astype(np.float16) else: V = V.astype(np.float32) with open(file, 'wb') as f: # Create the header headerFormat='<12siiffi' headerStr = pack(headerFormat, b'POINTCACHE2\0', 1, V.shape[1], 0, 1, V.shape[0]) f.write(headerStr) # Write vertices f.write(V.tobytes()) """ Reads proposed compressed file format for mesh topology. Inputs: - fname: name of the file to read Outputs: - F: faces of the mesh, as triangles """ def readFaceBIN(fname): if '.' in os.path.basename(fname) and not fname.endswith('.bin'): print("File name extension should be '.bin'") return elif not '.' in os.path.basename(fname): fname += '.bin' with open(fname, 'rb') as f: F = np.frombuffer(f.read(), dtype=np.uint16).astype(np.int32) return F.reshape((-1,3)) """ Compress mesh topology into uint16 (Note that this imposes a maximum of 65,536 vertices). Writes this data into the specified file. Inputs: - fname: name of the file to be created (provide NO extension) - F: faces. MUST be an Nx3 array """ def writeFaceBIN(fname, F): assert type(F) is np.ndarray, "Make sure faces is an Nx3 NumPy array" assert len(F.shape) == 2 and F.shape[1] == 3, "Faces have the wron shape (should be Nx3)" if '.' in os.path.basename(fname) and not fname.endswith('.bin'): print("File name extension should be '.bin'") return elif not '.' in os.path.basename(fname): fname += '.bin' F = F.astype(np.uint16) with open(fname, 'wb') as f: f.write(F.tobytes())
6,824
Python
31.971014
143
0.652989
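A quick round-trip check of the PC2 helpers above: write a small random animation with `writePC2` and read it back with `readPC2`. This assumes the module is importable as `humangenerator.util.IO` (as the sibling utilities do); adjust the import to your layout.

```
import os
import tempfile
import numpy as np
from humangenerator.util.IO import readPC2, writePC2

V = np.random.rand(10, 6890, 3).astype(np.float32)  # 10 frames of an SMPL-sized mesh
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "body.pc2")
    writePC2(path, V)                     # header '<12siiffi' + float32 vertex data
    data = readPC2(path)
    assert data["nPoints"] == 6890 and data["nSamples"] == 10
    assert np.allclose(data["V"], V)
    print("PC2 round trip OK")
```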
eliabntt/GRADE-RR/TipsAndTricks.md
## Tips and tricks ### Main concepts The simulation has various components. 1. `kit` or `SimulationApp` used to access the engine itself. Essentially, the base engine over which everything works [link](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.kit/docs/index.html). You can't do much with this. It's always the first call, prior to loading isaac/omni components (even python import calls). 2. The simulation context. This class provide functions that take care of many time-related events such as perform a physics or a render step for instance. It also includes an instance of PhysicsContext which takes care of many physics related settings such as setting physics dt, solver type..etc [link](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.core/docs/index.html?highlight=context#module-omni.isaac.core.simulation_context). 3. The `stage` object. Accessed with `omni.usd.get_context().get_stage()`. Used to access all the simulations objects and their properties, e.g. through `prim = stage.GetPrimAtPath('/whatever')`. 4. `omni` is generally available, independently in which piece of code you are. Thus using 3. or `omni.kit.app.get_app()` you should be able to access everything you need without passing objects around. You'll work mainly with 2 and 3. All functions have autocomplete capabilities, just place a breakpoint and walk your way through them. If you hover over some properties in the UI sometimes a helper dialog will give you the name of the property. Otherwise, the `prim.GetPropertyNames()` will give you the available properties. In general, the prims and their properties are accessed through a prim tree. Sometimes, some properties, are accessible only if the prim is accessed as a specific kind (e.g. a mesh [link](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/smpl_and_bbox.py#L206)) or under specific additional keywords (e.g. the physics collision [link](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L275)). Unfortunately, there is no easy way to figure this out due to the symbiosys between Isaac and USD. NOTE that some functions are highly customized (e.g. `set_drone_joints_init_loc`)! This is thought to help you out set up your _custom_ simulation, and not an off the shelf solution! ### Clear properties For each object that you load you should call `clear_properties(path)` [link](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L414). This will ensure that the translate, rotate and scale operations are attached to the objects as expected. ### Viewports and cameras Each rendering component will have a viewport associated to that. In our testing we found out that switching cameras related to a viewport (to reduce memory consumption) may lead to memory leakage and other problems. What we suggest is to have one viewport for every thing that you want to render, be that a full-res camera or a ROS-camera. If you want both high-res and low-res images, we suggest to have two viewports. It will slow down things (as rendering will be slower), but it will be easier to manage. ### Try out before deployment You can easily try out code in the script tool of the main simulation. In short you want to open the stage, open the script toolbox, and try out your snippets there. Remember that it is not possible to render/update the simulation in this case. 
If you need the physics, stop and play the simulation using the corresponding buttons. You will probably need the `stage` object (see point 3 above), and your own code. Remember that you need to import any additional module. Some commands can also be seen copied and used using the [Command Tool](https://docs.omniverse.nvidia.com/isaacsim/latest/ext_omni_kit_commands.html) util. <details closed> ![Alt text](image.png) </details closed> ### Time For robotics applications the time is goverened by the physics time. On the other hand, the default step of the simulation in Isaac is governed by the rendering. The easy way to solve this is to manually publish the clock as shown in the tutorials, and keep the timeline tool under "control". The timeline tool is what controls the animations. You can access that using ``` timeline = setup_timeline(config) # config containing some additional options # or timeline = omni.timeline.get_timeline_interface() ``` And perform various operations such as ``` timeline.set_current_time(0) timeline.get_current_time() timeline.forward/backward_one_frame() timeline.play() timeline.stop() ``` `timeline.set_auto_update(False)` is used to stop the timeline advancing every rendering call. The timeline must be `playing` for the physics, clock etc, to work correctly. Thus, in theory, it would be possible to set the update to false, forward the timeline step before rendering whenever necessary, and the problem would be solved. *However, this is apparently not working in the current version of Isaac Sim. Thus, in the `sleeping()` function [link](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/simulation_utils.py#L149) we constantly reset the time to the current time so that the rendering is correct. Also, continuously call `play` and `stop` might cause problems.* ### Simulation rendering, manual interaction, and UI The simulation app UI will be refreshed ONLY when you do a rendering call. For stepping the physics and rendering you have different options: 1. `kit.update()` will step both the physics and the rendering 2. `simulation_context.render()` will do only a SINGLE rendering step 3. `simulation_context.step()` will do both a rendering and physics step. Not always working 4. `simulation_context.step(render=False)` will do a physics step 5. `omni.kit.app.get_app().update()` as `kit.update()`, but accessible if you do not have access to the kit object itself. My suggestion is to always work with a combination of `simulation_context.render()/step(render=False)` and to stick to that. If needed, you will be able to interact with the application only when fast enough rendering calls are made. Sometimes, it is necessary to also step the physics to see the effects of your actions. A quick way to do this is to: 1. enter debug mode in python 2. run a loop such as ``` for _ in range(1000): simulation_context.step(render=False) simulation_context.render() ``` #### *The rendering calls are NOT blocking. This means that every time you render it will do that for either 1) a fixed amount of time in case of RTX rendering, or 2) a single step for path tracing rendering. This has been solved by us through the `sleeping` function in the `simulation_utils.py`.* #### *The visual information on the application is updated after the SECOND render call.* ### Save the GT information The process is to either save stuff from the main simulation loop, or to use the synthetic recorder extension. 
In the latter case you can use directly what we provide in `isaac_internals/exts/omni.isaac.synthetic_recorder/omni/isaac/synthetic_recorder/extension_custom.py` and expand it alongside with `isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/writers/numpy.py` and `isaac_internals/exts/omni.isaac.synthetic_utils/omni/isaac/synthetic_utils/syntheticdata.py` code. Then you can create a recorder directly in your code using: ``` from omni.isaac.synthetic_recorder import extension_custom my_recorder = extension_custom.MyRecorder() my_recorder.on_startup() # necessary call _settings = my_recorder.get_default_settings() _settings["rgb"]["enabled"] = True # inspect and extend this dictionary my_recorder.set_single_settings(_settings) my_recorder._dir_name = os.path.join(out_path) my_recorder._enable_record = True # set to false to disable my_recorder.skip_cameras = 0 # number of viewports to skip # do stuff my_recorder._update() # write data if enabled ``` This will create the desired data for EACH viewport. A shorter version is by using ``` recorder = recorder_setup(recorder_setup(_recorder_settings, out_path, enabled, skip_cameras) recorder._update() ``` Skip cameras is used to let the system know how many viewports it need to skip when saving the data itself. Multiple recorders can be set in place. They will all cycle through all the viewports, unless you change the code yourself. All data can be also accessed in the main simulation loop. Some examples are the vertices, or the lidar information (see the replay experiment script). Potentially, you could also get as output of the recorder `_update()` call all the information, edit, and publish them as ROS messages. ### Save the motion vector This is not possible during rendering itself. To save it you need to manually render (for the second time), wait for the data to be produced, and then save the motion vector itself. See the repeating experiment tool for an example on how to do that [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/replay_experiment.py#L391-L392). Note that the motion vector can be only visualized by the default Isaac installation and not saved (see [here](https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.sensor/docs/index.html#module-omni.isaac.sensor.scripts.camera)). Thus, we cannot ensure correctness. ### Traverse the stage To traverse all the prims in the stage you can simply run `for prim in stage.Traverse(): ...` ### Enable/Disable collisions Add colliders [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L125) See [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L106) ### Hide objects from the viewport Change visibility of a list of objects [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L513) ### Postprocess the data Please check our dedicated repository [here](https://github.com/robot-perception-group/GRADE_tools). ### Colorize the saved data Simply run `python scripts/colorize.py --viewport_folder main_folder_with_npy_files`. Check our code [here](https://github.com/eliabntt/GRADE-RR/blob/main/scripts/colorize.py), you can save images, images and videos, and decide which kind of data you want. 
### Get skeletal, vertices, and SMPL information while correcting bounding boxes Look [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/smpl_and_bbox.py). This is mainly tuned for our data. However, it can be easily expanded to your own dataset. In short, for skeleton you need to open the prim as `AnimationSchema.SkelJoint(prim).GetJoint()` [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/smpl_and_bbox.py#L192), for the vertices use `points = UsdGeom.PointBased(prim)`[here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/smpl_and_bbox.py#L206). Using the latters, you can get the bounding boxes. ### Edit directly USD files Check the tutorial [here](https://github.com/eliabntt/GRADE-RR/blob/37ee985abccc6239bec7f22241c49da0acc5402c/EDIT_USDS.md). This will help you convert USD to txt files for easy file processing. ### How to move/control the camera/robot You have several possibilities with and without ROS, with and without physics. Check them out [here](https://github.com/eliabntt/GRADE-RR/blob/37ee985abccc6239bec7f22241c49da0acc5402c/MOVEMENT.md) ### Possible missing textures/wrong paths When loading humans or environments (or anything else) it may be necessar for you to edit the paths of the shaders, especially when moving between Windows and Linux. To do that you can use the [`change_shader_path`](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/utils/misc_utils.py#L62) or the [correct paths](https://github.com/eliabntt/GRADE-RR/tree/main/scripts/process_paths) scripts. Otherwise, you can simply process the text files as explained [here](https://github.com/eliabntt/GRADE-RR/blob/main/EDIT_USDS.md). ### Segmentation <-> instance Instance segmentation files will save also the mappings between classes. An example on how to do the mapping and process those file is [here](https://github.com/robot-perception-group/GRADE-eval/blob/main/mapping_and_visualization/convert_classes.py). ### Shapenet and GSO For the objects please download at least some assets from ShapeNetv2 or GSO websites. Paths should be `../gso/folders_of_the_objects` and `../shapenet/synsetIds/...`. For ShapeNet please also add `../shapenet/v1_csv/all_of_the_synset_csvs`. Our code will convert locally in `../gso/exported_usd` and `../shapenet/local-converted-USD`. Clearly, you can pre-process everything and use only the USDs afterwards (to save space). All the code is on `simulator/utils/object_utils.py`. ### Project pixels to world Look at `scripts/pixel_to_world.py` ### Bag processing Average stats `scripts/average_rosbag.py` Filter and compress `scripts/filter_compress.sh` ### Automatically generate data See `scripts/bash_process.zsh` using `screen`
13,343
Markdown
71.129729
727
0.781908
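A condensed sketch of the stepping pattern recommended in the tips above: advance physics with `step(render=False)` and render explicitly, keeping the two decoupled. The dt values and the physics-to-render ratio are illustrative, and the snippet assumes a headless run with no stage loaded.

```
from omni.isaac.kit import SimulationApp

kit = SimulationApp({"headless": True})          # must come before other isaac/omni imports
from omni.isaac.core import SimulationContext    # noqa: E402

simulation_context = SimulationContext(physics_dt=1.0 / 240.0, rendering_dt=1.0 / 30.0)
simulation_context.initialize_physics()
simulation_context.play()

render_every = 8                                  # physics steps per render call (illustrative)
for i in range(240):
    simulation_context.step(render=False)         # physics only
    if i % render_every == 0:
        simulation_context.render()               # single rendering step, refreshes the UI

simulation_context.stop()
kit.close()
```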
eliabntt/GRADE-RR/HOWTO.md
## Requirements and basic software installation Please check the [requirements](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html) on the official page. Then download the Omniverse launcher and install Nucleus, Cache, and Isaac Sim. From now on, we will assume that you installed Isaac Sim within `ISAAC_FOLDER`. The default location is `~/.local/share/ov/pkg/isaac-version/`. Clone this repository. You can clone it wherever you prefer. For simplicity, we usually download it within the `isaac` folder. However, by using global paths you should be able to run this code anywhere on your PC. _Note_ Isaac has its own Python installation; if you need packages and you run software within the Isaac Python executable, remember that. To do so, you usually do something like ``` cd $ISAAC_FOLDER ./python.sh -m pip install ... # or ./python.sh python_file.py ``` We have some dependencies which are not installed by default. To install them run `sh req.sh $ISAAC_FOLDER`. (This will simply use the main Isaac `python.sh` to install everything via `pip`.) Independently of where you cloned the repository, you need to run `sh cp_local_to_different_folder.sh $CLONE_FOLDER $ISAAC_FOLDER` This will copy the edited files from $1 (source) to $2 (destination). You can use it in reverse (from Isaac to repo), or with any pair of folders. ## Misc A general note: every script has been more or less commented and almost every piece of code should be self-explanatory. If you don't find it like that please **open an issue**. I worked on this mainly alone, so the code is far from perfect, super-modular, or anything like that. But together we can make it better. Thus, we welcome any contribution that you might have, including coding style, comments, additions, or better strategies that you want to propose (of course after you have published your paper). ## How to start the simulation To launch Isaac you can run `./isaac-sim.sh` from the main installation folder. It is suggested to do this once before starting any other coding activity. To control the simulation with your own code the general process is `./python.sh python_script args`. In `args` you can specify either Python arguments or arguments for the Isaac simulator itself (e.g. ` --/renderer/enabled='iray'`). The functions that we use in our scripts are all contained in the `simulator/utils` folder. An explanation of each function is given in its comments, while a brief overview is given [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/utils/UTILS.md). ## Main concept Our programs all follow the same structure. - load the basic kit and start the initial simulation - load a basic environment with some settings pre-applied (some config changes cannot be made with the code itself) - load libraries and settings - load your main environment - edit the environment - load the robots - attach sensors to the robot - correct the camera fov (bug in Isaac that changes it) - [optional] load and place humans, objects and animate objects - setup information recorder - loop the simulation and publish/write the information when necessary Every aspect can be personalized or adapted. The basic environment could be your final one, the humans/animations can be present or placed in a different way, and the robot can have your own set of sensors or your own publishing rate. 
Our code is thought in such a way that each robot is loaded pre-fixed with the `my_robot_` name, and this applies to each topic that is published from that robot. The exception lies in the `tf` topic, for which we will have a publisher for each robot. Data can be published in ROS and saved as npy files. If you want both, with the former using a lowres camera and the latter an high res camera you should first load all the robots, and then call `add_npy_cameras` adjusting the skipped camera of your `recorder`. See the [tips](https://github.com/eliabntt/GRADE-RR/blob/main/TipsAndTricks.md) readme for more insights. ## Your first code [Here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/first_run.py) is a first example showing how to launch the simulation, load a basic environment, and perform some basic actions. The workflow will always be the same. Import general modules, create the `SimulationApp`, import the IsaacSim related stuff, and proceed. Please, look at the comments in the code directly. Brief explanations are also given below. ## Going past your first code Before adventuring here, please be sure to download our sample [world]() and [animated assets](). Those scripts will be incremental (i.e. based on the previous one). Please open all the downloaded USDs once at least to be sure that textures and everything else is correctly loaded. We marked _Optional_ what can be skipped in future iterations of _your_ code, but still, please go through them. They will go step by step from the general environment to the animated house. **Beore launching any simulation that need ros you need to start `roscore` if using ROS preferably with sim time set to true (`rosparam set use_sim_time true`)** In these codes, we consider our provided sampled world, the animated assets, and the drone provided with this repository. For the objects, you will find a note in the corresponding tutorial details. Additional samples (our used code, adapted from v2021), will be added in the next section. ##### Still WIP, need to add links and make sure that the code works. But most of it should work rn. - Using a config file, adding your own "world", and a robot [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/world_and_robot.py). <details closed> - To create a robot you can either import our `usds/drone_2022.usd` or `usds/robotino.usd`, use your own URDF [link](https://docs.omniverse.nvidia.com/isaacsim/latest/ext_omni_isaac_urdf.html), create your own USD (add a mesh and attach some joints to it, [link](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gui_simple_robot.html)), or use one of the already available models. For now, the USD file is enough. - The world can be either empty (thus you can skip loading), just with static objects, or with pre-placed animated objects (as in the zebra case). The world needs to be placed into a subfolder, e.g. `worlds/Savana/...`. Inside, you could (not mandatory) have: - `npy` file with the limits of the environment - `stl` file with the 3D occupancy of the environment If you do NOT have those, just disable the flags in the config file (see last point of this list). Otherwise, they will be used as shown [here](https://github.com/eliabntt/GRADE-RR/blob/455891d5021009695a5da13c4feda0ceb258d476/simulator/utils/environment_utils.py). - You will also see how to add colliders to the environment, how to generate a 2D occupancy map, how to use the meters per unit, how to move the robot before starting the simulation (by moving the joints). 
- Launch this with `./python.sh simulator/world_and_robot.py --config="/your_full_path/simulator/world_and_robot.yaml" --fix_env=Something`. `--config` is mandatory, `--fix_env` will tell to the system to select the `Something` world from the `world` environments folder, e.g. `Sample_house` </details closed> - [Optional] Fix the rendering engine, add and publish some ROS components to the robot itself [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/robot_with_ros.py). <details closed> - You will see how to add the clock to the simulation. Thanks to how we define it [here](https://github.com/eliabntt/GRADE-RR/blob/455891d5021009695a5da13c4feda0ceb258d476/simulator/utils/robot_utils.py#L274) the clock will tick with pysics steps, but will need to be manually published. - Our phylosophy is to manually publish ROS messages for better flexibility - We will show both how to add single components, or a batch of them, i.e. through custom "add all sensors" functions as we have done [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/robot_utils.py#L557). - How to publish data (either manually with ROS messages or using the internal Isaac Components) - You can then fix the rendering engine (path vs raytracing), and get to know the `sleeping` function - Place a breakpoint somewhere and try to manually render the environment while the timeline is playing (not using sleeping). Note how the rendering will advance the timeline of more than what you want. This does not affect the physics, but will affect the animations. Keep this in mind. See [here](https://github.com/eliabntt/GRADE-RR/blob/6e42652201509ed7ad95624d9a551e24fe5ce03c/TipsAndTricks.md#L38) for more details. - Launch this with `./python.sh simulator/robot_with_ros.py --config="/your_full_path/simulator/robot_with_ros.yaml" --fix_env=Something`. `--config` is mandatory, `--fix_env` will tell to the system to select the `Something` world from the `world` environments folder, e.g. `Sample_house` </details closed> - [Optional] Add animated people, additional objects, and animate those while solving the timeline problem [here](). <details closed> - You can get a sample human from [here](). Soon, we will upload our collection. Since then, you can follow our other repository [here](https://github.com/eliabntt/animated_human_SMPL_to_USD) to convert your SMPL models to USD. The preferred folder structure is `main/dataset/ID`, you will provide the `main` folder to allow the randomizer to work. - You can either place the models manually into your world beforehand (see the zebra case), use pre-fixed (or random) locations, or use a placement technique. Our placement technique will be explored in the additional scripts since it requires setting up the catkin workspace as well. - For the objects please download at least some assets from ShapeNetv2 or GSO websites. If not, please comment out that part of the code, or adapt it to your own assets. We think the GSO part can be made general quite easily. Paths should be `../gso/folders_of_the_objects` and `../shapenet/synsetIds/...`. For ShapeNet please also add `../shapenet/v1_csv/all_of_the_synset_csvs`. In the config add the `gso` and the `shapenet` folders. Additional options are there. - The animation will use the timeline interface see [here](https://github.com/eliabntt/GRADE-RR/blob/064c1b888727c6faa191f88519184dc272a8b950/simulator/utils/objects_utils.py#L135). 
- The object-loading code is [here](https://github.com/eliabntt/GRADE-RR/blob/064c1b888727c6faa191f88519184dc272a8b950/simulator/utils/objects_utils.py), for both ShapeNet and Google Scanned Objects. You can see how the conversion works [here](https://github.com/eliabntt/GRADE-RR/blob/064c1b888727c6faa191f88519184dc272a8b950/simulator/utils/objects_utils.py#L65). The system will automatically save the converted USD for backup and to avoid re-conversion. - Launch this with `./python.sh simulator/people_and_objects.py --config="/your_full_path/simulator/humans_and_objects.yaml" --fix_env=Something`. `--config` is mandatory, while `--fix_env` tells the system to select the `Something` world from the `world` environments folder, e.g. `Sample_house` </details closed> - [Optional] Launch your own SIL from within your own simulation script, add some randomization (e.g. lights, textures, etc.) and save GT data [link]() ## Additional scripts ### Correct data and smpl_and_bbox [This](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/smpl_and_bbox.py) and [this](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/correct_data.py) show how to access low-level mesh information and how to correct the incorrect 3D bounding box and pose information. ### Zebra data generation and Animation Sequences [This](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/zebra_datagen.py) is the code that we used to generate the data for the Zebra paper. Unfortunately, we cannot share the USDs of the environments, with the exception of the Savana one, due to licensing limitations. You can however explore how to access low-level animation sequences [link](https://github.com/eliabntt/GRADE-RR/blob/455891d5021009695a5da13c4feda0ceb258d476/simulator/utils/zebra_utils.py#L136) and how we managed to generate our data for the [Synthetic Data-based Detection of Zebras in Drone Imagery paper](https://arxiv.org/abs/2305.00432). Run it with `./python.sh GRADE-RR/simulator/zebra_datagen.py --/renderer/enabled='rtx,iray' --config='configs/config_zebra_datagen.yaml' --headless=False --fix_env=Savana` ### Replay experiment [This](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/replay_experiment.py) is a very useful piece of code. You can use it to replay any previously recorded experiment, modify the robot (or the scene conditions), and record new data. You can replay the experiment in two modalities, namely using teleport or by physically interpolating the trajectory. Note that the latter is subject to some drift due to the interpolation of the data itself. <details closed> Please run ``` ./python.sh GRADE-RR/simulator/replay_experiment.py --experiment_folder FOLDER ``` to do so. In our code we show how to create a new stereo camera, save previously unsaved data, save motion-vector data, and create a LiDAR sensor. You need some information to be able to repeat an experiment, namely the joint positions. We load those [from the rosbags](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/replay_experiment.py#L177), although you can access them from the GT pose arrays. </details closed> ### Paper autonomous indoor exploration (humans, objects, SIL, active SLAM etc.) ### Multi robot management ## Known issues 1. The ROS clock might have some delay in publishing. 
This implies that you need to sleep the simulation every time that component gets triggered. Other components behave consistently based on our tests. Alternatively, you can post-process the data as shown [here](https://github.com/robot-perception-group/GRADE-eval) 2. 3D bounding boxes (BBOX3D) are wrong for moving objects. The script [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/correct_data.py#L267) shows a way to solve this. 3. Pose information is wrong for some moving objects. The code [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/correct_data.py#L224) will solve this. 4. Collisions for dynamic objects are not computed most of the time due to PhysX limitations. This is addressed by the new RTX LiDAR of the newer Isaac Sim versions; however, its management is not intuitive. 5. The rendering is not blocking. Multiple calls (especially for path tracing) are necessary. Thus, this usually disrupts the motion-vector data. A possible workaround is to do two rendering steps and save the motion-vector data, and then finish rendering to save the RGB information. See [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/replay_experiment.py#L390) for an example of how to do that. Note that a rendering call is done just after the clocking. 6. In v2022 it is not possible to set an independent vertical FOV for the cameras. It will take the horizontal FOV and use the aspect ratio to obtain a "correct" vertical FOV. 7. In v2022 the internal PD control for the joints will NOT work using position setpoints. Also, the maximum velocity that is set is not considered. 8. In v2022 the timeline gets updated automatically even if you do not want it to. You need to keep track of the current time and constantly re-update it to correctly generate the data you want.
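All of the tutorial scripts above share the boilerplate described in "Your first code". As a quick reference, here is a minimal, illustrative sketch of that workflow. The config keys (`headless`, `physics_hz`, `render_hz`) and the overall structure are assumptions made for this example only; `simulator/first_run.py` and the provided yaml files remain the reference implementation.

```python
# Minimal sketch of the "Your first code" workflow; illustrative only.
import argparse
import yaml

# 1. Create the SimulationApp BEFORE importing any other omni/isaac module.
from omni.isaac.kit import SimulationApp

parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, help="path to the yaml config")
parser.add_argument("--fix_env", default="", help="environment subfolder to load")
args, _ = parser.parse_known_args()

with open(args.config) as f:
    config = yaml.safe_load(f)  # hypothetical config keys used below

simulation_app = SimulationApp({"headless": config.get("headless", False)})

# 2. Only now import the IsaacSim-related modules.
from omni.isaac.core import SimulationContext

simulation_context = SimulationContext(
    physics_dt=1.0 / config.get("physics_hz", 60),
    rendering_dt=1.0 / config.get("render_hz", 60),
)

# 3. Load the environment and the robot here (see world_and_robot.py),
#    then step physics and rendering in the main loop.
simulation_context.play()
for _ in range(100):
    simulation_context.step(render=True)

simulation_context.stop()
simulation_app.close()
```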
15,825
Markdown
96.691357
619
0.776114
eliabntt/GRADE-RR/SAMPLES.md
# Our Samples Still WIP. We have several showcase examples (all located in the simulator folder). Each of the Python files has its own configuration yaml file. More details are given below for each file. To follow these tutorials, we suggest that you either download one of our example environments [here]() and human animations [here](), or use our code [SMPL to USD](https://github.com/eliabntt/animated_human_SMPL_to_USD) and [Blender to USD](https://github.com/eliabntt/Front3D_to_USD) to create your own assets. We also suggest that you pre-install the [drone](https://github.com/eliabntt/ros_isaac_drone) control and placement repository. This is necessary to be able to use our placement strategy, control the drone with our custom 6DOF controller, or use FUEL with IsaacSim. Each simulation file will power up the environment, load the assets, and manage the saving based on the loaded configuration file. The scripts are the following: 1. `FUEL_indoor_simulation`: the code that we used to generate the dataset. 4. `irotate_simulation`: the code that we used to simulate [iRotate](https://github.com/eliabntt/irotate_active_slam), our active SLAM method, with Isaac Sim. This is very similar to 1 and 2, despite using an initial location, but shows how you can manage a different robot with practically the same code. 5. `multi_robot_sim`: simulates multiple robots; a bit hardcoded but generalizable. It simulates two drones and a ground robot. The two drones are controlled independently with two FUEL sessions, while the ground robot is controlled with iRotate. 6. `savana_simulation`: shows how we created the Savana with the zebras. Animated animals are pre-positioned within the environment. The robot is controlled through joint waypoints. **THIS DOES NOT WORK in v2022.2.1 DUE TO ISAACSIM BUGS** **Each config needs to be updated with your own paths** ___ ## Paper (ROS) simulation Install ROS, create a `catkin_ws`, and install [this](https://github.com/eliabntt/ros_isaac_drone). The default location for this installation is `$HOME` (`/home/user/catkin_ws`). The repo above will install 1. `FUEL`, our chosen exploration manager 2. `mav_comm` and `mav_control_rw`, which are used to control the robot and get velocity commands to follow the path generated by `FUEL` 3. `custom_6dof_joint_controller`, which is the bridge between the position/velocity commands and the joint velocities expected by IsaacSim 4. `moveit_based_collision_checker_and_placement`, which is needed to do the placement of the "objects" The [README](https://github.com/eliabntt/ros_isaac_drone/blob/main/README.md) already explains the dependencies. If you install it in a different location, _update `setup_python_env.sh:2`_ with your new location. Remember that you can also `source ... --extend` to source different environments in cascade. At this point, assuming you are located in the ISAAC folder, you can run ``` ./python.sh GRADE-RR/simulator/paper_simulation.py --config="/GLOBAL/GRADE-RR/simulator/configs/config_paper.yaml" ``` *BASH PROCESSING* If you want to run everything (including the exploration visualization and the rosbag recorder), the `bash_process.zsh` file is what you are looking for. That file is what we used to streamline the generation and processing in batches. In the config file you can easily choose which sensors to use. Similarly ``` ./python.sh GRADE-RR/simulator/simulator_ros.py --config="/GLOBAL/simulator/configs/config.yaml" ``` would work. 
Note that in this case you need to edit both the configs and the code, otherwise the robot will not move. _______ ## iRotate simulation Download and install the iRotate package [here](https://github.com/eliabntt/irotate_active_slam/tree/isaac) from the Isaac branch. By default this simulation does NOT use animated objects. You can see how one can take a blueprint and quickly edit it for one's own convenience. _update `setup_python_env.sh:2`_ with your catkin workspace location. Before launching the simulation you need to open a terminal and run `python[3] irotate_specific/republish_tf.py` Also, run `irotate` as explained in the repo. A set of commands could be: ``` roslaunch robotino_simulations world.launch roslaunch robotino_simulations rtabmap.launch delete:=-d roslaunch active_slam active_node.launch roslaunch robotino_mpc robotino_mpc.launch ``` Note that we launch the FSM later on. With iRotate we usually let the robot start from `0,0,0` and `yaw=0`. If you change this, as with the previous work, you need to change the EKFs accordingly. The transform `world->map` is constant. `map->odom` is done by `rtabmap`. `odom->base_link` is done by the EKFs. Isaac is set up to publish the TFs to the `/tf2` topic. Step 4 is necessary to publish everything back to `/tf`, cleaned of the ground-truth estimation. The custom joint controller has been updated. You need to be sure you are running the one from the iRotate repository. Thus, you need either to build everything in the same workspace or to use `source ... --extend` if you are using two workspaces. You can of course change the scripts to make this work the way you want. You can launch an rviz visualization with `rviz -d irotate_specific irotate.rviz` ``` ./python.sh GRADE-RR/simulator/irotate_simulation.py --config="/GLOBAL/GRADE-RR/simulator/configs/config_irotate.yaml" ``` Once the simulation is running, you can launch `roslaunch robotino_fsm robotino_fsm.launch kind:=2 only_last_set:=false pre_fix:=true mid_optimizer:=true weighted_avg:=true robot_odom:=/odometry/filtered cam_odom:=/camera/odometry/filtered` Note how the topics are still without `/my_robot_x`. This should be changed in the EKF formulation.
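The TF republishing step mentioned above (running `irotate_specific/republish_tf.py` to push the `/tf2` transforms back to `/tf` without the ground-truth frames) can be sketched roughly as follows. This is only an illustrative approximation: the frame names in `GT_FRAMES` are hypothetical, and the actual script may filter differently.

```python
# Illustrative sketch only: republish transforms from /tf2 to /tf,
# dropping ground-truth frames so the EKF/rtabmap estimates are not overridden.
# The real irotate_specific/republish_tf.py may filter different frames.
import rospy
from tf2_msgs.msg import TFMessage

GT_FRAMES = {"my_robot_0/base_link_gt"}  # hypothetical ground-truth frame names

def callback(msg):
    filtered = TFMessage()
    for t in msg.transforms:
        if t.child_frame_id not in GT_FRAMES:
            filtered.transforms.append(t)
    if filtered.transforms:
        pub.publish(filtered)

rospy.init_node("republish_tf")
pub = rospy.Publisher("/tf", TFMessage, queue_size=100)
sub = rospy.Subscriber("/tf2", TFMessage, callback)
rospy.spin()
```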
6,912
Markdown
57.092436
315
0.774306
eliabntt/GRADE-RR/PARAMS.md
# Which parameters are available and how to edit them Note: we try our best to keep this up to date, but there might be some misalignment; this is normal, also considering the different code versions. For example, the "init_loc" parameter can be either a number or a list, depending on which version of the code you are running. The main simulation parameters are the last ones on this page. _____ ### Python params for simulator/smpl_and_bbox - `experiment_folder`, mandatory, the experiment folder with the USD file and the info file - `body` When true, process the bodies - `garments` When true, process the garments - `base_path` Human prim base path, i.e. for filtering prim paths - `headless` Whether to run this headless or not - `write` Whether to write results - `fast` Whether to write only the axis-aligned box or the oriented one; if False, the program will be slow - `both` Whether to write both vertex types -- the preference in the code is both - `only_exp` Whether to export only the experiment (considering the reverse strategy) or the whole sequences - `get_skel` Whether to get/include the skeleton info - `skel_root` A recognizable last part of the root of the skeleton prim, in our case `_avg_root`. It will process ONLY the paths whose last part is this root _____ ### Python params for scripts/colorize - `viewport_folder` mandatory, the "viewport" folder where the npy data is saved - `img_id` if negative, process the whole sequence, otherwise just the img_id - `save_imgs` whether to save images - `save_video` whether to save videos (will produce two videos using ffmpeg) - `always_update_map` whether to always update the instance mapping. Defaults to false, using the first one. Useful if you toggle objects - `semantics` whether to generate semantics - `output_dir` output directory _____ ### Params for scripts/process_paths - `config_file` configuration file with options to normalize the path, and critical keys that will be used in the main folder. You want to change these to your paths - `input` input USDA file. Check the main readme on how to convert the USD file and convert it back, or use the .sh script - `output_name` output USDA file - `output_dir` output dir _____ ### Python params for the main simulator/[*simulation.py, multi_robot_sim.py] files All the parameters are optional with the exception of the config file. If you remove something from the config file, please be sure that it is not used in the code. Otherwise, it will crash. - `config_file` mandatory, the yaml file with most of the params - `headless` if it's true, the visualization is turned off - `rtx_mode` if it's true, the simulation will launch with RTX rendering (faster), otherwise PathTracing is used - `record` if it's true, it will write to disk - `debug_vis` if it's true, it will loop visualization and ROS camera publishing - `neverending` if it's true, the main loop will continue forever - `fix_env` you can set this to the _name_ of the sub-folder containing the environment that you want to load ### Simulation Config YAML params -- these are described according to how they are used. You can easily edit and modify their behavior (e.g. `human_path` can be your only human asset) ***"Mandatory" params*** - `env_path`: folder containing the subfolders of the environments (`env_path/env1, env_path/env2, ...`). You can choose randomly or use `fix_env` to specify an environment. 
- `human_path`: folder that contains the subfolders of the animated human assets, separated into the various datasets (`human_path/dataset1/animationX`,`human_path/dataset2/animationX`) - `base_env_path`: global path of the basic environment (the background). NOTE: some configs cannot be changed from the code - `usd_robot_path`: global path of the USD of the robot. Can be an array as in the multi_robot case - `robot_mesh_path`: global path of the mesh of the robot. Can be an array as in the multi_robot case - `out_folder`: path of the output folder in which we will save the ROS logs and the map - `out_folder_npy`: path of the output folder in which we will save the ground truth from the simulator code (not the rosbags) - `num_robots`: number of robots - `_recorder_settings`: what to save and what not to save. Note that some things are not implemented. I strongly suggest NOT saving colorized data. Motion vectors can be saved with the strategy shown in the replay experiment - `fps` the fps of the simulation - `physics_hz`: NOTE THAT THIS IS THE RATE OF THE CLOCK AND IMU - `render_hz`: LEAVE IT EQUAL TO PHYSICS HZ - `env_prim_path` IsaacSim internal path for the prim of the environment - `robot_base_prim_path` same thing for the robot (the number of the robot is the postfix) - `is_iRotate` whether the robot is the Robotino or not; this changes the launched ROS nodes and some settings. Note that this can be a vector and can be expanded to different robots ***THE OPTIONAL PARAMETERS NEED TO BE EXPLICITLY ADDRESSED IN THE CODE. This is meant as a reference to search the code and understand what is going on*** ***Depending on usage params - experiment*** - `experiment_length`: length of the experiment in camera frames (`seconds * camera_fps`); will be overridden by `neverending`. In the savana experiment this has not been used (the experiment ends when the last waypoint is reached) - `num_humans`: depends on your usage, can be a fixed number, a minimum number, or whatever you want - `[robot,npy]_sensor_size` camera sensor size for robot and npy data. Can be equal. Npy is not necessary if not loaded. - `bootstrap_exploration`: seconds to bootstrap the simulation before starting from time 0 (`min(abs(this_value), 1/(physics_hz/ratio_camera))`). It sets the time negative and cycles through physics and rendering. - `reverse_strategy`: timeline reverse strategy based on the loaded animation lengths. Possibilities are [min, max, avg, half, none]; works only with animated sequences. It makes the timeline go backward/forward based on this. It will roll back the simulation timeline (not the time, just the animation). This uses the animation length (see `paper_simulation.py`) - `anim_exp_len` an alternative to `reverse_strategy`, rewinding the simulation after this many frames - `clean_base_env` whether to remove some things from the base environment loaded at the beginning. - `reload_references` whether to reload the references of the assets or not. Sometimes it might be necessary (seems solved in the newer versions) - `generate_map` whether to generate the occupancy map or not. Works only if STLs are loaded. We suggest using it only with limited environments (it takes a while to add collisions). ***Depending on usage params - robot movement*** - `autonomous`: true -> use FUEL, false -> use random goals (not fully tested); this is applicable only to the main paper simulation, used with `autonomous=True`, just to show it. 
- `use_robot_traj`: Whether or not to use a predefined trajectory *not physics enabled* - `use_joint_traj`: Whether or not to use a joint trajectory *physics enabled*. This cannot be true at the same time as `use_robot_traj`. - `robot_traj`: The trajectory. Remember that the movement will be linear and instantaneous, with no acceleration or anything else. This implies no odom nor IMU data. If you want those, please add the same trajectory to a joint publisher. - `init_loc`: initial location for the robot (the elements can be vectors as in the multi-robot case) ***Depending on usage params - humans*** - `max_distance_human_ground`: max distance between human and ground to be considered when forcing the first-frame grounding of animations - `allow_collision`: max number of collisions allowed between the STL of the human and the STL of the environment - `human_base_prim_path` same thing for the humans (the number of the human is the postfix) - `[max,min]_human_anim_len`: [Max,Min]imum human animation length to be considered. ***Depending on usage params - objects*** - `obstacles`: increase those numbers to load ShapeNet or Google objects (or any other objects) - `google_obj_folder`: google_scanned_objects folder. The structure is `folder/exported_usd` and `folder/assets` - `google_obj_shortlist`: shortlist some objects, not fully tested - `shapenet_local_dir`: local dir of ShapeNet *the suggestion is to download this beforehand* - `shapenet_username`: if you want to download on the fly. The last time I tried, it was not working anymore. - `shapenet_password`: if you want to download on the fly. The last time I tried, it was not working anymore. - `synsetId`: shortlist some objects, not fully tested - `modelId`: shortlist some objects, not fully tested ***Depending on usage params - simulation*** - `ratio_[tf,odom,camera,...]`: physics_hz/ratio_tf = tf publish hz - `_random_light` - `intensity` If the intensity needs to be changed - `color` If the color needs to be changed - `intensity_interval` - `during_experiment` Change color/intensity during the experiment - `n-frames` if `during_experiment` is True, switch the color of the light every n frames - `smooth` NOT IMPLEMENTED - `_random_roughness` Roughness/reflectance of the materials - `enabled` If enabled - `intensity_interval` ***Depending on usage params - others*** - `only_placement` if the placement strategy should be the only ROS component launched. Following a similar strategy, all ROS can be disabled. - `use_stl` whether to load the STLs of the environment/humans or not. This will have repercussions but makes it possible to avoid generating/loading the STL files.
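To make the `ratio_*` convention above concrete, here is a small illustrative snippet showing how the publish rates derive from `physics_hz`. The config path and key names simply follow the descriptions on this page; the actual code that consumes them lives in the simulator scripts.

```python
# Illustrative only: how the ratio_* parameters map to publish rates,
# following "physics_hz/ratio_tf = tf publish hz" described above.
import yaml

with open("simulator/configs/config.yaml") as f:  # hypothetical config path
    cfg = yaml.safe_load(f)

physics_hz = cfg["physics_hz"]           # also the rate of the clock and the IMU
tf_hz = physics_hz / cfg["ratio_tf"]     # e.g. 240 / 4 = 60 Hz
odom_hz = physics_hz / cfg["ratio_odom"]
camera_hz = physics_hz / cfg["ratio_camera"]

print(f"tf: {tf_hz} Hz, odom: {odom_hz} Hz, camera: {camera_hz} Hz")
```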
9,347
Markdown
75.62295
366
0.758853
eliabntt/GRADE-RR/EDIT_USDS.md
If you want to edit a USD file offline for whatever reason, there is an easy way to do it. `.usd` files are binary files; however, they can easily be converted to text files. Just go to the official [USD](https://github.com/PixarAnimationStudios/USD) repository and install it on your system. Then you can run the following: `usdcat -o text_version.usda binary_version.usd` to obtain the text version of your `usd`. With that you can edit all the paths and many other things (e.g. keyframe information). This may or may not be convenient depending on the use case. Both formats are loadable by the system. However, if you wish to convert back to the binary format, you can run the same command again with `usdcat -o binary_version.usd text_version.usda`. Alternatively, check out our `scripts/process_paths` folder. It is automatic and easily adaptable to your use case.
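If you already have the USD Python bindings available (for example inside Isaac Sim's `./python.sh`), the same conversion can also be done programmatically. The snippet below is a minimal sketch based on the standard `pxr` API, not a script shipped with this repository:

```python
# Minimal sketch: convert a binary .usd layer to .usda text and back with the pxr API.
# Equivalent in spirit to `usdcat -o text_version.usda binary_version.usd`.
from pxr import Sdf

layer = Sdf.Layer.FindOrOpen("binary_version.usd")
layer.Export("text_version.usda")   # the format is picked from the file extension

# ... edit text_version.usda with any text editor, then convert it back:
edited = Sdf.Layer.FindOrOpen("text_version.usda")
edited.Export("binary_version.usd")
```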
867
Markdown
47.22222
126
0.775087
eliabntt/GRADE-RR/MOVEMENT.md
# Movement control There are two main aspects: ROS and physics. You can control the movement with or without ROS and with or without physics. In general, including ROS implies having physics. By "With ROS" we mean that ROS is involved in the input. Clearly, you can always publish ROS information from the simulation. When using joints, the system will essentially always use physics in some way. Clearly, as in Gazebo, you can disable gravity, collisions, etc. at your convenience. An important thing to remember is that being physics-enabled implies that joints will be affected by the mass of the object to which they are attached. Clearly, even with teleport this might be true. In practice, not being physics-enabled requires you to disable the gravity and collisions of the object. To disable gravity, change the corresponding property (if it exists) of the asset, similarly to what we do for the collision [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L106). This holds with the exception of options 2 and 4 (which are pretty much equivalent) of the _Without ROS_ case. ### With ROS 1. Attach a joint publisher/listener to the robot ([link](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/robot_utils.py#L233)) and directly publish a `joint_commands` ROS message either on your own [link](https://docs.ros.org/en/melodic/api/control_msgs/html/msg/JointJog.html), using our 6DOF joint controller [link](https://github.com/eliabntt/custom_6dof_joint_controller), or through MoveIt (see the [tutorial](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_ros_moveit.html)). 2. Use an embedded controller provided by IsaacSim and publish `cmd_vel` commands, depending on your use case. 3. Use ROS to publish setpoints in some way, listen to the topic within the simulation loop, and fall back to the "without ROS" section. ### Without ROS 1. Move the robot by sending _joint_ position/velocity setpoints directly from IsaacSim. This will produce physics output and will abide by the settings that you use for your joints (mass, force...). The implementation within IsaacSim is through a PD controller. An example of this has been implemented [here](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/robot_utils.py#L760). Although the implementation there is different (it assumes a set of predefined checkpoints), the concept is the same. To learn about stiffness and damping, please check [this](https://forums.developer.nvidia.com/t/stiffness-damping-setting-in-joint-drive/213916) or [this](https://docs.omniverse.nvidia.com/isaacsim/latest/ext_omni_isaac_motion_generation.html). Note that this has some issues in the 2022.2.1 version of the engine. Clearly, using this you can write your own controller. <details closed> ```python import omni.kit.commands from pxr import Sdf omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/World/robot/.../PrismaticJoint.drive:linear:physics:targetPosition'), value=10, prev=0) ``` ```python import omni.kit.commands from pxr import Sdf omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/World/Cone/PrismaticJoint.drive:linear:physics:targetVelocity'), value=10, prev=0.0) ``` </details closed> 2. 
Use a strategy like the one we use for the [flying objects](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/objects_utils.py#L191), adding [translation](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L288) and [rotation](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#304) animations (scale is also possible). However, this does NOT include physics, collisions, or anything similar whatsoever. In this case the trajectory is followed blindly and interpolated based on your settings. 3. Use [teleporting](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/utils/misc_utils.py#L497). For this, see the [replay experiment](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/replay_experiment.py) code. Note that running the physics steps implies that the robot will be affected by physics (e.g. collisions, gravity, etc.). 4. Create a spline, an animation sequence, or whatever else, and save that to the USD file itself. Once loaded, the robot will behave as an animated object. Again, this won't follow the laws of physics. It will still be affected by physics (accelerations, velocities) but not by collisions, gravity, etc. See [here](https://docs.omniverse.nvidia.com/extensions/latest/ext_animation-timeline.html) and related resources. Similar to #2. 5. Directly set the joint status as done in the [replay experiment](https://github.com/eliabntt/GRADE-RR/blob/7d9cb9a3d75d57628adacb9b9f969909d7663f3d/simulator/replay_experiment.py#L348) from within the simulation itself, although this is quite similar to #1.
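As an illustration of the teleporting idea in option 3, directly setting a prim's pose each step can be sketched with the standard USD API as follows. This is only a minimal example under assumptions (the prim path and pose values are placeholders); the repository's own helpers in `misc_utils.py` and `replay_experiment.py` linked above may work differently.

```python
# Minimal teleport-style sketch: set the robot prim pose directly via the USD API.
# Prim path and pose values are placeholders; the repo's own teleport helper may differ.
import omni.usd
from pxr import UsdGeom, Gf

stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath("/World/my_robot_0")  # hypothetical prim path

xform = UsdGeom.XformCommonAPI(prim)
xform.SetTranslate(Gf.Vec3d(1.0, 2.0, 0.5))
xform.SetRotate(Gf.Vec3f(0.0, 0.0, 90.0))  # XYZ Euler angles in degrees

# Remember: if physics keeps stepping, the robot will still be affected by gravity
# and collisions unless you disable them on the asset (see the note above).
```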
5,063
Markdown
94.547168
904
0.796563
eliabntt/GRADE-RR/README.md
# GRADE-RR or how to Generate Realistic Animated Dynamic Environments for Robotics Research ### Note that while we used the v2021 for the paper, that version is now deprecated. I will work only on v2022+ GRADE is a system I developed to seamlessly manage the Isaac Sim simulation software to Generate Realistic Animated Dynamic Environments for Robotics Research ![prom](NewCover.png) This will help you in: 1. managing the simulation 2. load, place, animate assets 3. load and control any robot --- with or without ROS, with or without physics 4. get sensor readings from such robots, saving *ground truth* or *noisy* data 5. customize your workflow 6. postprocess the data --- add noise, reorganize the bags, prep the data for DL models... 7. repeat any experiment --- *this includes recording new sensor, getting new data, changing the conditions and repair the data while working in realistically looking environments and in a physics enabled simulator.* Each step of the pipeline can be easily customized, expanded or removed from your workflow. If you want more information check out the [paper](https://arxiv.org/abs/2303.04466) or our [website](https://eliabntt.github.io/grade-rr). _______ ## Useful related repositories (that couldn't fit this page) 1. The tools to process the data, add noise to the rosbags or during the simulation, to evaluate the SLAM methods, generate training data can be found [here](https://github.com/robot-perception-group/GRADE_tools) 2. The code to convert SMPL-based animations to USD files is [here](https://github.com/eliabntt/animated_human_SMPL_to_USD). Use this if you want to convert AMASS animated SMPL models, the Cloth3D dataset, or any other dataset that you might have that contains skeletal animations. If you use something different than SMPL (or some of its variations), you will need to extend this code. 3. To convert any environment from Blender to USD and generate some accompanying data use [this](https://github.com/eliabntt/Front3D_to_USD). This has a special focus in indoor environmets and Front3D. Based on BlenderProc. You can use this tool also to convert ANY fbx or other file. 4. The tools we used to autonomously explore the environments during the data generation is [here](https://github.com/eliabntt/ros_isaac_drone), using RotorS, FUEL, our custom 6DOF controller, etc. 5. The modified version of DynaSLAM working with Python3 and using `detectron2` is [here](https://github.com/eliabntt/DynaSLAM) 6. `custom_6dof_joint_controller` is the bridge between the position/velocity commands and the joint velocities expected by IsaacSim. This will allow you to control any robot within the simulation environment. [Link here](https://github.com/eliabntt/custom_6dof_joint_controller/tree/main). 7. `moveit_based_collision_checker_and_placement` our Move-it based placement strategy. [Link here](https://github.com/eliabntt/moveit_based_collision_checker_and_placement/tree/main) ______ ## Our projects ### Active SLAM, indoor scenes data collection, and dynamic SLAM With this framework in conjuction with our [people generator](https://github.com/eliabntt/animated_human_SMPL_to_USD), [environment exporter](https://github.com/eliabntt/Front3D_to_USD) and [control framework](https://github.com/eliabntt/ros_isaac_drone) (which can control virtually anything thanks to our expandable [custom 6DOF joint controller](https://github.com/eliabntt/custom_6dof_joint_controller)), we generated an extensive dataset of indoor animated scenes. 
The data generated has been then post-processed and evaluated with our set of [tools](https://github.com/robot-perception-group/GRADE_tools) against popular SLAM libraries, and used to test the realism your synthetic data. With those tests we showed how many of these methods cannot recover from failures, and have highly degraded performance in dynamic environments even during very short sequences(60 seconds). ### In the wild Zebras observed by drones We used the teleport capabilities of the system to generate both an **outdoor synthetic Zebra** datasets. The details are in the corresponding [Zebra](https://arxiv.org/abs/2305.00432) paper. The goal was to try to bridge the gap between simulation and reality and demonstrate that we can avoid tedious tasks such as precise data annotation. Using a variety of environments from Unreal Engine and a freely available zebra model we were able to generate data realistic enough to obtain models trained from *scratch* that reached >90% accuracy on real world data. _______ ### Folder structure <details closed> <summary>A folder structure summary with comments of what is inside each folder</summary> ```bash ├── cp_local_to_diff_folder.sh # update code from/to isaac folder ├── irotate_specific # specific files used for simulate irotate in isaac sim and instructions │   └── ... ├── isaac_internals # edited isaac files │   ├── apps │   │   └── omni.isaac.sim.python.kit # pre-load some additional extensions and disable a moveit (so that we can load the one from the system) │   ├── kit # solve some bugs in the synthetic data processing │   ├── exts │   │   ├── omni.isaac.shapenet # slightly modified loader │   │   ├── omni.isaac.synthetic_recorder # custom recorder extension that allows more control │   │   └── omni.isaac.synthetic_utils # minor edits │   └── setup_python_env.sh # source the ros environment and show how to source multiple ones ├── kill.sh # script to kill the whole simulation ├── req.sh # requirements file ├── scripts # useful scripts and additional accompanying stuff │   └── ... ├── simulator # main simulator folder, each main file will have it's own description │   ├── configs # yaml configuration files │   ├── utils # utils loaded and used by the main files │ └── ... ├── meshes # folder containing meshes └── usds # usds files ``` </details closed> ___________________ ## HowToS, Installation, Tips, and Known issues The system, contrary to Gazebo, is not straightforward. This is the price you have to pay to be able to access low level APIs and have more control. We highly encourage thorugh readings of the documentation, of the tips section, and for you to get acquainted to the utils that we have organized (perhaps badly, open a pull request please). [Install, StartUp, Issues](https://github.com/eliabntt/GRADE-RR/blob/main/HOWTO.md) [Tips](https://github.com/eliabntt/GRADE-RR/blob/main/TipsAndTricks.md) --- highly encouraged reading! To [generate people based on SMPL](https://github.com/eliabntt/animated_human_SMPL_to_USD), [convert environments/objects from Front3D or other files beforehand](https://github.com/eliabntt/Front3D_to_USD) and see a possible [control framework](https://github.com/eliabntt/ros_isaac_drone) (which can act thanks to our [custom 6DOF joint controller](https://github.com/eliabntt/custom_6dof_joint_controller)), please check our other repositories. Additional scripts are provided [here](https://github.com/eliabntt/GRADE-RR/blob/main/scripts). 
Those can be used to process paths, get statistics of the rosbags, colorize the data filter and compress rosbags, transform the pixels to world coordinates etc. A brief description of the utils libraries used in our code is [here](https://github.com/eliabntt/GRADE-RR/blob/main/simulator/utils/UTILS.md). _____ ## Isaac's edited files details <details closed> <summary>We had to edit some of the files to have more flexibility and solve some bugs. Here are reported details</summary> Edited files are inside `isaac_internals`. The edited ones are the one that are copied by the `cp_local..` script. As per Isaac requirements, we had to include all the licenses and other files. Note that these might be outdated w.r.t. your current installation. - _synthetic\_recorder_ created a custom extension to save our data, and offset the number of cameras. In that way we can save high-resolution images to the disk, while providing ROS smaller images. We found this faster than resizing images afterwards and caused less "issues". - _synthetic\_utils_ we edited the `numpy.py` and the `syntheticdata.py` to save more data and have more flexibility. What is still missing (our bad) is the vertical fov of the camera, which is not directly exposed by Isaac Sim. - In `setup_python_env.sh` we had to prevent the loading of `$SCRIPT_DIR/exts/omni.isaac.motion_planning/bin` (you can find it commented at the very end of line 8), to be able to run the system version of `move_base`. That module could be necessary for some of the Isaac extensions or configurations. Please be aware of this. - `apps/omni.isaac.sim.python.kit` will load a couple of additional necessary extensions - `isaac_internals/kit/extscore/omni.syntheticdata` will simply solve some bugs related to out of bounds and processing errors </details closed> ______ ## Download data The data will be available in our [data repository](https://github.com/eliabntt/GRADE_data/). __________ ## Citations You acknowledge that the Data & Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Data & Software. Citation: ``` @misc{bonetto2023grade, doi = {10.48550/ARXIV.2303.04466}, url = {https://arxiv.org/abs/2303.04466}, author = {Bonetto, Elia and Xu, Chenghao and Ahmad, Aamir}, title = {GRADE: Generating Realistic Animated Dynamic Environments for Robotics Research}, publisher = {arXiv}, year = {2023}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` Additionally: - If you use any Data and/or Software related to zebras(animal) detection from drone imagery reference the following paper in any publication as well ``` @INPROCEEDINGS{10256293, author={Bonetto, Elia and Ahmad, Aamir}, booktitle={2023 European Conference on Mobile Robots (ECMR)}, title={Synthetic Data-Based Detection of Zebras in Drone Imagery}, year={2023}, volume={}, number={}, pages={1-8}, doi={10.1109/ECMR59166.2023.10256293}} } ``` - If you use any Data and/or Software related to our Dyanmic SLAM evaluations ``` @inproceedings{bonetto2023dynamicSLAM, title={{S}imulation of {D}ynamic {E}nvironments for {SLAM}}, author={Elia Bonetto and Chenghao Xu and Aamir Ahmad}, booktitle={ICRA2023 Workshop on Active Methods in Autonomous Navigation}, year={2023}, url={https://arxiv.org/abs/2305.04286}, month = jun, month_numeric = {6} } ``` - If you use any Data and/or Software related to the tasks of detection/segmentation of humans in dynamic environments. 
``` @inproceedings{bonetto2023learning, title={Learning from synthetic data generated with {GRADE}}, author={Elia Bonetto and Chenghao Xu and Aamir Ahmad}, booktitle={ICRA2023 Workshop on Pretraining for Robotics (PT4R)}, year={2023}, url={https://openreview.net/forum?id=SUIOuV2y-Ce}, month = jun, month_numeric = {6} } ``` ____________ ## LICENSE By downloading and/or using the Data & Software (including downloading, cloning, installing, and any other use of the corresponding github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Data & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License. Please read the [licensing](https://github.com/eliabntt/GRADE-RR/blob/main/LICENSE.md) agreement prior to any use of our Data or Software. Accompanying software, such as, but not limited to, the one from Isaac Sim, is licensed according to their specific term of use. If you use data/software from other projects such as, but not limited to, TUM RGB-D, 3D-Front, 3D-Future, ... it is your responsibility to follow their licensing terms, whose you implicitly agree. If you have questions regarding the license, please contact the [[email protected]](mailto:[email protected]). ______ ## Thanks I would like to thank the amazing [NVIDIA support](http://forums.developer.nvidia.com) for their quick response times and precise answers. [Chenghao Xu](http://kyle-xu-001.github.io/) for helping in testing and refining the evaluation scripts. [Aamir Ahmad](aamirahmad.de) for his supervision.
12,526
Markdown
61.949748
617
0.749481
eliabntt/GRADE-RR/LICENSE.md
# Data & Software Copyright License for non-commercial scientific research purposes Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use GRADE data, models, and software, (the "Data & Software"), including synthetic images and videos, SMPL and SMPL-X parameters, 3D body and clothing meshes, 2D textures, and scripts. By downloading and/or using the Data & Software (including downloading, cloning, installing, and any other use of the corresponding code repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Data & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License ## Ownership / Licensees The Data & Software and the associated materials have been developed at the Max Planck Institute for Intelligent Systems (hereinafter "MPI"). Any copyright or patent right is owned by and proprietary material of the Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck”) hereinafter the “Licensor”. ## License Grant Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right: To install the Data & Software on computers owned, leased or otherwise controlled by you and/or your organization; To use the Data & Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects; Any other use, in particular any use for commercial, pornographic, military, or surveillance, purposes is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artifacts for commercial purposes. The Data & Software may not be used to create fake, libelous, misleading, or defamatory content of any kind excluding analyses in peer-reviewed scientific research. The Data & Software may not be reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission. The Data & Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not. This license also prohibits the use of the Data & Software to train methods/algorithms/neural networks/etc. for commercial, pornographic, military, surveillance, or defamatory use of any kind. By downloading the Data & Software, you agree not to reverse engineer it. ## No Distribution The Data & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferre d or sub-licensed in whole or in part except that you may make one copy for archive purposes only. ## Disclaimer of Representations and Warranties You expressly acknowledge and agree that the Data & Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Data & Software is at your sole risk. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE DATA & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. 
Specifically, and not to limit the foregoing, licensor makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of the Data & Software, (ii) that the use of the Data & Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and (iii) that the use of the Data & Software will not cause any damage of any kind to you or a third party. ## Limitation of Liability Because this Data & Software License Agreement qualifies as a donation, according to Section 521 of the German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only. If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage. Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall be excluded. Patent claims generated through the usage of the Data & Software cannot be directed towards the copyright holders. The Data & Software is provided in the state of development the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the Data & Software and is not responsible for any problems such modifications cause. ## No Maintenance Services You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Data & Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Data & Software at any time. Defects of the Data & Software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification, or publication. ## Publications using the Data & Software You acknowledge that the Data & Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Data & Software. 
Citation: ``` @misc{bonetto2023grade, doi = {10.48550/ARXIV.2303.04466}, url = {https://arxiv.org/abs/2303.04466}, author = {Bonetto, Elia and Xu, Chenghao and Ahmad, Aamir}, title = {GRADE: Generating Realistic Animated Dynamic Environments for Robotics Research}, publisher = {arXiv}, year = {2023}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` Additionally: - If you use any Data and/or Software related to zebras(animal) detection from drone imagery reference the following paper in any publication as well ``` @inproceedings{bonetto2023synthetic, title={Synthetic Data-based Detection of Zebras in Drone Imagery}, author={Elia Bonetto and Aamir Ahmad}, year={2023}, month = sep, month_numeric = {9}, publisher = {IEEE}, url = {https://arxiv.org/abs/2305.00432}, booktitle = {2023 European Conference on Mobile Robots (ECMR 2023)}, note={To appear} } ``` - If you use any Data and/or Software related to our Dyanmic SLAM evaluations ``` @inproceedings{bonetto2023dynamicSLAM, title={{S}imulation of {D}ynamic {E}nvironments for {SLAM}}, author={Elia Bonetto and Chenghao Xu and Aamir Ahmad}, booktitle={ICRA2023 Workshop on Active Methods in Autonomous Navigation}, year={2023}, url={https://arxiv.org/abs/2305.04286}, month = jun, month_numeric = {6} } ``` - If you use any Data and/or Software related to the tasks of detection/segmentation of humans in dynamic environments. ``` @inproceedings{bonetto2023learning, title={Learning from synthetic data generated with {GRADE}}, author={Elia Bonetto and Chenghao Xu and Aamir Ahmad}, booktitle={ICRA2023 Workshop on Pretraining for Robotics (PT4R)}, year={2023}, url={https://openreview.net/forum?id=SUIOuV2y-Ce}, month = jun, month_numeric = {6} } ``` ## Commercial licensing opportunities For commercial use of the Data & Software, please send emails to [email protected] This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
8,214
Markdown
74.366972
826
0.758461
eliabntt/GRADE-RR/additional_scripts/average_rosbag.py
""" This is the code used to get the average acc speed and dynamic frames for the GRADE paper. You need some experiment folders. This code will use the bags files in those folder. Please change the folders as desired (first loop in the code, first two lines). We also suppose that you have the instance images to compute the percentage of dynamic frames. """ import rosbag import sys import numpy as np import os # loop through all the bags in the folder folders = [] folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/d94ecc9f-10f6-4f6d-b49f-1ed841f86772") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/d8c14dd6-d794-46d5-aa59-01d3552828c7") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/b13a4874-00a4-49a5-aa2d-e22d7d864b56") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/75bf66e8-acb0-4f27-842d-1945ad42f9de") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/53bfe530-122d-42cb-a1f4-453e6a2a617f") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/23aae785-c0bc-4645-9e64-fdea78c42e2d") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/b0a9c3c3-d470-45ea-82c6-ac529b6882ea") folders.append("/ps/project/irotate/GRADE-paper/Test.bak2/12e463c1-4993-4ea8-9cbf-54ba9403e5f8") names = ["d94ecc9f-10f6-4f6d-b49f-1ed841f86772","d8c14dd6-d794-46d5-aa59-01d3552828c7","b13a4874-00a4-49a5-aa2d-e22d7d864b56","75bf66e8-acb0-4f27-842d-1945ad42f9de","53bfe530-122d-42cb-a1f4-453e6a2a617f","23aae785-c0bc-4645-9e64-fdea78c42e2d","b0a9c3c3-d470-45ea-82c6-ac529b6882ea","12e463c1-4993-4ea8-9cbf-54ba9403e5f8"] import pandas as pd df = pd.DataFrame(columns=['name','speed','acc','dynamic_frames','dynamic_frames_avg_coverage']) for folder in folders: bag_folder = os.path.join(folder, "reindex_bags") bags = [] for bag in os.listdir(bag_folder): if bag.endswith(".bag"): for n in names: if n in bag: bags.append(bag) break # sort bags according to the number bags = sorted(bags, key=lambda x: int(x.split("_")[1].split(".")[0])) avg_speed = [] # avg absolute speed per axis avg_acc = [] # avg absolute acc per axis for bagname in bags: print(bagname) # open the bag bag = rosbag.Bag(os.path.join(bag_folder, bagname)) old_t = None # loop through all the topics for topic, msg, t in bag.read_messages(topics=['/my_robot_0/odom']): # if the topic is the one we want if topic == "/my_robot_0/odom": # get the data data_lin = np.array([msg.twist.twist.linear.x, msg.twist.twist.linear.y, msg.twist.twist.linear.z]) data_ang = np.array([msg.twist.twist.angular.x, msg.twist.twist.angular.y, msg.twist.twist.angular.z]) # get the speed avg_speed.append([np.abs(data_lin[0]), np.abs(data_lin[1]), np.abs(data_lin[2]), np.abs(data_ang[0]), np.abs(data_ang[1]), np.abs(data_ang[2])]) # get the acceleration by using the difference between the current and the previous time if old_t is None: old_speed = [data_lin[0], data_lin[1], data_lin[2], data_ang[0], data_ang[1], data_ang[2]] old_t = t else: # get the difference between the current and the previous time dt = (t - old_t).to_sec() # get the acceleration avg_acc.append(np.abs(np.array( [(data_lin[0] - old_speed[0]) / dt, (data_lin[1] - old_speed[1]) / dt, (data_lin[2] - old_speed[2]) / dt, (data_ang[0] - old_speed[3]) / dt, (data_ang[1] - old_speed[4]) / dt, (data_ang[2] - old_speed[5]) / dt]))) # update the old speed and time old_speed = [data_lin[0], data_lin[1], data_lin[2], data_ang[0], data_ang[1], data_ang[2]] old_t = t bag.close() df = pd.concat([df, pd.DataFrame([[bagname[:-6], np.round(np.mean(avg_speed, axis=0),3), 
np.round(np.mean(avg_acc, axis=0),3), 0, 0]], columns=df.columns)]) folders = [] folders.append("/ps/project/irotate/DE_few_obs_cam0_horiz/d94ecc9f-10f6-4f6d-b49f-1ed841f86772") folders.append("/ps/project/irotate/DE_few_obs_cam0_horiz/d8c14dd6-d794-46d5-aa59-01d3552828c7") folders.append("/ps/project/irotate/DE_cam0_horiz/b13a4874-00a4-49a5-aa2d-e22d7d864b56") folders.append("/ps/project/irotate/DE_cam1/75bf66e8-acb0-4f27-842d-1945ad42f9de") folders.append("/ps/project/irotate/DE_few_obs_cam1/53bfe530-122d-42cb-a1f4-453e6a2a617f") folders.append("/ps/project/irotate/DE_lot_obs_cam0/23aae785-c0bc-4645-9e64-fdea78c42e2d") import cv2 for folder in folders: dynamic_images = 0 dynamic_coverage = 0 masks = os.path.join(folder, "Viewport0_occluded/instance") for mask in os.listdir(masks): if mask.endswith(".npy"): f = np.load(os.path.join(masks, mask), allow_pickle=True) classes = [] for item in f[1]: if item[3] == "human" or item[3] == "google" or item[3] == "shapenet": classes.append(item[0]) """ opencv reshape f[0] to (640, 480) """ img = cv2.resize(f[0].astype(np.uint16), (640, 480), interpolation=cv2.INTER_NEAREST) out = np.isin(img, classes) """count the number of elements of img that are equal to an element of classes""" if len(out[out==True]) > 0: dynamic_coverage += len(out[out==True]) / img.size dynamic_images += 1 df.loc[df["name"] == folder.split("/")[-1], "dynamic_frames"] = dynamic_images df.loc[df["name"] == folder.split("/")[-1], "dynamic_frames_avg_coverage"] = round(dynamic_coverage / dynamic_images*100,2) # print dataframe as latex table print(df.to_latex(index=False)) df.to_pickle("dynamic_frames.pkl")
5,411
Python
46.060869
321
0.697653
eliabntt/GRADE-RR/additional_scripts/colorize.py
""" Use this code to colorize the generated data. The code is thought to colorize all the data, create videos, and fix the vertical fov issue. Please check the arguments to understand how to use it. Please set the corresponding data_enabled to False if you do not want to colorize some kind of data (eg. depth_enabled) """ import math import argparse import colorsys import confuse import copy import cv2 import ipdb import numpy as np import os import pickle as pkl import random from PIL import Image, ImageDraw def project_pinhole(points, view_proj_matrix): """ Project 3D points to 2D camera view using a pinhole camera model. Args: points (numpy.ndarray): Array of points in world frame of shape (num_points, 3). viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor. Returns: (numpy.ndarray): Image-space points of shape (num_points, 3) """ homo = np.pad(points, ((0, 0), (0, 1)), constant_values=1.0) tf_points = np.dot(homo, view_proj_matrix) tf_points = tf_points / (tf_points[..., -1:]) tf_points[..., :2] = 0.5 * (tf_points[..., :2] + 1) return tf_points[..., :3] def random_colours(N, enable_random=True, num_channels=3): """ Generate random colors. Generate visually distinct colours by linearly spacing the hue channel in HSV space and then convert to RGB space. """ start = 0 if enable_random: random.seed(10) start = random.random() hues = [(start + i / N) % 1.0 for i in range(N)] colours = [list(colorsys.hsv_to_rgb(h, 0.9, 1.0)) for i, h in enumerate(hues)] if num_channels == 4: for color in colours: color.append(1.0) if enable_random: random.shuffle(colours) return colours def colorize_bboxes(bboxes_2d_data, rgb, num_channels=4): """ Colorizes 2D bounding box data for visualization. Args: bboxes_2d_data (numpy.ndarray): 2D bounding box data from the sensor. rgb (numpy.ndarray): RGB data from the sensor to embed bounding box. num_channels (int): Specify number of channels i.e. 3 or 4. """ obj_name_list = [] rgb_img = Image.fromarray(rgb).convert("RGBA") rgb_img2 = Image.fromarray(rgb) overlay = Image.new("RGBA", rgb_img.size, (0, 0, 0, 0)) rgb_img_draw = ImageDraw.Draw(overlay) rgb_img_draw2 = ImageDraw.Draw(rgb_img2) for bbox_2d in bboxes_2d_data: obj_name_list.append(bbox_2d[1]) obj_name_list_np = np.unique(np.array(obj_name_list)) color_list = random_colours(len(obj_name_list_np.tolist()), True, num_channels) for bbox_2d in bboxes_2d_data: index = np.where(obj_name_list_np == bbox_2d[1])[0][0] bbox_color = color_list[index] outline = (int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2])) if num_channels == 4: outline = ( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(255 * bbox_color[3]), ) fill = ( int(255 * bbox_color[0]), int(255 * bbox_color[1]), int(255 * bbox_color[2]), int(0.25 * 255), ) rgb_img_draw.rectangle([(bbox_2d[6], bbox_2d[7]), (bbox_2d[8], bbox_2d[9])], fill=fill, outline=outline, width=3) rgb_img_draw2.rectangle([(bbox_2d[6], bbox_2d[7]), (bbox_2d[8], bbox_2d[9])], outline=outline, width=3) bboxes_2d_rgb = Image.alpha_composite(rgb_img, overlay) bboxes_2d_rgb = np.array(bboxes_2d_rgb) bboxes_2d_rgb2 = np.array(rgb_img2) bboxes_2d_rgb3 = np.array(Image.alpha_composite(rgb_img2.convert("RGBA"), overlay)) return bboxes_2d_rgb3 # , bboxes_2d_rgb2 #only boxes def colorize_depth(depth_image): """ It takes a depth image, normalizes it, and then maps it to a color image :param depth_image: The depth image to be colorized :return: The colorized depth image. 
""" height, width = depth_image.shape[:2] colorized_image = np.zeros((height, width, 4)) depth_image *= 100 depth_image = np.reciprocal(depth_image) depth_image[depth_image == 0.0] = 1e-5 depth_image = np.clip(depth_image, 0, 255) depth_image -= np.min(depth_image) if np.max(depth_image) > 0: depth_image /= np.max(depth_image) + 1e-8 colorized_image[:, :, 0] = depth_image colorized_image[:, :, 1] = depth_image colorized_image[:, :, 2] = depth_image colorized_image[:, :, 3] = 1 colorized_image = (colorized_image * 255).astype(np.uint8) return colorized_image def colorize_semantic_from_instance(instance_image, instance_mappings, sem = False): """ It takes the instance image and the instance mappings and returns a colorized image :param instance_image: the instance image from the instance segmentation :param instance_mappings: a list of dictionaries, each of which has the following keys: """ if len(instance_mappings) == 0: segmentation_image = np.zeros_like(instance_image) segmentation_ids = np.unique(segmentation_image) num_colours = len(segmentation_ids) # This is to avoid generating lots of colours for semantic classes not in frame lut = np.array([segmentation_ids, list(range(num_colours))]) re_instanced = lut[1, np.searchsorted(lut[0, :], segmentation_image)] colours = np.array([[0.0] * 4] + random_colours(num_colours)) else: semantic_instances = {} changed = np.zeros(instance_image.shape) for im in instance_mappings[::-1]: semantic_instances.setdefault(im["semanticId"], []).extend(im["instanceIds"]) changed[instance_image == im["uniqueId"]] = max(im["instanceIds"]) instance_image = changed.astype(np.uint32) max_semantic_instance_id = np.max([max(il) for _, il in semantic_instances.items()]) max_instance_id = instance_image.max() lut = np.zeros(max(max_semantic_instance_id, max_instance_id) + 1, dtype=np.uint32) if sem: for i, (_, il) in enumerate(semantic_instances.items()): lut[np.array(il)] = i + 1 # +1 to differentiate from background re_instanced = np.take(lut, instance_image) colours = np.array([[0.0] * 3] + random_colours(len(semantic_instances))) else: colours = np.array([[0.0] * 3] + random_colours(len(lut))) re_instanced = instance_image rgb = np.zeros((re_instanced.shape[0], re_instanced.shape[1], 3)) for i in range(len(colours)): rgb[re_instanced == i] = colours[i] rgb = rgb * 255 return rgb.astype(np.uint8) def colorize_bboxes_3d(bboxes_3d_corners, rgb): """ > It takes a list of 3D bounding boxes and a RGB image, and returns the RGB image with the 3D bounding boxes drawn on it :param bboxes_3d_corners: in the local camera frame :param rgb: the image :return: the image with the bounding boxes drawn on it. 
""" height, width = rgb.shape[:2] # FILTER BOXES mask_uv = ~np.any(np.all(bboxes_3d_corners < 0, axis=1), axis=1) & ~np.any( np.all(bboxes_3d_corners > 1, axis=1), axis=1 ) mask_z = np.all(np.all(bboxes_3d_corners[..., 2:] >= 0, axis=1), axis=1) & np.all( np.all(bboxes_3d_corners[..., 2:] <= 1, axis=1), axis=1 ) bboxes_3d_corners = bboxes_3d_corners[mask_uv & mask_z] bboxes_3d_corners = bboxes_3d_corners[..., :2].reshape(-1, 8, 2) * np.array([[width, height]]) face_idx_list = [[0, 1, 3, 2], [4, 5, 7, 6], [2, 3, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7, 5]] colours = random_colours(len(face_idx_list)) master_overlay_img = Image.new("RGBA", (width, height), (0, 0, 0, 0)) for face_idxs, colour in zip(face_idx_list, colours): overlay = Image.new("RGBA", (width, height)) draw = ImageDraw.Draw(overlay) colour = [int(c * 255) for c in colour] for p in bboxes_3d_corners: draw.polygon([tuple(xy) for xy in p[face_idxs]], fill=tuple([*colour[:3], 120])) draw.line([tuple(xy) for xy in p[face_idxs]], width=3, fill=tuple(colour)) master_overlay_img = Image.alpha_composite(master_overlay_img, overlay) rgb_img = Image.fromarray(rgb).convert("RGBA") rgb_img = Image.alpha_composite(rgb_img, master_overlay_img) return np.asarray(rgb_img) def colorize_normals(normals): """ It takes a 3-channel array of normals, and returns a 4-channel array of normals with the background pixels set to transparent :param normals: a numpy array of shape (H, W, 3) containing the surface normals :return: the normals of the image. """ background_mask = np.sum(normals, axis=-1) == 0.0 # normalize from [-1, 1] to [0, 255] normals = (normals + 1.0) / 2 * 255 # Set background alpha to 0. normals = np.pad(normals, ((0, 0), (0, 0), (0, 1)), constant_values=255) normals[background_mask, 3] = 0. return normals.astype(np.uint8) def colorize_motion_vector(data): """Convert motion vector into colored image. The conversion is done by mapping 3D direction vector to HLS space, then converted to RGB. Args: data (numpy.array): data returned by the annotator of shape (H, W, 4). Return: (np.array): Data converted to uint8 RGBA image. """ r, theta, phi = _cartesian_to_spherical(data[:, :, :3]) phi += np.pi theta_degree = theta * 180 / np.pi phi_degree = phi * 180 / np.pi h = phi_degree / 360 l = theta_degree / 180 r = cv2.normalize(r, None, 0, 1, cv2.NORM_MINMAX) pixels = np.dstack((h * 180, l * 255, r * 255)).astype(np.uint8) rgb = cv2.cvtColor(pixels, cv2.COLOR_HLS2RGB) return rgb def _cartesian_to_spherical(xyz): """ It takes a 3D Cartesian coordinate and returns the corresponding spherical coordinates :param xyz: the 3D coordinates of the points in the image """ h, w = xyz.shape[0], xyz.shape[1] xyz = xyz.reshape(-1, 3) xy = xyz[:, 0] ** 2 + xyz[:, 1] ** 2 r = np.sqrt(xy + xyz[:, 2] ** 2) theta = np.arctan2(np.sqrt(xy), xyz[:, 2]) # for elevation angle defined from Z-axis down phi = np.arctan2(xyz[:, 1], xyz[:, 0]) # for elevation angle defined from XY-plane up return r.reshape(h, w), theta.reshape(h, w), phi.reshape(h, w) def boolean_string(s): """ It takes a string and returns a boolean :param s: the string to convert :return: The boolean value of the string. 
""" if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string') return s.lower() == 'true' parser = argparse.ArgumentParser(description="Colorize data") parser.add_argument("--viewport_folder", type=str) parser.add_argument("--img_id", type=str, default="-1") parser.add_argument("--save_imgs", type=boolean_string, default=True) parser.add_argument("--save_video", type=boolean_string, default=False) parser.add_argument("--always_update_map", type=boolean_string, default=False) parser.add_argument("--semantics", type=boolean_string, default=False) parser.add_argument("--convert_depth", type=boolean_string, default=True) # used to better visualize inverse depth parser.add_argument("--corrected_bbox_folder", type=str, default="") parser.add_argument("--vertical_aperture", type=float, default=2.32) parser.add_argument("--change_aperture", type=boolean_string, default=False) parser.add_argument("--output_dir", type=str) args, unknown = parser.parse_known_args() config = confuse.Configuration("ColorizeData", __name__) config.set_args(args) minid = 1 maxid = 1801 isdigit = False try: int(config["img_id"].get()) isdigit = True except: isdigit = False if isdigit: img_id = int(config["img_id"].get()) if img_id <= -1: print("Processing all images") else: minid = img_id maxid = img_id + 1 ids = [i for i in range(minid, maxid)] else: ids = [config["img_id"].get()] vertical_aperture = config["vertical_aperture"].get() change_aperture = config["change_aperture"].get() viewport = config["viewport_folder"].get() subfolders = os.listdir(config["viewport_folder"].get()) depth_enabled = "depth" in subfolders depthLinear_enabled = "depthLinear" in subfolders normals_enabled = "normals" in subfolders bbox2d_enabled = "bbox_2d_tight" in subfolders bbox3d_enabled = "bbox_3d" in subfolders # todo these need to be fixed instance_enabled = "instance" in subfolders sem_enabled = "instance" in subfolders and config["semantics"].get() motion_enabled = "motion-vector" in subfolders always_update_map = config["always_update_map"].get() save_video = config["save_video"].get() save_img = config["save_imgs"].get() if save_video or save_img: outdir = config["output_dir"].get() if not os.path.exists(config["output_dir"].get()): os.makedirs(outdir) if config["corrected_bbox_folder"].get() != "": corrected_bbox_folder = config["corrected_bbox_folder"].get() else: corrected_bbox_folder = None old_instance_map = None vrgb, vdepth, vdepthLinear, vnormals, vbbox2d, vbbox3d, vinstance, vmotion, vsem = [], [], [], [], [], [], [], [], [] for i in ids: rgb = cv2.imread(os.path.join(viewport, "rgb", f"{i}.png")) if save_img: cv2.imwrite(os.path.join(outdir, f"rgb_{i}.png"), rgb) if save_video: vrgb.append(os.path.join(outdir, f"rgb_{i}.png")) if depthLinear_enabled: depth = np.load(os.path.join(viewport, "depthLinear", f"{i}.npy")) depth = colorize_depth(depth) if save_img: cv2.imwrite(os.path.join(outdir, f"depthLinear_{i}.png"), depth) if save_video: vdepthLinear.append(os.path.join(outdir, f"depthLinear_{i}.png")) if depth_enabled: depth = np.load(os.path.join(viewport, "depth", f"{i}.npy")) if config["convert_depth"].get(): depth = 1/depth depth = colorize_depth(depth) if save_img: cv2.imwrite(os.path.join(outdir, f"depth_{i}.png"), depth) if save_video: vdepth.append(os.path.join(outdir, f"depth_{i}.png")) if normals_enabled: normals = np.load(os.path.join(viewport, "normals", f"{i}.npy")) normals = colorize_normals(normals) if save_img: cv2.imwrite(os.path.join(outdir, f"normals_{i}.png"), normals) if 
save_video: vnormals.append(os.path.join(outdir, f"normals_{i}.png")) if bbox2d_enabled: bbox2d = np.load(os.path.join(viewport, "bbox_2d_tight", f"{i}.npy"), allow_pickle=True) rgb_data = copy.deepcopy(rgb) bbox2d = colorize_bboxes(bbox2d, rgb_data) if save_img: cv2.imwrite(os.path.join(outdir, f"bbox2d_{i}.png"), bbox2d) if save_video: vbbox2d.append(os.path.join(outdir, f"bbox2d_{i}.png")) if bbox3d_enabled: bbox3d = np.load(os.path.join(viewport, "bbox_3d", f"{i}.npy"), allow_pickle=True) viewport_mat = np.load(os.path.join(viewport, "camera", f"{i}.npy"), allow_pickle=True) view_mat = viewport_mat.item()["view_projection_matrix"] pose_mat = viewport_mat.item()["pose"] if change_aperture: viewproj_mat = np.dot(pose_mat, view_mat) vertical_aperture = vertical_aperture vfov = 2 * math.atan(vertical_aperture / (2 * viewport_mat.item()["focal_length"])) viewproj_mat[1,1] = 1 / math.tan(vfov / 2) viewproj_mat = np.dot(np.linalg.inv(pose_mat), viewproj_mat) corners = project_pinhole(bbox3d["corners"].reshape(-1, 3), viewproj_mat) corners = corners.reshape(-1, 8, 3) rgb_data = copy.deepcopy(rgb) e = [] for idx,bb in enumerate(bbox3d): if bb['semanticLabel'] in ['zebra','human','google','shapenet']: e.append(corners[idx]) if corrected_bbox_folder is not None: corrected_bbox = np.load(os.path.join(corrected_bbox_folder, f"{i}.npy"), allow_pickle=True) corrected_bbox = corrected_bbox.item() for idx, bb in enumerate(bbox3d): if bb[1] in corrected_bbox['bbox3d']: print(f"Correcting bbox3d for {bb[1]}") # if corrected_bbox['bbox3d'] is dictionary if isinstance(corrected_bbox['bbox3d'][bb[1]], dict): bbox3d[idx]["corners"] = corrected_bbox['bbox3d'][bb[1]]["oriented"] / 0.01 else: bbox3d[idx]["corners"] = corrected_bbox['bbox3d'][bb[1]] / 0.01 bbox3d = colorize_bboxes_3d(np.array(e), rgb_data) if save_img: cv2.imwrite(os.path.join(outdir, f"bbox3d_{i}.png"), bbox3d) if save_video: vbbox3d.append(os.path.join(outdir, f"bbox3d_{i}.png")) if instance_enabled: instance = np.load(os.path.join(viewport, "instance", f"{i}.npy"), allow_pickle=True) if old_instance_map is None or always_update_map: old_instance_map = copy.deepcopy(instance[1]) instance[1] = copy.deepcopy(old_instance_map) instance_img = colorize_semantic_from_instance(instance[0], instance[1]) if save_img: cv2.imwrite(os.path.join(outdir, f"instance_{i}.png"), instance_img) if save_video: vinstance.append(os.path.join(outdir, f"instance_{i}.png")) if sem_enabled: sem = colorize_semantic_from_instance(instance[0], instance[1], sem=True) if save_img: cv2.imwrite(os.path.join(outdir, f"sem_{i}.png"), sem) if save_video: vsem.append(os.path.join(outdir, f"sem_{i}.png")) if motion_enabled: motion = np.load(os.path.join(viewport, "motion-vector", f"{i}.npy"), allow_pickle=True) motion = colorize_motion_vector(motion) if save_img: cv2.imwrite(os.path.join(outdir, f"motion_{i}.png"), motion) if save_video: vmotion.append(os.path.join(outdir, f"motion_{i}.png")) if save_video: height, width, layers = rgb.shape for v in zip([vrgb, vdepth, vdepthLinear, vnormals, vbbox2d, vbbox3d, vinstance, vmotion, vsem], ["rgb", "depth", "depthLinear", "normals", "bbox2d", "bbox3d", "instance", "motion", "sem"]): if len(v[0]) > 0: video = cv2.VideoWriter(os.path.join(outdir, f"{v[1]}.mp4"), cv2.VideoWriter_fourcc(*"mp4v"), 30, (width, height)) for img_path in v[0]: img = cv2.imread(img_path) if img.shape[2] < 3: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) video.write(img[:, :, :3]) video.release() os.system("ffmpeg -i " + os.path.join(outdir, f"{v[1]}.mp4") + " 
-vcodec libx264 -y " + os.path.join(outdir, f"{v[1]}_conv.mp4"))
17,948
Python
37.027542
127
0.651159
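A minimal sketch of the vertical-FOV correction used in the bbox_3d branch of the colorize script above, factored into a standalone helper. The function name and argument list are illustrative; vertical_aperture and focal_length correspond to the camera dictionary saved alongside the data (e.g. Viewport0/camera).

import math
import numpy as np

def corrected_view_projection(view_proj, pose, focal_length, vertical_aperture=2.32):
    """Patch the vertical focal term of a view-projection matrix (sketch, not the released API)."""
    # Compose with the camera pose so the [1, 1] term can be patched in the camera frame
    # (row-vector convention, as in project_pinhole above).
    cam_proj = np.dot(pose, view_proj)
    vfov = 2 * math.atan(vertical_aperture / (2 * focal_length))
    cam_proj[1, 1] = 1 / math.tan(vfov / 2)
    # Move the patched matrix back to the world-frame convention expected by project_pinhole.
    return np.dot(np.linalg.inv(pose), cam_proj)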
eliabntt/GRADE-RR/additional_scripts/pixel_to_world.py
""" This code serve as an example to project the points from the pixel coordinates to the world coordinates. You need the camera pose and projection matrix, as well as clearly the pixel depth. Those are available in the viewport folder, for example: Viewport0/camera Viewport0/depth (or depthLinear) You will load the camera viewport_mat from the camera folder. This dictionary will have the view projection matrix and the global camera pose They use a near/far clipping plane model, and not a focal length model. At the end of the file you can also check how to use the focal length model, but you need to know the focal length of the camera """ viewport_mat = np.load(os.path.join(viewport, 'camera',f'{i}.npy'), allow_pickle=True) # in Isaac view_projection is np.dot(view_matrix, proj_matrix) # view_matrix is local to world, i.e. the inverse of the pose matrix # the proj_matrix use the near far clipping plane model # a = -1.0 / np.tan(np.radians(fov / 2)) # b = -a * aspect_ratio # c = z_far / (z_far - z_near) # d = z_near * z_far / (z_far - z_near) # Construct the camera projection matrix # projection_matrix = np.array([ # [a, 0.0, 0.0, 0.0], # [0.0, b, 0.0, 0.0], # [0.0, 0.0, c, 1.0], # [0.0, 0.0, d, 0.0] # ]) view_mat = viewport_mat.item()["view_projection_matrix"] pose_mat = viewport_mat.item()["pose"] inv_VP = np.linalg.inv(view_mat) pixel_x = .... pixel_y = .... pixel_d = .... width = viewport_mat['resolution']['width'] width = viewport_mat['resolution']['height'] F = viewport_mat['clipping_range'][1] N = viewport_mat['clipping_range'][0] W = -pixel_d ndc_x = (2 * pixel_x) / width - 1 ndc_y = 1 - (2 * pixel_y) / height Z = ( (W*F/(F-N)) + N*F/(F-N) )/(W) xyz = np.array([ndc_x, ndc_y, Z, 1]) * W xyz = np.dot(xyz, inv_VP) # alternatively consider that a = -fx, b = fy, cx = widht / 2, cy = height /2 # and that the pose_mat has the translation in the last ROW (in unit coordinates, so mind the scale) tmp = np.dot(pose_mat, view_mat) fx = -tmp[0,0] fy = tmp[1,1] cx = width / 2 cy = height / 2 x = (px - cx) * d / fx y = (py - cy) * d / fy pt = [x,y,z,1] xyz = np.dot(cpose.T, pt)[:3]
2,131
Python
33.387096
128
0.656969
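The same near/far-clipping recipe gathered into a single helper for convenience; a sketch assuming the camera dictionary layout described above (view_projection_matrix, resolution, clipping_range) and the row-vector convention used throughout, with the function name chosen here purely for illustration.

import numpy as np

def pixel_to_world(pixel_x, pixel_y, pixel_d, camera_dict):
    """Back-project one pixel with known depth to world coordinates (sketch).

    camera_dict is the dictionary stored in Viewport0/camera/<frame>.npy,
    i.e. viewport_mat.item() in the example above.
    """
    view_proj = camera_dict["view_projection_matrix"]
    width = camera_dict["resolution"]["width"]
    height = camera_dict["resolution"]["height"]
    near, far = camera_dict["clipping_range"][0], camera_dict["clipping_range"][1]

    w = -pixel_d  # clip-space w of a point at that depth
    ndc_x = (2 * pixel_x) / width - 1
    ndc_y = 1 - (2 * pixel_y) / height
    ndc_z = ((w * far / (far - near)) + near * far / (far - near)) / w

    clip = np.array([ndc_x, ndc_y, ndc_z, 1.0]) * w   # undo the perspective divide
    world = np.dot(clip, np.linalg.inv(view_proj))    # row-vector convention
    return world[:3]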
eliabntt/GRADE-RR/additional_scripts/check_folders.py
""" Use this to check if all the files/folders are there """ import os import ipdb mainpath = "/ps/project/irotate/" folders = ["DE_lot_obs_cam0"] tocheck = ["bbox_2d_loose","bbox_2d_tight","bbox_3d","camera","depthLinear","instance","poses","rgb"] for mainfolder in folders: for folder in os.listdir(os.path.join(mainpath, mainfolder)): for subfolder in [os.path.join(mainpath, mainfolder, folder, "Viewport0"), os.path.join(mainpath, mainfolder, folder, "Viewport0_occluded")]: print(subfolder) data = os.listdir(subfolder) if len(data) > len(tocheck): print("More than expected folders") print(subfolder) ipdb.set_trace() if len(data) < len(tocheck): print("Less than expected folders") print(subfolder) ipdb.set_trace() for f in data: if f not in tocheck: continue if len(os.listdir(os.path.join(subfolder, f))) != 1801: print("Not enough files in folder") print(os.path.join(subfolder, f)) ipdb.set_trace()
991
Python
29.060605
143
0.672048
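A small variant of the same completeness check as a reusable function, with the hard-coded 1801 frame count and the interactive ipdb breakpoints turned into a parameter and a return value; all names here are illustrative, not part of the repository.

import os

def find_incomplete_folders(subfolder, expected_dirs, expected_files=1801):
    """Return the sub-directories of `subfolder` that are missing or do not hold the expected number of files."""
    incomplete = []
    for d in expected_dirs:
        path = os.path.join(subfolder, d)
        if not os.path.isdir(path) or len(os.listdir(path)) != expected_files:
            incomplete.append(path)
    return incomplete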
eliabntt/GRADE-RR/additional_scripts/process_paths/parser_config.yaml
cc_path: "../.." # set your cc_texture path prefix_cc: "" front3d_path: "../.." # set your global 3d_front path prefix_front3d: "" cloth3d_path: "../../.." prefix_cloth3d: "" surreal_path: "../.." prefix_surreal: "" normpath: True
235
YAML
15.857142
53
0.604255
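One way such a config could be consumed with confuse, mirroring the confuse pattern used in the colorize script above; a sketch only, where the application name is an assumption and not the repository's actual loader.

import confuse

# Illustrative loader for parser_config.yaml; "ProcessPaths" is an assumed application name.
config = confuse.Configuration("ProcessPaths", __name__)
config.set_file("parser_config.yaml")

cc_path = config["cc_path"].get()        # "../.." unless edited
prefix_cc = config["prefix_cc"].get()    # ""
normpath = config["normpath"].get(bool)  # True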