NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/README.md
# Omniverse Replicator Examples

The code here requires installation of [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/) and [Omniverse Replicator](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html).
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_trigger_intervals.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

with rep.new_layer():
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))

    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")], position=(0, 0, 100), count=6
    )

    # Modify the position every 5 frames
    with rep.trigger.on_frame(num_frames=10, interval=5):
        with spheres:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )

    # Modify color every frame for 50 frames
    with rep.trigger.on_frame(num_frames=50):
        with spheres:
            rep.randomizer.color(
                colors=rep.distribution.normal((0.1, 0.1, 0.1), (1.0, 1.0, 1.0))
            )

    render_product = rep.create.render_product(camera, (512, 512))

    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="trigger_intervals",
        rgb=True,
    )
    writer.attach([render_product])
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_multiple_semantic_classes.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

with rep.new_layer():
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 100, 100))
    cube = rep.create.cube(semantics=[("class2", "cube")], position=(200, 200, 100))
    plane = rep.create.plane(semantics=[("class3", "plane")], scale=10)

    def get_shapes():
        shapes = rep.get.prims(semantics=[("class", "cube"), ("class", "sphere")])
        with shapes:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 50, -500), (500, 50, 500)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=rep.distribution.normal(1, 0.5),
            )
        return shapes.node

    with rep.trigger.on_frame(num_frames=2):
        rep.randomizer.register(get_shapes)

    # Setup Camera
    camera = rep.create.camera(position=(500, 500, 500), look_at=(0, 0, 0))
    render_product = rep.create.render_product(camera, (512, 512))

    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="semantics_classes",
        rgb=True,
        semantic_segmentation=True,
        colorize_semantic_segmentation=True,
        semantic_types=["class", "class2", "class3"],
    )
    writer.attach([render_product])

    rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_scatter_multi_trigger.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause

"""
This snippet shows how to set up multiple independent triggers that happen at
different intervals in the simulation.
"""

import omni.graph.core as og
import omni.replicator.core as rep

# A light to see
distance_light = rep.create.light(rotation=(-45, 0, 0), light_type="distant")

# Create a plane to sample on
plane_samp = rep.create.plane(scale=4, rotation=(20, 0, 0))

# Create a larger sphere to sample on the surface of
sphere_samp = rep.create.sphere(scale=2.4, position=(0, 100, -180))

# Create a larger cylinder we do not want to collide with
cylinder = rep.create.cylinder(semantics=[("class", "cylinder")], scale=(2, 1, 2))


def randomize_spheres():
    # Create small spheres to sample inside the plane
    spheres = rep.create.sphere(scale=0.4, count=60)
    # Scatter the small spheres
    with spheres:
        rep.randomizer.scatter_2d(
            surface_prims=[plane_samp, sphere_samp],
            no_coll_prims=[cylinder],
            check_for_collisions=True,
        )
        # Add color to the small spheres
        rep.randomizer.color(
            colors=rep.distribution.uniform((0.2, 0.2, 0.2), (1, 1, 1))
        )
    return spheres.node


rep.randomizer.register(randomize_spheres)

# Trigger will execute 5 times, every other frame (interval=2)
with rep.trigger.on_frame(num_frames=5, interval=2):
    rep.randomizer.randomize_spheres()

# Trigger will execute 10 times, once every frame
with rep.trigger.on_frame(num_frames=10):
    with cylinder:
        rep.modify.visibility(rep.distribution.sequence([True, False]))

og.Controller.evaluate_sync()  # Only for snippet demonstration preview, not needed for production
rep.orchestrator.preview()  # Only for snippet demonstration preview, not needed for production

rp = rep.create.render_product("/OmniverseKit_Persp", (1024, 768))

# Initialize and attach writer
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="scatter_example", rgb=True)
writer.attach([rp])

rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_writer_segmentation_colors.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

"""
A snippet showing how to create a custom writer to output specific colors in
the semantic annotator output image.
"""

import omni.replicator.core as rep
from omni.replicator.core import Writer, BackendDispatch, WriterRegistry


class MyWriter(Writer):
    def __init__(self, output_dir: str):
        self._frame_id = 0
        self.backend = BackendDispatch({"paths": {"out_dir": output_dir}})
        self.annotators = ["rgb", "semantic_segmentation"]
        # Dictionary mapping of label to RGBA color
        self.CUSTOM_LABELS = {
            "unlabelled": (0, 0, 0, 0),
            "sphere": (128, 64, 128, 255),
            "cube": (244, 35, 232, 255),
            "plane": (102, 102, 156, 255),
        }

    def write(self, data):
        render_products = [k for k in data.keys() if k.startswith("rp_")]
        self._write_rgb(data, "rgb")
        self._write_segmentation(data, "semantic_segmentation")
        self._frame_id += 1

    def _write_rgb(self, data, annotator: str):
        # Save the rgb data under the correct path
        rgb_file_path = f"rgb_{self._frame_id}.png"
        self.backend.write_image(rgb_file_path, data[annotator])

    def _write_segmentation(self, data, annotator: str):
        seg_filepath = f"seg_{self._frame_id}.png"
        semantic_seg_data_colorized = rep.tools.colorize_segmentation(
            data[annotator]["data"],
            data[annotator]["info"]["idToLabels"],
            mapping=self.CUSTOM_LABELS,
        )
        self.backend.write_image(seg_filepath, semantic_seg_data_colorized)

    def on_final_frame(self):
        self.backend.sync_pending_paths()


# Register new writer
WriterRegistry.register(MyWriter)

# Create a new layer for our work to be performed in.
# This is a good habit to develop for later when working on existing USD scenes
with rep.new_layer():
    light = rep.create.light(light_type="dome")

    # Create a simple camera with a position and a point to look at
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))

    # Create some simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus = rep.create.torus(position=(200, 0, 100))  # Torus will be unlabeled
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))

    # Randomize position and scale of each object on each frame
    with rep.trigger.on_frame(num_frames=10):
        # Creating a group so that our modify.pose operation works on all the shapes at once
        with rep.create.group([torus, sphere, cube]):
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )

    # Initialize render product and attach a writer
    render_product = rep.create.render_product(camera, (1024, 1024))
    writer = rep.WriterRegistry.get("MyWriter")
    writer.initialize(output_dir="myWriter_output")
    writer.attach([render_product])
    rep.orchestrator.run()  # Run the simulation
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_remove_semantics.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.graph.core as og
import omni.replicator.core as rep
from omni.usd._impl.utils import get_prim_at_path
from pxr import Semantics
from semantics.schema.editor import remove_prim_semantics

# Setup simple scene
with rep.new_layer():
    # Simple scene setup
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))

    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    cubes = rep.create.cube(
        semantics=[("class", "cube")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )
    spheres = rep.create.sphere(
        semantics=[("class", "sphere")],
        position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
        count=6,
    )

# Get prims to remove semantics on - Execute this first by itself
my_spheres = rep.get.prims(semantics=[("class", "sphere")])
og.Controller.evaluate_sync()  # Trigger an OmniGraph evaluation of the graph to set the values
get_targets = rep.utils.get_node_targets(my_spheres.node, "outputs_prims")
print(get_targets)
# [Sdf.Path('/Replicator/Sphere_Xform'), Sdf.Path('/Replicator/Sphere_Xform_01'),
#  Sdf.Path('/Replicator/Sphere_Xform_02'), Sdf.Path('/Replicator/Sphere_Xform_03'),
#  Sdf.Path('/Replicator/Sphere_Xform_04'), Sdf.Path('/Replicator/Sphere_Xform_05')]

# Loop through each prim_path and remove all semantic data
for prim_path in get_targets:
    prim = get_prim_at_path(prim_path)
    # print(prim.HasAPI(Semantics.SemanticsAPI))
    result = remove_prim_semantics(prim)  # To remove all semantics
    # result = remove_prim_semantics(prim, label_type='class')  # To remove only 'class' semantics
    print(result)
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replcator_clear_layer.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.usd

stage = omni.usd.get_context().get_stage()

for layer in stage.GetLayerStack():
    if layer.GetDisplayName() == "test":
        # del layer
        layer.Clear()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_annotator_segmentation.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

"""
This is an example of how to view annotator data if needed.
"""

import asyncio

import omni.replicator.core as rep
import omni.syntheticdata as sd


async def test_semantics():
    cone = rep.create.cone(semantics=[("prim", "cone")], position=(100, 0, 0))
    sphere = rep.create.sphere(semantics=[("prim", "sphere")], position=(-100, 0, 0))
    invalid_type = rep.create.cube(semantics=[("shape", "boxy")], position=(0, 100, 0))

    # Setup semantic filter
    # sd.SyntheticData.Get().set_instance_mapping_semantic_filter("prim:*")

    cam = rep.create.camera(position=(500, 500, 500), look_at=(0, 0, 0))
    rp = rep.create.render_product(cam, (1024, 512))

    segmentation = rep.AnnotatorRegistry.get_annotator("semantic_segmentation")
    segmentation.attach(rp)

    # step_async() tells Omniverse to update, otherwise the annotation buffer could be empty
    await rep.orchestrator.step_async()
    data = segmentation.get_data()
    print(data)
    # Example Output:
    # {
    #     "data": array(
    #         [
    #             [0, 0, 0, ..., 0, 0, 0],
    #             [0, 0, 0, ..., 0, 0, 0],
    #             [0, 0, 0, ..., 0, 0, 0],
    #             ...,
    #             [0, 0, 0, ..., 0, 0, 0],
    #             [0, 0, 0, ..., 0, 0, 0],
    #             [0, 0, 0, ..., 0, 0, 0],
    #         ],
    #         dtype=uint32,
    #     ),
    #     "info": {
    #         "_uniqueInstanceIDs": array([1, 1, 1], dtype=uint8),
    #         "idToLabels": {
    #             "0": {"class": "BACKGROUND"},
    #             "2": {"prim": "cone"},
    #             "3": {"prim": "sphere"},
    #             "4": {"shape": "boxy"},
    #         },
    #     },
    # }


asyncio.ensure_future(test_semantics())
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/replicator_multi_object_visibility_toggle.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

"""
This will create a group from a list of objects and:
1. Render all the objects together
2. Toggle sole visibility for each object & render
3. Randomize pose for all objects, repeat

This can be useful for training on object occlusions.
"""

import omni.replicator.core as rep

NUM_POSE_RANDOMIZATIONS = 10


# Make a list-of-lists of True/False for each object
# In this example of 3 objects:
# [[True, True, True],
#  [True, False, False],
#  [False, True, False],
#  [False, False, True]]
def make_visibility_lists(num_objects):
    visib = []
    # Make an all-visible first pass
    visib.append(tuple([True for x in range(num_objects)]))
    # List to toggle one object visible at a time
    for x in range(num_objects):
        sub_vis = []
        for i in range(num_objects):
            if x == i:
                sub_vis.append(True)
            else:
                sub_vis.append(False)
        visib.append(tuple(sub_vis))
    return visib


with rep.new_layer():
    # Setup camera and simple light
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))
    light = rep.create.light(rotation=(-45, 45, 0))

    # Create simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus = rep.create.torus(semantics=[("class", "torus")], position=(200, 0, 100))
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))

    # Create a group of the objects we will be manipulating
    # Leaving out camera, light, and plane from visibility toggling and pose randomization
    object_group = rep.create.group([torus, sphere, cube])

    # Get the number of objects to toggle, can work with any number of objects
    num_objects_to_toggle = len(object_group.get_output_prims()["prims"])

    # Create our lists-of-lists for visibility
    visibility_sequence = make_visibility_lists(num_objects_to_toggle)

    # Trigger to toggle visibility one at a time
    with rep.trigger.on_frame(
        max_execs=(num_objects_to_toggle + 1) * NUM_POSE_RANDOMIZATIONS
    ):
        with object_group:
            rep.modify.visibility(rep.distribution.sequence(visibility_sequence))

    # Trigger to randomize position and scale, interval set to number of objects + 1
    # (1 extra for the "all visible" frame)
    with rep.trigger.on_frame(
        max_execs=NUM_POSE_RANDOMIZATIONS, interval=num_objects_to_toggle + 1
    ):
        with object_group:
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )

    # Initialize render product and attach writer
    render_product = rep.create.render_product(camera, (512, 512))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="toggle_multi_visibility",
        rgb=True,
        semantic_segmentation=True,
    )
    writer.attach([render_product])
    rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/snippets/surface_scratches/scratches_randomization.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

from pathlib import Path

import carb
import omni.replicator.core as rep
import omni.usd
from pxr import Sdf, UsdGeom

"""
Instructions:
Open the example scene file "scratches_randomization.usda", located adjacent
to this script, in Omniverse prior to using this script
"""

# Get the current USD "stage". This is where all the scene objects live
stage = omni.usd.get_context().get_stage()

with rep.new_layer():
    camera = rep.create.camera(position=(-30, 38, 60), look_at=(0, 0, 0))
    render_product = rep.create.render_product(camera, (1280, 720))

    # Get scene cube
    cube_prim = stage.GetPrimAtPath("/World/RoundedCube2/Cube/Cube")

    # Set the primvars on the cube once
    primvars_api = UsdGeom.PrimvarsAPI(cube_prim)
    primvars_api.CreatePrimvar("random_color", Sdf.ValueTypeNames.Float3).Set(
        (1.0, 1.0, 1.0)
    )
    primvars_api.CreatePrimvar("random_intensity", Sdf.ValueTypeNames.Float3).Set(
        (1.0, 1.0, 1.0)
    )

    def change_colors():
        # Change color primvars
        cubes = rep.get.prims(
            path_pattern="/World/RoundedCube2/Cube/Cube", prim_types=["Mesh"]
        )
        with cubes:
            rep.modify.attribute(
                "primvars:random_color",
                rep.distribution.uniform((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
                attribute_type="float3",
            )
            rep.modify.attribute(
                "primvars:random_intensity",
                rep.distribution.uniform((0.0, 0.0, 0.0), (10.0, 10.0, 10.0)),
                attribute_type="float3",
            )
        return cubes.node

    rep.randomizer.register(change_colors)

    # Setup randomization of colors, different each frame
    with rep.trigger.on_frame(num_frames=10):
        rep.randomizer.change_colors()

    # (optional) Write output images to disk
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/box_scratches",
        rgb=True,
        bounding_box_2d_tight=True,
        semantic_segmentation=True,
        distance_to_image_plane=True,
    )
    writer.attach([render_product])

carb.log_info("scratches randomization complete")
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/22_Change_Textures.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

# Create new objects to be used in the dataset
with rep.new_layer():
    sphere = rep.create.sphere(
        semantics=[("class", "sphere")], position=(0, 100, 100), count=5
    )
    cube = rep.create.cube(
        semantics=[("class", "cube")], position=(200, 200, 100), count=5
    )
    cone = rep.create.cone(
        semantics=[("class", "cone")], position=(200, 400, 200), count=10
    )
    cylinder = rep.create.cylinder(
        semantics=[("class", "cylinder")], position=(200, 100, 200), count=5
    )

    # Create new camera & render product and attach to camera
    camera = rep.create.camera(position=(0, 0, 1000))
    render_product = rep.create.render_product(camera, (1024, 1024))

    # Create plane if needed (but unused here)
    plane = rep.create.plane(scale=10)

    # Function to get the shapes created above, via their semantic labels
    def get_shapes():
        shapes = rep.get.prims(
            semantics=[
                ("class", "cube"),
                ("class", "sphere"),
                ("class", "cone"),
                ("class", "cylinder"),
            ]
        )
        with shapes:
            # Assign textures to the different objects
            rep.randomizer.texture(
                textures=[
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/aggregate_exposed_diff.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_diff.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/gravel_track_ballast_multi_R_rough_G_ao.jpg",
                    "omniverse://localhost/NVIDIA/Materials/vMaterials_2/Ground/textures/rough_gravel_rough.jpg",
                ]
            )
            # Modify pose and distribution
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 50, -500), (500, 50, 500)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=rep.distribution.normal(1, 0.5),
            )
        return shapes.node

    # Register the get_shapes function as a randomizer function
    rep.randomizer.register(get_shapes)

    # Setup randomization: 100 variations here from 'num_frames'
    with rep.trigger.on_frame(num_frames=100):
        rep.randomizer.get_shapes()

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_22", rgb=True)
    writer.attach([render_product])

    rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/03_replicator_advanced.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

with rep.new_layer():

    def dome_lights():
        lights = rep.create.light(
            light_type="Dome",
            rotation=(270, 0, 0),
            texture=rep.distribution.choice(
                [
                    "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCGcom_ExhibitionHall_Interior1.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Indoor/ZetoCG_com_WarehouseInterior2b.hdr",
                ]
            ),
        )
        return lights.node

    rep.randomizer.register(dome_lights)

    conference_tables = (
        "omniverse://localhost/NVIDIA/Assets/ArchVis/Commercial/Conference/"
    )

    # Create randomizer function for conference table assets.
    # This randomization includes placement and rotation of the assets on the surface.
    def env_conference_table(size=5):
        confTable = rep.randomizer.instantiate(
            rep.utils.get_usd_files(conference_tables, recursive=False),
            size=size,
            mode="scene_instance",
        )
        with confTable:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 0, -500), (500, 0, 500)),
                rotation=rep.distribution.uniform((-90, -180, 0), (-90, 180, 0)),
            )
        return confTable.node

    # Register randomization
    rep.randomizer.register(env_conference_table)

    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))

    surface = rep.create.disk(scale=100, visible=False)

    # Trigger on frame for an interval
    with rep.trigger.on_frame(5):
        rep.randomizer.env_conference_table(2)
        rep.randomizer.dome_lights()
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 200, 1000), (500, 500, 1500)),
                look_at=surface,
            )

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_3", rgb=True)
    writer.attach([render_product])
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/01_hello_replicator.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

# Create a new layer for our work to be performed in.
# This is a good habit to develop for later when working on existing USD scenes
with rep.new_layer():
    # Create a simple camera with a position and a point to look at
    camera = rep.create.camera(position=(0, 500, 1000), look_at=(0, 0, 0))

    # Create some simple shapes to manipulate
    plane = rep.create.plane(
        semantics=[("class", "plane")], position=(0, -100, 0), scale=(100, 1, 100)
    )
    torus = rep.create.torus(semantics=[("class", "torus")], position=(200, 0, 100))
    sphere = rep.create.sphere(semantics=[("class", "sphere")], position=(0, 0, 100))
    cube = rep.create.cube(semantics=[("class", "cube")], position=(-200, 0, 100))

    # Randomize position and scale of each object on each frame
    with rep.trigger.on_frame(num_frames=10):
        # Creating a group so that our modify.pose operation works on all the shapes at once
        with rep.create.group([torus, sphere, cube]):
            rep.modify.pose(
                position=rep.distribution.uniform((-300, 0, -300), (300, 0, 300)),
                scale=rep.distribution.uniform(0.1, 2),
            )

    # Initialize render product and attach a writer
    render_product = rep.create.render_product(camera, (1024, 1024))
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/dli_hello_replicator/",
        rgb=True,
        semantic_segmentation=True,
        bounding_box_2d_tight=True,
    )
    writer.attach([render_product])
    rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/physics.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

with rep.new_layer():
    # Define paths for the props, the environment, and the surface where the
    # assets will be scattered
    PROPS = "omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/YCB/Axis_Aligned_Physics"
    SURFACE = (
        "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Basic/display_riser.usd"
    )
    ENVS = "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd"

    # Define randomizer function for base assets. This randomization includes
    # placement and rotation of the assets on the surface.
    def env_props(size=50):
        instances = rep.randomizer.instantiate(
            rep.utils.get_usd_files(PROPS, recursive=True),
            size=size,
            mode="scene_instance",
        )
        with instances:
            rep.modify.pose(
                position=rep.distribution.uniform((-50, 5, -50), (50, 20, 50)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
                scale=100,
            )
            rep.physics.rigid_body(
                velocity=rep.distribution.uniform((-0, 0, -0), (0, 0, 0)),
                angular_velocity=rep.distribution.uniform((-0, 0, -100), (0, 0, 0)),
            )
        return instances.node

    # Register randomization
    rep.randomizer.register(env_props)

    # Setup the static elements
    env = rep.create.from_usd(ENVS)
    surface = rep.create.from_usd(SURFACE)
    with surface:
        rep.physics.collider()

    # Setup camera and attach it to render product
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))

    # Sphere lights for extra randomization
    def sphere_lights(num):
        lights = rep.create.light(
            light_type="Sphere",
            temperature=rep.distribution.normal(6500, 500),
            intensity=rep.distribution.normal(35000, 5000),
            position=rep.distribution.uniform((-300, -300, -300), (300, 300, 300)),
            scale=rep.distribution.uniform(50, 100),
            count=num,
        )
        return lights.node

    rep.randomizer.register(sphere_lights)

    # Trigger on time at an interval
    with rep.trigger.on_time(interval=2, num=10):
        rep.randomizer.env_props(10)
        rep.randomizer.sphere_lights(10)
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-50, 20, 100), (50, 50, 150)),
                look_at=surface,
            )

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(
        output_dir="~/replicator_examples/dli_physics",
        rgb=True,
        bounding_box_2d_tight=True,
    )
    writer.attach([render_product])
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator/tutorials/fall_2022_DLI/02_background_randomization.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

import omni.replicator.core as rep

with rep.new_layer():

    def dome_lights():
        lights = rep.create.light(
            light_type="Dome",
            rotation=(270, 0, 0),
            texture=rep.distribution.choice(
                [
                    "omniverse://localhost/NVIDIA/Assets/Skies/Cloudy/champagne_castle_1_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Clear/evening_road_01_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Cloudy/kloofendal_48d_partly_cloudy_4k.hdr",
                    "omniverse://localhost/NVIDIA/Assets/Skies/Clear/qwantani_4k.hdr",
                ]
            ),
        )
        return lights.node

    rep.randomizer.register(dome_lights)

    torus = rep.create.torus(semantics=[("class", "torus")], position=(0, -200, 100))

    # Create surface
    surface = rep.create.disk(scale=5, visible=False)

    # Create camera & render product for the scene
    camera = rep.create.camera()
    render_product = rep.create.render_product(camera, resolution=(1024, 1024))

    with rep.trigger.on_frame(num_frames=10, interval=10):
        rep.randomizer.dome_lights()
        with rep.create.group([torus]):
            rep.modify.pose(
                position=rep.distribution.uniform((-100, -100, -100), (200, 200, 200)),
                scale=rep.distribution.uniform(0.1, 2),
            )
        with camera:
            rep.modify.pose(
                position=rep.distribution.uniform((-500, 200, 1000), (500, 500, 1500)),
                look_at=surface,
            )

    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="~/replicator_examples/dli_example_02", rgb=True)
    writer.attach([render_product])

    # Run Replicator
    # rep.orchestrator.run()
NVIDIA-Omniverse/synthetic-data-examples/omni.replicator_yaml/tutorial_randomizer_materials.yaml
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause

# Default scene settings. This script default is 1 (IsaacSim), so if you change
# the meters per unit or up axis, you need to alter the coordinates and rotations.
# IsaacSim default is 1, Code is 0.01
stage_unit_setting:
  settings.set_stage_meters_per_unit:
    meters_per_unit: 1

# IsaacSim default is "Z", Code is "Y"
stage_up_axis_setting:
  settings.set_stage_up_axis:
    up_axis: "Z"

# This YAML script example demonstrates:
# Creating materials, and applying them to objects in a scene with a randomizer

# Create the materials to apply to the objects
mats:
  create.material_omnipbr:
    diffuse:
      distribution.uniform:
        lower: [0, 0, 0]
        upper: [1, 1, 1]
    count: 100

# Create the objects in the scene
spheres:
  create.sphere:
    scale: 0.2
    position:
      distribution.uniform:
        lower: [-1, -1, -1]
        upper: [1, 1, 1]
    count: 100

plane:
  create.plane:
    semantics: [["class", "plane"]]
    position: [0, 0, -1.5]
    visible: true
    scale: 100

# Create the camera and render product
camera:
  create.camera:
    position: [5, 0, 0]

render_product:
  create.render_product:
    camera: camera
    resolution: [1024, 1024]

# Create the writer and initialize
writer:
  writers.get:
    name: "BasicWriter"
    init_params:
      output_dir: "_output_yaml/TutorialRandomizerMaterials/"
      rgb: True

writer_attach:
  writer.attach:
    render_products: render_product

# Register a randomizer that sets the materials of the spheres
register_materials:
  randomizer.register:
    get_spheres:
      inputs:
        spheres: null
        mats: null
      with.spheres:
        randomizer.materials:
          materials: mats

# Set the trigger as on_frame, setting subframes to accumulate frames for a
# higher quality render
trigger:
  trigger.on_frame:
    max_execs: 20
    rt_subframes: 3

# When the trigger executes, apply the randomizer
with_trigger:
  with.trigger:
    randomizer.get_spheres:
      spheres: spheres
      mats: mats
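For readers more familiar with the Python snippets earlier in this collection, a rough Python-API equivalent of the YAML above might look like the sketch below. This is a sketch under the assumption that `rep.create.material_omnipbr` and `rep.randomizer.materials` mirror the YAML entries one-to-one; the stage unit and up-axis settings are omitted.

```python
# A rough Python equivalent of the YAML material-randomization tutorial above
# (a sketch; stage unit and up-axis settings omitted).
import omni.replicator.core as rep

# Create the materials to apply to the objects
mats = rep.create.material_omnipbr(
    diffuse=rep.distribution.uniform((0, 0, 0), (1, 1, 1)), count=100
)

# Create the objects in the scene
spheres = rep.create.sphere(
    scale=0.2,
    position=rep.distribution.uniform((-1, -1, -1), (1, 1, 1)),
    count=100,
)
plane = rep.create.plane(
    semantics=[("class", "plane")], position=(0, 0, -1.5), scale=100
)

# Create the camera, render product, and writer
camera = rep.create.camera(position=(5, 0, 0))
render_product = rep.create.render_product(camera, (1024, 1024))
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="_output_yaml/TutorialRandomizerMaterials/", rgb=True)
writer.attach([render_product])

# Randomize the sphere materials on each trigger execution
with rep.trigger.on_frame(max_execs=20, rt_subframes=3):
    with spheres:
        rep.randomizer.materials(mats)
```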
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/CLA.md
## Individual Contributor License Agreement (CLA)

**Thank you for submitting your contributions to this project.**

By signing this CLA, you agree that the following terms apply to all of your past, present and future contributions to the project.

### License.

You hereby represent that all present, past and future contributions are governed by the [MIT License](https://opensource.org/licenses/MIT) copyright statement.

This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights of the code or documents you contribute to the project itself or its maintainers. Furthermore you also represent that you have the authority to perform the above waiver with respect to the entirety of your contributions.

### Moral Rights.

To the fullest extent permitted under applicable law, you hereby waive, and agree not to assert, all of your “moral rights” in or relating to your contributions for the benefit of the project.

### Third Party Content.

If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that were not authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary rights associated with your Contribution (“Third Party Rights”), then you agree to include with the submission of your Contribution full details respecting such Third Party Content and Third Party Rights, including, without limitation, identification of which aspects of your Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.

### Representations.

You represent that, other than the Third Party Content and Third Party Rights identified by you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were created in the course of your employment with your past or present employer(s), you represent that such employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer(s) has waived all of their right, title or interest in or to your Contributions.

### Disclaimer.

To the fullest extent permitted under applicable law, your Contributions are provided on an "as is" basis, without any warranties or conditions, express or implied, including, without limitation, any implied warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not required to provide support for your Contributions, except to the extent you desire to provide support.

### No Obligation.

You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions into the project. The decision to use or incorporate your contributions into the project will be made at the sole discretion of the maintainers or their authorized delegates.
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/README.md
# Synthetic Data Generation and Training with Sim Ready Assets

This project provides a workflow for training computer vision models with synthetic data. We will use Isaac Sim with Omniverse Replicator to generate data for our use case and objects of interest. To ensure seamless compatibility with model training, the data is generated in the KITTI format.

These steps can be followed on a cloud/remote GPU instance or locally.

## How to use this repository

- [Guide](local/README.md) for running the workflow locally
- [Guide](cloud/README.md) for running on a cloud/remote instance

## Workflow Components:

* Generating Data: Use Isaac Sim to generate data
* Training: We will use the TAO Toolkit; however, users can train a model in a framework of their choice with the data generated

### SDG

- Using the `palletjack` assets from the Warehouse Sim Ready Asset collection
- Carry out Domain Randomization in the scene with Replicator:
    - Various attributes of the scene like lighting, textures, object pose and materials can be modified
    - Important to generate a good quality dataset to ensure the model detects objects in the real world
- Data output in KITTI format
    - We will use the KITTI Writer for generating annotations
    - Possible to implement a custom writer (can be useful when data is expected in a certain format for your model)
- Sample generated images:

<p>
    <img src="images/sample_synthetic/21.png" height="256"/>
    <img src="images/sample_synthetic/653.png" height="256"/>
</p>
<p>
    <img src="images/sample_synthetic/896.png" height="256"/>
    <img src="images/sample_synthetic/1545.png" height="256"/>
</p>

### Training

- TAO: Outline of steps
    - Generating TFRecords
    - Model training and evaluation
        - Model backbone selection
        - Hyperparameters specified via `spec` file (provided with repo)
    - Running inference with trained model
- Sample real world detections on LOCO dataset images:

<p>
    <img src="images/real_world_results/1564562568.298206.jpg" height="256"/>
    <img src="images/real_world_results/1564562843.0618184.jpg" height="256"/>
</p>
<p>
    <img src="images/real_world_results/593768,3659.jpg" height="256"/>
    <img src="images/real_world_results/510196244,1362.jpg" height="256"/>
</p>
<p>
    <img src="images/real_world_results/1574675156.7667925.jpg" height="256"/>
    <img src="images/real_world_results/426023,9672.jpg" height="256"/>
</p>

### Deployment

- Perform optimizations: Pruning and QAT with TAO to reduce model size and improve performance
- Deploy on an NVIDIA Jetson powered robot with Isaac ROS or DeepStream

## References:

- Real world images from the [LOCO dataset](https://github.com/tum-fml/loco) are used for visualizing model performance
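For orientation, a minimal sketch of the SDG pattern described above — domain-randomizing lighting and object pose, then writing KITTI-format annotations — might look like the following. This is not the repo's actual script: `PALLETJACK_USD` is a hypothetical placeholder path (the real assets ship with the Warehouse Sim Ready Asset collection), and only `output_dir` is passed to the writer to keep the sketch minimal.

```python
# A minimal SDG sketch, assuming placeholder asset paths (see lead-in above)
import omni.replicator.core as rep

# Hypothetical placeholder; the real palletjack USD paths live in the repo's SDG script
PALLETJACK_USD = "omniverse://localhost/NVIDIA/Assets/path/to/palletjack.usd"

with rep.new_layer():
    palletjack = rep.create.from_usd(
        PALLETJACK_USD, semantics=[("class", "palletjack")]
    )
    camera = rep.create.camera(position=(0, 5, 5), look_at=(0, 0, 0))
    render_product = rep.create.render_product(camera, (1024, 1024))

    # Domain randomization: vary lighting and palletjack pose each frame
    def sphere_lights(num):
        lights = rep.create.light(
            light_type="Sphere",
            intensity=rep.distribution.normal(35000, 5000),
            position=rep.distribution.uniform((-3, 2, -3), (3, 4, 3)),
            count=num,
        )
        return lights.node

    rep.randomizer.register(sphere_lights)

    with rep.trigger.on_frame(num_frames=1000):
        rep.randomizer.sphere_lights(4)
        with palletjack:
            rep.modify.pose(
                position=rep.distribution.uniform((-2, 0, -2), (2, 0, 2)),
                rotation=rep.distribution.uniform((0, -180, 0), (0, 180, 0)),
            )

    # "KittiWriter" is the KITTI-format writer in Replicator's writer registry
    writer = rep.WriterRegistry.get("KittiWriter")
    writer.initialize(output_dir="palletjack_kitti_data")
    writer.attach([render_product])
    rep.orchestrator.run()
```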
2,771
Markdown
36.972602
294
0.738001
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/LICENSE.md
SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1,167
Markdown
54.619045
97
0.796058
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/cloud/README.md
# Requirements - Access to a cloud/remote GPU instance (workflow tested on a `g4dn` AWS EC2 instance with T4 GPU) - Docker setup instructions are provided in the notebooks - Entire workflow can be run in `headless` mode (SDG script and training) ## Synthetic Data Generation - Use the Isaac Sim docker container for running the Data Generation [script](../palletjack_sdg/palletjack_datagen.sh) - We will generate data for warehouse `palletjack` objects in KITTI format - Follow the steps in the `cloud_sdg` notebook - This generated data can be used to train your own model (framework and architecture of your choice); in this workflow we demonstrate training with TAO ## Training with TAO Toolkit - The `training/cloud_train` notebook provides a walkthrough of the steps: - Setting up the TAO docker container - Downloading a pre-trained model; we will use the `DetectNet_v2` model with a `resnet_18` backbone - Running TAO training with the provided `spec` files - Visualizing model performance on real world data - Visualize model metrics with TensorBoard <img src="../images/tensorboard/tensorboard_resized_palletjack.png"/> ## Next steps ### Generating Synthetic Data for your use case - Make changes to the Domain Randomization in the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py) - Add additional objects of interest in the scene (similar to how palletjacks are added; you can add forklifts, ladders, etc.) to generate data, as sketched below - Use different models for training with TAO (for object detection, you can use YOLO, SSD, EfficientDet) - Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB. You can also write your own ground truth annotator (e.g. for pose estimation; refer to this [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html)). These annotations can be used for training a model with the framework of your choice - Explore the option of using Synthetic + Real data for training a network. This can be particularly useful for generating more data around corner cases ### Deploying Trained Models - The trained model can be pruned and optimized for inference with TAO - This can then be deployed on a robot with NVIDIA Jetson
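For example, a new labeled class can be introduced with a few lines that mirror how the palletjacks are added in `standalone_palletjack_sdg.py`; the forklift USD path below is a placeholder for an asset from your own library, and in practice the pose randomization would live inside the script's existing `on_frame` trigger:

```python
import omni.replicator.core as rep

FORKLIFT_USD = "omniverse://localhost/Path/To/Forklift.usd"  # placeholder path

# Same pattern as add_palletjacks() in the SDG script
forklifts = rep.create.from_usd(
    FORKLIFT_USD, semantics=[("class", "forklift")], count=2
)

with rep.trigger.on_frame(num_frames=1000):
    with forklifts:
        rep.modify.pose(
            position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)),
            rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)),
        )
```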
2,308
Markdown
66.911763
396
0.786395
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/local/README.md
# Requirements - Install [Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/install_workstation.html) - Training via TAO Toolkit Docker container (TAO setup instructions in `local_train` notebook) ## Synthetic Data Generation - Provide the path of your Isaac Sim installation folder in the `generate_data.sh` script - Make the script executable after adding the Isaac Sim path (`chmod +x generate_data.sh`) - Run the script (`./generate_data.sh`) - We will generate data for the `palletjack` class of objects with annotations in KITTI format - This generated data can be used to train your own model (framework and architecture of your choice) ## Training with TAO Toolkit - The data generated in the previous step can be directly fed to TAO for training - The `local_train` notebook provides a walkthrough of the steps: - Setting up the TAO docker container - Downloading a pre-trained model; we will use the `DetectNet_v2` model with a `resnet_18` backbone - Running TAO training with the provided `spec` files - Visualizing model performance on real world data - Visualize model metrics with TensorBoard <img src="../images/tensorboard/tensorboard_resized_palletjack.png"/> ## Next steps ### Generating Synthetic Data for your use case - Make changes to the Domain Randomization in the Synthetic Data Generation [script](../palletjack_sdg/standalone_palletjack_sdg.py) - Add additional objects of interest in the scene (similar to how palletjacks are added; you can add forklifts, ladders, etc.) to generate data - Use different models for training with TAO (for object detection, you can use YOLO, SSD, EfficientDet) - Replicator provides Semantic Segmentation, Instance Segmentation, Depth and various other ground truth annotations along with RGB (a sketch for requesting these outputs follows below). You can also write your own ground truth annotator (e.g. for pose estimation; refer to this [sample](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_replicator_offline_pose_estimation.html)). These annotations can be used for training a model with the framework of your choice - Explore the option of using Synthetic + Real data for training a network. This can be particularly useful for generating more data around corner cases ### Deploying Trained Models - The trained model can be pruned and optimized for inference with TAO - This can then be deployed on a robot with NVIDIA Jetson
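As a sketch, the extra ground-truth outputs can be requested when initializing a writer; the exact keyword set depends on your Replicator version, and the output directory below is a placeholder:

```python
import omni.replicator.core as rep

writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(
    output_dir="_out_extra_annotations",  # placeholder
    rgb=True,
    bounding_box_2d_tight=True,
    semantic_segmentation=True,
    instance_segmentation=True,
    distance_to_camera=True,  # depth-style ground truth
)
# Then attach a render product as usual: writer.attach([render_product])
```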
2,370
Markdown
66.742855
396
0.7827
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/palletjack_with_tao/palletjack_sdg/standalone_palletjack_sdg.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from omni.isaac.kit import SimulationApp import os import argparse parser = argparse.ArgumentParser("Dataset generator") parser.add_argument("--headless", type=bool, default=False, help="Launch script headless, default is False") parser.add_argument("--height", type=int, default=544, help="Height of image") parser.add_argument("--width", type=int, default=960, help="Width of image") parser.add_argument("--num_frames", type=int, default=1000, help="Number of frames to record") parser.add_argument("--distractors", type=str, default="warehouse", help="Options are 'warehouse' (default), 'additional' or None") parser.add_argument("--data_dir", type=str, default=os.getcwd() + "/_palletjack_data", help="Location where data will be output") args, unknown_args = parser.parse_known_args() # This is the config used to launch simulation. CONFIG = {"renderer": "RayTracedLighting", "headless": args.headless, "width": args.width, "height": args.height, "num_frames": args.num_frames} simulation_app = SimulationApp(launch_config=CONFIG) ## This is the path of the background scene into which objects will be added.
ENV_URL = "/Isaac/Environments/Simple_Warehouse/warehouse.usd" import carb import omni import omni.usd from omni.isaac.core.utils.nucleus import get_assets_root_path from omni.isaac.core.utils.stage import get_current_stage, open_stage from pxr import Semantics import omni.replicator.core as rep from omni.isaac.core.utils.semantics import get_semantics # Increase subframes if shadows/ghosting appears of moving objects # See known issues: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator.html#known-issues rep.settings.carb_settings("/omni/replicator/RTSubframes", 4) # This is the location of the palletjacks in the simready asset library PALLETJACKS = ["http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Scale_A/PalletTruckScale_A01_PR_NVD_01.usd", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Heavy_Duty_A/HeavyDutyPalletTruck_A01_PR_NVD_01.usd", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment/Pallet_Trucks/Low_Profile_A/LowProfilePalletTruck_A01_PR_NVD_01.usd"] # The warehouse distractors which will be added to the scene and randomized DISTRACTORS_WAREHOUSE = 2 * ["/Isaac/Environments/Simple_Warehouse/Props/S_TrafficCone.usd", "/Isaac/Environments/Simple_Warehouse/Props/S_WetFloorSign.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_A_03.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_B_03.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BarelPlastic_C_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticB_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticA_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticD_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BottlePlasticE_01.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_BucketPlastic_B.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1262.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1268.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1482.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_1683.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_291.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1454.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CardBoxD_01_1513.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_A_04.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_03.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_B_05.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_C_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_CratePlastic_E_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_PushcartA_02.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_04.usd", "/Isaac/Environments/Simple_Warehouse/Props/SM_RackPile_03.usd"] ## Additional distractors which can be added to the scene 
DISTRACTORS_ADDITIONAL = ["/Isaac/Environments/Hospital/Props/Pharmacy_Low.usd", "/Isaac/Environments/Hospital/Props/SM_BedSideTable_01b.usd", "/Isaac/Environments/Hospital/Props/SM_BooksSet_26.usd", "/Isaac/Environments/Hospital/Props/SM_BottleB.usd", "/Isaac/Environments/Hospital/Props/SM_BottleA.usd", "/Isaac/Environments/Hospital/Props/SM_BottleC.usd", "/Isaac/Environments/Hospital/Props/SM_Cart_01a.usd", "/Isaac/Environments/Hospital/Props/SM_Chair_02a.usd", "/Isaac/Environments/Hospital/Props/SM_Chair_01a.usd", "/Isaac/Environments/Hospital/Props/SM_Computer_02b.usd", "/Isaac/Environments/Hospital/Props/SM_Desk_04a.usd", "/Isaac/Environments/Hospital/Props/SM_DisposalStand_02.usd", "/Isaac/Environments/Hospital/Props/SM_FirstAidKit_01a.usd", "/Isaac/Environments/Hospital/Props/SM_GasCart_01c.usd", "/Isaac/Environments/Hospital/Props/SM_Gurney_01b.usd", "/Isaac/Environments/Hospital/Props/SM_HospitalBed_01b.usd", "/Isaac/Environments/Hospital/Props/SM_MedicalBag_01a.usd", "/Isaac/Environments/Hospital/Props/SM_Mirror.usd", "/Isaac/Environments/Hospital/Props/SM_MopSet_01b.usd", "/Isaac/Environments/Hospital/Props/SM_SideTable_02a.usd", "/Isaac/Environments/Hospital/Props/SM_SupplyCabinet_01c.usd", "/Isaac/Environments/Hospital/Props/SM_SupplyCart_01e.usd", "/Isaac/Environments/Hospital/Props/SM_TrashCan.usd", "/Isaac/Environments/Hospital/Props/SM_Washbasin.usd", "/Isaac/Environments/Hospital/Props/SM_WheelChair_01a.usd", "/Isaac/Environments/Office/Props/SM_WaterCooler.usd", "/Isaac/Environments/Office/Props/SM_TV.usd", "/Isaac/Environments/Office/Props/SM_TableC.usd", "/Isaac/Environments/Office/Props/SM_Recliner.usd", "/Isaac/Environments/Office/Props/SM_Personenleitsystem_Red1m.usd", "/Isaac/Environments/Office/Props/SM_Lamp02_162.usd", "/Isaac/Environments/Office/Props/SM_Lamp02.usd", "/Isaac/Environments/Office/Props/SM_HandDryer.usd", "/Isaac/Environments/Office/Props/SM_Extinguisher.usd"] # The textures which will be randomized for the wall and floor TEXTURES = ["/Isaac/Materials/Textures/Patterns/nv_asphalt_yellow_weathered.jpg", "/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_green_white.jpg", "/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg", "/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg", "/Isaac/Materials/Textures/Patterns/nv_tile_square_green.jpg", "/Isaac/Materials/Textures/Patterns/nv_marble.jpg", "/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg", "/Isaac/Materials/Textures/Patterns/nv_concrete_aged_with_lines.jpg", "/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg", "/Isaac/Materials/Textures/Patterns/nv_stone_painted_grey.jpg", "/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg", "/Isaac/Materials/Textures/Patterns/nv_tile_hexagonal_various.jpg", "/Isaac/Materials/Textures/Patterns/nv_carpet_abstract_pattern.jpg", "/Isaac/Materials/Textures/Patterns/nv_wood_siding_weathered_green.jpg", "/Isaac/Materials/Textures/Patterns/nv_animalfur_pattern_greys.jpg", "/Isaac/Materials/Textures/Patterns/nv_artificialgrass_green.jpg", "/Isaac/Materials/Textures/Patterns/nv_bamboo_desktop.jpg", "/Isaac/Materials/Textures/Patterns/nv_brick_reclaimed.jpg", "/Isaac/Materials/Textures/Patterns/nv_brick_red_stacked.jpg", "/Isaac/Materials/Textures/Patterns/nv_fireplace_wall.jpg", "/Isaac/Materials/Textures/Patterns/nv_fabric_square_grid.jpg", "/Isaac/Materials/Textures/Patterns/nv_granite_tile.jpg", "/Isaac/Materials/Textures/Patterns/nv_marble.jpg", "/Isaac/Materials/Textures/Patterns/nv_gravel_grey_leaves.jpg", 
"/Isaac/Materials/Textures/Patterns/nv_plastic_blue.jpg", "/Isaac/Materials/Textures/Patterns/nv_stone_red_hatch.jpg", "/Isaac/Materials/Textures/Patterns/nv_stucco_red_painted.jpg", "/Isaac/Materials/Textures/Patterns/nv_rubber_woven_charcoal.jpg", "/Isaac/Materials/Textures/Patterns/nv_stucco_smooth_blue.jpg", "/Isaac/Materials/Textures/Patterns/nv_wood_shingles_brown.jpg", "/Isaac/Materials/Textures/Patterns/nv_wooden_wall.jpg"] def update_semantics(stage, keep_semantics=[]): """ Remove semantics from the stage except for keep_semantic classes""" for prim in stage.Traverse(): if prim.HasAPI(Semantics.SemanticsAPI): processed_instances = set() for property in prim.GetProperties(): is_semantic = Semantics.SemanticsAPI.IsSemanticsAPIPath(property.GetPath()) if is_semantic: instance_name = property.SplitName()[1] if instance_name in processed_instances: # Skip repeated instance, instances are iterated twice due to their two semantic properties (class, data) continue processed_instances.add(instance_name) sem = Semantics.SemanticsAPI.Get(prim, instance_name) type_attr = sem.GetSemanticTypeAttr() data_attr = sem.GetSemanticDataAttr() for semantic_class in keep_semantics: # Check for our data classes needed for the model if data_attr.Get() == semantic_class: continue else: # remove semantics of all other prims prim.RemoveProperty(type_attr.GetName()) prim.RemoveProperty(data_attr.GetName()) prim.RemoveAPI(Semantics.SemanticsAPI, instance_name) # needed for loading textures correctly def prefix_with_isaac_asset_server(relative_path): assets_root_path = get_assets_root_path() if assets_root_path is None: raise Exception("Nucleus server not found, could not access Isaac Sim assets folder") return assets_root_path + relative_path def full_distractors_list(distractor_type="warehouse"): """Distractor type allowed are warehouse, additional or None. 
They load the corresponding assets and add them to the scene for domain randomization.""" full_dist_list = [] if distractor_type == "warehouse": for distractor in DISTRACTORS_WAREHOUSE: full_dist_list.append(prefix_with_isaac_asset_server(distractor)) elif distractor_type == "additional": for distractor in DISTRACTORS_ADDITIONAL: full_dist_list.append(prefix_with_isaac_asset_server(distractor)) else: print("No Distractors being added to the current scene for SDG") return full_dist_list def full_textures_list(): full_tex_list = [] for texture in TEXTURES: full_tex_list.append(prefix_with_isaac_asset_server(texture)) return full_tex_list def add_palletjacks(): rep_obj_list = [rep.create.from_usd(palletjack_path, semantics=[("class", "palletjack")], count=2) for palletjack_path in PALLETJACKS] rep_palletjack_group = rep.create.group(rep_obj_list) return rep_palletjack_group def add_distractors(distractor_type="warehouse"): full_distractors = full_distractors_list(distractor_type) distractors = [rep.create.from_usd(distractor_path, count=1) for distractor_path in full_distractors] distractor_group = rep.create.group(distractors) return distractor_group # This will handle replicator def run_orchestrator(): rep.orchestrator.run() # Wait until started while not rep.orchestrator.get_is_started(): simulation_app.update() # Wait until stopped while rep.orchestrator.get_is_started(): simulation_app.update() rep.BackendDispatch.wait_until_done() rep.orchestrator.stop() def main(): # Open the environment in a new stage print(f"Loading Stage {ENV_URL}") open_stage(prefix_with_isaac_asset_server(ENV_URL)) stage = get_current_stage() # Run some app updates to make sure things are properly loaded for i in range(100): if i % 10 == 0: print(f"App update {i}..") simulation_app.update() textures = full_textures_list() rep_palletjack_group = add_palletjacks() rep_distractor_group = add_distractors(distractor_type=args.distractors) # We only need labels for the palletjack objects update_semantics(stage=stage, keep_semantics=["palletjack"]) # Create camera with Replicator API for gathering data cam = rep.create.camera(clipping_range=(0.1, 1000000)) # trigger replicator pipeline with rep.trigger.on_frame(num_frames=CONFIG["num_frames"]): # Move the camera around in the scene, focus on the center of the warehouse with cam: rep.modify.pose(position=rep.distribution.uniform((-9.2, -11.8, 0.4), (7.2, 15.8, 4)), look_at=(0, 0, 0)) # Get the Palletjack body mesh and modify its color with rep.get.prims(path_pattern="SteerAxles"): rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1))) # Randomize the pose of all the added palletjacks with rep_palletjack_group: rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)), rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)), scale=rep.distribution.uniform((0.01, 0.01, 0.01), (0.01, 0.01, 0.01))) # Modify the pose of all the distractors in the scene with rep_distractor_group: rep.modify.pose(position=rep.distribution.uniform((-6, -6, 0), (6, 12, 0)), rotation=rep.distribution.uniform((0, 0, 0), (0, 0, 360)), scale=rep.distribution.uniform(1, 1.5)) # Randomize the lighting of the scene with rep.get.prims(path_pattern="RectLight"): rep.modify.attribute("color", rep.distribution.uniform((0, 0, 0), (1, 1, 1))) rep.modify.attribute("intensity", rep.distribution.normal(100000.0, 600000.0)) rep.modify.visibility(rep.distribution.choice([True, False, False, False, False, False, False])) # select floor material random_mat_floor =
rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures), roughness=rep.distribution.uniform(0, 1), metallic=rep.distribution.choice([0, 1]), emissive_texture=rep.distribution.choice(textures), emissive_intensity=rep.distribution.uniform(0, 1000),) with rep.get.prims(path_pattern="SM_Floor"): rep.randomizer.materials(random_mat_floor) # select random wall material random_mat_wall = rep.create.material_omnipbr(diffuse_texture=rep.distribution.choice(textures), roughness=rep.distribution.uniform(0, 1), metallic=rep.distribution.choice([0, 1]), emissive_texture=rep.distribution.choice(textures), emissive_intensity=rep.distribution.uniform(0, 1000),) with rep.get.prims(path_pattern="SM_Wall"): rep.randomizer.materials(random_mat_wall) # Set up the writer writer = rep.WriterRegistry.get("KittiWriter") # output directory of writer output_directory = args.data_dir print("Outputting data to ", output_directory) # use writer for bounding boxes, rgb and segmentation writer.initialize(output_dir=output_directory, omit_semantic_type=True,) # attach camera render products to the writer so that data is written out RESOLUTION = (CONFIG["width"], CONFIG["height"]) render_product = rep.create.render_product(cam, RESOLUTION) writer.attach(render_product) # run rep pipeline run_orchestrator() simulation_app.update() if __name__ == "__main__": try: main() except Exception as e: carb.log_error(f"Exception: {e}") import traceback traceback.print_exc() finally: simulation_app.close()
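Once a run completes, the generated labels can be sanity-checked with the standard library alone. This is a sketch: it assumes the script's default `--data_dir` and that the KittiWriter emits one KITTI-format `.txt` label file per frame, whose first whitespace-separated field is the class name; the exact folder layout may differ between Replicator versions.

```python
import glob
import os
from collections import Counter

data_dir = os.path.join(os.getcwd(), "_palletjack_data")  # default --data_dir
class_counts = Counter()
for label_file in glob.glob(os.path.join(data_dir, "**", "*.txt"), recursive=True):
    with open(label_file) as f:
        for line in f:
            fields = line.split()
            if fields:
                class_counts[fields[0]] += 1  # first KITTI field is the class
print(class_counts)  # e.g. Counter({'palletjack': ...})
```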
20,199
Python
52.439153
191
0.634388
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docker-compose.yml
version: '3' services: deployment: build: deployment # Access the lab with: http://127.0.0.1:8884/lab?token=nvsecuretoken2 command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8884 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken2' --NotebookApp.password='nvsecurepassword' shm_size: '2gb' volumes: - './deployment/code/:/opt/project/' - './models:/opt/models/' ports: - "8884:8884" training: build: training # Access the lab with: http://127.0.0.1:8883/lab?token=nvsecuretoken1 command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8883 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken1' --NotebookApp.password='nvsecurepassword' volumes: - './training/code/:/opt/project/' ports: - "8883:8883" data-generation: build: data_generation # Access the lab with: http://127.0.0.1:8882/lab?token=nvsecuretoken0 command: jupyter lab -y --allow-root --no-browser --ip=0.0.0.0 --port=8882 --notebook-dir=/opt/project/ --NotebookApp.token='nvsecuretoken0' --NotebookApp.password='nvsecurepassword' volumes: - './data_generation/code/:/opt/project/' ports: - "8882:8882"
1,238
YAML
40.299999
186
0.661551
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/README.md
## Getting started ### Install Dependencies - [`docker-compose`](https://docs.docker.com/compose/install/) - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) ### Run the labs ``` bash run.sh ``` ### Access the labs Part 1: Generate synthetic data with Omniverse - [http://127.0.0.1:8882/lab?token=nvsecuretoken0](http://127.0.0.1:8882/lab?token=nvsecuretoken0) \ Part 2: Training a model with synthetic data - [http://127.0.0.1:8883/lab?token=nvsecuretoken1](http://127.0.0.1:8883/lab?token=nvsecuretoken1) \ Part 3: Deploy model to Triton - [http://127.0.0.1:8884/lab?token=nvsecuretoken2](http://127.0.0.1:8884/lab?token=nvsecuretoken2)
708
Markdown
38.388887
147
0.731638
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/visualize.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. # SPDX-License-Identifier: BSD-3-Clause # # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import os import json import hashlib from PIL import Image import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches from optparse import OptionParser """ Takes in the data from a specific label id and maps it to the proper color for the bounding box """ def data_to_colour(data): if isinstance(data, str): data = bytes(data, "utf-8") else: data = bytes(data) m = hashlib.sha256() m.update(data) key = int(m.hexdigest()[:8], 16) r = ((((key >> 0) & 0xFF) + 1) * 33) % 255 g = ((((key >> 8) & 0xFF) + 1) * 33) % 255 b = ((((key >> 16) & 0xFF) + 1) * 33) % 255 # illumination normalization to 128 inv_norm_i = 128 * (3.0 / (r + g + b)) return ( int(r * inv_norm_i) / 255, int(g * inv_norm_i) / 255, int(b * inv_norm_i) / 255, ) """ Takes in the path to the rgb image for the background, then it takes bounding box data, the labels and the place to store the visualization. It outputs a colorized bounding box. """ def colorize_bbox_2d(rgb_path, data, id_to_labels, file_path): rgb_img = Image.open(rgb_path) colors = [data_to_colour(bbox["semanticId"]) for bbox in data] fig, ax = plt.subplots(figsize=(10, 10)) ax.imshow(rgb_img) for bbox_2d, color, index in zip(data, colors, range(len(data))): labels = id_to_labels[str(index)] rect = patches.Rectangle( xy=(bbox_2d["x_min"], bbox_2d["y_min"]), width=bbox_2d["x_max"] - bbox_2d["x_min"], height=bbox_2d["y_max"] - bbox_2d["y_min"], edgecolor=color, linewidth=2, label=labels, fill=False, ) ax.add_patch(rect) plt.legend(loc="upper left") plt.savefig(file_path) """ Parses command line options. Requires input directory, output directory, and number for image to use. 
""" def parse_input(): usage = "usage: visualize.py [options] arg1 arg2 arg3" parser = OptionParser(usage) parser.add_option( "-d", "--data_dir", dest="data_dir", help="Directory location for Omniverse synthetic data", ) parser.add_option( "-o", "--out_dir", dest="out_dir", help="Directory location for output image" ) parser.add_option( "-n", "--number", dest="number", help="Number of image to use for visualization" ) (options, args) = parser.parse_args() return options, args def main(): options, args = parse_input() out_dir = options.data_dir rgb = "png/rgb_" + options.number + ".png" rgb_path = os.path.join(out_dir, rgb) bbox2d_tight_file_name = "npy/bounding_box_2d_tight_" + options.number + ".npy" data = np.load(os.path.join(options.data_dir, bbox2d_tight_file_name)) # Check for labels bbox2d_tight_labels_file_name = ( "json/bounding_box_2d_tight_labels_" + options.number + ".json" ) with open( os.path.join(options.data_dir, bbox2d_tight_labels_file_name), "r" ) as json_data: bbox2d_tight_id_to_labels = json.load(json_data) # colorize and save image colorize_bbox_2d( rgb_path, data, bbox2d_tight_id_to_labels, os.path.join(options.out_dir, "bbox2d_tight.png"), ) if __name__ == "__main__": main()
5,013
Python
32.651006
177
0.658687
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/export.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. # SPDX-License-Identifier: BSD-3-Clause # # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import os import torch from optparse import OptionParser def parse_input(): usage = "usage: export.py [options] arg1 " parser = OptionParser(usage) parser.add_option( "-d", "--pytorch_dir", dest="pytorch_dir", help="Location of the trained PyTorch model", ) parser.add_option( "-o", "--output_dir", dest="output_dir", help="Export and save ONNX model to this path", ) (options, args) = parser.parse_args() return options, args def main(): torch.manual_seed(0) options, args = parse_input() # Load the trained model saved by train.py and export it directly model = torch.load(options.pytorch_dir, map_location="cpu") model.eval() OUTPUT_DIR = options.output_dir os.makedirs(OUTPUT_DIR, exist_ok=True) dummy_input = torch.rand(1, 3, 1024, 1024) torch.onnx.export( model, dummy_input, os.path.join(OUTPUT_DIR, "model.onnx"), opset_version=11, input_names=["input"], output_names=["output"], ) if __name__ == "__main__": main()
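A quick way to confirm the exported file is loadable and accepts the expected input shape is to push a random tensor through it with onnxruntime. This is a sketch: it assumes onnxruntime is installed, and the model path below follows the Triton layout suggested in the docs.

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession(
    "models/fasterrcnn_resnet50/1/model.onnx",  # placeholder path
    providers=["CPUExecutionProvider"],
)
dummy = np.random.rand(1, 3, 1024, 1024).astype(np.float32)
outputs = sess.run(None, {"input": dummy})
print([o.shape for o in outputs])
```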
2,865
Python
33.119047
84
0.704363
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/training/code/train.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. # SPDX-License-Identifier: BSD-3-Clause # # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from PIL import Image import os import numpy as np import torch import torch.utils.data import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision import transforms as T import json import shutil from optparse import OptionParser from torch.utils.tensorboard import SummaryWriter class FruitDataset(torch.utils.data.Dataset): def __init__(self, root, transforms): self.root = root self.transforms = transforms list_ = os.listdir(root) for file_ in list_: name, ext = os.path.splitext(file_) ext = ext[1:] if ext == "": continue if os.path.exists(root + "/" + ext): shutil.move(root + "/" + file_, root + "/" + ext + "/" + file_) else: os.makedirs(root + "/" + ext) shutil.move(root + "/" + file_, root + "/" + ext + "/" + file_) self.imgs = list(sorted(os.listdir(os.path.join(root, "png")))) self.label = list(sorted(os.listdir(os.path.join(root, "json")))) self.box = list(sorted(os.listdir(os.path.join(root, "npy")))) def __getitem__(self, idx): img_path = os.path.join(self.root, "png", self.imgs[idx]) img = Image.open(img_path).convert("RGB") label_path = os.path.join(self.root, "json", self.label[idx]) with open(label_path, "r") as json_data: json_labels = json.load(json_data) box_path = os.path.join(self.root, "npy", self.box[idx]) dat = np.load(str(box_path)) boxes = [] labels = [] areas = [] for i in dat: obj_val = i[0] xmin = torch.as_tensor(np.min(i[1]), dtype=torch.float32) xmax = torch.as_tensor(np.max(i[3]), dtype=torch.float32) ymin = torch.as_tensor(np.min(i[2]), dtype=torch.float32) ymax = torch.as_tensor(np.max(i[4]), dtype=torch.float32) if (ymax > ymin) & (xmax > xmin): boxes.append([xmin, ymin, xmax, ymax]) areas.append((xmax - xmin) * (ymax - ymin)) labels += [json_labels.get(str(obj_val)).get("class")] label_dict = {} static_labels = { "apple": 0, "avocado": 1, "kiwi": 2, "lime": 3,
"lychee": 4, "pomegranate": 5, "onion": 6, "strawberry": 7, "lemon": 8, "orange": 9, } labels_out = [] for i in range(len(labels)): label_dict[i] = labels[i] for i in label_dict: fruit = label_dict[i] final_fruit_label = static_labels[fruit] labels_out += [final_fruit_label] target = {} target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32) target["labels"] = torch.as_tensor(labels_out, dtype=torch.int64) target["image_id"] = torch.tensor([idx]) target["area"] = area if self.transforms is not None: img = self.transforms(img) return img, target def __len__(self): return len(self.imgs) """ Parses command line options. Requires input data directory, output torch file, and number epochs used to train. """ def parse_input(): usage = "usage: train.py [options] arg1 arg2 " parser = OptionParser(usage) parser.add_option( "-d", "--data_dir", dest="data_dir", help="Directory location for Omniverse synthetic data.", ) parser.add_option( "-o", "--output_file", dest="output_file", help="Save torch model to this file and location (file ending in .pth)", ) parser.add_option( "-e", "--epochs", dest="epochs", help="Give number of epochs to be used for training", ) (options, args) = parser.parse_args() return options, args def get_transform(train): transforms = [] transforms.append(T.PILToTensor()) transforms.append(T.ConvertImageDtype(torch.float)) return T.Compose(transforms) def collate_fn(batch): return tuple(zip(*batch)) def create_model(num_classes): model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT") in_features = model.roi_heads.box_predictor.cls_score.in_features model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) return model def main(): writer = SummaryWriter() options, args = parse_input() dataset = FruitDataset(options.data_dir, get_transform(train=True)) train_size = int(len(dataset) * 0.7) valid_size = int(len(dataset) * 0.2) test_size = len(dataset) - valid_size - train_size train, valid, test = torch.utils.data.random_split( dataset, [train_size, valid_size, test_size] ) data_loader = torch.utils.data.DataLoader( dataset, batch_size=16, shuffle=True, num_workers=4, collate_fn=collate_fn ) validloader = torch.utils.data.DataLoader( valid, batch_size=16, shuffle=True, num_workers=4, collate_fn=collate_fn ) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") num_classes = 10 num_epochs = int(options.epochs) model = create_model(num_classes) model.to(device) params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=0.001) len_dataloader = len(data_loader) model.train() for epoch in range(num_epochs): optimizer.zero_grad() i = 0 for imgs, annotations in data_loader: i += 1 imgs = list(img.to(device) for img in imgs) annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations] loss_dict = model(imgs, annotations) losses = sum(loss for loss in loss_dict.values()) writer.add_scalar("Loss/train", losses, epoch) losses.backward() optimizer.step() print(f"Iteration: {i}/{len_dataloader}, Loss: {losses}") writer.close() torch.save(model, options.output_file) if __name__ == "__main__": main()
7,902
Python
32.487288
111
0.621362
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/data_generation/code/generate_data_gui.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. # SPDX-License-Identifier: BSD-3-Clause # # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import datetime now = datetime.datetime.now() from functools import partial import omni.replicator.core as rep with rep.new_layer(): # Define paths for the character, the props, the environment and the surface where the assets will be scattered in. CRATE = "omniverse://localhost/NVIDIA/Samples/Marbles/assets/standalone/SM_room_crate_3/SM_room_crate_3.usd" SURFACE = ( "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Basic/display_riser.usd" ) ENVS = "omniverse://localhost/NVIDIA/Assets/Scenes/Templates/Interior/ZetCG_ExhibitionHall.usd" FRUIT_PROPS = { "apple": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Apple.usd", "avocado": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Avocado01.usd", "kiwi": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Kiwi01.usd", "lime": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lime01.usd", "lychee": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Lychee01.usd", "pomegranate": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Fruit/Pomegranate01.usd", "onion": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Vegetables/RedOnion.usd", "strawberry": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Food/Berries/strawberry.usd", "lemon": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Lemon_01.usd", "orange": "omniverse://localhost/NVIDIA/Assets/ArchVis/Residential/Decor/Tchotchkes/Orange_01.usd", } # Define randomizer function for Base assets. This randomization includes placement and rotation of the assets on the surface. 
def random_props(file_name, class_name, max_number=1, one_in_n_chance=3): instances = rep.randomizer.instantiate( file_name, size=max_number, mode="scene_instance" ) print(file_name) with instances: rep.modify.semantics([("class", class_name)]) rep.modify.pose( position=rep.distribution.uniform((-8, 5, -25), (8, 30, 25)), rotation=rep.distribution.uniform((-180, -180, -180), (180, 180, 180)), scale=rep.distribution.uniform((0.8), (1.2)), ) rep.modify.visibility( rep.distribution.choice([True], [False] * (one_in_n_chance)) ) return instances.node # Define randomizer function for sphere lights. def sphere_lights(num): lights = rep.create.light( light_type="Sphere", temperature=rep.distribution.normal(6500, 500), intensity=rep.distribution.normal(30000, 5000), position=rep.distribution.uniform((-300, -300, -300), (300, 300, 300)), scale=rep.distribution.uniform(50, 100), count=num, ) return lights.node rep.randomizer.register(random_props) # Setup the static elements env = rep.create.from_usd(ENVS) surface = rep.create.from_usd(SURFACE) with surface: rep.physics.collider() crate = rep.create.from_usd(CRATE) with crate: rep.physics.collider("none") rep.physics.mass(mass=10000) rep.modify.pose(position=(0, 20, 0), rotation=(0, 0, 90)) # Setup camera and attach it to render product camera = rep.create.camera() render_product = rep.create.render_product(camera, resolution=(1024, 1024)) rep.randomizer.register(sphere_lights) # trigger on frame for an interval with rep.trigger.on_frame(num_frames=100): for n, f in FRUIT_PROPS.items(): random_props(f, n) rep.randomizer.sphere_lights(5) with camera: rep.modify.pose( position=rep.distribution.uniform((-3, 114, -17), (-1, 116, -15)), look_at=(0, 20, 0), ) # Initialize and attach writer writer = rep.WriterRegistry.get("BasicWriter") now = now.strftime("%Y-%m-%d") output_dir = "fruit_data_" + now writer.initialize(output_dir=output_dir, rgb=True, bounding_box_2d_tight=True) writer.attach([render_product])
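This GUI version is driven by clicking `Replicator > Run`; the headless variant referenced in part_1.md presumably differs mainly in starting the orchestrator itself. A sketch of that tail, mirroring the run pattern used elsewhere in these examples:

```python
import omni.replicator.core as rep

# Start data generation without the GUI's Replicator > Run button;
# rep.BackendDispatch.wait_until_done() can then block until writes finish
rep.orchestrator.run()
```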
5,923
Python
47.162601
130
0.690866
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/deployment/code/deploy.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. # SPDX-License-Identifier: BSD-3-Clause # # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import tritonclient.grpc as grpcclient from optparse import OptionParser # load image data import cv2 import numpy as np from matplotlib import pyplot as plt import subprocess def install(name): subprocess.call(["pip", "install", name]) """ Parses command line options. Requires input sample png """ def parse_input(): usage = "usage: deploy.py [options] arg1 " parser = OptionParser(usage) parser.add_option( "-p", "--png", dest="png", help="Directory location for single sample image." 
) (options, args) = parser.parse_args() return options, args def main(): options, args = parse_input() target_width, target_height = 1024, 1024 # add path to test image image_sample = options.png image_bgr = cv2.imread(image_sample) image_bgr = cv2.resize(image_bgr, (target_width, target_height)) image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB) image = np.float32(image_rgb) # preprocessing image = image / 255 image = np.moveaxis(image, -1, 0) # HWC to CHW image = image[np.newaxis, :] # add batch dimension image = np.float32(image) inference_server_url = "0.0.0.0:9001" triton_client = grpcclient.InferenceServerClient(url=inference_server_url) # find out info about model model_name = "fasterrcnn_resnet50" triton_client.get_model_config(model_name) # create input input_name = "input" inputs = [grpcclient.InferInput(input_name, image.shape, "FP32")] inputs[0].set_data_from_numpy(image) output_name = "output" outputs = [grpcclient.InferRequestedOutput(output_name)] results = triton_client.infer(model_name, inputs, outputs=outputs) output = results.as_numpy(output_name) # annotate annotated_image = image_bgr.copy() if output.size > 0: # ensure something is found for box in output: box_top_left = int(box[0]), int(box[1]) box_bottom_right = int(box[2]), int(box[3]) text_origin = int(box[0]), int(box[3]) border_color = (50, 0, 100) text_color = (255, 255, 255) font_scale = 0.9 thickness = 1 # bounding box cv2.rectangle( annotated_image, box_top_left, box_bottom_right, border_color, thickness=5, lineType=cv2.LINE_8, ) plt.imshow(cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)) plt.show() if __name__ == "__main__": main()
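The script declares `text_color` and `font_scale` but never draws text; a natural extension is to label each box with `cv2.putText`. A sketch (the label string is a placeholder, since whether class ids or scores accompany the box coordinates depends on how the model was exported):

```python
import cv2

def draw_label(annotated_image, box, label="fruit"):
    # Anchor the text at the bottom-left corner of the box
    text_origin = (int(box[0]), int(box[3]) + 20)
    cv2.putText(
        annotated_image, label, text_origin,
        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 1, cv2.LINE_AA,
    )
```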
4,261
Python
31.287879
85
0.680357
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_2.md
# Part 2: Training a model with synthetic data ## Setup training To see the training script's required parameters, run - `python train.py --help` - Example command: - `python train.py -d /home/omni.replicator_out/fruit_data_$DATE/ -o /home/model.pth -e 10` ## Visualize training We have included a visualization script to run after your first training. This will show how Omniverse generates the labeled data. To see the required parameters, run - `python visualize.py --help` - Example command: - `python visualize.py -d /home/$USER/omni.replicator_out/fruit_data_$DATE -o /home/$USER -n 0` ## Export model - To see the export script's required parameters, run - `python export.py --help` - Example command; make sure to save to the `models/fasterrcnn_resnet50/1` directory - `python export.py -d /home/out.pth -o /home/models/fasterrcnn_resnet50/1`
874
Markdown
38.772726
158
0.744851
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_3.md
# Part 3: Deploy model to Triton ## Start triton server When we start the server we want our model to be properly located in the `/models/fasterrcnn_resnet50/1` folder. `sudo docker run --gpus=1 --rm -p9000:8000 -p9001:8001 -p9002:8002 -v /home/$USER/sdg_workflow/models/:/models nvcr.io/nvidia/tritonserver:23.01-py3 tritonserver --model-repository=/models` Once started, you should see: ``` +---------------------+---------+--------+ | Model | Version | Status | +---------------------+---------+--------+ | fasterrcnn_resnet50 | 1 | READY | +---------------------+---------+--------+ ``` ## Start triton client In another terminal window, with your server running, start your client - `sudo docker run -it --rm --net=host -v /home/$USER/sdg_workflow:/workspace nvcr.io/nvidia/tritonserver:23.01-py3-sdk` - To see the deploy script's required parameters, run - `python deploy.py --help` - Example command: - `python deploy.py -p /workspace/rgb_0.png`
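Before running the deploy script, the client container can confirm that the server and model are live. A small check, assuming the `tritonclient[grpc]` package that ships with the SDK image:

```python
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="0.0.0.0:9001")
print("server ready:", client.is_server_ready())
print("model ready:", client.is_model_ready("fasterrcnn_resnet50"))
```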
1,006
Markdown
34.964284
189
0.622266
NVIDIA-Omniverse/synthetic-data-examples/end-to-end-workflows/object_detection_fruit/docs/part_1.md
# Part 1: Generate synthetic data with Omniverse ## Install Dependencies In this section you can generate your synthetic data using the Omniverse GUI or as a headless version in your local terminal. Either option requires an Omniverse install. - [Install Omniverse Launcher](https://docs.omniverse.nvidia.com/prod_install-guide/prod_install-guide/overview.html#omniverse-install-guide) ## Omniverse Launcher & Code - [Install Omniverse Code](https://docs.omniverse.nvidia.com/prod_workflows/prod_workflows/extensions/environment_configuration.html#step-2-install-omniverse-code) from the `Exchange` tab within Omniverse Launcher ## Generate data in Omniverse GUI Copy the contents of the `generate_data_gui.py` script into the Script Editor tab in the bottom section of the Code window. Press the Run button or `ctrl+Enter` on your keyboard to load the scene in the Viewport. From there you can preview a single scene in the Replicator tab at the top by clicking Preview, or run the full script by clicking Run. If you make no changes to this script it will generate 100 frames. - From inside the Code GUI using the [script editor](https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_script-editor.html) - If using Linux, copy code from `generate_data_gui.py` into the Script Editor window - Execute code by clicking the `Run` button or pressing `ctrl+Enter` - To preview what the scene will look like, click `Replicator` then `Preview` in the top bar of your Omniverse Code window - When you are ready to generate all your data, click `Replicator` and then `Run`; this will generate the designated number of frames and drop the RGB, bounding box data, and labels into the desired folder ## Generate data headlessly Follow the documentation guidelines to launch a terminal in the correct folder location. The correct script to pass to your `--/omni/replicator/script` flag is `generate_data_headless.py`. This will generate and save the synthetic data in the same way as before, without utilizing the Omniverse GUI. - [How to run](https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/headless_example.html) - Script location: `/FruitBasketOVEReplicatorDemo/data_generation/code/generate_data_headless.py` - We need to locate `omni.code.replicator.sh`. To find it, look for where Omniverse Code is locally installed - Run (the script dictates where the output data is stored): `./omni.code.replicator.sh --no-window --/omni/replicator/script="/FruitBasketOVEReplicatorDemo/data_generation/code/generate_data_headless.py"`
2,562
Markdown
72.228569
411
0.79313
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/predict.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import utils import cv2 import torch if __name__ == "__main__": # Parse command line arguments parser = argparse.ArgumentParser() parser.add_argument( "engine", type=str, help="The file path of the TensorRT engine." ) parser.add_argument( "image", type=str, help="The file path of the image provided as input for inference." ) parser.add_argument( "--output", type=str, default=None, help="The path to output the inference visualization." ) parser.add_argument( "--inference-size", type=str, default="512x512", help="The height and width that the image is resized to for inference." " Denoted as (height)x(width)." ) parser.add_argument( "--peak-window", type=str, default="7x7", help="The size of the window used when finding local peaks. Denoted as " " (window_height)x(window_width)." ) parser.add_argument( '--peak-threshold', type=float, default=0.5, help="The heatmap threshold to use when finding peaks. Values must be " " larger than this value to be considered peaks." ) parser.add_argument( '--line-thickness', type=int, default=1, help="The line thickness for drawn boxes" ) args = parser.parse_args() # Parse inference height, width from arguments inference_size = tuple(int(x) for x in args.inference_size.split('x')) peak_window = tuple(int(x) for x in args.peak_window.split('x')) if args.output is None: output_path = '.'.join(args.image.split('.')[:-1]) + "_output.jpg" else: output_path = args.output # Create offset grid offset_grid = utils.make_offset_grid(inference_size).to("cuda") # Load model model = utils.load_trt_engine_wrapper( args.engine, input_names=["input"], output_names=["heatmap", "vectormap"] ) # Load image image = cv2.imread(args.image) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Pad and resize image (aspect ratio preserving resize) image, _, _ = utils.pad_resize(image, inference_size) with torch.no_grad(): # Format image for inference x = utils.format_bgr8_image(image) x = x.to("cuda") # Execute model heatmap, vectormap = model(x) # Scale and offset vectormap keypointmap = utils.vectormap_to_keypointmap( offset_grid, vectormap ) # Find local peaks peak_mask = utils.find_heatmap_peak_mask( heatmap, peak_window, args.peak_threshold ) # Extract keypoints at local peak keypoints = keypointmap[0][peak_mask[0, 0]] # Draw vis_image = utils.draw_box( image, keypoints, color=(118, 186, 0), thickness=args.line_thickness ) vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR) cv2.imwrite(output_path, vis_image)
3,833
Python
26.191489
98
0.610749
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/utils.py
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn.functional as F import numpy as np import cv2 import einops import tensorrt as trt import torch2trt from typing import Sequence BOX_EDGES = [ [0, 1], [1, 5], [5, 4], [4, 0], [2, 3], [3, 7], [7, 6], [6, 2], [0, 2], [1, 3], [4, 6], [5, 7] ] def make_offset_grid( size, stride=(1, 1) ): grid = torch.stack( torch.meshgrid( stride[0] * (torch.arange(size[0]) + 0.5), stride[1] * (torch.arange(size[1]) + 0.5) ), dim=-1 ) return grid def vectormap_to_keypointmap( offset_grid, vector_map, vector_scale: float = 1./256. ): vector_map = vector_map / vector_scale keypoint_map = einops.rearrange(vector_map, "b (k d) h w -> b h w k d", d=2) keypoint_map = keypoint_map + offset_grid[:, :, None, :] # yx -> xy keypoint_map = keypoint_map[..., [1, 0]] return keypoint_map def find_heatmap_peak_mask(heatmap, window=3, threshold=0.5): all_indices = torch.arange( heatmap.numel(), device=heatmap.device ) all_indices = all_indices.reshape(heatmap.shape) if isinstance(window, int): window = (window, window) values, max_indices = F.max_pool2d_with_indices( heatmap, kernel_size=window, stride=1, padding=(window[0] // 2, window[1] // 2) ) is_above_threshold = heatmap >= threshold is_max = max_indices == all_indices is_peak = is_above_threshold & is_max return is_peak def draw_box(image_bgr, keypoints, color=(118, 186, 0), thickness=1): num_objects = int(keypoints.shape[0]) for i in range(num_objects): keypoints_i = keypoints[i] kps_i = [(int(x), int(y)) for x, y in keypoints_i] edges = BOX_EDGES for e in edges: cv2.line( image_bgr, kps_i[e[0]], kps_i[e[1]], color, thickness=thickness ) return image_bgr def pad_resize(image, output_shape): ar_i = image.shape[1] / image.shape[0] ar_o = output_shape[1] / output_shape[0] # resize if ar_i > ar_o: w_i = output_shape[1] h_i = min(int(w_i / ar_i), output_shape[0]) else: h_i = output_shape[0] w_i = min(int(h_i * ar_i), output_shape[1]) # paste pad_left = (output_shape[1] - w_i) // 2 pad_top = (output_shape[0] - h_i) // 2 image_resize = cv2.resize(image, (w_i, h_i)) out = np.zeros_like( image, shape=(output_shape[0], output_shape[1], image.shape[2]) ) out[pad_top:pad_top + h_i, pad_left:pad_left + w_i] = image_resize pad = (pad_top, pad_left) scale = (image.shape[0] / h_i, image.shape[1] / w_i) return out, pad, scale def load_trt_engine(path: str): with trt.Logger() as logger, trt.Runtime(logger) as runtime: with open(path, 'rb') as f: engine_bytes = f.read() engine = runtime.deserialize_cuda_engine(engine_bytes) return engine def load_trt_engine_wrapper( path: str, input_names: Sequence, output_names: Sequence ): engine = load_trt_engine(path) wrapper = torch2trt.TRTModule( engine=engine, input_names=input_names, output_names=output_names ) return wrapper def format_bgr8_image(image, device="cuda"): x = torch.from_numpy(image) x = x.permute(2, 0, 1)[None, ...]
x = x.to(device) x = (x / 255 - 0.45) / 0.25 return x
4,290
Python
22.194594
98
0.577156
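The helpers above form a small preprocessing pipeline for the pallet model. A minimal sketch of how they might be chained, assuming this file is importable as `utils`, that the model expects a 256x256 input, and that a CUDA device is available (the image path is a placeholder):

```python
# Letterbox an image and convert it to a normalized NCHW tensor using the
# helpers above.
import cv2
import utils

image = cv2.imread("test_image.jpg")                    # HxWx3 BGR uint8
padded, pad, scale = utils.pad_resize(image, (256, 256))
x = utils.format_bgr8_image(padded)                     # 1x3x256x256 float tensor
print(x.shape, pad, scale)
```

The returned `pad` and `scale` values can then be used to map keypoints predicted in the letterboxed frame back to the original image coordinates.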
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/README.md
# SDG Pallet Model <img src="images/test_image_1_output.jpg" height="256"/> This repository contains code for performing optimized TensorRT inference with a pre-trained pallet detection model that was trained using synthetic data with [NVIDIA Omniverse Replicator](https://developer.nvidia.com/omniverse/replicator). The model takes a monocular RGB image as input and outputs pallet box estimates. Box estimates are defined per pallet side face, so a single pallet may have multiple box estimates. If you have any questions, please feel free to reach out by opening an issue! ## Instructions ### Step 1 - Install dependencies Assumes you've already set up your system with OpenCV, PyTorch and numpy. Install einops for some utility functions. ```bash pip3 install einops ``` Install [torch2trt](https://github.com/NVIDIA-AI-IOT/torch2trt). This is used for the ``TRTModule`` class which simplifies engine inference. ```bash git clone https://github.com/NVIDIA-AI-IOT/torch2trt cd torch2trt python3 setup.py develop ``` ### Step 2 - Download the ONNX model Download the pallet model ONNX file. | Model | Notes | Links | |-------|-------|-------| | pallet_model_v1_all | Trained for wood and other pallets (metal, plastic). | [onnx](https://drive.google.com/file/d/1Vsl7s5YhBFxkTkd3UYYgPWFCLNRm_O_Q/view?usp=share_link) | | pallet_model_v1_wood | Trained only for wood pallets. | [onnx](https://drive.google.com/file/d/1Fd1gS7NYkWHPhUn7iZLK43hLQ1qDkuvb/view?usp=share_link) | ### Step 3 - Build the TensorRT engine #### Option 1 (*recommended*) - Build the FP16 engine To build the FP16 engine, call the following: ```bash ./build_trt_fp16.sh <onnx_path> <engine_output_path> ``` #### Option 2 - Build the INT8 engine > The INT8 model instructions do not yet include calibration. Please only use > this model for throughput profiling. The accuracy is likely to vary from > FP32/FP16 models. However, once calibration is included, this may become > the recommended option given the improved throughput results. To build the INT8 engine, call the following: ```bash ./build_trt_int8.sh <onnx_path> <engine_output_path> ``` We hope to provide instructions for using the Deep Learning Accelerator (DLA) on Jetson AGX Orin, and for INT8 calibration, soon. ### Step 4 - Profile the engine To profile the engine with the ``trtexec`` tool, call the following: ```bash ./profile_engine.sh <engine_path> ``` Here are the results for model inference at 256x256 resolution, profiled on Jetson AGX Orin. <a id="throughput_results"/> | Precision | Throughput (FPS) | |-----------|------------------| | FP16 | 465 | | INT8 | 710 | Notes: - Called ``jetson_clocks`` before running - Using MAXN power mode by calling ``sudo nvpmodel -m0`` - Batch size 1 - ``--useSpinWait`` flag enabled to stabilize timings - ``--useCudaGraph`` flag enabled to use CUDA graph optimizations. CUDA graphs are not yet used in the predict function. ### Step 5 - Run inference on an example image ```bash python3 predict.py <engine_path> <image_path> --output=<output_path> ``` For more options: ``` python3 predict.py --help ``` ### Next Steps Try modifying the predict.py code to visualize inference on a live camera feed.
3,292
Markdown
27.634782
174
0.717801
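For the live-camera idea in the Next Steps section above, a rough starting sketch follows. This is not the repo's predict.py; the engine path and the "input"/"heatmap"/"vectormap" tensor names are assumptions that should be checked against the ONNX model:

```python
# Run the TensorRT engine on webcam frames; postprocessing of the raw outputs
# with the heatmap/vectormap helpers in utils.py is left as an exercise.
import cv2
import utils

model = utils.load_trt_engine_wrapper(
    "pallet_model.engine",
    input_names=["input"],
    output_names=["heatmap", "vectormap"],
)
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    padded, _, _ = utils.pad_resize(frame, (256, 256))
    x = utils.format_bgr8_image(padded)
    heatmap, vectormap = model(x)
    cv2.imshow("pallet", padded)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
```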
NVIDIA-Omniverse/synthetic-data-examples/training_examples/sdg_pallet_model/LICENSE.md
SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
654
Markdown
42.666664
96
0.801223
NVIDIA-Omniverse/IsaacSim-ros_workspaces/README.md
# Isaac Sim ROS & ROS2 Workspaces This repository contains three workspaces: `noetic_ws` (ROS Noetic), `foxy_ws` (ROS2 Foxy) and `humble_ws` (ROS2 Humble). [Click here for usage and installation instructions with Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_ros.html) When cloning this repository, all three workspaces are downloaded. Depending on which ROS distro you are using, follow the [setup instructions](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_ros.html#setting-up-workspaces) for building your specific workspace.
593
Markdown
83.857131
284
0.797639
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/isaac_tutorials/scripts/ros2_publisher.py
#!/usr/bin/env python3 # Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import rclpy from rclpy.node import Node from sensor_msgs.msg import JointState import numpy as np import time class TestROS2Bridge(Node): def __init__(self): super().__init__("test_ros2bridge") # Create the publisher. This publisher will publish a JointState message to the /joint_command topic. self.publisher_ = self.create_publisher(JointState, "joint_command", 10) # Create a JointState message self.joint_state = JointState() self.joint_state.name = [ "panda_joint1", "panda_joint2", "panda_joint3", "panda_joint4", "panda_joint5", "panda_joint6", "panda_joint7", "panda_finger_joint1", "panda_finger_joint2", ] num_joints = len(self.joint_state.name) # make sure Kit's editor is playing to receive messages self.joint_state.position = np.array([0.0] * num_joints, dtype=np.float64).tolist() self.default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4] # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement) self.max_joints = np.array(self.default_joints) + 0.5 self.min_joints = np.array(self.default_joints) - 0.5 # position control the robot to wiggle around each joint self.time_start = time.time() timer_period = 0.05 # seconds self.timer = self.create_timer(timer_period, self.timer_callback) def timer_callback(self): self.joint_state.header.stamp = self.get_clock().now().to_msg() joint_position = ( np.sin(time.time() - self.time_start) * (self.max_joints - self.min_joints) * 0.5 + self.default_joints ) self.joint_state.position = joint_position.tolist() # Publish the message to the topic self.publisher_.publish(self.joint_state) def main(args=None): rclpy.init(args=args) ros2_publisher = TestROS2Bridge() rclpy.spin(ros2_publisher) # Destroy the node explicitly ros2_publisher.destroy_node() rclpy.shutdown() if __name__ == "__main__": main()
2,662
Python
31.084337
119
0.644252
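A minimal companion node (sketch) can be useful to verify what the publisher above puts on `/joint_command`; this is standard rclpy and not part of the workspace:

```python
# Echo the joint names and positions published on /joint_command.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState


class JointCommandEcho(Node):
    def __init__(self):
        super().__init__("joint_command_echo")
        self.create_subscription(JointState, "joint_command", self.callback, 10)

    def callback(self, msg):
        self.get_logger().info(str(list(zip(msg.name, msg.position))))


def main(args=None):
    rclpy.init(args=args)
    node = JointCommandEcho()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
```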
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/carter_navigation.launch.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument from launch.actions import IncludeLaunchDescription from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration from launch_ros.actions import Node def generate_launch_description(): use_sim_time = LaunchConfiguration("use_sim_time", default="True") map_dir = LaunchConfiguration( "map", default=os.path.join( get_package_share_directory("carter_navigation"), "maps", "carter_warehouse_navigation.yaml" ), ) param_dir = LaunchConfiguration( "params_file", default=os.path.join( get_package_share_directory("carter_navigation"), "params", "carter_navigation_params.yaml" ), ) nav2_bringup_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch") rviz_config_dir = os.path.join(get_package_share_directory("carter_navigation"), "rviz2", "carter_navigation.rviz") return LaunchDescription( [ DeclareLaunchArgument("map", default_value=map_dir, description="Full path to map file to load"), DeclareLaunchArgument( "params_file", default_value=param_dir, description="Full path to param file to load" ), DeclareLaunchArgument( "use_sim_time", default_value="true", description="Use simulation (Omniverse Isaac Sim) clock if true" ), IncludeLaunchDescription( PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")), launch_arguments={"namespace": "", "use_namespace": "False", "rviz_config": rviz_config_dir}.items(), ), IncludeLaunchDescription( PythonLaunchDescriptionSource([nav2_bringup_launch_dir, "/bringup_launch.py"]), launch_arguments={"map": map_dir, "use_sim_time": use_sim_time, "params_file": param_dir}.items(), ), Node( package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node', remappings=[('cloud_in', ['/front_3d_lidar/point_cloud']), ('scan', ['/scan'])], parameters=[{ 'target_frame': 'front_3d_lidar', 'transform_tolerance': 0.01, 'min_height': -0.4, 'max_height': 1.5, 'angle_min': -1.5708, # -M_PI/2 'angle_max': 1.5708, # M_PI/2 'angle_increment': 0.0087, # M_PI/360.0 'scan_time': 0.3333, 'range_min': 0.05, 'range_max': 100.0, 'use_inf': True, 'inf_epsilon': 1.0, # 'concurrency_level': 1, }], name='pointcloud_to_laserscan' ) ] )
3,521
Python
42.481481
119
0.601534
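The launch file above exposes `map`, `params_file`, and `use_sim_time` as arguments, so it can be reused without modification. A hypothetical wrapper launch file (sketch) that includes it with the office map instead of the warehouse default:

```python
# Include carter_navigation.launch.py with a different map yaml.
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    pkg = get_package_share_directory("carter_navigation")
    return LaunchDescription([
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                os.path.join(pkg, "launch", "carter_navigation.launch.py")
            ),
            launch_arguments={
                "map": os.path.join(pkg, "maps", "carter_office_navigation.yaml")
            }.items(),
        )
    ])
```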
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/carter_navigation_individual.launch.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument, ExecuteProcess, IncludeLaunchDescription from launch.conditions import IfCondition from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration, PythonExpression, TextSubstitution from launch_ros.actions import Node def generate_launch_description(): # Get the launch directory nav2_launch_dir = os.path.join(get_package_share_directory("nav2_bringup"), "launch") # Create the launch configuration variables slam = LaunchConfiguration("slam") namespace = LaunchConfiguration("namespace") use_namespace = LaunchConfiguration("use_namespace") map_yaml_file = LaunchConfiguration("map") use_sim_time = LaunchConfiguration("use_sim_time") params_file = LaunchConfiguration("params_file") default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename") autostart = LaunchConfiguration("autostart") # Declare the launch arguments declare_namespace_cmd = DeclareLaunchArgument("namespace", default_value="", description="Top-level namespace") declare_use_namespace_cmd = DeclareLaunchArgument( "use_namespace", default_value="false", description="Whether to apply a namespace to the navigation stack" ) declare_slam_cmd = DeclareLaunchArgument("slam", default_value="False", description="Whether to run SLAM") declare_map_yaml_cmd = DeclareLaunchArgument( "map", default_value=os.path.join(nav2_launch_dir, "maps", "carter_warehouse_navigation.yaml"), description="Full path to map file to load", ) declare_use_sim_time_cmd = DeclareLaunchArgument( "use_sim_time", default_value="True", description="Use simulation (Isaac Sim) clock if true" ) declare_params_file_cmd = DeclareLaunchArgument( "params_file", default_value=os.path.join(nav2_launch_dir, "params", "nav2_params.yaml"), description="Full path to the ROS2 parameters file to use for all launched nodes", ) declare_bt_xml_cmd = DeclareLaunchArgument( "default_bt_xml_filename", default_value=os.path.join( get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml" ), description="Full path to the behavior tree xml file to use", ) declare_autostart_cmd = DeclareLaunchArgument( "autostart", default_value="true", description="Automatically startup the nav2 stack" ) bringup_cmd = IncludeLaunchDescription( PythonLaunchDescriptionSource(os.path.join(nav2_launch_dir, "bringup_launch.py")), launch_arguments={ "namespace": namespace, "use_namespace": use_namespace, "slam": slam, "map": map_yaml_file, "use_sim_time": use_sim_time, "params_file": params_file, "default_bt_xml_filename": default_bt_xml_filename, "autostart": autostart, }.items(), ) # Create the launch description and populate ld = LaunchDescription() # Declare the launch options ld.add_action(declare_namespace_cmd) ld.add_action(declare_use_namespace_cmd) ld.add_action(declare_slam_cmd) ld.add_action(declare_map_yaml_cmd) ld.add_action(declare_use_sim_time_cmd) ld.add_action(declare_params_file_cmd)
ld.add_action(declare_bt_xml_cmd) ld.add_action(declare_autostart_cmd) ld.add_action(bringup_cmd) return ld
4,076
Python
39.366336
120
0.711237
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_hospital.launch.py
## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. ## NVIDIA CORPORATION and its licensors retain all intellectual property ## and proprietary rights in and to this software, related documentation ## and any modifications thereto. Any use, reproduction, disclosure or ## distribution of this software and related documentation without an express ## license agreement from NVIDIA CORPORATION is strictly prohibited. """ Example for spawning multiple robots in Isaac Sim. This is an example of how to create a launch file for spawning multiple robots into Isaac Sim and launching multiple instances of the navigation stack, each controlling one robot. The robots co-exist in a shared environment and are controlled by independent nav stacks. """ import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument, ExecuteProcess, GroupAction, IncludeLaunchDescription, LogInfo from launch.conditions import IfCondition from launch.launch_description_sources import PythonLaunchDescriptionSource from launch.substitutions import LaunchConfiguration, TextSubstitution from launch_ros.actions import Node def generate_launch_description(): # Get the launch and rviz directories carter_nav2_bringup_dir = get_package_share_directory("carter_navigation") nav2_bringup_dir = get_package_share_directory("nav2_bringup") nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch") rviz_config_dir = os.path.join(carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz") # Names and poses of the robots robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}] # Common settings ENV_MAP_FILE = "carter_hospital_navigation.yaml" use_sim_time = LaunchConfiguration("use_sim_time", default="True") map_yaml_file = LaunchConfiguration("map") default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename") autostart = LaunchConfiguration("autostart") rviz_config_file = LaunchConfiguration("rviz_config") use_rviz = LaunchConfiguration("use_rviz") log_settings = LaunchConfiguration("log_settings", default="true") # Declare the launch arguments declare_map_yaml_cmd = DeclareLaunchArgument( "map", default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE), description="Full path to map file to load", ) declare_robot1_params_file_cmd = DeclareLaunchArgument( "carter1_params_file", default_value=os.path.join( carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_1.yaml" ), description="Full path to the ROS2 parameters file to use for robot1 launched nodes", ) declare_robot2_params_file_cmd = DeclareLaunchArgument( "carter2_params_file", default_value=os.path.join( carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_2.yaml" ), description="Full path to the ROS2 parameters file to use for robot2 launched nodes", ) declare_robot3_params_file_cmd = DeclareLaunchArgument( "carter3_params_file", default_value=os.path.join( carter_nav2_bringup_dir, "params", "hospital", "multi_robot_carter_navigation_params_3.yaml" ), description="Full path to the ROS2 parameters file to use for robot3 launched nodes", ) declare_bt_xml_cmd = DeclareLaunchArgument( "default_bt_xml_filename", default_value=os.path.join( get_package_share_directory("nav2_bt_navigator"), "behavior_trees", "navigate_w_replanning_and_recovery.xml" ), description="Full path to the behavior tree xml file to use", ) declare_autostart_cmd = DeclareLaunchArgument(
"autostart", default_value="True", description="Automatically startup the stacks" ) declare_rviz_config_file_cmd = DeclareLaunchArgument( "rviz_config", default_value=rviz_config_dir, description="Full path to the RVIZ config file to use." ) declare_use_rviz_cmd = DeclareLaunchArgument("use_rviz", default_value="True", description="Whether to start RVIZ") # Define commands for launching the navigation instances nav_instances_cmds = [] for robot in robots: params_file = LaunchConfiguration(robot["name"] + "_params_file") group = GroupAction( [ IncludeLaunchDescription( PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")), condition=IfCondition(use_rviz), launch_arguments={ "namespace": TextSubstitution(text=robot["name"]), "use_namespace": "True", "rviz_config": rviz_config_file, }.items(), ), IncludeLaunchDescription( PythonLaunchDescriptionSource( os.path.join(carter_nav2_bringup_dir, "launch", "carter_navigation_individual.launch.py") ), launch_arguments={ "namespace": robot["name"], "use_namespace": "True", "map": map_yaml_file, "use_sim_time": use_sim_time, "params_file": params_file, "default_bt_xml_filename": default_bt_xml_filename, "autostart": autostart, "use_rviz": "False", "use_simulator": "False", "headless": "False", }.items(), ), Node( package='pointcloud_to_laserscan', executable='pointcloud_to_laserscan_node', remappings=[('cloud_in', ['front_3d_lidar/point_cloud']), ('scan', ['scan'])], parameters=[{ 'target_frame': 'front_3d_lidar', 'transform_tolerance': 0.01, 'min_height': -0.4, 'max_height': 1.5, 'angle_min': -1.5708, # -M_PI/2 'angle_max': 1.5708, # M_PI/2 'angle_increment': 0.0087, # M_PI/360.0 'scan_time': 0.3333, 'range_min': 0.05, 'range_max': 100.0, 'use_inf': True, 'inf_epsilon': 1.0, # 'concurrency_level': 1, }], name='pointcloud_to_laserscan', namespace = robot["name"] ), LogInfo(condition=IfCondition(log_settings), msg=["Launching ", robot["name"]]), LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " map yaml: ", map_yaml_file]), LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " params yaml: ", params_file]), LogInfo( condition=IfCondition(log_settings), msg=[robot["name"], " behavior tree xml: ", default_bt_xml_filename], ), LogInfo( condition=IfCondition(log_settings), msg=[robot["name"], " rviz config file: ", rviz_config_file] ), LogInfo(condition=IfCondition(log_settings), msg=[robot["name"], " autostart: ", autostart]), ] ) nav_instances_cmds.append(group) # Create the launch description and populate ld = LaunchDescription() # Declare the launch options ld.add_action(declare_map_yaml_cmd) ld.add_action(declare_robot1_params_file_cmd) ld.add_action(declare_robot2_params_file_cmd) ld.add_action(declare_robot3_params_file_cmd) ld.add_action(declare_bt_xml_cmd) ld.add_action(declare_use_rviz_cmd) ld.add_action(declare_autostart_cmd) ld.add_action(declare_rviz_config_file_cmd) for simulation_instance_cmd in nav_instances_cmds: ld.add_action(simulation_instance_cmd) return ld
8,338
Python
42.432291
120
0.601823
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_office_navigation.yaml
image: carter_office_navigation.png resolution: 0.05 origin: [-29.975, -39.975, 0.0000] negate: 0 occupied_thresh: 0.65 free_thresh: 0.196
139
YAML
18.999997
35
0.733813
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_hospital_navigation.yaml
image: carter_hospital_navigation.png resolution: 0.05 origin: [-49.625, -4.675, 0.0000] negate: 0 occupied_thresh: 0.65 free_thresh: 0.196
140
YAML
19.142854
37
0.735714
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml
image: carter_warehouse_navigation.png resolution: 0.05 origin: [-11.975, -17.975, 0.0000] negate: 0 occupied_thresh: 0.65 free_thresh: 0.196
142
YAML
19.428569
38
0.739437
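The three map yamls above use the standard map_server fields: `resolution` is meters per pixel, `origin` is the world pose of the image's lower-left pixel, and the thresholds classify occupancy. A sketch of the world-to-pixel conversion these fields imply (the same math `obstacle_map.py` below uses; the image height here is an assumed value):

```python
# Convert a world pose in meters to (row, col) pixel indices for the
# warehouse map metadata above.
import math

resolution = 0.05                   # meters per pixel
origin = (-11.975, -17.975)         # world coordinates of the lower-left pixel
image_height = 720                  # assumed pixel height of the map image

def world_to_pixel(x, y):
    col = math.floor((x - origin[0]) / resolution)
    row = image_height - math.floor((y - origin[1]) / resolution)  # y flips
    return row, col

print(world_to_pixel(0.0, 0.0))
```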
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/setup.py
from setuptools import setup from glob import glob import os package_name = "isaac_ros_navigation_goal" setup( name=package_name, version="0.0.1", packages=[package_name, package_name + "/goal_generators"], data_files=[ ("share/ament_index/resource_index/packages", ["resource/" + package_name]), ("share/" + package_name, ["package.xml"]), (os.path.join("share", package_name, "launch"), glob("launch/*.launch.py")), ("share/" + package_name + "/assets", glob("assets/*")), ], install_requires=["setuptools"], zip_safe=True, maintainer="isaac sim", maintainer_email="[email protected]", description="Package to set goals for navigation stack.", license="NVIDIA Isaac ROS Software License", tests_require=["pytest"], entry_points={"console_scripts": ["SetNavigationGoal = isaac_ros_navigation_goal.set_goal:main"]}, )
906
Python
33.884614
102
0.651214
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/test/test_flake8.py
# Copyright 2017 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ament_flake8.main import main_with_errors import pytest @pytest.mark.flake8 @pytest.mark.linter def test_flake8(): rc, errors = main_with_errors(argv=[]) assert rc == 0, "Found %d code style errors / warnings:\n" % len(errors) + "\n".join(errors)
864
Python
35.041665
96
0.741898
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/launch/isaac_ros_navigation_goal.launch.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.substitutions import LaunchConfiguration from launch_ros.actions import Node def generate_launch_description(): map_yaml_file = LaunchConfiguration( "map_yaml_path", default=os.path.join( get_package_share_directory("isaac_ros_navigation_goal"), "assets", "carter_warehouse_navigation.yaml" ), ) goal_text_file = LaunchConfiguration( "goal_text_file_path", default=os.path.join(get_package_share_directory("isaac_ros_navigation_goal"), "assets", "goals.txt"), ) navigation_goal_node = Node( name="set_navigation_goal", package="isaac_ros_navigation_goal", executable="SetNavigationGoal", parameters=[ { "map_yaml_path": map_yaml_file, "iteration_count": 3, "goal_generator_type": "RandomGoalGenerator", "action_server_name": "navigate_to_pose", "obstacle_search_distance_in_meters": 0.2, "goal_text_file_path": goal_text_file, "initial_pose": [-6.4, -1.04, 0.0, 0.0, 0.0, 0.99, 0.02], } ], output="screen", ) return LaunchDescription([navigation_goal_node])
1,782
Python
35.387754
114
0.654882
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/obstacle_map.py
import numpy as np import yaml import os import math from PIL import Image class GridMap: def __init__(self, yaml_file_path): self.__get_meta_from_yaml(yaml_file_path) self.__get_raw_map() self.__add_max_range_to_meta() # print(self.__map_meta) def __get_meta_from_yaml(self, yaml_file_path): """ Reads map meta from the yaml file. Parameters ---------- yaml_file_path: path of the yaml file. """ with open(yaml_file_path, "r") as f: file_content = f.read() self.__map_meta = yaml.safe_load(file_content) self.__map_meta["image"] = os.path.join(os.path.dirname(yaml_file_path), self.__map_meta["image"]) def __get_raw_map(self): """ Reads the map image and generates the grid map.\n Grid map is a 2D boolean matrix where True=>occupied space & False=>Free space. """ img = Image.open(self.__map_meta.get("image")) img = np.array(img) # Anything greater than free_thresh is considered as occupied if self.__map_meta["negate"]: res = np.where((img / 255)[:, :, 0] > self.__map_meta["free_thresh"]) else: res = np.where(((255 - img) / 255)[:, :, 0] > self.__map_meta["free_thresh"]) self.__grid_map = np.zeros(shape=(img.shape[:2]), dtype=bool) for i in range(res[0].shape[0]): self.__grid_map[res[0][i], res[1][i]] = 1 def __add_max_range_to_meta(self): """ Calculates and adds the max value of pose in x & y direction to the meta. """ max_x = self.__grid_map.shape[1] * self.__map_meta["resolution"] + self.__map_meta["origin"][0] max_y = self.__grid_map.shape[0] * self.__map_meta["resolution"] + self.__map_meta["origin"][1] self.__map_meta["max_x"] = round(max_x, 2) self.__map_meta["max_y"] = round(max_y, 2) def __pad_obstacles(self, distance): pass def get_range(self): """ Returns the bounds of pose values in x & y direction.\n Returns ------- [List]:\n Where list[0][0]: min value in x direction list[0][1]: max value in x direction list[1][0]: min value in y direction list[1][1]: max value in y direction """ return [ [self.__map_meta["origin"][0], self.__map_meta["max_x"]], [self.__map_meta["origin"][1], self.__map_meta["max_y"]], ] def __transform_to_image_coordinates(self, point): """ Transforms a pose in meters to image pixel coordinates. Parameters ---------- Point: A point as a list, where list[0]=>pose.x and list[1]=>pose.y Returns ------- [Tuple]: tuple[0]=>pixel value in x direction, i.e. column index. tuple[1]=>pixel value in y direction, i.e. row index. """ p_x, p_y = point i_x = math.floor((p_x - self.__map_meta["origin"][0]) / self.__map_meta["resolution"]) i_y = math.floor((p_y - self.__map_meta["origin"][1]) / self.__map_meta["resolution"]) # because origin in yaml is at bottom left of image i_y = self.__grid_map.shape[0] - i_y return i_x, i_y def __transform_distance_to_pixels(self, distance): """ Converts the distance in meters to number of pixels based on the resolution. Parameters ---------- distance: value in meters Returns ------- [Integer]: number of pixels which represent the same distance. """ return math.ceil(distance / self.__map_meta["resolution"]) def __is_obstacle_in_distance(self, img_point, distance): """ Checks if any obstacle is in the vicinity of the given image point. Parameters ---------- img_point: pixel values of the point distance: distance in pixels within which there shouldn't be any obstacle. Returns ------- [Bool]: True if any obstacle is found, else False.
""" # need to make sure that patch xmin & ymin are >=0, # because of python's negative indexing capability row_start_idx = 0 if img_point[1] - distance < 0 else img_point[1] - distance col_start_idx = 0 if img_point[0] - distance < 0 else img_point[0] - distance # image point acts as the center of the square, where each side of square is of size # 2xdistance patch = self.__grid_map[row_start_idx : img_point[1] + distance, col_start_idx : img_point[0] + distance] obstacles = np.where(patch == True) return len(obstacles[0]) > 0 def is_valid_pose(self, point, distance=0.2): """ Checks if a given pose is "distance" away from a obstacle. Parameters ---------- point: pose in 2D space. where point[0]=pose.x and point[1]=pose.y distance: distance in meters. Returns ------- [Bool]: True if pose is valid else False """ assert len(point) == 2 img_point = self.__transform_to_image_coordinates(point) img_pixel_distance = self.__transform_distance_to_pixels(distance) # Pose is not valid if there is obstacle in the vicinity return not self.__is_obstacle_in_distance(img_point, img_pixel_distance)
5,443
Python
33.455696
113
0.553188
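A minimal usage sketch for `GridMap` above, assuming the package is importable and that the yaml (with its map image next to it) is on disk at the placeholder path:

```python
# Load a map and reject poses that are within 0.2 m of an obstacle.
from isaac_ros_navigation_goal.obstacle_map import GridMap

grid_map = GridMap("maps/carter_warehouse_navigation.yaml")
(x_min, x_max), (y_min, y_max) = grid_map.get_range()
print("map bounds:", x_min, x_max, y_min, y_max)
print("origin free?", grid_map.is_valid_pose([0.0, 0.0], distance=0.2))
```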
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/set_goal.py
import rclpy from rclpy.action import ActionClient from rclpy.node import Node from nav2_msgs.action import NavigateToPose from .obstacle_map import GridMap from .goal_generators import RandomGoalGenerator, GoalReader import sys from geometry_msgs.msg import PoseWithCovarianceStamped import time class SetNavigationGoal(Node): def __init__(self): super().__init__("set_navigation_goal") self.declare_parameters( namespace="", parameters=[ ("iteration_count", 1), ("goal_generator_type", "RandomGoalGenerator"), ("action_server_name", "navigate_to_pose"), ("obstacle_search_distance_in_meters", 0.2), ("frame_id", "map"), ("map_yaml_path", None), ("goal_text_file_path", None), ("initial_pose", None), ], ) self.__goal_generator = self.__create_goal_generator() action_server_name = self.get_parameter("action_server_name").value self._action_client = ActionClient(self, NavigateToPose, action_server_name) self.MAX_ITERATION_COUNT = self.get_parameter("iteration_count").value assert self.MAX_ITERATION_COUNT > 0 self.curr_iteration_count = 1 self.__initial_goal_publisher = self.create_publisher(PoseWithCovarianceStamped, "/initialpose", 1) self.__initial_pose = self.get_parameter("initial_pose").value self.__is_initial_pose_sent = True if self.__initial_pose is None else False def __send_initial_pose(self): """ Publishes the initial pose. This function is only called once, before sending any goal pose to the mission server. """ goal = PoseWithCovarianceStamped() goal.header.frame_id = self.get_parameter("frame_id").value goal.header.stamp = self.get_clock().now().to_msg() goal.pose.pose.position.x = self.__initial_pose[0] goal.pose.pose.position.y = self.__initial_pose[1] goal.pose.pose.position.z = self.__initial_pose[2] goal.pose.pose.orientation.x = self.__initial_pose[3] goal.pose.pose.orientation.y = self.__initial_pose[4] goal.pose.pose.orientation.z = self.__initial_pose[5] goal.pose.pose.orientation.w = self.__initial_pose[6] self.__initial_goal_publisher.publish(goal) def send_goal(self): """ Sends the goal to the action server. """ if not self.__is_initial_pose_sent: self.get_logger().info("Sending initial pose") self.__send_initial_pose() self.__is_initial_pose_sent = True # Assumption is that initial pose is set after publishing first time in this duration. # Can be changed to a more sophisticated way, e.g. the /particlecloud topic has no msg until # the initial pose is set. time.sleep(10) self.get_logger().info("Sending first goal") self._action_client.wait_for_server() goal_msg = self.__get_goal() if goal_msg is None: rclpy.shutdown() sys.exit(1) self._send_goal_future = self._action_client.send_goal_async( goal_msg, feedback_callback=self.__feedback_callback ) self._send_goal_future.add_done_callback(self.__goal_response_callback) def __goal_response_callback(self, future): """ Callback function to check the response (goal accepted/rejected) from the server.\n If the goal is rejected it stops the execution for now. (We could change this to resample the pose if rejected.) """ goal_handle = future.result() if not goal_handle.accepted: self.get_logger().info("Goal rejected :(") rclpy.shutdown() return self.get_logger().info("Goal accepted :)") self._get_result_future = goal_handle.get_result_async() self._get_result_future.add_done_callback(self.__get_result_callback) def __get_goal(self): """ Get the next goal from the goal generator. Returns ------- [NavigateToPose][goal] or None if the next goal couldn't be generated.
""" goal_msg = NavigateToPose.Goal() goal_msg.pose.header.frame_id = self.get_parameter("frame_id").value goal_msg.pose.header.stamp = self.get_clock().now().to_msg() pose = self.__goal_generator.generate_goal() # couldn't sample a pose which is not close to obstacles. Rare but might happen in dense maps. if pose is None: self.get_logger().error( "Could not generate next goal. Returning. Possible reasons for this error could be:" ) self.get_logger().error( "1. If you are using GoalReader then please make sure iteration count <= number of goals avaiable in file." ) self.get_logger().error( "2. If RandomGoalGenerator is being used then it was not able to sample a pose which is given distance away from the obstacles." ) return self.get_logger().info("Generated goal pose: {0}".format(pose)) goal_msg.pose.pose.position.x = pose[0] goal_msg.pose.pose.position.y = pose[1] goal_msg.pose.pose.orientation.x = pose[2] goal_msg.pose.pose.orientation.y = pose[3] goal_msg.pose.pose.orientation.z = pose[4] goal_msg.pose.pose.orientation.w = pose[5] return goal_msg def __get_result_callback(self, future): """ Callback to check result.\n It calls the send_goal() function in case current goal sent count < required goals count. """ # Nav2 is sending empty message for success as well as for failure. result = future.result().result self.get_logger().info("Result: {0}".format(result.result)) if self.curr_iteration_count < self.MAX_ITERATION_COUNT: self.curr_iteration_count += 1 self.send_goal() else: rclpy.shutdown() def __feedback_callback(self, feedback_msg): """ This is feeback callback. We can compare/compute/log while the robot is on its way to goal. """ # self.get_logger().info('FEEDBACK: {}\n'.format(feedback_msg)) pass def __create_goal_generator(self): """ Creates the GoalGenerator object based on the specified ros param value. """ goal_generator_type = self.get_parameter("goal_generator_type").value goal_generator = None if goal_generator_type == "RandomGoalGenerator": if self.get_parameter("map_yaml_path").value is None: self.get_logger().info("Yaml file path is not given. Returning..") sys.exit(1) yaml_file_path = self.get_parameter("map_yaml_path").value grid_map = GridMap(yaml_file_path) obstacle_search_distance_in_meters = self.get_parameter("obstacle_search_distance_in_meters").value assert obstacle_search_distance_in_meters > 0 goal_generator = RandomGoalGenerator(grid_map, obstacle_search_distance_in_meters) elif goal_generator_type == "GoalReader": if self.get_parameter("goal_text_file_path").value is None: self.get_logger().info("Goal text file path is not given. Returning..") sys.exit(1) file_path = self.get_parameter("goal_text_file_path").value goal_generator = GoalReader(file_path) else: self.get_logger().info("Invalid goal generator specified. Returning...") sys.exit(1) return goal_generator def main(): rclpy.init() set_goal = SetNavigationGoal() result = set_goal.send_goal() rclpy.spin(set_goal) if __name__ == "__main__": main()
7,971
Python
37.887805
144
0.605696
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_reader.py
from .goal_generator import GoalGenerator class GoalReader(GoalGenerator): def __init__(self, file_path): self.__file_path = file_path self.__generator = self.__get_goal() def generate_goal(self, max_num_of_trials=1000): try: return next(self.__generator) except StopIteration: return def __get_goal(self): for row in open(self.__file_path, "r"): yield list(map(float, row.strip().split(" ")))
486
Python
26.055554
58
0.584362
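`GoalReader` above yields one goal per line of the text file, parsed as space-separated floats in the order `set_goal.py` consumes them: `x y qx qy qz qw`. A sketch with a hypothetical file path:

```python
# Write a two-goal file and read it back with GoalReader.
from isaac_ros_navigation_goal.goal_generators import GoalReader

with open("/tmp/goals.txt", "w") as f:
    f.write("1.0 2.0 0.0 0.0 0.0 1.0\n")
    f.write("-3.5 0.5 0.0 0.0 0.707 0.707\n")

reader = GoalReader("/tmp/goals.txt")
print(reader.generate_goal())  # [1.0, 2.0, 0.0, 0.0, 0.0, 1.0]
print(reader.generate_goal())  # [-3.5, 0.5, 0.0, 0.0, 0.707, 0.707]
print(reader.generate_goal())  # None once the file is exhausted
```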
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/random_goal_generator.py
import numpy as np from .goal_generator import GoalGenerator class RandomGoalGenerator(GoalGenerator): """ Random goal generator. Parameters ---------- grid_map: GridMap Object distance: distance in meters to check vicinity for obstacles. """ def __init__(self, grid_map, distance): self.__grid_map = grid_map self.__distance = distance def generate_goal(self, max_num_of_trials=1000): """ Generate the goal. Parameters ---------- max_num_of_trials: maximum number of attempts to generate a valid pose. Returns ------- [List][Pose]: Pose in format [pose.x,pose.y,orientation.x,orientation.y,orientation.z,orientation.w] """ range_ = self.__grid_map.get_range() trial_count = 0 while trial_count < max_num_of_trials: x = np.random.uniform(range_[0][0], range_[0][1]) y = np.random.uniform(range_[1][0], range_[1][1]) orient_x = np.random.uniform(0, 1) orient_y = np.random.uniform(0, 1) orient_z = np.random.uniform(0, 1) orient_w = np.random.uniform(0, 1) if self.__grid_map.is_valid_pose([x, y], self.__distance): goal = [x, y, orient_x, orient_y, orient_z, orient_w] return goal trial_count += 1
1,405
Python
30.954545
107
0.560854
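Note that `RandomGoalGenerator` above samples the four quaternion components independently from [0, 1), so the resulting orientation is not a unit quaternion. A common alternative (a sketch, not what the package does) is to sample a planar heading and build the quaternion from it:

```python
# Sample a random yaw and convert it to a unit quaternion [qx, qy, qz, qw]
# for a rotation about the z axis.
import numpy as np

def sample_planar_orientation():
    yaw = np.random.uniform(-np.pi, np.pi)
    return [0.0, 0.0, float(np.sin(yaw / 2.0)), float(np.cos(yaw / 2.0))]

print(sample_planar_orientation())
```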
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/__init__.py
from .random_goal_generator import RandomGoalGenerator from .goal_reader import GoalReader
91
Python
29.666657
54
0.857143
NVIDIA-Omniverse/IsaacSim-ros_workspaces/foxy_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_generator.py
from abc import ABC, abstractmethod class GoalGenerator(ABC): """ Parent class for the goal generators. """ def __init__(self): pass @abstractmethod def generate_goal(self, max_num_of_trials=2000): """ Generate the goal. Parameters ---------- max_num_of_trials: maximum number of attempts to generate a valid pose. Returns ------- [List][Pose]: Pose in format [pose.x,pose.y,orientation.x,orientation.y,orientation.z,orientation.w] """ pass
582
Python
21.423076
107
0.580756
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/state.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/math/state.h" #include <Eigen/Core> #include <ros/assert.h> namespace cortex { namespace math { State::State(int n) : state(Eigen::VectorXd::Zero(2 * n)) {} State::State(const Eigen::VectorXd &x, const Eigen::VectorXd &xd) : State(x.size()) { ROS_ASSERT(x.size() == xd.size()); pos() = x; vel() = xd; } } // namespace math } // namespace cortex
820
C++
28.321428
85
0.717073
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/state.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <Eigen/Core> namespace cortex { namespace math { // Represents a vector s = (x, xd) \in \R^{2d} where d is the space dim. class State { public: State() = delete; explicit State(int n); // Initialize to the zero state (0,0)\in\R^n X \R^n. State(const Eigen::VectorXd &x, const Eigen::VectorXd &xd); Eigen::Ref<Eigen::VectorXd> pos() { return state.head(dim()); } Eigen::Ref<Eigen::VectorXd> vel() { return state.tail(dim()); } Eigen::Ref<Eigen::VectorXd> vector() { return state; } Eigen::Ref<const Eigen::VectorXd> pos() const { return state.head(dim()); } Eigen::Ref<const Eigen::VectorXd> vel() const { return state.tail(dim()); } Eigen::Ref<const Eigen::VectorXd> vector() const { return state; } int dim() const { return state.size() / 2; } // Returns one integration step forward. // // Equations: // x_next = x + dt xd // xd_next = xd + dt xdd State Step(double dt, const Eigen::VectorXd &xdd) { return State(pos() + dt * vel(), vel() + dt * xdd); } private: Eigen::VectorXd state; }; } // namespace math } // namespace cortex
1,558
C
30.816326
78
0.675866
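`State::Step` above is a single explicit Euler step on the stacked (x, xd) state. For intuition, the same update in a few lines of Python (numpy standing in for Eigen):

```python
# x_next = x + dt * xd, xd_next = xd + dt * xdd.
import numpy as np

def step(x, xd, xdd, dt):
    return x + dt * xd, xd + dt * xdd

x, xd = np.zeros(2), np.array([1.0, 0.0])
x, xd = step(x, xd, np.array([0.0, -9.8]), dt=0.01)
print(x, xd)
```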
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/interpolator.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ // Pure virtual base class interface for an interpolator. #pragma once #include <iostream> #include <list> #include <sstream> #include <string> #include <ros/assert.h> #include "cortex/math/interpolation/pos_vel_acc.h" #include "cortex/math/interpolation/time_scaled_interpolator.h" namespace cortex { namespace math { // Represents a generic interpolator interface giving an API of the form: // // 1. Add p = (q, qd, qdd) point at time t: // // interp.AddPt(t, p); // // 2. Evaluate at a given time t: // // auto p = interp.Eval(t); // auto p = interp(t); // // Deriving classes need to implement the pure virtual functions // // AddPt() and Eval() // // Deriving classes might add additional restrictions, such as monotonicity of add // times t (IncrementalInterpolator). template <class vec_t> class Interpolator { public: typedef vec_t VectorXx; virtual bool AddPt(double t, const PosVelAcc<VectorXx>& p, std::string* error_str = nullptr) = 0; virtual bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str) const = 0; // Asserting version. PosVelAccXd Eval(double t) const { std::string error_str; PosVelAccXd p; ROS_ASSERT_MSG(Eval(t, p, &error_str), "%s", error_str.c_str()); return p; } Eigen::VectorXd operator()(double t) const { return Eval(t).x; } }; } // namespace math } // namespace cortex
1,825
C
26.253731
99
0.706849
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/cubic_position_interpolator.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/math/interpolation/cubic_position_interpolator.h" #include <fstream> #include <string> #include <vector> #include <Eigen/Dense> namespace cortex { namespace math { // Returns true iff t \in [0,1]. inline bool InZeroOne(double t) { return 0 <= t && t <= 1; } // clang-format off #define CUBIC_POSITION_INTERP_MATRIX \ 0, 0, 0, 1, \ 0, 0, 1, 0, \ 0, 2, 0, 0, \ 1, 1, 1, 1 // clang-format on CubicPositionInterpolator1d::CubicPositionInterpolator1d(const PosVelAcc1d& p0, const PosVelAcc1d& p1, bool validate_interpolation_evals) : validate_interpolation_evals_(validate_interpolation_evals), A_((Eigen::MatrixXd(4, 4) << CUBIC_POSITION_INTERP_MATRIX).finished()), b_((Eigen::VectorXd(4) << p0.x, p0.xd, p0.xdd, p1.x).finished()), coeffs_(A_.colPivHouseholderQr().solve(b_)) {} bool CubicPositionInterpolator1d::Eval(double t, PosVelAcc1d& ret, std::string* error_str) const { if (validate_interpolation_evals_ && !InZeroOne(t)) { std::stringstream ss; ss << "t not in [0,1] (t = " << t << "). "; if (error_str) { *error_str += ss.str(); } return false; } auto a3 = coeffs_[0]; auto a2 = coeffs_[1]; auto a1 = coeffs_[2]; auto a0 = coeffs_[3]; std::vector<double> t_powers(4, 1); for (size_t i = 1; i < t_powers.size(); ++i) { t_powers[i] = t * t_powers[i - 1]; } auto x = a3 * t_powers[3] + a2 * t_powers[2] + a1 * t_powers[1] + a0; auto xd = 3. * a3 * t_powers[2] + 2. * a2 * t_powers[1] + a1; auto xdd = 6. * a3 * t_powers[1] + 2. * a2; ret = PosVelAcc1d(x, xd, xdd); return true; } } // namespace math } // namespace cortex
2,211
C++
30.6
98
0.61194
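The 4x4 system above pins the cubic's position, velocity, and acceleration at t = 0 and its position at t = 1, with coefficients ordered [a3, a2, a1, a0]. The same solve re-derived in numpy (a sketch with arbitrary boundary values):

```python
# Solve A c = b for the cubic a3*t^3 + a2*t^2 + a1*t + a0.
import numpy as np

A = np.array([[0.0, 0.0, 0.0, 1.0],   # p(0)   = a0
              [0.0, 0.0, 1.0, 0.0],   # p'(0)  = a1
              [0.0, 2.0, 0.0, 0.0],   # p''(0) = 2*a2
              [1.0, 1.0, 1.0, 1.0]])  # p(1)   = a3 + a2 + a1 + a0
b = np.array([0.0, 0.0, 0.0, 1.0])    # x0, xd0, xdd0, x1 (example values)
a3, a2, a1, a0 = np.linalg.solve(A, b)
t = 0.5
print(a3 * t**3 + a2 * t**2 + a1 * t + a0)  # 0.125 for this pure t^3 case
```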
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/cubic_position_interpolator.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <iostream> #include <list> #include <sstream> #include <string> #include <Eigen/Core> #include <ros/assert.h> #include "cortex/math/interpolation/pos_vel_acc.h" #include "cortex/math/interpolation/time_scaled_interpolator.h" #include "cortex/math/interpolation/trajectories.h" namespace cortex { namespace math { // One-dimensional cubic interpolating polynomial. Interpolates between (x0, // xd0) and (x1, xd1). class CubicPositionInterpolator1d { public: typedef double VectorXx; CubicPositionInterpolator1d() {} // Creates a cubic spline that interpolates between p0 and p1 at t = 0 and // 1, respectively. CubicPositionInterpolator1d(const PosVelAcc1d& p0, const PosVelAcc1d& p1, bool validate_interpolation_evals = false); // Evaluate the polynomial at t. If validate_interpolation_evals is set to // true, enforces that the evaluations are only interpolating, i.e. t is in // [0, 1]; fails if not. The interpolated value is returned in the ret return // parameter. On failure, returns false and sets the error string if it's // provided. bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const; // This version asserts on error. PosVelAcc1d Eval(double t) const { PosVelAcc1d ret; std::string error_str; ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str()); return ret; } double operator()(double t) const { auto p = Eval(t); return p.x; } // Accessor. const Eigen::VectorXd& coeffs() const { return coeffs_; } protected: bool validate_interpolation_evals_; const Eigen::MatrixXd A_; const Eigen::VectorXd b_; const Eigen::VectorXd coeffs_; }; template <class vec_t> MultiDimInterp<CubicPositionInterpolator1d, vec_t> CubicInterpolator( const PosVelAcc<vec_t>& p0, const PosVelAcc<vec_t>& p1, bool validate_interpolation_evals = false) { return MultiDimInterp<CubicPositionInterpolator1d, vec_t>(p0, p1, validate_interpolation_evals); } typedef MultiDimInterp<CubicPositionInterpolator1d, Eigen::VectorXd> CubicPositionInterpolatorXd; } // namespace math } // namespace cortex
2,646
C
30.511904
98
0.721088
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/incremental_interpolator.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <iostream> #include <list> #include <sstream> #include <string> #include <Eigen/Core> #include "cortex/math/interpolation/interpolator.h" #include "cortex/math/interpolation/pos_vel_acc.h" #include "cortex/math/interpolation/quintic_interpolator.h" // Note: an incremental interpolator is one that leverages monotonicity // assumptions on the evaluation times to continually grow the interpolator head // while removing stale segments from the tail. namespace cortex { namespace math { // Enables the interpolation of a sequence of (x, xd, xdd) way-points using // quintic polynomials for each region between points. Evaluations and adding // of new way points can be interleaved, although evaluations are expected to // be with monotonically increasing time. There's a notion of a "delay_buffer" // which enables points to be received and added with wall-clock time // simultaneous with wall-clock evaluations by evaluating at a fixed time // interval in the past. The delay buffer is the number of intervals in the past // to set that fixed time offset to. // // When interpolating between (x, xd, xdd) way points at a non unity dt // (i.e. each way point is dt seconds apart), we need to scale the xd and // xdd by dt and dt^2, respectively, when adding them and undo that scaling // when evaluating. Intuition: if dt is small, it's moving fast from one // point to the next. If we then interpolate pretending that it takes a // full second to get from one to the next, it's moving and accelerating // much much slower, so we need to scale by dt and dt^2. // // This can be more rigorously derived by looking at how time dilation scalars // propagate through the derivative expressions. class IncrementalInterpolator : public Interpolator<Eigen::VectorXd> { public: explicit IncrementalInterpolator(bool prune_history = true, bool validate_interpolation_evals = true); // Add a new waypoint; the time should be the current cycle time. Evals will // be offset into the past by delay_buffer number of intervals so that // incoming points can be added with the same time stamp as active // evaluations. bool AddPt(double t, const PosVelAccXd& p, std::string* error_str = nullptr) override; // Evaluates the interpolator at the given time. It uses a delay buffer to // offset the evaluations into the past so that points can be added at the // same time as evaluations and evaluations can be made after the latest // point safely as long as they're within the delay buffer (see description // above). // // This delay buffer functionality can also be implemented manually simply by // setting the delay_buffer to zero on construction and manually offsetting // evaluation points into the past. // // It's assumed the eval points are monotonically increasing. Fails if not. // The evaluation point is returned as ret. Returns true if successful and // false otherwise.
bool Eval(double t, PosVelAccXd& ret, std::string* error_str = nullptr) const override; using Interpolator<Eigen::VectorXd>::Eval; int num_intervals() const { return segment_interpolators_.size(); } bool IsReady(double t) const; protected: mutable std::list<TimeScaledInterpolatorXd> segment_interpolators_; bool is_first_; double prev_add_t_; PosVelAccXd prev_add_p_; bool validate_interpolation_evals_; bool prune_history_; }; typedef IncrementalInterpolator SequentialQuinticInterpolator; } // namespace math } // namespace cortex
4,004
C
39.867347
80
0.747502
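The time-dilation argument in the header comment above can be checked numerically: way points dt seconds apart are interpolated on a unit interval, so velocities scale by dt and accelerations by dt^2 on the way in and are unscaled on the way out. A sketch of the `Scale`/`Unscale` semantics:

```python
# Scaling world-time derivatives to unit time and back is lossless.
dt = 0.01
xd, xdd = 2.0, 5.0                    # world-time velocity / acceleration
xd_u, xdd_u = dt * xd, dt * dt * xdd  # Scale(dt): unit-time derivatives
print(xd_u, xdd_u)                    # 0.02, 0.0005
print(xd_u / dt, xdd_u / (dt * dt))   # back to (approximately) 2.0, 5.0
```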
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/incremental_interpolator.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/math/interpolation/incremental_interpolator.h" #include <fstream> #include <string> #include "cortex/math/interpolation/quintic_interpolator.h" namespace cortex { namespace math { IncrementalInterpolator::IncrementalInterpolator(bool prune_history, bool validate_interpolation_evals) : is_first_(true), validate_interpolation_evals_(validate_interpolation_evals), prune_history_(prune_history) {} bool IncrementalInterpolator::AddPt(double t, const PosVelAccXd& p, std::string* error_str) { if (is_first_) { prev_add_t_ = t; prev_add_p_ = p; is_first_ = false; return true; } if (t <= prev_add_t_) { if (error_str) { std::stringstream ss; ss << "Add times nonmonotonic -- t = " << t << " vs prev t = " << prev_add_t_; *error_str += ss.str(); } return false; } segment_interpolators_.push_back( TimeScaledInterpolatorXd(prev_add_t_, prev_add_p_, t, p, validate_interpolation_evals_)); prev_add_t_ = t; prev_add_p_ = p; return true; } bool IncrementalInterpolator::Eval(double t, PosVelAccXd& ret, std::string* error_str) const { if (segment_interpolators_.size() == 0) { if (error_str) { *error_str += "No interpolators found."; } return false; } auto earliest_time = segment_interpolators_.front().t0(); auto latest_time = segment_interpolators_.back().t1(); if (validate_interpolation_evals_ && t < earliest_time) { if (error_str) { std::stringstream ss; ss << "Nonmonotonic evals -- t = " << t << ", earliest time segment starts with t0 = " << earliest_time; *error_str += ss.str(); } return false; } if (validate_interpolation_evals_ && t > latest_time) { if (error_str) { std::stringstream ss; ss << "Future eval (overflow) -- t = " << t << ", latest time segment ends with t1 = " << latest_time; *error_str += ss.str(); } return false; } // Find the first segment whose upper time bound is greater than the current // time. Since the segments are contiguous and monotonically increasing, we're // guaranteed that t \in [t0, t1] of this segment. TimeScaledInterpolatorXd* active_interpolator = nullptr; for (auto it = segment_interpolators_.begin(); it != segment_interpolators_.end();) { if (t <= it->t1()) { active_interpolator = &(*it); break; } else { if (prune_history_) { it = segment_interpolators_.erase(it); } else { ++it; } } } if (!active_interpolator && !validate_interpolation_evals_) { active_interpolator = &segment_interpolators_.back(); } if (active_interpolator) { return active_interpolator->Eval(t, ret, error_str); } else { if (error_str) { std::stringstream ss; ss << "Eval time in the future -- t = " << t << " vs latest segment time = " << latest_time; *error_str += ss.str(); } return false; } } bool IncrementalInterpolator::IsReady(double t) const { return (segment_interpolators_.size() > 0) && (t >= segment_interpolators_.front().t0()); } } // namespace math } // namespace cortex
3,657
C++
30
98
0.631939
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/pos_vel_acc.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <iostream> #include <vector> #include <Eigen/Core> #include <ros/assert.h> namespace cortex { namespace math { struct PosVelAcc1d; // Represents a triple of simultaneous position, velocity, and acceleration. template <class vec_t> struct PosVelAcc { vec_t x; vec_t xd; vec_t xdd; int dim() const { return x.size(); } PosVelAcc<vec_t> Scale(double dt) const { return PosVelAcc<vec_t>(x, dt * xd, (dt * dt) * xdd); } PosVelAcc<vec_t> Unscale(double dt) const { return PosVelAcc<vec_t>(x, xd / dt, xdd / (dt * dt)); } PosVelAcc() {} // Initialize to all zeros with a particular dimensionality. explicit PosVelAcc(int d) { x = vec_t::Zero(d); xd = vec_t::Zero(d); xdd = vec_t::Zero(d); } // Initialize to specific (x, xd, xdd). Each vector must be the same // dimension, otherwise assert. PosVelAcc(const vec_t& x, const vec_t& xd, const vec_t& xdd); // Join a collection of one-dimensional PosVelAcc1d's into a single object of // this type. Aggregates the individual dimensions into vectors, x, xd, xdd. static PosVelAcc Join(const std::vector<PosVelAcc1d>& dims); }; // One dimensional variant of pos, vel, acc. struct PosVelAcc1d { double x; double xd; double xdd; PosVelAcc1d() {} PosVelAcc1d(double x, double xd, double xdd) : x(x), xd(xd), xdd(xdd) {} // Slice a multi-dimensional pos, vel, acc into a one-dimensional variant // containing only the specified dimension. template <class vec_t> static PosVelAcc1d Slice(const PosVelAcc<vec_t>& p, int dim) { return PosVelAcc1d(p.x[dim], p.xd[dim], p.xdd[dim]); } }; //============================================================================== // Template implementations //============================================================================== template <class vec_t> PosVelAcc<vec_t>::PosVelAcc(const vec_t& x, const vec_t& xd, const vec_t& xdd) : x(x), xd(xd), xdd(xdd) { ROS_ASSERT(x.size() == xd.size()); ROS_ASSERT(x.size() == xdd.size()); } template <class vec_t> PosVelAcc<vec_t> PosVelAcc<vec_t>::Join(const std::vector<PosVelAcc1d>& dims) { PosVelAcc<vec_t> p(dims.size()); for (size_t i = 0; i < dims.size(); ++i) { p.x[i] = dims[i].x; p.xd[i] = dims[i].xd; p.xdd[i] = dims[i].xdd; } return p; } // Add specialization for VectorXd for convenience. typedef PosVelAcc<Eigen::VectorXd> PosVelAccXd; } // namespace math } // namespace cortex inline std::ostream& operator<<(std::ostream& os, const cortex::math::PosVelAcc1d& p) { os << " x = " << p.x << ", xd = " << p.xd << ", xdd = " << p.xdd; return os; } template <class vec_t> std::ostream& operator<<(std::ostream& os, const cortex::math::PosVelAcc<vec_t>& p) { os << "x = " << p.x.transpose() << "\n"; os << "xd = " << p.xd.transpose() << "\n"; os << "xdd = " << p.xdd.transpose() << "\n"; return os; }
3,333
C
28.504425
99
0.624662
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/quartic_interpolator.cpp
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "cortex/math/interpolation/quartic_interpolator.h"

#include <fstream>
#include <sstream>
#include <string>
#include <vector>

#include <Eigen/Dense>

namespace cortex {
namespace math {

// Returns true iff t \in [0,1].
inline bool InZeroOne(double t) { return 0 <= t && t <= 1; }

// clang-format off
#define QUARTIC_INTERP_MATRIX \
    0, 0, 0, 0, 1, \
    0, 0, 0, 1, 0, \
    0, 0, 2, 0, 0, \
    1, 1, 1, 1, 1, \
    4, 3, 2, 1, 0
// clang-format on

QuarticInterpolator1d::QuarticInterpolator1d(const PosVelAcc1d& p0,
                                             const PosVelAcc1d& p1,
                                             bool validate_interpolation_evals)
    : validate_interpolation_evals_(validate_interpolation_evals),
      A_((Eigen::MatrixXd(5, 5) << QUARTIC_INTERP_MATRIX).finished()),
      b_((Eigen::VectorXd(5) << p0.x, p0.xd, p0.xdd, p1.x, p1.xd).finished()),
      coeffs_(A_.colPivHouseholderQr().solve(b_)) {}

bool QuarticInterpolator1d::Eval(double t, PosVelAcc1d& ret, std::string* error_str) const {
  if (validate_interpolation_evals_ && !InZeroOne(t)) {
    std::stringstream ss;
    ss << "t not in [0,1] (t = " << t << "). ";
    if (error_str) {
      *error_str += ss.str();
    }
    return false;
  }

  auto a4 = coeffs_[0];
  auto a3 = coeffs_[1];
  auto a2 = coeffs_[2];
  auto a1 = coeffs_[3];
  auto a0 = coeffs_[4];

  std::vector<double> t_powers(5, 1);
  for (size_t i = 1; i < t_powers.size(); ++i) {
    t_powers[i] = t * t_powers[i - 1];
  }

  auto x = a4 * t_powers[4] + a3 * t_powers[3] + a2 * t_powers[2] + a1 * t_powers[1] + a0;
  auto xd = 4. * a4 * t_powers[3] + 3. * a3 * t_powers[2] + 2. * a2 * t_powers[1] + a1;
  auto xdd = 12. * a4 * t_powers[2] + 6. * a3 * t_powers[1] + 2. * a2;
  ret = PosVelAcc1d(x, xd, xdd);

  return true;
}

}  // namespace math
}  // namespace cortex
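The rows of QUARTIC_INTERP_MATRIX are the five boundary constraints for p(t) = a4 t⁴ + a3 t³ + a2 t² + a1 t + a0 on [0,1]: p(0) = a0, p'(0) = a1, p''(0) = 2 a2, p(1) = a4 + a3 + a2 + a1 + a0, and p'(1) = 4 a4 + 3 a3 + 2 a2 + a1. A small sanity-check sketch (not from the original sources): the boundary values of p(t) = t⁴ should reproduce that polynomial exactly.

// Illustrative sketch: the boundary data of p(t) = t^4 is (0,0,0) at t=0 and
// (1,4) at t=1, so the solved quartic must match t^4 everywhere.
#include <cassert>
#include <cmath>
#include <string>
#include "cortex/math/interpolation/quartic_interpolator.h"

int main() {
  using cortex::math::PosVelAcc1d;
  // p(0) = 0, p'(0) = 0, p''(0) = 0; p(1) = 1, p'(1) = 4 (p1.xdd is unused).
  cortex::math::QuarticInterpolator1d interp(PosVelAcc1d(0., 0., 0.),
                                             PosVelAcc1d(1., 4., 0.),
                                             /*validate_interpolation_evals=*/true);
  PosVelAcc1d p;
  std::string err;
  bool ok = interp.Eval(0.5, p, &err);
  assert(ok);
  assert(std::abs(p.x - 0.0625) < 1e-9);  // 0.5^4
  assert(std::abs(p.xd - 0.5) < 1e-9);    // 4 * 0.5^3
  assert(std::abs(p.xdd - 3.0) < 1e-9);   // 12 * 0.5^2
  return 0;
}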
2,280
C++
30.680555
92
0.603509
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/time_scaled_interpolator.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <iostream>
#include <list>
#include <sstream>
#include <string>

#include <Eigen/Core>
#include <ros/assert.h>

#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/trajectories.h"

namespace cortex {
namespace math {

// Represents an interpolator (templated on the underlying polynomial type,
// e.g. quintic) interpolating between two end points at specific times. If
// validate_interpolation_evals is true, valid evals are only those within the
// time range of the two end points.
template <class traj_t>
class TimeScaledInterpolator {
 public:
  typedef typename TimeScaledTraj<traj_t>::VectorXx VectorXx;

  TimeScaledInterpolator() {}
  TimeScaledInterpolator(double t0,
                         const PosVelAcc<VectorXx>& p0,
                         double t1,
                         const PosVelAcc<VectorXx>& p1,
                         bool validate_interpolation_evals = false)
      : t0_(t0),
        p0_(p0),
        t1_(t1),
        p1_(p1),
        time_range_(t1 - t0),
        scaled_traj_(
            traj_t(p0.Scale(time_range_), p1.Scale(time_range_), validate_interpolation_evals),
            time_range_),
        validate_interpolation_evals_(validate_interpolation_evals) {}

  bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str = nullptr) const {
    if (validate_interpolation_evals_ && !(t0_ <= t && t <= t1_)) {
      if (error_str) {
        std::stringstream ss;
        ss << "t = " << t << " outside valid range [" << t0_ << ", " << t1_ << "]";
        *error_str += ss.str();
      }
      return false;
    }
    return scaled_traj_.Eval((t - t0_) / time_range_, ret, error_str);
  }

  PosVelAcc<VectorXx> Eval(double t) const {
    std::string error_str;
    PosVelAcc<VectorXx> ret;
    ROS_ASSERT_MSG(scaled_traj_.Eval((t - t0_) / time_range_, ret, &error_str),
                   "%s",
                   error_str.c_str());
    return ret;
  }

  // Performs a time shifted eval since the underlying trajectory starts at t0_.
  VectorXx operator()(double t) const { return Eval(t).x; }

  double t0() const { return t0_; }
  const PosVelAcc<VectorXx>& p0() const { return p0_; }
  double t1() const { return t1_; }
  const PosVelAcc<VectorXx>& p1() const { return p1_; }

 protected:
  double t0_;
  PosVelAcc<VectorXx> p0_;
  double t1_;
  PosVelAcc<VectorXx> p1_;
  double time_range_;
  TimeScaledTraj<traj_t> scaled_traj_;
  bool validate_interpolation_evals_;
};

}  // namespace math
}  // namespace cortex
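A usage sketch (not from the original sources). It assumes TimeScaledInterpolatorXd is the Eigen::VectorXd quintic specialization referenced by incremental_interpolator.cpp, presumably defined in quintic_interpolator.h; the times and values are illustrative:

// Illustrative sketch: interpolate between two timed end points and evaluate
// at a wall-clock-style time inside the segment.
#include <iostream>
#include <string>
#include "cortex/math/interpolation/quintic_interpolator.h"

int main() {
  cortex::math::PosVelAccXd p0(2), p1(2);  // Zero-initialized, 2-dimensional.
  p1.x << 1., -1.;  // Move from the origin to (1, -1) with zero end derivatives.

  // Interpolate over the real-time window [10.0 s, 12.0 s].
  cortex::math::TimeScaledInterpolatorXd interp(10.0, p0, 12.0, p1,
                                                /*validate_interpolation_evals=*/true);

  cortex::math::PosVelAccXd p;
  std::string err;
  if (interp.Eval(11.0, p, &err)) {  // Midpoint of the segment.
    std::cout << "x(11.0) = " << p.x.transpose() << std::endl;
  }
  return 0;
}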
2,908
C
30.967033
95
0.644085
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/trajectories.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <cmath>
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <vector>

#include <Eigen/Core>
#include <ros/assert.h>

#include "cortex/math/interpolation/pos_vel_acc.h"

namespace cortex {
namespace math {

// Represents a multidimensional trajectory as a collection of 1D trajectories.
template <class traj1d_t, class vec_t>
class MultiDimTraj {
 public:
  typedef vec_t VectorXx;

  MultiDimTraj() {}
  explicit MultiDimTraj(const std::vector<traj1d_t>& trajectories)
      : trajectories_(trajectories) {}

  bool Eval(double t, PosVelAcc<vec_t>& ret, std::string* error_str) const;

  // This version asserts on error.
  PosVelAccXd Eval(double t) const {
    PosVelAccXd ret;
    std::string error_str;
    ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
    return ret;
  }

  int dim() const { return trajectories_.size(); }

 protected:
  std::vector<traj1d_t> trajectories_;
};

// Creates a vector of 1D interpolators for each dimension of the given
// PosVelAcc end-point objects. If validate_interpolation_evals is true, the
// resulting interpolators will validate that the query points are between 0
// and 1.
template <class interp1d_t, class vec_t>
std::vector<interp1d_t> MakeDimInterps(const PosVelAcc<vec_t>& p0,
                                       const PosVelAcc<vec_t>& p1,
                                       bool validate_interpolation_evals) {
  ROS_ASSERT(p0.dim() == p1.dim());
  std::vector<interp1d_t> trajectories;
  for (int i = 0; i < p0.dim(); ++i) {
    trajectories.push_back(interp1d_t(
        PosVelAcc1d::Slice(p0, i), PosVelAcc1d::Slice(p1, i), validate_interpolation_evals));
  }
  return trajectories;
}

// Represents a multi-dimensional interpolator interpolating between a pair of
// PosVelAcc points.
template <class interp1d_t, class vec_t>
class MultiDimInterp : public MultiDimTraj<interp1d_t, vec_t> {
 public:
  typedef vec_t VectorXx;

  MultiDimInterp() {}
  MultiDimInterp(const PosVelAcc<vec_t>& p0,
                 const PosVelAcc<vec_t>& p1,
                 bool validate_interpolation_evals = false)
      : MultiDimTraj<interp1d_t, vec_t>(
            MakeDimInterps<interp1d_t, vec_t>(p0, p1, validate_interpolation_evals)) {}
};

// Represents a trajectory whose time is scaled by some scaling factor. The
// semantics of scaling is that if the original time interval were [0,1] the
// new time interval would be [0, scalar], i.e. the original trajectory on
// [0,1] would be stretched to fit across the entire interval [0, scalar].
template <class traj_t>
class TimeScaledTraj {
 public:
  typedef typename traj_t::VectorXx VectorXx;

  TimeScaledTraj() {}
  TimeScaledTraj(const traj_t& traj, double scalar) : traj_(traj), scalar_(scalar) {}

  VectorXx operator()(double t) const { return Eval(t).x; }

  PosVelAcc<typename traj_t::VectorXx> Eval(double t) const {
    return traj_.Eval(t).Unscale(scalar_);
  }

  bool Eval(double t, PosVelAcc<VectorXx>& ret, std::string* error_str = nullptr) const {
    PosVelAcc<VectorXx> scaled_ret;
    if (!traj_.Eval(t, scaled_ret, error_str)) {
      return false;
    }
    ret = scaled_ret.Unscale(scalar_);
    return true;
  }

  double scalar() const { return scalar_; }

 protected:
  traj_t traj_;
  double scalar_;
};

template <class traj_t>
TimeScaledTraj<traj_t> TimeScaleTraj(const traj_t& traj, double scalar) {
  return TimeScaledTraj<traj_t>(traj, scalar);
}

// traj_t should have an evaluation operator:
//
//   vec_t operator()(double t) const
//
// This function performs finite-differencing to find the velocity.
// traj_t should also have a vector type:
//
//   typename traj_t::VectorXx
//
template <class traj_t>
typename traj_t::VectorXx CentralFdVel(const traj_t& traj, double t, double dt = 1e-5) {
  auto x_up = traj(t + dt / 2);
  auto x_down = traj(t - dt / 2);
  return (x_up - x_down) / dt;
}

template <class traj_t>
typename traj_t::VectorXx FdAcc(const traj_t& traj, double t, double dt = 1e-5) {
  auto x = traj(t);
  auto x_up = traj(t + dt / 2);
  auto x_down = traj(t - dt / 2);
  return (x_up + x_down - 2 * x) / (dt * dt / 4);
}

// Converts a trajectory into a velocity trajectory using finite-differencing.
template <class traj_t>
class FdVelTraj {
 public:
  typedef typename traj_t::VectorXx VectorXx;

  explicit FdVelTraj(const traj_t& traj, double dt = 1e-5) : traj_(traj), dt_(dt) {}

  VectorXx operator()(double t) const { return CentralFdVel(traj_, t, dt_); }

 protected:
  traj_t traj_;
  double dt_;
};

template <class traj_t>
FdVelTraj<traj_t> ToFdVelTraj(const traj_t& traj) {
  return FdVelTraj<traj_t>(traj);
}

// Converts a trajectory into an acceleration trajectory using
// finite-differencing.
template <class traj_t>
class FdAccTraj {
 public:
  typedef typename traj_t::VectorXx VectorXx;

  explicit FdAccTraj(const traj_t& traj, double dt = 1e-5) : traj_(traj), dt_(dt) {}

  VectorXx operator()(double t) const { return FdAcc(traj_, t, dt_); }

 protected:
  traj_t traj_;
  double dt_;
};

template <class traj_t>
FdAccTraj<traj_t> ToFdAccTraj(const traj_t& traj) {
  return FdAccTraj<traj_t>(traj);
}

// Represents f(t) = c1 * sin(c2 * (t - t0)) + c3
//
// Derivatives:
//   f'  = c1 * c2 * cos(c2 * (t - t0))
//   f'' = -c1 * c2^2 * sin(c2 * (t - t0))
class SinusoidalTraj {
 public:
  typedef double VectorXx;

  SinusoidalTraj(double c1, double c2, double c3, double t0)
      : c1_(c1), c2_(c2), c3_(c3), t0_(t0) {}

  PosVelAcc1d Eval(double t) const {
    std::string error_str;
    PosVelAcc1d ret;
    ROS_ASSERT_MSG(Eval(t, ret, &error_str), "%s", error_str.c_str());
    return ret;
  }

  bool Eval(double t, PosVelAcc1d& ret, std::string* error_str = nullptr) const {
    // Suppress warnings that "error_str" is never written to.
    (void)error_str;
    auto t_affine = c2_ * (t - t0_);
    auto x = c1_ * sin(t_affine) + c3_;
    auto xd = c1_ * c2_ * cos(t_affine);
    auto xdd = -c1_ * c2_ * c2_ * sin(t_affine);
    ret = PosVelAcc1d(x, xd, xdd);
    return true;
  }

  double operator()(double t) const { return Eval(t).x; }

 protected:
  double c1_, c2_, c3_, t0_;
};

//==============================================================================
// Template implementations
//==============================================================================

template <class traj1d_t, class vec_t>
bool MultiDimTraj<traj1d_t, vec_t>::Eval(double t,
                                         PosVelAcc<vec_t>& ret,
                                         std::string* error_str) const {
  std::vector<PosVelAcc1d> dim_evals(dim());
  for (size_t i = 0; i < trajectories_.size(); ++i) {
    if (!trajectories_[i].Eval(t, dim_evals[i], error_str)) {
      return false;
    }
  }
  ret = PosVelAcc<vec_t>::Join(dim_evals);
  return true;
}

}  // namespace math
}  // namespace cortex
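A verification sketch (not from the original sources) using only types defined in this header: the finite-difference wrappers should closely match SinusoidalTraj's analytic derivatives.

// Illustrative sketch: compare analytic vs. finite-differenced derivatives.
#include <iostream>
#include "cortex/math/interpolation/trajectories.h"

int main() {
  // f(t) = 2 sin(3 (t - 0.1)) + 0.5
  cortex::math::SinusoidalTraj traj(2., 3., .5, .1);

  auto fd_vel = cortex::math::ToFdVelTraj(traj);
  auto fd_acc = cortex::math::ToFdAccTraj(traj);

  double t = 1.7;
  auto analytic = traj.Eval(t);
  std::cout << "xd:  analytic = " << analytic.xd << ", fd = " << fd_vel(t) << std::endl;
  std::cout << "xdd: analytic = " << analytic.xdd << ", fd = " << fd_acc(t) << std::endl;
  return 0;
}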
7,290
C
28.518219
100
0.64513
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/math/interpolation/smoothing_incremental_interpolator.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

// Simple and generic smoothing incremental interpolator that creates each new
// polynomial segment between the latest evaluated point (the point sent to
// control) and the incoming point. This adds a level of robustness to noise
// governed by the size of the eval shift window.

#pragma once

#include <iostream>
#include <list>
#include <sstream>
#include <string>

#include "cortex/math/interpolation/interpolator.h"
#include "cortex/math/interpolation/pos_vel_acc.h"
#include "cortex/math/interpolation/time_scaled_interpolator.h"

namespace cortex {
namespace math {

template <class interp_t>
class SmoothingIncrementalInterpolator : public Interpolator<typename interp_t::VectorXx> {
 public:
  SmoothingIncrementalInterpolator() : is_first_(true), is_ready_(false) {}

  bool AddPt(double t,
             const PosVelAcc<typename interp_t::VectorXx>& p,
             std::string* error_str = nullptr) override {
    if (is_first_) {
      prev_eval_t_ = t;
      prev_eval_p_ = p;
      is_first_ = false;
      return true;
    }
    is_ready_ = true;

    if (t <= prev_eval_t_) {
      if (error_str) {
        std::stringstream ss;
        ss << "Add time must be beyond the last eval time -- t = " << t
           << " vs last eval t = " << prev_eval_t_;
        *error_str += ss.str();
      }
      return false;
    }

    interpolator_ = TimeScaledInterpolator<interp_t>(prev_eval_t_, prev_eval_p_, t, p);
    return true;
  }

  // Note: only adds to the error string if there's an error. Typically string
  // operations aren't real-time safe, but in this case we'd be bailing out.
  bool Eval(double t,
            PosVelAcc<typename interp_t::VectorXx>& ret,
            std::string* error_str) const override {
    if (!IsReady(t)) {
      if (error_str) {
        *error_str +=
            "Smoothing incremental interpolator not ready. Must see at least two "
            "points before evaluating.";
      }
      return false;
    }

    if (t < interpolator_.t0()) {
      if (error_str) {
        std::stringstream ss;
        ss << "Nonmonotonic evals -- t = " << t << ", last eval was at " << interpolator_.t0();
        *error_str += ss.str();
      }
      return false;
    }
    if (t > interpolator_.t1()) {
      // TODO(roflaherty): Convert this over to a version that extrapolates with zero
      // acceleration. Include a jitter buffer (only extrapolate so far).
      //
      // For now, though, this is unsupported and it just errors.
      if (error_str) {
        std::stringstream ss;
        ss << "Future eval requested. Currently unsupported. Expects eval "
           << "monotonicity -- t = " << t << ", last eval time = " << interpolator_.t1();
        *error_str += ss.str();
      }
      return false;
    }

    if (!interpolator_.Eval(t, ret, error_str)) {
      return false;
    }

    prev_eval_t_ = t;
    prev_eval_p_ = ret;
    return true;
  }
  using Interpolator<typename interp_t::VectorXx>::Eval;

  // Returns true iff the interpolator was created at least enough time in the
  // past so the shifted evaluation time falls within the valid range of the
  // interpolator.
  //
  // Note that once the interpolator is ready (has returned ready once), since
  // new interpolators are always created to be lower bounded at the shifted
  // interpolation eval time, and eval times are always monotonically
  // increasing, it will always be ready (always return true).
  bool IsReady(double t) const { return is_ready_ && (t >= interpolator_.t0()); }

 protected:
  TimeScaledInterpolator<interp_t> interpolator_;
  bool is_first_;
  bool is_ready_;

  mutable double prev_eval_t_;
  mutable PosVelAcc<typename interp_t::VectorXx> prev_eval_p_;
};

}  // namespace math
}  // namespace cortex
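A usage sketch (not from the original sources). It assumes CubicPositionInterpolatorXd comes from cubic_position_interpolator.h, as instantiated by command_stream_interpolator.cpp; the times and values are invented. The key behavior shown: each AddPt() re-anchors a fresh segment at the last *evaluated* state, smoothing over noisy targets.

// Illustrative sketch: interleaved AddPt/Eval cadence.
#include <iostream>
#include <string>
#include "cortex/math/interpolation/cubic_position_interpolator.h"
#include "cortex/math/interpolation/smoothing_incremental_interpolator.h"

int main() {
  cortex::math::SmoothingIncrementalInterpolator<cortex::math::CubicPositionInterpolatorXd>
      interp;

  cortex::math::PosVelAccXd target(1), out(1);
  std::string err;

  interp.AddPt(0.0, target, &err);  // First point just records state.
  target.x << 1.;
  interp.AddPt(0.1, target, &err);  // Segment from the recorded state to (0.1, target).

  if (interp.Eval(0.05, out, &err)) {  // Must be inside [t0, t1] and monotonic.
    std::cout << "x(0.05) = " << out.x.transpose() << std::endl;
  }

  target.x << 1.5;
  interp.AddPt(0.2, target, &err);  // New segment anchored at the 0.05 eval point.
  return 0;
}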
4,172
C
31.858267
95
0.649089
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/rmpflow_commanded_joints_listener.cpp
/* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
 * and to this software, related documentation and any modifications thereto. Any use,
 * reproduction, disclosure or distribution of this software and related documentation without an
 * express license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "cortex/control/rmpflow_commanded_joints_listener.h"

namespace cortex {
namespace control {

RmpflowCommandedJointsListener::RmpflowCommandedJointsListener(
    const std::string& rmpflow_commands_topic, const std::string& joint_state_topic)
    : rmpflow_commands_listener_(rmpflow_commands_topic, 1),
      is_set_(false),
      joint_state_listener_(std::make_shared<util::JointStateListener>()) {
  joint_state_listener_->Init(joint_state_topic);
  rmpflow_commands_listener_.RegisterCallback([&](const auto& msg) {
    std::lock_guard<std::mutex> guard(mutex_);
    joint_state_listener_->SetRequiredJoints(msg.names);
    is_set_ = true;  // Required joints are now known, so IsAvailable() may succeed.
  });
}

bool RmpflowCommandedJointsListener::IsAvailable() const {
  std::lock_guard<std::mutex> guard(mutex_);
  return is_set_ && joint_state_listener_->is_available();
}

void RmpflowCommandedJointsListener::WaitUntilAvailable(double poll_rate) const {
  ros::Rate rate(poll_rate);
  while (ros::ok() && !IsAvailable()) {
    rate.sleep();
  }
}

const std::shared_ptr<util::JointStateListener>
RmpflowCommandedJointsListener::joint_state_listener() const {
  return joint_state_listener_;
}

}  // namespace control
}  // namespace cortex
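A usage sketch (not from the original sources): wait until both a command and matching joint states have been seen, then read the measured positions. The topic names are hypothetical, and CurrentState() is assumed from cortex/util/joint_state_listener.h as used by command_stream_interpolator_main.cpp.

// Illustrative sketch: block until the listener is ready, then read state.
#include <iostream>
#include <ros/ros.h>
#include "cortex/control/rmpflow_commanded_joints_listener.h"

int main(int argc, char** argv) {
  ros::init(argc, argv, "rmpflow_joints_listener_example");
  ros::NodeHandle node_handle;
  ros::AsyncSpinner spinner(1);
  spinner.start();

  cortex::control::RmpflowCommandedJointsListener listener(
      "/cortex/arm/command", "/robot/joint_state");  // Hypothetical topics.
  listener.WaitUntilAvailable(30.);  // Poll at 30 Hz until ready.

  std::cout << "q = " << listener.joint_state_listener()->CurrentState().q.transpose()
            << std::endl;
  return 0;
}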
1,597
C++
34.51111
98
0.751409
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator.h
/**
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
 * and to this software, related documentation and any modifications thereto. Any use,
 * reproduction, disclosure or distribution of this software and related documentation without an
 * express license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <atomic>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

#include <cortex_control/JointPosVelAccCommand.h>
#include <ros/ros.h>
#include <ros/time.h>

#include "cortex/math/interpolation/interpolator.h"

namespace cortex {
namespace control {

enum class ControllerState {
  // The controller hasn't started yet so we should ignore any incoming commands so we don't start
  // processing state changes prematurely.
  StartingController = 0,

  // The controller's eval calls have started. We need to wait on the backend to start. (The
  // backend may already be started, in which case it'll immediately transition once the next
  // incoming command is received).
  WaitingOnBackend,

  // We need to sync the backend with the current state of the robot by suppressing it briefly.
  // Suppression automatically sets the backend to the latest measured state from the robot. We
  // remain in this state until we've detected we're no longer receiving messages from the
  // backend.
  SyncingBackend,

  // After we've detected the backend suppression has been successful, we stop suppressing and
  // transition to initializing the interpolator. The next incoming command will be used to
  // initialize the interpolator. The NextCommand() interface can be used to blend between the
  // measured state of the robot and the interpolated command for a blending duration specified on
  // initialization.
  InitializingInterpolator,

  // Once the interpolator is initialized, we're operational and running as expected.
  Operational
};

}  // namespace control
}  // namespace cortex

namespace cortex {
namespace control {

// Enables usage of the following form:
//
//   auto suppressor = std::make_shared<cortex::control::CommandSuppressor>(topic, rate_hz);
//   suppressor->StartSuppressing();
//   ros::Duration(2.).sleep();
//   suppressor->StopSuppressing();
//
// Internally, it constantly sends suppression messages at the specified rate and switches from
// sending std_msgs::Bool(true) for suppression to std_msgs::Bool(false) when not suppressing.
class CommandSuppressor {
 public:
  static std::string default_topic;
  static double default_rate_hz;

  // Defaults topic to default_topic and the publication rate to default_rate_hz.
  CommandSuppressor() : CommandSuppressor(default_topic, default_rate_hz) {}

  // Initialize to publish on the specified topic at the specified rate. Constantly publishes in a
  // separate thread.
  CommandSuppressor(const std::string& topic, double rate_hz);
  ~CommandSuppressor();

  void StartSuppressing() { is_suppressing_ = true; }
  void StopSuppressing() { is_suppressing_ = false; }

 protected:
  void Run();

  std::atomic_bool is_suppressing_;  // True when suppressing.
  std::atomic_bool is_running_;      // Set to false to stop the thread.
  std::string topic_;                // Topic it'll publish on.
  double rate_hz_;                   // Rate at which it'll publish.
  ros::Publisher suppression_pub_;   // The publisher itself.
  std::thread run_thread_;           // Thread running the constant publication stream.
};

// Interpolator receiving a stream of cortex commands and reconstructing the integral curve they
// describe using a quintic interpolator. It's assumed that Eval() is called at a regular control
// rate; the eval times are used as a clock for the system.
class CommandStreamInterpolator {
 public:
  static const double default_blending_duration;
  static const double default_backend_timeout;
  static const double default_time_between_interp_pubs;

  // A command is a commanded position plus return information on the availability from the Eval()
  // method. This enables the following syntax
  //
  //   auto command = stream_interpolator->Eval(...);
  //   if (command) {
  //     Send(command);
  //   }
  //
  // There's a Command::Unavailable() static convenience method for retrieving a generic
  // unavailable command.
  struct Command {
    bool is_available;
    Eigen::VectorXd commanded_position;

    Command(const Eigen::VectorXd& commanded_position)
        : is_available(true), commanded_position(commanded_position) {}
    Command() : is_available(false) {}

    // Enables checking the boolean truth value of the command to see whether
    // or not it's available.
    operator bool() { return is_available; }

    static Command Unavailable() { return Command(); }
  };

  // By default doesn't use the smoothing interpolator.
  bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
            const std::string& cortex_command_topic,
            ros::Duration blending_duration = ros::Duration(default_blending_duration)) {
    return Init(interpolator_lookup_delay_buffer, false, cortex_command_topic);
  }

  // interpolator_lookup_delay_buffer is how far in the past to look up interpolated values to
  // accommodate possible jitter.
  //
  // use_smoothing_interpolator: if true, uses a smoothing interpolator. Otherwise, uses a basic
  // quintic interpolator.
  //
  // cortex_command_topic: topic on which cortex_control::JointPosVelAccCommand messages are
  // broadcast.
  //
  // blending_duration: how long to blend for during start up when using NextCommand().
  bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
            bool use_smoothing_interpolator,
            const std::string& cortex_command_topic,
            ros::Duration blending_duration = ros::Duration(default_blending_duration),
            double backend_timeout = default_backend_timeout);

  bool Init(const ros::Duration& interpolator_lookup_delay_buffer,
            bool use_smoothing_interpolator,
            const std::string& cortex_command_topic,
            const std::string& cortex_command_ack_topic,
            const std::string& cortex_command_suppress_topic,
            const std::string& cortex_command_interpolated_topic,
            ros::Duration blending_duration = ros::Duration(default_blending_duration),
            double backend_timeout = default_backend_timeout);

  void Start();

  // Returns true if enough time has passed since the last cortex command callback to designate
  // the backend as having been stopped or successfully suppressed.
  bool IsBackendTimedOut(const ros::Time& time) const;

  // Evaluate the interpolator at the specified time index. Time indices should be monotonically
  // increasing, and calling this method steps the protocol. The Command is flagged as not
  // available until the protocol is in the Operational state.
  Command EvalAndStep(const ros::Time& time);

  // Internally calls EvalAndStep(time), but handles unavailable commands cleanly and smoothly
  // interpolates as needed to create a smooth transition to interpolation on startup.
  //
  // Automatically switches between returning q_measured when the interpolator isn't ready,
  // blending between q_measured and the interpolated values for a predefined duration
  // (blend_duration, set at initialization), and fully returning the interpolated values once
  // blending is complete. It is recommended that this method be used for smooth transitioning to
  // interpolated command stream control.
  //
  // q_measured can be smaller in length than the internal interpolated commands. In that case,
  // just the first q_measured.size() joint commands are used, and the returned command vector is
  // of length q_measured.size().
  Eigen::VectorXd NextCommand(const ros::Time& time,
                              const Eigen::VectorXd& q_measured,
                              bool* is_interpolator_active = nullptr);

 private:
  void CommandCallback(const cortex_control::JointPosVelAccCommand& msg);

  // Add the command in the given command_msg to the interpolator. The command messages are meant
  // to describe waypoints along an integral curve, so their command_msg.t time stamp is a
  // rectified (jitter free) time stamp that can be used for interpolation.
  void AddPointToInterpolator(const cortex_control::JointPosVelAccCommand& command_msg);

  // Add the given interpolation point to the interpolator at the given time.
  void AddPointToInterpolator(const ros::Time& time, const cortex::math::PosVelAccXd& point);

  // This method error checks on the state of the controller and shifts the time point to index
  // the interpolator correctly. The time input should be controller time.
  //
  // Returns the result in eval_point.
  //
  // If there's an error, returns false (and if the optional error_str is available, sets the
  // error string). Otherwise, returns true on success.
  bool EvalInterpolator(const ros::Time& time,
                        cortex::math::PosVelAccXd& eval_point,
                        std::string* error_str = nullptr) const;

  // Publish the given interpolated point as a
  //
  //   cortex_control::JointPosVelAccCommand
  //
  // on <joint_command_topic>/interpolated.
  void PublishInterpolatedPoint(const ros::Time& time,
                                const cortex::math::PosVelAccXd& point) const;

  // Resets the interpolator to the initial state. One should always call this method for any
  // event that transitions the system back to the WaitingOnBackend state.
  void ResetInterpolator();

  // Protects all members between calls to Eval() and CommandCallback().
  std::mutex mutex_;

  // Time at the most recent eval. This enables syncing the clocks between Eval() and Callback().
  ros::Time last_eval_time_;

  // Number of seconds in the past to evaluate the interpolator. The interpolator is effectively
  // evaluated as interpolator->Eval(<now> - <delay_buffer>, ...); There are some details about
  // syncing the clocks between incoming commands and the controller's Eval() time, but the gist
  // of it is that new incoming points are added at time <now> and we evaluate at
  // <now> - <delay_buffer>.
  ros::Duration interpolator_lookup_delay_buffer_;

  // Time of the incoming command when the interpolator was initialized (this is actually the
  // second point in the interpolator -- we actually step that back by the buffer delay and
  // interpolate from the current position to this initial incoming command).
  ros::Time eval_time_at_interpolator_start_;
  ros::Time command_time_at_interpolator_start_;
  ros::Duration control_time_offset_from_now_;

  // The underlying quintic interpolator.
  std::shared_ptr<cortex::math::Interpolator<Eigen::VectorXd>> interp_;

  // Current state of the stream interpolator. This orchestrates the sync protocol with the
  // cortex commander.
  ControllerState state_;

  // The time stamp of the Eval() call when the latest incoming command was received.
  ros::Time eval_time_at_last_callback_;

  // ROS publishers and subscribers.
  ros::Subscriber cortex_command_sub_;
  ros::Publisher interpolated_command_pub_;
  ros::Publisher cortex_command_time_pub_;

  cortex_control::JointPosVelAccCommand latest_command_msg_;

  // A command suppressor used during the backend sync to sync the backend with the measured
  // joint states.
  std::shared_ptr<CommandSuppressor> command_suppressor_;

  // If true, uses an auto smoothing interpolator when ResetInterpolator() is called.
  bool use_smoothing_interpolator_;

  // These three members are used to coordinate blending during startup.
  ros::Time blending_start_time_;
  bool start_blending_;
  ros::Duration blending_duration_;

  ros::Time next_print_time_;
  ros::Duration print_period_;

  double time_offset_;
  double momentum_;

  double time_between_interp_pubs_;
  mutable ros::Time time_at_last_pub_;

  double backend_timeout_;
};

}  // namespace control
}  // namespace cortex
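The startup blend described for NextCommand() ramps quadratically from the measured state to the interpolated command. A standalone sketch of that weighting (not from the original sources; the function name and values are illustrative):

// Illustrative sketch: alpha rises from 0 to 1 over the blending window, and
// squaring it keeps the handoff gentle at the start.
#include <algorithm>
#include <iostream>
#include <Eigen/Core>

Eigen::VectorXd BlendedCommand(double elapse,
                               double blend_duration,
                               const Eigen::VectorXd& q_interp,
                               const Eigen::VectorXd& q_measured) {
  double alpha = std::min(elapse / blend_duration, 1.);  // Linear in [0, 1].
  alpha *= alpha;                                        // Quadratic increase.
  return alpha * q_interp + (1. - alpha) * q_measured;
}

int main() {
  Eigen::VectorXd q_interp = Eigen::VectorXd::Constant(2, 1.);
  Eigen::VectorXd q_measured = Eigen::VectorXd::Zero(2);
  for (double elapse : {0., .5, 1., 2.}) {
    std::cout << "t=" << elapse << ": "
              << BlendedCommand(elapse, 2., q_interp, q_measured).transpose() << std::endl;
  }
  return 0;
}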
12,048
C
41.575972
103
0.729914
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/joint_pos_vel_acc_command_publisher.cpp
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "cortex/control/joint_pos_vel_acc_command_publisher.h"

#include <vector>

#include <cortex_control/JointPosVelAccCommand.h>

namespace cortex {
namespace control {

JointPosVelAccCommandPublisher::JointPosVelAccCommandPublisher(
    const std::string& topic, bool stamp_header_with_controller_time)
    : stamp_header_with_controller_time_(stamp_header_with_controller_time),
      is_first_(true),
      next_id_(0) {
  topic_ = topic;
  ros::NodeHandle nh;
  joint_command_publisher_ = nh.advertise<cortex_control::JointPosVelAccCommand>(topic_, 10);
}

JointPosVelAccCommandPublisher::~JointPosVelAccCommandPublisher() {}

void JointPosVelAccCommandPublisher::Publish(uint64_t id,
                                             const ros::Time& t,
                                             const std::vector<std::string>& joint_names,
                                             const Eigen::VectorXd& q,
                                             const Eigen::VectorXd& qd,
                                             const Eigen::VectorXd& qdd) {
  cortex_control::JointPosVelAccCommand joint_command;
  if (stamp_header_with_controller_time_) {
    joint_command.header.stamp = t;
  } else {
    if (is_first_) {
      controller_time_offset_ = ros::Time::now() - t;
    }

    // We want to report the current time, but with the steadiness of the
    // controller time.
    joint_command.header.stamp = t + controller_time_offset_;
  }
  joint_command.id = id;

  if (is_first_) {
    // Usually this first message is missed by the interpolator (or it's
    // dropped because of syncing protocols), but even if it's used, the
    // interpolator won't use the period field because that's only used for
    // knowing the period between the previous point (there isn't one) and this
    // one.
    joint_command.period = ros::Duration(0.);
    is_first_ = false;
  } else {
    joint_command.period = (t - prev_t_);
  }

  joint_command.t = t;
  joint_command.names = joint_names;
  joint_command.q = std::vector<double>(q.data(), q.data() + q.size());
  joint_command.qd = std::vector<double>(qd.data(), qd.data() + qd.size());
  joint_command.qdd = std::vector<double>(qdd.data(), qdd.data() + qdd.size());
  joint_command_publisher_.publish(joint_command);

  // Updating the next_id_ member here means we can always set an ID once with
  // an explicit call to this Publish(...) method and then use the ID-less
  // Publish(...) method to continue publishing sequential IDs from there.
  next_id_ = id + 1;
  prev_t_ = t;
}

void JointPosVelAccCommandPublisher::Publish(const ros::Time& t,
                                             const std::vector<std::string>& joint_names,
                                             const Eigen::VectorXd& q,
                                             const Eigen::VectorXd& qd,
                                             const Eigen::VectorXd& qdd) {
  // Note that this call automatically increments next_id_.
  Publish(next_id_, t, joint_names, q, qd, qdd);
}

}  // namespace control
}  // namespace cortex
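A usage sketch (not from the original sources): the first Publish() pins the sequence ID, later ID-less calls continue from there, and each message's period field records the gap from the previous command. The topic name is hypothetical.

// Illustrative sketch: publish two sequential commands.
#include <string>
#include <vector>
#include <Eigen/Core>
#include <ros/ros.h>
#include "cortex/control/joint_pos_vel_acc_command_publisher.h"

int main(int argc, char** argv) {
  ros::init(argc, argv, "joint_command_publisher_example");
  ros::NodeHandle node_handle;

  cortex::control::JointPosVelAccCommandPublisher pub("/cortex/arm/command");
  std::vector<std::string> names = {"joint1", "joint2"};
  Eigen::VectorXd q = Eigen::VectorXd::Zero(2);
  Eigen::VectorXd qd = Eigen::VectorXd::Zero(2);
  Eigen::VectorXd qdd = Eigen::VectorXd::Zero(2);

  ros::Time t = ros::Time::now();
  pub.Publish(0, t, names, q, qd, qdd);  // id = 0, period = 0 (first message).

  t += ros::Duration(0.01);
  pub.Publish(t, names, q, qd, qdd);     // id = 1, period = 10 ms.
  return 0;
}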
3,532
C++
38.255555
93
0.62769
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/joint_pos_vel_acc_command_publisher.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <string>
#include <vector>

#include <Eigen/Core>
#include <ros/ros.h>

namespace cortex {
namespace control {

/*!\brief Base class for a joint position, velocity, and acceleration command
 * publisher.
 */
class JointPosVelAccCommandPublisher {
 public:
  /*!\brief Creates a JointPosVelAccCommandPublisher publishing
   * cortex_control::JointPosVelAccCommand messages on the given *topic*.
   *
   * There are two time stamps in each JointPosVelAccCommand message, one in
   * the header and another as an explicit field t. The explicit field is
   * always set to be the controller time (with each message exactly a period
   * duration between), but by default (if stamp_header_with_controller_time is
   * false) the header contains the wall clock time so we can see the jitter in
   * the calculation using tools like rqt_plot. If
   * stamp_header_with_controller_time is true, that header stamp is also set
   * to the controller time so that becomes observable in plotters.
   */
  JointPosVelAccCommandPublisher(const std::string& topic,
                                 bool stamp_header_with_controller_time = false);

  /*!\brief Virtual destructor for safe use as a base class. */
  virtual ~JointPosVelAccCommandPublisher();

  /*!\brief Publishes the position, velocity, and acceleration command. Each
   * call to this method sets the id counter to the provided value, so
   * subsequent calls to the id-less API will increment from this id.
   *
   * \param id The sequence id of this command.
   * \param t The time stamp of this command.
   * \param joint_names Joint names vector. This vector must have the same
   * order as q, qd, and qdd, i.e. the i-th name must correspond to the i-th q,
   * qd, qdd values.
   * \param q Joint position values
   * \param qd Joint velocity values
   * \param qdd Joint acceleration values
   */
  virtual void Publish(uint64_t id,
                       const ros::Time& t,
                       const std::vector<std::string>& joint_names,
                       const Eigen::VectorXd& q,
                       const Eigen::VectorXd& qd,
                       const Eigen::VectorXd& qdd);

  /*!\brief This version automatically creates the sequence id, starting from
   * zero and incrementing once for each call.
   */
  void Publish(const ros::Time& t,
               const std::vector<std::string>& joint_names,
               const Eigen::VectorXd& q,
               const Eigen::VectorXd& qd,
               const Eigen::VectorXd& qdd);

  const std::string& topic() const { return topic_; }

 protected:
  bool stamp_header_with_controller_time_;
  ros::Publisher joint_command_publisher_;
  ros::Duration controller_time_offset_;
  bool is_first_;
  ros::Time prev_t_;
  uint64_t next_id_;
  std::string topic_;
};

}  // namespace control
}  // namespace cortex
3,403
C
35.60215
81
0.684396
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/builders.cpp
/**
 * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "cortex/control/builders.h"

#include <iostream>

#include <ros/time.h>

#include "cortex/util/yaml.h"

namespace cortex {
namespace control {

std::shared_ptr<CommandStreamInterpolator> LoadCommandStreamInterpolatorFromYaml(
    const YAML::Node& command_stream_interpolator_config, bool verbose) {
  // Extract params from yaml config.
  auto params = util::GetFieldOrDie(command_stream_interpolator_config, "params");
  auto interpolation_delay = util::GetOrDie<double>(params, "interpolation_delay");
  auto use_smoothing_interpolator = util::GetOrDie<bool>(params, "use_smoothing_interpolator");
  auto blending_duration = util::GetOrDie<double>(params, "blending_duration");
  auto backend_timeout = util::GetOrDie<double>(params, "backend_timeout");

  // Extract ROS topics from yaml config.
  auto ros_topics = util::GetFieldOrDie(command_stream_interpolator_config, "ros_topics");
  auto command_topics = util::GetFieldOrDie(ros_topics, "rmpflow_commands");
  auto rmpflow_command_topic = util::GetOrDie<std::string>(command_topics, "command");
  auto rmpflow_command_ack_topic = util::GetOrDie<std::string>(command_topics, "ack");
  auto rmpflow_command_suppress_topic = util::GetOrDie<std::string>(command_topics, "suppress");
  auto rmpflow_command_interpolated_topic =
      util::GetOrDie<std::string>(command_topics, "interpolated");

  if (verbose) {
    std::cout << "RMPflow backend config:" << std::endl;
    std::cout << "  params:" << std::endl;
    std::cout << "    interpolation delay: " << interpolation_delay << std::endl;
    std::cout << "    use smoothing interpolator: " << use_smoothing_interpolator << std::endl;
    std::cout << "    blending duration: " << blending_duration << std::endl;
    std::cout << "    backend timeout: " << backend_timeout << std::endl;
    std::cout << "  ros_topics:" << std::endl;
    std::cout << "    rmpflow_commands:" << std::endl;
    std::cout << "      command: " << rmpflow_command_topic << std::endl;
    std::cout << "      ack: " << rmpflow_command_ack_topic << std::endl;
    std::cout << "      suppress: " << rmpflow_command_suppress_topic << std::endl;
    std::cout << "      interpolated: " << rmpflow_command_interpolated_topic << std::endl;
  }

  auto stream_interpolator = std::make_shared<cortex::control::CommandStreamInterpolator>();
  stream_interpolator->Init(ros::Duration(interpolation_delay),
                            use_smoothing_interpolator,
                            rmpflow_command_topic,
                            rmpflow_command_ack_topic,
                            rmpflow_command_suppress_topic,
                            rmpflow_command_interpolated_topic,
                            ros::Duration(blending_duration),
                            backend_timeout);
  return stream_interpolator;
}

}  // namespace control
}  // namespace cortex
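A config shaped like what LoadCommandStreamInterpolatorFromYaml reads might look as follows. The field names mirror the GetOrDie lookups above (plus the joint_state topic read from the same file by command_stream_interpolator_main.cpp); the values and topic names are illustrative only:

params:
  interpolation_delay: 0.1
  use_smoothing_interpolator: false
  blending_duration: 2.0
  backend_timeout: 0.5
ros_topics:
  joint_state: /robot/joint_state
  rmpflow_commands:
    command: /cortex/arm/command
    ack: /cortex/arm/command/ack
    suppress: /cortex/arm/command/suppress
    interpolated: /cortex/arm/command/interpolated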
3,336
C++
49.560605
96
0.660372
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/builders.h
/**
 * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <memory>

#include <yaml-cpp/node/node.h>

#include "cortex/control/command_stream_interpolator.h"

namespace cortex {
namespace control {

//! Makes and initializes a command stream interpolator from the specified YAML config. One still
//! needs to call Start() on the returned object to start the streaming interpolation.
std::shared_ptr<CommandStreamInterpolator> LoadCommandStreamInterpolatorFromYaml(
    const YAML::Node& command_stream_interpolator_config, bool verbose = false);

}  // namespace control
}  // namespace cortex
1,005
C
34.92857
103
0.7801
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator_main.cpp
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

// Runs a generic CommandStreamInterpolator without sending the commands to
// a physical robot. This enables visualizing the underlying interpolated
// commands to analyze interpolation techniques for specific problems. Note it
// doesn't use the NextCommand() interface, but directly jumps to the
// interpolations, so blending doesn't pollute early signals.

#include <iostream>

#include "cortex/control/builders.h"
#include "cortex/control/command_stream_interpolator.h"
#include "cortex/control/rmpflow_commanded_joints_listener.h"
#include "cortex/util/joint_state_listener.h"
#include "cortex/util/ros_util.h"
#include "cortex/util/yaml.h"

#include <gflags/gflags.h>
#include <ros/ros.h>

DEFINE_string(command_stream_interpolator_config,
              "package://cortex_control/config/command_stream_interpolator.yaml",
              "");
DEFINE_double(interpolated_control_rate_hz,
              500.,
              "Rate in Hz at which the low-level control will be sending "
              "commands. In this program, those commands are published on a "
              "new topic <command_topic>/interpolated.");
DEFINE_bool(use_rectified_cycles,
            false,
            "If true, rectifies the time stamps so they're always exactly a period "
            "apart. Otherwise (default), sets the time stamp to the current wall-clock "
            "time.");
DEFINE_bool(analysis_mode,
            false,
            "If true, runs in analysis mode. Doesn't use NextCommand() for interpolation "
            "between interpolated and desired when starting up. In general, you'll want to "
            "use NextCommand() in real controllers.");
DEFINE_bool(verbose, false, "Print extra messages.");

class MockControllerInterface {
 public:
  bool is_interpolator_active;

  MockControllerInterface(
      const std::shared_ptr<cortex::util::JointStateListener>& joint_state_listener)
      : is_interpolator_active(false), joint_state_listener_(joint_state_listener) {}

  Eigen::VectorXd GetMeasuredPositions() {
    if (is_interpolator_active) {
      // The interpolator is active, so as part of the protocol the joint state listener has
      // been set to listen to the same joints as found in the commands and the interpolator has
      // made sure those are available in the joint state listener. Therefore, we can return the
      // measured states from the listener.
      return joint_state_listener_->CurrentState().q;
    } else {
      // Otherwise, return a zero length vector. That will get the NextCommand() calls to return
      // a zero length vector as well.
      return Eigen::VectorXd(0);
    }
  }

 protected:
  std::shared_ptr<cortex::util::JointStateListener> joint_state_listener_;
};

int main(int argc, char** argv) {
  try {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    ros::init(argc, argv, "cortex_command_stream_interpolator");
    ros::NodeHandle node_handle;
    ros::AsyncSpinner spinner(4);
    spinner.start();

    auto command_stream_interpolator_config = YAML::LoadFile(
        cortex::util::ExpandRosPkgRelPath(FLAGS_command_stream_interpolator_config));
    auto command_stream_interpolator = cortex::control::LoadCommandStreamInterpolatorFromYaml(
        command_stream_interpolator_config);
    command_stream_interpolator->Start();

    auto ros_topics =
        cortex::util::GetFieldOrDie(command_stream_interpolator_config, "ros_topics");
    auto joint_state_topic = cortex::util::GetOrDie<std::string>(ros_topics, "joint_state");
    auto command_topics = cortex::util::GetFieldOrDie(ros_topics, "rmpflow_commands");
    auto rmpflow_command_topic = cortex::util::GetOrDie<std::string>(command_topics, "command");

    cortex::control::RmpflowCommandedJointsListener rmpflow_commanded_joints_listener(
        rmpflow_command_topic, joint_state_topic);
    std::cout << "Waiting until joint states are available..." << std::endl;
    rmpflow_commanded_joints_listener.WaitUntilAvailable(30.);
    std::cout << "<done>" << std::endl;

    auto controller_interface =
        MockControllerInterface(rmpflow_commanded_joints_listener.joint_state_listener());

    auto rate_hz = FLAGS_interpolated_control_rate_hz;
    auto period = ros::Duration(1. / rate_hz);

    auto time = ros::Time::now();
    auto time_at_next_print = time;
    Eigen::VectorXd q_des;
    ros::Rate rate(rate_hz);
    bool is_interpolator_active = false;
    while (ros::ok()) {
      if (FLAGS_use_rectified_cycles) {
        time += period;
      } else {
        time = ros::Time::now();
      }

      if (FLAGS_analysis_mode) {
        // Analysis mode. Allows us to see the interpolated commands without the blending
        // introduced by NextCommand(). Controllers will typically want to use NextCommand().
        auto command = command_stream_interpolator->EvalAndStep(time);
        if (command) {
          q_des = command.commanded_position;
        }
      } else {
        // Standard mode. Usually you would send this next_command to the controller. Here, we
        // just use the internal functionality of the command stream interpolator to publish the
        // command on the specified interpolated commands topic.
        auto q_measured = controller_interface.GetMeasuredPositions();
        q_des = command_stream_interpolator->NextCommand(
            time, q_measured, &controller_interface.is_interpolator_active);
      }

      if (FLAGS_verbose && time >= time_at_next_print) {
        std::cout << "time = " << time << ", q_des = " << q_des.transpose() << std::endl;
        time_at_next_print += ros::Duration(.2);
      }

      rate.sleep();
    }
    std::cout << "<done>" << std::endl;
  } catch (const std::exception& ex) {
    std::cout << "Exception caught: " << ex.what() << std::endl;
  }

  return 0;
}
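A hypothetical invocation of this tool (the binary name is a guess from the source file name; the flags are the DEFINE_* entries above, and gflags accepts the --flag=value form):

rosrun cortex_control command_stream_interpolator_main \
    --command_stream_interpolator_config=package://cortex_control/config/command_stream_interpolator.yaml \
    --interpolated_control_rate_hz=500 \
    --analysis_mode --verbose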
6,247
C++
40.377483
100
0.683368
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/rmpflow_commanded_joints_listener.h
/* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in
 * and to this software, related documentation and any modifications thereto. Any use,
 * reproduction, disclosure or distribution of this software and related documentation without an
 * express license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <memory>
#include <mutex>
#include <string>

#include <cortex_control/JointPosVelAccCommand.h>

#include "cortex/util/joint_state_listener.h"
#include "cortex/util/ros_message_listener.h"

namespace cortex {
namespace control {

// A wrapper around the joint state listener ensuring that we listen to the same joints we're
// controlling with the RMPflow commander's commands.
//
// Listens to the RMPflow commander's commands as well as the joint state topic. Once we receive
// the first command, we register the joint names with the joint state listener as required
// joints. The IsAvailable() method (or WaitUntilAvailable()) can then be used to check whether
// the joint state listener is ready and has measured values for each of those named joints.
class RmpflowCommandedJointsListener {
 public:
  RmpflowCommandedJointsListener(const std::string& rmpflow_commands_topic,
                                 const std::string& joint_state_topic);

  bool IsAvailable() const;
  void WaitUntilAvailable(double poll_rate) const;

  const std::shared_ptr<util::JointStateListener> joint_state_listener() const;

 protected:
  mutable std::mutex mutex_;
  util::RosMessageListener<cortex_control::JointPosVelAccCommand> rmpflow_commands_listener_;
  bool is_set_;
  std::shared_ptr<util::JointStateListener> joint_state_listener_;
};

}  // namespace control
}  // namespace cortex
1,799
C
38.130434
99
0.764869
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/control/command_stream_interpolator.cpp
/** * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in * and to this software, related documentation and any modifications thereto. Any use, * reproduction, disclosure or distribution of this software and related documentation without an * express license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/control/command_stream_interpolator.h" #include <algorithm> #include <sstream> #include <vector> #include <Eigen/Core> #include <std_msgs/Bool.h> #include <std_msgs/Time.h> #include "cortex/math/interpolation/cubic_position_interpolator.h" #include "cortex/math/interpolation/incremental_interpolator.h" #include "cortex/math/interpolation/pos_vel_acc.h" #include "cortex/math/interpolation/quartic_interpolator.h" #include "cortex/math/interpolation/smoothing_incremental_interpolator.h" #include "cortex_control/CortexCommandAck.h" namespace cortex { namespace control { inline std::ostream& operator<<(std::ostream& os, ControllerState state) { using namespace cortex::control; switch (state) { case ControllerState::StartingController: os << "ControllerState::StartingController"; break; case ControllerState::WaitingOnBackend: os << "ControllerState::WaitingOnBackend"; break; case ControllerState::SyncingBackend: os << "ControllerState::SyncingBackend"; break; case ControllerState::InitializingInterpolator: os << "ControllerState::InitializingInterpolator"; break; case ControllerState::Operational: os << "ControllerState::Operational"; break; default: os << "ControllerState::<unknown>"; } return os; } inline std::ostream& operator<<(std::ostream& os, CommandStreamInterpolator::Command& command) { if (command) { os << "[" << command.commanded_position.transpose() << "]"; } else { os << "<unavailable>"; } return os; } std::string CommandSuppressor::default_topic = "/robot/command_suppression/right"; double CommandSuppressor::default_rate_hz = 30.; CommandSuppressor::CommandSuppressor(const std::string& topic, double rate_hz) : topic_(topic), rate_hz_(rate_hz) { is_running_ = true; is_suppressing_ = false; ros::NodeHandle node_handle; suppression_pub_ = node_handle.advertise<std_msgs::Bool>(topic_, 10); run_thread_ = std::thread(&CommandSuppressor::Run, this); } CommandSuppressor::~CommandSuppressor() { is_running_ = false; run_thread_.join(); } void CommandSuppressor::Run() { ros::Rate rate(rate_hz_); while (ros::ok() && is_running_) { std_msgs::Bool msg; if (is_suppressing_) { msg.data = true; } else { msg.data = false; } suppression_pub_.publish(msg); rate.sleep(); } } const double CommandStreamInterpolator::default_blending_duration = 2.; const double CommandStreamInterpolator::default_backend_timeout = .5; const double CommandStreamInterpolator::default_time_between_interp_pubs = 1. 
/ 60; // 60 hz bool CommandStreamInterpolator::Init(const ros::Duration& interpolator_lookup_delay_buffer, bool use_smoothing_interpolator, const std::string& cortex_command_topic, ros::Duration blending_duration, double backend_timeout) { return Init(interpolator_lookup_delay_buffer, use_smoothing_interpolator, cortex_command_topic, cortex_command_topic + "/ack", cortex_command_topic + "/suppress", cortex_command_topic + "/interpolated", blending_duration, backend_timeout); } bool CommandStreamInterpolator::Init(const ros::Duration& interpolator_lookup_delay_buffer, bool use_smoothing_interpolator, const std::string& cortex_command_topic, const std::string& cortex_command_ack_topic, const std::string& cortex_command_suppress_topic, const std::string& cortex_command_interpolated_topic, ros::Duration blending_duration, double backend_timeout) { interpolator_lookup_delay_buffer_ = interpolator_lookup_delay_buffer; use_smoothing_interpolator_ = use_smoothing_interpolator; blending_duration_ = blending_duration; backend_timeout_ = backend_timeout; time_between_interp_pubs_ = default_time_between_interp_pubs; ros::NodeHandle node_handle; // Create pub-subs. cortex_command_sub_ = node_handle.subscribe( cortex_command_topic, 1, &CommandStreamInterpolator::CommandCallback, this); interpolated_command_pub_ = node_handle.advertise<cortex_control::JointPosVelAccCommand>(cortex_command_interpolated_topic, 10); cortex_command_time_pub_ = node_handle.advertise<cortex_control::CortexCommandAck>(cortex_command_ack_topic, 10); // Create the suppressor with defaults. command_suppressor_ = std::make_shared<CommandSuppressor>( cortex_command_suppress_topic, CommandSuppressor::default_rate_hz); return true; } void CommandStreamInterpolator::Start() { std::lock_guard<std::mutex> lock(mutex_); std::cout << "<starting_controller>" << std::endl; state_ = ControllerState::StartingController; } bool CommandStreamInterpolator::IsBackendTimedOut(const ros::Time& time) const { auto delta = (time - eval_time_at_last_callback_).toSec(); return delta >= backend_timeout_; } CommandStreamInterpolator::Command CommandStreamInterpolator::EvalAndStep( const ros::Time& time) { std::lock_guard<std::mutex> lock(mutex_); last_eval_time_ = time; // Check state transitions. if (state_ == ControllerState::StartingController) { control_time_offset_from_now_ = ros::Time::now() - time; ResetInterpolator(); std::cout << "<starting> --> <waiting_on_backend>" << std::endl; state_ = ControllerState::WaitingOnBackend; } else if (state_ == ControllerState::WaitingOnBackend) { // The callback switches us out of this one. } else if (state_ == ControllerState::SyncingBackend) { // If we've stopped receiving messages from the backend, stop suppressing and transition to // initializing the interpolator. if (IsBackendTimedOut(time)) { std::cout << "<syncing_backend> --> <initializing_interpolator>" << std::endl; state_ = ControllerState::InitializingInterpolator; command_suppressor_->StopSuppressing(); } } else if (state_ == ControllerState::InitializingInterpolator) { // The callback switches us out of this one. } else if (state_ == ControllerState::Operational) { // We're good to go. We'll just execute until it looks like we've lost communication with the // backend. if (IsBackendTimedOut(time)) { ResetInterpolator(); std::cout << "<operational> --> <waiting_on_backend>" << std::endl; state_ = ControllerState::WaitingOnBackend; } } // Process states. 
if (state_ == ControllerState::StartingController) { // we should immediately transition to waiting on backend. std::cerr << "There's something wrong. We should never get here. Diagnose " << "immediately."; throw std::runtime_error("Bad state in CommandStreamInterpolator"); return Command::Unavailable(); } else if (state_ == ControllerState::WaitingOnBackend) { // Just wait until we start receiving messages. return Command::Unavailable(); } else if (state_ == ControllerState::SyncingBackend) { // We're currently suppressing in a separate thread using the command_suppressor_. // Otherwise, do nothing. return Command::Unavailable(); } else if (state_ == ControllerState::InitializingInterpolator) { time_at_last_pub_ = time; // This is handled by the callback. return Command::Unavailable(); } else if (state_ == ControllerState::Operational) { auto lookup_time = time - interpolator_lookup_delay_buffer_; if (lookup_time < eval_time_at_interpolator_start_) { return Command::Unavailable(); } // Get interpolated command. cortex::math::PosVelAccXd eval_point; std::string error_str; if (!EvalInterpolator(lookup_time, eval_point, &error_str)) { ROS_WARN_STREAM("[cortex] " << error_str); return Command::Unavailable(); } PublishInterpolatedPoint(time, eval_point); return Command(eval_point.x); } else { std::cerr << "Unrecognized state: " << state_; throw std::runtime_error("Bad state in CommandStreamInterpolator"); } } Eigen::VectorXd CommandStreamInterpolator::NextCommand(const ros::Time& time, const Eigen::VectorXd& q_measured, bool* is_interpolator_active) { auto command = EvalAndStep(time); if (is_interpolator_active) { *is_interpolator_active = static_cast<bool>(command); } if (command) { if (start_blending_) { blending_start_time_ = time; start_blending_ = false; } auto elapse = (time - blending_start_time_).toSec(); auto blend_duration = blending_duration_.toSec(); Eigen::VectorXd q_des = command.commanded_position.head(q_measured.size()); if (elapse < blend_duration) { auto alpha = elapse / blend_duration; // Goes linearly from zero to one. alpha *= alpha; // Quadratic increase. q_des = alpha * q_des + (1. - alpha) * q_measured; } return q_des; } else { start_blending_ = true; return q_measured; } } void CommandStreamInterpolator::AddPointToInterpolator( const cortex_control::JointPosVelAccCommand& command_msg) { cortex::math::PosVelAccXd point; point.x = Eigen::Map<const Eigen::VectorXd>(command_msg.q.data(), command_msg.q.size()); point.xd = Eigen::Map<const Eigen::VectorXd>(command_msg.qd.data(), command_msg.qd.size()); point.xdd = Eigen::VectorXd::Zero(point.x.size()); // Accelerations not used by interpolator. AddPointToInterpolator(command_msg.t, point); } void CommandStreamInterpolator::AddPointToInterpolator(const ros::Time& time, const cortex::math::PosVelAccXd& point) { std::string error_str; if (!interp_->AddPt( // We add the first point slightly in the past. (time - command_time_at_interpolator_start_).toSec(), point, &error_str)) { ROS_ERROR_STREAM("[monolithic]: " << error_str); } } bool CommandStreamInterpolator::EvalInterpolator(const ros::Time& time, cortex::math::PosVelAccXd& eval_point, std::string* error_str) const { if (state_ != ControllerState::Operational) { if (error_str) { std::stringstream ss; ss << "Attempting to evaluate interpolator before reaching " "ControllerState::Operational. 
Current state: "
         << state_;
      *error_str = ss.str();
    }
    return false;
  }

  return interp_->Eval(
      (time - eval_time_at_interpolator_start_).toSec() + time_offset_, eval_point, error_str);
}

void CommandStreamInterpolator::PublishInterpolatedPoint(
    const ros::Time& time, const cortex::math::PosVelAccXd& point) const {
  if ((time - time_at_last_pub_).toSec() >= time_between_interp_pubs_) {
    cortex_control::JointPosVelAccCommand command_msg;
    command_msg.header.stamp = time + control_time_offset_from_now_;
    command_msg.names = latest_command_msg_.names;
    command_msg.q = std::vector<double>(point.x.data(), point.x.data() + point.x.size());
    command_msg.qd = std::vector<double>(point.xd.data(), point.xd.data() + point.xd.size());
    command_msg.qdd = std::vector<double>(point.xdd.data(), point.xdd.data() + point.xdd.size());

    interpolated_command_pub_.publish(command_msg);
    time_at_last_pub_ = time;
  }
}

void CommandStreamInterpolator::CommandCallback(
    const cortex_control::JointPosVelAccCommand& command_msg) {
  if (command_msg.period == ros::Duration(0.)) {
    std::cout << "<rejecting first message sent by backend>" << std::endl;
    return;
  }

  std::lock_guard<std::mutex> lock(mutex_);
  latest_command_msg_ = command_msg;

  // While syncing the backend (state ControllerState::SyncingBackend) we suppress commands so
  // callbacks stop. We need to check how much time has elapsed since the last callback (and it
  // needs to be comparable to eval times, hence we set it to last_eval_time_). Note it's important
  // that we check time since the last callback and not time since the state transition, because
  // transitioning to that state causes suppression commands to be sent to the backend. We want to
  // measure how much time has elapsed since the commands actually started being suppressed, not
  // since we started *trying* to suppress commands.
  eval_time_at_last_callback_ = last_eval_time_;
  if (state_ == ControllerState::StartingController) {
    return;  // Don't do anything until Update has been called once.
  } else if (state_ == ControllerState::WaitingOnBackend) {
    // The fact we're in the callback means we're up and running. Transition to syncing the
    // backend.
    std::cout << "<waiting_on_backend> --> <syncing_backend>" << std::endl;
    state_ = ControllerState::SyncingBackend;
    command_suppressor_->StartSuppressing();

    // Until the backend's synced, we don't want to be interpolating points.
    return;
  } else if (state_ == ControllerState::SyncingBackend) {
    return;  // Still syncing.
  } else if (state_ == ControllerState::InitializingInterpolator) {
    // This aligns the interpolator's start time (command_msg.t) with the controller time at the
    // last Eval.
    eval_time_at_interpolator_start_ = last_eval_time_;
    command_time_at_interpolator_start_ = command_msg.t;
    time_offset_ = 0.;
    momentum_ = 0.;

    // Now add the current commanded target at the current time and we're ready to start
    // interpolating.
AddPointToInterpolator(command_msg); std::cout << "<initializing_interpolator> --> <operational>" << std::endl; state_ = ControllerState::Operational; next_print_time_ = eval_time_at_last_callback_; print_period_ = ros::Duration(1.); } else if (state_ == ControllerState::Operational) { AddPointToInterpolator(command_msg); auto interp_time = (eval_time_at_last_callback_ - eval_time_at_interpolator_start_).toSec(); auto command_time = (command_msg.t - command_time_at_interpolator_start_).toSec(); auto time_error = command_time - (interp_time + time_offset_); auto now = (ros::Time::now() - eval_time_at_interpolator_start_).toSec(); cortex_control::CortexCommandAck command_ack; command_ack.cortex_command_time = command_msg.t; command_ack.cortex_command_id = command_msg.id; command_ack.time_offset = ros::Duration(-time_error); cortex_command_time_pub_.publish(command_ack); if (eval_time_at_last_callback_ >= next_print_time_) { std::cout << std::setprecision(10) << "[stream interpolator (" << time_offset_ << ")] " << "interp time: " << interp_time << ", now: " << now << ", command time: " << command_time << ", interp - command diff: " << -time_error << std::endl; next_print_time_ += print_period_; } } } void CommandStreamInterpolator::ResetInterpolator() { start_blending_ = true; if (use_smoothing_interpolator_) { // Auto smoothing quartic interpolation. This version always interpolates between the latest // evaluated (q, qd, qdd) and the incoming (q_target, qd_target). interp_ = std::make_shared< cortex::math::SmoothingIncrementalInterpolator<cortex::math::CubicPositionInterpolatorXd>>(); } else { // Basic quintic interpolation. interp_ = std::make_shared<cortex::math::IncrementalInterpolator>(); } } } // namespace control } // namespace cortex
16,265
C++
39.064039
106
0.662588
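The quadratic blending performed in NextCommand above is easy to misread, so here is a minimal Python sketch (NumPy only; the function and variable names are illustrative, not part of cortex_control) isolating just that ramp:

import numpy as np

def blend_command(q_des, q_measured, elapsed, blend_duration):
    """Quadratically blend from the measured to the desired configuration.

    Mirrors the ramp in CommandStreamInterpolator::NextCommand: alpha rises
    linearly from 0 to 1 over blend_duration and is then squared, so the
    interpolated command phases in gently when the stream (re)starts.
    """
    if elapsed >= blend_duration:
        return q_des
    alpha = elapsed / blend_duration  # Linear in [0, 1].
    alpha *= alpha                    # Quadratic ease-in.
    return alpha * np.asarray(q_des) + (1.0 - alpha) * np.asarray(q_measured)

Squaring alpha keeps the initial velocity of the blend near zero, which is why the controller can take over from the measured state without a visible jerk.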
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_listener.h
/*
 * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

//! @file
//! @brief A simple and general joint state listener to collect the latest information
//! about the robot's state.

#pragma once

#include "cortex/util/state_listener.h"

#include <atomic>
#include <mutex>
#include <unordered_map>

#include <Eigen/Core>
#include <ros/ros.h>
#include <sensor_msgs/JointState.h>

namespace cortex {
namespace util {

/*!\brief Contains information about the state of a single joint. Includes
 * the time stamp of the message that last updated the joint.
 */
struct SingleJointState {
  double position;
  double velocity;
  double effort;
  ros::Time stamp;

  SingleJointState() {}
  SingleJointState(double pos, double vel, double eff, const ros::Time &stamp)
      : position(pos), velocity(vel), effort(eff), stamp(stamp) {}
};

typedef std::unordered_map<std::string, SingleJointState> JointStateMap;

/*!\brief A very simple joint state listener that records the latest joint
 * state information in an unordered map mapping the joint name to the most
 * recent SingleJointState information.
 *
 * It's necessary to process the information this way rather than simply
 * recording the joint state messages because there's no guarantee that each
 * joint state message contains information about all of the joints. (This, for
 * instance, is an issue with Baxter.)
 *
 * This class is thread safe.
 */
class JointStateListener : public StateListener {
 public:
  JointStateListener() = default;

  /*!\brief Initialize to listen on the specified topic for the given required joints. Blocks
   * waiting for the joints to be available before returning, polling at the given poll rate.
   */
  void Init(const std::string &topic,
            const std::vector<std::string> &required_joints,
            int poll_rate);

  /*!\brief Initialize to listen on the specified topic for the given required joints. This version
   * does not block. Users must explicitly check is_available() before accessing.
   */
  void Init(const std::string &topic, const std::vector<std::string> &required_joints);

  void Init(const std::string &topic, int poll_rate);
  void Init(const std::string &topic);

  /*!\brief Initializes the listener with 0.0 as the default joint state
   * values. The listener becomes immediately available.
   */
  void InitWithZero(const std::string &topic, const std::vector<std::string> &required_joints);

  /*!\brief Set the required joints (often used in conjunction with
   * Init(topic, poll_rate)). Optionally set wait_until_available to true to
   * block until they're available.
   */
  void SetRequiredJoints(const std::vector<std::string> &required_joints);

  /*!\brief Wait until information for at least the specified
   * required_joints is available.
   */
  void WaitUntilAvailable(int poll_rate) const;

  /*!\brief Returns true if the required joints are available.
   */
  bool is_available() const { return is_available_; }

  /*!\brief This variant of the accessor is not atomic. It performs no
   * locking.
   */
  const JointStateMap &current_state_map() const;

  /*!\brief This variant is atomic. The only way to ensure no race condition
   * is to fully copy the internal state out.
   */
  JointStateMap current_state_map_atomic() const;

  /*!\brief Returns a vector of position values for the given named joints
   * retaining the specified joint order.
   */
  std::vector<double> CurrentPositions(const std::vector<std::string> &names) const;

  /*!\brief Returns the state of the system stamped with the minimum time
   * stamp (oldest) of all the active joints. The state is the positions,
   * velocities, and efforts of the active joints.
   */
  StampedState CurrentState() const;

  /*!\brief Accessors implementing the StateListener API.
   */
  StampedState State() const override { return CurrentState(); }
  bool IsReady() const override;

  /*!\brief Accessor for the vector of required joints.
   */
  const std::vector<std::string> &required_joints() const { return required_joints_; }

 protected:
  /*!\brief Initialize to listen on the specified topic for the given required joints. If
   * wait_until_available is true, blocks waiting for the joints to be available before
   * returning, polling at the given poll rate.
   */
  void Init(const std::string &topic,
            const std::vector<std::string> &required_joints,
            bool wait_until_available,
            int poll_rate);

  // Calls to this method should be externally protected through a msg_mutex_
  // lock.
  bool HasRequiredJoints() const;

  /*!\brief Callback consuming sensor_msgs::JointState messages. Writes the
   * information into the internal current_state_map_.
   */
  void Callback(const sensor_msgs::JointState &joint_states);

  mutable std::mutex msg_mutex_;

  std::vector<std::string> required_joints_;
  std::unordered_map<std::string, SingleJointState> current_state_map_;

  ros::NodeHandle node_handle_;
  ros::Subscriber subscriber_;

  std::atomic_bool is_available_;
};

//------------------------------------------------------------------------------
// Helper methods
//------------------------------------------------------------------------------

std::unordered_map<std::string, SingleJointState> ToMap(
    const sensor_msgs::JointState &joint_states);

std::vector<double> ExtractNamedPositions(
    const std::unordered_map<std::string, SingleJointState> &jstates,
    const std::vector<std::string> &names);

std::vector<double> ExtractNamedPositions(const sensor_msgs::JointState &joint_states,
                                          const std::vector<std::string> &names);

}  // namespace util
}  // namespace cortex
6,110
C
34.736842
99
0.701964
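Because each incoming sensor_msgs/JointState message may cover only a subset of joints, the listener above merges messages into a per-joint map rather than storing whole messages. A minimal thread-safe sketch of that merging pattern in Python (the class name and layout are hypothetical, not the cortex_control API):

import threading

class LatestJointStates:
    """Accumulate the most recent state per joint, as JointStateListener does.

    Merging into a dict (instead of keeping the last message) is what makes
    partial JointState messages safe to consume.
    """
    def __init__(self, required_joints):
        self._lock = threading.Lock()
        self._required = set(required_joints)
        self._states = {}  # joint name -> (position, velocity, stamp)

    def callback(self, msg):
        with self._lock:
            for i, name in enumerate(msg.name):
                vel = msg.velocity[i] if i < len(msg.velocity) else 0.0
                self._states[name] = (msg.position[i], vel, msg.header.stamp)

    def is_available(self):
        # Latches true once every required joint has been seen at least once.
        with self._lock:
            return self._required.issubset(self._states)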
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/stamped_state.h
/* * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <cstdint> #include <Eigen/Core> namespace cortex { namespace util { struct StampedState { double time; Eigen::VectorXd q; Eigen::VectorXd qd; Eigen::VectorXd u; int dim() const { return q.size(); } StampedState() = default; StampedState(uint32_t num_dim); StampedState(double time, const Eigen::VectorXd &q, const Eigen::VectorXd &qd); virtual ~StampedState() = default; bool HasU() const; }; } // namespace util } // namespace cortex
933
C
23.578947
81
0.732047
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/state_listener.h
/* * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <atomic> #include <Eigen/Core> #include "cortex/util/stamped_state.h" namespace cortex { namespace util { /** * \brief Abstract state listener. */ class StateListener { public: /** * \brief Creates a StateListener. */ StateListener(); /** * \brief Default virtual destructor. */ virtual ~StateListener() = default; /** * \brief Returns the latest state. */ virtual StampedState State() const = 0; /** * \brief Returns true if the state is available. */ virtual bool IsReady() const = 0; /** * \brief Blocking call to wait until the state is available. */ virtual void WaitForReady(double poll_hz = 100) const; private: // This is an alternative and ros free implementation of the thread SIGINT // signal handling // static void signal_handler(int signal); // static std::atomic_bool interruped_; }; } // namespace util } // namespace cortex
1,384
C
21.704918
77
0.700145
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_util.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/util/ros_util.h" #include <vector> #include <ros/package.h> #include <ros/ros.h> #include "cortex/util/string.h" namespace cortex { namespace util { void WaitForConnections(const ros::Publisher& pub, double stable_time, double rate_hz) { std::cout << "Waiting for connections" << std::flush; auto rate = ros::Rate(rate_hz); auto last_change_time = ros::Time::now(); auto num_con = pub.getNumSubscribers(); while (ros::ok()) { std::cout << '.' << std::flush; auto curr_time = ros::Time::now(); auto latest_num_con = pub.getNumSubscribers(); auto elapse_sec = (curr_time - last_change_time).toSec(); if (latest_num_con != num_con) { num_con = latest_num_con; std::cout << num_con << std::flush; last_change_time = curr_time; } else if (latest_num_con > 0 && latest_num_con == num_con && elapse_sec >= stable_time) { std::cout << "<stable>" << std::endl; break; } rate.sleep(); } } std::string ExpandRosPkgRelPathRaw(const std::string& pkg_relative_path) { // Parse out the json config file. char delim = '/'; std::vector<std::string> tokens = Split(pkg_relative_path, delim); if (tokens.size() == 0) { return ""; } else if (tokens.size() < 2) { return tokens.front(); } auto pkg_name = tokens.front(); auto rel_path = Join(tokens, delim, 1); // Join all but first. auto package_path = ros::package::getPath(pkg_name); auto full_path = package_path + delim + rel_path; return full_path; } std::string ExpandRosPkgRelPath(const std::string& pkg_relative_path) { std::string expected_prefix = "package://"; if (pkg_relative_path.find(expected_prefix) == 0) { return ExpandRosPkgRelPathRaw(pkg_relative_path.substr(expected_prefix.size())); } else { // The string doesn't start with the expected prefix, but we're still // supporting that for the time being. WARNING -- this functionality // is DEPRECATED; we'll require the package:// prefix soon. ROS_WARN_STREAM( "Package expansion without the 'package://' prefix is DEPRECATED: " << pkg_relative_path); return ExpandRosPkgRelPathRaw(pkg_relative_path); } } } // namespace util } // namespace cortex
2,670
C++
31.57317
98
0.671161
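ExpandRosPkgRelPath above resolves package://<pkg>/<rel_path> URIs, tolerating (but deprecating) the bare form without the prefix. A compact Python sketch of the same resolution, assuming a get_pkg_path callable standing in for ros::package::getPath (e.g. rospkg.RosPack().get_path):

def expand_ros_pkg_rel_path(pkg_relative_path, get_pkg_path):
    """Expand 'package://<pkg>/<rel_path>' into an absolute path."""
    prefix = "package://"
    if pkg_relative_path.startswith(prefix):
        pkg_relative_path = pkg_relative_path[len(prefix):]
    # Split on '/', dropping empty tokens (mirrors cortex::util::Split).
    tokens = [t for t in pkg_relative_path.split("/") if t]
    if not tokens:
        return ""
    if len(tokens) < 2:
        return tokens[0]
    # First token is the package name; the rest is the in-package path.
    return get_pkg_path(tokens[0]) + "/" + "/".join(tokens[1:])

Usage would look like expand_ros_pkg_rel_path("package://cortex_control/config/command_stream_interpolator.yaml", rospkg.RosPack().get_path).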
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_publisher.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ //! @file //! @brief A simple and general joint state listener to collect the latest information //! about the robot's state. #pragma once #include <vector> #include <ros/ros.h> #include <sensor_msgs/JointState.h> #include "cortex/math/state.h" namespace cortex { namespace util { class JointStatePublisher { public: JointStatePublisher(const std::vector<std::string>& joint_names, const std::string& topic, int queue_size) : joint_names_(joint_names), seq_(0) { ros::NodeHandle node_handle; pub_ = node_handle.advertise<sensor_msgs::JointState>(topic, queue_size); } void Publish(const math::State& state) { sensor_msgs::JointState msg; msg.header.seq = seq_++; msg.header.stamp = ros::Time::now(); msg.name = joint_names_; msg.position = std::vector<double>(state.pos().data(), state.pos().data() + state.pos().size()); msg.velocity = std::vector<double>(state.vel().data(), state.vel().data() + state.vel().size()); pub_.publish(msg); } protected: ros::Publisher pub_; std::vector<std::string> joint_names_; int32_t seq_; }; } // namespace util } // namespace cortex
1,631
C
27.631578
100
0.679951
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/string.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/util/string.h" #include <sstream> namespace cortex { namespace util { std::vector<std::string> Split(const std::string& str, char delimiter){ std::vector<std::string> tokens; std::string token; std::istringstream token_stream(str); while (std::getline(token_stream, token, delimiter)) { if (token.size() > 0) { tokens.push_back(token); } } return tokens; } std::string Join(const std::vector<std::string>& tokens, char delimiter, size_t pos) { std::stringstream ss; for (auto i = pos; i < tokens.size(); ++i) { if (i > pos) ss << delimiter; ss << tokens[i]; } return ss.str(); } } // namespace util } // namespace cortex
1,133
C++
26.658536
86
0.695499
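Split drops empty tokens and Join can start at an offset, which is exactly how ExpandRosPkgRelPathRaw peels the package name off a path. A quick Python mirror of those semantics (helper names are illustrative):

def split(s, delim):
    """Mirror cortex::util::Split: empty tokens are dropped."""
    return [t for t in s.split(delim) if t]

def join(tokens, delim, pos=0):
    """Mirror cortex::util::Join: join tokens starting at index pos."""
    return delim.join(tokens[pos:])

# Round trip matching how ExpandRosPkgRelPathRaw uses these helpers:
tokens = split("cortex_control/config/command_stream_interpolator.yaml", "/")
assert tokens[0] == "cortex_control"
assert join(tokens, "/", 1) == "config/command_stream_interpolator.yaml"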
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/yaml.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <ros/assert.h>
#include <yaml-cpp/yaml.h>

namespace cortex {
namespace util {

//! Extract the named YAML field or assert if the field doesn't exist.
//! Marked inline because it's a non-template function defined in a header;
//! without inline, including this header from multiple translation units
//! would violate the one-definition rule.
inline YAML::Node GetFieldOrDie(const YAML::Node& node, const std::string& name) {
  auto field = node[name];
  ROS_ASSERT_MSG(field, "YAML field not found: %s", name.c_str());
  return field;
}

//! Extract a field of the specified type from the YAML node or assert if the field doesn't exist.
template <class T>
T GetOrDie(const YAML::Node& node, const std::string& name) {
  auto field = node[name];
  ROS_ASSERT_MSG(field, "Could not extract YAML field: %s", name.c_str());
  return field.as<T>();
}

}  // namespace util
}  // namespace cortex
1,162
C
31.305555
98
0.728055
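The GetFieldOrDie/GetOrDie pattern trades silent defaults for loud failures on missing config fields. A rough Python analogue (the config path below is illustrative):

import yaml

def get_or_die(node, name):
    """Fail loudly on a missing YAML field rather than returning None."""
    assert name in node, "Could not extract YAML field: %s" % name
    return node[name]

# Hypothetical usage against a command_stream_interpolator.yaml config:
with open("command_stream_interpolator.yaml") as f:  # path is illustrative
    config = yaml.safe_load(f)
delay = get_or_die(get_or_die(config, "params"), "interpolation_delay")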
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/joint_state_listener.cpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/util/joint_state_listener.h" #include <atomic> #include <mutex> #include <ros/assert.h> namespace cortex { namespace util { //------------------------------------------------------------------------------ // JointStateListener implementation //------------------------------------------------------------------------------ void JointStateListener::Init(const std::string &topic, const std::vector<std::string> &required_joints, int poll_rate) { Init(topic, required_joints, true, poll_rate); } void JointStateListener::Init(const std::string &topic) { Init(topic, std::vector<std::string>()); } void JointStateListener::Init(const std::string &topic, int poll_rate) { Init(topic, std::vector<std::string>(), poll_rate); } void JointStateListener::Init(const std::string &topic, const std::vector<std::string> &required_joints) { Init(topic, required_joints, false, 0); } // This version is protected (internal). void JointStateListener::Init(const std::string &topic, const std::vector<std::string> &required_joints, bool wait_until_available, int poll_rate) { is_available_ = false; required_joints_ = required_joints; subscriber_ = node_handle_.subscribe(topic, 10, // Queue size. &JointStateListener::Callback, this); if (wait_until_available) { WaitUntilAvailable(poll_rate); } } void JointStateListener::InitWithZero(const std::string &topic, const std::vector<std::string> &required_joints) { required_joints_ = required_joints; subscriber_ = node_handle_.subscribe(topic, 10, // Queue size. &JointStateListener::Callback, this); for (uint32_t i = 0; i < required_joints_.size(); ++i) { current_state_map_[required_joints_[i]] = SingleJointState(0.0, 0.0, 0., ros::Time::now()); } is_available_ = true; } void JointStateListener::SetRequiredJoints(const std::vector<std::string> &required_joints) { required_joints_ = required_joints; } void JointStateListener::WaitUntilAvailable(int poll_rate) const { ros::Rate rate(poll_rate); while (ros::ok() && !is_available()) { rate.sleep(); } } void JointStateListener::Callback(const sensor_msgs::JointState &joint_states) { std::lock_guard<std::mutex> guard(msg_mutex_); auto n = joint_states.name.size(); ROS_ASSERT(joint_states.position.size() == n); ROS_ASSERT(joint_states.velocity.size() == n); ROS_ASSERT(joint_states.effort.size() == 0 || joint_states.effort.size() == n); bool has_efforts = (joint_states.effort.size() > 0); for (uint32_t i = 0; i < n; ++i) { current_state_map_[joint_states.name[i]] = SingleJointState(joint_states.position[i], joint_states.velocity[i], has_efforts ? joint_states.effort[i] : 0., joint_states.header.stamp); } if (!is_available_) { // The method HasRequiredJoints(), which requires looping through the // required joints to see if they're ready, is only called during the // period of time when we're waiting for the first full set of // information to be available. 
is_available_ = HasRequiredJoints(); } } const std::unordered_map<std::string, SingleJointState> &JointStateListener::current_state_map() const { return current_state_map_; } std::unordered_map<std::string, SingleJointState> JointStateListener::current_state_map_atomic() const { std::lock_guard<std::mutex> guard(msg_mutex_); return current_state_map_; } std::vector<double> JointStateListener::CurrentPositions( const std::vector<std::string> &names) const { std::lock_guard<std::mutex> guard(msg_mutex_); return ExtractNamedPositions(current_state_map_, names); } StampedState JointStateListener::CurrentState() const { std::lock_guard<std::mutex> guard(msg_mutex_); StampedState state(required_joints_.size()); double min_time = 0.; for (uint32_t i = 0; i < required_joints_.size(); ++i) { const auto &name = required_joints_[i]; auto access_iter = current_state_map_.find(name); ROS_ASSERT_MSG(access_iter != current_state_map_.end(), "Required joint not found: %s", name.c_str()); const auto &single_joint_state = access_iter->second; state.q(i) = single_joint_state.position; state.qd(i) = single_joint_state.velocity; state.u(i) = single_joint_state.effort; double time = single_joint_state.stamp.toSec(); if (i == 0 || time < min_time) min_time = time; } state.time = min_time; return state; } bool JointStateListener::IsReady() const { return is_available(); } //------------------------------------------------------------------------------ // Helper methods implementation //------------------------------------------------------------------------------ bool JointStateListener::HasRequiredJoints() const { bool has_required_joints = true; std::cout << "Checking required joints: "; for (const auto &entry_name : required_joints_) { std::cout << "[" << entry_name << "("; if (current_state_map_.find(entry_name) == current_state_map_.end()) { std::cout << "-"; has_required_joints = false; } else { std::cout << "+"; } std::cout << ")]"; } std::cout << "|" << std::endl; return has_required_joints; } std::unordered_map<std::string, SingleJointState> ToMap( const sensor_msgs::JointState &joint_states) { auto n = joint_states.name.size(); ROS_ASSERT(joint_states.position.size() == n); ROS_ASSERT(joint_states.velocity.size() == n); ROS_ASSERT(joint_states.effort.size() == n); std::unordered_map<std::string, SingleJointState> js_map; for (uint32_t i = 0; i < n; ++i) { js_map[joint_states.name[i]] = SingleJointState(joint_states.position[i], joint_states.velocity[i], joint_states.effort[i], joint_states.header.stamp); } return js_map; } std::vector<double> ExtractNamedPositions( const std::unordered_map<std::string, SingleJointState> &jstates, const std::vector<std::string> &names) { std::vector<double> positions; for (const auto &name : names) { auto access_iter = jstates.find(name); ROS_ASSERT(access_iter != jstates.end()); positions.push_back(access_iter->second.position); } return positions; } std::vector<double> ExtractNamedPositions(const sensor_msgs::JointState &joint_states, const std::vector<std::string> &names) { return ExtractNamedPositions(ToMap(joint_states), names); } } // namespace util } // namespace cortex
7,532
C++
35.043062
100
0.595327
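CurrentState above stamps the assembled state with the oldest per-joint timestamp, the conservative choice when different joints were last updated by different messages. A small sketch of that aggregation (the data layout is hypothetical):

def current_state(state_map, required_joints):
    """Assemble (q, qd, u, time) in required-joint order, stamping the result
    with the minimum (oldest) per-joint stamp, as in
    JointStateListener::CurrentState.

    state_map maps joint name -> (position, velocity, effort, stamp_sec).
    """
    q, qd, u, min_time = [], [], [], None
    for name in required_joints:
        pos, vel, eff, stamp = state_map[name]  # KeyError ~ ROS_ASSERT in C++.
        q.append(pos)
        qd.append(vel)
        u.append(eff)
        min_time = stamp if min_time is None else min(min_time, stamp)
    return q, qd, u, min_time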
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/string.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <string> #include <vector> namespace cortex { namespace util { //! Split the specified string `str` into a set of strings delimited by the `delimiter` character. //! If the delimiter is not found, the entire string is returned as a single token. The returned //! vector always contains, in union, the set of all characters in the string that aren't //! delimiters. std::vector<std::string> Split(const std::string& str, char delimiter); //! Join the tokens together separated by the specified `delimiter` character. Start with token //! `pos`. By default, `pos` is zero, so all tokens are included. std::string Join(const std::vector<std::string>& tokens, char delimiter, size_t pos = 0); } // namespace util } // namespace cortex
1,209
C
38.032257
98
0.748553
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/set_state_listener.h
/* * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <Eigen/Core> #include "cortex/util/state_listener.h" namespace cortex { namespace util { /** * \brief This is a very simple state listener that just reports its set state. */ class SetStateListener : public StateListener { public: SetStateListener() : is_set_(false) {} StampedState State() const override { return state_; } bool IsReady() const override { return is_set_; } void set_stamped_state(const StampedState &state) { state_ = state; } protected: bool is_set_; StampedState state_; }; } // namespace util } // namespace cortex
1,028
C
26.81081
79
0.737354
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/stamped_state.cpp
/* * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include <ros/ros.h> #include "cortex/util/state_listener.h" namespace cortex { namespace util { bool StampedState::HasU() const { return u.size() > 0; } StampedState::StampedState(uint32_t num_dim) : time(0.), q(Eigen::VectorXd::Zero(num_dim)), qd(Eigen::VectorXd::Zero(num_dim)), u(Eigen::VectorXd::Zero(num_dim)) {} StampedState::StampedState(double time, const Eigen::VectorXd &q, const Eigen::VectorXd &qd) : time(time), q(q), qd(qd) {} } // namespace util } // namespace cortex
964
C++
30.129031
92
0.715768
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_util.h
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <iostream>
#include <sstream>

#include <Eigen/Core>
#include <ros/ros.h>
#include <ros/serialization.h>
#include <yaml-cpp/yaml.h>

namespace cortex {
namespace util {

//------------------------------------------------------------------------------
// Parameter helpers
//------------------------------------------------------------------------------

/*!\brief Generic, more convenient ROS parameter retrieval method that
 * explicitly returns the parameter value.
 *
 * Call as:
 *
 *   auto value = GetParam("/robot/step_size", .5);
 *   auto str_value = GetParam("/robot/controller_name", "lqr_controller");
 *
 * Infers the type from the type of the default value passed in.
 *
 * TODO: Figure out a way to get this to work with passing in const char*
 * string literals.
 */
template <class value_t>
value_t GetParam(const std::string& param_name, const value_t& default_value) {
  value_t param_value;
  ros::param::param(param_name, param_value, default_value);
  return param_value;
}

/*!\brief Call as: auto value = GetParam<double>("/robot/step_size"); Need to
 * explicitly supply the template argument for the parameter type.
 */
template <class value_t>
value_t GetParam(const std::string& param_name) {
  value_t param_value;
  ros::param::get(param_name, param_value);
  return param_value;
}

/*!\brief Get all parameters under a particular namespace.
 */
std::vector<std::string> GetNsParams(const std::string& ns);

/*!\brief Returns all of the names and corresponding tags under the given
 * namespace.
 *
 * Returns a vector of pairs with the first element being a name and the second
 * being a vector of strings for the tags:
 *
 *   /ns/first/1
 *   /ns/first/2
 *   /ns/second
 *   /ns/third/1
 *   /ns/third/2
 *   /ns/third/3
 *
 * Corresponding return structure:
 *
 *   { "first", {"1", "2"},
 *     "second", {},
 *     "third", {"1", "2", "3"} }
 */
void GetNsElements(const std::string& ns, std::map<std::string, std::set<std::string>>& elements);

//------------------------------------------------------------------------------
// Subscription helpers
//------------------------------------------------------------------------------

/*!\brief Wait until connections to the publisher stabilize.
 *
 * Checks at a rate of rate_hz, and requires that the number of subscribers
 * doesn't change for stable_time seconds before considering the connection to
 * be stable and returning.
 */
void WaitForConnections(const ros::Publisher& pub, double stable_time = .2, double rate_hz = 30.);

//------------------------------------------------------------------------------
// Package helpers
//------------------------------------------------------------------------------

/*!\brief Converts a ROS package relative path into a full path.
 *
 * The ROS package relative path should take the form:
 *   package://<pkg_name>/<rel_path>
 *
 * Returns <global_path_to_pkg>/<rel_path>
 *
 * For legacy reasons, currently the package:// prefix can be left off, but
 * that functionality is nonstandard with ROS and now deprecated. In the near
 * future, we'll require these strings to be prefixed with package://.
*/ std::string ExpandRosPkgRelPath(const std::string& pkg_relative_path); } // namespace util } // namespace cortex
3,724
C
30.837607
98
0.61493
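GetParam infers the parameter type from the supplied default so call sites stay terse. rospy offers the same ergonomics out of the box; for comparison (parameter names are only examples):

import rospy

# rospy.get_param already behaves like the templated GetParam with a default:
step_size = rospy.get_param("/robot/step_size", 0.5)
controller_name = rospy.get_param("/robot/controller_name", "lqr_controller")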
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/state_listener.cpp
/* * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cortex/util/state_listener.h" #include <chrono> #include <csignal> #include <thread> #include <ros/ros.h> namespace cortex { namespace util { // std::atomic_bool StateListener::interruped_(false); StateListener::StateListener() { // std::signal(SIGINT, &StateListener::signal_handler); } void StateListener::WaitForReady(double poll_hz) const { // This is an alternative and ros free implementation of the thread SIGINT // signal handling // auto sleep_duration = std::chrono::duration<double>(1. / poll_hz); // while (!interruped_.load() && !IsReady()) { // std::this_thread::sleep_for(sleep_duration); // } ros::Rate rate(poll_hz); while (ros::ok() && !IsReady()) { rate.sleep(); } } // void StateListener::signal_handler(int signal) { interruped_.store(true); } } // namespace util } // namespace cortex
1,301
C++
26.702127
78
0.712529
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/src/cortex/util/ros_message_listener.h
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include <atomic> #include <functional> #include <mutex> #include <vector> #include <ros/ros.h> namespace cortex { namespace util { // Generic message listener that saves off the latest message and makes it available atomically. // // Includes flag accessor is_available() saying whether the first message has been received. // Thereafter, it always reports the last received message through GetLatestMessage(). There is no // timeout mechanism on these messages, so once is_available() returns true for the first time, it // will be true for every call after that. template <class msg_t> class RosMessageListener { public: RosMessageListener(const std::string& topic, int queue_size) { is_available_ = false; ros::NodeHandle node_handle; sub_ = node_handle.subscribe(topic, queue_size, &RosMessageListener<msg_t>::Callback, this); } void Callback(const msg_t& msg) { std::lock_guard<std::mutex> guard(mutex_); msg_ = msg; is_available_ = true; for (auto& f : callbacks_) { f(msg_); } } bool is_available() const { return is_available_; } msg_t GetLatestMessage() const { std::lock_guard<std::mutex> guard(mutex_); return msg_; } void RegisterCallback(const std::function<void(const msg_t&)>& f) { callbacks_.push_back(f); } protected: mutable std::mutex mutex_; ros::Subscriber sub_; std::atomic_bool is_available_; msg_t msg_; std::vector<std::function<void(const msg_t&)>> callbacks_; }; } // namespace util } // namespace cortex
1,980
C
28.132353
98
0.712626
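RosMessageListener generalizes the joint-state listener: keep the newest message of any type, latch availability after the first one, and fan out to registered callbacks. A rospy sketch of the same pattern (the class layout is an assumption, not this package's Python API):

import threading
import rospy

class LatestMessageListener:
    """Save the newest message atomically; is_available() latches true."""

    def __init__(self, topic, msg_type, queue_size=1):
        self._lock = threading.Lock()
        self._msg = None
        self._callbacks = []
        self._sub = rospy.Subscriber(topic, msg_type, self._on_msg,
                                     queue_size=queue_size)

    def _on_msg(self, msg):
        # Like the C++ version, callbacks run under the lock so they see a
        # consistent latest message.
        with self._lock:
            self._msg = msg
            for f in self._callbacks:
                f(msg)

    def is_available(self):
        with self._lock:
            return self._msg is not None

    def get_latest_message(self):
        with self._lock:
            return self._msg

    def register_callback(self, f):
        self._callbacks.append(f)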
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control/config/command_stream_interpolator.yaml
params: interpolation_delay: .1 use_smoothing_interpolator: true blending_duration: 2. backend_timeout: .1 # 6 backend cycles at dt = 1/60 ros_topics: joint_state: /robot/joint_state # Only used by main(). rmpflow_commands: command: /cortex/arm/command ack: /cortex/arm/command/ack suppress: /cortex/arm/command/suppress interpolated: /cortex/arm/command/interpolated
426
YAML
31.846151
58
0.671362
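Note how this config mirrors the convenience Init overload of CommandStreamInterpolator: the ack, suppress, and interpolated topics are all derived from the base command topic. A short Python check of that convention (the file path is illustrative):

import yaml

with open("config/command_stream_interpolator.yaml") as f:  # illustrative path
    cfg = yaml.safe_load(f)

command_topic = cfg["ros_topics"]["rmpflow_commands"]["command"]
# The derived-topic convention used by the convenience Init overload:
assert cfg["ros_topics"]["rmpflow_commands"]["ack"] == command_topic + "/ack"
assert cfg["ros_topics"]["rmpflow_commands"]["suppress"] == command_topic + "/suppress"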
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_tutorials/scripts/ros_publisher.py
#!/usr/bin/env python

# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import rospy
from sensor_msgs.msg import JointState
import numpy as np
import time

rospy.init_node("test_rosbridge", anonymous=True)

pub = rospy.Publisher("/joint_command", JointState, queue_size=10)
joint_state = JointState()

joint_state.name = [
    "panda_joint1",
    "panda_joint2",
    "panda_joint3",
    "panda_joint4",
    "panda_joint5",
    "panda_joint6",
    "panda_joint7",
    "panda_finger_joint1",
    "panda_finger_joint2",
]

num_joints = len(joint_state.name)

# Make sure Kit's editor is playing so it can receive messages.
## joint_state.position = np.array([0.0] * num_joints)

default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]

# Limiting the movements to a smaller range (this is not the range of the
# robot, just the range of the movement).
max_joints = np.array(default_joints) + 0.5
min_joints = np.array(default_joints) - 0.5

# Position-control the robot to wiggle around each joint.
time_start = time.time()
rate = rospy.Rate(20)
while not rospy.is_shutdown():
    joint_state.position = np.sin(time.time() - time_start) * (max_joints - min_joints) * 0.5 + default_joints
    pub.publish(joint_state)
    rate.sleep()
1,618
Python
29.547169
111
0.717553
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_tutorials/scripts/ros_service_client.py
#!/usr/bin/env python # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import rospy import numpy as np from isaac_ros_messages.srv import IsaacPose from isaac_ros_messages.srv import IsaacPoseRequest from geometry_msgs.msg import Pose def teleport_client(msg): rospy.wait_for_service("teleport") try: teleport = rospy.ServiceProxy("teleport", IsaacPose) teleport(msg) return except rospy.ServiceException as e: print("Service call failed: %s" % e) # compose teleport messages cube_pose = Pose() cube_pose.position.x = np.random.uniform(-2, 2) cube_pose.position.y = 0 cube_pose.position.z = 0 cube_pose.orientation.w = 1 cube_pose.orientation.x = 0 cube_pose.orientation.y = 0 cube_pose.orientation.z = 0 cone_pose = Pose() cone_pose.position.x = 0 cone_pose.position.y = np.random.uniform(-2, 2) cone_pose.position.z = 0 cone_pose.orientation.w = 1 cone_pose.orientation.x = 0 cone_pose.orientation.y = 0 cone_pose.orientation.z = 0 teleport_msg = IsaacPoseRequest() teleport_msg.names = ["/World/Cube", "/World/Cone"] teleport_msg.poses = [cube_pose, cone_pose] teleport_client(teleport_msg)
1,530
Python
27.886792
76
0.744444
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_moveit/scripts/panda_combined_joints_publisher.py
#!/usr/bin/env python

# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import rospy
from sensor_msgs.msg import JointState

joints_dict = {}


def joint_states_callback(message):

    joint_commands = JointState()

    joint_commands.header = message.header

    for i, name in enumerate(message.name):

        # Storing arm joint names and positions
        joints_dict[name] = message.position[i]

        if name == "panda_finger_joint1":

            # Adding additional panda_finger_joint2 state info (extra joint used in Isaac Sim).
            # panda_finger_joint2 mirrors panda_finger_joint1.
            joints_dict["panda_finger_joint2"] = message.position[i]

    # Materialize as lists: in Python 3, dict_keys/dict_values views are not
    # valid rospy message field values.
    joint_commands.name = list(joints_dict.keys())
    joint_commands.position = list(joints_dict.values())

    # Publishing combined message containing all arm and finger joints
    pub.publish(joint_commands)

    return


if __name__ == "__main__":
    rospy.init_node("panda_combined_joints_publisher")
    pub = rospy.Publisher("/joint_command", JointState, queue_size=1)
    rospy.Subscriber("/joint_command_desired", JointState, joint_states_callback, queue_size=1)
    rospy.spin()
1,535
Python
30.346938
95
0.718567
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/interpolated_command_stream_controller.cpp
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #include <iomanip> #include <iostream> #include <sstream> #include <controller_interface/controller_base.h> #include <cortex/math/interpolation/pos_vel_acc.h> #include <hardware_interface/hardware_interface.h> #include <pluginlib/class_list_macros.h> #include <ros/ros.h> #include <std_msgs/String.h> #include "cortex/control/builders.h" #include "cortex/util/ros_util.h" // TODO: verify has ExpandRosPkgRelPath() #include "cortex/control/franka/interpolated_command_stream_controller.h" namespace cortex { namespace control { namespace franka { bool InterpolatedCommandStreamController::init(hardware_interface::RobotHW *robot_hardware, ros::NodeHandle &node_handle) { // Initialize connection to the robot and obtain joint handles joint_interface_ = robot_hardware->get<hardware_interface::PositionJointInterface>(); if (joint_interface_ == nullptr) { ROS_ERROR("InterpolatedCommandStreamController: Error getting position joint " "interface from hardware!"); return false; } std::vector<std::string> joint_names; if (!node_handle.getParam("joint_names", joint_names)) { ROS_ERROR("InterpolatedCommandStreamController: Could not parse joint names"); } if (joint_names.size() != 7) { ROS_ERROR_STREAM("InterpolatedCommandStreamController: Wrong number of joint names, got" << joint_names.size() << " instead of 7 names!"); return false; } joint_handles_.resize(joint_names.size()); for (size_t i = 0; i < joint_names.size(); ++i) { try { joint_handles_[i] = joint_interface_->getHandle(joint_names[i]); } catch (hardware_interface::HardwareInterfaceException const &e) { ROS_ERROR_STREAM( "InterpolatedCommandStreamController: Exception getting joint handles: " << e.what()); return false; } } auto command_stream_interpolator_config = YAML::LoadFile( cortex::util::ExpandRosPkgRelPath("package://cortex_control_franka/config/command_stream_interpolator.yaml")); command_stream_interpolator_ = cortex::control::LoadCommandStreamInterpolatorFromYaml( command_stream_interpolator_config); return true; } void InterpolatedCommandStreamController::starting(ros::Time const &time) { initialize_blending_ = true; print_period_ = ros::Duration(1.); start_time_ = time; controller_time_ = time; next_print_time_ = time; command_stream_interpolator_->Start(); } Eigen::VectorXd InterpolatedCommandStreamController::current_position() const { Eigen::VectorXd q(joint_handles_.size()); for (size_t i = 0; i < joint_handles_.size(); ++i) { q[i] = joint_handles_[i].getPosition(); } return q; } void InterpolatedCommandStreamController::send_current_position() { send_position_command(current_position()); } void InterpolatedCommandStreamController::send_position_command(Eigen::VectorXd const &q) { for (size_t i = 0; i < joint_handles_.size(); ++i) { joint_handles_[i].setCommand(q[i]); } } void InterpolatedCommandStreamController::update(ros::Time const &time, ros::Duration const &period) { // Update time information. 
// // WARNING: This method of accumulation into a duration using the period // provided to the method is the only way of handling time // that works, all other options will result in the robot // producing motor noises during motion. controller_time_ += period; bool is_interpolator_active; send_position_command(command_stream_interpolator_->NextCommand( controller_time_, current_position(), &is_interpolator_active)); if (time >= next_print_time_) { std::cout << std::setprecision(10) << "[franka] time: " << (time - start_time_).toSec() << ", control_time: " << (controller_time_ - start_time_).toSec() << ", now: " << (ros::Time::now() - start_time_).toSec() << ", period: " << period.toSec() << std::endl; next_print_time_ += print_period_; } } } // namespace franka } // namespace control } // namespace cortex PLUGINLIB_EXPORT_CLASS(cortex::control::franka::InterpolatedCommandStreamController, controller_interface::ControllerBase)
4,712
C++
37.317073
116
0.686121
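The WARNING in update() above concerns the clock source: controller time must be advanced by accumulating the period handed in by the control loop rather than re-sampled from a wall clock, or the jitter audibly shows up as motor noise. A schematic of the accumulation pattern (names and loop structure are illustrative, not the franka_hw API):

def run_control_loop(start_time, control_periods, next_command):
    """Advance controller time by accumulating the loop period, as in
    InterpolatedCommandStreamController::update. Re-sampling a wall clock
    here instead would inject jitter into the interpolated command stream.
    """
    controller_time = start_time
    for period in control_periods:  # e.g. a steady 1 ms from the robot
        controller_time += period   # smooth, monotonic command timeline
        q_des = next_command(controller_time)
        # ... send q_des to the joint handles ...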
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/franka_gripper_commander.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

# Simple action client interface to the gripper action server.

from __future__ import absolute_import, division, print_function, unicode_literals

from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, MoveAction, MoveGoal
import numpy as np
import rospy
import actionlib
import argparse

# A gripper opening width of 0.8 appears fully open, but Franka claims it will cause issues.
# The nominal maximum opening width is 0.7. Here we compromise between the two.
open_pos = 0.75


class FrankaGripperCommander(object):
    def __init__(self, verbose=False):
        self.verbose = verbose
        self.grasp_client = actionlib.SimpleActionClient("/franka_gripper/grasp", GraspAction)
        self.move_client = actionlib.SimpleActionClient("/franka_gripper/move", MoveAction)

        if self.verbose:
            print("Waiting for grasp client...")
        self.grasp_client.wait_for_server()

        if self.verbose:
            print("Waiting for move client...")
        self.move_client.wait_for_server()

    def close(self, width=0.0, speed=0.03, force=40.0, grasp_eps=(0.2, 0.2), wait=True):
        grasp_goal = GraspGoal()
        grasp_goal.width = width
        grasp_goal.speed = speed
        grasp_goal.force = force
        grasp_goal.epsilon = GraspEpsilon(inner=grasp_eps[0], outer=grasp_eps[1])

        self.grasp_client.send_goal(grasp_goal)
        if wait:
            self.grasp_client.wait_for_result()
            if self.verbose:
                print("result:", self.grasp_client.get_result())

    def move(self, width, speed=0.03, wait=True):
        move_goal = MoveGoal()
        move_goal.width = width
        move_goal.speed = speed

        print("sending goal")
        self.move_client.send_goal(move_goal)
        if wait:
            print("waiting for finish")
            self.move_client.wait_for_result()
            if self.verbose:
                print("result:", self.move_client.get_result())
            print("move complete")

    def open(self, speed=0.03, wait=True):
        self.move(open_pos, speed=speed, wait=wait)


if __name__ == "__main__":

    def Grasp(args):
        print("Grasping...")
        client = actionlib.SimpleActionClient("/franka_gripper/grasp", GraspAction)

        # Waits until the action server has started up and started
        # listening for goals.
        client.wait_for_server()

        # Creates a goal to send to the action server.
        grasp_goal = GraspGoal()
        grasp_goal.width = args.grasp_width
        grasp_goal.speed = args.speed
        grasp_goal.force = args.force
        grasp_goal.epsilon = GraspEpsilon(inner=args.eps_inner, outer=args.eps_outer)

        # Sends the goal to the action server.
        print(">>>>", grasp_goal)
        client.send_goal(grasp_goal)

        # Waits for the server to finish performing the action.
        client.wait_for_result()

        # Prints out the result of executing the action
        print("result:", client.get_result())

    def Move(args):
        print("Moving...")
        client = actionlib.SimpleActionClient("/franka_gripper/move", MoveAction)

        # Waits until the action server has started up and started
        # listening for goals.
        client.wait_for_server()

        # Creates a goal to send to the action server. The move action takes a
        # MoveGoal (width and speed only), not a GraspGoal.
        move_goal = MoveGoal()
        move_goal.width = args.width
        move_goal.speed = args.speed

        # Sends the goal to the action server.
        client.send_goal(move_goal)

        # Waits for the server to finish performing the action.
        client.wait_for_result()

        # Prints out the result of executing the action
        print("result:", client.get_result())

    def FrankaGripperCommanderTest(args):
        print("Creating gripper commander...")
        gripper_commander = FrankaGripperCommander()

        print("Closing...")
        gripper_commander.close()

        print("Opening all the way...")
        gripper_commander.move(0.08)

        print("Opening to 0.02...")
        gripper_commander.move(0.02)

        print("Opening to 0.05...")
        gripper_commander.move(0.05)

        print("Closing...")
        gripper_commander.close()

        print("Opening all the way...")
        gripper_commander.move(0.08)

    def RobustnessTest(args):
        commander = FrankaGripperCommander()

        mode = "open"
        while not rospy.is_shutdown():
            if mode == "open":
                commander.open(speed=0.2, wait=False)
                print("opening...")
                mode = "close"
            elif mode == "close":
                commander.close(speed=0.2, wait=False)
                print("closing...")
                mode = "open"
            else:
                raise RuntimeError("Invalid mode:", mode)

            wait_time = abs(np.random.normal(loc=0.5, scale=0.75))
            print("  wait:", wait_time)
            rospy.sleep(wait_time)

    parser = argparse.ArgumentParser("gripper_test")
    parser.add_argument(
        "--mode", type=str, required=True, help="Which mode: close, move, gripper_commander_test, robustness_test."
    )
    parser.add_argument(
        "--width",
        type=float,
        default=None,
        help="How wide in meters. Note that the gripper can open to about .8m wide.",
    )
    parser.add_argument("--speed", type=float, default=0.03, help="How fast to go in meters per second.")
    parser.add_argument("--force", type=float, default=0.03, help="How strongly to grip.")
    parser.add_argument(
        "--grasp_width",
        type=float,
        default=0.0,
        help="Width of the grasp. Defaults to closing all the way. "
        "In conjunction with the default error (set wide) the default "
        "behavior is to just close until it feels something.",
    )
    parser.add_argument(
        "--eps_inner", type=float, default=0.2, help="Inner epsilon threshold. Defaults to enabling any error."
    )
    parser.add_argument(
        "--eps_outer", type=float, default=0.2, help="Outer epsilon threshold. Defaults to enabling any error."
    )
    args = parser.parse_args()

    rospy.init_node("gripper_test")
    if args.mode == "move":
        Move(args)
    elif args.mode == "close":
        Grasp(args)
    elif args.mode == "gripper_commander_test":
        FrankaGripperCommanderTest(args)
    elif args.mode == "robustness_test":
        RobustnessTest(args)
    else:
        print("ERROR -- unrecognized mode:", args.mode)
6,912
Python
33.914141
115
0.624421
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/franka_gripper_command_relay.py
#!/usr/bin/python

# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

# Simple action client interface to the gripper action server.

from __future__ import print_function

import argparse
import json
import threading

import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import String

from franka_gripper_commander import FrankaGripperCommander

pinch_width = 0.0265
speed = 0.2


class SimGripperCommander(object):
    def __init__(self):
        pass

    def move(self, width, speed, wait=True):
        print("[move] width: %.4f, speed %.2f" % (width, speed))

    def close(self, width=0.0, speed=0.03, force=40.0, grasp_eps=(0.2, 0.2), wait=True):
        print("[close] width: %.4f, speed: %.2f, force: %.2f" % (width, speed, force))


class FrankaGripperCommandRelay(object):
    def __init__(self, is_sim=False):
        print("Setting up gripper commander")
        self.is_sim = is_sim
        if self.is_sim:
            print("<is sim>")
            self.gripper_commander = SimGripperCommander()
        else:
            print("<is real>")
            self.gripper_commander = FrankaGripperCommander(verbose=True)

        self.start_time = rospy.Time.now()
        self.last_tick_time = self.start_time
        self.seconds_between_tick_prints = 0.1

        self.command_queue = []
        self.command_queue_lock = threading.Lock()

        print("Starting subscriber...")
        self.command_sub = rospy.Subscriber("/cortex/gripper/command", String, self.command_callback)
        print("<ready and listening>")

    def command_callback(self, msg):
        try:
            command = json.loads(msg.data)
            try:
                self.command_queue_lock.acquire()
                self.command_queue.append(command)
            finally:
                self.command_queue_lock.release()
        except ValueError:
            print("JSON parse error -- could not parse command:\n", msg.data)
        except Exception as e:
            print("Exception in processing command:", e)
            print("message data:\n", msg.data)

    def process_latest_commands(self):
        now = rospy.Time.now()
        if (now - self.last_tick_time).to_sec() >= self.seconds_between_tick_prints:
            self.last_tick_time = now

        try:
            self.command_queue_lock.acquire()
            command_queue = self.command_queue
            self.command_queue = []
        finally:
            self.command_queue_lock.release()

        for command in command_queue:
            self.process_latest_command(command)

    def process_latest_command(self, cmd):
        try:
            print("\nprocessing command:", cmd["command"])
            if cmd["command"] == "move_to":
                print("moving to:", cmd["width"])
                self.gripper_commander.move(cmd["width"], speed=speed, wait=True)
            elif cmd["command"] == "close_to_grasp":
                print("closing to grasp")
                self.gripper_commander.close(speed=speed)
            else:
                print("WARNING -- unrecognized gripper command:", cmd["command"])
        except Exception as e:
            print("ERROR processing command:\n", cmd)
            print("exception:", e)

    def run(self):
        rate = rospy.Rate(60.0)
        while not rospy.is_shutdown():
            self.process_latest_commands()
            rate.sleep()


if __name__ == "__main__":
    node_name = "franka_gripper_commander_relay"
    rospy.init_node(node_name)

    parser = argparse.ArgumentParser(node_name)
    parser.add_argument("--is_sim", action="store_true", help="Set to start in simulated env.")
    parser.add_argument("--open", action="store_true", help="Open the gripper then exit.")
    parser.add_argument("--close", action="store_true", help="Close the gripper then exit.")
    parser.add_argument("--close_pinch", action="store_true",
help="Close the gripper then exit.") args = parser.parse_args() if args.open: gripper_commander = FrankaGripperCommander(verbose=True) gripper_commander.open(speed=speed) elif args.close: gripper_commander = FrankaGripperCommander(verbose=True) gripper_commander.close(speed=speed) elif args.close_pinch: gripper_commander = FrankaGripperCommander(verbose=True) gripper_commander.move(pinch_width, speed=speed, wait=True) else: listener = FrankaGripperCommandRelay(args.is_sim) listener.run()
4,852
Python
34.166666
101
0.628607
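For reference, a minimal sketch of driving the relay above from another ROS node. The topic name and the JSON schema ("move_to" with a "width" field, and "close_to_grasp") are taken from the relay source; the width value and sleep durations are purely illustrative.

#!/usr/bin/env python
# Minimal publisher sketch for the gripper command relay above. The topic
# and JSON schema come from the relay source; the width value and sleep
# durations are illustrative assumptions.
import json

import rospy
from std_msgs.msg import String

rospy.init_node("gripper_command_example")
pub = rospy.Publisher("/cortex/gripper/command", String, queue_size=1)
rospy.sleep(1.0)  # give the relay's subscriber time to connect

# Move the fingers to a 4 cm opening, then close to grasp.
pub.publish(String(data=json.dumps({"command": "move_to", "width": 0.04})))
rospy.sleep(2.0)
pub.publish(String(data=json.dumps({"command": "close_to_grasp"})))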
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/src/python/set_high_collision_thresholds.py
#!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

# Raises the Franka's force/torque collision-behavior thresholds so that the
# collision reflexes do not trigger during external control.

import rospy

from franka_control.srv import SetForceTorqueCollisionBehavior
from franka_control.srv import SetForceTorqueCollisionBehaviorRequest

rospy.init_node("set_control_parameters")

force_torque_srv = "/franka_control/set_force_torque_collision_behavior"

# Effectively disable the collision reflexes by setting every threshold to
# 1000 (Nm for the seven joints, N for the six Cartesian axes).
lower_torque_thresholds_nominal = [1000.0] * 7
upper_torque_thresholds_nominal = [1000.0] * 7
lower_force_thresholds_nominal = [1000.0] * 6
upper_force_thresholds_nominal = [1000.0] * 6

ft_req = SetForceTorqueCollisionBehaviorRequest()
ft_req.lower_torque_thresholds_nominal = lower_torque_thresholds_nominal
ft_req.upper_torque_thresholds_nominal = upper_torque_thresholds_nominal
ft_req.lower_force_thresholds_nominal = lower_force_thresholds_nominal
ft_req.upper_force_thresholds_nominal = upper_force_thresholds_nominal
print(ft_req)

rospy.loginfo("Waiting for services...")
rospy.wait_for_service(force_torque_srv)
rospy.loginfo("Services ready.")

ft_srv = rospy.ServiceProxy(force_torque_srv, SetForceTorqueCollisionBehavior)

resp = ft_srv(ft_req)
failed = False
if not resp.success:
    rospy.logerr("Could not set force torque collision behavior!")
    failed = True
else:
    rospy.loginfo("Set force torque collision behavior!")

if failed:
    raise RuntimeError("Failed to set control parameters")
2,062
Python
39.45098
90
0.78613
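The script above pins every threshold at 1000, which effectively disables the collision reflexes. A sketch of a parameterized variant, using only the service and the response's "success" field shown above; the helper function and its argument names are hypothetical:

#!/usr/bin/env python
# Parameterized variant of the threshold script above; a hypothetical helper
# built on the same franka_control service and response 'success' field.
import rospy
from franka_control.srv import (
    SetForceTorqueCollisionBehavior,
    SetForceTorqueCollisionBehaviorRequest,
)


def set_collision_thresholds(torque, force):
    """Apply one torque threshold (Nm) to all 7 joints and one force
    threshold (N) to all 6 Cartesian axes, lower and upper alike."""
    srv_name = "/franka_control/set_force_torque_collision_behavior"
    rospy.wait_for_service(srv_name)
    req = SetForceTorqueCollisionBehaviorRequest()
    req.lower_torque_thresholds_nominal = [torque] * 7
    req.upper_torque_thresholds_nominal = [torque] * 7
    req.lower_force_thresholds_nominal = [force] * 6
    req.upper_force_thresholds_nominal = [force] * 6
    srv = rospy.ServiceProxy(srv_name, SetForceTorqueCollisionBehavior)
    return srv(req).success


if __name__ == "__main__":
    rospy.init_node("set_collision_thresholds")
    if not set_collision_thresholds(torque=1000.0, force=1000.0):
        raise RuntimeError("Failed to set collision behavior")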
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/include/cortex/control/franka/interpolated_command_stream_controller.h
// Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#pragma once

#include <memory>
#include <mutex>
#include <vector>

#include <controller_interface/multi_interface_controller.h>
#include <cortex/control/command_stream_interpolator.h>
#include <cortex_control/JointPosVelAccCommand.h>
#include <hardware_interface/joint_command_interface.h>
#include <hardware_interface/robot_hw.h>
#include <ros/node_handle.h>
#include <ros/time.h>

namespace cortex {
namespace control {
namespace franka {

/**
 * \brief Joint position controller driven by cortex RMP control commands.
 *
 * This controller interpolates the received cortex control commands and
 * forwards them to the robot's joint position interface.
 */
class InterpolatedCommandStreamController
    : public controller_interface::MultiInterfaceController<
          hardware_interface::PositionJointInterface> {
 public:
  /**
   * \brief Initializes the controller.
   *
   * \param robot_hardware handle to the robot's hardware abstraction
   * \param node_handle node handle instance
   */
  bool init(hardware_interface::RobotHW *robot_hardware, ros::NodeHandle &node_handle) override;

  /**
   * \brief Initialization of the controller upon activation.
   *
   * \param time time at which the controller was activated
   */
  void starting(ros::Time const &time) override;

  /**
   * \brief Control update loop execution.
   *
   * \param time current time
   * \param period time elapsed since the last call
   */
  void update(ros::Time const &time, ros::Duration const &period) override;

 private:
  /**
   * \brief Retrieves the current joint position from the joint handles.
   */
  Eigen::VectorXd current_position() const;

  /**
   * \brief Commands the robot's current joint position, holding the
   * current configuration.
   */
  void send_current_position();

  /**
   * \brief Sends the given joint position command to the robot's joints.
   *
   * \param q joint position to be sent to the robot
   */
  void send_position_command(const Eigen::VectorXd &q);

 private:
  std::shared_ptr<cortex::control::CommandStreamInterpolator> command_stream_interpolator_;
  bool initialize_blending_;
  ros::Time controller_time_;
  ros::Time start_time_;

  hardware_interface::PositionJointInterface *joint_interface_;
  std::vector<hardware_interface::JointHandle> joint_handles_;

  ros::Duration print_period_;
  ros::Time next_print_time_;
};

}  // namespace franka
}  // namespace control
}  // namespace cortex
2,800
C
28.797872
96
0.734286
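The header above only declares the interface; the actual interpolation lives in CommandStreamInterpolator, which is not included in this excerpt. As a concept-level sketch only (not the cortex implementation), interpolating a position command stream between the two most recent waypoints looks like this:

# Concept sketch: linear interpolation between streamed position commands.
# This is NOT the cortex CommandStreamInterpolator, only an illustration of
# the idea the controller above builds on.
import numpy as np


def interpolate_command(t, t0, q0, t1, q1):
    """Interpolate a joint position command for time t from the last two
    waypoints (t0, q0) and (t1, q1); clamps outside the interval."""
    if t1 <= t0:
        return np.asarray(q1, dtype=float)
    alpha = np.clip((t - t0) / (t1 - t0), 0.0, 1.0)
    return (1.0 - alpha) * np.asarray(q0, dtype=float) + alpha * np.asarray(q1, dtype=float)


# Commands received at t=0.00 and t=0.10; the control loop ticks at t=0.025.
q = interpolate_command(0.025, 0.0, [0.0] * 7, 0.1, [0.1] * 7)
print(q)  # each joint a quarter of the way to the new waypoint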
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/config/controller.yaml
joint_position_controller: type: cortex_control_franka/InterpolatedCommandStreamController joint_names: - panda_joint1 - panda_joint2 - panda_joint3 - panda_joint4 - panda_joint5 - panda_joint6 - panda_joint7
273
YAML
23.909089
67
0.615385
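This config registers the controller declared in the header above under the name joint_position_controller. A sketch of bringing it up through the standard ROS controller_manager services, assuming the YAML has already been loaded onto the parameter server and the Franka hardware node is running:

#!/usr/bin/env python
# Sketch: start the joint_position_controller from the config above via the
# standard controller_manager services. Assumes the YAML is already on the
# parameter server and franka_control is up.
import rospy
from controller_manager_msgs.srv import LoadController, SwitchController

rospy.init_node("start_cortex_controller")
rospy.wait_for_service("/controller_manager/load_controller")
load = rospy.ServiceProxy("/controller_manager/load_controller", LoadController)
switch = rospy.ServiceProxy("/controller_manager/switch_controller", SwitchController)

if load("joint_position_controller").ok:
    # strictness=2 (STRICT): fail loudly instead of silently skipping.
    switch(start_controllers=["joint_position_controller"],
           stop_controllers=[],
           strictness=2)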
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/cortex_control_franka/config/command_stream_interpolator.yaml
params:
  interpolation_delay: 0.2
  use_smoothing_interpolator: true
  blending_duration: 2.0
  backend_timeout: 0.5

ros_topics:
  joint_state: /robot/joint_state
  rmpflow_commands:
    command: /cortex/arm/command
    ack: /cortex/arm/command/ack
    suppress: /cortex/arm/command/suppress
    interpolated: /cortex/arm/command/interpolated
370
YAML
27.538459
54
0.678378
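A brief sketch of reading these settings back at runtime. The parameter namespace here (a node-private "~" prefix) is an assumption; it depends on how the launch files push this file onto the parameter server.

# Sketch: read the interpolator settings above from the parameter server.
# The "~" (private) namespace is an assumption about the launch setup.
import rospy

rospy.init_node("interpolator_config_reader")
delay = rospy.get_param("~params/interpolation_delay", 0.2)
blend = rospy.get_param("~params/blending_duration", 2.0)
cmd_topic = rospy.get_param("~ros_topics/rmpflow_commands/command",
                            "/cortex/arm/command")
rospy.loginfo("interpolation delay %.2fs, blending %.2fs, commands on %s",
              delay, blend, cmd_topic)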
NVIDIA-Omniverse/IsaacSim-ros_workspaces/noetic_ws/src/isaac_vins/config/isaac_a1/vins_fusion_isaac_a1.yaml
%YAML:1.0

#common parameters
#support: 1 imu 1 cam; 1 imu 2 cam; 2 cam
imu: 0
num_of_cam: 2

imu_topic: "/isaac_a1/imu_data"
image0_topic: "/isaac_a1/camera_forward/camera_left/rgb"
image1_topic: "/isaac_a1/camera_forward/camera_right/rgb"
output_path: "~/output"

cam0_calib: "isaac_left.yaml"
cam1_calib: "isaac_right.yaml"
image_width: 640
image_height: 480

# Extrinsic parameter between IMU and Camera.
estimate_extrinsic: 1   # 0  Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
                        # 1  Have an initial guess about the extrinsic parameters. We will optimize around your initial guess.

body_T_cam0: !!opencv-matrix
   rows: 4
   cols: 4
   dt: d
   data: [ 0, 0, 1, 0.2693,
          -1, 0, 0, 0.025,
           0, -1, 0, 0.067,
           0., 0., 0., 1. ]

body_T_cam1: !!opencv-matrix
   rows: 4
   cols: 4
   dt: d
   data: [ 0, 0, 1, 0.2693,
          -1, 0, 0, -0.025,
           0, -1, 0, 0.067,
           0., 0., 0., 1. ]

#Multiple thread support
multiple_thread: 0

#feature tracker parameters
max_cnt: 150            # max feature number in feature tracking
min_dist: 10            # min distance between two features
freq: 15                # frequency (Hz) at which the tracking result is published. At least 10 Hz for good estimation. If set to 0, the frequency is the same as the raw image.
F_threshold: 1.0        # RANSAC threshold (pixel)
show_track: 1           # publish tracking image as topic
flow_back: 1            # perform forward and backward optical flow to improve feature tracking accuracy

#optimization parameters
max_solver_time: 0.04   # max solver iteration time (s), to guarantee real time
max_num_iterations: 8   # max solver iterations, to guarantee real time
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)

#imu parameters: the more accurate the parameters you provide, the better the performance
acc_n: 0.5        # accelerometer measurement noise standard deviation.  #0.2   0.04
gyr_n: 0.1        # gyroscope measurement noise standard deviation.      #0.05  0.004
acc_w: 0.001      # accelerometer bias random walk noise standard deviation.  #0.002
gyr_w: 0.0001     # gyroscope bias random walk noise standard deviation.      #4.0e-5
g_norm: 9.805     # gravity magnitude

#unsynchronization parameters
estimate_td: 0    # online estimate time offset between camera and imu
td: 0.0           # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)

#loop closure parameters
load_previous_pose_graph: 0        # load and reuse previous pose graph; load from 'pose_graph_save_path'
pose_graph_save_path: "~/output/pose_graph/" # save and load path
save_image: 0                      # save images in the pose graph for visualization purposes; disable by setting 0
2,909
YAML
39.416666
154
0.649708
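The two body_T_cam matrices above place the cameras at +/-0.025 m along the body y-axis, i.e. a 5 cm stereo baseline. A quick numpy check of the relative cam0 to cam1 transform:

# Quick check of the stereo extrinsics in the config above: compute the
# cam0 -> cam1 transform and the resulting baseline.
import numpy as np

body_T_cam0 = np.array([[ 0,  0, 1, 0.2693],
                        [-1,  0, 0, 0.025],
                        [ 0, -1, 0, 0.067],
                        [ 0,  0, 0, 1.0]])
body_T_cam1 = np.array([[ 0,  0, 1, 0.2693],
                        [-1,  0, 0, -0.025],
                        [ 0, -1, 0, 0.067],
                        [ 0,  0, 0, 1.0]])

cam0_T_cam1 = np.linalg.inv(body_T_cam0) @ body_T_cam1
baseline = np.linalg.norm(cam0_T_cam1[:3, 3])
print("baseline: %.3f m" % baseline)  # expected: 0.050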