file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
---|---|---|---|---|---|---|
AshisGhosh/roboai/model_server/model_server/hf_mxbai_embed.py | from sentence_transformers import SentenceTransformer
class HuggingFaceMXBaiEmbedLarge:
def __init__(self):
self.model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
def embed(self, text):
return self.model.encode(text).tolist()
| 269 | Python | 25.999997 | 78 | 0.724907 |
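A minimal usage sketch for the HuggingFaceMXBaiEmbedLarge wrapper above, assuming the model weights download on first use; the cosine-similarity helper and example strings are illustrative, not part of the repo:

import numpy as np
from model_server.hf_mxbai_embed import HuggingFaceMXBaiEmbedLarge

embedder = HuggingFaceMXBaiEmbedLarge()

def cosine_similarity(a, b):
    # embed() returns plain Python lists, so convert before taking the dot product.
    a, b = np.asarray(a), np.asarray(b)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

query = embedder.embed("pick up the cereal box")
candidate = embedder.embed("grasp the cereal")
print(cosine_similarity(query, candidate))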
AshisGhosh/roboai/model_server/app/main.py | #!/usr/bin/python -u
import io
import logging
from PIL import Image
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from model_server.hf_moondream2 import HuggingFaceMoonDream2
from model_server.hf_mxbai_embed import HuggingFaceMXBaiEmbedLarge
moondream = HuggingFaceMoonDream2()
mxbai_embed = HuggingFaceMXBaiEmbedLarge()
logging.basicConfig(level=logging.INFO)
# Create FastAPI instance
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the model server."}
@app.post("/answer_question")
async def answer_question(file: UploadFile = File(...), question: str = ""):
# Read the image file
image_bytes = await file.read()
# Convert bytes to a file-like object
image_stream = io.BytesIO(image_bytes)
# Use PIL to open the image
image = Image.open(image_stream)
# Perform object detection
result = moondream.answer_question_from_image(image, question)
# Return the result
return JSONResponse(content={"result": result})
@app.post("/embed")
async def embed(text: str = ""):
# Perform embedding
result = mxbai_embed.embed(text)
# Return the result
return JSONResponse(content={"embedding": result})
| 1,753 | Python | 25.179104 | 76 | 0.715345 |
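A hedged client sketch for the two endpoints above; the host and port are assumptions (use whatever uvicorn binds to), and question/text travel as query parameters because the handlers declare them as plain str arguments:

import httpx

BASE = "http://localhost:8000"  # assumed; adjust to the server's actual address

def answer_question(image_path: str, question: str) -> dict:
    with open(image_path, "rb") as f:
        files = {"file": (image_path, f, "image/png")}
        resp = httpx.post(f"{BASE}/answer_question", files=files,
                          params={"question": question}, timeout=120.0)
    resp.raise_for_status()
    return resp.json()

def embed(text: str) -> list[float]:
    resp = httpx.post(f"{BASE}/embed", params={"text": text}, timeout=30.0)
    resp.raise_for_status()
    return resp.json()["embedding"]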
AshisGhosh/roboai/robosuite/robosim/pyproject.toml | [tool.poetry]
name = "robosim"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.8"
fastapi = "^0.110.0"
uvicorn = "^0.29.0"
python-multipart = "^0.0.9"
h5py = "^3.10.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 350 | TOML | 17.473683 | 46 | 0.657143 |
AshisGhosh/roboai/robosuite/robosim/robosim/camera.py | import numpy as np
from robosuite.utils.camera_utils import (
get_camera_intrinsic_matrix,
get_camera_extrinsic_matrix,
get_camera_transform_matrix,
get_real_depth_map,
transform_from_pixels_to_world,
)
import logging
log = logging.getLogger("robosim robot camera")
log.setLevel(logging.DEBUG)
class Camera:
def __init__(self, env, name, camera_height=480, camera_width=640):
self.env = env
self.name = name
self.camera_height = camera_height
self.camera_width = camera_width
log.debug(f"Getting intrinsic matrix for {name}")
self.intrinsic_matrix = get_camera_intrinsic_matrix(
env.sim, name, camera_height, camera_width
)
log.debug(f"Getting extrinsic matrix for {name}")
self.extrinsic_matrix = get_camera_extrinsic_matrix(env.sim, name)
log.debug(f"Getting transform matrix for {name}")
self.transform_matrix = get_camera_transform_matrix(
env.sim, name, camera_height, camera_width
)
log.debug(f"Getting camera to world transform for {name}")
self.camera_to_world_transform = np.linalg.inv(self.transform_matrix)
log.debug(f"Camera initialized for {name}")
def get_world_coords_from_pixels(self, pixels, depth):
# cv2.imshow("Depth", depth)
# cv2.waitKey(0)
log.debug(
f"Getting world coordinates from pixels {pixels} and depth {depth.shape}"
)
real_depth_map = get_real_depth_map(self.env.sim, depth)
log.debug(f"Real depth map: {real_depth_map.shape}")
log.debug(
f"pixels leading shape: depth map leading shape -- {pixels.shape[:-1]} -- {real_depth_map.shape[:-3]}"
)
return transform_from_pixels_to_world(
pixels, real_depth_map, self.camera_to_world_transform
)
def pixel_to_world(self, pixel):
depth = self.env._get_observations()["robot0_eye_in_hand_depth"][::-1]
return self.get_world_coords_from_pixels(np.array(pixel), depth)
| 2,055 | Python | 37.074073 | 114 | 0.640876 |
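The Camera class above delegates the actual projection to robosuite's transform_from_pixels_to_world. As a rough, self-contained illustration of the idea (homogeneous pixel coordinates scaled by metric depth, pushed through the inverse camera transform), here is a sketch; the matrix and pixel conventions are stand-ins, not robosuite's exact implementation:

import numpy as np

def pixel_to_world_sketch(pixel_uv, depth_m, camera_to_world_transform):
    # pixel_uv: (u, v) pixel coordinates; depth_m: metric depth at that pixel.
    # camera_to_world_transform: 4x4 inverse of the full camera transform.
    u, v = pixel_uv
    point_homog = np.array([u * depth_m, v * depth_m, depth_m, 1.0])
    return (camera_to_world_transform @ point_homog)[:3]

# Purely illustrative call with an identity transform.
print(pixel_to_world_sketch((320, 240), 0.5, np.eye(4)))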
AshisGhosh/roboai/robosuite/robosim/robosim/grasp_handler.py | import base64
import numpy as np
from PIL import Image
import cv2
from enum import Enum
from robosim.camera import Camera
from shared.utils.grasp_client import _check_server, _get_grasp_from_image
from shared.utils.robotic_grasping_client import _get_grasps_from_rgb_and_depth
import shared.utils.llm_utils as llm_utils
import logging
log = logging.getLogger("robosim robot grasp")
log.setLevel(logging.DEBUG)
class GraspMethod(Enum):
GRASP_DET_SEG = "grasp_det_seg"
GR_CONVNET = "gr_convnet"
class Grasp:
def __init__(
self, r_bbox, image, depth, env, bbox=None, cls=None, cls_name=None, score=None
):
log.debug("Initializing Grasp object.")
self.r_bbox = r_bbox
self.image = image
self.depth = depth
self.env = env
log.debug("Initializing camera")
self.camera = Camera(self.env, "robot0_eye_in_hand")
self.cls = cls
self.cls_name = cls_name
self.score = score
self.bbox = bbox
self.approach_poses = []
self.grasp_pose = None
self.retract_poses = []
log.debug(f"Generated grasp for {self}")
def generate_grasp_sequence(self):
log.info(f"Generating grasp sequence for {self}")
self.grasp_pose = self.get_grasp_pose_from_r_bbox()
return self.approach_poses, self.grasp_pose, self.retract_poses
def get_grasp_pose_from_r_bbox(self):
# Get the center of the bounding box
log.debug(f"Getting grasp pose from r_bbox: {self.r_bbox}")
# pixels work in y, x not x, y
center = (
int(np.mean([coord[1] for coord in self.r_bbox])),
int(np.mean([coord[0] for coord in self.r_bbox])),
)
log.debug(f"Center of the bounding box: {center}")
# Get the world coordinates of the center
log.debug(f"{np.array(center).shape} -- {np.array(self.depth).shape}")
world_coords = self.camera.get_world_coords_from_pixels(
np.array(center), np.array(self.depth)
)
log.debug(f"World coordinates of the center: {world_coords}")
self.grasp_position = world_coords
# Get grasp orientation
# Get the angle from the bounding box
pt1 = self.r_bbox[0]
pt2 = self.r_bbox[1]
angle = np.arctan2(pt2[1] - pt1[1], pt2[0] - pt1[0]) + np.pi / 2
self.grasp_orientation = angle
log.debug(f"Grasp orientation: {angle}")
return world_coords, angle
def __str__(self):
return f"Grasp: {self.cls_name} with score {self.score} at r_bbox {self.r_bbox}"
class GraspHandler:
def __init__(self, robot):
self.robot = robot
self.env = robot.robosim.env
async def get_grasps_from_image(self, image: Image, visualize=True):
res = await _get_grasp_from_image(image)
if visualize:
self.show_image(res["image"])
return res["result"]
async def get_grasp_image(self) -> Image:
# turn off marker visualization
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = 0
im = self.env._get_observations()["robot0_eye_in_hand_image"]
img = Image.fromarray(im[::-1])
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img
async def get_grasp_image_and_depth(self):
# turn off marker visualization
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[
self.robot.robosim.env.sim.model.site_name2id(marker)
][3] = 0
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()
img = Image.fromarray(im["robot0_eye_in_hand_image"][::-1])
depth = im["robot0_eye_in_hand_depth"][::-1]
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img, depth
async def get_grasp_image_and_depth_image(self):
img, depth = await self.get_grasp_image_and_depth()
squeezed_depth = np.squeeze(depth)
normalized_depth = (
(squeezed_depth - np.min(squeezed_depth))
/ (np.max(squeezed_depth) - np.min(squeezed_depth))
* 255
)
depth_uint8 = normalized_depth.astype(np.uint8)
depth_image = Image.fromarray(depth_uint8)
return img, depth_image, depth
async def get_grasp(self, obj_name, method=GraspMethod.GRASP_DET_SEG):
if method == GraspMethod.GRASP_DET_SEG:
log.debug("Getting grasp from grasp_det_seg...")
return await self.get_grasp_grasp_det_seg(obj_name)
elif method == GraspMethod.GR_CONVNET:
log.debug("Getting grasp from grasp convnet...")
return await self.get_grasp_gr_convnet(obj_name)
else:
raise ValueError(f"Invalid grasp method: {method}")
async def get_grasp_grasp_det_seg(self, obj_name):
# return await self.get_grasps()
log.debug("Getting grasp image and depth...")
img, depth = await self.get_grasp_image_and_depth()
log.debug("Getting grasp from image...")
grasps = await self.get_grasps_from_image(img)
if len(grasps) == 0:
log.error("No grasps found.")
return None
candidate_objs = [obj["cls_name"].replace("_", " ") for obj in grasps]
log.info(f"Getting closest object to '{obj_name}' from {candidate_objs}")
closest_obj = await llm_utils.get_closest_text(obj_name, candidate_objs)
log.info(f"Closest object: {closest_obj}")
grasp = grasps[candidate_objs.index(closest_obj)]
g_obj = Grasp(
cls=grasp["cls"],
cls_name=grasp["cls_name"],
score=grasp["obj"],
bbox=grasp["bbox"],
r_bbox=grasp["r_bbox"],
image=img,
depth=depth,
env=self.robot.robosim.env,
)
return grasp, g_obj.generate_grasp_sequence()
async def get_grasp_gr_convnet(self, obj_name):
log.debug("Getting grasp image and depth...")
img, depth_image, depth = await self.get_grasp_image_and_depth_image()
log.debug("Getting grasp from image...")
grasps = await _get_grasps_from_rgb_and_depth(img, depth_image)
grasp = grasps[0]
log.debug(f"r_bbox: {grasp['r_bbox']}")
g_obj = Grasp(
cls=None,
cls_name=None,
score=None,
bbox=None,
r_bbox=grasp["r_bbox"],
image=img,
depth=depth,
env=self.robot.robosim.env,
)
return grasp, g_obj.generate_grasp_sequence()
async def check_server(self):
return await _check_server()
def show_image(self, base64_image):
image_bytes = base64.b64decode(base64_image)
nparr = np.frombuffer(image_bytes, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Display the image using OpenCV
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 7,655 | Python | 33.486486 | 88 | 0.585761 |
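A worked sketch of the rotated-bbox-to-grasp step used by Grasp.get_grasp_pose_from_r_bbox above: the center is averaged in (y, x) order and the gripper angle comes from the first bbox edge rotated by 90 degrees. The corner values are made up for illustration:

import numpy as np

r_bbox = [(100, 200), (140, 210), (135, 250), (95, 240)]  # hypothetical (x, y) corners

center = (
    int(np.mean([pt[1] for pt in r_bbox])),  # y first, matching the class
    int(np.mean([pt[0] for pt in r_bbox])),
)

pt1, pt2 = r_bbox[0], r_bbox[1]
angle = np.arctan2(pt2[1] - pt1[1], pt2[0] - pt1[0]) + np.pi / 2

print(center, angle)  # (225, 117) and roughly 1.82 rad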
AshisGhosh/roboai/robosuite/robosim/robosim/robosim.py | import numpy as np
from dataclasses import dataclass
from enum import Enum
import asyncio
import copy
from PIL import Image
import httpx
import robosuite as suite
from robosuite import load_controller_config
from robosuite.wrappers import VisualizationWrapper
from robosuite.utils.transform_utils import mat2quat, euler2mat
from robosuite.utils.camera_utils import CameraMover
from robosuite.utils.mjcf_utils import new_body, new_site
from robosim.task import TaskFactory, TaskClass, TaskStatus
from robosim.robot import Robot
from robosim.grasp_handler import Camera
import shared.utils.gradio_client as gradio
from shared.utils.model_server_client import _answer_question_from_image
import shared.utils.replicate_client as replicate # noqa: F401
import logging
log = logging.getLogger("robosim")
log.setLevel(logging.INFO)
class ControllerType(Enum):
JOINT_VELOCITY = 1
OSC_POSE = 2
@dataclass
class OSCControlStep:
dx: float = 0
dy: float = 0
dz: float = 0
droll: float = 0
dpitch: float = 0
dyaw: float = 0
gripper: float = 0
def to_list(self):
return [
self.dx,
self.dy,
self.dz,
self.droll,
self.dpitch,
self.dyaw,
self.gripper,
]
class RoboSim:
def __init__(self, controller_type=ControllerType.OSC_POSE):
self.controller_type = controller_type
self.env = None
self.task_factory = TaskFactory()
self.tasks = []
self.current_task = None
self._last_task = None
self._last_task_finish_status = None
self.render_task = None
self.execute_async_task = None
self.__close_renderer_flag = asyncio.Event()
self.__executing_async = asyncio.Event()
self.__pause_execution = asyncio.Event()
self.__stop_execution = asyncio.Event()
self.__getting_image = asyncio.Event()
self.__getting_image_ts = None
def setup(self):
self.env = self.setup_env()
self.setup_markers()
self.setup_cameras()
self.robot = Robot(self)
self.register_tasks()
# self.test_action([0,0,0,0,0,0,0,0])
def register_tasks(self):
self.task_factory = TaskFactory()
self.task_factory.register_task(self.robot.go_to_position)
self.task_factory.register_task(self.robot.go_to_relative_position)
self.task_factory.register_task(self.robot.go_to_orientation)
self.task_factory.register_task(self.robot.go_to_pick_center)
self.task_factory.register_task(self.robot.go_to_object)
self.task_factory.register_task(self.robot.get_grasp, TaskClass.DATA_TASK)
self.task_factory.register_task(self.robot.go_to_grasp_orientation)
self.task_factory.register_task(self.robot.go_to_grasp_position)
self.task_factory.register_task(self.robot.go_to_pose)
self.task_factory.register_task(self.robot.go_to_pre_grasp)
self.task_factory.register_task(self.add_grasp_marker, TaskClass.DATA_TASK)
self.task_factory.register_task(self.robot.close_gripper)
self.task_factory.register_task(self.robot.open_gripper)
self.task_factory.register_task(self.robot.go_to_drop)
def setup_env(self):
config = load_controller_config(
default_controller=self.controller_type.name
) # load default controller config
# create environment instance
env = suite.make(
env_name="PickPlace", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
controller_configs=config,
control_freq=20,
has_renderer=True,
render_camera="frontview",
# render_camera="robot0_eye_in_hand",
camera_names=["frontview", "agentview", "robot0_eye_in_hand"],
camera_heights=[672, 672, 480],
camera_widths=[672, 672, 640],
camera_depths=[False, False, True], # set to true for using depth sensor
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
# reset the environment
env.reset()
return env
def setup_cameras(self):
self.camera_mover = CameraMover(self.env, "agentview")
self.camera_mover.set_camera_pose(pos=[0.65, -0.25, 1.4])
log.info(f"Camera Pose: {self.camera_mover.get_camera_pose()}")
self.env.sim.forward()
self.env.sim.step()
self.env.step(np.zeros(self.env.action_dim))
def setup_markers(self):
self.markers = []
# self.add_marker([0.5, 0, 1.0], size=0.3, name="indicator_ball")
self.add_marker(
[0.5, 0, 2.0], size=0.05, name="grasp_marker", rgba=[0, 0, 1, 0.65]
)
self.add_marker(
[0.5, 0, 1.0], type="box", size=(0.03, 0.05, 0.1), name="gripper_goal"
)
def test_action(self, action, *args):
obs, reward, done, info = self.env.step(action)
def add_marker(
self, pos, type="sphere", size=0.03, name="indicator_ball", rgba=[1, 0, 0, 0.65]
):
indicator_config = {
"name": name,
"type": type,
"size": size,
"rgba": rgba,
"pos": pos,
}
self.markers.append(indicator_config)
self.env = VisualizationWrapper(self.env, self.markers)
self.env.sim.forward()
self.env.sim.step()
self.env.set_xml_processor(processor=None)
def _add_indicators(self, xml):
import xml.etree.ElementTree as ET
root = ET.fromstring(xml)
worldbody = root.find("worldbody")
for indicator_config in self.markers:
config = copy.deepcopy(indicator_config)
indicator_body = new_body(
name=config["name"] + "_body", pos=config.pop("pos", (0, 0, 0))
)
indicator_body.append(new_site(**config))
worldbody.append(indicator_body)
xml = ET.tostring(root, encoding="utf8").decode("utf8")
return xml
async def add_grasp_marker(self, *args):
grasp_pos = self.robot.get_grasp_pose()[0]
self.add_marker(grasp_pos, name="grasp_marker")
self.env.render()
return f"Marker added at {grasp_pos}."
def reset(self):
self.env.reset()
self.setup_markers()
self.setup_cameras()
def move_gripper_goal_to_gripper(self):
gripper_pos = self.robot.get_gripper_position()
gripper_ori = mat2quat(self.robot.get_gripper_orientation())
self.move_marker(gripper_pos, gripper_ori, name="gripper_goal")
return f"Marker moved to gripper position: {gripper_pos}."
def move_marker(self, position=None, orientation=None, name="grasp_marker", *args):
if position is None and orientation is None:
raise ValueError("Either position or orientation must be provided.")
if position is not None:
self.env.sim.model.body_pos[
self.env.sim.model.body_name2id(name + "_body")
] = position
if orientation is not None:
if len(orientation) == 3:
base_orientation = np.array([np.pi, 0, np.pi / 2])
o = copy.deepcopy(orientation)
o = np.array(o) - base_orientation
orientation = base_orientation + [-o[1], o[2], -o[0]]
orientation = euler2mat(orientation)
orientation = mat2quat(orientation)
self.env.sim.model.body_quat[
self.env.sim.model.body_name2id(name + "_body")
] = orientation
self.env.sim.forward()
self.env.sim.step()
self.env.render()
resp = f"Marker {name} moved to {position} with orientation {orientation}."
log.debug(resp)
return resp
def pixel_to_marker(self, pixel, camera_name="robot0_eye_in_hand"):
if camera_name != "robot0_eye_in_hand":
raise NotImplementedError(
"pixel_to_marker only supports robot0_eye_in_hand currently."
)
camera = Camera(self.env, camera_name)
marker_pose = camera.pixel_to_world(pixel)
log.debug(f"Marker Pose: {marker_pose}")
self.move_marker(marker_pose)
return str(marker_pose)
def start(self):
log.info("Starting Robosuite Simulation...")
# self.env.reset()
self.reset()
self.env.render()
action = None
for i in range(1000):
action = self.check_for_action()
if action is None:
action = OSCControlStep().to_list()
obs, reward, done, info = self.env.step(
action
) # take action in the environment
self.env.render() # render on display
async def start_async(self):
if self.render_task is None or self.render_task.done():
self.__close_renderer_flag.clear()
# self.env.reset()
self.reset()
self.render_task = asyncio.create_task(self.render())
return True
async def render(self):
hz = 5
while not self.__close_renderer_flag.is_set(): # Use the Event for checking
if not self.__executing_async.is_set():
self.env.render()
await asyncio.sleep(1 / hz)
async def close_renderer(self):
self.__close_renderer_flag.set()
if self.render_task and not self.render_task.done():
await self.render_task
self.env.close_renderer()
return True
async def start_execution(self):
self.execute_async_task = asyncio.create_task(self.execute_async())
return True
async def execute_async(self):
if not self.render_task or self.render_task.done():
await self.start_async()
self.__pause_execution.clear()
self.__executing_async.set()
while self.tasks or self.current_task:
if self.__stop_execution.is_set():
self.__executing_async.clear()
return "Execution stopped."
if self.__pause_execution.is_set():
await self.manage_execution_delay()
continue
action = await self.check_for_action()
if action is None:
action = OSCControlStep().to_list()
obs, reward, done, info = self.env.step(action)
if self.__getting_image.is_set():
continue
else:
self.env.render()
await self.manage_execution_delay()
self.__executing_async.clear()
return "All tasks executed."
async def manage_execution_delay(self):
delay = 0.0
if self.__getting_image.is_set():
delay = 0.1
else:
if self.__getting_image_ts is not None:
current_time = asyncio.get_event_loop().time()
if current_time - self.__getting_image_ts < 1:
delay = 0.1
else:
self.__getting_image_ts = None
await asyncio.sleep(delay)
async def stop_execution(self):
log.info("Stopping execution...")
self.__stop_execution.set()
return True
async def pause_execution(self):
log.info("Pausing execution...")
self.__pause_execution.set()
return True
async def resume_execution(self):
log.info("Resuming execution...")
self.__pause_execution.clear()
self.__executing_async.set()
return True
async def check_for_action(self):
"""
Check if there is a task in the queue. If there is, execute it.
"""
if self.current_task is None and self.tasks:
self.current_task = self.tasks.pop(0)
log.info(f"Current Task: {self.current_task.name}")
if self.current_task:
if self.current_task.task_class != TaskClass.CONTROL_TASK:
log.info(f"Executing Task: {self.current_task.name}")
data = await self.current_task.execute()
log.info(f"Data: {data}")
if data is None:
self.finish_current_task(
status=TaskStatus.FAILED, status_msg="Task failed."
)
self.finish_current_task()
return OSCControlStep().to_list()
return await self.do_current_task()
return None
async def do_current_task(self):
"""
Execute the current task in the queue.
"""
action = self.current_task.execute()
log.debug(f"Action: {action}")
self.check_if_task_finished(action)
return action
def check_if_task_finished(self, action):
if action == OSCControlStep().to_list():
self.finish_current_task()
if not self.robot.is_gripper_moving(action):
self.finish_current_task(
status=TaskStatus.FAILED, status_msg="Gripper not moving."
)
def finish_current_task(self, status=TaskStatus.COMPLETED, status_msg=None):
"""
Finish the current task in the queue.
"""
log.info(
f"Task finished: {self.current_task.name} with status {status} and message {status_msg}."
)
self._last_task = self.current_task
self._last_task_finish_status = {"status": status, "status_msg": status_msg}
self.current_task = None
self.robot.__goal_position = None
self.robot.__goal_orientation = None
def add_task(self, name, function, *args, **kwargs):
"""
Add a task to the queue.
"""
task = self.task_factory.create_task(function, name, *args, **kwargs)
self.tasks.append(task)
log.info(f"Task added: {task}")
def get_tasks(self):
return self.tasks
def clear_tasks(self):
self.tasks = []
if self.current_task:
self.finish_current_task()
async def get_image_realtime(
self, camera_name="agentview", width=512, height=512
) -> Image:
self.__getting_image.set()
self.__getting_image_ts = asyncio.get_event_loop().time()
im = self.env.sim.render(width=width, height=height, camera_name=camera_name)
img = Image.fromarray(im[::-1])
self.__getting_image.clear()
return img
async def get_image(self, camera_name="agentview") -> Image:
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = 0
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()[camera_name + "_image"]
img = Image.fromarray(im[::-1])
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img
async def get_image_with_markers(self, camera_name="agentview") -> Image:
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()[camera_name + "_image"]
img = Image.fromarray(im[::-1])
return img
def get_object_names(self):
return [obj.name for obj in self.env.objects]
def get_object_pose(self):
for obj in self.env.objects:
dist = self.env._gripper_to_target(
gripper=self.env.robots[0].gripper,
target=obj.root_body,
target_type="body",
return_distance=True,
)
log.info(f"Object {obj.name}: {dist}")
async def pick(self, object_name):
self.clear_tasks()
await self.start_async()
# self.add_task("go to object", "go_to_object", object_name)
self.add_task("go to pick center", "go_to_pick_center", "")
self.add_task("get grasp", "get_grasp", object_name)
await self.execute_async()
if self._last_task_finish_status["status"] == TaskStatus.FAILED:
retry_attempts = 3
for i in range(retry_attempts):
log.info(f"Retrying pick attempt {i+1}...")
self.add_task("go to pick center", "go_to_pick_center", "")
self.add_task("get grasp", "get_grasp", object_name)
if self._last_task_finish_status["status"] == TaskStatus.COMPLETED:
break
success, _ = await self.get_feedback("grasp-selection-feedback", object_name)
if not success:
log.info("Grasp selection feedback failed.")
return False
self.add_task("move to pre-grasp", "go_to_pre_grasp", "")
self.add_task("open gripper", "open_gripper", "")
self.add_task("go to grasp position", "go_to_grasp_position", "")
self.add_task("close gripper", "close_gripper", "")
await self.execute_async()
if self._last_task_finish_status["status"] != TaskStatus.COMPLETED:
log.info("Pick failed.")
return False
success, _ = await self.get_feedback("grasp-feedback", object_name)
async def get_feedback(self, feedback_type, object_name):
if feedback_type == "grasp-selection-feedback":
image = await self.get_image_with_markers()
question = f"Is the the blue sphere marker over the {object_name}?"
elif feedback_type == "grasp-feedback":
image = await self.get_image()
question = f"Is the object {object_name} grasped by the robot?"
log.info(f"Giving feedback for {feedback_type}...")
log.info(f"Question: {question}")
# output = await _answer_question_from_image(image, question)
try:
# output = gradio.moondream_answer_question_from_image(image, question)
# output = replicate.moondream_answer_question_from_image(image, question)
output = gradio.qwen_vl_max_answer_question_from_image(image, question)
except httpx.ConnectError as e:
log.error(f"Error connecting to the model server: {e}")
output = await _answer_question_from_image(image, question)
log.warning(output)
if "yes" in output["result"].lower():
return True, output
return False, output
if __name__ == "__main__":
sim = RoboSim()
sim.setup()
available_tasks = sim.task_factory.get_task_types()
log.info(f"Available Tasks: {available_tasks}")
sim.add_task("Position Check", "go_to_position", [-0.3, -0.3, 1])
sim.add_task("Relative Position Check", "go_to_relative_position", [0.3, 0.1, 0.1])
sim.add_task("Go to can", "go_to_object", "Can")
sim.start()
| 19,083 | Python | 35.489484 | 101 | 0.584394 |
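A minimal driver for the RoboSim task queue defined above, run directly rather than through the FastAPI app. It assumes a working robosuite install with an on-screen renderer; the task arguments are illustrative:

import asyncio
from robosim.robosim import RoboSim

async def main():
    sim = RoboSim()
    sim.setup()
    sim.add_task("center gripper", "go_to_pick_center", "")
    sim.add_task("nudge up", "go_to_relative_position", [0.0, 0.0, 0.05])
    result = await sim.execute_async()  # runs until the queue is empty
    print(result)
    await sim.close_renderer()

asyncio.run(main())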
AshisGhosh/roboai/robosuite/robosim/robosim/robot.py | import numpy as np
import copy
from robosim.grasp_handler import GraspHandler
from robosuite.utils.transform_utils import euler2mat, mat2euler, mat2quat
from robosuite.utils.sim_utils import get_contacts
import logging
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("robosim robot")
log.setLevel(logging.INFO)
class Robot:
def __init__(self, robosim):
self.robosim = robosim
self.env = robosim.env
self.grasp = GraspHandler(self)
self._last_position = None
self._last_orientation = None
self.__goal_position = None
self.__goal_orientation = None
self.__grasp_sequence = None
self.__gripper_contact_started = None
def go_to_position(self, position):
if self.__goal_position is None:
self.__goal_position = position
marker_orientation = (
self.__goal_orientation
if self.__goal_orientation is not None
else self.get_gripper_orientation_in_world_as_euler()
)
self.robosim.move_marker(
name="gripper_goal",
position=self.__goal_position,
orientation=marker_orientation,
)
if len(position) != 3:
raise ValueError("Position must be a 3D point.")
dist = self.distance_to_position(position)
# log.debug(f"Distance: {dist}")
action = self.simple_velocity_control(dist)
if action[:-1] == [0, 0, 0, 0, 0, 0]:
self.__goal_position = None
return action
def go_to_relative_position(self, position, frame="gripper"):
if len(position) != 3:
raise ValueError("Position must be a 3D point.")
if frame != "gripper":
raise NotImplementedError("Only gripper frame is supported for now.")
if self.__goal_position is None:
self.__goal_position = self.get_gripper_position() + np.array(position)
self.robosim.move_marker(
name="gripper_goal",
position=self.__goal_position,
orientation=mat2quat(self.get_gripper_orientation_in_world()),
)
dist = self.distance_to_position(self.__goal_position)
return self.simple_velocity_control(dist)
def go_to_pick_center(self, *args):
return self.go_to_pose(pose=[-0.02, -0.27, 1.1, 0, 0, 0])
def go_to_drop(self, *args):
return self.go_to_pose(pose=[0.1, -0.57, 1.1, 0, 0, 0])
def go_to_orientation(self, orientation, roll_only=False):
if len(orientation) != 3:
raise ValueError("Orientation must be a 3D rotation.")
dist = self.delta_to_orientation(orientation)
log.debug(f"Distance (orientation): {dist}")
if roll_only:
dist[0] = dist[1] = 0
log.debug(f"Distance (roll only): {dist}")
action = self.simple_orientation_control(dist)
if action[:-1] == [0, 0, 0, 0, 0, 0]:
self.__goal_orientation = None
return action
def go_to_pose(self, pose, gripper=0):
position = pose[:3]
orientation = pose[3:]
if self.__goal_position is None:
self.__goal_position = position
dist = self.distance_to_position(position)
position_action = self.simple_velocity_control(dist)[:3]
dist = self.delta_to_orientation(orientation)
orientation_action = self.simple_orientation_control(dist)[3:-1]
if (position_action == [0, 0, 0]) and (orientation_action == [0, 0, 0]):
self.__goal_position = None
self.__goal_orientation = None
action = [*position_action, *orientation_action, gripper]
log.debug(f"ACTION: {action}")
return action
async def get_grasp(self, object_name="Cereal", *args):
log.debug(f"Getting grasp for object: {object_name}")
grasp, grasp_sequence = await self.grasp.get_grasp(obj_name=object_name)
self.__grasp_sequence = grasp_sequence
self.robosim.move_marker(grasp_sequence[1][0])
return grasp
def get_grasp_sequence(self):
return self.__grasp_sequence
def get_grasp_pose(self):
return self.__grasp_sequence[1]
def go_to_grasp_orientation(self, *args):
grasp_pose = self.__grasp_sequence[1]
grasp_ori = [0, 0, grasp_pose[1]]
return self.go_to_orientation(grasp_ori)
def go_to_grasp_position(self, *args):
grasp_pose = copy.deepcopy(self.__grasp_sequence[1])
grasp_position = grasp_pose[0]
grasp_position[2] -= 0.01
return self.go_to_position(grasp_position)
def go_to_pre_grasp(self, *args):
grasp_pose = self.__grasp_sequence[1]
pre_grasp_pos = [grasp_pose[0][0], grasp_pose[0][1], 1.05]
pre_grasp_ori = [0, 0, grasp_pose[1]]
return self.go_to_pose([*pre_grasp_pos, *pre_grasp_ori])
def get_gripper_position(self):
gripper = self.env.robots[0].gripper
gripper_pos = copy.deepcopy(
self.env.sim.data.get_site_xpos(gripper.important_sites["grip_site"])
)
return gripper_pos
def get_gripper_orientation_in_world(self):
gripper_ori = self.robosim.env._eef_xmat
return gripper_ori
def get_gripper_orientation_in_world_as_euler(self):
gripper_ori = self.get_gripper_orientation_in_world()
gripper_ori = mat2euler(gripper_ori, axes="rxyz")
return gripper_ori
def is_gripper_moving(self, action):
if action[-1]:
return True
if self._last_position is None:
self._last_position = [self.get_gripper_position()]
return True
if self._last_orientation is None:
self._last_orientation = [self.get_gripper_orientation_in_world_as_euler()]
return True
if len(self._last_position) < 10:
self._last_position.append(self.get_gripper_position())
self._last_position = None
self._last_orientation = None
return True
if len(self._last_orientation) < 10:
self._last_orientation.append(
self.get_gripper_orientation_in_world_as_euler()
)
self._last_position = None
self._last_orientation = None
return True
if len(self._last_position) > 10:
self._last_position.pop(0)
if len(self._last_orientation) > 10:
self._last_orientation.pop(0)
current_pos = self.get_gripper_position()
current_ori = self.get_gripper_orientation_in_world_as_euler()
delta_pos = np.linalg.norm(self._last_position[-1] - current_pos)
delta_ori = np.linalg.norm(
np.array(
[
self._get_closest_distance(a, b)
for a, b in zip(self._last_orientation[-1], current_ori)
]
)
)
self._last_position.append(current_pos)
self._last_orientation.append(current_ori)
log.info(f"Delta Position: {delta_pos}, Delta Orientation: {delta_ori}")
if delta_pos < 0.001 and delta_ori < 0.01:
return False
return True
def distance_to_position(self, position):
log.debug(f" Goal Position: {position}")
gripper_pos = self.get_gripper_position()
log.debug(f" Gripper Position: {gripper_pos}")
dist = position - gripper_pos
log.debug(f" Distance: {dist}")
return dist
def _get_closest_distance(self, a, b):
dist = np.remainder(a - b, 2 * np.pi)
if dist > np.pi:
dist -= 2 * np.pi
elif dist < -np.pi:
dist += 2 * np.pi
return dist
def delta_to_orientation(self, orientation):
gripper_calibration_euler = [3.13, 0.14, -1.56]
gripper_calibration = euler2mat(gripper_calibration_euler)
gripper_calibration_quat = mat2quat(gripper_calibration)
log.debug("-----")
log.debug(f" request: {orientation}")
goal_mat = euler2mat(orientation)
goal_in_world_mat = np.dot(gripper_calibration, goal_mat)
goal_in_world_euler = mat2euler(goal_in_world_mat, axes="rxyz")
goal_in_world_quat = mat2quat(goal_in_world_mat)
current_gripper_ori_mat = self.robosim.env._eef_xmat
current_ori = mat2euler(current_gripper_ori_mat, axes="rxyz")
current_ori_quat = mat2quat(current_gripper_ori_mat)
actual_dist = np.array(
[
self._get_closest_distance(a, b)
for a, b in zip(goal_in_world_euler, current_ori)
]
)
dist = actual_dist
dist[1] *= -1
dist[2] *= -1
if self.__goal_orientation is None:
self.__goal_orientation = goal_in_world_euler
marker_position = (
self.__goal_position
if self.__goal_position is not None
else self.get_gripper_position()
)
self.robosim.move_marker(
name="gripper_goal",
orientation=goal_in_world_euler,
position=marker_position,
)
log.debug(f" Gripper Calibration: {gripper_calibration_euler}")
log.debug(f" Goal in world: {goal_in_world_euler}")
log.debug(f" Current in world: {current_ori}")
log.debug(" ")
log.debug(f" Gripper Calibration [quat]: {gripper_calibration_quat}")
log.debug(f" Goal in world [quat]: {goal_in_world_quat}")
log.debug(f" Current in world [quat]: {current_ori_quat}")
return dist
def go_to_object(self, target_obj_name="Can"):
obj = self.env.objects[self.env.object_to_id[target_obj_name.lower()]]
dist = self.env._gripper_to_target(
gripper=self.env.robots[0].gripper,
target=obj.root_body,
target_type="body",
)
return self.simple_velocity_control(dist)
def simple_velocity_control(self, dist):
euclidean_dist = np.linalg.norm(dist)
if euclidean_dist < 0.01:
return [0, 0, 0, 0, 0, 0, 0]
cartesian_velocities = dist / euclidean_dist
log.debug(f" Cartesian Velocities: {cartesian_velocities}")
action = [*cartesian_velocities, 0, 0, 0, 0]
log.debug(f"XYZ Action: {action}")
return action
def simple_orientation_control(self, orientation):
euclidean_dist = np.linalg.norm(orientation)
if euclidean_dist < 0.02:
return [0, 0, 0, 0, 0, 0, 0]
max_vel = 0.4
if euclidean_dist < 0.4:
max_vel = 0.1
if euclidean_dist < 0.2:
max_vel = 0.05
# if euclidean_dist < 0.05:
# max_vel = 0.02
cartesian_velocities = orientation / euclidean_dist
cartesian_velocities = np.clip(cartesian_velocities, -max_vel, max_vel)
for i in range(3):
if abs(orientation[i]) < 0.02: # ~ 1 degree threshold
cartesian_velocities[i] = 0
action = [0, 0, 0, *cartesian_velocities, 0]
log.debug(f"RPY Action: {action} (euclidean_dist: {euclidean_dist})")
return action
def close_gripper(self, *args):
# get current gripper position
gripper = self.env.robots[0].gripper
gripper_contacts = get_contacts(self.robosim.env.sim, gripper)
log.info(f"Gripper contacts: {gripper_contacts}")
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if self._is_gripper_closed():
return [0, 0, 0, 0, 0, 0, 0]
return [0, 0, 0, 0, 0, 0, 1]
def _is_gripper_closed(self):
gripper = self.env.robots[0].gripper
gripper_contacts = get_contacts(self.robosim.env.sim, gripper)
log.info(f"Gripper contacts: {gripper_contacts}")
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if gripper_contacts:
if self.__gripper_contact_started is None:
self.__gripper_contact_started = [
left_fingerpad_pos,
right_fingerpad_pos,
]
else:
if (
np.linalg.norm(
self.__gripper_contact_started[0] - left_fingerpad_pos
)
> 0.01
and np.linalg.norm(
self.__gripper_contact_started[1] - right_fingerpad_pos
)
> 0.01
):
return False
return True
else:
self.__gripper_contact_started = None
if distance < 0.01:
return True
return False
def open_gripper(self, *args):
gripper = self.env.robots[0].gripper
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if distance > 0.08:
return [0, 0, 0, 0, 0, 0, 0]
return [0, 0, 0, 0, 0, 0, -1]
| 14,488 | Python | 36.246787 | 87 | 0.571162 |
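A standalone check of the angle-wrapping rule used by Robot._get_closest_distance above: differences are mapped back into (-pi, pi] so the orientation controller always rotates the short way around. The numeric examples are illustrative:

import numpy as np

def closest_angular_distance(a, b):
    dist = np.remainder(a - b, 2 * np.pi)
    if dist > np.pi:
        dist -= 2 * np.pi
    elif dist < -np.pi:
        dist += 2 * np.pi
    return dist

print(closest_angular_distance(3.0, -3.0))  # ~ -0.28 rad instead of 6.0
print(closest_angular_distance(0.1, 0.2))   # -0.1 rad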
AshisGhosh/roboai/robosuite/robosim/robosim/task.py | from enum import Enum
import logging
logging.basicConfig(level=logging.INFO)
class TaskClass(Enum):
CONTROL_TASK = 0
DATA_TASK = 1
ASYNC_DATA_TASK = 2
class TaskStatus(Enum):
PENDING = 0
RUNNING = 1
COMPLETED = 2
FAILED = 3
class TaskFactory:
def __init__(self):
self._creators = {}
def register_task(self, creator, task_class=TaskClass.CONTROL_TASK):
self.register_task_type(
creator.__name__,
lambda name, *args, **kwargs: Task(
name, creator, task_class, *args, **kwargs
),
)
def register_task_type(self, task_type, creator):
self._creators[task_type] = creator
def create_task(self, task_type, task_name=None, *args, **kwargs):
creator = self._creators.get(task_type)
if not creator:
raise ValueError(f"Task type {task_type} not registered.")
if task_name is not None:
# Use the provided task_name or fallback to a default naming convention
return creator(task_name, *args, **kwargs)
else:
return creator(task_type, *args, **kwargs)
def get_task_types(self):
return self._creators.keys()
class Task:
def __init__(self, name, function, task_class, *args, **kwargs):
self.name = name
self.function = function
self.task_class = task_class
self.args = args
self.kwargs = kwargs
def execute(self):
try:
return self.function(*self.args, **self.kwargs)
except Exception as e:
logging.error(f"Error executing task {self.name}: {e}")
def __str__(self):
return f"Task: {self.name}\n Function: {self.function}\n Args: {self.args}\n Kwargs: {self.kwargs}"
| 1,793 | Python | 26.181818 | 116 | 0.588957 |
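A small usage sketch of the TaskFactory/Task pattern above, using a toy function in place of a robot method; the function name and arguments are made up:

from robosim.task import TaskFactory, TaskClass

def wave(times=1):
    return f"waved {times}x"

factory = TaskFactory()
factory.register_task(wave, TaskClass.DATA_TASK)

task = factory.create_task("wave", "greet the operator", times=3)
print(task)            # Task: greet the operator ...
print(task.execute())  # waved 3x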
AshisGhosh/roboai/robosuite/robosim/robosim/quick_start.py | import numpy as np
from dataclasses import dataclass
from enum import Enum
import robosuite as suite
from robosuite import load_controller_config
class ControllerType(Enum):
JOINT_VELOCITY = 1
OSC_POSE = 2
@dataclass
class OSCControlStep:
dx: float
dy: float
dz: float
droll: float
dpitch: float
dyaw: float
gripper: float
def to_list(self):
return [
self.dx,
self.dy,
self.dz,
self.droll,
self.dpitch,
self.dyaw,
self.gripper,
]
def dummy_joint_vel_control(action, env, step):
"""
Dummy control function for joint velocity control
"""
if action is None:
action = np.random.randn(env.robots[0].dof) # sample random action
for i, a in enumerate(action):
action[i] += 0.1 * np.sin(step / 100) # add some oscillation to the action
print(f"Action {i}: {action}")
return action
def dummy_osc_control(action, env, step):
"""
Dummy control function for OSC control
dx, dy, dz, droll, dpitch, dyaw, gripper
"""
if action is None:
action = OSCControlStep(0, 0, 0, 0, 0, 0, 0)
else:
action = OSCControlStep(*action)
action.dx = 0.1 * np.sin(step / 100)
action.dy = 0.1 * np.cos(step / 100)
action.dz = 0.1 * np.sin(step / 100)
action.droll = 0.1 * np.cos(step / 100)
action.dpitch = 0.1 * np.sin(step / 100)
action.dyaw = 0.1 * np.cos(step / 100)
action.gripper = 0.1 * np.sin(step / 100)
print(f"Action: {action.to_list()}")
return action.to_list()
class robosim:
def __init__(self, controller_type=ControllerType.OSC_POSE):
self.controller_type = controller_type
self.update_action = self.get_action_func()
def get_action_func(self):
if self.controller_type == ControllerType.JOINT_VELOCITY:
return dummy_joint_vel_control
elif self.controller_type == ControllerType.OSC_POSE:
return dummy_osc_control
else:
raise ValueError("Invalid controller type")
def start(self):
print("Starting Robosuite Simulation...")
config = load_controller_config(
default_controller=self.controller_type.name
) # load default controller config
# create environment instance
env = suite.make(
env_name="Lift", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
controller_configs=config,
control_freq=20,
has_renderer=True,
render_camera="frontview",
camera_names=["frontview", "agentview"],
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
# reset the environment
env.reset()
action = None
for i in range(1000):
action = self.update_action(action, env, i)
obs, reward, done, info = env.step(action) # take action in the environment
env.render() # render on display
if __name__ == "__main__":
sim = robosim()
sim.start()
| 3,246 | Python | 26.991379 | 88 | 0.587184 |
AshisGhosh/roboai/robosuite/robosim/app/main.py | #!/usr/bin/python -u
import io
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from robosim.robosim import RoboSim
import logging
logging.basicConfig(level=logging.DEBUG)
# Create FastAPI instance
app = FastAPI()
robosim = RoboSim()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
class Task(BaseModel):
name: str
type: str
args: list | str
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robosim server."}
@app.on_event("startup")
async def startup_event():
return robosim.setup()
@app.post("/get_feedback")
async def get_feedback():
return await robosim.get_feedback("grasp-selection-feedback", "cereal")
@app.post("/pick")
async def pick(object_name: str):
return await robosim.pick(object_name)
@app.post("/test")
async def test():
robosim.clear_tasks()
await add_task(Task(name="go to pick", type="go_to_pick_center", args=""))
await add_task(Task(name="get grasp", type="get_grasp", args="cereal"))
await add_task(Task(name="go to pre grasp", type="go_to_pre_grasp", args=""))
await add_task(Task(name="open gripper", type="open_gripper", args=""))
await add_task(Task(name="go to grasp pos", type="go_to_grasp_position", args=""))
await add_task(Task(name="close gripper", type="close_gripper", args=""))
await add_task(Task(name="go to pre grasp", type="go_to_pre_grasp", args=""))
await add_task(Task(name="go to drop", type="go_to_drop", args=""))
await add_task(Task(name="open gripper", type="open_gripper", args=""))
await robosim.start_execution()
return {"msg": "Test task added and execution started."}
@app.post("/start")
async def start():
return await robosim.start_async()
@app.post("/move_pose")
async def move_pose(pose: list[float]):
await add_task(Task(name="move pose", type="go_to_pose", args=pose))
await robosim.start_execution()
return {"msg": "Pose move task added and execution started."}
@app.post("/move_orientation")
async def move_orientation(orientation: list[float]):
await add_task(
Task(name="move orientation", type="go_to_orientation", args=orientation)
)
await robosim.start_execution()
return {"msg": "Orientation move task added and execution started."}
@app.post("/move_position")
async def move_position(position: list[float]):
await add_task(Task(name="move position", type="go_to_position", args=position))
await robosim.start_execution()
return {"msg": "Position move task added and execution started."}
@app.get("/move_gripper_goal_to_gripper")
async def move_gripper_goal_to_gripper():
return robosim.move_gripper_goal_to_gripper()
@app.get("/get_gripper_orientation")
async def get_gripper_orientation():
return str(robosim.robot.get_gripper_orientation_as_euler())
@app.get("/get_gripper_orientation_in_world")
async def get_gripper_orientation_in_world():
return str(robosim.robot.get_gripper_orientation_in_world_as_euler())
@app.post("/pixel_to_marker")
async def pixel_to_marker(pixel: list[int]):
return robosim.pixel_to_marker(pixel)
@app.post("/add_marker")
async def add_marker(position: list[float]):
return robosim.add_marker(position)
@app.post("/move_marker")
async def move_marker(
name: str, position: list[float] | None, orientation: list[float] | None
):
return robosim.move_marker(name=name, position=position, orientation=orientation)
@app.get("/get_grasp_image")
async def get_grasp_image():
logging.info("Getting grasp image...")
img = await robosim.robot.grasp.get_grasp_image()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_grasp_image_and_depth")
async def get_grasp_image_and_depth():
logging.info("Getting grasp image and depth...")
img, depth = await robosim.robot.grasp.get_grasp_image_and_depth()
logging.debug("Image and depth received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_grasp_depth_image")
async def get_grasp_image_and_depth_image():
logging.info("Getting grasp image and depth...")
_img, depth = await robosim.robot.grasp.get_grasp_image_and_depth_image()
logging.debug("Image and depth received.")
buf_depth = io.BytesIO()
depth.save(buf_depth, format="PNG")
buf_depth.seek(0)
return StreamingResponse(buf_depth, media_type="image/png")
@app.get("/get_image")
async def get_image():
logging.info("Getting image...")
img = await robosim.get_image()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_image_with_markers")
async def get_image_with_markers():
logging.info("Getting image with markers...")
img = await robosim.get_image_with_markers()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.post("/pause")
async def pause():
return await robosim.pause_execution()
@app.post("/resume")
async def resume():
return await robosim.resume_execution()
@app.post("/close")
async def close():
return await robosim.close_renderer()
@app.post("/execute_tasks")
async def execute_tasks():
return await robosim.execute_async()
@app.post("/add_task")
async def add_task(task: Task):
logging.info(f"Adding task: {task.name} of type {task.type} with args {task.args}")
try:
robosim.add_task(task.name, task.type, task.args)
return {"msg": "Task added"}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
@app.get("/get_tasks")
async def get_tasks():
tasks = [
Task(name=t.name, type=t.function.__name__, args=t.args)
for t in robosim.get_tasks()
]
return tasks
@app.get("/get_objects")
async def get_objects():
return robosim.get_object_names()
| 6,879 | Python | 27.312757 | 87 | 0.682657 |
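A hypothetical client for the robosim API above; the base URL and port are assumptions, and the payload mirrors the Task model (name, type, args):

import httpx

BASE = "http://localhost:8000"  # assumed

httpx.post(f"{BASE}/add_task", json={
    "name": "go to pick",
    "type": "go_to_pick_center",
    "args": "",
}).raise_for_status()

# Run the queued tasks, then grab the marker-free camera image as PNG bytes.
httpx.post(f"{BASE}/execute_tasks", timeout=None)
image_bytes = httpx.get(f"{BASE}/get_image", timeout=None).content
open("agentview.png", "wb").write(image_bytes)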
AshisGhosh/roboai/robosuite/robomimic_sim/pyproject.toml | [tool.poetry]
name = "robomimic-sim"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.0"
uvicorn = "^0.29.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 312 | TOML | 17.411764 | 46 | 0.673077 |
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/robomimic_sim.py | import imageio
import numpy as np
import os
from copy import deepcopy
import asyncio
from PIL import Image
import torch
import robosuite
from robosuite import load_controller_config
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils
from robomimic.envs.env_base import EnvBase
from robomimic.algo import RolloutPolicy
import urllib.request
class RobomimicSim:
def __init__(self):
self.rollout_visualizing = False
self.rollout_task = None
self.render_task = None
self.close_renderer_flag = (
asyncio.Event()
) # Use an asyncio Event for better coordination
def setup(self):
# Get pretrained checkpoint from the model zoo
ckpt_path = "models/lift_ph_low_dim_epoch_1000_succ_100.pth"
# Lift (Proficient Human)
urllib.request.urlretrieve(
"http://downloads.cs.stanford.edu/downloads/rt_benchmark/model_zoo/lift/bc_rnn/lift_ph_low_dim_epoch_1000_succ_100.pth",
filename=ckpt_path,
)
assert os.path.exists(ckpt_path)
device = TorchUtils.get_torch_device(try_to_use_cuda=True)
# restore policy
policy, ckpt_dict = FileUtils.policy_from_checkpoint(
ckpt_path=ckpt_path, device=device, verbose=True
)
# create environment from saved checkpoint
env, _ = FileUtils.env_from_checkpoint(
ckpt_dict=ckpt_dict,
render=True,
render_offscreen=True, # render to RGB images for video
verbose=True,
)
self.policy = policy
self.env = env
def custom_env(self):
load_controller_config(
default_controller="OSC_POSE"
) # load default controller config
# create environment from scratch
env = robosuite.make(
env_name="Lift", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
controller_configs=None,
control_freq=20,
has_renderer=True,
render_camera="frontview",
camera_names=["frontview", "agentview"],
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
self.env = env
async def start_rollout(self):
if self.rollout_task is None or self.rollout_task.done():
self.close_renderer_flag.clear()
await self.start_renderer()
self.rollout_task = asyncio.create_task(self.run())
async def close_renderer(self):
self.close_renderer_flag.set() # Signal to stop the tasks
if self.render_task and not self.render_task.done():
await self.render_task # Await the task to ensure it completes
if self.rollout_task and not self.rollout_task.done():
self.rollout_task.cancel() # Cancel rollout task as it might be waiting for external input
try:
await (
self.rollout_task
) # Attempt to await the task to handle any cleanup
except asyncio.CancelledError:
pass # Expected if the task was cancelled
self.env.base_env.close_renderer()
async def render(self):
hz = 5
while not self.close_renderer_flag.is_set(): # Use the Event for checking
if not self.rollout_visualizing:
self.env.render(mode="human", camera_name="frontview")
await asyncio.sleep(1 / hz)
async def start_renderer(self):
if self.render_task is None or self.render_task.done():
self.close_renderer_flag.clear()
self.env.reset()
print("Now starting renderer...")
self.render_task = asyncio.create_task(self.render())
return True
async def reset(self):
self.env.reset()
return True
async def rollout(
self,
policy,
env,
horizon,
render=False,
video_writer=None,
video_skip=5,
camera_names=None,
):
"""
Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video,
and returns summary statistics for the rollout.
Args:
policy (instance of RolloutPolicy): policy loaded from a checkpoint
env (instance of EnvBase): env loaded from a checkpoint or demonstration metadata
horizon (int): maximum horizon for the rollout
render (bool): whether to render rollout on-screen
video_writer (imageio writer): if provided, use to write rollout to video
video_skip (int): how often to write video frames
camera_names (list): determines which camera(s) are used for rendering. Pass more than
one to output a video with multiple camera views concatenated horizontally.
Returns:
stats (dict): some statistics for the rollout - such as return, horizon, and task success
"""
print("Rolling out policy...")
assert isinstance(env, EnvBase)
assert isinstance(policy, RolloutPolicy)
assert not (render and (video_writer is not None))
policy.start_episode()
# obs = env.reset()
# state_dict = env.get_state()
# # hack that is necessary for robosuite tasks for deterministic action playback
# obs = env.reset_to(state_dict)
obs = env.get_observation()
video_count = 0 # video frame counter
total_reward = 0.0
self.rollout_visualizing = True
try:
for step_i in range(horizon):
await asyncio.sleep(0) # Allow other tasks to run
if self.close_renderer_flag.is_set():
print("Stopping rollout due to renderer close request...")
break
# get action from policy
act = policy(ob=obs)
# play action
next_obs, r, done, _ = env.step(act)
# compute reward
total_reward += r
success = env.is_success()["task"]
# visualization
if render:
env.render(mode="human", camera_name=camera_names[0])
if video_writer is not None:
if video_count % video_skip == 0:
video_img = []
for cam_name in camera_names:
video_img.append(
env.render(
mode="rgb_array",
height=512,
width=512,
camera_name=cam_name,
)
)
video_img = np.concatenate(
video_img, axis=1
) # concatenate horizontally
video_writer.append_data(video_img)
video_count += 1
# break if done or if success
if done or success:
break
# update for next iter
obs = deepcopy(next_obs)
env.get_state()
except env.rollout_exceptions as e:
print("WARNING: got rollout exception {}".format(e))
self.rollout_visualizing = False
stats = dict(
Return=total_reward, Horizon=(step_i + 1), Success_Rate=float(success)
)
return stats
async def run(self):
rollout_horizon = 400
np.random.seed(0)
torch.manual_seed(0)
video_path = "output/rollout.mp4"
video_writer = imageio.get_writer(video_path, fps=20)
policy = self.policy
env = self.env
stats = await self.rollout(
policy=policy,
env=env,
horizon=rollout_horizon,
render=True,
# render=False,
# video_writer=video_writer,
# video_skip=5,
camera_names=["frontview", "agentview"],
)
print(stats)
video_writer.close()
def get_image(self):
img = self.env.render(
mode="rgb_array", height=512, width=512, camera_name="frontview"
)
img = Image.fromarray(img)
return img
| 8,536 | Python | 33.28514 | 132 | 0.554944 |
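A minimal async driver for RobomimicSim above. It assumes the models/ and output/ directories already exist and that the checkpoint URL in setup() is reachable:

import asyncio
from robomimic_sim.robomimic_sim import RobomimicSim

async def main():
    sim = RobomimicSim()
    sim.setup()                 # downloads the checkpoint, builds policy + env
    await sim.start_rollout()   # spawns the renderer and rollout tasks
    await sim.rollout_task      # wait for the episode to finish
    await sim.close_renderer()

asyncio.run(main())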
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/train_bc_rnn.py | """
WARNING: This script is only for instructive purposes, to point out different portions
of the config -- the preferred way to launch training runs is still with external
jsons and scripts/train.py (and optionally using scripts/hyperparameter_helper.py
to generate several config jsons by sweeping config settings). See the online
documentation for more information about launching training.
Example script for training a BC-RNN agent by manually setting portions of the config in
python code.
To see a quick training run, use the following command:
python train_bc_rnn.py --debug
To run a full length training run on your own dataset, use the following command:
python train_bc_rnn.py --dataset /path/to/dataset.hdf5 --output /path/to/output_dir
"""
import argparse
import robomimic.utils.torch_utils as TorchUtils
import robomimic.utils.test_utils as TestUtils
import robomimic.macros as Macros
from robomimic.config import config_factory
from robomimic.scripts.train import train
def robosuite_hyperparameters(config):
"""
Sets robosuite-specific hyperparameters.
Args:
config (Config): Config to modify
Returns:
Config: Modified config
"""
## save config - if and when to save checkpoints ##
config.experiment.save.enabled = (
True # whether model saving should be enabled or disabled
)
config.experiment.save.every_n_seconds = (
None # save model every n seconds (set to None to disable)
)
config.experiment.save.every_n_epochs = (
50 # save model every n epochs (set to None to disable)
)
config.experiment.save.epochs = [] # save model on these specific epochs
config.experiment.save.on_best_validation = (
False # save models that achieve best validation score
)
config.experiment.save.on_best_rollout_return = (
False # save models that achieve best rollout return
)
config.experiment.save.on_best_rollout_success_rate = (
True # save models that achieve best success rate
)
# epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used
config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps
config.experiment.validation_epoch_every_n_steps = (
10 # each validation epoch is 10 gradient steps
)
# envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset
config.experiment.env = None # no need to set this (unless you want to override)
config.experiment.additional_envs = (
None # additional environments that should get evaluated
)
## rendering config ##
config.experiment.render = False # render on-screen or not
config.experiment.render_video = True # render evaluation rollouts to videos
config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints
config.experiment.video_skip = (
5 # render video frame every n environment steps during rollout
)
## evaluation rollout config ##
config.experiment.rollout.enabled = True # enable evaluation rollouts
config.experiment.rollout.n = 50 # number of rollouts per evaluation
config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py)
config.experiment.rollout.rate = 50 # do rollouts every @rate epochs
config.experiment.rollout.warmstart = (
0 # number of epochs to wait before starting rollouts
)
config.experiment.rollout.terminate_on_success = (
True # end rollout early after task success
)
## dataset loader config ##
# num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets
config.train.num_data_workers = 0 # assume low-dim dataset
# One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is
# by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set
# to None to use no caching - in this case, every batch sample is retrieved via file i/o.
# You should almost never set this to None, even for large image datasets.
config.train.hdf5_cache_mode = "all"
config.train.hdf5_use_swmr = True # used for parallel data loading
# if true, normalize observations at train and test time, using the global mean and standard deviation
# of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs
# in utils/dataset.py for more information.
config.train.hdf5_normalize_obs = False # no obs normalization
# if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key"
config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split
config.train.hdf5_validation_filter_key = "valid"
# fetch sequences of length 10 from dataset for RNN training
config.train.seq_length = 10
# keys from hdf5 to load per demonstration, besides "obs" and "next_obs"
config.train.dataset_keys = (
"actions",
"rewards",
"dones",
)
# one of [None, "last"] - set to "last" to include goal observations in each batch
config.train.goal_mode = None # no need for goal observations
## learning config ##
config.train.cuda = True # try to use GPU (if present) or not
config.train.batch_size = 100 # batch size
config.train.num_epochs = 2000 # number of training epochs
config.train.seed = 1 # seed for training
### Observation Config ###
config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent
"robot0_eef_pos",
"robot0_eef_quat",
"robot0_gripper_qpos",
"object",
]
config.observation.modalities.obs.rgb = [] # no image observations
config.observation.modalities.goal.low_dim = [] # no low-dim goals
config.observation.modalities.goal.rgb = [] # no image goals
# observation encoder architecture - applies to all networks that take observation dicts as input
config.observation.encoder.rgb.core_class = "VisualCore"
config.observation.encoder.rgb.core_kwargs.feature_dimension = 64
config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations)
config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = (
False # kwargs for visual core
)
config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False
config.observation.encoder.rgb.core_kwargs.pool_class = (
"SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling)
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = (
32 # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = (
False # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = (
1.0 # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = (
0.0 # Default arguments for "SpatialSoftmax"
)
# if you prefer to use pre-trained visual representations, uncomment the following lines
# R3M
# config.observation.encoder.rgb.core_kwargs.backbone_class = 'R3MConv' # R3M backbone for image observations (unused if no image observations)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.r3m_model_class = 'resnet18' # R3M model class (resnet18, resnet34, resnet50)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.freeze = True # whether to freeze network during training or allow finetuning
# config.observation.encoder.rgb.core_kwargs.pool_class = None # no pooling class for pretraining model
# MVP
# config.observation.encoder.rgb.core_kwargs.backbone_class = 'MVPConv' # MVP backbone for image observations (unused if no image observations)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.mvp_model_class = 'vitb-mae-egosoup' # MVP model class (vits-mae-hoi, vits-mae-in, vits-sup-in, vitb-mae-egosoup, vitl-256-mae-egosoup)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.freeze = True # whether to freeze network during training or allow finetuning
# config.observation.encoder.rgb.core_kwargs.pool_class = None # no pooling class for pretraining model
# observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization
config.observation.encoder.rgb.obs_randomizer_class = None
# kwargs for observation randomizers (for the CropRandomizer, this is size and number of crops)
config.observation.encoder.rgb.obs_randomizer_kwargs.crop_height = 76
config.observation.encoder.rgb.obs_randomizer_kwargs.crop_width = 76
config.observation.encoder.rgb.obs_randomizer_kwargs.num_crops = 1
config.observation.encoder.rgb.obs_randomizer_kwargs.pos_enc = False
### Algo Config ###
# optimization parameters
config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate
config.algo.optim_params.policy.learning_rate.decay_factor = (
0.1 # factor to decay LR by (if epoch schedule non-empty)
)
config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
config.algo.optim_params.policy.regularization.L2 = (
0.00 # L2 regularization strength
)
# loss weights
config.algo.loss.l2_weight = 1.0 # L2 loss weight
config.algo.loss.l1_weight = 0.0 # L1 loss weight
config.algo.loss.cos_weight = 0.0 # cosine loss weight
# MLP network architecture (layers after observation encoder and RNN, if present)
config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output
# stochastic GMM policy
config.algo.gmm.enabled = (
True # enable GMM policy - policy outputs GMM action distribution
)
config.algo.gmm.num_modes = 5 # number of GMM modes
config.algo.gmm.min_std = 0.0001 # minimum std output from network
config.algo.gmm.std_activation = (
"softplus" # activation to use for std output from policy net
)
config.algo.gmm.low_noise_eval = True # low-std at test-time
# rnn policy config
config.algo.rnn.enabled = True # enable RNN policy
config.algo.rnn.horizon = (
10 # unroll length for RNN - should usually match train.seq_length
)
config.algo.rnn.hidden_dim = 400 # hidden dimension size
config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state
config.algo.rnn.kwargs.bidirectional = False # rnn kwargs
return config
def momart_hyperparameters(config):
"""
Sets momart-specific hyperparameters.
Args:
config (Config): Config to modify
Returns:
Config: Modified config
"""
## save config - if and when to save checkpoints ##
config.experiment.save.enabled = (
True # whether model saving should be enabled or disabled
)
config.experiment.save.every_n_seconds = (
None # save model every n seconds (set to None to disable)
)
config.experiment.save.every_n_epochs = (
3 # save model every n epochs (set to None to disable)
)
config.experiment.save.epochs = [] # save model on these specific epochs
config.experiment.save.on_best_validation = (
True # save models that achieve best validation score
)
config.experiment.save.on_best_rollout_return = (
False # save models that achieve best rollout return
)
config.experiment.save.on_best_rollout_success_rate = (
True # save models that achieve best success rate
)
# epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used
    config.experiment.epoch_every_n_steps = None  # None -> each epoch is one full pass over the dataset
config.experiment.validation_epoch_every_n_steps = (
10 # each validation epoch is 10 gradient steps
)
# envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset
config.experiment.env = None # no need to set this (unless you want to override)
config.experiment.additional_envs = (
None # additional environments that should get evaluated
)
## rendering config ##
config.experiment.render = False # render on-screen or not
config.experiment.render_video = True # render evaluation rollouts to videos
config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints
config.experiment.video_skip = (
5 # render video frame every n environment steps during rollout
)
## evaluation rollout config ##
config.experiment.rollout.enabled = True # enable evaluation rollouts
config.experiment.rollout.n = 30 # number of rollouts per evaluation
config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout
config.experiment.rollout.rate = 3 # do rollouts every @rate epochs
config.experiment.rollout.warmstart = (
0 # number of epochs to wait before starting rollouts
)
config.experiment.rollout.terminate_on_success = (
True # end rollout early after task success
)
## dataset loader config ##
# num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets
    config.train.num_data_workers = 2  # use 2 workers since this dataset includes image observations
# One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is
# by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set
# to None to use no caching - in this case, every batch sample is retrieved via file i/o.
# You should almost never set this to None, even for large image datasets.
config.train.hdf5_cache_mode = "low_dim"
config.train.hdf5_use_swmr = True # used for parallel data loading
# if true, normalize observations at train and test time, using the global mean and standard deviation
# of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs
# in utils/dataset.py for more information.
config.train.hdf5_normalize_obs = False # no obs normalization
# if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key"
config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split
config.train.hdf5_validation_filter_key = "valid"
    # fetch sequences of length 50 from dataset for RNN training
config.train.seq_length = 50
# keys from hdf5 to load per demonstration, besides "obs" and "next_obs"
config.train.dataset_keys = (
"actions",
"rewards",
"dones",
)
# one of [None, "last"] - set to "last" to include goal observations in each batch
config.train.goal_mode = "last" # no need for goal observations
## learning config ##
config.train.cuda = True # try to use GPU (if present) or not
config.train.batch_size = 4 # batch size
config.train.num_epochs = 31 # number of training epochs
config.train.seed = 1 # seed for training
### Observation Config ###
config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent
"proprio",
]
config.observation.modalities.obs.rgb = [
"rgb",
"rgb_wrist",
]
config.observation.modalities.obs.depth = [
"depth",
"depth_wrist",
]
config.observation.modalities.obs.scan = [
"scan",
]
config.observation.modalities.goal.low_dim = [] # no low-dim goals
config.observation.modalities.goal.rgb = [] # no rgb image goals
### Algo Config ###
# optimization parameters
config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate
config.algo.optim_params.policy.learning_rate.decay_factor = (
0.1 # factor to decay LR by (if epoch schedule non-empty)
)
config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
config.algo.optim_params.policy.regularization.L2 = (
0.00 # L2 regularization strength
)
# loss weights
config.algo.loss.l2_weight = 1.0 # L2 loss weight
config.algo.loss.l1_weight = 0.0 # L1 loss weight
config.algo.loss.cos_weight = 0.0 # cosine loss weight
# MLP network architecture (layers after observation encoder and RNN, if present)
config.algo.actor_layer_dims = (
300,
400,
) # MLP layers between RNN layer and action output
# stochastic GMM policy
config.algo.gmm.enabled = (
True # enable GMM policy - policy outputs GMM action distribution
)
config.algo.gmm.num_modes = 5 # number of GMM modes
config.algo.gmm.min_std = 0.01 # minimum std output from network
config.algo.gmm.std_activation = (
"softplus" # activation to use for std output from policy net
)
config.algo.gmm.low_noise_eval = True # low-std at test-time
# rnn policy config
config.algo.rnn.enabled = True # enable RNN policy
config.algo.rnn.horizon = (
50 # unroll length for RNN - should usually match train.seq_length
)
config.algo.rnn.hidden_dim = 1200 # hidden dimension size
config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state
config.algo.rnn.kwargs.bidirectional = False # rnn kwargs
return config
# Valid dataset types to use
DATASET_TYPES = {
"robosuite": {
"default_dataset_func": TestUtils.example_dataset_path,
"hp": robosuite_hyperparameters,
},
"momart": {
"default_dataset_func": TestUtils.example_momart_dataset_path,
"hp": momart_hyperparameters,
},
}
def get_config(
dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False
):
"""
Construct config for training.
Args:
dataset_type (str): Dataset type to use. Valid options are DATASET_TYPES. Default is "robosuite"
dataset_path (str or None): path to hdf5 dataset. Pass None to use a small default dataset.
output_dir (str): path to output folder, where logs, model checkpoints, and videos
will be written. If it doesn't exist, the directory will be created. Pass
None to use a default directory in /tmp.
debug (bool): if True, shrink training and rollout times to test a full training
run quickly.
"""
assert (
dataset_type in DATASET_TYPES
), f"Invalid dataset type. Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}"
# handle args
if dataset_path is None:
# small dataset with a handful of trajectories
dataset_path = DATASET_TYPES[dataset_type]["default_dataset_func"]()
if output_dir is None:
# default output directory created in /tmp
output_dir = TestUtils.temp_model_dir_path()
# make default BC config
config = config_factory(algo_name="bc")
### Experiment Config ###
config.experiment.name = (
f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files
)
config.experiment.validate = True # whether to do validation or not
config.experiment.logging.terminal_output_to_txt = (
False # whether to log stdout to txt file
)
config.experiment.logging.log_tb = True # enable tensorboard logging
### Train Config ###
config.train.data = dataset_path # path to hdf5 dataset
# Write all results to this directory. A new folder with the timestamp will be created
# in this directory, and it will contain three subfolders - "log", "models", and "videos".
# The "log" directory will contain tensorboard and stdout txt logs. The "models" directory
# will contain saved model checkpoints. The "videos" directory contains evaluation rollout
# videos.
config.train.output_dir = output_dir # path to output folder
# Load default hyperparameters based on dataset type
config = DATASET_TYPES[dataset_type]["hp"](config)
# maybe make training length small for a quick run
if debug:
# train and validate for 3 gradient steps per epoch, and 2 total epochs
config.experiment.epoch_every_n_steps = 3
config.experiment.validation_epoch_every_n_steps = 3
config.train.num_epochs = 2
# rollout and model saving every epoch, and make rollouts short
config.experiment.save.every_n_epochs = 1
config.experiment.rollout.rate = 1
config.experiment.rollout.n = 2
config.experiment.rollout.horizon = 10
return config
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Dataset path
parser.add_argument(
"--dataset",
type=str,
default=None,
help="(optional) path to input hdf5 dataset to use in example script. If not provided, \
a default hdf5 packaged with the repository will be used.",
)
# Output dir
parser.add_argument(
"--output",
type=str,
default=None,
help="(optional) path to folder to use (or create) to output logs, model checkpoints, and rollout \
videos. If not provided, a folder in /tmp will be used.",
)
# debug flag for quick training run
parser.add_argument(
"--debug",
action="store_true",
help="set this flag to run a quick training run for debugging purposes",
)
# type
parser.add_argument(
"--dataset_type",
type=str,
default="robosuite",
choices=list(DATASET_TYPES.keys()),
help=f"Dataset type to use. This will determine the default hyperparameter settings to use for training."
f"Valid options are: {list(DATASET_TYPES.keys())}. Default is robosuite.",
)
args = parser.parse_args()
# Turn debug mode on possibly
if args.debug:
Macros.DEBUG = True
# config for training
config = get_config(
dataset_type=args.dataset_type,
dataset_path=args.dataset,
output_dir=args.output,
debug=args.debug,
)
# set torch device
device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda)
# run training
train(config, device=device)
| 23,496 | Python | 42.034798 | 208 | 0.682074 |
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/run_policy.py | import imageio
import numpy as np
import os
from copy import deepcopy
import torch
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils
from robomimic.envs.env_base import EnvBase
from robomimic.algo import RolloutPolicy
import urllib.request
# Get pretrained checkpoint from the model zoo
ckpt_path = "models/lift_ph_low_dim_epoch_1000_succ_100.pth"
# Lift (Proficient Human)
urllib.request.urlretrieve(
"http://downloads.cs.stanford.edu/downloads/rt_benchmark/model_zoo/lift/bc_rnn/lift_ph_low_dim_epoch_1000_succ_100.pth",
filename=ckpt_path,
)
assert os.path.exists(ckpt_path)
device = TorchUtils.get_torch_device(try_to_use_cuda=True)
# restore policy
policy, ckpt_dict = FileUtils.policy_from_checkpoint(
ckpt_path=ckpt_path, device=device, verbose=True
)
# create environment from saved checkpoint
env, _ = FileUtils.env_from_checkpoint(
ckpt_dict=ckpt_dict,
    render=True,  # do on-screen rendering
render_offscreen=True, # render to RGB images for video
verbose=True,
)
def rollout(
policy,
env,
horizon,
render=False,
video_writer=None,
video_skip=5,
camera_names=None,
):
"""
Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video,
and returns the rollout trajectory.
Args:
policy (instance of RolloutPolicy): policy loaded from a checkpoint
env (instance of EnvBase): env loaded from a checkpoint or demonstration metadata
horizon (int): maximum horizon for the rollout
render (bool): whether to render rollout on-screen
video_writer (imageio writer): if provided, use to write rollout to video
video_skip (int): how often to write video frames
camera_names (list): determines which camera(s) are used for rendering. Pass more than
one to output a video with multiple camera views concatenated horizontally.
Returns:
stats (dict): some statistics for the rollout - such as return, horizon, and task success
"""
assert isinstance(env, EnvBase)
assert isinstance(policy, RolloutPolicy)
assert not (render and (video_writer is not None))
policy.start_episode()
obs = env.reset()
state_dict = env.get_state()
# hack that is necessary for robosuite tasks for deterministic action playback
obs = env.reset_to(state_dict)
video_count = 0 # video frame counter
total_reward = 0.0
if render:
env.render(mode="human", camera_name=camera_names[0])
try:
for step_i in range(horizon):
# get action from policy
act = policy(ob=obs)
# play action
next_obs, r, done, _ = env.step(act)
# compute reward
total_reward += r
success = env.is_success()["task"]
# visualization
if render:
env.render(mode="human", camera_name=camera_names[0])
if video_writer is not None:
if video_count % video_skip == 0:
video_img = []
for cam_name in camera_names:
video_img.append(
env.render(
mode="rgb_array",
height=512,
width=512,
camera_name=cam_name,
)
)
video_img = np.concatenate(
video_img, axis=1
) # concatenate horizontally
video_writer.append_data(video_img)
video_count += 1
# break if done or if success
if done or success:
break
# update for next iter
obs = deepcopy(next_obs)
state_dict = env.get_state()
except env.rollout_exceptions as e:
print("WARNING: got rollout exception {}".format(e))
stats = dict(Return=total_reward, Horizon=(step_i + 1), Success_Rate=float(success))
return stats
rollout_horizon = 400
np.random.seed(0)
torch.manual_seed(0)
video_path = "output/rollout.mp4"
video_writer = imageio.get_writer(video_path, fps=20)
stats = rollout(
policy=policy,
env=env,
horizon=rollout_horizon,
render=True,
# render=False,
# video_writer=video_writer,
# video_skip=5,
camera_names=["frontview", "agentview"],
)
print(stats)
video_writer.close()
| 4,566 | Python | 29.446666 | 124 | 0.613666 |
AshisGhosh/roboai/robosuite/robomimic_sim/app/main.py | #!/usr/bin/python -u
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
import io
from robomimic_sim.robomimic_sim import RobomimicSim
# Create FastAPI instance
app = FastAPI()
robosim = RobomimicSim()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robomimic server."}
@app.on_event("startup")
async def startup_event():
return robosim.setup()
@app.post("/run")
async def run():
print("Running robomimic simulation...")
return await robosim.start_rollout()
@app.post("/reset")
async def reset():
print("Resetting robomimic simulation...")
return await robosim.reset()
@app.post("/start_renderer")
async def start_renderer():
print("Starting robomimic simulation...")
return await robosim.start_renderer()
@app.post("/close_renderer")
async def close_renderer():
print("Closing robomimic simulation...")
return await robosim.close_renderer()
@app.get("/get_policy")
async def get_policy():
return repr(robosim.policy)
@app.get("/get_image")
async def get_image():
print("Getting image...")
img = robosim.get_image()
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
| 1,778 | Python | 21.518987 | 71 | 0.692351 |
AshisGhosh/roboai/agent_frameworks/crewai_roboai/pyproject.toml | [tool.poetry]
name = "crewai-roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = {extras = ["tools"], version = "^0.22.5"}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 330 | TOML | 19.687499 | 50 | 0.660606 |
AshisGhosh/roboai/agent_frameworks/crewai_roboai/crewai_roboai/test.py | from crewai import Crew, Process, Agent, Task
from crewai_tools import tool
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
@tool("Get objects on the table.")
def get_objects_on_the_table() -> str:
"""Get objects on the table"""
return "Milk, Cereal" # string to be sent back to the agent
# Define your agents
planner = Agent(
role="Planner",
goal="Create plans for robots.",
backstory="An experienced planner that breaks down tasks into steps for robots.",
tools=[],
verbose=True,
allow_delegation=False,
)
analyst = Agent(
role="Scene Analyzer",
goal="Identify objects in the scene.",
backstory="An experienced analyst that can identify objects in a scene.",
tools=[get_objects_on_the_table],
verbose=True,
allow_delegation=False,
)
# Define the tasks in sequence
planner_task = Task(
description="Create a plan for a robot to clear the table.",
agent=planner,
expected_output="List of steps for a robot.",
)
analysis_task = Task(
description="List the objects that are on the table",
agent=analyst,
expected_output="List of objects.",
)
# Form the crew with a sequential process
crew = Crew(
agents=[planner, analyst],
tasks=[analysis_task, planner_task],
process=Process.sequential,
verbose=2,
)
crew.kickoff()
| 1,360 | Python | 23.745454 | 85 | 0.691176 |
AshisGhosh/roboai/agent_frameworks/langroid_roboai/pyproject.toml | [tool.poetry]
name = "langroid-roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "<3.12,>=3.9.1"
langroid = {extras = ["litellm"], version = "^0.1.222"}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 337 | TOML | 20.124999 | 55 | 0.667656 |
AshisGhosh/roboai/agent_frameworks/langroid_roboai/langroid_roboai/test.py | import langroid as lr
import langroid.language_models as lm
# set up LLM
llm_cfg = lm.OpenAIGPTConfig( # or OpenAIAssistant to use Assistant API
# any model served via an OpenAI-compatible API
# chat_model="litellm/openrouter/mistralai/mistral-7b-instruct:free"
chat_model="litellm/openrouter/huggingfaceh4/zephyr-7b-beta:free"
)
# # use LLM directly
# mdl = lm.OpenAIGPT(llm_cfg)
# response = mdl.chat("What is the capital of Ontario?", max_tokens=10)
# # use LLM in an Agent
# agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
# agent = lr.ChatAgent(agent_cfg)
# agent.llm_response("What is the capital of China?")
# response = agent.llm_response("And India?") # maintains conversation state
# wrap Agent in a Task to run interactive loop with user (or other agents)
# task = lr.Task(agent, name="Bot", system_message="You are a helpful assistant")
# task.run("Hello") # kick off with user saying "Hello"
# 2-Agent chat loop: Teacher Agent asks questions to Student Agent
agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
robot_agent = lr.ChatAgent(agent_cfg)
robot_task = lr.Task(
robot_agent,
name="Robot",
system_message="""
You are a robot and have a high level task.
You must ask the planner to break it down into steps you can do.
Your skills involve 'pick' and 'place' actions.
""",
# done_if_response=[Entity.LLM],
interactive=False,
)
planner_agent = lr.ChatAgent(agent_cfg)
planner_task = lr.Task(
planner_agent,
name="Planner",
system_message="""
Concisely return numbered steps of a plan for a robot.
The plan can only involve 'pick' and 'place' actions.
If the plan is valid, respond with 'DONE'.
""",
single_round=True,
interactive=False,
)
robot_task.add_sub_task(planner_task)
robot_task.run(
"The task is to clear the table, it has the following objects: 'Milk', 'Cereal', and a 'Can'."
)
| 1,912 | Python | 33.160714 | 98 | 0.695607 |
AshisGhosh/roboai/agent_frameworks/autogen_roboai/pyproject.toml | [tool.poetry]
name = "roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.10,<3.13"
pyautogen = "^0.2.21"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 293 | TOML | 17.374999 | 46 | 0.672355 |
AshisGhosh/roboai/agent_frameworks/autogen_roboai/autogen_roboai/test.py | import autogen
from autogen import AssistantAgent, UserProxyAgent
import tempfile
from autogen.coding import LocalCommandLineCodeExecutor
from typing_extensions import Annotated
import logging
logging.basicConfig(level=logging.INFO)
filter_dict = {"tags": ["zephyr"]}
config_list = autogen.config_list_from_json(
env_or_file="OAI_CONFIG_LIST", filter_dict=filter_dict
)
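# Sketch of an OAI_CONFIG_LIST entry that the tag filter above would match (the values
# here are placeholders; point "api_key"/"base_url" at your own endpoint):
# [
#   {
#     "model": "huggingfaceh4/zephyr-7b-beta:free",
#     "base_url": "https://openrouter.ai/api/v1",
#     "api_key": "sk-...",
#     "tags": ["zephyr"]
#   }
# ]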
assert len(config_list) == 1
llm_config = {
"config_list": config_list,
"timeout": 120,
}
task = "Create a list of steps for a robot to clear the table."
# create an AssistantAgent instance named "assistant" with the LLM configuration.
assistant = AssistantAgent(
name="assistant",
llm_config=llm_config,
system_message="""
You are a helpful assistant who can break down tasks into steps.
Please help the user with their task.
Use the functions provided to learn more about the task.
Respond with 'TERMINATE' when you are done.
""",
)
# Create a temporary directory to store the code files.
temp_dir = tempfile.TemporaryDirectory()
# Create a local command line code executor.
executor = LocalCommandLineCodeExecutor(
timeout=10, # Timeout for each code execution in seconds.
work_dir=temp_dir.name, # Use the temporary directory to store the code files.
)
user_proxy = UserProxyAgent(
name="user_proxy",
human_input_mode="NEVER",
system_message="A proxy for the user for executing code.",
code_execution_config={"executor": executor},
is_termination_msg=lambda x: "content" in x
and x["content"] is not None
and "TERMINATE" in x["content"]
and "``" not in x["content"],
)
@user_proxy.register_for_execution()
@assistant.register_for_llm(
name="identify_objs_on_table",
description="Python function to get a list of objects on the table.",
)
def identify_objs_on_table(
message: Annotated[
str, "Message to ask the inspector for the objects on the table."
],
) -> str:
logging.info("Asked for objects.")
return "Milk, Cereal, a Can."
# inspector = AssistantAgent(
# name="inspector",
# llm_config=llm_config,
# system_message="You are an inspector who can identify objects in a scene. There is 'Milk', 'Cereal' and a 'Can' on the table. Please respond with 'TERMINATE' when you are done."
# )
# user_inspector = UserProxyAgent(
# name="user_inspector",
# human_input_mode="NEVER",
# is_termination_msg=lambda x: "content" in x
# and x["content"] is not None
# and "TERMINATE" in x["content"]
# )
# @user_inspector.register_for_execution()
# @inspector.register_for_llm(
# name="identify_objects",
# description="Identify objects in the scene.",
# )
# def identify_objects(message: Annotated[str, "Message to identify objects in the scene."]):
# return "Milk, Cereal, a Can."
chat_result = user_proxy.initiate_chat(assistant, message=task)
logging.info(f"Chat result: {chat_result}")
| 2,951 | Python | 29.122449 | 183 | 0.694341 |
AshisGhosh/roboai/franka_moveit/isaac_demo.launch.py | import os
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
from moveit_configs_utils import MoveItConfigsBuilder
def generate_launch_description():
# Command-line arguments
ros2_control_hardware_type = DeclareLaunchArgument(
"ros2_control_hardware_type",
default_value="isaac",
description="ROS2 control hardware interface type to use for the launch file -- possible values: [mock_components, isaac]",
)
moveit_config = (
MoveItConfigsBuilder("moveit_resources_panda")
.robot_description(
file_path="config/panda.urdf.xacro",
mappings={
"ros2_control_hardware_type": LaunchConfiguration(
"ros2_control_hardware_type"
)
},
)
.robot_description_semantic(file_path="config/panda.srdf")
.trajectory_execution(file_path="config/gripper_moveit_controllers.yaml")
.planning_scene_monitor(
publish_robot_description=True, publish_robot_description_semantic=True
)
.planning_pipelines(pipelines=["ompl", "pilz_industrial_motion_planner"])
.to_moveit_configs()
)
# Start the actual move_group node/action server
move_group_node = Node(
package="moveit_ros_move_group",
executable="move_group",
output="screen",
parameters=[moveit_config.to_dict()],
arguments=["--ros-args", "--log-level", "info"],
)
# RViz
rviz_config_file = os.path.join(
get_package_share_directory("moveit2_tutorials"),
"config",
"panda_moveit_config.rviz",
)
rviz_node = Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="log",
arguments=["-d", rviz_config_file],
parameters=[
moveit_config.robot_description,
moveit_config.robot_description_semantic,
moveit_config.robot_description_kinematics,
moveit_config.planning_pipelines,
moveit_config.joint_limits,
],
)
# Static TF
world2robot_tf_node = Node(
package="tf2_ros",
executable="static_transform_publisher",
name="static_transform_publisher",
output="log",
arguments=["--frame-id", "world", "--child-frame-id", "panda_link0"],
)
hand2camera_tf_node = Node(
package="tf2_ros",
executable="static_transform_publisher",
name="static_transform_publisher",
output="log",
arguments=[
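            # old-style positional args: x y z yaw pitch roll parent_frame child_frame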
"0.04",
"0.0",
"0.04",
"1.57",
"0.0",
"0.0",
"panda_hand",
"sim_camera",
],
)
# Publish TF
robot_state_publisher = Node(
package="robot_state_publisher",
executable="robot_state_publisher",
name="robot_state_publisher",
output="both",
parameters=[moveit_config.robot_description],
)
# ros2_control using FakeSystem as hardware
ros2_controllers_path = os.path.join(
get_package_share_directory("moveit_resources_panda_moveit_config"),
"config",
"ros2_controllers.yaml",
)
ros2_control_node = Node(
package="controller_manager",
executable="ros2_control_node",
parameters=[ros2_controllers_path],
remappings=[
("/controller_manager/robot_description", "/robot_description"),
],
output="screen",
)
joint_state_broadcaster_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=[
"joint_state_broadcaster",
"--controller-manager",
"/controller_manager",
],
)
panda_arm_controller_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["panda_arm_controller", "-c", "/controller_manager"],
)
panda_hand_controller_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["panda_hand_controller", "-c", "/controller_manager"],
)
return LaunchDescription(
[
ros2_control_hardware_type,
rviz_node,
world2robot_tf_node,
hand2camera_tf_node,
robot_state_publisher,
move_group_node,
ros2_control_node,
joint_state_broadcaster_spawner,
panda_arm_controller_spawner,
panda_hand_controller_spawner,
]
)
| 4,745 | Python | 29.818182 | 131 | 0.589041 |
AshisGhosh/roboai/roboai/pyproject.toml | [tool.poetry]
name = "roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
litellm = "^1.34.16"
langfuse = "^2.21.1"
burr = {version = "0.19.1", extras = ["start", "streamlit"]}
graphviz = "^0.20.3"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 388 | TOML | 19.473683 | 60 | 0.649485 |
AshisGhosh/roboai/roboai/roboai/roboai.py | import time
from typing import List, Optional, Tuple
from PIL import Image # noqa: F401
from enum import Enum
from uuid import uuid4
from burr.core import Application, ApplicationBuilder, State, default, when
from burr.core.action import action
from burr.lifecycle import LifecycleAdapter
from burr.tracking import LocalTrackingClient
from shared.utils.llm_utils import (
get_closest_text_sync as get_closest_text,
get_most_important_sync as get_most_important,
)
# from shared.utils.isaacsim_client import get_image as get_image_from_sim, pick, place # noqa: F401
from shared.utils.omnigibson_client import (
get_image as get_image_from_sim,
pick,
place,
navigate_to,
get_obj_in_hand,
wait_until_ready,
) # noqa: F401
from shared.utils.image_utils import pil_to_b64, b64_to_pil
from shared.utils.gradio_client import moondream_answer_question_from_image as moondream
from task import Task
from agent import Agent
from plans import PLANS
from skills import SKILLS
from semantic_locations import SEMANTIC_LOCATIONS
from role_context import ROBOT_CONTEXT, ROLE_CONTEXT, EMPLOYEE_HANDBOOK
from knowledge_base_utils import KnowledgeBase
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
knowledge_base = KnowledgeBase()
DEFAULT_MODEL = "openrouter/meta-llama/llama-3-8b-instruct:free"
# DEFAULT_MODEL = "openrouter/huggingfaceh4/zephyr-7b-beta:free"
# DEFAULT_MODEL = "ollama/llama3:latest"
# DEFAULT_MODEL = "ollama/phi3"
# CODING_MODEL = "ollama/codegemma:instruct"
CODING_MODEL = DEFAULT_MODEL
class LogColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def pick_mock(object_name: str):
name = pick_mock.__name__
print(f"Called {name} TEST MODE ENABLED")
return True
def place_mock(location: str):
name = place_mock.__name__
print(f"Called {name} TEST MODE ENABLED")
return True
def extract_code(raw_input, language="python"):
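    """Extract the first fenced code block from raw LLM output.

    Prefers a block opened with ```<language> (default "python"), falls back to a bare
    ``` fence, and finally returns the whole (stripped) string if no fence is found.
    """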
start_delimiter = f"```{language}"
if start_delimiter not in raw_input:
start_delimiter = "```"
code_start_index = raw_input.find(start_delimiter)
if code_start_index == -1:
code_start_index = 0
else:
code_start_index += len(start_delimiter)
end_delimiter = "```"
code_end_index = raw_input.find(end_delimiter, code_start_index)
if code_end_index == -1:
code_end_index = len(raw_input)
code = raw_input[code_start_index:code_end_index].strip()
log.debug(f"Extracted code: \n{code}")
return code
def exec_code(code, exec_vars, attempts=3):
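    """Execute `code` against `exec_vars`, retrying up to `attempts` times.

    On an exception, the error and code are handed to a coder agent (CODING_MODEL),
    which is asked to repair the syntax before the next attempt. Returns True if any
    attempt succeeds, otherwise False.
    """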
success = False
history = []
for _ in range(attempts):
try:
if history:
                log.warning(
f"{LogColors.WARNING}Executing code, retry attempt {len(history)}{LogColors.ENDC}"
)
coder_task = Task(
f"""Given the following error, fix the syntax. Here is the error:
{history[-1][1]}\n{code}
Ensure any explanations are formatted as comments.
""",
expected_output_format="""
```python
# explanatations are only formatted as comments
my_variable = "proper_python_syntax"
my_list = ["proper_python_syntax"]
```
""",
)
coder_agent = Agent(
name="Coder",
model=CODING_MODEL,
system_message="You are an expert coder. Only return proper syntax.",
)
coder_task.add_solving_agent(coder_agent)
output = coder_task.run()
code = extract_code(output)
log.info(f"{LogColors.OKBLUE}Fixed code: \n{code}{LogColors.ENDC}")
exec(code, exec_vars)
success = True
break
except Exception as e:
log.error(f"Error executing code: {e}")
history.append((code, f"Error executing code: {e}"))
time.sleep(1)
return success
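# The Burr actions below share a convention: each declares the state fields it reads
# and writes, and returns a (result, updated_state) tuple; user-facing output is
# surfaced by appending chat items to `chat_history` on the state.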
@action(reads=["current_state"], writes=["chat_history", "prompt"])
def process_prompt(state: State, prompt: str) -> Tuple[dict, State]:
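    """Record the user prompt in chat history; if the previous task FAILED, bundle the
    new prompt together with the original one so downstream actions see both."""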
result = {"chat_item": {"role": "user", "content": prompt, "type": "text"}}
if state["current_state"] == "FAILED":
original_prompt = state["prompt"]
if isinstance(original_prompt, str):
prompt = [original_prompt, prompt]
elif isinstance(original_prompt, list):
            prompt = original_prompt + [prompt]
log.info(f"{LogColors.OKCYAN}Prompt: {prompt}{LogColors.ENDC}")
return result, state.append(chat_history=result["chat_item"]).update(prompt=prompt)
class PromptType(Enum):
UNKNOWN = "unknown"
PERFORM_NEW_TASK = "perform new task"
RESPOND_TO_QUESTION = "respond to question"
UPDATE_KNOWLEDGE_BASE = "update knowledge base"
RETRY_EXISTING_TASK = "retry existing task"
MODIFY_EXISTING_TASK = "modify existing task"
@classmethod
def all_values(cls):
return [prompt.value for prompt in cls]
def __eq__(self, other):
if isinstance(other, str):
return self.value == other
if isinstance(other, PromptType):
return self.value == other.value
return NotImplemented
@action(reads=["prompt"], writes=["current_state", "prompt_cls", "task_state_idx"])
def parse_prompt(state: State) -> Tuple[dict, State]:
# determine the mode of the prompt
prompt = state["prompt"]
if state["current_state"] == "FAILED":
prompt = state["prompt"][-1]
options = PromptType.all_values()
classify_prompt = Task(
f"""Given the following prompt, classify the type of prompt it is:
{prompt}
Options: {options}
Examples
- Prompt: "Go to the kitchen and grab the items" Output: "perform new task"
- Prompt: "What skills do you have?" Output: "respond to question"
- Prompt: "Don't handle bottles." Output: "update knowledge base"
- Prompt: "Remember to ignore the red cup." Output: "update knowledge base"
- Prompt: "You can find the items in the kitchen." Output: "update knowledge base"
- Prompt: "The beer is in the fridge." Output: "update knowledge base"
- Prompt: "Beer goes in the fridge." Output: "update knowledge base"
- Prompt: "I want to retry the task" Output: "retry existing task"
- Prompt: "Don't pick up the items, just scan the scene" Output: "modify existing task"
""",
expected_output_format="A string respresenting the classification.",
)
classify_prompt_agent = Agent(
name="Update Classifier",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
classify_prompt.add_solving_agent(classify_prompt_agent)
output = classify_prompt.run()
prompt_cls = get_closest_text(output, options)
log.info(f"{LogColors.OKGREEN}Prompt classification: {prompt_cls}{LogColors.ENDC}")
task_state_idx = 0
if prompt_cls == PromptType.PERFORM_NEW_TASK:
current_state = "STARTING"
content = "Starting a new task..."
elif prompt_cls == PromptType.RESPOND_TO_QUESTION:
current_state = "PENDING"
content = "Responding to a question..."
elif prompt_cls == PromptType.UPDATE_KNOWLEDGE_BASE:
current_state = "PENDING"
content = "Updating knowledge..."
elif prompt_cls == PromptType.RETRY_EXISTING_TASK:
current_state = "RUNNING"
content = "Retrying the task..."
task_state_idx = state["task_state_idx"] - 1
if state["current_state"] != "FAILED":
current_state = "PENDING"
prompt_cls = PromptType.UNKNOWN
content = "There is no task to retry."
elif prompt_cls == PromptType.MODIFY_EXISTING_TASK:
current_state = "PENDING"
content = "Modifying existing task...\n\nThis feature is not supported yet."
else:
prompt_cls = PromptType.UNKNOWN
current_state = "PENDING"
content = "Unknown prompt. Please provide a valid prompt."
result = {
"current_state": current_state,
"prompt_cls": prompt_cls,
"task_state_idx": task_state_idx,
}
chat_item = {
"content": content,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=[])
def respond_to_question(state: State) -> Tuple[dict, State]:
answer_question = Task(
f"""
Given the following prompt, answer the question.
Here is the prompt: \n{state['prompt']}
Here is context:
{ROBOT_CONTEXT}
{ROLE_CONTEXT}
{EMPLOYEE_HANDBOOK}
Locations available: {list(SEMANTIC_LOCATIONS.keys())}
Skills available: {list(SKILLS.keys())}
Previous plans: {list(PLANS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
If you do not know the answer, return "I do not know."
""",
expected_output_format="A string representing the answer.",
)
answer_question_agent = Agent(
name="Answerer",
model=DEFAULT_MODEL,
system_message="""
You are an agent that answers questions.
""",
)
answer_question.add_solving_agent(answer_question_agent)
output = answer_question.run()
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return {}, state.append(chat_history=chat_item)
@action(reads=["prompt"], writes=[])
def update_knowledge_base(state: State) -> Tuple[dict, State]:
parse_knowledge = Task(
f"""Given the following text, extract the knowledge: \n{state['prompt']}
Do not include any other text that is not the extracted knowledge.
Examples:
- Text: "Don't handle bottles." Output: "Don't handle bottles"
- Text: "Remember to ignore the red cup." Output: "Ignore the red cup"
- Text: "If you're looking for the cheese, there's a very high chance you could find it in the kitchen." Output: "Cheese in the kitchen"
""",
expected_output_format="A string representing the knowledge.",
)
parse_knowledge_agent = Agent(
name="Knowledge Extractor",
model=DEFAULT_MODEL,
system_message="""
You are an agent that extracts knowledge.
""",
)
parse_knowledge.add_solving_agent(parse_knowledge_agent)
new_knowledge = parse_knowledge.run()
knowledge_tags = get_most_important(new_knowledge, 2)
    # normalize tags: lower case and strip surrounding whitespace
knowledge_tags = [tag.lower().strip() for tag in knowledge_tags]
knowledge_base.add_data(uuid4().hex, new_knowledge, knowledge_tags)
chat_item = {
"content": f"""Updated knowledge base with:
{new_knowledge}
Tags: {knowledge_tags}""",
"type": "text",
"role": "assistant",
}
return {}, state.append(chat_history=chat_item)
@action(reads=["prompt"], writes=["task"])
def determine_if_task_in_skill_library(state: State) -> Tuple[dict, State]:
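    """Check whether the prompt closely matches a plan already in the plan library."""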
closest_text = get_closest_text(state["prompt"], list(PLANS.keys()), threshold=0.75)
if closest_text:
result = {"task": closest_text}
content = f"Task determined to be **{result['task']}**"
else:
result = {"task": "unknown"}
content = f"Parsing unknown task... **{state['prompt']}**"
chat_item = {"role": "assistant", "content": content, "type": "text"}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=["task"])
def create_plan_for_unknown_task(state: State) -> Tuple[dict, State]:
result = {"task": state["prompt"]}
chat_item = {
"content": f"Creating plan for **{state['prompt']}**",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=["closest_plans"])
def get_closest_plans(state: State) -> Tuple[dict, State]:
closest_plans = get_closest_text(
state["prompt"], list(PLANS.keys()), k=2, threshold=0.75
)
chat_item = {
"content": f"Closest plans: {closest_plans}",
"type": "text",
"role": "assistant",
}
result = {"closest_plans": closest_plans}
return result, state.append(chat_history=chat_item).update(
closest_plans=closest_plans
)
@action(
reads=["prompt", "closest_plans"],
writes=[
"robot_context",
"role_context",
"employee_context",
],
)
def get_role_and_location_context(state: State) -> Tuple[dict, State]:
result = {
"robot_context": ROBOT_CONTEXT,
"role_context": ROLE_CONTEXT,
"employee_context": EMPLOYEE_HANDBOOK,
}
chat_item = {
"content": "Getting role and location context. Creating initial plan.",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=[
"prompt",
"closest_plans",
"robot_context",
"role_context",
"employee_context",
],
writes=["plan"],
)
def create_initial_plan(state: State) -> Tuple[dict, State]:
closest_plans = state["closest_plans"]
if closest_plans:
closest_plans = [f"{k}:\n {PLANS[k]}" for k in closest_plans]
closest_plans = "\n".join(closest_plans)
task = Task(
f"""
Given the following prompt and current robot state, return a simplified high level plan for a robot to perform.
Do not include steps related to confirming successful execution or getting feedback. Do not include steps that indicate to repeat steps.
Prompt:
{state['prompt']}
Current robot location: {state['location'] if 'location' in state else 'living room'}
Examples:
{closest_plans if closest_plans else "No examples available."}
If information is needed, use the skills to get observe or scan the scene.
Context:
Robot:
{state['robot_context']}
Role:
{state['role_context']}
Employee Handbook:
{state['employee_context']}
Here is a list of locations that the robot can go to:
{list(SEMANTIC_LOCATIONS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
""",
expected_output_format="A numbered list of steps.",
)
parser_agent = Agent(
name="Parser",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
task.add_solving_agent(parser_agent)
plan = task.run()
chat_item = {
"content": f"Initial plan: \n\n{plan}",
"type": "text",
"role": "assistant",
}
return {"plan": plan}, state.append(chat_history=chat_item).update(plan=plan)
@action(reads=["plan"], writes=["plan"])
def create_robot_grounded_plan(state: State) -> Tuple[dict, State]:
plan = state["plan"]
skills_verbose = "\n".join([f"{k} : {v['description']}" for k, v in SKILLS.items()])
robot_grounded_plan = Task(
f"""
Map and consolidate the following steps to the Available Robot Skills and locations.
{plan}
Try to match the number of steps. Do not add any additional steps.
Available Robot Skills:
{skills_verbose}
        If there is no match for that step, return 'False'. Be conservative in the matching. There shall only be one skill per step. Summarize whether the plan is feasible at the end.
Here is a list of locations that the robot can go to: {list(SEMANTIC_LOCATIONS.keys())}
If there are pick and place steps following an observation or scanning step, consolidate those steps into a rollout step for a pick and place plan.
""",
expected_output_format="A numbered list of steps mapped to single skill each or 'False' followed by a summary if the task is feasible.",
)
robot_grounded_agent = Agent(
name="Robot Grounded",
model=DEFAULT_MODEL,
system_message="""
You are an agent that grounds a set of actions to robot skills.
""",
)
robot_grounded_plan.add_solving_agent(robot_grounded_agent)
robot_grounded_plan_output = robot_grounded_plan.run()
chat_item = {
"content": f"**Robot grounded plan**: \n\n{robot_grounded_plan_output}",
"type": "text",
"role": "assistant",
}
return {"plan": robot_grounded_plan_output}, state.append(
chat_history=chat_item
).update(plan=robot_grounded_plan_output)
@action(reads=["plan"], writes=["feasible"])
def determine_if_plan_is_feasibile(state: State) -> Tuple[dict, State]:
robot_grounded_plan_output = state["plan"]
extract_feasibility = Task(
f"Given the following summary, return if the task is feasible. Summary: \n{robot_grounded_plan_output}",
expected_output_format="True or False. Do not add any other information.",
)
feasibility_agent = Agent(
name="Feasibility",
# model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
model=DEFAULT_MODEL,
system_message="""
You are a conservative agent that determines if a plan is feasible.
""",
)
extract_feasibility.add_solving_agent(feasibility_agent)
feasibility_output = extract_feasibility.run()
feasible = get_closest_text(feasibility_output, ["True", "False"])
feasible = True if feasible == "True" else False if feasible is not None else None
result = {
"task": robot_grounded_plan_output,
"feasible": feasible,
}
chat_item = {
"content": f"Feasible: `{result['feasible']}`",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["task", "feasible"], writes=["response", "current_state", "task"])
def convert_plan_to_steps(state: State) -> Tuple[dict, State]:
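    """Ask an LLM to turn the grounded plan into a Python list of step strings, then
    store those steps as the task (marking the run DONE if the plan was infeasible)."""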
plan = state["task"]
plan_to_list_of_steps = Task(
f"""Given the following output, take the numbered list and return is as a python list assigned to `list_of_steps`.
Do not remove any relevant information. Include information about skills and locations into the correct list item.
Here is the plan:
{plan}
Here is an example:
Plan:
1. navigate to location, the location is the kitchen
2. scan the kitchen for relevant objects
3. roll out a plan to pick and place the objects
Output:
```python
list_of_steps = ["navigate to location, the location is the kitchen", "scan the kitchen for relevant objects", "roll out a plan to pick and place the objects"]
```
""",
expected_output_format="""
```python
list_of_steps = ["step 1", "step 2", "step 3"]
```
""",
)
plan_to_list_of_steps_agent = Agent(
name="Plan to List of Steps",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with only code.
""",
)
plan_to_list_of_steps.add_solving_agent(plan_to_list_of_steps_agent)
output = plan_to_list_of_steps.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
log.info(exec_vars.get("list_of_steps", None))
steps = exec_vars.get("list_of_steps", None)
content = "Steps:\n\n" + "\n".join(
[f"{i+1}. {step}" for i, step in enumerate(steps)]
)
except Exception as e:
log.error(f"Error executing code: {e}")
steps = None
content = "Failed to extract steps. Please check the plan and try again."
# formatted_steps = "\n".join([f"{i+1}. {step}" for i, step in enumerate(steps)])
feasible = state["feasible"]
current_state = "STARTING" if feasible else "DONE"
result = {
"response": {
"content": content,
"type": "text" if steps is not None else "error",
"role": "assistant",
},
"current_state": current_state,
"task": steps,
}
return result, state.append(chat_history=result["response"]).update(**result)
def get_closest_state_from_skills(step: str, skills: dict) -> str:
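    """Map a free-form plan step to the skill whose description is semantically closest."""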
skill_descriptions = [s["description"] for s in skills.values()]
closest_description = get_closest_text(step, skill_descriptions)
state_idx = skill_descriptions.index(closest_description)
return list(skills.keys())[state_idx]
@action(
reads=["task"],
writes=["state_machine", "task_state", "task_state_idx", "current_state", "task"],
)
def create_state_machine(state: State) -> Tuple[dict, State]:
"""
Create a viable state machine for the task.
Every task requires:
* the robot and environment state
* ensuring the robot has the skills to perform the required steps
"""
task = state["task"]
if state["task"] == "What is on the table?":
result = {
"state_machine": [
"get_image",
"ask_vla",
"get_list_of_objects",
],
"task_state": "not_started",
"current_state": "RUNNING",
}
elif state["task"] == "Clear the table":
result = {
"state_machine": [
"get_image",
"ask_vla",
"get_list_of_objects",
"create_plan",
"code",
"execute_code",
],
"task_state": "not_started",
"current_state": "RUNNING",
}
elif state["task"] == "unknown":
result = {
"state_machine": "unknown",
"task_state": "unknown",
"current_state": "DONE",
}
else:
plan = state["task"]
state_machine = [get_closest_state_from_skills(step, SKILLS) for step in plan]
log.info(f"STATE_MACHINE:\n\n{state_machine}\n")
### Use symbolic logic to prune plan
# Ensure that there are only rollout steps after an observation step until the next observation or navigation step
observation_steps = ["scan the scene"]
observation_step_idxs = [
i for i, step in enumerate(state_machine) if step in observation_steps
]
pick_and_place_steps = [
"rollout pick and place plan",
"pick object",
"place in location",
]
pick_and_place_step_idxs = [
i for i, step in enumerate(state_machine) if step in pick_and_place_steps
]
if len(observation_step_idxs) > 0 and len(pick_and_place_step_idxs) > 0:
for i, observation_idx in enumerate(observation_step_idxs):
pick_and_place_exists = False
if observation_idx + 1 < len(state_machine):
while state_machine[observation_idx + 1] in pick_and_place_steps:
pick_and_place_exists = observation_idx + 1
state_machine.pop(observation_idx + 1)
task.pop(observation_idx + 1)
                        log.debug(f"Pruned steps; remaining after index {observation_idx}: {state_machine[observation_idx + 1:]}")
if observation_idx + 1 >= len(state_machine):
break
if pick_and_place_exists:
state_machine.insert(
pick_and_place_exists, "rollout pick and place plan"
)
task.insert(pick_and_place_exists, "rollout pick and place plan")
log.info(f"UPDATED STATE_MACHINE (prune for rollout):\n\n{state_machine}\n")
# Consolidate adjacent roll out steps
rollout_steps = ["rollout pick and place plan"]
rollout_step_idxs = [
i for i, step in enumerate(state_machine) if step in rollout_steps
]
if len(rollout_step_idxs) > 1:
consolidated_state_machine = []
for i, s in enumerate(state_machine):
if s in rollout_steps:
if i > 0 and state_machine[i - 1] not in rollout_steps:
consolidated_state_machine.append("rollout pick and place plan")
else:
consolidated_state_machine.append(s)
state_machine = consolidated_state_machine
log.info(
f"UPDATED STATE_MACHINE (consolidate rollout steps):\n\n{state_machine}\n"
)
result = {
"state_machine": state_machine,
"task_state": "not_started",
"current_state": "RUNNING",
}
result["task"] = task
result["task_state_idx"] = 0
log.info(f"Task: {task}")
log.info(f"State machine: {state_machine}")
output = "Here is the consolidated task:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(task)])
output += "\n```"
output += "\n\n"
output += "Here is the state machine:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(state_machine)])
output += "\n```"
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
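# Worked example of the pruning/consolidation above (hypothetical plan, for illustration):
#   raw state machine -> ["navigate to location", "scan the scene",
#                         "pick object", "place in location", "pick object", "place in location"]
#   after pruning     -> ["navigate to location", "scan the scene", "rollout pick and place plan"]
# Concrete pick/place steps that directly follow an observation step are collapsed into a
# single rollout step; the rollout step later re-expands them from the objects actually observed.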
@action(
reads=["state_machine", "task_state_idx"],
writes=["task_state", "task_state_idx", "current_state", "state_machine", "task"],
)
def execute_state_machine(state: State) -> Tuple[dict, State]:
"""
State machine manages the execution of fully observable steps
"""
task = state["task"]
current_state = "RUNNING"
state_machine = state["state_machine"]
if state["task_state"] == "not_started":
task_state = state["state_machine"][0]
task_state_idx = state["task_state_idx"]
else:
task_state_idx = state["task_state_idx"] + 1
if task_state_idx < len(state["state_machine"]):
task_state = state["state_machine"][task_state_idx]
else:
task_state = "done"
current_state = "DONE"
result = {
"task_state": task_state,
"task_state_idx": task_state_idx,
"current_state": current_state,
"state_machine": state_machine,
"task": task,
}
if task_state_idx < len(state_machine):
content = f"Executing task: **{task[task_state_idx]}**\n\nTask state: `{task_state}`\n\nStep {task_state_idx+1} of {len(state_machine)}"
else:
content = f"Task completed: **{state['prompt']}**"
chat_item = {
"content": content,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["state_machine", "task", "task_state", "task_state_idx"], writes=["location"]
)
def navigate_to_location(state: State) -> Tuple[dict, State]:
step = state["task"][state["task_state_idx"]]
extract_location = Task(
f"""
Given the following step, extract the location (e.g. kitchen), item (e.g. sink) or destination and return it.
Here is the string to extract the location:
{step}
Examples:
- Text: "navigate to the kitchen" Output: "kitchen"
""",
expected_output_format="A string representing the location, item or destination.",
)
extract_location_agent = Agent(
name="Location Extractor",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
extract_location.add_solving_agent(extract_location_agent)
output = extract_location.run()
try:
location = get_closest_text(output, list(SEMANTIC_LOCATIONS.keys()))
if not navigate_to(
SEMANTIC_LOCATIONS[location]["name"],
SEMANTIC_LOCATIONS[location]["location"],
):
raise Exception(f"Error navigating to location: {location}")
if not wait_until_ready():
raise Exception(f"Error navigating to location: {location}")
content = f"Navigating to location: **{location}**"
except Exception as e:
log.error(f"Error: {e}")
location = None
content = f"{e}"
result = {"location": location}
chat_item = {
"content": content,
"type": "text" if location is not None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["state_machine"], writes=[])
def scan_the_scene(state: State) -> Tuple[dict, State]:
result = {}
chat_item = {
"content": "Scanning the scene...",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["state_machine"], writes=["image"])
def get_image(state: State) -> Tuple[dict, State]:
try:
image = get_image_from_sim()
# image = Image.open("shared/data/test1.png")
image = pil_to_b64(image)
result = {"image": image}
chat_item = {
"content": image,
"type": "image",
"role": "assistant",
}
except Exception as e:
log.error(f"Error getting image: {e}")
result = {"image": None}
chat_item = {
"content": f"Error getting image: {e}",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["image"], writes=["vla_response"])
def ask_vla(
state: State, vla_prompt: str = "Describe the image."
) -> Tuple[dict, State]:
image = b64_to_pil(state["image"])
result = {"vla_response": moondream(image, vla_prompt)["result"]}
chat_item = {
"content": f"**Image Description:**:\n\n{result['vla_response']}",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["vla_response"], writes=["observations"])
def get_list_of_objects(state: State) -> Tuple[dict, State]:
task = Task(
f"""Given the following, return a list assigned to `objects_on_table` of the objects on the table. The table is not an object.
Summary:
{state['vla_response']}
Example:
Summary:
There is an object on the table called "Object 1", an object on the table called "Object 2", and an object on the table called "Object 3".
Output:
```
objects_on_table = ["Object 1", "Object 2", "Object 3"]
```
Don't use any functions, manually identify the objects on the table from the summary.
""",
expected_output_format="""
```
objects_on_table = ["Object 1", "Object 2", "Object 3"]
```
""",
)
analyzer_agent = Agent(
name="Analyzer",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with lists.
""",
)
task.add_solving_agent(analyzer_agent)
output = task.run()
code = extract_code(output)
    objects_on_table = None
    observations = {"objects_on_table": objects_on_table}
    try:
        exec_vars = {}
        exec_code(code, exec_vars)
        log.info(exec_vars.get("objects_on_table", None))
        objects_on_table = exec_vars.get("objects_on_table", None)
        observations = {"objects_on_table": objects_on_table}
    except Exception as e:
        # The defaults above keep these names defined if the generated code fails to run.
        log.error(f"Error executing code: {e}")
    result = {"observations": observations}
chat_item = {
"content": f"Objects on table: \n\n`{objects_on_table}`",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["observations", "task_state_idx", "location"],
writes=["state_machine", "task"],
)
def rollout_pick_and_place_plan(state: State) -> Tuple[dict, State]:
task_idx = state["task_state_idx"]
state_machine = state["state_machine"]
task = state["task"]
rollout_task = Task(
f"""Rollout a pick and place plan for the robot given the following objects:
{state['observations']}
The robot and the objects are at {state['location']}
Here are the locations the robot can go to:
{list(SEMANTIC_LOCATIONS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
Here is an example:
'{{'objects_on_table': ['cheese', 'milk', 'book']}}
The robot and the objects are at {{'counter'}}
Output:
```
pick_and_place_tasks = ["Pick cheese at counter and place at kitchen", "Pick milk at counter and place at kitchen", "Pick book at counter and place at shelf"]
```
Don't use any functions, manually synthesize the pick and place tasks from the summary.
Here is additional knowledge you've learned, factor this into the plan:
{knowledge_base.get_knowledge_as_string()}
""",
expected_output_format="""
```
pick_and_place_tasks = ["Pick object1 at location and place at destination", "Pick object2 at location and place at destination", "Pick object3 at location and place at destination"]
```
""",
)
planner_agent = Agent(
name="Planner",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with lists.
""",
)
rollout_task.add_solving_agent(planner_agent)
output = rollout_task.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
log.info(exec_vars.get("pick_and_place_tasks", None))
pick_and_place_tasks = exec_vars.get("pick_and_place_tasks", None)
task = task[: task_idx + 1] + pick_and_place_tasks + task[task_idx + 1 :]
state_machine = (
state_machine[: task_idx + 1]
+ ["pick_and_place"] * len(pick_and_place_tasks)
+ state_machine[task_idx + 1 :]
)
except Exception as e:
log.error(f"Error executing code: {e}")
raise NotImplementedError(
"error handling for Rollout pick and place plan not implemented"
)
result = {"state_machine": state_machine, "task": task}
log.info(f"Task: {task}")
log.info(f"State machine: {state_machine}")
output = "Here is the task:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(task)])
output += "\n```"
output += "\n\n"
output += "Here is the state machine:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(state_machine)])
output += "\n```"
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
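# Splice illustration (hypothetical values): with task_state_idx == 1 and two rolled-out
# steps, task ["scan the scene", "rollout pick and place plan"] becomes
# ["scan the scene", "rollout pick and place plan",
#  "Pick cup at table and place at cabinet", "Pick plate at table and place at cabinet"],
# and the state machine gains one "pick_and_place" entry per inserted step.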
@action(
reads=["task", "state_machine", "task_state_idx", "location"],
writes=["obj_to_grasp", "obj_location", "obj_destination"],
)
def pick_and_place(state: State) -> Tuple[dict, State]:
get_object = Task(
f"""Given the following, extract the object of interest as a string assigned to `obj_to_grasp`.
Here is the string to extract the object:
{state["task"][state["task_state_idx"]]}
Here is an example:
Here is the string to extract the object:
Pick cheese at counter and place in kitchen
Output:
```
obj_to_grasp = "cheese"
```
Don't use any functions. Manually identify the object from the summary.
""",
expected_output_format="""
```
obj_to_grasp = "object1"
```
""",
)
analyzer_agent = Agent(
name="Analyzer",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with variables.
""",
)
get_object.add_solving_agent(analyzer_agent)
output = get_object.run()
code = extract_code(output)
    obj_to_grasp = None
    try:
        exec_vars = {}
        exec_code(code, exec_vars)
        obj_to_grasp = exec_vars.get("obj_to_grasp", None)
    except Exception as e:
        # The default above keeps obj_to_grasp defined if the generated code fails to run.
        log.error(f"Error executing code: {e}")
# Assume object location is current unless previously stored
location = state["location"]
if "obj_location" in state:
location = state["obj_location"]
get_obj_destination = Task(
f"""Given the following, extract the destination as a string assigned to `obj_destination`.
Here is the string to extract the destination:
{state["task"][state["task_state_idx"]]}
Example:
Here is the string to extract the destination:
Pick cheese at counter and place in kitchen
Output: 'kitchen'
""",
expected_output_format="String representing the destination.",
)
get_obj_destination_agent = Agent(
name="Destination Extractor",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with variables.
""",
)
get_obj_destination.add_solving_agent(get_obj_destination_agent)
output = get_obj_destination.run()
destination = get_closest_text(output, list(SEMANTIC_LOCATIONS.keys()))
result = {
"obj_to_grasp": obj_to_grasp,
"obj_location": location,
"obj_destination": destination,
}
chat_item = {
"content": f"Pick and place **{obj_to_grasp}** at **{location}**",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_to_grasp", "obj_location"], writes=["location"])
def navigate_for_pick(state: State) -> Tuple[dict, State]:
location = state["obj_location"]
obj_to_grasp = state["obj_to_grasp"]
location = get_closest_text(location, list(SEMANTIC_LOCATIONS.keys()))
log.info(f"Pick and place {obj_to_grasp} at {location}")
try:
if state["location"] != location:
log.info(f"Changing location from {state['location']} to {location}")
if not navigate_to(
SEMANTIC_LOCATIONS[location]["name"],
SEMANTIC_LOCATIONS[location]["location"],
):
raise Exception(f"Error navigating to location: {location}")
if not wait_until_ready():
raise Exception(f"Error navigating to location: {location}")
content = f"Navigated to **{location}** to pick **{obj_to_grasp}**"
except Exception as e:
log.error(f"Error navigating to location: {e}")
location = None
content = f"{e}"
result = {"location": location}
chat_item = {
"content": content,
"type": "text" if location is not None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_to_grasp", "obj_location"], writes=["obj_in_hand", "obj_to_grasp"])
def pick_object(state: State) -> Tuple[dict, State]:
PICK_TIMEOUT = 30.0
obj_to_grasp = state["obj_to_grasp"]
print(f"Pick {obj_to_grasp}")
try:
if not pick(obj_to_grasp):
raise Exception(f"Error picking object: {obj_to_grasp}")
pick_start_time = time.time()
obj_in_hand = None
while not obj_in_hand and time.time() - pick_start_time < PICK_TIMEOUT:
obj_in_hand = get_obj_in_hand()
time.sleep(0.1)
if obj_in_hand:
print(f"Object in hand: {obj_in_hand}")
obj_to_grasp = None
if not wait_until_ready():
raise Exception(f"Error picking object: {obj_to_grasp}")
content = f"Picked **{obj_in_hand}**"
except Exception as e:
log.error(f"Error picking object: {e}")
obj_in_hand = None
content = f"{e}"
result = {"obj_in_hand": obj_in_hand, "obj_to_grasp": obj_to_grasp}
chat_item = {
"content": content,
"type": "text" if obj_in_hand else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_destination", "location"], writes=["location"])
def navigate_for_place(state: State) -> Tuple[dict, State]:
destination = state["obj_destination"]
try:
if state["location"] != destination:
log.info(f"Changing location from {state['location']} to {destination}")
if not navigate_to(
SEMANTIC_LOCATIONS[destination]["name"],
SEMANTIC_LOCATIONS[destination]["location"],
):
raise Exception(f"Error navigating to destination: {destination}")
if not wait_until_ready():
raise Exception(f"Error navigating to destination: {destination}")
content = f"Navigated to **{destination}** to place **{state['obj_in_hand']}**"
location = destination
except Exception as e:
log.error(f"Error navigating to destination: {e}")
location = None
content = f"{e}"
result = {"location": location}
    chat_item = {
        "content": content,
        "type": "text" if location is not None else "error",
        "role": "assistant",
    }
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_in_hand"], writes=["obj_in_hand"])
def place_object(state: State) -> Tuple[dict, State]:
obj_to_place = state["obj_in_hand"]
try:
if not place(SEMANTIC_LOCATIONS[state["location"]]["name"]):
raise Exception(f"Error placing object: {obj_to_place}")
if not wait_until_ready():
raise Exception(f"Error placing object: {obj_to_place}")
obj_in_hand = None
content = f"Placed object **{obj_to_place}**"
except Exception as e:
log.error(f"Error placing object: {e}")
obj_in_hand = obj_to_place
content = f"{e}"
result = {"obj_in_hand": obj_in_hand}
chat_item = {
"content": content,
"type": "text" if obj_in_hand is None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["task", "safe"], writes=["response"])
def prompt_for_more(state: State) -> Tuple[dict, State]:
result = {
"response": {
"content": "None of the response modes I support apply to your question. Please clarify?",
"type": "text",
"role": "assistant",
}
}
return result, state.update(**result)
@action(
reads=["current_state"],
writes=["response", "current_state", "code_attempts"],
)
def create_error_response(state: State) -> Tuple[dict, State]:
content = "Could not complete the task."
if "task_state" in state:
content += f" I have failed on {state['task_state']}."
result = {
"response": {
"content": content,
"type": "error",
"role": "assistant",
},
"current_state": "FAILED",
"code_attempts": 0,
}
return result, state.append(chat_history=result["response"]).update(**result)
@action(reads=["current_state"], writes=["response", "current_state"])
def finish_and_score_task(state: State) -> Tuple[dict, State]:
response = {
"content": "I'm done. Goodbye!",
"type": "text",
"role": "assistant",
}
current_state = "PENDING"
result = {"response": response, "current_state": current_state}
return result, state.append(chat_history=result["response"]).update(**result)
@action(reads=["current_state"], writes=["chat_history", "current_state"])
def response(state: State) -> Tuple[dict, State]:
if state["current_state"] == "DONE":
current_state = "PENDING"
response = {
"content": "**Task execution successful :+1:**",
"type": "text",
"role": "assistant",
}
elif state["current_state"] == "FAILED":
current_state = "FAILED"
response = {
"content": "**Task execution failed**\nLet me know if you'd like me to retry or just give me a new task.",
"type": "error",
"role": "assistant",
}
else:
current_state = state["current_state"]
response = {
"content": "Ready for next prompt.",
"type": "text",
"role": "assistant",
}
result = {"chat_item": response, "current_state": current_state}
return result, state.append(chat_history=response).update(**result)
MAX_CODE_ATTEMPTS = 3
def base_application(
hooks: List[LifecycleAdapter],
app_id: str,
storage_dir: str,
project_id: str,
):
if hooks is None:
hooks = []
# we're initializing above so we can load from this as well
# we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})`
tracker = LocalTrackingClient(project=project_id, storage_dir=storage_dir)
sequence_id = None
return (
ApplicationBuilder()
.with_actions(
prompt=process_prompt,
parse_prompt=parse_prompt,
respond_to_question=respond_to_question,
update_knowledge_base=update_knowledge_base,
determine_if_task_in_skill_library=determine_if_task_in_skill_library,
create_plan_for_unknown_task=create_plan_for_unknown_task,
get_closest_plans=get_closest_plans,
get_role_and_location_context=get_role_and_location_context,
create_initial_plan=create_initial_plan,
create_robot_grounded_plan=create_robot_grounded_plan,
determine_if_plan_is_feasibile=determine_if_plan_is_feasibile,
convert_plan_to_steps=convert_plan_to_steps,
create_state_machine=create_state_machine,
execute_state_machine=execute_state_machine,
navigate_to_location=navigate_to_location,
scan_the_scene=scan_the_scene,
get_image=get_image,
ask_vla=ask_vla,
get_list_of_objects=get_list_of_objects,
rollout_pick_and_place_plan=rollout_pick_and_place_plan,
pick_and_place=pick_and_place,
navigate_for_pick=navigate_for_pick,
pick_object=pick_object,
navigate_for_place=navigate_for_place,
place_object=place_object,
finish_and_score_task=finish_and_score_task,
create_error_response=create_error_response,
prompt_for_more=prompt_for_more,
response=response,
)
.with_transitions(
("prompt", "parse_prompt", default),
(
"parse_prompt",
"determine_if_task_in_skill_library",
when(prompt_cls=PromptType.PERFORM_NEW_TASK),
),
(
"parse_prompt",
"respond_to_question",
when(prompt_cls=PromptType.RESPOND_TO_QUESTION),
),
(
"parse_prompt",
"update_knowledge_base",
when(prompt_cls=PromptType.UPDATE_KNOWLEDGE_BASE),
),
(
"parse_prompt",
"execute_state_machine",
when(prompt_cls=PromptType.RETRY_EXISTING_TASK),
),
("parse_prompt", "response", default),
("respond_to_question", "response", default),
("update_knowledge_base", "response", default),
(
"determine_if_task_in_skill_library",
"create_plan_for_unknown_task",
when(task="unknown"),
),
("create_plan_for_unknown_task", "get_closest_plans", default),
("get_closest_plans", "get_role_and_location_context", default),
("get_role_and_location_context", "create_initial_plan", default),
("create_initial_plan", "create_robot_grounded_plan", default),
("create_robot_grounded_plan", "determine_if_plan_is_feasibile", default),
(
"determine_if_plan_is_feasibile",
"create_error_response",
when(feasible=False),
),
("determine_if_plan_is_feasibile", "convert_plan_to_steps", default),
(
"convert_plan_to_steps",
"create_error_response",
when(task="unknown"),
),
("convert_plan_to_steps", "create_state_machine", default),
("determine_if_task_in_skill_library", "create_state_machine", default),
("create_state_machine", "execute_state_machine", default),
("create_state_machine", "prompt_for_more", when(state_machine="unknown")),
(
"execute_state_machine",
"navigate_to_location",
when(task_state="navigate to location"),
),
(
"execute_state_machine",
"scan_the_scene",
when(task_state="scan the scene"),
),
(
"execute_state_machine",
"rollout_pick_and_place_plan",
when(task_state="rollout pick and place plan"),
),
("rollout_pick_and_place_plan", "execute_state_machine", default),
(
"execute_state_machine",
"pick_and_place",
when(task_state="pick_and_place"),
),
("pick_and_place", "navigate_for_pick", default),
("navigate_for_pick", "create_error_response", when(location=None)),
("navigate_for_pick", "pick_object", default),
("pick_object", "create_error_response", when(obj_in_hand=None)),
("pick_object", "navigate_for_place", default),
("navigate_for_place", "place_object", default),
("place_object", "execute_state_machine", default),
("navigate_to_location", "create_error_response", when(location=None)),
("navigate_to_location", "execute_state_machine", default),
("scan_the_scene", "get_image", default),
("get_image", "create_error_response", when(image=None)),
("get_image", "ask_vla", default),
("ask_vla", "get_list_of_objects", default),
("get_list_of_objects", "execute_state_machine", default),
("execute_state_machine", "finish_and_score_task", when(task_state="done")),
("finish_and_score_task", "prompt", default),
("response", "prompt", when(current_state="PENDING")),
("response", "execute_state_machine", when(current_state="RUNNING")),
("prompt_for_more", "response", default),
("create_error_response", "response", default),
("response", "prompt", when(current_state="FAILED")),
)
# initializes from the tracking log if it does not already exist
.initialize_from(
tracker,
            resume_at_next_action=True,  # resume from the next action (rather than the entrypoint) after a failure
default_state={"chat_history": [], "current_state": "PENDING"},
default_entrypoint="prompt",
# fork_from_app_id="670b9f83-d0fa-49ce-b396-dcaba416edc8",
# fork_from_sequence_id=55,
)
.with_hooks(*hooks)
.with_tracker(tracker)
.with_identifiers(app_id=app_id, sequence_id=sequence_id)
.build()
)
def application(
app_id: Optional[str] = None,
project_id: str = "roboai",
storage_dir: Optional[str] = "~/.burr",
hooks: Optional[List[LifecycleAdapter]] = None,
) -> Application:
return base_application(hooks, app_id, storage_dir, project_id=project_id)
if __name__ == "__main__":
app = application()
# app.visualize(
# output_file_path="statemachine", include_conditions=False, view=False, format="png"
# )
app.run(halt_after=["response"])
| 53,152 | Python | 35.158503 | 194 | 0.582988 |
AshisGhosh/roboai/roboai/roboai/role_context.py | ROBOT_CONTEXT = """
You are creating a plan for a robot with 2 arms and a mobile base.
The arms can pick and place objects.
The mobile base can navigate to different locations in the environment. You can also call for support if you need help. You can update the plan if needed.
"""
ROLE_CONTEXT = """
The robot is in a house environment where it does useful tasks such as cleaning or fetching items.
"""
EMPLOYEE_HANDBOOK = """
This is the employee handbook for the robot. The following are basic instructions:
Cleaning:
- Remove objects and place them in the cabinet
Fetching:
- Towels can be found in the kitchen
"""
| 644 | Python | 31.249998 | 154 | 0.736025 |
AshisGhosh/roboai/roboai/roboai/roboai_demo.py | from pydantic import BaseModel
from typing import Callable
import base64 # noqa: F401
from PIL import Image # noqa: F401
from abc import ABC, abstractmethod
from roboai.agent import Agent
from roboai.task import Task
# from shared.utils.robosim_client import ( # noqa: F401
# get_objects_on_table,
# pick,
# place,
# get_image,
# get_grasp_image,
# )
from shared.utils.isaacsim_client import get_image, pick, place
from shared.utils.model_server_client import answer_question_from_image # noqa: F401
import shared.utils.gradio_client as gradio # noqa: F401
import shared.utils.replicate_client as replicate # noqa: F401
from shared.utils.llm_utils import get_closest_text_sync as get_closest_text
import gradio as gr
import logging
log = logging.getLogger("roboai")
log.setLevel(logging.DEBUG)
class Tool(BaseModel):
name: str
func: Callable
description: str
example: str
def extract_code(raw_input, language="python"):
start_delimiter = f"```{language}"
if start_delimiter not in raw_input:
start_delimiter = "```"
code_start_index = raw_input.find(start_delimiter)
if code_start_index == -1:
code_start_index = 0
else:
code_start_index += len(start_delimiter)
end_delimiter = "```"
code_end_index = raw_input.find(end_delimiter, code_start_index)
if code_end_index == -1:
code_end_index = len(raw_input)
code = raw_input[code_start_index:code_end_index].strip()
log.debug(f"Extracted code: \n{code}")
return code
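# Minimal usage sketch (the raw string below is a hypothetical LLM response): extract_code
# pulls out the body of the first ```python fence, falling back to a bare ``` fence or the
# whole string when no fence is present.
def _example_extract_code() -> str:
    raw = 'Here you go:\n```python\npick(object_name="Box of Cereal")\n```\nDone.'
    return extract_code(raw)  # -> 'pick(object_name="Box of Cereal")'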
class RobotJob(ABC):
def __init__(self):
pass
@abstractmethod
def run(self):
pass
class ClearTableJob(RobotJob):
def __init__(self):
pass
def run(self, chat_history=None):
"""
Job to:
1. Understand the scene
2. Create a plan to clear the table
"""
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
chat_history[-1][1] += "Getting image...\n"
yield chat_history
im = get_image()
prompt = "What objects are on the table?"
if chat_history:
chat_history[-1][1] += "Asking VLA model...\n"
yield chat_history
output = gradio.moondream_answer_question_from_image(im, prompt)["result"]
if chat_history:
chat_history[-1][1] += f"Response:\n{output}\n"
yield chat_history
if chat_history:
chat_history[-1][1] += "Creating plan...\n"
yield chat_history
task = Task(
f"Given the following summary, return just a list in python of the objects on the table. The table is not an object. Summary: \n{output}",
expected_output_format="""
objects_on_table = ["Object 1", "Object 2", "Object 3"]
""",
)
analyzer_agent = Agent(
name="Analyzer",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a helpful agent that concisely responds with only code.
Use only the provided functions, do not add any extra code.
""",
)
task.add_solving_agent(analyzer_agent)
output = task.run()
# output = '```objects_on_table = ["Box of Cereal", "Carton of Milk", "Can of Soup"]```'
code = extract_code(output)
try:
exec_vars = {}
exec(code, exec_vars)
log.info(exec_vars.get("objects_on_table", None))
list_of_objects = exec_vars.get("objects_on_table", None)
except Exception as e:
log.error(f"Error executing code: {e}")
list_of_objects = None
if chat_history:
chat_history[-1][1] += f"Error executing code: {e}"
yield chat_history
return
plan_task = Task(
f"""Create a plan for a robot to remove the following objects from the table:
{list_of_objects}
Do not add any extra steps.
""",
# expected_output_format="""
# 1. pick object1
# 2. place object1
# 3. pick object2
# 4. place object2
# 5. pick object3
# 6. place object3
# """
expected_output_format="A numbered list of steps constrained to the provided functions.",
)
plan_task.register_tool(
name="pick",
func=pick,
description="Robot picks up the provided arg 'object_name'",
example='"pick_success = pick(object_name="Object 1")" --> Returns: True ',
)
plan_task.register_tool(
name="place",
func=place,
description="Robot places the provided arg 'object_name'",
example='"place_success = place(object_name="Object 1")" --> Returns: True ',
)
planner_agent = Agent(
name="Planner",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a planner that breaks down tasks into steps for robots.
            Create a concise set of steps that a robot can do.
Do not add any extra steps.
"""
+ plan_task.generate_tool_prompt(),
)
plan_task.add_solving_agent(planner_agent)
# log.info(plan_task)
output = plan_task.run()
log.info(output)
if chat_history:
chat_history[-1][1] += f"Response:\n{output}"
yield chat_history
if chat_history:
chat_history[-1][1] += "Converting plan to code...\n"
yield chat_history
plan_generated = True
code = extract_code(output)
exec_vars = plan_task.get_exec_vars()
try:
exec(code, exec_vars)
except Exception as e:
log.error(f"Error executing plan: {e}")
plan_generated = False
# Validate the plan?
# Execute the plan
if not plan_generated:
coder_task = Task(
f"""Return python code to execute the plan using only the provided functions.
{output}
"""
)
coder_task.register_tool(
name="pick",
func=pick,
description="Robot picks up the provided arg 'object_name'",
example='"pick_success = pick(object_name="Object 1")" --> Returns: True ',
)
coder_task.register_tool(
name="place",
func=place,
description="Robot places the provided arg 'object_name'",
example='"place_success = place(object_name="Object 1")" --> Returns: True ',
)
coder_agent = Agent(
name="Coder",
# model="ollama/gemma:7b",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a coder that writes concise and exact code to execute the plan.
Use only the provided functions.
"""
+ coder_task.generate_tool_prompt(),
)
coder_task.add_solving_agent(coder_agent)
log.info(coder_task)
output = coder_task.run()
if chat_history:
chat_history[-1][1] += f"Response:\n{output}\n"
yield chat_history
if chat_history:
chat_history[-1][1] += "Extracting and running code...\n"
yield chat_history
code = extract_code(output)
if chat_history:
chat_history[-1][1] += f"Response:\n```{code}\n```"
yield chat_history
try:
exec_vars = coder_task.get_exec_vars()
exec(code, exec_vars)
result = "Successful execution of plan."
except Exception as e:
log.error(f"Error executing code: {e}")
result = "Error executing plan."
finally:
if chat_history:
chat_history[-1][1] += f"\nResponse:\n**{result}**"
yield chat_history
class WhatIsOnTableJob(RobotJob):
image = None
def __init__(self):
self.image = get_image()
def get_image(self):
if not self.image:
self.image = get_image()
return self.image
def run(self, chat_history=None):
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
yield chat_history
chat_history[-1][1] += "Getting image...\n"
yield chat_history
im = get_image()
prompt = "What objects are on the table?"
if chat_history:
chat_history[-1][1] += "Asking VLA model...\n"
yield chat_history
output = gradio.moondream_answer_question_from_image(im, prompt)
if chat_history:
chat_history[-1][1] += f"Response:\n{output['result']}"
yield chat_history
return output["result"]
class TestJob(RobotJob):
def __init__(self):
pass
def run(self, chat_history=None):
responses = [
"I am a robot.",
"I can help you with tasks.",
"Ask me to do something",
]
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
yield chat_history
for response in responses:
chat_history[-1][1] += response
yield chat_history
def chat():
with gr.Blocks() as demo:
gr.Markdown("## RoboAI Chatbot")
chatbot = gr.Chatbot(height=700)
msg = gr.Textbox(placeholder="Ask me to do a task.", container=False, scale=7)
image_output = gr.Image(label="Response Image")
clear = gr.ClearButton([msg, chatbot]) # noqa: F841
current_task = [None]
def respond(message, chat_history):
nonlocal current_task
closest_text = get_closest_text(
message, ["Clear the table", "What is on the table?"]
)
image = None
if closest_text:
print(f"Closest text: {closest_text}")
current_task[0] = closest_text
chat_history.append((message, None))
return "", chat_history, image
def do_function(chat_history):
nonlocal current_task
            if not current_task[0]:
return "", chat_history, None
chat_history[-1][1] = f"**{current_task[0]}**"
yield chat_history
if current_task[0] == "What is on the table?":
job = WhatIsOnTableJob()
image = job.get_image()
yield from job.run(chat_history)
elif current_task[0] == "Clear the table":
job = ClearTableJob()
yield from job.run(chat_history)
image = WhatIsOnTableJob().get_image()
elif current_task[0] == "Test Job":
job = TestJob()
yield from job.run(chat_history)
else:
chat_history[-1][1] = "Sorry, I don't understand that command."
image = None
return None, chat_history, image
def get_image_output():
image_output = WhatIsOnTableJob().get_image()
return image_output
msg.submit(
respond, [msg, chatbot], [msg, chatbot, image_output], queue=False
).then(get_image_output, [], [image_output]).then(do_function, chatbot, chatbot)
demo.queue()
demo.launch()
if __name__ == "__main__":
chat()
| 12,136 | Python | 31.365333 | 150 | 0.528593 |
AshisGhosh/roboai/roboai/roboai/test.py | import logging
from litellm import completion
from roboai.agent import Agent
from roboai.task import Task
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("roboai")
log.setLevel(logging.DEBUG)
# litellm.success_callback = ["langfuse"]
# litellm.set_verbose=True
def test_task():
planner_agent = Agent(
name="Planner",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""You are a planner that breaks down tasks into steps for robots.
Create a set of steps that a robot with wheels and one arm can do.
""",
)
# task_handler = Agent(
# name="Task Handler",
# model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
# system_message="""
# You are a task handler that can handle tasks for robots.
# """
# )
task_handler = "Create a plan to clear the table"
task = Task(task_handler, [planner_agent])
task.run()
def test():
messages = [{"content": "Hello, how are you?", "role": "user"}]
response = completion(
model="openrouter/huggingfaceh4/zephyr-7b-beta:free", messages=messages
)
print(response)
def test_agent():
agent = Agent(name="test", model="openrouter/huggingfaceh4/zephyr-7b-beta:free")
response = agent.chat("Hello, how are you?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
response = agent.chat("What is the capital of China?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
response = agent.chat("And India?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
if __name__ == "__main__":
# test()
# test_agent()
test_task()
| 2,191 | Python | 27.467532 | 94 | 0.61068 |
AshisGhosh/roboai/roboai/roboai/server.py | import functools
import importlib
from typing import List, Literal
import pydantic
from fastapi import APIRouter
from burr.core import Application
"""This file represents a simple chatbot API backed with Burr.
We manage an application, write to it with post endpoints, and read with
get/ endpoints.
This demonstrates how you can build interactive web applications with Burr!
"""
# We're doing dynamic import because this lives within examples/ (and that module has dashes)
# navigate to the examples directory to read more about this!
chat_application = importlib.import_module(
"burr.examples.multi-modal-chatbot.application"
) # noqa: F401
# the app is commented out as we include the router.
# app = FastAPI()
router = APIRouter()
class ChatItem(pydantic.BaseModel):
"""Pydantic model for a chat item. This is used to render the chat history."""
content: str
type: Literal["image", "text", "code", "error"]
role: Literal["user", "assistant"]
@functools.lru_cache(maxsize=128)
def _get_application(project_id: str, app_id: str) -> Application:
"""Quick tool to get the application -- caches it"""
chat_app = chat_application.application(app_id=app_id, project_id=project_id)
return chat_app
@router.post("/response/{{project_id}}/{{app_id}}", response_model=List[ChatItem])
def chat_response(project_id: str, app_id: str, prompt: str) -> List[ChatItem]:
"""Chat response endpoint. User passes in a prompt and the system returns the
    full chat history, so it's easier to render.
:param project_id: Project ID to run
:param app_id: Application ID to run
:param prompt: Prompt to send to the chatbot
:return:
"""
burr_app = _get_application(project_id, app_id)
_, _, state = burr_app.run(halt_after=["response"], inputs=dict(prompt=prompt))
return state.get("chat_history", [])
@router.get("/response/{project_id}/{app_id}", response_model=List[ChatItem])
def chat_history(project_id: str, app_id: str) -> List[ChatItem]:
"""Endpoint to get chat history. Gets the application and returns the chat history from state.
:param project_id: Project ID
:param app_id: App ID.
:return: The list of chat items in the state
"""
chat_app = _get_application(project_id, app_id)
state = chat_app.state
return state.get("chat_history", [])
@router.post("/create/{project_id}/{app_id}", response_model=str)
async def create_new_application(project_id: str, app_id: str) -> str:
"""Endpoint to create a new application -- used by the FE when
the user types in a new App ID
:param project_id: Project ID
:param app_id: App ID
:return: The app ID
"""
# side-effect of this persists it -- see the application function for details
chat_application.application(app_id=app_id, project_id=project_id)
return app_id # just return it for now
# comment this back in for a standalone chatbot API
# app.include_router(router, prefix="/api/v0/chatbot")
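# Example requests once the router is mounted (the host/port and the "/api/v0/chatbot"
# prefix are assumptions matching the comment above; adjust to the actual deployment):
#
#   curl -X POST "http://localhost:8000/api/v0/chatbot/create/roboai/my-app"
#   curl -X POST "http://localhost:8000/api/v0/chatbot/response/roboai/my-app?prompt=Clear%20the%20table"
#   curl "http://localhost:8000/api/v0/chatbot/response/roboai/my-app"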
| 2,984 | Python | 33.310344 | 98 | 0.704424 |
AshisGhosh/roboai/roboai/roboai/semantic_locations.py | COFFEE_TABLE = {
"coffee_table": {
"name": "coffee_table_fqluyq_0",
"location": "0.75 -1.1 3.14",
}
}
TABLE = {
"table": {
"name": "table",
"location": None,
}
}
SUPPLY_CABINET = {
"supply_cabinet": {"name": "bottom_cabinet_bamfsz_0", "location": "-1.0 -0.5 3.14"}
}
FRIDGE = {"fridge": {"name": "fridge_xyejdx_0", "location": "0.2 2.0 1.57"}}
TRASH_CAN = {"trash_can": {"name": "trash_can_zotrbg_0", "location": "0.0 2.4 3.14"}}
SEMANTIC_LOCATIONS = {**COFFEE_TABLE, **TABLE, **SUPPLY_CABINET, **FRIDGE}
| 562 | Python | 22.458332 | 87 | 0.537367 |
AshisGhosh/roboai/roboai/roboai/streamlit_app.py | import time
from typing import Optional
import roboai as chatbot_application
import streamlit as st
from burr.integrations.streamlit import (
AppState,
Record,
get_state,
render_explorer,
set_slider_to_current,
update_state,
)
st.set_page_config(layout="wide")
st.markdown("This is a demo of RoboAI - LLM based planning for robots.")
def render_chat_message(record: Record):
# if record.action in ["prompt", "response"]:
recent_chat_message = record.state["chat_history"][-1]
content = recent_chat_message["content"]
content_type = recent_chat_message["type"]
role = recent_chat_message["role"]
with st.chat_message(role):
if content_type == "image":
st.image(content)
elif content_type == "code":
st.code(content)
elif content_type == "text":
st.write(content)
elif content_type == "error":
st.error(content)
def retrieve_state():
if "burr_state" not in st.session_state:
        # TODO --enable usage of hamilton. Currently it's not wiring in inputs
# But it should be easy enough
state = AppState.from_empty(
app=chatbot_application.application(),
)
else:
state = get_state()
return state
def chatbot_step(app_state: AppState, prompt: Optional[str]) -> bool:
"""Pushes state forward for the chatbot. Returns whether or not to rerun the app.
:param app_state: State of the app
:param prompt: Prompt to set the chatbot to. If this is None it means it should continue and not be reset.
:return:
"""
inputs = None
if prompt is not None:
# We need to update
inputs = {"prompt": prompt}
# app_state.app.update_state(app_state.app.state.update(prompt=prompt))
st.session_state.running = True # set to running
# if its not running this is a no-op
if not st.session_state.get("running", False):
return False
application = app_state.app
step_output = application.step(inputs=inputs)
# if step_output is None:
# st.session_state.running = False
# return False
action, result, state = step_output
app_state.history.append(Record(state.get_all(), action.name, result))
set_slider_to_current()
if action.name in ["response", "finish_and_score_task"] and state[
"current_state"
] in ["PENDING", "FAILED"]:
# we've gotten to the end
st.session_state.running = False
return True
return True
def main():
st.title("RoboAI")
    app_state = retrieve_state()  # retrieve first so we can use it for the rest of the step
columns = st.columns(2)
with columns[0]:
prompt = st.chat_input(
"...", disabled=st.session_state.get("running", False), key="chat_input"
)
should_rerun = chatbot_step(app_state, prompt)
with st.container(height=850):
for item in app_state.history:
render_chat_message(item)
# wait for 0.1 seconds to allow the UI to update
time.sleep(0.1)
with columns[1]:
render_explorer(app_state)
update_state(app_state) # update so the next iteration knows what to do
if should_rerun:
st.rerun()
if __name__ == "__main__":
main()
| 3,307 | Python | 30.207547 | 110 | 0.625038 |
AshisGhosh/roboai/roboai/roboai/agent.py | import time
import logging
from litellm import completion
from dotenv import load_dotenv
logging.basicConfig(level=logging.WARN)
load_dotenv("shared/.env") # take environment variables from .env.
log = logging.getLogger("roboai")
log.setLevel(logging.INFO)
# litellm.success_callback = ["langfuse"]
class Agent:
def __init__(self, name, model, system_message="", base_url=None):
self.name = name
self.model = model
self._last_response = None
self._last_response_content = None
self.messages = []
self.system_message = system_message
self.set_system_message(system_message)
self.base_url = base_url
def chat(self, message):
self.messages.append({"content": message, "role": "user"})
completion_args = {
"model": self.model,
"messages": self.messages,
}
if self.base_url:
completion_args["base_url"] = self.base_url
response = completion(**completion_args)
self._last_response = response
self._last_response_content = response["choices"][0]["message"]["content"]
self.messages.append(
{"content": self._last_response_content, "role": "assistant"}
)
return self._last_response_content
def task_chat(self, messages):
completion_args = {
"model": self.model,
"messages": messages,
}
if self.base_url:
completion_args["base_url"] = self.base_url
start = time.time()
response = completion(**completion_args)
log.debug(f"Completion time: {time.time() - start}")
self._last_response = response
self._last_response_content = response["choices"][0]["message"]["content"]
return self._last_response_content
def get_last_response(self):
return self._last_response_content
def get_last_response_obj(self):
return self._last_response
def clear_messages(self):
self.messages = []
def set_system_message(self, message):
if not message:
log.warn(f"System message for agent '{self.name}' is empty.")
return
system_message = None
for m in self.messages:
if m["role"] == "system":
system_message = m
break
if system_message:
system_message["content"] = message
else:
self.messages.append({"content": message, "role": "system"})
self.system_message = message
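# Minimal usage sketch (the model id is illustrative; any litellm-compatible id works):
def _example_agent_usage() -> str:
    agent = Agent(
        name="Helper",
        model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
        system_message="You are a helpful agent that responds concisely.",
    )
    # chat() appends to the agent's running conversation, while task_chat() sends a
    # one-off message list without touching agent.messages.
    return agent.task_chat([{"role": "user", "content": "Say hello in one word."}])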
| 2,547 | Python | 28.97647 | 82 | 0.591676 |
AshisGhosh/roboai/roboai/roboai/skills.py | SCAN_THE_SCENE = {
"scan the scene": {
"symbol": "scan_the_scene",
"description": """
        Can scan the scene to retrieve information.
Can also be used to identify objects in the scene.
Should already be at the required location.
""",
}
}
PICK_OBJECT = {
"pick object": {
"symbol": "pick",
"description": """
Can pick up an object in the scene.
Requires specifying the object.
""",
}
}
PLACE_IN_LOCATION = {
"place in location": {
"symbol": "place",
"description": """
Can place an object in the scene.
Requires already holding the object.
Requires specifying the location.
""",
}
}
NAVIGATE_TO_LOCATION = {
"navigate to location": {
"symbol": "navigate_to",
"description": """
Can navigate to a location in the scene.
Location can also be specified by an object.
""",
}
}
CALL_SUPPORT = {
"call support": {
"symbol": "call_support",
"description": """
Can call support for an issue in the scene.
""",
}
}
UPDATE_PLAN = {
"update plan": {
"symbol": "update_plan",
"description": """
Can update the plan to a new one.
""",
}
}
ROLLOUT_PICK_AND_PLACE_PLAN = {
"rollout pick and place plan": {
"symbol": "rollout",
"description": """
Given an observation or a scan step, can rollout a pick and place plan.
""",
}
}
SKILLS = {
**SCAN_THE_SCENE,
**PICK_OBJECT,
**PLACE_IN_LOCATION,
**NAVIGATE_TO_LOCATION,
**CALL_SUPPORT,
**UPDATE_PLAN,
**ROLLOUT_PICK_AND_PLACE_PLAN,
}
| 2,047 | Python | 24.28395 | 95 | 0.447484 |
AshisGhosh/roboai/roboai/roboai/task.py | import io
import base64
import logging
from pydantic import BaseModel
from typing import Callable
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("roboai")
log.setLevel(logging.INFO)
class Tool(BaseModel):
name: str
func: Callable
description: str
example: str
def extract_code(raw_input, language="python"):
start_delimiter = f"```{language}\n"
end_delimiter = "\n```"
code_start_index = raw_input.find(start_delimiter) + len(start_delimiter)
code_end_index = raw_input.find(end_delimiter, code_start_index)
code = raw_input[code_start_index:code_end_index].strip()
return code
def str_from_messages(messages):
# Extract the text from the messages, ignore images
text = ""
for m in messages:
if isinstance(m["content"], str):
text += m["role"] + ": " + m["content"] + "\n"
else:
text += m["role"] + ": " + m["content"]["text"] + "\n"
return text
class Task:
def __init__(
self,
task_description,
solving_agents=None,
expected_output_format=None,
finish_when=None,
):
self.task_description = task_description
self.solving_agents = solving_agents if solving_agents else []
self.expected_output_format = expected_output_format
self.finish_when = finish_when
self.chat_messages = []
self.tools = []
def add_solving_agent(self, agent):
self.solving_agents.append(agent)
@property
def task_description_str(self):
if isinstance(self.task_description, list):
return self.task_description[0]["text"]
return self.task_description
def add_task_image(self, image):
try:
# Create a bytes buffer to hold the image data
buffer = io.BytesIO()
# Save the Pillow image object to the buffer in a specific format (e.g., JPEG)
image.save(buffer, format="JPEG")
# Seek to the start of the buffer
buffer.seek(0)
# Read the buffer content and encode it to Base64
image_str = base64.b64encode(buffer.read()).decode("utf-8")
# Format the Base64 string as a data URL, specifying the MIME type
# data_url = f"data:image/jpeg;base64,{image_str}"
data_url = image_str
# Update the task description with the text and the image data URL
self.task_description = [
{"type": "text", "text": self.task_description},
{"type": "image_url", "image_url": {"url": data_url}},
]
log.info("Task image added.")
except Exception as e:
log.error(f"Failed to add task image: {e}")
def register_tool(self, name, func, description, example):
self.tools.append(
Tool(name=name, func=func, description=description, example=example)
)
log.debug(f"Tool {name} added.")
def generate_tool_prompt(self):
tool_prompt = """
You can use the following python functions:
"""
for tool in self.tools:
tool_prompt += f"""'{tool.name}()'
Description: {tool.description}
Usage: {tool.example}
"""
return tool_prompt
def get_exec_vars(self):
exec_vars = {}
for tool in self.tools:
exec_vars[tool.name] = tool.func
return exec_vars
def get_exec_vars_serialized(self):
exec_vars = {}
for tool in self.tools:
exec_vars[tool.name] = tool.func.__name__
return exec_vars
def get_complete_prompt(self, agentid: int):
task_description = self.task_description
if self.expected_output_format:
task_description += f"""
Ensure your output follows the following format strictly: \n{self.expected_output_format}"""
prompt = f"""
{task_description}
"""
if self.tools:
prompt += self.generate_tool_prompt()
return prompt
def run(self):
task_description = self.task_description
if self.expected_output_format:
task_description += f"""
Ensure your output follows the following format strictly: \n{self.expected_output_format}"""
self.chat_messages.append(
{"task": {"content": task_description, "role": "user"}}
)
log.info(f"Task: '{self.task_description_str}'")
for agent in self.solving_agents:
response = self.task_chat(agent, self.chat_messages)
log.info(f"> AGENT '{agent.name}': {response}")
self.chat_messages.append(
{agent.name: {"content": response, "role": "assistant"}}
)
return next(iter(self.chat_messages[-1].values()))["content"]
def task_chat(self, agent, messages):
agent_messages = []
if agent.system_message:
agent_messages.append({"role": "system", "content": agent.system_message})
for m in messages:
if next(iter(m)) == "task":
agent_messages.append(m["task"])
elif next(iter(m)) in [a.name for a in self.solving_agents if a != agent]:
message = m[next(iter(m))]
message["role"] = "user"
agent_messages.append(message)
elif next(iter(m)) == agent.name:
message = m[next(iter(m))]
agent_messages.append(message)
log.debug(f"{str_from_messages(agent_messages)}")
response = agent.task_chat(agent_messages)
return response
def __str__(self):
task_info = f"Task: {self.task_description_str}"
if self.expected_output_format:
task_info += f"\n Expected Output Format: {self.expected_output_format}"
if self.solving_agents:
task_info += "\n Solving Agents:"
for a in self.solving_agents:
task_info += f"\n - {a.name}"
if self.tools:
task_info += "\n Registered Tools:"
for t in self.tools:
task_info += f"\n - {t.name}"
return task_info
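# Usage sketch: wiring a Task to a tool and a solving agent. The pick() stand-in and the
# model id are illustrative placeholders; see roboai_demo.py for the wiring against the
# real simulator clients.
def _example_task_usage() -> str:
    from roboai.agent import Agent  # local import to keep this module dependency-light
    def pick(object_name: str) -> bool:  # stand-in tool for illustration only
        return True
    task = Task(
        "Create a one-line plan to pick up the cereal box.",
        expected_output_format="A single line of python using only the provided functions.",
    )
    task.register_tool(
        name="pick",
        func=pick,
        description="Robot picks up the provided arg 'object_name'",
        example='pick(object_name="Box of Cereal")',
    )
    task.add_solving_agent(
        Agent(
            name="Planner",
            model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
            system_message="You are a planner." + task.generate_tool_prompt(),
        )
    )
    return task.run()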
| 6,279 | Python | 33.31694 | 116 | 0.565058 |
AshisGhosh/roboai/roboai/roboai/plans.py | CLEAR_TABLE_PLAN = {
"clear the table": """
1. Navigate to the table
2. Scan the table for objects
3. Rollout pick and place plan to remove objects
"""
}
CLEAN_BATHROOM_PLAN = {
"clean the bathroom": """
Given the bathroom is dirty and has a toilet, sink, and shower
1. Spray the shower with cleaner
2. Spray the sink with cleaner
3. Spray the toilet with cleaner
4. Scrub the sink
5. Scrub the toilet
6. Scrub the shower
"""
}
PLANS = {
**CLEAR_TABLE_PLAN,
**CLEAN_BATHROOM_PLAN,
}
| 597 | Python | 22.919999 | 70 | 0.577889 |
AshisGhosh/roboai/roboai/roboai/knowledge_base_utils.py | import os
import json
import pydantic
from datetime import datetime
class KnowledgeBaseItem(pydantic.BaseModel):
key: str
value: pydantic.Json
tags: list[str] = []
timestamp: str = pydantic.Field(default_factory=lambda: datetime.now().isoformat())
class KnowledgeBase:
def __init__(self, file_path: str = "/app/roboai/knowledge_base.json"):
self.file_path = file_path
self.data = self.load_data()
def load_data(self):
print("Current working directory:", os.getcwd())
print("Files in the directory:", os.listdir())
with open(self.file_path, "r") as f:
data = json.load(f)
return data
@property
def all_data(self):
return self.data
@property
def knowledge(self):
return [
KnowledgeBaseItem(
key=key,
value=value["value"],
tags=value["tags"],
timestamp=value["timestamp"],
)
for key, value in self.data.items()
]
def get_knowledge_as_string(self):
return "\n".join([f"{value['value']}" for value in self.data.values()])
def get_data(self, key: str):
return self.data.get(key, None)
def add_data(self, key: str, value, tags: list[str] = []):
self.data[key] = {
"value": value,
"tags": tags,
"timestamp": datetime.now().isoformat(),
}
self.save_data()
def save_data(self):
with open(self.file_path, "w") as f:
json.dump(self.data, f, indent=4)
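# Usage sketch (the path is hypothetical; load_data reads it eagerly in __init__, so the
# file must already exist and contain valid JSON such as "{}"):
def _example_knowledge_base_usage() -> str:
    kb = KnowledgeBase(file_path="/tmp/knowledge_base.json")
    kb.add_data(
        "towel_location",
        "Towels can be found in the kitchen",
        tags=["fetching", "kitchen"],
    )
    return kb.get_knowledge_as_string()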
| 1,586 | Python | 25.898305 | 87 | 0.558008 |
AshisGhosh/roboai/omnigibson/roboai/tiago_primitives.yaml | env:
action_frequency: 30 # (int): environment executes action at the action_frequency rate
physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
automatic_reset: false # (bool): whether to automatic reset after an episode finishes
  flatten_action_space: false # (bool): whether to flatten the action space as a single 1D-array
flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
use_external_obs: false # (bool): Whether to use external observations or not
initial_pos_z_offset: 0.1
external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation
render:
viewer_width: 1280
viewer_height: 720
scene:
type: InteractiveTraversableScene
scene_model: Rs_int
trav_map_resolution: 0.1
default_erosion_radius: 0.0
trav_map_with_objects: true
num_waypoints: 1
waypoint_resolution: 0.1
load_object_categories: null
not_load_object_categories: null
load_room_types: null
load_room_instances: null
load_task_relevant_only: true
seg_map_resolution: 0.1
scene_source: OG
include_robots: false
robots:
- type: Tiago
obs_modalities: ["rgb", "depth", "seg_instance", "seg_instance_id", "normal", "scan", "occupancy_grid"]
scale: 1.0
self_collisions: true
action_normalize: false
action_type: continuous
grasping_mode: physical
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: horizontal
controller_config:
base:
name: JointController
arm_left:
name: JointController
use_delta_commands: true
arm_right:
name: JointController
use_delta_commands: true
gripper_left:
name: JointController
motor_type: position
command_input_limits: [-1, 1]
command_output_limits: null
use_delta_commands: true
gripper_right:
name: JointController
motor_type: position
command_input_limits: [-1, 1]
command_output_limits: null
use_delta_commands: true
camera:
name: JointController
use_delta_commands: False
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 480
image_width: 640
objects: []
task:
type: DummyTask
# task:
# type: BehaviorTask
# activity_name: putting_away_Halloween_decorations
# activity_definition_id: 0
# activity_instance_id: 0
# predefined_problem: null
# online_object_sampling: false
# debug_object_sampling: null
# highlight_task_relevant_objects: false
# termination_config:
# max_steps: 500
# reward_config:
# r_potential: 1.0
scene_graph:
egocentric: true
full_obs: true
only_true: true
merge_parallel_edges: false | 3,215 | YAML | 32.5 | 307 | 0.66283 |
AshisGhosh/roboai/omnigibson/roboai/roboai.py | import os
import yaml
import numpy as np
import asyncio
import multiprocessing
import time
import omnigibson as og
from omnigibson.macros import gm # noqa F401
from omnigibson.action_primitives.starter_semantic_action_primitives import ( # noqa F401
StarterSemanticActionPrimitives,
StarterSemanticActionPrimitiveSet,
)
from omnigibson.action_primitives.symbolic_semantic_action_primitives import (
SymbolicSemanticActionPrimitives,
SymbolicSemanticActionPrimitiveSet,
)
from omnigibson.robots import Tiago
from .visualize_scene_graph import visualize_scene_graph, visualize_ascii_scene_graph # noqa F401
from .primitive_patches import _quick_settle_robot, _simplified_place_with_predicate
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import io
from PIL import Image
from starlette.responses import StreamingResponse
# gm.USE_GPU_DYNAMICS = True
# gm.ENABLE_FLATCACHE = True
class ActionHandler:
def __init__(self, env, controller, scene, task_queue):
self.env = env
self.controller = controller
self.scene = scene
self.actions = task_queue
self._last_camera_action = None
async def add_action(self, action: str):
"""
Add an action to the list of actions to be executed
"""
self.actions.put(action)
def execute_controller(self, ctrl_gen):
robot = self.env.robots[0]
for action in ctrl_gen:
state, reward, done, info = self.env.step(action)
self._last_camera_action = action[robot.controller_action_idx["camera"]]
def execute_action(self, action):
"""
Execute the action at the top of the list
"""
# robot = self.env.robots[0]
action, args = action[0], action[1:]
if action == "pick":
print(f"Attempting: 'pick' with args: {args}")
obj_name = args[0]
grasp_obj = self.scene.object_registry("name", obj_name)
# grasp_obj.disable_gravity()
# print(f"navigating to object {grasp_obj.name}")
self.controller._tracking_object = grasp_obj
# self.execute_controller(
# self.controller.apply_ref(
# SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
# grasp_obj,
# attempts=10,
# )
# )
print(f"grasping object {grasp_obj.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.GRASP, grasp_obj
)
)
print("Finished executing pick")
elif action == "place":
print(f"Attempting: 'place' with args: {args}")
obj_name = args[0]
if obj_name in ["None", "", None]:
obj_name = "table"
print(f"no object specified, defaulting to {obj_name}")
destination = self.scene.object_registry("name", obj_name)
# print(f"navigating to object {destination.name}")
self.controller._tracking_object = destination
# self.execute_controller(
# self.controller.apply_ref(
# SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, destination, attempts=10
# )
# )
print(f"placing object on top of {destination.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP,
destination,
attempts=1,
)
)
print("Finished executing place")
elif action == "navigate_to":
print(f"Attempting: 'navigate_to' with args: {args}")
obj_name = args[0]
obj = self.scene.object_registry("name", obj_name)
self.controller._tracking_object = obj
pose = args[1]
pose = [float(p) for p in pose.split(" ")]
print(f"navigating to object {obj.name}")
self.execute_controller(
self.controller.apply_ref("navigate_to_pose", pose, attempts=10)
)
print("Finished executing navigate_to")
elif action == "navigate_to_object":
print(f"Attempting: 'navigate_to_object' with args: {args}")
obj_name = args[0]
obj = self.scene.object_registry("name", obj_name)
self.controller._tracking_object = obj
print(f"navigating to object {obj.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, obj, attempts=10
)
)
print("Finished executing navigate_to_object")
elif action == "pick_test":
print("Executing pick")
grasp_obj = self.scene.object_registry("name", "black_cologne_bottle")
print(f"navigating to object {grasp_obj.name}")
self.controller._tracking_object = grasp_obj
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
grasp_obj,
attempts=10,
)
)
print(f"grasping object {grasp_obj.name}")
# self.execute_controller(self.controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj))
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.GRASP, grasp_obj
)
)
print("Finished executing pick")
elif action == "place_test":
print("Executing place")
table = self.scene.object_registry("name", "table")
# print(f"navigating to object {table.name}")
self.controller._tracking_object = table
# self.execute_controller(self.controller.apply_ref(SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, table, attempts=10))
print(f"placing object on top of {table.name}")
# self.execute_controller(self.controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, table))
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, table
)
)
print("Finished executing place")
elif action == "navigate_to_coffee_table":
# print("Executing navigate_to_coffee_table")
coffee_table = self.scene.object_registry("name", "coffee_table_fqluyq_0")
self.controller._tracking_object = coffee_table
print(f"navigating to object {coffee_table.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
coffee_table,
attempts=10,
)
)
print("Finished executing navigate_to_coffee_table")
elif action == "viz":
print("Visualizing scene graph")
graph = self.env.get_scene_graph()
print(graph)
visualize_ascii_scene_graph(self.scene, graph)
# visualize_scene_graph(self.scene, graph)
print("Finished visualizing scene graph")
def check_for_action(self):
"""
        Execute the next queued action if one is available; otherwise step the simulation
        with a no-op action (reusing the last camera command) so physics keeps advancing.
"""
if not self.actions.empty():
action = self.actions.get()
self.execute_action(action)
return True
action = np.zeros(self.env.robots[0].action_dim)
if self._last_camera_action is not None:
action[self.env.robots[0].controller_action_idx["camera"]] = (
self._last_camera_action
)
# print(f"ACTION - {action}")
state, reward, done, info = self.env.step(action)
# print(f"info: {info}")
return False
class SimWrapper:
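    """Run the OmniGibson environment in its own process, exchanging actions,
    observations, and status with the FastAPI process through multiprocessing queues."""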
def __init__(self, task_queue, image_queue, obj_in_hand_queue, ready_queue):
self.task_queue = task_queue
self.image_queue = image_queue
self.obj_in_hand_queue = obj_in_hand_queue
self.ready_queue = ready_queue
asyncio.run(self.run())
async def run(self):
"""
        Main simulation loop: load the configured scene, objects, and Tiago robot, then
        repeatedly publish camera observations, the grasped object, and a readiness
        heartbeat while servicing actions queued by the FastAPI endpoints.
"""
# Load the config
# config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
config_filename = os.path.join(
"/omnigibson-src/roboai", "tiago_primitives.yaml"
)
        with open(config_filename, "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
config["scene"]["load_object_categories"] = [
"floors",
"ceilings",
"walls",
"coffee_table",
"bottom_cabinet",
"top_cabinet",
"floor_lamp",
"shelf",
"trash_can",
"counter_top",
"fridge",
"sink",
]
# # SHOW TRAVERSABLE AREA
# import matplotlib.pyplot as plt
# import cv2
# scene_model = "Rs_int"
# trav_map_size = 200
# trav_map_erosion = 2
# trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
# trav_map = np.array(trav_map.resize((trav_map_size, trav_map_size)))
# trav_map = cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion)))
# plt.figure(figsize=(12, 12))
# plt.imshow(trav_map)
# plt.title(f"Traversable area of {scene_model} scene")
# plt.show()
config["scene"]["not_load_object_categories"] = ["ceilings"]
config["objects"] = [
{
"type": "DatasetObject",
"name": "black_cologne_bottle",
"category": "bottle_of_cologne",
"model": "lyipur",
"position": [-0.3, -0.8, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "apple",
"category": "apple",
"model": "agveuv",
"position": [-0.3, -1.1, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "cleaner_bottle",
"category": "bottle_of_cleaner",
"model": "svzbeq",
"position": [-0.5, -0.8, 0.6],
"orientation": [0, 1, 0, 0],
},
{
"type": "DatasetObject",
"name": "tomato_can",
"category": "can_of_tomatoes",
"model": "ckdouu",
"position": [-0.6, -1.1, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "table",
"category": "breakfast_table",
"model": "rjgmmy",
"scale": [0.3, 0.3, 0.3],
"position": [-0.7, 0.5, 0.2],
"orientation": [0, 0, 0, 1],
},
]
# Load the environment
env = og.Environment(configs=config)
scene = env.scene
robot = env.robots[0]
print(type(robot))
print(robot.default_arm)
delattr(Tiago, "simplified_mesh_usd_path")
# del robot.simplified_mesh_usd_path
# print(robot.simplified_mesh_usd_path)
# Allow user to move camera more easily
og.sim.enable_viewer_camera_teleoperation()
cam = og.sim.viewer_camera
# Living Room View
# camera pose: array([0.92048866, -5.66129052, 5.39363818]), array([0.44288347, 0.04140454, 0.08336682, 0.89173419])
# cam.set_position_orientation(
# position=np.array([0.92048866, -5.66129052, 5.39363818]),
# orientation=np.array([0.44288347, 0.04140454, 0.08336682, 0.89173419]),
# )
# Living Room + Kitchen View
# cam pose: (array([2.78592041, 0.56388298, 7.03105183]), array([0.15355086, 0.15665731, 0.69675768, 0.68294169]))
cam.set_position_orientation(
position=np.array([2.78592041, 0.56388298, 7.03105183]),
orientation=np.array([0.15355086, 0.15665731, 0.69675768, 0.68294169]),
)
# controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
SymbolicSemanticActionPrimitives._place_with_predicate = (
_simplified_place_with_predicate
)
SymbolicSemanticActionPrimitives._settle_robot = _quick_settle_robot
controller = SymbolicSemanticActionPrimitives(env)
controller.controller_functions["navigate_to_pose"] = (
controller._navigate_to_pose
)
# Task queue
action_handler = ActionHandler(
env, controller, scene, task_queue=self.task_queue
)
if False:
print("\n\n####### TASK DATA #######\n")
task_str, _, _ = env.task.show_instruction()
print(task_str)
task_obs, _ = env.task._get_obs(env)
agent_pos = task_obs["agent.n.01_1_pos"]
print(task_obs)
print(env.task.object_scope)
for k, v in env.task.object_scope.items():
dist = np.linalg.norm(
np.array(task_obs[f"{k}_pos"]) - np.array(agent_pos)
)
print(f"{k}: {v.name} {v.category} {v.exists} {dist:.3f}")
print("\n#########################\n\n")
while True:
await asyncio.sleep(0.1)
obs, info = robot.get_obs()
# print(info["robot0:eyes:Camera:0"]["seg_semantic"])
# img = obs["robot0:eyes:Camera:0"]["rgb"]
# print(obs["robot0:eyes:Camera:0"].keys())
# print(f"seg_semantic: {obs['robot0:eyes:Camera:0']['seg_semantic'].shape}")
# print(f"seg_instance: {obs['robot0:eyes:Camera:0']['seg_instance'].shape}")
# print(scene.seg_map)
if self.image_queue.full():
self.image_queue.get()
self.image_queue.put(
(obs["robot0:eyes:Camera:0"], info["robot0:eyes:Camera:0"])
)
if self.obj_in_hand_queue.full():
self.obj_in_hand_queue.get()
obj_in_hand = controller._get_obj_in_hand()
if obj_in_hand is not None:
self.obj_in_hand_queue.put(obj_in_hand.name)
else:
self.obj_in_hand_queue.put(None)
if self.ready_queue.full():
self.ready_queue.get()
self.ready_queue.put(time.time())
action_handler.check_for_action()
# task_str, _, _ = env.task.show_instruction()
# print(task_str)
# if self.scene_graph_queue.full():
# self.scene_graph_queue.get()
# graph = env.get_scene_graph()
# self.scene_graph_queue.put(graph)
# current = robot.get_joint_positions(normalized=False)
# print(f"current: {current}")
# arm_left = robot._controllers["arm_left"]
# print(f"arm_left: {arm_left.control}")
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
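# Queues shared between the FastAPI process and the simulation process:
# task_queue carries queued actions; the maxsize=1 queues always hold the latest
# camera observation, the currently grasped object, and a readiness heartbeat timestamp.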
task_queue = multiprocessing.Queue()
image_queue = multiprocessing.Queue(maxsize=1)
obj_in_hand_queue = multiprocessing.Queue(maxsize=1)
ready_queue = multiprocessing.Queue(maxsize=1)
sim = multiprocessing.Process(
target=SimWrapper, args=(task_queue, image_queue, obj_in_hand_queue, ready_queue)
)
@app.post("/add_action")
async def add_action(action: str):
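    """Queue an action for the simulator.

    The action is passed as a single comma-separated string, e.g. (illustrative):
        curl -X POST "http://localhost:8000/add_action?action=pick,apple"
        curl -X POST "http://localhost:8000/add_action?action=navigate_to_object,apple"
    """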
action = action.split(",")
action = (action[0], *action[1:])
print(f"Adding action: {action}")
task_queue.put(action)
return {"action": action}
@app.get("/get_image")
async def get_image():
image, _ = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, _ = image_queue.get()
image = image["rgb"]
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_visible_objects")
async def get_visible_objects():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance"]
info = info["seg_instance"]
visible_objects = list(info.values())
# filter out background object keywords
background_obj_keywords = ["floor", "wall", "robot", "table"]
visible_objects = [
obj
for obj in visible_objects
if not any(keyword in obj.lower() for keyword in background_obj_keywords)
]
return {"objects": visible_objects}
@app.get("/get_obj_in_hand")
async def get_obj_in_hand():
obj_in_hand = obj_in_hand_queue.get()
return {"obj_in_hand": obj_in_hand}
@app.get("/get_is_ready")
async def get_is_ready():
READY_THRESHOLD = 1.0
    ready = ready_queue.get()
if time.time() - ready > READY_THRESHOLD:
ready = False
return {"is_ready": ready}
@app.get("/wait_until_ready")
async def wait_until_ready():
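    # The sim loop pushes a fresh timestamp each iteration and stops while an action
    # is executing, so block here until a recent heartbeat shows the loop is idle again.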
READY_THRESHOLD = 1.0
ready = ready_queue.get()
is_ready = False
while not is_ready:
ready = ready_queue.get()
if time.time() - ready < READY_THRESHOLD:
is_ready = True
await asyncio.sleep(0.1)
return {"is_ready": is_ready}
@app.get("/get_semantic_segmentation")
async def get_semantic_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_semantic"]
info = info["seg_semantic"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_instance_segmentation")
async def get_instance_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance"]
info = info["seg_instance"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_id_instance_segmentation")
async def get_id_instance_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance_id"]
info = info["seg_instance_id"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
if __name__ == "__main__":
sim.start()
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| 19,970 | Python | 34.472469 | 132 | 0.565448 |
AshisGhosh/roboai/omnigibson/roboai/visualize_scene_graph.py | import networkx as nx
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from omnigibson.sensors import VisionSensor
def visualize_ascii_scene_graph(scene, G):
# def print_graph_ascii(G):
# for line in nx.generate_adjlist(G):
# print(line)
# # Example usage:
# print_graph_ascii(G)
nx.write_network_text(G)
def visualize_scene_graph(scene, G, show_window=True, realistic_positioning=False):
"""
Converts the graph into an image and shows it in a cv2 window if preferred.
Args:
show_window (bool): Whether a cv2 GUI window containing the visualization should be shown.
realistic_positioning (bool): Whether nodes should be positioned based on their position in the scene (if True)
or placed using a graphviz layout (neato) that makes it easier to read edges & find clusters.
"""
def _draw_graph():
nodes = list(G.nodes)
node_labels = {obj: obj.category for obj in nodes}
# colors = [
# "yellow" if obj.category == "agent"
# else (
# "green" if obj.states.get(object_states.ObjectsInFOVOfRobot, False)
# else "red" if object_states.ObjectsInFOVOfRobot in obj.states
# else "blue"
# )
# for obj in nodes
# ]
positions = (
{obj: (-pose[0][1], pose[0][0]) for obj, pose in G.nodes.data("pose")}
if realistic_positioning
else nx.nx_pydot.pydot_layout(G, prog="neato")
)
nx.drawing.draw_networkx(
G,
pos=positions,
labels=node_labels,
nodelist=nodes,
# node_color=colors,
font_size=4,
arrowsize=5,
node_size=150,
)
edge_labels = {
edge: ", ".join(
f"{state}={value}" for state, value in G.edges[edge]["states"]
)
for edge in G.edges
}
nx.drawing.draw_networkx_edge_labels(
G, pos=positions, edge_labels=edge_labels, font_size=4
)
# Prepare pyplot figure sized to match the robot video.
robot = scene.robots[0]
robot_camera_sensor = next(
s
for s in robot.sensors.values()
if isinstance(s, VisionSensor) and "rgb" in s.modalities
)
robot_view = (robot_camera_sensor.get_obs()[0]["rgb"][..., :3]).astype(np.uint8)
imgheight, imgwidth, _ = robot_view.shape
figheight = 4.8
figdpi = imgheight / figheight
figwidth = imgwidth / figdpi
# Draw the graph onto the figure.
fig = plt.figure(figsize=(figwidth, figheight), dpi=figdpi)
_draw_graph()
fig.canvas.draw()
# Convert the canvas to image
    graph_view = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
graph_view = graph_view.reshape(fig.canvas.get_width_height()[::-1] + (3,))
assert graph_view.shape == robot_view.shape
plt.close(fig)
# Combine the two images side-by-side
img = np.hstack((robot_view, graph_view))
# Convert to BGR for cv2-based viewing.
if show_window:
import cv2
cv_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow("SceneGraph", cv_img)
cv2.waitKey(1)
    out_image = Image.fromarray(img)
    out_image.save("test.png")
    return out_image
| 3,343 | Python | 31.153846 | 119 | 0.591684 |
AshisGhosh/roboai/omnigibson/roboai/primitive_patches.py | import numpy as np
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError
def _simplified_place_with_predicate(
self, obj, predicate, near_poses=None, near_poses_threshold=None
):
"""
Yields action for the robot to navigate to the object if needed, then to place it
Args:
obj (StatefulObject): Object for robot to place the object in its hand on
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"You need to be grasping an object first to place it somewhere.",
)
# Find a spot to put it
# obj_pose = self._sample_pose_with_object_and_predicate(
# predicate,
# obj_in_hand,
# obj,
# near_poses=near_poses,
# near_poses_threshold=near_poses_threshold,
# )
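    # Hard-coded drop poses per receptacle, replacing predicate-based pose sampling.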
place_locations = {
"table": {
"position": np.array([-0.7, 0.5, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"coffee_table_fqluyq_0": {
"position": np.array([-0.5, -1.1, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"bottom_cabinet_bamfsz_0": {
"position": np.array([-1.8, -0.5, 0.7]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"fridge_xyejdx_0": {
"position": np.array([0.2, 3.2, 1.0]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"trash_can_zotrbg_0": {
"position": np.array([-1.8, 2.65, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
}
obj_pose = place_locations[obj.name]
obj_pose = (obj_pose["position"], obj_pose["orientation"])
# Get close, release the object.
# yield from self._navigate_if_needed(obj, pose_on_obj=obj_pose)
yield from self._release()
# Actually move the object to the spot and step a bit to settle it.
obj_in_hand.set_position_orientation(*obj_pose)
# yield from self._settle_robot()
def _quick_settle_robot(self):
"""
Yields a no op action for a few steps to allow the robot and physics to settle
Returns:
np.array or None: Action array for one step for the robot to do nothing
"""
print("Settling robot")
for _ in range(10):
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
print("Settled robot")
| 2,741 | Python | 33.275 | 109 | 0.588107 |
AshisGhosh/roboai/shared/pyproject.toml | [tool.poetry]
name = "utils"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "<3.13,>=3.10.0"
httpx = "^0.27.0"
python-dotenv = "^1.0.1"
litellm = "^1.34.22"
pillow = "^10.3.0"
opencv-python = "^4.9.0.80"
ollama = "^0.1.8"
gradio-client = "^0.16.1"
replicate = "^0.25.1"
gradio = "^4.29.0"
fastembed = "^0.2.7"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 489 | TOML | 18.599999 | 46 | 0.627812 |
AshisGhosh/roboai/shared/utils/isaacsim_client.py | import asyncio
from shared.utils.http_client import post_request, get_image_request
import io
from PIL import Image
from functools import wraps
SERVER_NAME = "http://localhost:8080"
async def _get_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_image")
return Image.open(io.BytesIO(img_data))
def get_image():
return asyncio.run(_get_image())
async def _add_task(task: str):
return await post_request(
f"{SERVER_NAME}/add_task",
params=task,
)
def add_task(task: str):
return asyncio.run(_add_task(task))
def add_test_mode(func):
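    """Decorator: when a module-level `test_mode` flag is truthy, skip the wrapped call and return True."""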
@wraps(func)
def wrapper(*args, **kwargs):
if globals().get("test_mode", False):
print("TEST MODE ENABLED")
return True
else:
return func(*args, **kwargs)
return wrapper
def pick(object_name: str):
print(f"picking {object_name}")
task = {"task": "pick"}
add_task(task)
def place(object_name: str):
print(f"placing {object_name}")
print("placing object")
task = {"task": "place"}
print(f"Dummy task: {task}")
| 1,112 | Python | 19.611111 | 68 | 0.625 |
AshisGhosh/roboai/shared/utils/image_utils.py | import io
import base64
from PIL import Image
def pil_to_b64(image: Image) -> str:
"""Converts a PIL image to a base64 string."""
# Save the image to a bytes buffer
buffer = io.BytesIO()
image.save(
buffer, format=image.format
) # You can change the format to PNG or other supported formats
# Encode the buffer to base64
img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
# Optionally, prepend the URI scheme to make it ready for HTML or data transfer
img_base64 = f"data:image/jpeg;base64,{img_str}"
return img_base64
def b64_to_pil(image_b64: str) -> Image:
"""Converts a base64 string to a PIL image."""
# Remove the URI scheme
img_str = image_b64.split(",")[1]
# Decode the base64 string
img_bytes = base64.b64decode(img_str)
# Convert bytes to PIL image
image = Image.open(io.BytesIO(img_bytes))
return image
| 914 | Python | 26.727272 | 83 | 0.665208 |
AshisGhosh/roboai/shared/utils/robotic_grasping_client.py | import io
import asyncio
from shared.utils.http_client import get_request, post_request
from typing import Any, Dict
from PIL import Image
import logging
log = logging.getLogger("robotic_grasping_client")
log.setLevel(logging.DEBUG)
SERVER_NAME = "http://localhost:8003"
async def _get_grasps_from_rgb_and_depth(
rgb_image: Image, depth_image: Image
) -> Dict[str, Any]:
log.debug("Getting grasp from GR-ConvNet")
timeout = 30.0
image_byte_array = io.BytesIO()
rgb_image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
log.debug("RGB image byte array saved")
depth_image_byte_array = io.BytesIO()
depth_image.save(depth_image_byte_array, format="JPEG")
depth_image_byte_array = depth_image_byte_array.getvalue()
log.debug("Depth image byte array saved")
files = {
"rgb_image": ("rgb_image.jpg", image_byte_array, "image/jpeg"),
"depth_image": ("depth_image.jpg", depth_image_byte_array, "image/jpeg"),
}
response = await post_request(
f"{SERVER_NAME}/get_grasps", files=files, timeout=timeout
)
return response
def get_grasps_from_rgb_and_depth(
rgb_image: Image, depth_image: Image
) -> Dict[str, Any]:
return asyncio.run(_get_grasps_from_rgb_and_depth(rgb_image, depth_image))
async def _check_server() -> str:
response = await get_request(f"{SERVER_NAME}/")
return response
| 1,432 | Python | 26.557692 | 81 | 0.682263 |
AshisGhosh/roboai/shared/utils/robosim_client.py | import asyncio
from shared.utils.http_client import get_request, post_request, get_image_request
import io
from PIL import Image
SERVER_NAME = "http://localhost:8000"
async def _get_objects_on_table() -> list[str]:
return await get_request(f"{SERVER_NAME}/get_objects")
def get_objects_on_table():
return asyncio.run(_get_objects_on_table())
async def _get_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_image")
return Image.open(io.BytesIO(img_data))
def get_image():
return asyncio.run(_get_image())
async def _get_grasp_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_grasp_image")
return Image.open(io.BytesIO(img_data))
def get_grasp_image():
return asyncio.run(_get_grasp_image())
async def _open_gripper():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "open gripper", "type": "open_gripper", "args": ""},
)
def open_gripper():
return asyncio.run(_open_gripper())
async def _close_gripper():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "close gripper", "type": "close_gripper", "args": ""},
)
def close_gripper():
return asyncio.run(_close_gripper())
async def _go_to_pick_center():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to pick center", "type": "go_to_pick_center", "args": ""},
)
async def _get_grasp(object_name: str):
return await post_request(
f"{SERVER_NAME}/add_task",
data={
"name": f"get grasp {object_name}",
"type": "get_grasp",
"args": object_name,
},
)
def get_grasp(object_name: str):
asyncio.run(_go_to_pick_center())
return asyncio.run(_get_grasp(object_name))
async def _go_to_pre_grasp():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to pre grasp", "type": "go_to_pre_grasp", "args": ""},
)
def go_to_pre_grasp():
return asyncio.run(_go_to_pre_grasp())
async def _go_to_grasp_position():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to grasp pos", "type": "go_to_grasp_position", "args": ""},
)
def go_to_grasp_position():
return asyncio.run(_go_to_grasp_position())
async def _go_to_drop():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to drop", "type": "go_to_drop", "args": ""},
)
def go_to_drop():
return asyncio.run(_go_to_drop())
def pick(object_name: str):
get_grasp(object_name)
go_to_pre_grasp()
open_gripper()
go_to_grasp_position()
close_gripper()
go_to_pre_grasp()
def place(object_name: str):
go_to_drop()
open_gripper()
| 2,818 | Python | 21.552 | 85 | 0.606104 |
AshisGhosh/roboai/shared/utils/gradio_client.py | from PIL import Image
from typing import Dict, Any
import gradio_client
from gradio_client import Client
import time
def moondream_answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
client = Client("vikhyatk/moondream2")
# client = Client("Kartik2503/ImageToText")
image.save("/app/shared/data/tmp.png")
start_time = time.time()
result = client.predict(
gradio_client.file("/app/shared/data/tmp.png"),
question,
api_name="/answer_question",
)
print(f"[Gradio] Time taken: {time.time() - start_time}")
return {"result": result}
def qwen_vl_max_answer_question_from_image(
image: Image, question: str
) -> Dict[str, Any]:
client = Client("https://qwen-qwen-vl-max.hf.space/--replicas/fi9fr/")
image.save("/app/shared/data/tmp.png")
start_time = time.time()
# result = client.predict(
# fn_index=3
# )
# json_str = "/tmp/gradio/tmp0af5pyui.json"
# result = client.predict(
# json_str,
# img_path, # str (filepath on your computer (or URL) of file) in '📁 Upload (上传文件)' Uploadbutton component
# fn_index=5
# )
result = client.predict(
# json_str,
# "Hi",
fn_index=2
)
print(f"[Gradio] Time taken: {time.time() - start_time}")
return {"result": result}
| 1,334 | Python | 25.699999 | 111 | 0.622939 |
AshisGhosh/roboai/shared/utils/model_server_client.py | import io
import asyncio
from PIL import Image
from typing import Any, Dict
from shared.utils.http_client import post_request
import logging
log = logging.getLogger("model_server_client")
log.setLevel(logging.INFO)
SERVER_NAME = "http://localhost:8002"
async def _answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
timeout = 120.0
image_byte_array = io.BytesIO()
image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
files = {"file": ("image.jpg", image_byte_array, "image/jpeg")}
response = await post_request(
f"{SERVER_NAME}/answer_question",
files=files,
params={"question": question},
timeout=timeout,
)
return response
def answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
return asyncio.run(_answer_question_from_image(image, question))
async def _embed(text: str) -> Dict[str, Any]:
log.debug(f"async _embed call: Embedding text: {text}")
return await post_request(f"{SERVER_NAME}/embed", params={"text": text})
def embed(text: str) -> Dict[str, Any]:
log.debug(f"Embedding text: {text}")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(_embed(text))
loop.close()
return result
| 1,331 | Python | 26.749999 | 85 | 0.676183 |
AshisGhosh/roboai/shared/utils/http_client.py | import httpx
import logging
from typing import Any, Dict, Optional
from httpx import Timeout
from dotenv import load_dotenv
load_dotenv()
log = logging.getLogger("http_client")
log.setLevel(logging.DEBUG)
TIMEOUT_DEFAULT = 5.0
async def post_request(
url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
files: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
timeout: float = TIMEOUT_DEFAULT,
) -> Dict[str, Any]:
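    """Send a POST request with optional query params, JSON body, and file uploads.

    Returns the parsed JSON response, or a {"success": False, "text": ...} dict on
    timeout or error.
    """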
timeout = Timeout(timeout)
log.debug(f"Sending POST request to {url}:")
log.debug(f" headers: {headers}")
log.debug(f" params: {params}")
log.debug(f" data: {data}")
if files:
log.debug(f" files len: {len(files)}")
log.debug(f" timeout: {timeout}")
try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(
url,
params=params,
json=data,
files=files,
headers=headers,
timeout=timeout,
)
if response.status_code == 200:
response = response.json()
log.debug(f"Response: {response}")
return response
raise Exception(f"Error:{response.status_code}: {response.text}")
except httpx.ReadTimeout as e:
log.debug(
f"Timeout sending POST request to {url} with params: {params} and timeout: {timeout}: {e}"
)
return {
"success": False,
"text": f"httpx.ReadTimeout: Timeout sending POST request to {url} with params: {params} and timeout: {timeout}: {e}",
}
except Exception as e:
log.debug(
f"Error sending POST request to {url} with params: {params} and timeout: {timeout}: {e}"
)
return {
"success": False,
"text": f"Error sending POST request to {url} with params: {params} and timeout: {timeout}: {e}",
}
async def get_request(url: str, timeout: float = TIMEOUT_DEFAULT) -> Dict[str, Any]:
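    """Send a GET request and return the parsed JSON response, or a failure dict on a non-200 status."""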
log.debug(f"Sending GET request to {url}")
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.get(url, timeout=timeout)
log.debug(response)
if response.status_code == 200:
log.debug(f"Response: {response.json()}")
return response.json()
else:
log.debug(f"Error: {response.text}")
return {"success": False, "text": f"{response.status}{response.text}"}
async def get_image_request(url: str, timeout: float = TIMEOUT_DEFAULT) -> bytes:
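    """Send a GET request and return the raw response body as bytes (e.g. image data), or empty bytes on error."""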
log.debug(f"Sending GET request to {url}")
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.get(url, timeout=timeout)
log.debug(response)
if response.status_code == 200:
image_data = bytearray()
for chunk in response.iter_bytes():
image_data += chunk
log.debug(f"Response: ({type(image_data)}) {image_data[:10]}")
return image_data
else:
log.debug(f"Error: {response.text}")
return b""
| 3,228 | Python | 32.989473 | 130 | 0.579926 |
AshisGhosh/roboai/shared/utils/grasp_client.py | import io
import asyncio
from shared.utils.http_client import get_request, post_request
from typing import Any, Dict
from PIL import Image
SERVER_NAME = "http://localhost:8005"
async def _get_grasp_from_image(image: Image) -> Dict[str, Any]:
timeout = 30.0
image_byte_array = io.BytesIO()
image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
files = {"file": ("image.jpg", image_byte_array, "image/jpeg")}
response = await post_request(f"{SERVER_NAME}/detect", files=files, timeout=timeout)
return response
def get_grasp_from_image(image: Image) -> Dict[str, Any]:
return asyncio.run(_get_grasp_from_image(image))
async def _check_server() -> str:
response = await get_request(f"{SERVER_NAME}/")
return response
| 801 | Python | 24.870967 | 88 | 0.692884 |
AshisGhosh/roboai/shared/utils/omnigibson_client.py | import asyncio
from shared.utils.http_client import post_request, get_image_request, get_request
import io
from PIL import Image
from shared.utils.llm_utils import get_closest_text_sync as get_closest_text
SERVER_NAME = "http://localhost:8000"
OMNIGIBSON_TIMEOUT = 30
async def _get_image() -> Image:
img_data = await get_image_request(
f"{SERVER_NAME}/get_image", timeout=OMNIGIBSON_TIMEOUT
)
return Image.open(io.BytesIO(img_data))
def get_image():
response = asyncio.run(_get_image())
if isinstance(response, dict):
return response.get("success", True)
return response
async def _get_visible_objects() -> dict:
return await get_request(
f"{SERVER_NAME}/get_visible_objects", timeout=OMNIGIBSON_TIMEOUT
)
def get_visible_objects():
response = asyncio.run(_get_visible_objects())
if "success" in response.keys():
return response.get("success", True)
return response["objects"]
async def _get_obj_in_hand() -> dict:
return await get_request(
f"{SERVER_NAME}/get_obj_in_hand", timeout=OMNIGIBSON_TIMEOUT
)
def get_obj_in_hand():
try:
response = asyncio.run(_get_obj_in_hand())
return response["obj_in_hand"]
except Exception as e:
print(f"Error getting object in hand: {e}")
return False
async def _wait_until_ready() -> dict:
await asyncio.sleep(1)
return await get_request(
f"{SERVER_NAME}/wait_until_ready", timeout=OMNIGIBSON_TIMEOUT
)
def wait_until_ready():
try:
response = asyncio.run(_wait_until_ready())
return response["is_ready"]
except Exception as e:
print(f"Error waiting until ready: {e}")
return False
async def _add_action(action: str):
return await post_request(
f"{SERVER_NAME}/add_action", params=action, timeout=OMNIGIBSON_TIMEOUT
)
def add_action(action: str):
response = asyncio.run(_add_action(action))
if isinstance(response, dict):
return response.get("success", True)
return response
def pick(object_name: str):
print(f"Attempting to pick {object_name}. Referencing against visible objects.")
objects = get_visible_objects()
object_name = get_closest_text(object_name, objects, threshold=0.2)
print(f"picking object {object_name}")
action = {"action": f"pick,{object_name}"}
return add_action(action)
def place(location: str):
print(f"placing object in {location}")
print("placing object")
action = {"action": f"place,{location}"}
return add_action(action)
def navigate_to(object_name: str, location: str = None):
print(f"navigating to {object_name}, {location}")
if location:
action = {"action": f"navigate_to,{object_name},{location}"}
else:
action = {"action": f"navigate_to_object,{object_name}"}
return add_action(action)
| 2,883 | Python | 25.703703 | 84 | 0.660076 |
AshisGhosh/roboai/shared/utils/huggingface_client.py | import asyncio
from shared.utils.http_client import post_request
API_URL = "https://api-inference.huggingface.co/models/Efficient-Large-Model/VILA-2.7b"
headers = {"Authorization": "Bearer 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"}
async def _vila_query(text, image=None):
json = {"inputs": text}
response = await post_request(API_URL, headers=headers, data=json)
print(response)
return response
def vila_query(text, image=None):
return asyncio.run(_vila_query(text, image))
| 497 | Python | 28.294116 | 87 | 0.738431 |
AshisGhosh/roboai/shared/utils/replicate_client.py | from PIL import Image
from typing import Dict, Any
import replicate
import time
def moondream_answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
image.save("/app/shared/data/tmp.png")
image_handler = open("/app/shared/data/tmp.png", "rb")
input = {"image": image_handler, "prompt": question}
start_time = time.time()
output = replicate.run(
"lucataco/moondream2:392a53ac3f36d630d2d07ce0e78142acaccc338d6caeeb8ca552fe5baca2781e",
input=input,
)
output = "".join(output)
print(f"[Replicate] Time taken: {time.time() - start_time}")
return {"result": output}
| 636 | Python | 29.333332 | 95 | 0.683962 |
AshisGhosh/roboai/shared/utils/llm_utils.py | import litellm
import ollama
import logging
import numpy as np
from fastembed import TextEmbedding
import asyncio
from shared.utils.model_server_client import _embed
from dotenv import load_dotenv
load_dotenv("shared/.env") # take environment variables from .env.
log = logging.getLogger("llm_utils")
log.setLevel(logging.INFO)
def log_debug(msg):
log.debug(msg)
# print(msg)
def log_info(msg):
log.info(msg)
# print(msg)
async def get_embedding_sentence_transformers(text):
log_debug(f"Getting sentence_transformer/HF embedding for text: {text}")
response = await _embed(text)
return response["embedding"]
def get_embedding_ollama(text):
log_debug(f"Getting ollama embedding for text: {text}")
response = ollama.embeddings(model="mxbai-embed-large", prompt=text)
return response["embedding"]
def get_embedding_litellm(text):
log_debug(f"Getting litellm/HF embedding for text: {text}")
response = litellm.embedding(
model="huggingface/mixedbread-ai/mxbai-embed-large-v1", input=[text]
)
log_debug(f"Embedding received: {response}")
return response["data"][0]["embedding"]
fast_embed_model = None
def get_embedding_fastembed(text):
global fast_embed_model
if not fast_embed_model:
fast_embed_model = TextEmbedding("mixedbread-ai/mxbai-embed-large-v1")
embed = list(fast_embed_model.embed(text))[0]
return embed
async def get_embedding(text):
log_debug(f"Getting embedding for text: {text}")
return get_embedding_fastembed(text)
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
dot_product = np.dot(v1, v2)
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
return dot_product / (norm_v1 * norm_v2)
async def get_closest_text(
text: str, text_list: list[str], k: int = 1, threshold: float = 0.5
) -> str:
log_info(f"Getting closest text for: '{text}' in list: {text_list}")
query_vector = await get_embedding(text)
log_debug(f"Query vector: {query_vector}")
vectors = [await get_embedding(text) for text in text_list]
similarities = [cosine_similarity(query_vector, vector) for vector in vectors]
log_debug(f"Similarities: {similarities}")
if k > 1:
closest_indices = np.argsort(similarities)[-k:]
log_info(f"Closest texts: {[text_list[i] for i in closest_indices]}")
return [text_list[i] for i in closest_indices]
closest_index = np.argmax(similarities)
if similarities[closest_index] < threshold:
log_info(f"Similarity below threshold: {similarities[closest_index]}")
return None
log_info(f"Closest text: {text_list[closest_index]}")
return text_list[closest_index]
def get_closest_text_sync(
text: str, text_list: list[str], k: int = 1, threshold: float = 0.5
):
return asyncio.run(get_closest_text(text, text_list, k, threshold))
async def get_most_important(texts: list[str] | str, k: int = 1):
log_info(f"Getting most important text from: {texts}")
if isinstance(texts, list):
texts = " ".join(texts)
texts_embedding = await get_embedding(texts)
texts = texts.split()
vectors = [await get_embedding(text) for text in texts]
similarities = [cosine_similarity(texts_embedding, vector) for vector in vectors]
log_debug(f"Similarities: {similarities}")
closest_indices = np.argsort(similarities)[-k:]
log_info(f"Closest texts: {[texts[i] for i in closest_indices]}")
return [texts[i] for i in closest_indices]
def get_most_important_sync(texts: list[str], k: int = 1):
return asyncio.run(get_most_important(texts, k))
| 3,662 | Python | 29.781512 | 85 | 0.688422 |
AshisGhosh/roboai/grasping/scale_balanced_grasp/pyproject.toml | [tool.poetry]
name = "scale-balanced-grasp"
version = "0.1.0"
description = ""
authors = ["Ashis Ghosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 279 | TOML | 17.666665 | 47 | 0.691756 |
AshisGhosh/roboai/grasping/grasp_server/pyproject.toml | [tool.poetry]
name = "grasp-server"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.7"
fastapi = "0.103.2"
uvicorn = "0.22.0"
python-multipart = "0.0.8"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 335 | TOML | 17.666666 | 46 | 0.674627 |
AshisGhosh/roboai/grasping/grasp_server/README.md | Download weights linked here: https://github.com/stefan-ainetter/grasp_det_seg_cnn/tree/1cfeb2f239e0745e127055ad597461f1585a7e94
Model Weights: https://files.icg.tugraz.at/d/10296a970cc242aa90ff/
ResNet101 Weights: https://files.icg.tugraz.at/d/1e84f72c1109485ba9f9/ | 269 | Markdown | 43.999993 | 128 | 0.828996 |
AshisGhosh/roboai/grasping/grasp_server/app/main.py | #!/usr/bin/python -u
import io
import base64
import numpy as np
import cv2
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from grasp_server.grasp_det_seg import GraspServer
import logging
logging.basicConfig(level=logging.INFO)
# Create GraspServer instance
grasp = GraspServer()
# Create FastAPI instance
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the grasp server."}
@app.post("/detect")
async def detect(file: UploadFile = File(...)):
# Read the image file
image_bytes = await file.read()
nparr = np.frombuffer(image_bytes, np.uint8)
# Decode the image
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Perform object detection
result, img = grasp.detect(image)
# Return the result
return JSONResponse(content={"result": result, "image": get_image_response(img)})
@app.post("/test")
async def test():
result, img = grasp.test_detect()
return JSONResponse(content={"result": result, "image": get_image_response(img)})
def get_image_response(image):
buf = io.BytesIO()
image.save(buf, format="JPEG")
buf.seek(0)
base64_image = base64.b64encode(buf.getvalue()).decode("utf-8")
return base64_image
# return StreamingResponse(buf, media_type="image/jpeg")
| 1,834 | Python | 23.797297 | 85 | 0.698473 |
AshisGhosh/roboai/grasping/grasp_server/grasp_server/test_grasp_det_seg.py | import argparse
import time
import os
import numpy as np
import scipy
import cv2
from functools import partial
import torch
import torch.optim as optim
import torch.utils.data as data
from torch import distributed
import grasp_det_seg.models as models
from grasp_det_seg.algos.detection import (
PredictionGenerator,
ProposalMatcher,
DetectionLoss,
)
from grasp_det_seg.algos.fpn import DetectionAlgoFPN, RPNAlgoFPN
from grasp_det_seg.algos.rpn import AnchorMatcher, ProposalGenerator, RPNLoss
from grasp_det_seg.algos.semantic_seg import SemanticSegAlgo, SemanticSegLoss
from grasp_det_seg.config import load_config
from grasp_det_seg.data_OCID import iss_collate_fn, OCIDTestDataset, OCIDTestTransform
from grasp_det_seg.data_OCID.OCID_class_dict import colors_list
from grasp_det_seg.data_OCID.sampler import DistributedARBatchSampler
from grasp_det_seg.models.det_seg import DetSegNet
from grasp_det_seg.modules.fpn import FPN, FPNBody
from grasp_det_seg.modules.heads import RPNHead, FPNROIHead, FPNSemanticHeadDeeplab
from grasp_det_seg.utils import logging
from grasp_det_seg.utils.meters import AverageMeter
from grasp_det_seg.utils.misc import (
config_to_string,
scheduler_from_config,
norm_act_from_config,
freeze_params,
NORM_LAYERS,
OTHER_LAYERS,
)
from grasp_det_seg.utils.parallel import DistributedDataParallel
from grasp_det_seg.utils.snapshot import resume_from_snapshot
parser = argparse.ArgumentParser(
description="OCID detection and segmentation test script"
)
parser.add_argument("--local_rank", type=int)
parser.add_argument(
"--log_dir", type=str, default=".", help="Write logs to the given directory"
)
parser.add_argument(
"config", metavar="FILE", type=str, help="Path to configuration file"
)
parser.add_argument("model", metavar="FILE", type=str, help="Path to model file")
parser.add_argument("data", metavar="DIR", type=str, help="Path to dataset")
parser.add_argument("out_dir", metavar="DIR", type=str, help="Path to output directory")
def save_param_file(writer, param_file):
data_sum = ""
with open(param_file) as fp:
Lines = fp.readlines()
for line in Lines:
data_sum += line + " \n"
writer.add_text("dataset_parameters", data_sum)
return
def ensure_dir(dir_path):
try:
os.mkdir(dir_path)
except FileExistsError:
pass
def Rotate2D(pts, cnt, ang):
    ang = np.deg2rad(ang)
    # scipy.dot/array/cos/sin were removed from the scipy namespace; use numpy directly.
    return (
        np.dot(
            pts - cnt,
            np.array(
                [[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]]
            ),
)
+ cnt
)
def save_prediction_image(raw_pred, img_abs_path, img_root_path, im_size, out_dir):
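    """For each image in the batch, overlay the predicted segmentation mask, draw the
    best-scoring rotated grasp box per annotated object class, and save an
    [rgb | best grasps | mask overlay] side-by-side visualization to `out_dir`."""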
num_classes_theta = 18
# grasp candidate confidence threshold
threshold = 0.06
iou_seg_threshold = 100 # in px
for i, (sem_pred, bbx_pred, cls_pred, obj_pred) in enumerate(
zip(
raw_pred["sem_pred"],
raw_pred["bbx_pred"],
raw_pred["cls_pred"],
raw_pred["obj_pred"],
)
):
item = os.path.join(img_root_path[i], img_abs_path[i])
im_size_ = im_size[i]
ensure_dir(out_dir)
seq_path, im_name = item.split(",")
sem_pred = np.asarray(sem_pred.detach().cpu().numpy(), dtype=np.uint8)
seg_mask_vis = np.zeros((im_size_[0], im_size_[1], 3))
cls_labels = np.unique(sem_pred)
img_path = os.path.join(img_root_path[i], seq_path, "rgb", im_name)
mask_path = os.path.join(
img_root_path[i], seq_path, "seg_mask_labeled_combi", im_name
)
img = cv2.imread(img_path)
img_best_boxes = np.copy(img)
mask_gt = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
for cnt, label in enumerate(cls_labels):
if label == 0:
continue
seg_mask_vis[sem_pred == label] = colors_list[label]
mask_per_label = np.zeros_like(sem_pred)
mask_per_label_gt = np.zeros_like(sem_pred)
mask_per_label[sem_pred == label] = 1
mask_per_label_gt[mask_gt == label] = 1
if sum(map(sum, mask_per_label)) < iou_seg_threshold:
continue
ensure_dir(out_dir)
out_path = os.path.join(out_dir, im_name[:-4] + ".png")
img_mask = img * 0.25 + seg_mask_vis * 0.75
if bbx_pred is None:
continue
anno_per_class_dir = os.path.join(
os.path.join(
img_root_path[i], seq_path, "Annotations_per_class", im_name[:-4]
)
)
for class_dir in os.listdir(anno_per_class_dir):
if not os.path.isdir(os.path.join(anno_per_class_dir, class_dir)):
continue
best_confidence = 0.0
r_bbox_best = None
for bbx_pred_i, cls_pred_i, obj_pred_i in zip(bbx_pred, cls_pred, obj_pred):
if obj_pred_i.item() > threshold:
pt1 = (int(bbx_pred_i[0]), int(bbx_pred_i[1]))
pt2 = (int(bbx_pred_i[2]), int(bbx_pred_i[3]))
cls = cls_pred_i.item()
if cls > 17:
assert False
theta = ((180 / num_classes_theta) * cls) + 5
                    pts = np.array(
[
[pt1[0], pt1[1]],
[pt2[0], pt1[1]],
[pt2[0], pt2[1]],
[pt1[0], pt2[1]],
]
)
                    cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
r_bbox_ = Rotate2D(pts, cnt, 90 - theta)
r_bbox_ = r_bbox_.astype("int16")
if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
continue
if sem_pred[int(cnt[1]), int(cnt[0])] == int(class_dir):
if obj_pred_i.item() >= best_confidence:
best_confidence = obj_pred_i.item()
r_bbox_best = r_bbox_
if r_bbox_best is not None:
cv2.line(
img_best_boxes,
tuple(r_bbox_best[0]),
tuple(r_bbox_best[1]),
(255, 0, 0),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[1]),
tuple(r_bbox_best[2]),
(0, 0, 255),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[2]),
tuple(r_bbox_best[3]),
(255, 0, 0),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[3]),
tuple(r_bbox_best[0]),
(0, 0, 255),
2,
)
res = np.hstack((img, img_best_boxes, img_mask))
scale_percent = 75 # percent of original size
width = int(res.shape[1] * scale_percent / 100)
height = int(res.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(res, dim, interpolation=cv2.INTER_AREA)
cv2.imwrite(out_path, resized)
return
def log_debug(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().debug(msg, *args, **kwargs)
def log_info(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().info(msg, *args, **kwargs)
def make_config(args):
log_debug("Loading configuration from %s", args.config)
conf = load_config(args.config, args.config)
log_debug("\n%s", config_to_string(conf))
return conf
def make_dataloader(args, config, rank, world_size):
config = config["dataloader"]
log_debug("Creating dataloaders for dataset in %s", args.data)
# Validation dataloader
val_tf = OCIDTestTransform(
config.getint("shortest_size"),
config.getint("longest_max_size"),
config.getstruct("rgb_mean"),
config.getstruct("rgb_std"),
)
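    # Override the dataset root with the path mounted inside the container (assumed layout).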
config["root_path"] = "/app/data/OCID_grasp"
val_db = OCIDTestDataset(args.data, config["root_path"], config["test_set"], val_tf)
val_sampler = DistributedARBatchSampler(
val_db, config.getint("val_batch_size"), world_size, rank, False
)
val_dl = data.DataLoader(
val_db,
batch_sampler=val_sampler,
collate_fn=iss_collate_fn,
pin_memory=True,
num_workers=config.getint("num_workers"),
)
return val_dl
def make_model(config):
body_config = config["body"]
fpn_config = config["fpn"]
rpn_config = config["rpn"]
roi_config = config["roi"]
sem_config = config["sem"]
general_config = config["general"]
classes = {
"total": int(general_config["num_things"]) + int(general_config["num_stuff"]),
"stuff": int(general_config["num_stuff"]),
"thing": int(general_config["num_things"]),
"semantic": int(general_config["num_semantic"]),
}
# BN + activation
norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)
# Create backbone
log_debug("Creating backbone model %s", body_config["body"])
body_fn = models.__dict__["net_" + body_config["body"]]
body_params = (
body_config.getstruct("body_params") if body_config.get("body_params") else {}
)
body = body_fn(norm_act=norm_act_static, **body_params)
if body_config.get("weights"):
body_config["weights"] = "/app/data/weights/resnet101"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
body.load_state_dict(torch.load(body_config["weights"], map_location=device))
# Freeze parameters
for n, m in body.named_modules():
for mod_id in range(1, body_config.getint("num_frozen") + 1):
if ("mod%d" % mod_id) in n:
freeze_params(m)
body_channels = body_config.getstruct("out_channels")
# Create FPN
fpn_inputs = fpn_config.getstruct("inputs")
fpn = FPN(
[body_channels[inp] for inp in fpn_inputs],
fpn_config.getint("out_channels"),
fpn_config.getint("extra_scales"),
norm_act_static,
fpn_config["interpolation"],
)
body = FPNBody(body, fpn, fpn_inputs)
# Create RPN
proposal_generator = ProposalGenerator(
rpn_config.getfloat("nms_threshold"),
rpn_config.getint("num_pre_nms_train"),
rpn_config.getint("num_post_nms_train"),
rpn_config.getint("num_pre_nms_val"),
rpn_config.getint("num_post_nms_val"),
rpn_config.getint("min_size"),
)
anchor_matcher = AnchorMatcher(
rpn_config.getint("num_samples"),
rpn_config.getfloat("pos_ratio"),
rpn_config.getfloat("pos_threshold"),
rpn_config.getfloat("neg_threshold"),
rpn_config.getfloat("void_threshold"),
)
rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
rpn_algo = RPNAlgoFPN(
proposal_generator,
anchor_matcher,
rpn_loss,
rpn_config.getint("anchor_scale"),
rpn_config.getstruct("anchor_ratios"),
fpn_config.getstruct("out_strides"),
rpn_config.getint("fpn_min_level"),
rpn_config.getint("fpn_levels"),
)
rpn_head = RPNHead(
fpn_config.getint("out_channels"),
len(rpn_config.getstruct("anchor_ratios")),
1,
rpn_config.getint("hidden_channels"),
norm_act_dynamic,
)
# Create detection network
prediction_generator = PredictionGenerator(
roi_config.getfloat("nms_threshold"),
roi_config.getfloat("score_threshold"),
roi_config.getint("max_predictions"),
)
proposal_matcher = ProposalMatcher(
classes,
roi_config.getint("num_samples"),
roi_config.getfloat("pos_ratio"),
roi_config.getfloat("pos_threshold"),
roi_config.getfloat("neg_threshold_hi"),
roi_config.getfloat("neg_threshold_lo"),
roi_config.getfloat("void_threshold"),
)
roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
roi_size = roi_config.getstruct("roi_size")
roi_algo = DetectionAlgoFPN(
prediction_generator,
proposal_matcher,
roi_loss,
classes,
roi_config.getstruct("bbx_reg_weights"),
roi_config.getint("fpn_canonical_scale"),
roi_config.getint("fpn_canonical_level"),
roi_size,
roi_config.getint("fpn_min_level"),
roi_config.getint("fpn_levels"),
)
roi_head = FPNROIHead(
fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic
)
# Create semantic segmentation network
sem_loss = SemanticSegLoss(ohem=sem_config.getfloat("ohem"))
sem_algo = SemanticSegAlgo(sem_loss, classes["semantic"])
sem_head = FPNSemanticHeadDeeplab(
fpn_config.getint("out_channels"),
sem_config.getint("fpn_min_level"),
sem_config.getint("fpn_levels"),
classes["semantic"],
pooling_size=sem_config.getstruct("pooling_size"),
norm_act=norm_act_static,
)
# Create final network
return DetSegNet(
body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes
)
def make_optimizer(config, model, epoch_length):
body_config = config["body"]
opt_config = config["optimizer"]
sch_config = config["scheduler"]
# Gather parameters from the network
norm_parameters = []
other_parameters = []
for m in model.modules():
if any(isinstance(m, layer) for layer in NORM_LAYERS):
norm_parameters += [p for p in m.parameters() if p.requires_grad]
elif any(isinstance(m, layer) for layer in OTHER_LAYERS):
other_parameters += [p for p in m.parameters() if p.requires_grad]
assert len(norm_parameters) + len(other_parameters) == len(
[p for p in model.parameters() if p.requires_grad]
), "Not all parameters that require grad are accounted for in the optimizer"
# Set-up optimizer hyper-parameters
parameters = [
{
"params": norm_parameters,
"lr": opt_config.getfloat("lr")
if not body_config.getboolean("bn_frozen")
else 0.0,
"weight_decay": opt_config.getfloat("weight_decay")
if opt_config.getboolean("weight_decay_norm")
else 0.0,
},
{
"params": other_parameters,
"lr": opt_config.getfloat("lr"),
"weight_decay": opt_config.getfloat("weight_decay"),
},
]
optimizer = optim.SGD(
parameters,
momentum=opt_config.getfloat("momentum"),
nesterov=opt_config.getboolean("nesterov"),
)
scheduler = scheduler_from_config(sch_config, optimizer, epoch_length)
assert sch_config["update_mode"] in ("batch", "epoch")
batch_update = sch_config["update_mode"] == "batch"
total_epochs = sch_config.getint("epochs")
return optimizer, scheduler, batch_update, total_epochs
def test(model, dataloader, **varargs):
model.eval()
dataloader.batch_sampler.set_epoch(0)
data_time_meter = AverageMeter(())
batch_time_meter = AverageMeter(())
data_time = time.time()
for it, batch in enumerate(dataloader):
print("Batch no. : " + str(it))
with torch.no_grad():
# Extract data
img = batch["img"].cuda(device=varargs["device"], non_blocking=True)
# img = batch["img"]
abs_paths = batch["abs_path"]
root_paths = batch["root_path"]
im_size = batch["im_size"]
data_time_meter.update(torch.tensor(time.time() - data_time))
batch_time = time.time()
# Run network
_, pred, conf = model(img=img, do_loss=False, do_prediction=True)
# Update meters
batch_time_meter.update(torch.tensor(time.time() - batch_time))
varargs["save_function"](pred, abs_paths, root_paths, im_size)
data_time = time.time()
def test_model(model, img):
# img may be limited to 640x480
model.eval()
with torch.no_grad():
# Run network
        _, pred, conf = model(img=img, do_loss=False, do_prediction=True)
    return pred, conf
def main(args):
# Adjust backend based on CUDA availability
backend = "nccl" if torch.cuda.is_available() else "gloo"
distributed.init_process_group(backend=backend, init_method="env://")
if torch.cuda.is_available():
device_id, device = args.local_rank, torch.device(f"cuda:{args.local_rank}")
torch.cuda.set_device(device_id)
else:
device_id, device = None, torch.device("cpu")
rank, world_size = distributed.get_rank(), distributed.get_world_size()
# Load configuration
config = make_config(args)
# Create dataloaders
test_dataloader = make_dataloader(args, config, rank, world_size)
# Create model
model = make_model(config)
log_debug("Loading snapshot from %s", args.model)
resume_from_snapshot(
model, args.model, ["body", "rpn_head", "roi_head", "sem_head"]
)
# Initialize GPU specific settings if a GPU is available
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = config["general"].getboolean("cudnn_benchmark")
if torch.cuda.is_available() and "cuda" in device.type:
model = model.cuda(device)
model = DistributedDataParallel(
model,
device_ids=[device_id],
output_device=device_id,
find_unused_parameters=True,
)
else:
# Adjust the model for CPU-based distributed computing if necessary
# Note: You might need to adjust this part based on your specific needs and setup
model = DistributedDataParallel(
model, device_ids=None, output_device=None, find_unused_parameters=True
)
save_function = partial(save_prediction_image, out_dir=args.out_dir)
test(
model,
test_dataloader,
device=device,
summary=None,
log_interval=config["general"].getint("log_interval"),
save_function=save_function,
)
if __name__ == "__main__":
main(parser.parse_args())
| 18,729 | Python | 32.808664 | 89 | 0.576966 |
AshisGhosh/roboai/grasping/grasp_server/grasp_server/grasp_det_seg.py | import numpy as np
import torch
from PIL import Image
import cv2
import scipy
import copy
import grasp_det_seg.models as models
from grasp_det_seg.modules.fpn import FPN, FPNBody
from grasp_det_seg.algos.rpn import ProposalGenerator, AnchorMatcher, RPNLoss
from grasp_det_seg.algos.fpn import RPNAlgoFPN, DetectionAlgoFPN
from grasp_det_seg.modules.heads import RPNHead, FPNROIHead, FPNSemanticHeadDeeplab
from grasp_det_seg.algos.detection import (
PredictionGenerator,
ProposalMatcher,
DetectionLoss,
)
from grasp_det_seg.algos.semantic_seg import SemanticSegLoss, SemanticSegAlgo
from grasp_det_seg.models.det_seg import DetSegNet
from grasp_det_seg.config import load_config
from grasp_det_seg.utils.misc import (
config_to_string,
norm_act_from_config,
freeze_params,
)
from grasp_det_seg.data_OCID import OCIDTestTransform
from grasp_det_seg.utils.parallel import PackedSequence
from grasp_det_seg.data_OCID.OCID_class_dict import cls_list, colors_list
from grasp_det_seg.utils.snapshot import resume_from_snapshot
import logging
def log_debug(msg, *args):
logging.getLogger().debug(msg, *args)
def log_info(msg, *args):
logging.getLogger().info(msg, *args)
def Rotate2D(pts, cnt, ang):
    # Rotate 2D points `pts` around center `cnt` by `ang` degrees.
    # Uses NumPy directly; the scipy.dot/scipy.array/scipy.cos/scipy.sin aliases
    # used previously have been removed from recent SciPy releases.
    ang = np.deg2rad(ang)
    rot = np.array(
        [[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]]
    )
    return np.dot(pts - cnt, rot) + cnt
def make_config(config_path):
log_debug("Loading configuration from %s", config_path)
conf = load_config(config_path, config_path)
log_debug("\n%s", config_to_string(conf))
return conf
def make_model(config):
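    # Assembles the full grasp detection + segmentation network from the config:
    # a ResNet-101 backbone wrapped in an FPN, an RPN for proposals, an ROI head
    # for oriented grasp detection, and a DeepLab-style semantic segmentation head.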
body_config = config["body"]
fpn_config = config["fpn"]
rpn_config = config["rpn"]
roi_config = config["roi"]
sem_config = config["sem"]
general_config = config["general"]
classes = {
"total": int(general_config["num_things"]) + int(general_config["num_stuff"]),
"stuff": int(general_config["num_stuff"]),
"thing": int(general_config["num_things"]),
"semantic": int(general_config["num_semantic"]),
}
# BN + activation
norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)
# Create backbone
log_debug("Creating backbone model %s", body_config["body"])
# body_fn = models.__dict__["net_" + body_config["body"]]
body_fn = models.__dict__["net_resnet101"]
body_params = (
body_config.getstruct("body_params") if body_config.get("body_params") else {}
)
body = body_fn(norm_act=norm_act_static, **body_params)
# if body_config.get("weights"):
# body_config["weights"] = "/app/data/weights/resnet101"
# body.load_state_dict(torch.load(body_config["weights"], map_location="cpu"))
weights_path = "/app/data/weights/resnet101"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
body.load_state_dict(torch.load(weights_path, map_location=device))
# Freeze parameters
for n, m in body.named_modules():
for mod_id in range(1, body_config.getint("num_frozen") + 1):
if ("mod%d" % mod_id) in n:
freeze_params(m)
body_channels = body_config.getstruct("out_channels")
# Create FPN
fpn_inputs = fpn_config.getstruct("inputs")
fpn = FPN(
[body_channels[inp] for inp in fpn_inputs],
fpn_config.getint("out_channels"),
fpn_config.getint("extra_scales"),
norm_act_static,
fpn_config["interpolation"],
)
body = FPNBody(body, fpn, fpn_inputs)
# Create RPN
proposal_generator = ProposalGenerator(
rpn_config.getfloat("nms_threshold"),
rpn_config.getint("num_pre_nms_train"),
rpn_config.getint("num_post_nms_train"),
rpn_config.getint("num_pre_nms_val"),
rpn_config.getint("num_post_nms_val"),
rpn_config.getint("min_size"),
)
anchor_matcher = AnchorMatcher(
rpn_config.getint("num_samples"),
rpn_config.getfloat("pos_ratio"),
rpn_config.getfloat("pos_threshold"),
rpn_config.getfloat("neg_threshold"),
rpn_config.getfloat("void_threshold"),
)
rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
rpn_algo = RPNAlgoFPN(
proposal_generator,
anchor_matcher,
rpn_loss,
rpn_config.getint("anchor_scale"),
rpn_config.getstruct("anchor_ratios"),
fpn_config.getstruct("out_strides"),
rpn_config.getint("fpn_min_level"),
rpn_config.getint("fpn_levels"),
)
rpn_head = RPNHead(
fpn_config.getint("out_channels"),
len(rpn_config.getstruct("anchor_ratios")),
1,
rpn_config.getint("hidden_channels"),
norm_act_dynamic,
)
# Create detection network
prediction_generator = PredictionGenerator(
roi_config.getfloat("nms_threshold"),
roi_config.getfloat("score_threshold"),
roi_config.getint("max_predictions"),
)
proposal_matcher = ProposalMatcher(
classes,
roi_config.getint("num_samples"),
roi_config.getfloat("pos_ratio"),
roi_config.getfloat("pos_threshold"),
roi_config.getfloat("neg_threshold_hi"),
roi_config.getfloat("neg_threshold_lo"),
roi_config.getfloat("void_threshold"),
)
roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
roi_size = roi_config.getstruct("roi_size")
roi_algo = DetectionAlgoFPN(
prediction_generator,
proposal_matcher,
roi_loss,
classes,
roi_config.getstruct("bbx_reg_weights"),
roi_config.getint("fpn_canonical_scale"),
roi_config.getint("fpn_canonical_level"),
roi_size,
roi_config.getint("fpn_min_level"),
roi_config.getint("fpn_levels"),
)
roi_head = FPNROIHead(
fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic
)
# Create semantic segmentation network
sem_loss = SemanticSegLoss(ohem=sem_config.getfloat("ohem"))
sem_algo = SemanticSegAlgo(sem_loss, classes["semantic"])
sem_head = FPNSemanticHeadDeeplab(
fpn_config.getint("out_channels"),
sem_config.getint("fpn_min_level"),
sem_config.getint("fpn_levels"),
classes["semantic"],
pooling_size=sem_config.getstruct("pooling_size"),
norm_act=norm_act_static,
)
# Create final network
return DetSegNet(
body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes
)
def test(model, img, visualize=True, **varargs):
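    # Single-image inference: preprocess with OCIDTestTransform (scale the shortest
    # side to 480 with the longest side capped at 640, then normalize), run the
    # network without gradients, and reduce the raw predictions to per-class best
    # grasps plus an overlay image via output_pred().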
model.eval()
shortest_size = 480
longest_max_size = 640
rgb_mean = (0.485, 0.456, 0.406)
rgb_std = (0.229, 0.224, 0.225)
preprocess = OCIDTestTransform(
shortest_size=shortest_size,
longest_max_size=longest_max_size,
rgb_mean=rgb_mean,
rgb_std=rgb_std,
)
img_tensor, im_size = preprocess(img)
with torch.no_grad():
# Extract data
packed_img = PackedSequence(img_tensor)
print(packed_img[0].shape)
# exit()
# Run network
_, pred, conf = model(img=packed_img, do_loss=False, do_prediction=True)
# Update meters
res = output_pred(pred, img, im_size, visualize)
return res
def output_pred(raw_pred, img, im_size_, visualize):
# https://github.com/stefan-ainetter/grasp_det_seg_cnn/blob/main/grasp_det_seg/data_OCID/OCID_class_dict.py
# ^ class_list and color_list
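    # For each segmented class, keep the highest-confidence grasp candidate whose
    # center lies on that class's mask (candidates near the bottom edge are skipped
    # as likely gripper detections), draw its rotated box on an overlay of the image
    # and segmentation, and return (list of {cls, obj, bbox, r_bbox} dicts, overlay).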
output = []
for i, (sem_pred, bbx_pred, cls_pred, obj_pred) in enumerate(
zip(
raw_pred["sem_pred"],
raw_pred["bbx_pred"],
raw_pred["cls_pred"],
raw_pred["obj_pred"],
)
):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sem_pred = sem_pred.to(device)
sem_pred = np.asarray(sem_pred.detach().cpu().numpy(), dtype=np.uint8)
# print(f"sem_pred: {sem_pred.shape}")
# print(f"bbx_pred: {bbx_pred.shape}")
# print(f"cls_pred: {cls_pred.shape}")
# print(f"obj_pred: {obj_pred.shape}")
seg_mask_vis = np.zeros((im_size_[0], im_size_[1], 3))
cls_labels = np.unique(sem_pred)
for cls in cls_labels:
seg_mask_vis[sem_pred == cls] = colors_list[cls]
mask_per_label = np.zeros_like(sem_pred)
mask_per_label[sem_pred == cls] = 1
iou_seg = np.sum(mask_per_label)
if iou_seg < 100:
continue
# cv2.imshow(f"Mask {cls_list[cls]}", mask_per_label.astype(np.uint8)*255)
# cv2.waitKey(0)
print(f"{cls_list[cls]} {sum(map(sum,mask_per_label))}")
# mask_per_label = mask_per_label.astype(np.uint8) * 255
try:
img_mask = img * 0.25 + seg_mask_vis * 0.75
except ValueError as e:
log_debug(f"Error: {e}")
img_mask = seg_mask_vis
img_mask = img_mask.astype(np.uint8) * 255
for cls in cls_labels:
if cls == 0:
continue
best_confidence = 0
bbox_best = None
r_bbox_best = None
print(f"Getting best for cls: {cls} {cls_list[cls]}")
for bbx_pred_i, cls_pred_i, obj_pred_i in zip(bbx_pred, cls_pred, obj_pred):
threshold = 0.06
cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
continue
actual_class = sem_pred[int(cnt[1]), int(cnt[0])]
if actual_class != cls:
continue
if obj_pred_i.item() > threshold:
# print(f"obj_pred_i: {obj_pred_i.item()}")
# print(f"cls_pred_i: {cls_pred_i} {cls_list[cls_pred_i.item()]}")
# print(f"bbx_pred_i: {bbx_pred_i}")
pt1 = (int(bbx_pred_i[0]), int(bbx_pred_i[1]))
pt2 = (int(bbx_pred_i[2]), int(bbx_pred_i[3]))
newcls = cls_pred_i.item()
if newcls > 17:
assert False
num_classes_theta = 18
# theta = ((180 / num_classes_theta) * newcls) + 5 # 5 degrees offset?
theta = (180 / num_classes_theta) * newcls
pts = np.array(
[
[pt1[0], pt1[1]],
[pt2[0], pt1[1]],
[pt2[0], pt2[1]],
[pt1[0], pt2[1]],
]
)
cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
r_bbox_ = Rotate2D(pts, cnt, 90 - theta)
r_bbox_ = r_bbox_.astype("int16")
# print(f"r_bbox_: {r_bbox_}")
# if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
# continue
# filter out gripper - any result with the center in the bottom 100 pixels
# TODO: find a better solution
if cnt[1] > im_size_[0] - 100:
continue
# if sem_pred[int(cnt[1]), int(cnt[0])] == cls:
# print(f"Seg class: {cls_list[sem_pred[int(cnt[1]), int(cnt[0])]]}")
if obj_pred_i.item() >= best_confidence:
best_confidence = obj_pred_i.item()
bbox_best = bbx_pred_i
r_bbox_best = copy.deepcopy(r_bbox_)
if bbox_best is not None:
res = {
"cls": cls,
"obj": best_confidence,
"bbox": bbox_best,
"r_bbox": r_bbox_best,
}
cnt = np.array(
[
(int(bbox_best[0]) + int(bbox_best[2])) / 2,
(int(bbox_best[1]) + int(bbox_best[3])) / 2,
]
)
print(
f"res {cls_list[cls]} | {cls_list[sem_pred[int(cnt[1]), int(cnt[0])]]}: {res}"
)
output.append(res)
pt1 = (int(bbox_best[0]), int(bbox_best[1]))
pt2 = (int(bbox_best[2]), int(bbox_best[3]))
# cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
cv2.putText(
img_mask,
cls_list[cls],
(int(bbox_best[0]), int(bbox_best[1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 255, 0),
2,
cv2.LINE_AA,
)
if r_bbox_best is not None:
cv2.line(
img_mask,
tuple(r_bbox_best[0]),
tuple(r_bbox_best[1]),
(255, 0, 0),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[1]),
tuple(r_bbox_best[2]),
(0, 0, 255),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[2]),
tuple(r_bbox_best[3]),
(255, 0, 0),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[3]),
tuple(r_bbox_best[0]),
(0, 0, 255),
2,
)
# print(f"output: {output}")
# img_mask = (img * 0.25 + seg_mask_vis * 0.75)
# img_mask = img_mask.astype(np.uint8)*255
if visualize:
cv2.imshow("Image Mask", img_mask)
cv2.waitKey(0)
return output, img_mask
class GraspServer:
def __init__(self):
config_path = "/app/data/config/test.ini"
print(f"Loading configuration from {config_path}")
config = make_config(config_path)
print("Creating model...")
self.model = make_model(config)
weights_path = "/app/data/weights/model_last.pth.tar"
log_debug("Loading snapshot from %s", weights_path)
resume_from_snapshot(
self.model, weights_path, ["body", "rpn_head", "roi_head", "sem_head"]
)
self.visualize = False
def detect(self, img):
res, img = test(self.model, img, visualize=self.visualize)
# Convert to JSON serializable format
res_dict = []
for r in res:
res_dict.append(
{
"cls": int(r["cls"]),
"cls_name": cls_list[int(r["cls"])],
"obj": r["obj"],
"bbox": r["bbox"].tolist(),
"r_bbox": r["r_bbox"].tolist(),
}
)
return res_dict, Image.fromarray(img)
def detect_from_path(self, img_path):
img_bgr = cv2.imread(img_path)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
return self.detect(img_rgb)
def test_detect(self):
return self.detect_from_path(
"/app/data/OCID_grasp/ARID20/table/top/seq08/rgb/result_2018-08-21-14-44-31.png"
)
if __name__ == "__main__":
print("Testing Grasp_Det_Seg")
config_path = "/app/data/config/test.ini"
print(f"Loading configuration from {config_path}")
config = make_config(config_path)
print("Creating model...")
model = make_model(config)
weights_path = "/app/data/weights/model_last.pth.tar"
log_debug("Loading snapshot from %s", weights_path)
snapshot = resume_from_snapshot(
model, weights_path, ["body", "rpn_head", "roi_head", "sem_head"]
)
# rank, world_size = distributed.get_rank(), distributed.get_world_size()
# model = DistributedDataParallel(model, device_ids=None, output_device=None, find_unused_parameters=True)
print("Loading image...")
# img_path = "/app/data/OCID_grasp/ARID20/table/top/seq12/rgb/result_2018-08-21-16-53-16.png"
# img_path = "/app/data/OCID_grasp/ARID20/table/top/seq04/rgb/result_2018-08-21-12-13-01.png"
# img_path="/app/data/OCID_grasp/ARID20/table/top/seq08/rgb/result_2018-08-21-14-44-31.png"
img_path = "/app/data/test.png"
img_bgr = cv2.imread(img_path)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
# cv2.imshow("Image", img_rgb)
# cv2.waitKey(0)
print("Testing model...")
test(model, img_rgb)
| 17,224 | Python | 34.010163 | 111 | 0.521598 |
AshisGhosh/roboai/grasping/robotic_grasping_server/pyproject.toml | [tool.poetry]
name = "robotic-grasping-server"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.1"
uvicorn = "^0.29.0"
python-multipart = "^0.0.9"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 350 | TOML | 18.499999 | 46 | 0.677143 |
AshisGhosh/roboai/grasping/robotic_grasping_server/app/main.py | from PIL import Image
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from robotic_grasping_server.robotic_grasping import GraspGenerator
import logging
log = logging.getLogger("robotic_grasping_server app")
log.setLevel(logging.INFO)
app = FastAPI()
grasp = GraspGenerator(visualize=True)
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robotic grasping server."}
@app.on_event("startup")
async def startup_event():
log.info("Starting up the grasp server...")
grasp.load_model()
@app.post("/get_grasps")
async def get_grasps(
rgb_image: UploadFile = File(...), depth_image: UploadFile = File(...)
):
log.debug("Received get_grasp request.")
rgb_image = Image.open(rgb_image.file)
depth_image = Image.open(depth_image.file)
return grasp.run(rgb_image, depth_image)
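# Example client call for the endpoint above (a sketch; assumes the server is
# exposed on localhost:8000 -- adjust host/port to your deployment):
#   curl -X POST http://localhost:8000/get_grasps \
#        -F "rgb_image=@rgb.png" -F "depth_image=@depth.png"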
| 1,325 | Python | 24.5 | 78 | 0.704906 |
AshisGhosh/roboai/grasping/robotic_grasping_server/robotic_grasping_server/robotic_grasping.py | import matplotlib.pyplot as plt
import numpy as np
import torch
from hardware.device import get_device
from inference.post_process import post_process_output
from utils.data.camera_data import CameraData
from utils.dataset_processing.grasp import detect_grasps
from utils.visualisation.plot import plot_grasp
from PIL import Image
class GraspGenerator:
def __init__(
self,
saved_model_path="/robotic-grasping/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98",
visualize=False,
force_cpu=False,
):
        self.saved_model_path = saved_model_path
self.model = None
self.device = get_device(force_cpu=force_cpu)
self.cam_data = CameraData(
include_depth=True, include_rgb=True, output_size=360
)
if visualize:
self.fig = plt.figure(figsize=(10, 10))
else:
self.fig = None
def load_model(self):
# monkey patching
np.float = float
print("Loading model... ")
self.model = torch.load(self.saved_model_path, map_location=self.device)
self.model.to(self.device) # Ensure model parameters are on the correct device
def generate(self, rgb, depth):
x, depth_img, rgb_img = self.cam_data.get_data(rgb=rgb, depth=depth)
# Predict the grasp pose using the saved model
with torch.no_grad():
xc = x.to(self.device)
pred = self.model.predict(xc)
q_img, ang_img, width_img = post_process_output(
pred["pos"], pred["cos"], pred["sin"], pred["width"]
)
grasps = detect_grasps(q_img, ang_img, width_img)
for grasp in grasps:
print(grasp.as_gr)
if self.fig:
plot_grasp(
fig=self.fig,
rgb_img=self.cam_data.get_rgb(rgb, False),
grasps=grasps,
save=True,
)
return grasps
def run_test(self):
rgb = Image.open("shared/data/test_pair1_rgb.png")
rgb = np.array(rgb)
print(rgb.shape)
depth = Image.open("shared/data/test_pair1_depth.png")
depth = np.array(depth)
depth = np.expand_dims(depth, axis=2)
print(depth.shape)
self.generate(rgb, depth)
def run(self, rgb, depth):
rgb = np.array(rgb)
depth = np.array(depth)
depth = np.expand_dims(depth, axis=2)
grasps = self.generate(rgb, depth)
grasp_dict = []
        if grasps:
            print(grasps[0].as_gr)
for grasp in grasps:
r_bbox = [[pt[0], pt[1]] for pt in grasp.as_gr.points]
grasp_dict.append({"r_bbox": r_bbox})
return grasp_dict
if __name__ == "__main__":
np.float = float
generator = GraspGenerator(
saved_model_path="/robotic-grasping/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98",
visualize=True,
force_cpu=False,
)
generator.load_model()
generator.run_test()
| 3,082 | Python | 28.932039 | 123 | 0.594744 |
AshisGhosh/roboai/isaac_sim/pyproject.toml | [tool.poetry]
name = "isaac-sim"
version = "0.1.0"
description = ""
authors = ["Ashis Ghosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.2"
uvicorn = "^0.29.0"
python-multipart = "^0.0.9"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 337 | TOML | 17.777777 | 47 | 0.664688 |
AshisGhosh/roboai/isaac_sim/README.md | # README
## Launch Docker
`docker compose up isaac-sim`
Enter docker:
`docker exec -it roboai-isaac-sim-1 bash`
## Run pthon standalone (will launch sim as well)
`./python.sh roboai/robosim.py`
## Run jupyter
**Requires a local Nucleus server:**
https://docs.omniverse.nvidia.com/nucleus/latest/workstation.html
`./jupyter_notebook.sh --allow-root roboai/test_nb.`
## Isaac Slow Loading Issue (v2023.1.0)
https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/issues/92#issuecomment-1797057491
```
def check_server(server: str, path: str) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(10000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
```
to:
```
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(20000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
```
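Note: the patched `check_server` above (explicit `timeout` argument and the longer 20 s hang-detection window) is the version shipped in this repo under `isaac_sim/omniverse_patch/nucleus.py`.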
| 2,007 | Markdown | 27.685714 | 86 | 0.633284 |
AshisGhosh/roboai/isaac_sim/app/main.py | import nest_asyncio
import logging
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from roboai.robosim import SimManager
from roboai.standalone_stream_server import StreamServer
nest_asyncio.apply()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Create FastAPI instance
app = FastAPI()
robosim = SimManager()
ss = StreamServer()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robosim server."}
@app.on_event("startup")
def startup_event():
pass
@app.post("/test")
async def test():
ss.start()
return True
@app.post("/start_sim")
async def start_sim():
# threading.Thread(target=robosim.start_sim).start()
robosim.start_sim(headless=True)
return True
@app.post("/run_sim")
async def run_sim():
return robosim.run_sim()
@app.post("/close_sim")
async def close_sim():
return await robosim.close_sim()
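# Example client calls for the endpoints above (a sketch; assumes the server is
# exposed on localhost:8000 -- adjust host/port to your deployment):
#   curl -X POST http://localhost:8000/start_sim
#   curl -X POST http://localhost:8000/run_sim
#   curl -X POST http://localhost:8000/close_sim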
| 1,389 | Python | 19.144927 | 69 | 0.698344 |
AshisGhosh/roboai/isaac_sim/omniverse_patch/nucleus.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import json
import os
# python
import typing
from collections import namedtuple
from urllib.parse import urlparse
import carb
# omniverse
import omni.client
from omni.client._omniclient import CopyBehavior, Result
from omni.isaac.version import get_version
class Version(namedtuple("Version", "major minor patch")):
def __new__(cls, s):
return super().__new__(cls, *map(int, s.split(".")))
def __repr__(self):
return ".".join(map(str, self))
def get_url_root(url: str) -> str:
"""Get root from URL or path
Args:
url (str): full http or omniverse path
Returns:
str: Root path or URL or Nucleus server
"""
supported_list = ["omniverse", "http", "https"]
protocol = urlparse(url).scheme
if protocol not in supported_list:
carb.log_warn("Unable to find root for {}".format(url))
return ""
server = f"{protocol}://{urlparse(url).netloc}"
return server
def create_folder(server: str, path: str) -> bool:
"""Create a folder on server
Args:
server (str): Name of Nucleus server
path (str): Path to folder
Returns:
bool: True if folder is created successfully
"""
carb.log_info("Create {} folder on {} Server".format(path, server))
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result = omni.client.create_folder("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {} Server has {} folder created".format(server, path))
return True
else:
carb.log_warn(
"Failure: Server {} not able to create {} folder".format(server, path)
)
return False
def delete_folder(server: str, path: str) -> bool:
"""Remove folder and all of its contents
Args:
server (str): Name of Nucleus server
path (str): Path to folder
Returns:
bool: True if folder is deleted successfully
"""
carb.log_info("Cleanup {} folder on {} Server".format(path, server))
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result = omni.client.delete("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {} Server has {} folder deleted".format(server, path))
return True
else:
carb.log_warn(
"Failure: Server {} not able to delete {} folder".format(server, path)
)
return False
async def _list_files(url: str) -> typing.Tuple[str, typing.List]:
"""List files under a URL
Args:
url (str): URL of Nucleus server with path to folder
Returns:
root (str): Root of URL of Nucleus server
paths (typing.List): List of path to each file
"""
root, paths = await _collect_files(url)
return root, paths
async def download_assets_async(
src: str,
dst: str,
progress_callback,
concurrency: int = 10,
copy_behaviour: omni.client._omniclient.CopyBehavior = CopyBehavior.OVERWRITE,
copy_after_delete: bool = True,
timeout: float = 300.0,
) -> omni.client._omniclient.Result:
"""Download assets from S3 bucket
Args:
src (str): URL of S3 bucket as source
dst (str): URL of Nucleus server to copy assets to
progress_callback: Callback function to keep track of progress of copy
    concurrency (int): Number of concurrent copy operations. Default value: 10
copy_behaviour (omni.client._omniclient.CopyBehavior): Behavior if the destination exists. Default value: OVERWRITE
copy_after_delete (bool): True if destination needs to be deleted before a copy. Default value: True
timeout (float): Default value: 300 seconds
Returns:
Result (omni.client._omniclient.Result): Result of copy
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
count = 0
result = Result.ERROR
if copy_after_delete and check_server(dst, ""):
carb.log_info("Deleting existing folder {}".format(dst))
delete_folder(dst, "")
sem = asyncio.Semaphore(concurrency)
carb.log_info("Listing {} ...".format(src))
root_source, paths = await _list_files("{}".format(src))
total = len(paths)
carb.log_info("Found {} files from {}".format(total, root_source))
for entry in reversed(paths):
count += 1
path = os.path.relpath(entry, root_source).replace("\\", "/")
carb.log_info(
"Downloading asset {} of {} from {}/{} to {}/{}".format(
count, total, root_source, path, dst, path
)
)
try:
async with sem:
result = await asyncio.wait_for(
omni.client.copy_async(
"{}/{}".format(root_source, path),
"{}/{}".format(dst, path),
copy_behaviour,
),
timeout=timeout,
)
if result != Result.OK:
carb.log_warn(f"Failed to copy {path} to {dst}.")
return Result.ERROR_ACCESS_LOST
except asyncio.CancelledError:
carb.log_warn("Assets download cancelled.")
return Result.ERROR
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
return Result.ERROR
progress_callback(count, total)
return result
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(20000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
async def check_server_async(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path (asynchronous version).
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
try:
result, _ = await asyncio.wait_for(
omni.client.stat_async("{}{}".format(server, path)), timeout
)
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
else:
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
except asyncio.TimeoutError:
carb.log_warn(f"check_server_async() timeout {timeout}")
return False
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
return False
def build_server_list() -> typing.List:
"""Return list with all known servers to check
Returns:
all_servers (typing.List): List of servers found
"""
mounted_drives = carb.settings.get_settings().get_settings_dictionary(
"/persistent/app/omniverse/mountedDrives"
)
all_servers = []
if mounted_drives is not None:
mounted_dict = json.loads(mounted_drives.get_dict())
for drive in mounted_dict.items():
all_servers.append(drive[1])
else:
carb.log_info("/persistent/app/omniverse/mountedDrives setting not found")
return all_servers
def find_nucleus_server(suffix: str) -> typing.Tuple[bool, str]:
"""Attempts to determine best Nucleus server to use based on existing mountedDrives setting and the
default server specified in json config at "/persistent/isaac/asset_root/". Call is blocking
Args:
suffix (str): Path to folder to search for. Default value: /Isaac
Returns:
bool: True if Nucleus server with suffix is found
url (str): URL of found Nucleus
"""
carb.log_warn("find_nucleus_server() is deprecated. Use get_assets_root_path().")
return False, ""
def get_server_path(suffix: str = "") -> typing.Union[str, None]:
"""Tries to find a Nucleus server with specific path
Args:
suffix (str): Path to folder to search for.
Returns:
url (str): URL of Nucleus server with path to folder.
Returns None if Nucleus server not found.
"""
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
server_root = get_url_root(default_asset_root)
if server_root:
result = check_server(server_root, suffix)
if result:
return server_root
carb.log_warn("Could not find Nucleus server with {} folder".format(suffix))
return None
async def get_server_path_async(suffix: str = "") -> typing.Union[str, None]:
"""Tries to find a Nucleus server with specific path (asynchronous version).
Args:
suffix (str): Path to folder to search for.
Returns:
url (str): URL of Nucleus server with path to folder.
Returns None if Nucleus server not found.
"""
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
server_root = get_url_root(default_asset_root)
if server_root:
result = await check_server_async(server_root, suffix)
if result:
return server_root
carb.log_warn("Could not find Nucleus server with {} folder".format(suffix))
return None
def verify_asset_root_path(path: str) -> typing.Tuple[omni.client.Result, str]:
"""Attempts to determine Isaac assets version and check if there are updates.
(asynchronous version)
Args:
path (str): URL or path of asset root to verify
Returns:
omni.client.Result: OK if Assets verified
ver (str): Version of Isaac Sim assets
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
ver_asset = Version("0.0.0")
version_core, _, _, _, _, _, _, _ = get_version()
ver_app = Version(version_core)
# Get asset version
carb.log_info(f"Verifying {path}")
try:
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
omni.client.push_base_url(f"{path}/")
file_path = omni.client.combine_with_base_url("version.txt")
# carb.log_warn(f"Looking for version file at: {file_path}")
result, _, file_content = omni.client.read_file(file_path)
if result != omni.client.Result.OK:
carb.log_info(f"Unable to find version file: {file_path}.")
else:
ver_asset = Version(memoryview(file_content).tobytes().decode())
except ValueError:
carb.log_info(f"Unable to parse version file: {file_path}.")
except UnicodeDecodeError:
carb.log_info(f"Unable to read version file: {file_path}.")
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
# Compare versions
# carb.log_warn(f"ver_asset = {ver_asset.major}.{ver_asset.minor}.{ver_asset.patch}")
# carb.log_warn(f"ver_app = {ver_app.major}.{ver_app.minor}.{ver_app.patch}")
if ver_asset == Version("0.0.0"):
carb.log_info(f"Error verifying Isaac Sim assets at {path}")
return Result.ERROR_NOT_FOUND, ""
elif ver_asset.major != ver_app.major:
carb.log_info(
f"Unsupported version of Isaac Sim assets found at {path}: {ver_asset}"
)
return Result.ERROR_BAD_VERSION, ver_asset
elif ver_asset.minor != ver_app.minor:
carb.log_info(
f"Unsupported version of Isaac Sim assets found at {path}: {ver_asset}"
)
return Result.ERROR_BAD_VERSION, ver_asset
else:
return Result.OK, ver_asset
def get_full_asset_path(path: str) -> typing.Union[str, None]:
"""Tries to find the full asset path on connected servers
Args:
path (str): Path of asset from root to verify
Returns:
url (str): URL or full path to assets.
Returns None if assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/default setting
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = check_server(default_asset_root, path)
if result:
carb.log_info("Asset path found at {}{}".format(default_asset_root, path))
return default_asset_root + path
# 2 - Check mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, path)
if result:
carb.log_info("Asset path found at {}{}".format(server_name, path))
return server_name + path
carb.log_warn("Could not find assets path: {}".format(path))
return None
async def get_full_asset_path_async(path: str) -> typing.Union[str, None]:
"""Tries to find the full asset path on connected servers (asynchronous version).
Args:
path (str): Path of asset from root to verify
Returns:
url (str): URL or full path to assets.
Returns None if assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/default setting
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = await check_server_async(default_asset_root, path)
if result:
carb.log_info("Asset path found at {}{}".format(default_asset_root, path))
return default_asset_root + path
# 2 - Check mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = await check_server_async(server_name, path)
if result:
carb.log_info("Asset path found at {}{}".format(server_name, path))
return server_name + path
carb.log_warn("Could not find assets path: {}".format(path))
return None
def get_nvidia_asset_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the NVIDIA assets
Returns:
url (str): URL or root path to NVIDIA assets folder.
Returns None if NVIDIA assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/nvidia setting
carb.log_info("Check /persistent/isaac/asset_root/nvidia setting")
nvidia_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/nvidia"
)
if nvidia_asset_root:
result = check_server(nvidia_asset_root, "")
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_asset_root))
return nvidia_asset_root
# 2 - Check root on /persistent/isaac/asset_root/nvidia and mountedDrives setting for /NVIDIA folder
nvidia_asset_path = "/NVIDIA"
server_root = get_url_root(nvidia_asset_path)
if server_root:
result = check_server(server_root, nvidia_asset_path)
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_asset_root))
return server_root + nvidia_asset_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, nvidia_asset_path)
if result:
carb.log_info("NVIDIA assets found at {}".format(server_name))
return server_name + nvidia_asset_path
# 3 - Check cloud for http://omniverse-content-production.s3-us-west-2.amazonaws.com folder
nvidia_assets_url = "http://omniverse-content-production.s3-us-west-2.amazonaws.com"
carb.log_info("Check {}".format(nvidia_assets_url))
if nvidia_assets_url:
result = check_server(nvidia_assets_url, "/Assets")
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_assets_url))
return nvidia_assets_url
carb.log_warn("Could not find NVIDIA assets folder")
return None
def get_isaac_asset_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets
Returns:
url (str): URL or root path to Isaac Sim assets folder.
Returns None if Isaac Sim assets not found.
"""
_, _, version_major, version_minor, _, _, _, _ = get_version()
# 1 - Check /persistent/isaac/asset_root/isaac setting
carb.log_info("Check /persistent/isaac/asset_root/isaac setting")
isaac_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/isaac"
)
if isaac_asset_root:
result = check_server(isaac_asset_root, "")
if result:
result, ver_asset = verify_asset_root_path(isaac_asset_root)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, isaac_asset_root
)
)
return isaac_asset_root
# 2 - Check root on /persistent/isaac/asset_root/default and mountedDrives setting for /Isaac folder
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
isaac_path = "/Isaac"
server_root = get_url_root(isaac_asset_root)
if default_asset_root:
result = check_server(default_asset_root, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(default_asset_root + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, default_asset_root + isaac_path
)
)
return default_asset_root + isaac_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_name + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_name + isaac_path
)
)
return server_name + isaac_path
# 3 - Check root on /persistent/isaac/asset_root/default and mountedDrives setting for /NVIDIA/Assets/Isaac/{version_major}.{version_minor} folder
isaac_path = f"/NVIDIA/Assets/Isaac/{version_major}.{version_minor}"
server_root = get_url_root(isaac_asset_root)
if server_root:
result = check_server(server_root, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_root + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_root + isaac_path
)
)
return server_root + isaac_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_name + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_name + isaac_path
)
)
return server_name + isaac_path
# 4 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assetsURL = carb.settings.get_settings().get_as_string(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Check {}".format(cloud_assetsURL))
if cloud_assetsURL:
result = check_server(cloud_assetsURL, "")
if result:
result, ver_asset = verify_asset_root_path(cloud_assetsURL)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, cloud_assetsURL
)
)
return cloud_assetsURL
carb.log_warn("Could not find Isaac Sim assets folder")
return None
def get_assets_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets on a Nucleus server
Returns:
url (str): URL of Nucleus server with root path to assets folder.
Returns None if Nucleus server not found.
"""
# get timeout
timeout = carb.settings.get_settings().get("/persistent/isaac/asset_root/timeout")
if not isinstance(timeout, (int, float)):
timeout = 10.0
# 1 - Check /persistent/isaac/asset_root/default setting
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = check_server(default_asset_root, "/Isaac", timeout)
if result:
result = check_server(default_asset_root, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(default_asset_root))
return default_asset_root
# 2 - Check root on mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
# carb.log_info("Found {}".format(server_name))
result = check_server(server_name, "/Isaac", timeout)
if result:
result = check_server(server_name, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(server_name))
return server_name
# 3 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assets_url = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Checking {}...".format(cloud_assets_url))
if cloud_assets_url:
result = check_server(cloud_assets_url, "/Isaac", timeout)
if result:
result = check_server(cloud_assets_url, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(cloud_assets_url))
return cloud_assets_url
carb.log_warn("Could not find assets root folder")
return None
async def get_assets_root_path_async() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets on a Nucleus server (asynchronous version).
Returns:
url (str): URL of Nucleus server with root path to assets folder.
Returns None if Nucleus server not found.
"""
# get timeout
timeout = carb.settings.get_settings().get("/persistent/isaac/asset_root/timeout")
if not isinstance(timeout, (int, float)):
timeout = 10.0
# 1 - Check /persistent/isaac/asset_root/default setting
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = await check_server_async(default_asset_root, "/Isaac", timeout)
if result:
result = await check_server_async(default_asset_root, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(default_asset_root))
return default_asset_root
# 2 - Check root on mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
# carb.log_info("Found {}".format(server_name))
result = await check_server_async(server_name, "/Isaac", timeout)
if result:
result = await check_server_async(server_name, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(server_name))
return server_name
# 3 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assets_url = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Checking {}...".format(cloud_assets_url))
if cloud_assets_url:
result = await check_server_async(cloud_assets_url, "/Isaac", timeout)
if result:
result = await check_server_async(cloud_assets_url, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(cloud_assets_url))
return cloud_assets_url
carb.log_warn("Could not find assets root folder")
return None
def get_assets_server() -> typing.Union[str, None]:
"""Tries to find a server with the Isaac Sim assets
Returns:
url (str): URL of Nucleus server with the Isaac Sim assets
Returns None if Nucleus server not found.
"""
carb.log_warn("get_assets_server() is deprecated. Use get_server_path().")
return None
async def _collect_files(url: str) -> typing.Tuple[str, typing.List]:
"""Collect files under a URL.
Args:
url (str): URL of Nucleus server with path to folder
Returns:
root (str): Root of URL of Nucleus server
paths (typing.List): List of path to each file
"""
paths = []
if await is_dir_async(url):
root = url + "/"
paths.extend(await recursive_list_folder(root))
return url, paths
else:
if await is_file_async(url):
root = os.path.dirname(url)
return root, [url]
async def is_dir_async(path: str) -> bool:
"""Check if path is a folder
Args:
path (str): Path to folder
Returns:
bool: True if path is a folder
"""
result, folder = await asyncio.wait_for(omni.client.list_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a folder: {result}")
return True if len(folder) > 0 else False
async def is_file_async(path: str) -> bool:
"""Check if path is a file
Args:
path (str): Path to file
Returns:
bool: True if path is a file
"""
result, file = await asyncio.wait_for(omni.client.stat_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a file: {result}")
return False if file.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0 else True
def is_file(path: str) -> bool:
"""Check if path is a file
Args:
path (str): Path to file
Returns:
bool: True if path is a file
"""
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result, file = omni.client.stat(path)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a file: {result}")
return False if file.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0 else True
async def recursive_list_folder(path: str) -> typing.List:
"""Recursively list all files
Args:
path (str): Path to folder
Returns:
paths (typing.List): List of path to each file
"""
paths = []
files, dirs = await list_folder(path)
paths.extend(files)
tasks = []
for dir in dirs:
tasks.append(asyncio.create_task(recursive_list_folder(dir)))
results = await asyncio.gather(*tasks)
for result in results:
paths.extend(result)
return paths
async def list_folder(path: str) -> typing.Tuple[typing.List, typing.List]:
"""List files and sub-folders from root path
Args:
path (str): Path to root folder
Raises:
Exception: When unable to find files under the path.
Returns:
files (typing.List): List of path to each file
dirs (typing.List): List of path to each sub-folder
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
files = []
dirs = []
carb.log_info(f"Collecting files for {path}")
result, entries = await asyncio.wait_for(omni.client.list_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to list entries for {path}: {result}")
for entry in entries:
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
full_path = omni.client.combine_urls(path, entry.relative_path)
if entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0:
dirs.append(full_path + "/")
else:
carb.log_info(f"Enqueuing {full_path} for processing")
files.append(full_path)
return files, dirs
| 30,457 | Python | 34.457509 | 150 | 0.61513 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/setup.py | from setuptools import find_packages, setup
package_name = "robot_api"
setup(
name=package_name,
version="0.0.0",
packages=find_packages(exclude=["test"]),
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
("share/" + package_name + "/config", ["config/moveit_franka_python.yaml"]),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="root",
maintainer_email="[email protected]",
description="TODO: Package description",
license="TODO: License declaration",
tests_require=["pytest"],
entry_points={
"console_scripts": [
"manipulation_example = robot_api.manipulation_example:main",
"manipulation = robot_api.manipulation:main",
"task_manager = robot_api.task_manager:main",
],
},
)
| 908 | Python | 30.344827 | 84 | 0.615639 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, "Found %d code style errors / warnings:\n" % len(
errors
) + "\n".join(errors)
| 878 | Python | 32.807691 | 74 | 0.730068 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[".", "test"])
assert rc == 0, "Found code style errors / warnings"
| 803 | Python | 32.499999 | 74 | 0.743462 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/test/test_copyright.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
import pytest
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(
reason="No copyright header has been placed in the generated source file."
)
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
rc = main(argv=[".", "test"])
assert rc == 0, "Found errors"
| 968 | Python | 33.607142 | 78 | 0.746901 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/robot_api/task_manager.py | from enum import Enum
from collections import deque
from uuid import uuid4
import threading
import asyncio
from pathlib import Path
from nicegui import Client, app, ui, ui_run
from abc import ABC, abstractmethod
import copy
# generic ros libraries
import rclpy
from rclpy.node import Node
from rclpy.action import ActionClient
from rclpy.executors import MultiThreadedExecutor
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Image as ROSImage
from roboai_interfaces.action import MoveArm, ControlGripper, GetGrasp
# RoboAI Interface imports
from starlette.responses import StreamingResponse
from PIL import Image
import io
import numpy as np
def pose_to_list(pose):
return [
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w,
]
class TaskStatus(Enum):
PENDING = "PENDING"
RUNNING = "RUNNING"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
ABORTED = "ABORTED"
PAUSED = "PAUSED"
class Task(ABC):
def __init__(self, name, logger=None) -> None:
self.name = name
self.status = TaskStatus.PENDING
self.uuid = uuid4()
self.logger = logger
self.result = None
self.log(f"TASK ({self.uuid}): {self.name} created; Status: {self.status}")
@abstractmethod
def run(self) -> None:
pass
def abort(self) -> None:
self.status = TaskStatus.ABORTED
def update_status(self, status) -> None:
self.status = status
self.log(f"TASK ({self.uuid}): {self.name} updated to: {status}")
def log(self, message) -> None:
if self.logger:
self.logger.info(message)
else:
print(message)
def __str__(self) -> str:
return f"{self.name}; Status: {self.status}"
def __repr__(self) -> str:
return super().__repr__() + f" {self.name}; Status: {self.status}"
class ActionClientTask(Task):
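    # Base class for tasks backed by a ROS 2 action client: run() sends the goal
    # asynchronously, goal_response_callback() checks acceptance, and
    # get_result_callback() maps the action result onto a TaskStatus.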
def __init__(self, name, action_client, action_type, logger=None) -> None:
super().__init__(name, logger)
self._action_client = action_client
self.action_type = action_type
self.goal_handle = None
def run(self) -> None:
self.log(f"Sending goal to action server: {self.action_type}")
self.update_status(TaskStatus.RUNNING)
try:
self.send_goal()
except Exception as e:
self.log(f"Error while sending goal: {e}")
self.update_status(TaskStatus.FAILURE)
@abstractmethod
def create_goal_msg(self) -> None:
pass
def send_goal(self) -> None:
goal_msg = self.create_goal_msg()
self._action_client.wait_for_server(timeout_sec=1)
future = self._action_client.send_goal_async(goal_msg)
future.add_done_callback(self.goal_response_callback)
def goal_response_callback(self, future) -> None:
self.goal_handle = future.result()
if not self.goal_handle.accepted:
self.log("Goal rejected :(")
self.update_status(TaskStatus.FAILURE)
return
self.log("Goal accepted :)")
# Wait for the result
result_future = self.goal_handle.get_result_async()
result_future.add_done_callback(self.get_result_callback)
def get_result_callback(self, future) -> None:
self.result = future.result().result
self.log(f"Result received: {self.result}, {type(self.result.status)}")
if self.result.status == "SUCCEEDED":
self.log(f"Result received: {self.result.message}")
self.update_status(TaskStatus.SUCCESS)
else:
self.log(f"Action did not succeed with status: {self.result.status}")
self.update_status(TaskStatus.FAILURE)
def abort(self) -> None:
if self.goal_handle:
self.goal_handle.cancel_goal()
self._action_client._cancel_goal(self.goal_handle)
super().abort()
class MoveArmTask(ActionClientTask):
def __init__(
self, name, goal: str | list[float], action_client, logger=None
) -> None:
        super().__init__(name, action_client, MoveArm, logger=logger)
self.goal = goal
def create_goal_msg(self) -> None:
goal_msg = MoveArm.Goal()
if isinstance(self.goal, str):
goal_msg.configuration_goal = self.goal
elif isinstance(self.goal, list):
if len(self.goal) == 6:
goal_msg.joint_goal = self.goal
elif len(self.goal) == 7:
goal_msg.cartesian_goal = self.goal
else:
raise ValueError(
f"Invalid goal length: {self.goal}, length: {len(self.goal)}"
)
elif isinstance(self.goal, PoseStamped):
goal_msg.cartesian_pose_goal = self.goal
else:
raise ValueError(f"Invalid goal: {self.goal}, type: {type(self.goal)}")
return goal_msg
class ControlGripperTask(ActionClientTask):
def __init__(self, name, goal: str, action_client, logger=None) -> None:
        super().__init__(name, action_client, ControlGripper, logger=logger)
self.goal = goal
def create_goal_msg(self) -> None:
goal_msg = ControlGripper.Goal()
if isinstance(self.goal, str):
goal_msg.goal_state = self.goal
else:
raise ValueError(f"Invalid goal: {self.goal}")
return goal_msg
class GetGraspTask(ActionClientTask):
def __init__(
self, name, goal_object: str, action_client, task_vars, logger=None
) -> None:
        super().__init__(name, action_client, GetGrasp, logger=logger)
self.goal_object = goal_object
self.task_vars = task_vars
def create_goal_msg(self) -> None:
goal_msg = GetGrasp.Goal()
if isinstance(self.goal_object, str):
goal_msg.object_name = self.goal_object
else:
raise ValueError(f"Invalid goal: {self.goal_object}")
return goal_msg
def get_result_callback(self, future) -> None:
self.result = future.result().result
self.log(f"Result received: {self.result}")
if self.result.success:
self.log("Grasp received")
self.update_status(TaskStatus.SUCCESS)
self.task_vars["grasp_pose"] = self.result.grasp
else:
self.log("Grasp not received")
self.update_status(TaskStatus.FAILURE)
class PlannerTask(Task):
def __init__(self, name, task_manager, task_vars, logger=None) -> None:
super().__init__(name, logger)
self.task_manager = task_manager
self.states = []
self.current_state = None
self.task_vars = task_vars
def run(self) -> None:
self.log(f"Running planner: {self.name}")
self.update_status(TaskStatus.RUNNING)
try:
self.plan()
except Exception as e:
self.log(f"Error while planning: {e}")
self.update_status(TaskStatus.FAILURE)
def get_next_state(self) -> str:
if self.current_state is None:
self.current_state = self.states.pop(0)
else:
self.current_state = self.states[self.states.index(self.current_state) + 1]
return self.current_state
@abstractmethod
def plan(self) -> None:
pass
class PickTask(PlannerTask):
def __init__(
self,
name,
task_manager,
task_vars,
object_name,
current_state=None,
logger=None,
) -> None:
super().__init__(name, task_manager, task_vars, logger)
self.object_name = object_name
self.states = [
"get_grasp",
"execute_grasp",
"move_to_ready",
]
self.current_state = current_state or self.states[0]
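    # plan() executes one stage of the pick state machine: it enqueues the tasks
    # for current_state and, through add_next_plan(), schedules a follow-up PickTask
    # for the next state, so later stages can use results (e.g. the grasp pose stored
    # in task_vars) produced by the tasks queued in earlier stages.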
def plan(self) -> None:
self.log(f"Running pick task: {self.name}")
self.update_status(TaskStatus.RUNNING)
if self.current_state == "get_grasp":
task = self.task_manager.add_task(
GetGraspTask(
name=f"Get grasp for {self.object_name}",
goal_object=self.object_name,
action_client=self.task_manager.get_grasp_action_client,
task_vars=self.task_vars,
),
after=self,
)
self.add_next_plan(after=task)
self.update_status(TaskStatus.SUCCESS)
return
if self.current_state == "execute_grasp":
grasp = copy.deepcopy(self.task_vars["grasp_pose"])
gripper_grasp = copy.deepcopy(grasp)
gripper_grasp.pose.position.z += 0.09
pre_grasp = copy.deepcopy(gripper_grasp)
pre_grasp.pose.position.z += 0.2
task = self.task_manager.add_task_to_move_to_position(
pre_grasp, name="Move to pregrasp", after=self
)
task = self.task_manager.add_task_to_control_gripper(
"open", name="Open gripper", after=task
)
task = self.task_manager.add_task_to_move_to_position(
gripper_grasp, name="Move to grasp", after=task
)
task = self.task_manager.add_task_to_control_gripper(
"close", name="Close gripper", after=task
)
task = self.task_manager.add_task_to_move_to_position(
pre_grasp, name="Move to pregrasp", after=task
)
self.add_next_plan(after=task)
self.update_status(TaskStatus.SUCCESS)
return
if self.current_state == "move_to_ready":
self.task_manager.add_task(
MoveArmTask(
name="Move to ready",
goal="ready",
action_client=self.task_manager.move_arm_action_client,
),
after=self,
)
self.update_status(TaskStatus.SUCCESS)
return
def add_next_plan(self, after: Task = None) -> None:
next_state = self.get_next_state()
self.task_manager.add_task(
PickTask(
name=f"Pick {self.object_name} - {next_state}",
task_manager=self.task_manager,
task_vars=self.task_vars,
object_name=self.object_name,
current_state=next_state,
),
after=after,
)
class TaskManager(Node):
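    # ROS 2 node that owns the task queue, the NiceGUI control panel and the
    # action clients used by tasks (/move_arm, /control_gripper, /get_grasp).
    # Tasks are executed sequentially by run_tasks().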
def __init__(self) -> None:
super().__init__("task_manager")
self.tasks = deque()
self.current_task = None
self.task_history = []
self.task_vars = {}
self.move_arm_action_client = ActionClient(self, MoveArm, "/move_arm")
self.control_gripper_action_client = ActionClient(
self, ControlGripper, "/control_gripper"
)
self.get_grasp_action_client = ActionClient(self, GetGrasp, "/get_grasp")
self.setup_gui()
self.get_logger().info("Task Manager initialized")
self.add_task(
MoveArmTask(
name="Move to extended",
goal="extended",
action_client=self.move_arm_action_client,
)
)
self.add_task(
ControlGripperTask(
name="Open gripper",
goal="open",
action_client=self.control_gripper_action_client,
)
)
self.add_task(
MoveArmTask(
name="Move to ready",
goal="ready",
action_client=self.move_arm_action_client,
)
)
self.add_task(
ControlGripperTask(
name="Close gripper",
goal="close",
action_client=self.control_gripper_action_client,
)
)
def setup_gui(self) -> None:
with Client.auto_index_client:
ui.label("Task Manager").style("font-size: 24px")
self.grid = ui.aggrid(
{
"defaultColDef": {"flex": 1},
"columnDefs": [
{"headerName": "Name", "field": "name", "sortable": True},
{"headerName": "Status", "field": "status", "sortable": True},
],
"rowData": [],
},
).classes("h-96")
self.update_grid()
arm_positions = ["extended", "ready", "pick_center", "drop"]
self.position_input = ui.input(
label="Enter Move Arm position:",
placeholder=f"{', '.join(arm_positions)}",
autocomplete=arm_positions,
)
ui.button(
"Add Move Arm Task (Configuration)",
on_click=self.add_move_arm_task_click,
)
gripper_positions = ["open", "close"]
self.gripper_position_input = ui.input(
label="Enter Control Gripper position:",
placeholder=f"{', '.join(gripper_positions)}",
autocomplete=gripper_positions,
)
ui.button(
"Add Control Gripper Task", on_click=self.add_control_gripper_task_click
)
self.numerical_list_input = ui.input(
label="Enter cartesian position as list of 7 numbers separated by commas:",
placeholder="x, y, z, qx, qy, qz, qw",
)
ui.button(
"Add Move Arm Task (Cartesian)",
on_click=self.add_move_arm_task_cartesian_click,
)
ui.button("Add Pick Tasks", on_click=self.add_pick_tasks_click)
ui.button(
"Run Tasks", on_click=lambda: asyncio.create_task(self.run_tasks())
)
ui.button(
"Abort Current Task",
on_click=lambda: asyncio.create_task(self.abort_current_task()),
)
ui.button("Clear Tasks", on_click=self.clear_tasks)
ui.button("Retry Last Task", on_click=self.retry_last_task)
def update_grid(self) -> None:
task_dict = [
{"name": task.name, "status": task.status} for task in self.task_history
] + [{"name": task.name, "status": task.status} for task in self.tasks]
self.grid.options["rowData"] = task_dict
self.get_logger().debug(f"{task_dict}")
self.grid.update()
def add_move_arm_task_click(self, event):
position = (
self.position_input.value
) # Get the current value from the input field
self.add_task_to_move_to_position(position)
def add_task_to_move_to_position(
self,
position: str | list[float] | PoseStamped,
name: str = None,
after: Task = None,
    ) -> Task:
if name is None:
name = f"Move to {position}"
return self.add_task(
MoveArmTask(
name=name,
goal=position,
action_client=self.move_arm_action_client,
),
after=after,
)
def add_control_gripper_task_click(self, event):
position = self.gripper_position_input.value
self.add_task_to_control_gripper(position)
def add_task_to_control_gripper(
self, position: str, name=None, after: Task = None
    ) -> Task:
if name is None:
name = f"Control gripper to {position}"
return self.add_task(
ControlGripperTask(
name=f"Control gripper to {position}",
goal=position,
action_client=self.control_gripper_action_client,
),
after=after,
)
def add_move_arm_task_cartesian_click(self, event):
position = [float(x) for x in self.numerical_list_input.value.split(",")]
self.add_task_to_move_to_position(
position, name=f"Move to cartesian {position}"
)
def add_pick_tasks_click(self, event):
# position = [0.5, 0.1, 0.3, 0.924, -0.383, 0.0, 0.0]
# self.add_pick_tasks(position)
self.add_task(
PickTask(
name="Pick task",
task_manager=self,
task_vars=self.task_vars,
object_name="cereal",
)
)
def add_pick_tasks(self, grasp_pose: list[float]) -> None:
pre_grasp = grasp_pose.copy()
pre_grasp[2] += 0.1
self.add_task_to_move_to_position(pre_grasp)
self.add_task_to_control_gripper("open")
self.add_task_to_move_to_position(grasp_pose)
self.add_task_to_control_gripper("close")
self.add_task_to_move_to_position(pre_grasp)
self.add_task_to_move_to_position("ready")
    def add_task(self, task: Task, after: Task = None) -> Task:
task.logger = self.get_logger()
if after:
if after in self.tasks:
self.tasks.insert(self.tasks.index(after) + 1, task)
elif after in self.task_history:
self.tasks.insert(0, task)
else:
self.get_logger().error(f"Task not found: {after}, cannot add task.")
else:
self.tasks.append(task)
self.update_grid()
self.get_logger().info(f"Task added: {task}")
return task
def remove_task(self, task: Task) -> None:
self.tasks.remove(task)
self.update_grid()
self.get_logger().info(f"Task removed: {task}")
def clear_tasks(self) -> None:
self.tasks.clear()
self.task_history.clear()
self.update_grid()
self.get_logger().info("Tasks cleared")
def retry_last_task(self) -> None:
if self.task_history:
self.retry_task(self.task_history[-1])
else:
self.get_logger().info("No tasks in history to retry")
def retry_task(self, task: Task) -> None:
self.task_history.remove(task)
task.status = TaskStatus.PENDING
self.tasks.appendleft(task)
self.update_grid()
self.get_logger().info(f"Task retried: {task}")
async def run_tasks(self) -> None:
while self.tasks:
self.set_current_task(self.tasks.popleft())
self.current_task.run()
self.update_grid()
while self.current_task.status == TaskStatus.RUNNING:
await asyncio.sleep(0.1)
if self.current_task.status in [TaskStatus.ABORTED, TaskStatus.FAILURE]:
self.get_logger().error(f"Task failed: {self.current_task}")
self.update_grid()
return
self.set_current_task(None)
self.get_logger().info("All tasks completed")
def set_current_task(self, task: Task | None) -> None:
self.current_task = task
if task:
self.task_history.append(task)
self.get_logger().info(f"Current task: {task}")
self.update_grid()
async def abort_current_task(self) -> None:
if self.current_task:
self.current_task.abort()
self.get_logger().info(f"Current task aborted: {self.current_task}")
else:
self.get_logger().info("No current task to abort")
def get_tasks(self) -> deque[Task]:
return self.tasks
def destroy_node(self) -> None:
self.move_arm_action_client.destroy()
self.abort_current_task()
super().destroy_node()
global agentview_image
global task_manager
class RoboAIInterface(Node):
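    # Bridges ROS 2 and the FastAPI app: caches the latest agent-view RGB image
    # and exposes HTTP endpoints to fetch it and to enqueue/run tasks.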
def __init__(self, task_manager: TaskManager):
super().__init__("roboai_interface")
self.task_manager = task_manager
self.agentview_image = None
self.create_subscription(
ROSImage,
"/agentview/rgb",
self.image_callback,
10,
)
self.get_logger().info("RoboAI Interface initialized")
def image_callback(self, msg: ROSImage) -> None:
self.agentview_image = msg
global agentview_image
agentview_image = msg
@classmethod
@app.get("/get_image")
    async def get_image() -> StreamingResponse | None:
if agentview_image:
# return base64.b64encode(agentview_image.data).decode("utf-8")
# Image.frombytes("RGB", (agentview_image.width, agentview_image.height), agentview_image.data)
print(f"Image type: {type(agentview_image.data)}")
img_array = np.frombuffer(agentview_image.data, np.uint8).reshape(
agentview_image.height, agentview_image.width, 3
)
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
return None
@classmethod
@app.post("/add_task")
    async def add_task(task: str) -> bool:
if task == "pick":
task_manager.add_task(
PickTask(
name="Pick task",
task_manager=task_manager,
task_vars=task_manager.task_vars,
object_name="cereal",
)
)
return True
return False
@app.post("/run_tasks")
async def run_tasks() -> None:
asyncio.create_task(task_manager.run_tasks())
def destroy_node(self):
self.task_manager.destroy_node()
super().destroy_node()
def main() -> None:
# NOTE: This function is defined as the ROS entry point in setup.py, but it's empty to enable NiceGUI auto-reloading
# https://github.com/zauberzeug/nicegui/blob/main/examples/ros2/ros2_ws/src/gui/gui/node.py
pass
def ros_main() -> None:
rclpy.init()
global task_manager
task_manager = TaskManager()
executor = MultiThreadedExecutor()
executor.add_node(task_manager)
task_manager.get_logger().info("Task Manager started")
roboai_interface = RoboAIInterface(task_manager)
executor.add_node(roboai_interface)
task_manager.get_logger().info("RoboAI Interface started")
try:
executor.spin()
finally:
task_manager.destroy_node()
roboai_interface.destroy_node()
rclpy.shutdown()
app.on_startup(lambda: threading.Thread(target=ros_main).start())
ui_run.APP_IMPORT_STRING = f"{__name__}:app" # ROS2 uses a non-standard module name, so we need to specify it here
ui.run(uvicorn_reload_dirs=str(Path(__file__).parent.resolve()), favicon="🤖")
| 22,691 | Python | 32.224012 | 120 | 0.561236 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/robot_api/manipulation.py | import time
from typing import List
from dataclasses import dataclass
# generic ros libraries
import rclpy
from tf2_ros import Buffer, TransformListener
from tf2_ros import LookupException, ConnectivityException, ExtrapolationException
from tf2_geometry_msgs import do_transform_pose_stamped
# moveit python library
from moveit.core.robot_state import RobotState
from moveit.planning import (
MoveItPy,
PlanRequestParameters,
)
from rclpy.node import Node
from geometry_msgs.msg import PoseStamped, Pose
from moveit_msgs.msg import Constraints
from rclpy.action import ActionServer, CancelResponse, GoalResponse, ActionClient
from control_msgs.action import GripperCommand
from visualization_msgs.msg import Marker
from roboai_interfaces.action import MoveArm, ControlGripper
@dataclass
class ArmState:
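    # Named arm target defined either by joint values or by an end-effector
    # pose; exactly one of the two must be provided (enforced in __post_init__).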
JOINT_VALUES = "joint_values"
POSE = "pose"
name: str
type: str
joint_values: List[float | int] | None = None
pose: List[float | int] | None = None
def __post_init__(self):
if self.joint_values is not None and self.pose is not None:
raise ValueError("Only one of joint_values or pose can be set")
if self.joint_values is None and self.pose is None:
raise ValueError("Either joint_values or pose must be set")
@classmethod
    def get_pose_array_as_pose(cls, pose_array: List[float | int]) -> Pose:
pose = Pose()
pose.position.x = pose_array[0]
pose.position.y = pose_array[1]
pose.position.z = pose_array[2]
pose.orientation.x = pose_array[3]
pose.orientation.y = pose_array[4]
pose.orientation.z = pose_array[5]
pose.orientation.w = pose_array[6]
return pose
@classmethod
def get_pose_array_as_pose_stamped(
        cls, pose_array: List[float | int]
    ) -> PoseStamped:
        pose_stamped = PoseStamped()
        pose_stamped.pose = cls.get_pose_array_as_pose(pose_array)
return pose_stamped
# Arm states
# LOOK_DOWN_QUAT = [0.924, -0.383, 0.0, 0.0]
LOOK_DOWN_QUAT = [1.0, 0.0, 0.0, 0.0]
PICK_CENTER = ArmState(
name="pick_center", type=ArmState.POSE, pose=[0.5, 0.0, 0.5, *LOOK_DOWN_QUAT]
)
DROP = ArmState(name="drop", type=ArmState.POSE, pose=[0.5, -0.5, 0.5, *LOOK_DOWN_QUAT])
ARM_STATE_LIST = [PICK_CENTER, DROP]
ARM_STATES = {state.name: state for state in ARM_STATE_LIST}
class ManipulationAPI(Node):
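    # MoveItPy-based manipulation node exposing two action servers:
    # /move_arm (configuration, joint, cartesian or constraint goals) and
    # /control_gripper (open/close via the GripperCommand controller).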
def __init__(
self,
robot_arm_planning_component="panda_arm",
robot_arm_eef_link="panda_hand",
):
super().__init__("manipulation_api")
moveit_config = self._get_moveit_config()
self.tf_buffer = Buffer()
self.tf_listener = TransformListener(self.tf_buffer, self)
self.robot_arm_planning_component_name = robot_arm_planning_component
self.robot_arm_eef_link = robot_arm_eef_link
self.robot = MoveItPy(node_name="moveit_py", config_dict=moveit_config)
self.robot_arm_planning_component = self.robot.get_planning_component(
self.robot_arm_planning_component_name
)
self._action_server = ActionServer(
self,
MoveArm,
"move_arm",
execute_callback=self.execute_callback,
goal_callback=self.goal_callback,
cancel_callback=self.cancel_callback,
)
self._last_gripper_result = None
self._gripper_action_client = ActionClient(
self, GripperCommand, "/panda_hand_controller/gripper_cmd"
)
self._gripper_action_server = ActionServer(
self,
ControlGripper,
"control_gripper",
execute_callback=self.gripper_execute_callback,
goal_callback=self.gripper_goal_callback,
cancel_callback=self.cancel_callback,
)
self.cartesian_goal_marker_publisher = self.create_publisher(
Marker, "cartesian_goal_marker", 10
)
self.get_logger().info("Manipulation API initialized")
def _get_moveit_config(self):
from moveit_configs_utils import MoveItConfigsBuilder
from ament_index_python.packages import get_package_share_directory
moveit_config = (
MoveItConfigsBuilder(
robot_name="panda", package_name="moveit_resources_panda_moveit_config"
)
.robot_description(file_path="config/panda.urdf.xacro")
.trajectory_execution(file_path="config/gripper_moveit_controllers.yaml")
.moveit_cpp(
file_path=get_package_share_directory("robot_api")
+ "/config/moveit_franka_python.yaml"
)
.to_moveit_configs()
)
moveit_config = moveit_config.to_dict()
return moveit_config
def destroy(self):
self._action_server.destroy()
super().destroy_node()
def goal_callback(self, goal_request):
"""Accept or reject a client request to begin an action."""
# This server allows multiple goals in parallel
self.get_logger().info("Received goal request")
return GoalResponse.ACCEPT
def cancel_callback(self, goal_handle):
"""Accept or reject a client request to cancel an action."""
# TODO: Implement cancel
self.get_logger().info("Received cancel request")
self.get_logger().error("Cancel not implemented")
return CancelResponse.REJECT
def execute_callback(self, goal_handle):
goal = goal_handle.request
self.get_logger().info(f"Received goal: {goal}")
result = MoveArm.Result()
status = self.move_arm(
goal.configuration_goal,
goal.cartesian_goal,
goal.joint_goal,
goal.constraints_goal,
goal.cartesian_pose_goal,
goal.start_state,
)
self.get_logger().info(f"Move arm status: {status.status}")
result.status = status.status
if result.status == "SUCCEEDED":
goal_handle.succeed()
else:
goal_handle.abort()
return result
def gripper_goal_callback(self, goal_request):
"""Accept or reject a client request to begin an action."""
self.get_logger().info("Received gripper goal request")
return GoalResponse.ACCEPT
def gripper_execute_callback(self, goal_handle):
goal = goal_handle.request
self.get_logger().info(f"Received gripper goal: {goal}")
result = ControlGripper.Result()
status = self.control_gripper(goal.goal_state)
self.get_logger().info(f"Gripper status: {status}")
result.status = status
if result.status == "SUCCEEDED":
goal_handle.succeed()
else:
goal_handle.abort()
return result
def publish_cartesian_goal_marker(self, pose_stamped: PoseStamped):
marker = Marker()
marker.header = pose_stamped.header
marker.type = Marker.ARROW
marker.action = Marker.ADD
marker.pose = pose_stamped.pose
marker.scale.x = 0.1
marker.scale.y = 0.01
marker.scale.z = 0.01
marker.color.a = 1.0
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 0.0
self.cartesian_goal_marker_publisher.publish(marker)
def plan(
self,
goal_state,
start_state=None,
):
self.get_logger().info("Planning trajectory")
self.get_logger().info(
f"Goal state: {goal_state} type {type(goal_state)}: {isinstance(goal_state, str)}"
)
plan_request_parameters = None
if start_state is None:
self.robot_arm_planning_component.set_start_state_to_current_state()
else:
self.robot_arm_planning_component.set_start_state(robot_state=start_state)
if isinstance(goal_state, str):
self.get_logger().info(f"Setting goal state to {goal_state}")
self.robot_arm_planning_component.set_goal_state(
configuration_name=goal_state
)
elif isinstance(goal_state, RobotState):
self.robot_arm_planning_component.set_goal_state(robot_state=goal_state)
elif isinstance(goal_state, PoseStamped):
self.robot_arm_planning_component.set_goal_state(
pose_stamped_msg=goal_state, pose_link=self.robot_arm_eef_link
)
plan_request_parameters = PlanRequestParameters(
self.robot, "pilz_industrial_motion_planner"
)
plan_request_parameters.planning_time = 1.0
plan_request_parameters.planning_attempts = 1
plan_request_parameters.max_velocity_scaling_factor = 0.1
plan_request_parameters.max_acceleration_scaling_factor = 0.1
plan_request_parameters.planning_pipeline = "pilz_industrial_motion_planner"
plan_request_parameters.planner_id = "PTP"
elif isinstance(goal_state, Constraints):
self.robot_arm_planning_component.set_goal_state(
motion_plan_constraints=[goal_state]
)
elif isinstance(goal_state, list):
self.robot_arm_planning_component.set_goal_state(
motion_plan_constraints=goal_state
)
else:
raise ValueError("Invalid goal state type")
self.get_logger().info(
f"Planning trajectory for goal of type {type(goal_state)}"
)
start_time = time.time()
if plan_request_parameters is not None:
plan_result = self.robot_arm_planning_component.plan(
single_plan_parameters=plan_request_parameters
)
else:
plan_result = self.robot_arm_planning_component.plan()
end_time = time.time()
self.get_logger().info("Planning completed")
self.get_logger().info(f"Planning time: {end_time - start_time}")
return plan_result
def execute(self, trajectory):
self.get_logger().info("Executing trajectory")
return self.robot.execute(trajectory, controllers=[])
        # execution_manager = self.robot.get_trajectory_execution_manager()
# current_status = execution_manager.get_execution_status()
# # https://moveit.picknik.ai/main/api/html/structmoveit__controller__manager_1_1ExecutionStatus.html
def move_arm(
self,
configuration_goal=None,
cartesian_goal=None,
joint_goal=None,
constraints_goal=None,
cartesian_pose_goal=None,
start_state=None,
):
self.get_logger().info("Moving arm")
if start_state:
raise NotImplementedError("Custom start state not implemented")
if configuration_goal:
if configuration_goal in ["extended", "ready"]:
goal_state = configuration_goal
elif configuration_goal in ARM_STATES:
goal_state = ARM_STATES[configuration_goal]
if goal_state.type == ArmState.JOINT_VALUES:
return self.move_arm(joint_goal=goal_state.joint_values)
elif goal_state.type == ArmState.POSE:
return self.move_arm(cartesian_goal=goal_state.pose)
else:
raise ValueError("Invalid configuration goal")
elif cartesian_goal:
pose_stamped = PoseStamped()
pose_stamped.header.frame_id = "panda_link0"
pose_stamped.pose = ArmState.get_pose_array_as_pose(cartesian_goal)
goal_state = pose_stamped
self.publish_cartesian_goal_marker(goal_state)
elif joint_goal:
robot_model = self.robot.get_robot_model()
robot_state = RobotState(robot_model)
robot_state.set_joint_group_positions(
self.robot_arm_planning_component_name, joint_goal
)
goal_state = robot_state
elif constraints_goal:
raise NotImplementedError("Constraints goal not implemented")
goal_state = constraints_goal
elif cartesian_pose_goal:
goal_state = cartesian_pose_goal
if goal_state.header.frame_id == "":
goal_state.header.frame_id = "panda_link0"
if goal_state.header.frame_id != "panda_link0":
self.get_logger().warn(
f"Transforming goal state from {goal_state.header.frame_id} to panda_link0"
)
self.get_logger().info(f"Goal state: {type(goal_state)}")
try:
transform = self.tf_buffer.lookup_transform(
"panda_link0",
goal_state.header.frame_id,
goal_state.header.stamp,
timeout=rclpy.duration.Duration(seconds=1),
)
goal_state = do_transform_pose_stamped(goal_state, transform)
except (
LookupException,
ConnectivityException,
ExtrapolationException,
) as e:
self.get_logger().error(f"Failed to transform point: {str(e)}")
self.publish_cartesian_goal_marker(goal_state)
else:
raise ValueError("No goal state provided")
plan_result = self.plan(goal_state)
return self.execute(plan_result.trajectory)
def gripper_send_goal(self, goal: str):
goal_msg = GripperCommand.Goal()
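        # 0.04 m per finger is fully open for the Franka/Panda hand; 0.0 closes
        # the fingers.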
goal_msg.command.position = 0.04 if goal == "open" else 0.0
self._gripper_action_client.wait_for_server(timeout_sec=1)
future = self._gripper_action_client.send_goal_async(goal_msg)
future.add_done_callback(self.goal_response_callback)
def goal_response_callback(self, future) -> None:
self.goal_handle = future.result()
if not self.goal_handle.accepted:
self.get_logger().error("Goal rejected :(")
return
self.get_logger().info("Goal accepted :)")
# Wait for the result
result_future = self.goal_handle.get_result_async()
result_future.add_done_callback(self.gripper_get_result_callback)
def gripper_get_result_callback(self, future) -> None:
result = future.result().result
self.get_logger().info(
f"Result received: {result}, reached goal: {result.reached_goal}, stalled: {result.stalled}"
)
if result.reached_goal or result.stalled:
self.get_logger().info("Gripper command success.")
else:
self.get_logger().error("Gripper command failed.")
self._last_gripper_result = result
def control_gripper(self, goal: str):
self._last_gripper_result = None
if goal not in ["open", "close"]:
raise ValueError("Invalid gripper goal")
self.get_logger().info(f"Control gripper to {goal}")
self.gripper_send_goal(goal)
while self._last_gripper_result is None:
rclpy.spin_once(self, timeout_sec=0.1)
self.get_logger().info(f"Result: {self._last_gripper_result}")
return "SUCCEEDED"
def main():
rclpy.init()
node = ManipulationAPI()
try:
while True:
rclpy.spin_once(node)
except KeyboardInterrupt:
node.get_logger().info("Shutting down")
node.destroy()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 15,500 | Python | 36.172662 | 109 | 0.608968 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/robot_api/manipulation_example.py | #!/usr/bin/env python3
"""
A script to outline the fundamentals of the moveit_py motion planning API.
"""
import time
# generic ros libraries
import rclpy
from rclpy.logging import get_logger
# moveit python library
from moveit.core.robot_state import RobotState
from moveit.planning import (
MoveItPy,
MultiPipelinePlanRequestParameters,
)
def plan_and_execute(
robot,
planning_component,
logger,
single_plan_parameters=None,
multi_plan_parameters=None,
sleep_time=0.0,
):
"""Helper function to plan and execute a motion."""
# plan to goal
logger.info("Planning trajectory")
if multi_plan_parameters is not None:
plan_result = planning_component.plan(
multi_plan_parameters=multi_plan_parameters
)
elif single_plan_parameters is not None:
plan_result = planning_component.plan(
single_plan_parameters=single_plan_parameters
)
else:
plan_result = planning_component.plan()
# execute the plan
if plan_result:
logger.info("Executing plan")
robot_trajectory = plan_result.trajectory
robot.execute(robot_trajectory, controllers=[])
else:
logger.error("Planning failed")
time.sleep(sleep_time)
def main():
###################################################################
# MoveItPy Setup
###################################################################
rclpy.init()
logger = get_logger("moveit_py.pose_goal")
from moveit_configs_utils import MoveItConfigsBuilder
from ament_index_python.packages import get_package_share_directory
moveit_config = (
MoveItConfigsBuilder(
robot_name="panda", package_name="moveit_resources_panda_moveit_config"
)
.robot_description(file_path="config/panda.urdf.xacro")
.trajectory_execution(file_path="config/gripper_moveit_controllers.yaml")
.moveit_cpp(
file_path=get_package_share_directory("robot_api")
+ "/config/moveit_franka_python.yaml"
)
.to_moveit_configs()
)
moveit_config = moveit_config.to_dict()
# instantiate MoveItPy instance and get planning component
panda = MoveItPy(node_name="moveit_py", config_dict=moveit_config)
panda_arm = panda.get_planning_component("panda_arm")
logger.info("MoveItPy instance created")
###########################################################################
# Plan 1 - set states with predefined string
###########################################################################
# set plan start state using predefined state
panda_arm.set_start_state(configuration_name="ready")
# set pose goal using predefined state
panda_arm.set_goal_state(configuration_name="extended")
# plan to goal
plan_and_execute(panda, panda_arm, logger, sleep_time=3.0)
###########################################################################
# Plan 2 - set goal state with RobotState object
###########################################################################
# instantiate a RobotState instance using the current robot model
robot_model = panda.get_robot_model()
robot_state = RobotState(robot_model)
# randomize the robot state
robot_state.set_to_random_positions()
# set plan start state to current state
panda_arm.set_start_state_to_current_state()
# set goal state to the initialized robot state
logger.info("Set goal state to the initialized robot state")
panda_arm.set_goal_state(robot_state=robot_state)
# plan to goal
plan_and_execute(panda, panda_arm, logger, sleep_time=3.0)
###########################################################################
# Plan 3 - set goal state with PoseStamped message
###########################################################################
# set plan start state to current state
panda_arm.set_start_state_to_current_state()
# set pose goal with PoseStamped message
from geometry_msgs.msg import PoseStamped
pose_goal = PoseStamped()
pose_goal.header.frame_id = "panda_link0"
pose_goal.pose.orientation.w = 1.0
pose_goal.pose.position.x = 0.28
pose_goal.pose.position.y = -0.2
pose_goal.pose.position.z = 0.5
panda_arm.set_goal_state(pose_stamped_msg=pose_goal, pose_link="panda_link8")
# plan to goal
plan_and_execute(panda, panda_arm, logger, sleep_time=3.0)
###########################################################################
# Plan 4 - set goal state with constraints
###########################################################################
# set plan start state to current state
panda_arm.set_start_state_to_current_state()
# set constraints message
from moveit.core.kinematic_constraints import construct_joint_constraint
joint_values = {
"panda_joint1": -1.0,
"panda_joint2": 0.7,
"panda_joint3": 0.7,
"panda_joint4": -1.5,
"panda_joint5": -0.7,
"panda_joint6": 2.0,
"panda_joint7": 0.0,
}
robot_state.joint_positions = joint_values
joint_constraint = construct_joint_constraint(
robot_state=robot_state,
joint_model_group=panda.get_robot_model().get_joint_model_group("panda_arm"),
)
panda_arm.set_goal_state(motion_plan_constraints=[joint_constraint])
# plan to goal
plan_and_execute(panda, panda_arm, logger, sleep_time=3.0)
###########################################################################
# Plan 5 - Planning with Multiple Pipelines simultaneously
###########################################################################
# set plan start state to current state
panda_arm.set_start_state_to_current_state()
# set pose goal with PoseStamped message
panda_arm.set_goal_state(configuration_name="ready")
# initialise multi-pipeline plan request parameters
multi_pipeline_plan_request_params = MultiPipelinePlanRequestParameters(
panda, ["ompl_rrtc", "pilz_lin", "chomp_planner"]
)
# plan to goal
plan_and_execute(
panda,
panda_arm,
logger,
multi_plan_parameters=multi_pipeline_plan_request_params,
sleep_time=3.0,
)
if __name__ == "__main__":
main()
| 6,354 | Python | 31.927461 | 85 | 0.572081 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/config/moveit_franka_python.yaml | planning_scene_monitor_options:
name: "planning_scene_monitor"
robot_description: "robot_description"
joint_state_topic: "/joint_states"
attached_collision_object_topic: "/moveit_cpp/planning_scene_monitor"
publish_planning_scene_topic: "/moveit_cpp/publish_planning_scene"
monitored_planning_scene_topic: "/moveit_cpp/monitored_planning_scene"
wait_for_initial_state_timeout: 10.0
planning_pipelines:
pipeline_names: ["ompl", "pilz_industrial_motion_planner", "chomp"]
plan_request_params:
planning_attempts: 1
planning_pipeline: ompl
max_velocity_scaling_factor: 1.0
max_acceleration_scaling_factor: 1.0
ompl_rrtc: # Namespace for individual plan request
plan_request_params: # PlanRequestParameters similar to the ones that are used by the single pipeline planning of moveit_cpp
planning_attempts: 1 # Number of attempts the planning pipeline tries to solve a given motion planning problem
planning_pipeline: ompl # Name of the pipeline that is being used
planner_id: "RRTConnectkConfigDefault" # Name of the specific planner to be used by the pipeline
max_velocity_scaling_factor: 1.0 # Velocity scaling parameter for the trajectory generation algorithm that is called (if configured) after the path planning
max_acceleration_scaling_factor: 1.0 # Acceleration scaling parameter for the trajectory generation algorithm that is called (if configured) after the path planning
planning_time: 1.0 # Time budget for the motion plan request. If the planning problem cannot be solved within this time, an empty solution with error code is returned
pilz_lin:
plan_request_params:
planning_attempts: 1
planning_pipeline: pilz_industrial_motion_planner
planner_id: "PTP"
max_velocity_scaling_factor: 1.0
max_acceleration_scaling_factor: 1.0
planning_time: 0.8
chomp_planner:
plan_request_params:
planning_attempts: 1
planning_pipeline: chomp
max_velocity_scaling_factor: 1.0
max_acceleration_scaling_factor: 1.0
planning_time: 1.5
| 2,036 | YAML | 45.295454 | 171 | 0.761297 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/isaac_tutorials/scripts/ros2_publisher.py | #!/usr/bin/env python3
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState
import numpy as np
import time
class TestROS2Bridge(Node):
def __init__(self):
super().__init__("test_ros2bridge")
# Create the publisher. This publisher will publish a JointState message to the /joint_command topic.
self.publisher_ = self.create_publisher(JointState, "joint_command", 10)
# Create a JointState message
self.joint_state = JointState()
self.joint_state.name = [
"panda_joint1",
"panda_joint2",
"panda_joint3",
"panda_joint4",
"panda_joint5",
"panda_joint6",
"panda_joint7",
"panda_finger_joint1",
"panda_finger_joint2",
]
num_joints = len(self.joint_state.name)
# make sure kit's editor is playing for receiving messages
self.joint_state.position = np.array(
[0.0] * num_joints, dtype=np.float64
).tolist()
self.default_joints = [0.0, -1.16, -0.0, -2.3, -0.0, 1.6, 1.1, 0.4, 0.4]
        # limiting the movements to a smaller range (this is not the range of the robot, just the range of the movement)
self.max_joints = np.array(self.default_joints) + 0.5
self.min_joints = np.array(self.default_joints) - 0.5
# position control the robot to wiggle around each joint
self.time_start = time.time()
timer_period = 0.05 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
def timer_callback(self):
self.joint_state.header.stamp = self.get_clock().now().to_msg()
joint_position = (
np.sin(time.time() - self.time_start)
* (self.max_joints - self.min_joints)
* 0.5
+ self.default_joints
)
self.joint_state.position = joint_position.tolist()
# Publish the message to the topic
self.publisher_.publish(self.joint_state)
def main(args=None):
rclpy.init(args=args)
ros2_publisher = TestROS2Bridge()
rclpy.spin(ros2_publisher)
# Destroy the node explicitly
ros2_publisher.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 2,719 | Python | 30.264367 | 119 | 0.630747 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/setup.py | import os
from setuptools import find_packages, setup
package_name = "data_gen"
setup(
name=package_name,
version="0.0.0",
packages=find_packages(exclude=["test"]),
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
(
os.path.join("share", package_name, "launch"),
[os.path.join("launch", "object_sorter.launch.py")],
),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="root",
maintainer_email="[email protected]",
description="TODO: Package description",
license="TODO: License declaration",
tests_require=["pytest"],
entry_points={
"console_scripts": ["object_sorter = data_gen.object_sorter:main"],
},
)
| 823 | Python | 27.413792 | 84 | 0.606318 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, "Found %d code style errors / warnings:\n" % len(
errors
) + "\n".join(errors)
| 878 | Python | 32.807691 | 74 | 0.730068 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[".", "test"])
assert rc == 0, "Found code style errors / warnings"
| 803 | Python | 32.499999 | 74 | 0.743462 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/test/test_copyright.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
import pytest
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(
reason="No copyright header has been placed in the generated source file."
)
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
rc = main(argv=[".", "test"])
assert rc == 0, "Found errors"
| 968 | Python | 33.607142 | 78 | 0.746901 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/launch/object_sorter.launch.py | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription(
[
Node(
package="data_gen",
executable="object_sorter",
name="object_sorter",
parameters=[
{"rgb_topic": "/agentview/rgb"},
{"segmentation_topic": "/agentview/instance_segmentation_repub"},
{"labels_topic": "/agentview/semantic_labels"},
],
)
]
)
| 571 | Python | 27.599999 | 85 | 0.499124 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/data_gen/data_gen/object_sorter.py | import json
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import String
from roboai_interfaces.msg import ObjectOrder
from roboai_interfaces.srv import SaveObjects
import numpy as np
import cv2
from cv_bridge import CvBridge
class ObjectSorter(Node):
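    # Subscribes to RGB and instance-segmentation images plus their label map,
    # publishes the left-to-right ordering of detected objects, and offers a
    # save_objects service that writes the latest image and ordering to disk.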
def __init__(self):
super().__init__("object_sorter")
# Parameters
self.declare_parameter("rgb_topic", "/rgb")
self.declare_parameter("segmentation_topic", "/instance_segmentation")
self.declare_parameter("labels_topic", "/segmentation_labels")
# Subscribers
self.rgb_subscriber = self.create_subscription(
Image,
self.get_parameter("rgb_topic").get_parameter_value().string_value,
self.rgb_callback,
10,
)
self.segmentation_subscriber = self.create_subscription(
Image,
self.get_parameter("segmentation_topic").get_parameter_value().string_value,
self.segmentation_callback,
10,
)
self.labels_subscriber = self.create_subscription(
String,
self.get_parameter("labels_topic").get_parameter_value().string_value,
self.labels_callback,
10,
)
# Publisher
self.order_publisher = self.create_publisher(ObjectOrder, "sorted_objects", 10)
# Service
self.save_service = self.create_service(
SaveObjects, "save_objects", self.save_objects_handler
)
# Variables
self.bridge = CvBridge()
self.current_rgb_image = None
self.current_segmentation_image = None
self.labels = {}
def rgb_callback(self, msg):
self.current_rgb_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
def segmentation_callback(self, msg):
segmentation_image = self.bridge.imgmsg_to_cv2(msg, "mono8")
self.current_segmentation_image = segmentation_image
self.process_and_publish_order(segmentation_image)
def labels_callback(self, msg):
# self.labels = json.loads(msg.data)
# labels indexes are 0, 1, 2, 3, etc but pixel values scale to 255
labels = json.loads(msg.data)
labels.pop("time_stamp", None)
        # pop entries whose value is BACKGROUND or UNLABELLED
labels = {
k: v for k, v in labels.items() if v not in ["BACKGROUND", "UNLABELLED"]
}
self.labels = {
int((i + 1) * 255 / len(labels)): v
for i, (k, v) in enumerate(labels.items())
}
def process_and_publish_order(self, segmentation_image):
object_order = self.sort_objects(segmentation_image)
if object_order:
order_msg = ObjectOrder(object_names=object_order)
self.order_publisher.publish(order_msg)
def sort_objects(self, segmentation_image):
unique_objects = np.unique(segmentation_image)
object_positions = {}
self.get_logger().info(f"Unique objects: {unique_objects}")
self.get_logger().info(f"Labels: {self.labels}")
for obj_id in unique_objects:
if (
obj_id == 0 or obj_id not in self.labels
): # Skip background or unknown labels
self.get_logger().info(f"Skipping object {obj_id}")
continue
y, x = np.where(segmentation_image == obj_id)
min_x = np.min(x) # Leftmost point
self.get_logger().info(f"Object {obj_id} at {min_x}")
object_positions[self.labels[obj_id]] = min_x
# Sort objects by their leftmost points
sorted_objects = sorted(object_positions.items(), key=lambda x: x[1])
return [obj[0] for obj in sorted_objects]
def save_objects_handler(self, request, response):
if self.current_rgb_image is None or self.current_segmentation_image is None:
response.success = False
response.message = "No data available to save"
return response
# Save the RGB image and the object order
cv2.imwrite("latest_rgb_image.png", self.current_rgb_image)
order = self.sort_objects(self.current_segmentation_image)
data = {"objects": {"count": len(order), "names": order}}
with open("latest_object_order.json", "w") as f:
json.dump(data, f)
response.success = True
response.message = "Saved successfully"
return response
def main(args=None):
rclpy.init(args=args)
node = ObjectSorter()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 4,659 | Python | 34.30303 | 88 | 0.6085 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/setup.py | import os
from setuptools import find_packages, setup
package_name = "grasps_ros"
setup(
name=package_name,
version="0.0.0",
packages=find_packages(exclude=["test"]),
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
(
os.path.join("share", package_name, "launch"),
[os.path.join("launch", "image_processor.launch.py")],
),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="root",
maintainer_email="[email protected]",
description="TODO: Package description",
license="TODO: License declaration",
tests_require=["pytest"],
entry_points={
"console_scripts": ["image_processor = grasps_ros.image_processor:main"],
},
)
| 833 | Python | 27.75862 | 84 | 0.611044 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, "Found %d code style errors / warnings:\n" % len(
errors
) + "\n".join(errors)
| 878 | Python | 32.807691 | 74 | 0.730068 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[".", "test"])
assert rc == 0, "Found code style errors / warnings"
| 803 | Python | 32.499999 | 74 | 0.743462 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/test/test_copyright.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
import pytest
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(
reason="No copyright header has been placed in the generated source file."
)
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
rc = main(argv=[".", "test"])
assert rc == 0, "Found errors"
| 968 | Python | 33.607142 | 78 | 0.746901 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/launch/image_processor.launch.py | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
node_action = Node(
package="grasps_ros",
executable="image_processor",
name="image_processor",
output="screen",
remappings=[
("/rgb_image", "/rgb"),
("/depth_image", "/depth"),
("/camera_info", "/camera_info"),
("/grasp_image", "/grasp_image"),
("/grasp_markers", "/grasp_markers"),
],
)
return LaunchDescription([node_action])
| 560 | Python | 25.714284 | 49 | 0.55 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/grasps_ros/grasps_ros/image_processor.py | import time
import cv2
from PIL import Image
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.action import ActionServer, CancelResponse, GoalResponse
from tf2_ros import Buffer, TransformListener
from tf2_ros import LookupException, ConnectivityException, ExtrapolationException
from tf2_geometry_msgs import do_transform_pose_stamped
import message_filters
from sensor_msgs.msg import Image as ROSImage
from sensor_msgs.msg import CameraInfo
from cv_bridge import CvBridge
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point, PoseStamped
from roboai_interfaces.action import GetGrasp
from roboai.shared.utils.grasp_client import get_grasp_from_image
def interpolate(p1, p2, num_points=5):
"""Interpolates num_points between p1 and p2, inclusive of p1 and p2."""
return [
((1 - t) * np.array(p1) + t * np.array(p2))
for t in np.linspace(0, 1, num_points)
]
def get_grid_from_box(box):
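    """Approximate a grid of sample points covering a rotated bounding box by
    interpolating along its edges and between points on adjacent edges; used
    to average depth over the grasp region."""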
sides = []
for i in range(len(box)):
p1 = box[i]
p2 = box[(i + 1) % len(box)]
if i < len(box) - 1:
sides.append(interpolate(p1, p2)[:-1])
else:
sides.append(interpolate(p1, p2))
grid = []
for i in range(len(sides[0])):
for j in range(len(sides[1])):
grid.extend(interpolate(sides[0][i], sides[1][j]))
return grid
def get_angle_from_box(box):
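    """Return the in-plane grasp angle (radians) from one edge of the box."""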
p1 = box[2]
p2 = box[3]
angle = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
return angle
class ImageProcessor(Node):
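    # Runs grasp detection on synchronized RGB-D frames: sends the RGB image to
    # the external grasp service, projects the returned 2D grasp boxes into 3D
    # world-frame poses, publishes debug markers/images, and serves the latest
    # grasp via the /get_grasp action.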
def __init__(self):
super().__init__("image_processor")
self.get_logger().info("Image Processor node has been initialized")
self.tf_buffer = Buffer()
self.tf_listener = TransformListener(self.tf_buffer, self)
self.last_image_ts = None
rgb_sub = message_filters.Subscriber(self, ROSImage, "/rgb_image")
depth_sub = message_filters.Subscriber(self, ROSImage, "/depth_image")
ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 10)
ts.registerCallback(self.image_callback)
self.camera_info = None
self.camera_info_sub = self.create_subscription(
CameraInfo, "/camera_info", self.camera_info_callback, 10
)
self.publisher = self.create_publisher(ROSImage, "/grasp_image", 10)
self.bridge = CvBridge()
self.marker_pub = self.create_publisher(Marker, "/grasp_markers", 10)
self.grasp_axis_pub = self.create_publisher(Marker, "/grasp_axis_markers", 10)
self.grasps = None
self._action_server = ActionServer(
self,
GetGrasp,
"get_grasp",
self.execute_callback,
goal_callback=self.goal_callback,
cancel_callback=self.cancel_callback,
)
def camera_info_callback(self, msg):
self.camera_info = msg
def image_callback(self, rgb_msg, depth_msg):
# Convert ROS Image message to OpenCV format
if not self.camera_info:
self.get_logger().warn("Camera info not available")
return
self.last_image_ts = rclpy.time.Time()
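        # rclpy.time.Time() is time zero, which TF treats as "latest available"
        # when this stamp is used for lookups and marker headers below.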
cv_rgb_image = self.bridge.imgmsg_to_cv2(
rgb_msg, desired_encoding="passthrough"
)
cv_depth_image = self.bridge.imgmsg_to_cv2(depth_msg, "32FC1")
image = Image.fromarray(cv_rgb_image)
start_time = time.time()
response = get_grasp_from_image(image)
self.get_logger().info(f"Time taken to get grasps: {time.time() - start_time}")
self.handle_response(response, cv_rgb_image, cv_depth_image)
def handle_response(self, response, cv_rgb_image, cv_depth_image):
try:
grasps = response["result"]
if grasps:
self.get_logger().info(
f"Received grasp poses: {[(grasp['cls_name'], round(grasp['obj'],2)) for grasp in response['result']]}"
)
self.publish_grasp_image(grasps, cv_rgb_image)
self.publish_grasp_markers(grasps, cv_depth_image)
grasp_dict = self.get_grasp_poses(grasps, cv_depth_image)
grasp_timestamp = self.get_clock().now().to_msg()
self.grasps = {
"timestamp": grasp_timestamp,
"grasps": grasp_dict,
}
self.publish_grasp_axis_markers()
except KeyError as e:
self.get_logger().warn(
f"KeyError: Failed to receive valid response or grasp poses: {e}"
)
def publish_grasp_image(self, grasps, original_image):
for grasp in grasps:
points = np.array(grasp["r_bbox"], np.int32)
points = points.reshape((-1, 1, 2))
cv2.polylines(
original_image, [points], isClosed=True, color=(0, 255, 0), thickness=2
)
# Get the label for the class
class_label = grasp["cls_name"]
score = grasp["obj"]
label_with_score = (
f"{class_label} ({score:.2f})" # Formatting score to 2 decimal places
)
label_position = (points[0][0][0], points[0][0][1] - 10)
cv2.putText(
original_image,
label_with_score,
label_position,
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
2,
)
ros_image = self.bridge.cv2_to_imgmsg(original_image, "rgb8")
self.publisher.publish(ros_image)
def project_to_3d(self, x, y, depth):
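        # Back-project a pixel (x, y) with depth into camera-frame coordinates
        # using the pinhole intrinsics from /camera_info.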
depth = float(depth)
fx = self.camera_info.k[0]
fy = self.camera_info.k[4]
cx = self.camera_info.k[2]
cy = self.camera_info.k[5]
x = (x - cx) * depth / fx
y = (y - cy) * depth / fy
return x, y, depth
def get_grasp_poses(self, grasps, cv_depth_image):
grasp_poses = {}
timestamp = self.last_image_ts.to_msg()
for grasp in grasps:
grasp_points = grasp["r_bbox"]
center = np.mean(grasp_points, axis=0)
average_depth = np.mean(
[
cv_depth_image[int(pt[1]), int(pt[0])]
for pt in get_grid_from_box(grasp_points)
]
)
point_3d = self.project_to_3d(center[0], center[1], average_depth)
if point_3d:
pose_msg = PoseStamped()
pose_msg.header.frame_id = self.camera_info.header.frame_id
pose_msg.header.stamp = timestamp
pose_msg.pose.position.x = point_3d[0]
pose_msg.pose.position.y = point_3d[1]
pose_msg.pose.position.z = point_3d[2]
angle = get_angle_from_box(grasp_points)
pose_msg.pose.orientation.z = np.sin(angle / 2)
pose_msg.pose.orientation.w = np.cos(angle / 2)
try:
transform = self.tf_buffer.lookup_transform(
"world",
pose_msg.header.frame_id,
timestamp,
timeout=rclpy.duration.Duration(seconds=10),
)
pose_msg = do_transform_pose_stamped(pose_msg, transform)
grasp_poses[grasp["cls_name"]] = pose_msg
except (
LookupException,
ConnectivityException,
ExtrapolationException,
) as e:
self.get_logger().error(f"Failed to transform point: {str(e)}")
self.get_logger().info(f"Grasp poses: {len(grasp_poses)}")
return grasp_poses
def publish_grasp_markers(self, grasps, cv_depth_image, publish_grid=False):
if not self.camera_info:
return
scale = 0.02
if publish_grid:
scale = 0.002
marker = Marker()
marker.header.frame_id = self.camera_info.header.frame_id
marker.header.stamp = self.last_image_ts.to_msg()
marker.ns = "grasps"
marker.id = 0
marker.type = Marker.POINTS
marker.action = Marker.ADD
marker.pose.orientation.w = 1.0
marker.scale.x = scale
marker.scale.y = scale
marker.scale.z = scale
marker.color.r = 1.0
marker.color.a = 1.0
marker.points = []
for grasp in grasps:
grasp_points = grasp["r_bbox"]
if publish_grid:
grasp_points = get_grid_from_box(grasp_points)
for pt in grasp_points:
point_3d = self.project_to_3d(
pt[0], pt[1], cv_depth_image[int(pt[1]), int(pt[0])]
)
if point_3d:
marker.points.append(
Point(x=point_3d[0], y=point_3d[1], z=point_3d[2])
)
self.marker_pub.publish(marker)
def publish_grasp_axis_markers(self):
        if not self.grasps or not self.grasps["grasps"]:
            return
marker = Marker()
marker.header.frame_id = list(self.grasps["grasps"].values())[0].header.frame_id
marker.header.stamp = self.last_image_ts.to_msg()
marker.ns = "grasp_axis"
marker.id = 0
marker.type = Marker.ARROW
marker.action = Marker.ADD
marker.pose.orientation.w = 1.0
marker.scale.x = 0.1
marker.scale.y = 0.015
marker.scale.z = 0.015
marker.color.b = 1.0
marker.color.a = 0.5
# for grasp in self.grasps["grasps"].values():
grasp = list(self.grasps["grasps"].values())[0]
# Draw the axis
marker.pose.position = grasp.pose.position
marker.pose.orientation = grasp.pose.orientation
self.grasp_axis_pub.publish(marker)
def goal_callback(self, goal_request):
self.get_logger().info("Received goal request")
return GoalResponse.ACCEPT
def cancel_callback(self, goal_handle):
self.get_logger().info("Received cancel request")
self.get_logger().error("Cancel not implemented")
return CancelResponse.REJECT
def execute_callback(self, goal_handle):
self.get_logger().info("Received execute request")
object_name = goal_handle.request.object_name
self.get_logger().info(f"Looking for object name: {object_name}")
time_tolerance = rclpy.time.Duration(seconds=3)
timeout = 10.0
        while timeout > 0:
            if self.grasps:
                self.get_logger().info(
                    f"Grasps: {len(self.grasps['grasps'])}, TS: {self.grasps['timestamp']}, now: {self.get_clock().now()}"
                )
                self.get_logger().info(
                    f"Time diff: {(self.get_clock().now() - rclpy.time.Time.from_msg(self.grasps['timestamp'])).nanoseconds/1e9}"
                )
                if (
                    self.get_clock().now()
                    - rclpy.time.Time.from_msg(self.grasps["timestamp"])
                    < time_tolerance
                ):
                    self.get_logger().info(f"Found grasps for object: {object_name}")
                    result = GetGrasp.Result(success=True)
                    grasp = list(self.grasps["grasps"].values())[0]
                    result.grasp = grasp
                    goal_handle.succeed()
                    return result
            timeout -= 0.1
            rclpy.spin_once(self, timeout_sec=0.1)
self.get_logger().warn(f"Couldn't find grasps for object: {object_name}")
goal_handle.succeed()
return GetGrasp.Result(success=False)
def main(args=None):
rclpy.init(args=args)
image_processor = ImageProcessor()
try:
while True:
rclpy.spin_once(image_processor)
except KeyboardInterrupt:
image_processor.get_logger().info("Shutting down")
image_processor.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 12,018 | Python | 33.736994 | 125 | 0.555999 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/launch/carter_navigation.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_dir = LaunchConfiguration(
"map",
default=os.path.join(
get_package_share_directory("carter_navigation"),
"maps",
"carter_warehouse_navigation.yaml",
),
)
param_dir = LaunchConfiguration(
"params_file",
default=os.path.join(
get_package_share_directory("carter_navigation"),
"params",
"carter_navigation_params.yaml",
),
)
nav2_bringup_launch_dir = os.path.join(
get_package_share_directory("nav2_bringup"), "launch"
)
rviz_config_dir = os.path.join(
get_package_share_directory("carter_navigation"),
"rviz2",
"carter_navigation.rviz",
)
return LaunchDescription(
[
DeclareLaunchArgument(
"map",
default_value=map_dir,
description="Full path to map file to load",
),
DeclareLaunchArgument(
"params_file",
default_value=param_dir,
description="Full path to param file to load",
),
DeclareLaunchArgument(
"use_sim_time",
default_value="true",
description="Use simulation (Omniverse Isaac Sim) clock if true",
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")
),
launch_arguments={
"namespace": "",
"use_namespace": "False",
"rviz_config": rviz_config_dir,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
[nav2_bringup_launch_dir, "/bringup_launch.py"]
),
launch_arguments={
"map": map_dir,
"use_sim_time": use_sim_time,
"params_file": param_dir,
}.items(),
),
Node(
package="pointcloud_to_laserscan",
executable="pointcloud_to_laserscan_node",
remappings=[
("cloud_in", ["/front_3d_lidar/point_cloud"]),
("scan", ["/scan"]),
],
parameters=[
{
"target_frame": "front_3d_lidar",
"transform_tolerance": 0.01,
"min_height": -0.4,
"max_height": 1.5,
"angle_min": -1.5708, # -M_PI/2
"angle_max": 1.5708, # M_PI/2
"angle_increment": 0.0087, # M_PI/360.0
"scan_time": 0.3333,
"range_min": 0.05,
"range_max": 100.0,
"use_inf": True,
"inf_epsilon": 1.0,
# 'concurrency_level': 1,
}
],
name="pointcloud_to_laserscan",
),
]
)
| 4,120 | Python | 35.149122 | 81 | 0.514078 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/launch/carter_navigation_individual.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
def generate_launch_description():
# Get the launch directory
nav2_launch_dir = os.path.join(
get_package_share_directory("nav2_bringup"), "launch"
)
# Create the launch configuration variables
slam = LaunchConfiguration("slam")
namespace = LaunchConfiguration("namespace")
use_namespace = LaunchConfiguration("use_namespace")
map_yaml_file = LaunchConfiguration("map")
use_sim_time = LaunchConfiguration("use_sim_time")
params_file = LaunchConfiguration("params_file")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
# Declare the launch arguments
declare_namespace_cmd = DeclareLaunchArgument(
"namespace", default_value="", description="Top-level namespace"
)
declare_use_namespace_cmd = DeclareLaunchArgument(
"use_namespace",
default_value="false",
description="Whether to apply a namespace to the navigation stack",
)
declare_slam_cmd = DeclareLaunchArgument(
"slam", default_value="False", description="Whether run a SLAM"
)
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(
nav2_launch_dir, "maps", "carter_warehouse_navigation.yaml"
),
description="Full path to map file to load",
)
declare_use_sim_time_cmd = DeclareLaunchArgument(
"use_sim_time",
default_value="True",
description="Use simulation (Isaac Sim) clock if true",
)
declare_params_file_cmd = DeclareLaunchArgument(
"params_file",
default_value=os.path.join(nav2_launch_dir, "params", "nav2_params.yaml"),
description="Full path to the ROS2 parameters file to use for all launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"),
"behavior_trees",
"navigate_w_replanning_and_recovery.xml",
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart",
default_value="true",
description="Automatically startup the nav2 stack",
)
bringup_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(nav2_launch_dir, "bringup_launch.py")
),
launch_arguments={
"namespace": namespace,
"use_namespace": use_namespace,
"slam": slam,
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
}.items(),
)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_namespace_cmd)
ld.add_action(declare_use_namespace_cmd)
ld.add_action(declare_slam_cmd)
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_use_sim_time_cmd)
ld.add_action(declare_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(bringup_cmd)
return ld
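# Hedged usage note added for clarity (not part of the original launch file): this individual
# bringup is included by the multi-robot launch files below with a per-robot namespace, e.g.
# launch arguments {"namespace": "carter1", "use_namespace": "True", ...}. A comparable
# standalone invocation (assumed, not taken from the repository docs) would be
#   ros2 launch carter_navigation carter_navigation_individual.launch.py namespace:=carter1 use_namespace:=True map:=/path/to/map.yaml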
| 4,104 | Python | 34.387931 | 90 | 0.67885 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_hospital.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example for spawning multiple robots in a shared simulation.
This is an example of how to create a launch file for spawning multiple robots
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import (
DeclareLaunchArgument,
GroupAction,
IncludeLaunchDescription,
LogInfo,
)
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch and rviz directories
carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
nav2_bringup_dir = get_package_share_directory("nav2_bringup")
nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
rviz_config_dir = os.path.join(
carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz"
)
# Names and poses of the robots
robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
# Common settings
ENV_MAP_FILE = "carter_hospital_navigation.yaml"
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_yaml_file = LaunchConfiguration("map")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
rviz_config_file = LaunchConfiguration("rviz_config")
use_rviz = LaunchConfiguration("use_rviz")
log_settings = LaunchConfiguration("log_settings", default="true")
# Declare the launch arguments
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
description="Full path to map file to load",
)
declare_robot1_params_file_cmd = DeclareLaunchArgument(
"carter1_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"hospital",
"multi_robot_carter_navigation_params_1.yaml",
),
description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
)
declare_robot2_params_file_cmd = DeclareLaunchArgument(
"carter2_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"hospital",
"multi_robot_carter_navigation_params_2.yaml",
),
description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
)
declare_robot3_params_file_cmd = DeclareLaunchArgument(
"carter3_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"hospital",
"multi_robot_carter_navigation_params_3.yaml",
),
description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"),
"behavior_trees",
"navigate_w_replanning_and_recovery.xml",
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart",
default_value="True",
description="Automatically startup the stacks",
)
declare_rviz_config_file_cmd = DeclareLaunchArgument(
"rviz_config",
default_value=rviz_config_dir,
description="Full path to the RVIZ config file to use.",
)
declare_use_rviz_cmd = DeclareLaunchArgument(
"use_rviz", default_value="True", description="Whether to start RVIZ"
)
# Define commands for launching the navigation instances
nav_instances_cmds = []
for robot in robots:
params_file = LaunchConfiguration(robot["name"] + "_params_file")
group = GroupAction(
[
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")
),
condition=IfCondition(use_rviz),
launch_arguments={
"namespace": TextSubstitution(text=robot["name"]),
"use_namespace": "True",
"rviz_config": rviz_config_file,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(
carter_nav2_bringup_dir,
"launch",
"carter_navigation_individual.launch.py",
)
),
launch_arguments={
"namespace": robot["name"],
"use_namespace": "True",
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
"use_rviz": "False",
"use_simulator": "False",
"headless": "False",
}.items(),
),
Node(
package="pointcloud_to_laserscan",
executable="pointcloud_to_laserscan_node",
remappings=[
("cloud_in", ["front_3d_lidar/point_cloud"]),
("scan", ["scan"]),
],
parameters=[
{
"target_frame": "front_3d_lidar",
"transform_tolerance": 0.01,
"min_height": -0.4,
"max_height": 1.5,
"angle_min": -1.5708, # -M_PI/2
"angle_max": 1.5708, # M_PI/2
"angle_increment": 0.0087, # M_PI/360.0
"scan_time": 0.3333,
"range_min": 0.05,
"range_max": 100.0,
"use_inf": True,
"inf_epsilon": 1.0,
# 'concurrency_level': 1,
}
],
name="pointcloud_to_laserscan",
namespace=robot["name"],
),
LogInfo(
condition=IfCondition(log_settings),
msg=["Launching ", robot["name"]],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " map yaml: ", map_yaml_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " params yaml: ", params_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[
robot["name"],
" behavior tree xml: ",
default_bt_xml_filename,
],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " rviz config file: ", rviz_config_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " autostart: ", autostart],
),
]
)
nav_instances_cmds.append(group)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_robot1_params_file_cmd)
ld.add_action(declare_robot2_params_file_cmd)
ld.add_action(declare_robot3_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_use_rviz_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_rviz_config_file_cmd)
for simulation_instance_cmd in nav_instances_cmds:
ld.add_action(simulation_instance_cmd)
return ld
| 9,192 | Python | 36.987603 | 93 | 0.544386 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/launch/multiple_robot_carter_navigation_office.launch.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Example for spawning multiple robots in a shared simulation.
This is an example of how to create a launch file for spawning multiple robots
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import (
DeclareLaunchArgument,
GroupAction,
IncludeLaunchDescription,
LogInfo,
)
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
def generate_launch_description():
# Get the launch and rviz directories
carter_nav2_bringup_dir = get_package_share_directory("carter_navigation")
nav2_bringup_dir = get_package_share_directory("nav2_bringup")
nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, "launch")
rviz_config_dir = os.path.join(
carter_nav2_bringup_dir, "rviz2", "carter_navigation_namespaced.rviz"
)
# Names and poses of the robots
robots = [{"name": "carter1"}, {"name": "carter2"}, {"name": "carter3"}]
# Common settings
ENV_MAP_FILE = "carter_office_navigation.yaml"
use_sim_time = LaunchConfiguration("use_sim_time", default="True")
map_yaml_file = LaunchConfiguration("map")
default_bt_xml_filename = LaunchConfiguration("default_bt_xml_filename")
autostart = LaunchConfiguration("autostart")
rviz_config_file = LaunchConfiguration("rviz_config")
use_rviz = LaunchConfiguration("use_rviz")
log_settings = LaunchConfiguration("log_settings", default="true")
# Declare the launch arguments
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(carter_nav2_bringup_dir, "maps", ENV_MAP_FILE),
description="Full path to map file to load",
)
declare_robot1_params_file_cmd = DeclareLaunchArgument(
"carter1_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"office",
"multi_robot_carter_navigation_params_1.yaml",
),
description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
)
declare_robot2_params_file_cmd = DeclareLaunchArgument(
"carter2_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"office",
"multi_robot_carter_navigation_params_2.yaml",
),
description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
)
declare_robot3_params_file_cmd = DeclareLaunchArgument(
"carter3_params_file",
default_value=os.path.join(
carter_nav2_bringup_dir,
"params",
"office",
"multi_robot_carter_navigation_params_3.yaml",
),
description="Full path to the ROS2 parameters file to use for robot3 launched nodes",
)
declare_bt_xml_cmd = DeclareLaunchArgument(
"default_bt_xml_filename",
default_value=os.path.join(
get_package_share_directory("nav2_bt_navigator"),
"behavior_trees",
"navigate_w_replanning_and_recovery.xml",
),
description="Full path to the behavior tree xml file to use",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart",
default_value="True",
description="Automatically startup the stacks",
)
declare_rviz_config_file_cmd = DeclareLaunchArgument(
"rviz_config",
default_value=rviz_config_dir,
description="Full path to the RVIZ config file to use.",
)
declare_use_rviz_cmd = DeclareLaunchArgument(
"use_rviz", default_value="True", description="Whether to start RVIZ"
)
# Define commands for launching the navigation instances
nav_instances_cmds = []
for robot in robots:
params_file = LaunchConfiguration(robot["name"] + "_params_file")
group = GroupAction(
[
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(nav2_bringup_launch_dir, "rviz_launch.py")
),
condition=IfCondition(use_rviz),
launch_arguments={
"namespace": TextSubstitution(text=robot["name"]),
"use_namespace": "True",
"rviz_config": rviz_config_file,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(
carter_nav2_bringup_dir,
"launch",
"carter_navigation_individual.launch.py",
)
),
launch_arguments={
"namespace": robot["name"],
"use_namespace": "True",
"map": map_yaml_file,
"use_sim_time": use_sim_time,
"params_file": params_file,
"default_bt_xml_filename": default_bt_xml_filename,
"autostart": autostart,
"use_rviz": "False",
"use_simulator": "False",
"headless": "False",
}.items(),
),
Node(
package="pointcloud_to_laserscan",
executable="pointcloud_to_laserscan_node",
remappings=[
("cloud_in", ["front_3d_lidar/point_cloud"]),
("scan", ["scan"]),
],
parameters=[
{
"target_frame": "front_3d_lidar",
"transform_tolerance": 0.01,
"min_height": -0.4,
"max_height": 1.5,
"angle_min": -1.5708, # -M_PI/2
"angle_max": 1.5708, # M_PI/2
"angle_increment": 0.0087, # M_PI/360.0
"scan_time": 0.3333,
"range_min": 0.05,
"range_max": 100.0,
"use_inf": True,
"inf_epsilon": 1.0,
# 'concurrency_level': 1,
}
],
name="pointcloud_to_laserscan",
namespace=robot["name"],
),
LogInfo(
condition=IfCondition(log_settings),
msg=["Launching ", robot["name"]],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " map yaml: ", map_yaml_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " params yaml: ", params_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[
robot["name"],
" behavior tree xml: ",
default_bt_xml_filename,
],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " rviz config file: ", rviz_config_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " autostart: ", autostart],
),
]
)
nav_instances_cmds.append(group)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_robot1_params_file_cmd)
ld.add_action(declare_robot2_params_file_cmd)
ld.add_action(declare_robot3_params_file_cmd)
ld.add_action(declare_bt_xml_cmd)
ld.add_action(declare_use_rviz_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_rviz_config_file_cmd)
for simulation_instance_cmd in nav_instances_cmds:
ld.add_action(simulation_instance_cmd)
return ld
| 9,184 | Python | 36.954545 | 93 | 0.54399 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/maps/carter_office_navigation.yaml | image: carter_office_navigation.png
mode: trinary
resolution: 0.05
origin: [-29.975, -39.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 153 | YAML | 18.249998 | 35 | 0.738562 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/maps/carter_hospital_navigation.yaml | image: carter_hospital_navigation.png
mode: trinary
resolution: 0.05
origin: [-49.625, -4.675, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
| 154 | YAML | 18.374998 | 37 | 0.74026 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/carter_navigation/maps/carter_warehouse_navigation.yaml | image: carter_warehouse_navigation.png
mode: trinary
resolution: 0.05
origin: [-11.975, -17.975, 0.0000]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
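# Hedged annotation added for clarity (not part of the original map file): these fields follow
# the standard ROS map_server format. resolution is meters per pixel, origin is the [x, y, yaw]
# pose of the lower-left image pixel, and negate / free_thresh control how pixel intensities are
# thresholded into occupied space (this is how GridMap in obstacle_map.py below reads them).
# occupied_thresh is used by nav2's map server but not by GridMap.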
| 156 | YAML | 18.624998 | 38 | 0.74359 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/setup.py | from setuptools import setup
from glob import glob
import os
package_name = "isaac_ros_navigation_goal"
setup(
name=package_name,
version="0.0.1",
packages=[package_name, package_name + "/goal_generators"],
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
(os.path.join("share", package_name, "launch"), glob("launch/*.launch.py")),
("share/" + package_name + "/assets", glob("assets/*")),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="isaac sim",
maintainer_email="[email protected]",
description="Package to set goals for navigation stack.",
license="NVIDIA Isaac ROS Software License",
tests_require=["pytest"],
entry_points={
"console_scripts": [
"SetNavigationGoal = isaac_ros_navigation_goal.set_goal:main"
]
},
)
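# Hedged build note added for clarity (not part of the original file): with the console_scripts
# entry point above, the node is typically built and run from a colcon workspace, e.g.
#   colcon build --packages-select isaac_ros_navigation_goal
#   source install/setup.bash
#   ros2 run isaac_ros_navigation_goal SetNavigationGoal
# The workspace layout is an assumption; the executable name comes from the entry point above.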
| 942 | Python | 30.433332 | 84 | 0.626327 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/test/test_flake8.py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, "Found %d code style errors / warnings:\n" % len(
errors
) + "\n".join(errors)
| 878 | Python | 32.807691 | 74 | 0.730068 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/test/test_pep257.py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[".", "test"])
assert rc == 0, "Found code style errors / warnings"
| 803 | Python | 32.499999 | 74 | 0.743462 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/launch/isaac_ros_navigation_goal.launch.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
map_yaml_file = LaunchConfiguration(
"map_yaml_path",
default=os.path.join(
get_package_share_directory("isaac_ros_navigation_goal"),
"assets",
"carter_warehouse_navigation.yaml",
),
)
goal_text_file = LaunchConfiguration(
"goal_text_file_path",
default=os.path.join(
get_package_share_directory("isaac_ros_navigation_goal"),
"assets",
"goals.txt",
),
)
navigation_goal_node = Node(
name="set_navigation_goal",
package="isaac_ros_navigation_goal",
executable="SetNavigationGoal",
parameters=[
{
"map_yaml_path": map_yaml_file,
"iteration_count": 3,
"goal_generator_type": "RandomGoalGenerator",
"action_server_name": "navigate_to_pose",
"obstacle_search_distance_in_meters": 0.2,
"goal_text_file_path": goal_text_file,
"initial_pose": [-6.4, -1.04, 0.0, 0.0, 0.0, 0.99, 0.02],
}
],
output="screen",
)
return LaunchDescription([navigation_goal_node])
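# Hedged annotation added for clarity (not part of the original launch file): the initial_pose
# list above is consumed by SetNavigationGoal.__send_initial_pose (set_goal.py below) in the
# order [x, y, z, qx, qy, qz, qw], i.e. position followed by quaternion orientation.
# iteration_count is the number of goals sent before the node shuts down, and
# goal_generator_type selects either RandomGoalGenerator (poses sampled from map_yaml_path)
# or GoalReader (poses read from goal_text_file_path).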
| 1,853 | Python | 33.333333 | 76 | 0.62979 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/obstacle_map.py | import numpy as np
import yaml
import os
import math
from PIL import Image
class GridMap:
def __init__(self, yaml_file_path):
self.__get_meta_from_yaml(yaml_file_path)
self.__get_raw_map()
self.__add_max_range_to_meta()
# print(self.__map_meta)
def __get_meta_from_yaml(self, yaml_file_path):
"""
Reads map meta from the yaml file.
Parameters
----------
yaml_file_path: path of the yaml file.
"""
with open(yaml_file_path, "r") as f:
file_content = f.read()
self.__map_meta = yaml.safe_load(file_content)
self.__map_meta["image"] = os.path.join(
os.path.dirname(yaml_file_path), self.__map_meta["image"]
)
def __get_raw_map(self):
"""
Reads the map image and generates the grid map.\n
Grid map is a 2D boolean matrix where True=>occupied space & False=>Free space.
"""
img = Image.open(self.__map_meta.get("image"))
img = np.array(img)
# Anything greater than free_thresh is considered as occupied
if self.__map_meta["negate"]:
res = np.where((img / 255)[:, :, 0] > self.__map_meta["free_thresh"])
else:
res = np.where(
((255 - img) / 255)[:, :, 0] > self.__map_meta["free_thresh"]
)
self.__grid_map = np.zeros(shape=(img.shape[:2]), dtype=bool)
for i in range(res[0].shape[0]):
self.__grid_map[res[0][i], res[1][i]] = 1
def __add_max_range_to_meta(self):
"""
Calculates and adds the max value of pose in x & y direction to the meta.
"""
max_x = (
self.__grid_map.shape[1] * self.__map_meta["resolution"]
+ self.__map_meta["origin"][0]
)
max_y = (
self.__grid_map.shape[0] * self.__map_meta["resolution"]
+ self.__map_meta["origin"][1]
)
self.__map_meta["max_x"] = round(max_x, 2)
self.__map_meta["max_y"] = round(max_y, 2)
def __pad_obstacles(self, distance):
pass
def get_range(self):
"""
Returns the bounds of pose values in x & y direction.\n
Returns
-------
[List]:\n
Where list[0][0]: min value in x direction
list[0][1]: max value in x direction
list[1][0]: min value in y direction
list[1][1]: max value in y direction
"""
return [
[self.__map_meta["origin"][0], self.__map_meta["max_x"]],
[self.__map_meta["origin"][1], self.__map_meta["max_y"]],
]
def __transform_to_image_coordinates(self, point):
"""
Transforms a pose in meters to image pixel coordinates.
Parameters
----------
Point: A point as list. where list[0]=>pose.x and list[1]=pose.y
Returns
-------
[Tuple]: tuple[0]=>pixel value in x direction. i.e column index.
                 tuple[1]=> pixel value in y direction. i.e row index.
"""
p_x, p_y = point
i_x = math.floor(
(p_x - self.__map_meta["origin"][0]) / self.__map_meta["resolution"]
)
i_y = math.floor(
(p_y - self.__map_meta["origin"][1]) / self.__map_meta["resolution"]
)
# because origin in yaml is at bottom left of image
i_y = self.__grid_map.shape[0] - i_y
return i_x, i_y
def __transform_distance_to_pixels(self, distance):
"""
Converts the distance in meters to number of pixels based on the resolution.
Parameters
----------
distance: value in meters
Returns
-------
[Integer]: number of pixel which represent the same distance.
"""
return math.ceil(distance / self.__map_meta["resolution"])
def __is_obstacle_in_distance(self, img_point, distance):
"""
Checks if any obstacle is in vicinity of the given image point.
Parameters
----------
img_point: pixel values of the point
        distance: distance in pixels in which there shouldn't be any obstacle.
Returns
-------
[Bool]: True if any obstacle found else False.
"""
# need to make sure that patch xmin & ymin are >=0,
# because of python's negative indexing capability
row_start_idx = 0 if img_point[1] - distance < 0 else img_point[1] - distance
col_start_idx = 0 if img_point[0] - distance < 0 else img_point[0] - distance
# image point acts as the center of the square, where each side of square is of size
# 2xdistance
patch = self.__grid_map[
row_start_idx : img_point[1] + distance,
col_start_idx : img_point[0] + distance,
]
        obstacles = np.where(patch)  # "patch is True" is an identity check and is always False for a NumPy array
return len(obstacles[0]) > 0
def is_valid_pose(self, point, distance=0.2):
"""
        Checks if a given pose is "distance" away from an obstacle.
Parameters
----------
point: pose in 2D space. where point[0]=pose.x and point[1]=pose.y
distance: distance in meters.
Returns
-------
[Bool]: True if pose is valid else False
"""
assert len(point) == 2
img_point = self.__transform_to_image_coordinates(point)
img_pixel_distance = self.__transform_distance_to_pixels(distance)
# Pose is not valid if there is obstacle in the vicinity
return not self.__is_obstacle_in_distance(img_point, img_pixel_distance)
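
# Hedged usage sketch added for clarity (not part of the original file). The yaml path is a
# placeholder and assumes one of the map files shown above is available on disk.
if __name__ == "__main__":
    grid_map = GridMap("carter_warehouse_navigation.yaml")
    (x_min, x_max), (y_min, y_max) = grid_map.get_range()
    center = [0.5 * (x_min + x_max), 0.5 * (y_min + y_max)]
    # True only if the pose is at least 0.2 m away from every occupied cell
    print(grid_map.is_valid_pose(center, distance=0.2))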
| 5,630 | Python | 31.177143 | 92 | 0.535169 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/set_goal.py | import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from nav2_msgs.action import NavigateToPose
from .obstacle_map import GridMap
from .goal_generators import RandomGoalGenerator, GoalReader
import sys
from geometry_msgs.msg import PoseWithCovarianceStamped
import time
class SetNavigationGoal(Node):
def __init__(self):
super().__init__("set_navigation_goal")
self.declare_parameters(
namespace="",
parameters=[
("iteration_count", 1),
("goal_generator_type", "RandomGoalGenerator"),
("action_server_name", "navigate_to_pose"),
("obstacle_search_distance_in_meters", 0.2),
("frame_id", "map"),
("map_yaml_path", rclpy.Parameter.Type.STRING),
("goal_text_file_path", rclpy.Parameter.Type.STRING),
("initial_pose", rclpy.Parameter.Type.DOUBLE_ARRAY),
],
)
self.__goal_generator = self.__create_goal_generator()
action_server_name = self.get_parameter("action_server_name").value
self._action_client = ActionClient(self, NavigateToPose, action_server_name)
self.MAX_ITERATION_COUNT = self.get_parameter("iteration_count").value
assert self.MAX_ITERATION_COUNT > 0
self.curr_iteration_count = 1
self.__initial_goal_publisher = self.create_publisher(
PoseWithCovarianceStamped, "/initialpose", 1
)
self.__initial_pose = self.get_parameter("initial_pose").value
self.__is_initial_pose_sent = True if self.__initial_pose is None else False
def __send_initial_pose(self):
"""
Publishes the initial pose.
        This function is called only once, before any goal pose is sent
        to the action server.
"""
goal = PoseWithCovarianceStamped()
goal.header.frame_id = self.get_parameter("frame_id").value
goal.header.stamp = self.get_clock().now().to_msg()
goal.pose.pose.position.x = self.__initial_pose[0]
goal.pose.pose.position.y = self.__initial_pose[1]
goal.pose.pose.position.z = self.__initial_pose[2]
goal.pose.pose.orientation.x = self.__initial_pose[3]
goal.pose.pose.orientation.y = self.__initial_pose[4]
goal.pose.pose.orientation.z = self.__initial_pose[5]
goal.pose.pose.orientation.w = self.__initial_pose[6]
self.__initial_goal_publisher.publish(goal)
def send_goal(self):
"""
Sends the goal to the action server.
"""
if not self.__is_initial_pose_sent:
self.get_logger().info("Sending initial pose")
self.__send_initial_pose()
self.__is_initial_pose_sent = True
            # Assumption is that the initial pose is set within this duration after it is published once.
            # This can be changed to a more sophisticated check, e.g. the /particlecloud topic has no
            # messages until the initial pose is set.
time.sleep(10)
self.get_logger().info("Sending first goal")
self._action_client.wait_for_server()
goal_msg = self.__get_goal()
if goal_msg is None:
rclpy.shutdown()
sys.exit(1)
self._send_goal_future = self._action_client.send_goal_async(
goal_msg, feedback_callback=self.__feedback_callback
)
self._send_goal_future.add_done_callback(self.__goal_response_callback)
def __goal_response_callback(self, future):
"""
        Callback function to check the response (goal accepted/rejected) from the server.\n
If the Goal is rejected it stops the execution for now.(We can change to resample the pose if rejected.)
"""
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info("Goal rejected :(")
rclpy.shutdown()
return
self.get_logger().info("Goal accepted :)")
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self.__get_result_callback)
def __get_goal(self):
"""
Get the next goal from the goal generator.
Returns
-------
[NavigateToPose][goal] or None if the next goal couldn't be generated.
"""
goal_msg = NavigateToPose.Goal()
goal_msg.pose.header.frame_id = self.get_parameter("frame_id").value
goal_msg.pose.header.stamp = self.get_clock().now().to_msg()
pose = self.__goal_generator.generate_goal()
# couldn't sample a pose which is not close to obstacles. Rare but might happen in dense maps.
if pose is None:
self.get_logger().error(
"Could not generate next goal. Returning. Possible reasons for this error could be:"
)
            self.get_logger().error(
                "1. If you are using GoalReader then please make sure iteration count <= number of goals available in the file."
            )
            self.get_logger().error(
                "2. If RandomGoalGenerator is being used then it was not able to sample a pose that is the given distance away from the obstacles."
            )
return
self.get_logger().info("Generated goal pose: {0}".format(pose))
goal_msg.pose.pose.position.x = pose[0]
goal_msg.pose.pose.position.y = pose[1]
goal_msg.pose.pose.orientation.x = pose[2]
goal_msg.pose.pose.orientation.y = pose[3]
goal_msg.pose.pose.orientation.z = pose[4]
goal_msg.pose.pose.orientation.w = pose[5]
return goal_msg
def __get_result_callback(self, future):
"""
Callback to check result.\n
It calls the send_goal() function in case current goal sent count < required goals count.
"""
# Nav2 is sending empty message for success as well as for failure.
result = future.result().result
self.get_logger().info("Result: {0}".format(result.result))
if self.curr_iteration_count < self.MAX_ITERATION_COUNT:
self.curr_iteration_count += 1
self.send_goal()
else:
rclpy.shutdown()
def __feedback_callback(self, feedback_msg):
"""
This is feeback callback. We can compare/compute/log while the robot is on its way to goal.
"""
# self.get_logger().info('FEEDBACK: {}\n'.format(feedback_msg))
pass
def __create_goal_generator(self):
"""
Creates the GoalGenerator object based on the specified ros param value.
"""
goal_generator_type = self.get_parameter("goal_generator_type").value
goal_generator = None
if goal_generator_type == "RandomGoalGenerator":
if self.get_parameter("map_yaml_path").value is None:
self.get_logger().info("Yaml file path is not given. Returning..")
sys.exit(1)
yaml_file_path = self.get_parameter("map_yaml_path").value
grid_map = GridMap(yaml_file_path)
obstacle_search_distance_in_meters = self.get_parameter(
"obstacle_search_distance_in_meters"
).value
assert obstacle_search_distance_in_meters > 0
goal_generator = RandomGoalGenerator(
grid_map, obstacle_search_distance_in_meters
)
elif goal_generator_type == "GoalReader":
if self.get_parameter("goal_text_file_path").value is None:
self.get_logger().info("Goal text file path is not given. Returning..")
sys.exit(1)
file_path = self.get_parameter("goal_text_file_path").value
goal_generator = GoalReader(file_path)
else:
self.get_logger().info("Invalid goal generator specified. Returning...")
sys.exit(1)
return goal_generator
def main():
rclpy.init()
set_goal = SetNavigationGoal()
set_goal.send_goal()
rclpy.spin(set_goal)
if __name__ == "__main__":
main()
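# Hedged annotation added for clarity (not part of the original file): when goal_generator_type
# is "GoalReader", each line of the goal text file is expected to contain six space-separated
# floats that __get_goal maps onto the goal as x y qx qy qz qw (position.x, position.y, then the
# quaternion orientation), e.g. "1.0 2.0 0.0 0.0 0.0 1.0".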
| 8,114 | Python | 37.459715 | 144 | 0.602292 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/navigation/isaac_ros_navigation_goal/isaac_ros_navigation_goal/goal_generators/goal_reader.py | from .goal_generator import GoalGenerator
class GoalReader(GoalGenerator):
def __init__(self, file_path):
self.__file_path = file_path
self.__generator = self.__get_goal()
def generate_goal(self, max_num_of_trials=1000):
try:
return next(self.__generator)
except StopIteration:
return
def __get_goal(self):
for row in open(self.__file_path, "r"):
yield list(map(float, row.strip().split(" ")))
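
# Hedged sketch added for clarity (not part of the original file): the repository's actual
# RandomGoalGenerator lives in a sibling module that is not included in this excerpt. Given how
# set_goal.py uses it (constructed with a GridMap and an obstacle search distance, with
# generate_goal() returning [x, y, qx, qy, qz, qw] or None), a minimal stand-in could look like
# the following; the upstream implementation may differ.
import math
import random


class RandomGoalGeneratorSketch(GoalGenerator):
    def __init__(self, grid_map, distance):
        self.__grid_map = grid_map
        self.__distance = distance

    def generate_goal(self, max_num_of_trials=1000):
        # Sample uniformly inside the map bounds until a pose far enough from
        # obstacles is found, or the trial budget is exhausted.
        (x_min, x_max), (y_min, y_max) = self.__grid_map.get_range()
        for _ in range(max_num_of_trials):
            x = random.uniform(x_min, x_max)
            y = random.uniform(y_min, y_max)
            if self.__grid_map.is_valid_pose([x, y], self.__distance):
                yaw = random.uniform(-math.pi, math.pi)
                # Planar yaw encoded as a quaternion about the z-axis.
                return [x, y, 0.0, 0.0, math.sin(yaw / 2.0), math.cos(yaw / 2.0)]
        return None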
| 486 | Python | 26.055554 | 58 | 0.584362 |