# File: superboySB/SBDrone_deprecated/src/HITL/airsim/types.py
from __future__ import print_function
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import math
class MsgpackMixin:
def __repr__(self):
from pprint import pformat
return "<" + type(self).__name__ + "> " + pformat(vars(self), indent=4, width=1)
def to_msgpack(self, *args, **kwargs):
return self.__dict__
@classmethod
def from_msgpack(cls, encoded):
obj = cls()
#obj.__dict__ = {k.decode('utf-8'): (from_msgpack(v.__class__, v) if hasattr(v, "__dict__") else v) for k, v in encoded.items()}
obj.__dict__ = { k : (v if not isinstance(v, dict) else getattr(getattr(obj, k).__class__, "from_msgpack")(v)) for k, v in encoded.items()}
#return cls(**msgpack.unpack(encoded))
return obj
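# --- Illustrative sketch (hypothetical helper, added for reference) ---
# A minimal example of how MsgpackMixin.from_msgpack rehydrates a decoded RPC
# payload, assuming plain string keys as produced by msgpack-rpc configured
# with unpack_encoding='utf-8'. It uses Vector3r and Pose, which are defined
# later in this module, so it can only be called once the module is loaded.
def _example_msgpack_roundtrip():
    # Flat payload: every value is a scalar, so it is copied as-is.
    v = Vector3r.from_msgpack({'x_val': 1.0, 'y_val': 2.0, 'z_val': 3.0})
    # Nested payload: dict values are rehydrated through the class defaults
    # (Pose.position is a Vector3r, Pose.orientation is a Quaternionr).
    p = Pose.from_msgpack({
        'position': {'x_val': 0.0, 'y_val': 0.0, 'z_val': -10.0},
        'orientation': {'w_val': 1.0, 'x_val': 0.0, 'y_val': 0.0, 'z_val': 0.0},
    })
    # to_msgpack simply exposes __dict__, which msgpack-rpc then serializes.
    return v.to_msgpack(), p.to_msgpack()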
class _ImageType(type):
@property
def Scene(cls):
return 0
def DepthPlanar(cls):
return 1
def DepthPerspective(cls):
return 2
def DepthVis(cls):
return 3
def DisparityNormalized(cls):
return 4
def Segmentation(cls):
return 5
def SurfaceNormals(cls):
return 6
def Infrared(cls):
return 7
def OpticalFlow(cls):
return 8
def OpticalFlowVis(cls):
return 9
def __getattr__(self, key):
if key == 'DepthPlanner':
print('\033[31m'+"DepthPlanner has been (correctly) renamed to DepthPlanar. Please use ImageType.DepthPlanar instead."+'\033[0m')
raise AttributeError
class ImageType(metaclass=_ImageType):
Scene = 0
DepthPlanar = 1
DepthPerspective = 2
DepthVis = 3
DisparityNormalized = 4
Segmentation = 5
SurfaceNormals = 6
Infrared = 7
OpticalFlow = 8
OpticalFlowVis = 9
class DrivetrainType:
MaxDegreeOfFreedom = 0
ForwardOnly = 1
class LandedState:
Landed = 0
Flying = 1
class WeatherParameter:
Rain = 0
Roadwetness = 1
Snow = 2
RoadSnow = 3
MapleLeaf = 4
RoadLeaf = 5
Dust = 6
Fog = 7
Enabled = 8
class Vector2r(MsgpackMixin):
x_val = 0.0
y_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0):
self.x_val = x_val
self.y_val = y_val
class Vector3r(MsgpackMixin):
x_val = 0.0
y_val = 0.0
z_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
@staticmethod
def nanVector3r():
return Vector3r(np.nan, np.nan, np.nan)
def containsNan(self):
return (math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))
def __add__(self, other):
return Vector3r(self.x_val + other.x_val, self.y_val + other.y_val, self.z_val + other.z_val)
def __sub__(self, other):
return Vector3r(self.x_val - other.x_val, self.y_val - other.y_val, self.z_val - other.z_val)
def __truediv__(self, other):
if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Vector3r( self.x_val / other, self.y_val / other, self.z_val / other)
else:
raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )
def __mul__(self, other):
if type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Vector3r(self.x_val*other, self.y_val*other, self.z_val*other)
else:
raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )
def dot(self, other):
if type(self) == type(other):
return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val
else:
raise TypeError('unsupported operand type(s) for \'dot\': %s and %s' % ( str(type(self)), str(type(other))) )
def cross(self, other):
if type(self) == type(other):
cross_product = np.cross(self.to_numpy_array(), other.to_numpy_array())
return Vector3r(cross_product[0], cross_product[1], cross_product[2])
else:
raise TypeError('unsupported operand type(s) for \'cross\': %s and %s' % ( str(type(self)), str(type(other))) )
def get_length(self):
return ( self.x_val**2 + self.y_val**2 + self.z_val**2 )**0.5
def distance_to(self, other):
return ( (self.x_val-other.x_val)**2 + (self.y_val-other.y_val)**2 + (self.z_val-other.z_val)**2 )**0.5
def to_Quaternionr(self):
return Quaternionr(self.x_val, self.y_val, self.z_val, 0)
def to_numpy_array(self):
return np.array([self.x_val, self.y_val, self.z_val], dtype=np.float32)
def __iter__(self):
return iter((self.x_val, self.y_val, self.z_val))
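# --- Illustrative sketch (hypothetical helper, added for reference) ---
# A short example of the Vector3r arithmetic implemented above: element-wise
# add, dot and cross products, scalar multiplication, and vector length.
def _example_vector3r_math():
    a = Vector3r(1.0, 0.0, 0.0)
    b = Vector3r(0.0, 1.0, 0.0)
    s = a + b                   # Vector3r(1, 1, 0)
    d = a.dot(b)                # 0.0, the vectors are perpendicular
    c = a.cross(b)              # Vector3r(0, 0, 1)
    n = (a * 2.0).get_length()  # 2.0
    return s, d, c, n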
class Quaternionr(MsgpackMixin):
w_val = 0.0
x_val = 0.0
y_val = 0.0
z_val = 0.0
def __init__(self, x_val = 0.0, y_val = 0.0, z_val = 0.0, w_val = 1.0):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
self.w_val = w_val
@staticmethod
def nanQuaternionr():
return Quaternionr(np.nan, np.nan, np.nan, np.nan)
def containsNan(self):
return (math.isnan(self.w_val) or math.isnan(self.x_val) or math.isnan(self.y_val) or math.isnan(self.z_val))
def __add__(self, other):
if type(self) == type(other):
return Quaternionr( self.x_val+other.x_val, self.y_val+other.y_val, self.z_val+other.z_val, self.w_val+other.w_val )
else:
raise TypeError('unsupported operand type(s) for +: %s and %s' % ( str(type(self)), str(type(other))) )
def __mul__(self, other):
if type(self) == type(other):
t, x, y, z = self.w_val, self.x_val, self.y_val, self.z_val
a, b, c, d = other.w_val, other.x_val, other.y_val, other.z_val
return Quaternionr( w_val = a*t - b*x - c*y - d*z,
x_val = b*t + a*x + d*y - c*z,
y_val = c*t + a*y + b*z - d*x,
z_val = d*t + z*a + c*x - b*y)
else:
raise TypeError('unsupported operand type(s) for *: %s and %s' % ( str(type(self)), str(type(other))) )
def __truediv__(self, other):
if type(other) == type(self):
return self * other.inverse()
elif type(other) in [int, float] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']:
return Quaternionr( self.x_val / other, self.y_val / other, self.z_val / other, self.w_val / other)
else:
raise TypeError('unsupported operand type(s) for /: %s and %s' % ( str(type(self)), str(type(other))) )
def dot(self, other):
if type(self) == type(other):
return self.x_val*other.x_val + self.y_val*other.y_val + self.z_val*other.z_val + self.w_val*other.w_val
else:
raise TypeError('unsupported operand type(s) for \'dot\': %s and %s' % ( str(type(self)), str(type(other))) )
def cross(self, other):
if type(self) == type(other):
return (self * other - other * self) / 2
else:
raise TypeError('unsupported operand type(s) for \'cross\': %s and %s' % ( str(type(self)), str(type(other))) )
def outer_product(self, other):
if type(self) == type(other):
return ( self.inverse()*other - other.inverse()*self ) / 2
else:
raise TypeError('unsupported operand type(s) for \'outer_product\': %s and %s' % ( str(type(self)), str(type(other))) )
def rotate(self, other):
if type(self) == type(other):
if math.isclose(other.get_length(), 1.0, rel_tol=1e-9): # unit-length check with a float tolerance instead of exact equality
return other * self * other.inverse()
else:
raise ValueError('length of the other Quaternionr must be 1')
else:
raise TypeError('unsupported operand type(s) for \'rotate\': %s and %s' % ( str(type(self)), str(type(other))) )
def conjugate(self):
return Quaternionr(-self.x_val, -self.y_val, -self.z_val, self.w_val)
def star(self):
return self.conjugate()
def inverse(self):
return self.star() / self.dot(self)
def sgn(self):
return self/self.get_length()
def get_length(self):
return ( self.x_val**2 + self.y_val**2 + self.z_val**2 + self.w_val**2 )**0.5
def to_numpy_array(self):
return np.array([self.x_val, self.y_val, self.z_val, self.w_val], dtype=np.float32)
def __iter__(self):
return iter((self.x_val, self.y_val, self.z_val, self.w_val))
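# --- Illustrative sketch (hypothetical helper, added for reference) ---
# Rotating a vector with the Quaternionr class above: the vector is lifted to
# a pure quaternion via to_Quaternionr(), the rotation quaternion must be unit
# length for rotate() to accept it, and the rotated vector is read back from
# the x/y/z components of the result.
def _example_quaternion_rotate():
    # 180 degree rotation about the Z axis (w = cos(90deg) = 0, z = sin(90deg) = 1).
    q = Quaternionr(x_val=0.0, y_val=0.0, z_val=1.0, w_val=0.0)
    v = Vector3r(1.0, 0.0, 0.0).to_Quaternionr()
    rotated = v.rotate(q)  # computes q * v * q.inverse(), giving (-1, 0, 0)
    return Vector3r(rotated.x_val, rotated.y_val, rotated.z_val)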
class Pose(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
def __init__(self, position_val = None, orientation_val = None):
position_val = position_val if position_val is not None else Vector3r()
orientation_val = orientation_val if orientation_val is not None else Quaternionr()
self.position = position_val
self.orientation = orientation_val
@staticmethod
def nanPose():
return Pose(Vector3r.nanVector3r(), Quaternionr.nanQuaternionr())
def containsNan(self):
return (self.position.containsNan() or self.orientation.containsNan())
def __iter__(self):
return iter((self.position, self.orientation))
class CollisionInfo(MsgpackMixin):
has_collided = False
normal = Vector3r()
impact_point = Vector3r()
position = Vector3r()
penetration_depth = 0.0
time_stamp = 0.0
object_name = ""
object_id = -1
class GeoPoint(MsgpackMixin):
latitude = 0.0
longitude = 0.0
altitude = 0.0
class YawMode(MsgpackMixin):
is_rate = True
yaw_or_rate = 0.0
def __init__(self, is_rate = True, yaw_or_rate = 0.0):
self.is_rate = is_rate
self.yaw_or_rate = yaw_or_rate
class RCData(MsgpackMixin):
timestamp = 0
pitch, roll, throttle, yaw = (0.0,)*4 #initialize 4 variables to 0.0
switch1, switch2, switch3, switch4 = (0,)*4
switch5, switch6, switch7, switch8 = (0,)*4
is_initialized = False
is_valid = False
def __init__(self, timestamp = 0, pitch = 0.0, roll = 0.0, throttle = 0.0, yaw = 0.0, switch1 = 0,
switch2 = 0, switch3 = 0, switch4 = 0, switch5 = 0, switch6 = 0, switch7 = 0, switch8 = 0, is_initialized = False, is_valid = False):
self.timestamp = timestamp
self.pitch = pitch
self.roll = roll
self.throttle = throttle
self.yaw = yaw
self.switch1 = switch1
self.switch2 = switch2
self.switch3 = switch3
self.switch4 = switch4
self.switch5 = switch5
self.switch6 = switch6
self.switch7 = switch7
self.switch8 = switch8
self.is_initialized = is_initialized
self.is_valid = is_valid
class ImageRequest(MsgpackMixin):
camera_name = '0'
image_type = ImageType.Scene
pixels_as_float = False
compress = False
def __init__(self, camera_name, image_type, pixels_as_float = False, compress = True):
# todo: in future remove str(), it's only for compatibility to pre v1.2
self.camera_name = str(camera_name)
self.image_type = image_type
self.pixels_as_float = pixels_as_float
self.compress = compress
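# --- Illustrative sketch (hypothetical helper, added for reference) ---
# Composing image requests for VehicleClient.simGetImages (defined in
# client.py): an uncompressed RGB scene image plus a floating-point planar
# depth image from the same camera. The camera name '0' is just an example.
def _example_image_requests(camera_name='0'):
    return [
        ImageRequest(camera_name, ImageType.Scene, pixels_as_float=False, compress=False),
        ImageRequest(camera_name, ImageType.DepthPlanar, pixels_as_float=True, compress=False),
    ]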
class ImageResponse(MsgpackMixin):
image_data_uint8 = np.uint8(0)
image_data_float = 0.0
camera_position = Vector3r()
camera_orientation = Quaternionr()
time_stamp = np.uint64(0)
message = ''
pixels_as_float = 0.0
compress = True
width = 0
height = 0
image_type = ImageType.Scene
class CarControls(MsgpackMixin):
throttle = 0.0
steering = 0.0
brake = 0.0
handbrake = False
is_manual_gear = False
manual_gear = 0
gear_immediate = True
def __init__(self, throttle = 0, steering = 0, brake = 0,
handbrake = False, is_manual_gear = False, manual_gear = 0, gear_immediate = True):
self.throttle = throttle
self.steering = steering
self.brake = brake
self.handbrake = handbrake
self.is_manual_gear = is_manual_gear
self.manual_gear = manual_gear
self.gear_immediate = gear_immediate
def set_throttle(self, throttle_val, forward):
if (forward):
self.is_manual_gear = False
self.manual_gear = 0
self.throttle = abs(throttle_val)
else:
self.is_manual_gear = False
self.manual_gear = -1
self.throttle = - abs(throttle_val)
class KinematicsState(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
linear_velocity = Vector3r()
angular_velocity = Vector3r()
linear_acceleration = Vector3r()
angular_acceleration = Vector3r()
class EnvironmentState(MsgpackMixin):
position = Vector3r()
geo_point = GeoPoint()
gravity = Vector3r()
air_pressure = 0.0
temperature = 0.0
air_density = 0.0
class CarState(MsgpackMixin):
speed = 0.0
gear = 0
rpm = 0.0
maxrpm = 0.0
handbrake = False
collision = CollisionInfo()
kinematics_estimated = KinematicsState()
timestamp = np.uint64(0)
class MultirotorState(MsgpackMixin):
collision = CollisionInfo()
kinematics_estimated = KinematicsState()
gps_location = GeoPoint()
timestamp = np.uint64(0)
landed_state = LandedState.Landed
rc_data = RCData()
ready = False
ready_message = ""
can_arm = False
class RotorStates(MsgpackMixin):
timestamp = np.uint64(0)
rotors = []
class ProjectionMatrix(MsgpackMixin):
matrix = []
class CameraInfo(MsgpackMixin):
pose = Pose()
fov = -1
proj_mat = ProjectionMatrix()
class LidarData(MsgpackMixin):
point_cloud = 0.0
time_stamp = np.uint64(0)
pose = Pose()
segmentation = 0
class ImuData(MsgpackMixin):
time_stamp = np.uint64(0)
orientation = Quaternionr()
angular_velocity = Vector3r()
linear_acceleration = Vector3r()
class BarometerData(MsgpackMixin):
time_stamp = np.uint64(0)
altitude = Quaternionr()
pressure = Vector3r()
qnh = Vector3r()
class MagnetometerData(MsgpackMixin):
time_stamp = np.uint64(0)
magnetic_field_body = Vector3r()
magnetic_field_covariance = 0.0
class GnssFixType(MsgpackMixin):
GNSS_FIX_NO_FIX = 0
GNSS_FIX_TIME_ONLY = 1
GNSS_FIX_2D_FIX = 2
GNSS_FIX_3D_FIX = 3
class GnssReport(MsgpackMixin):
geo_point = GeoPoint()
eph = 0.0
epv = 0.0
velocity = Vector3r()
fix_type = GnssFixType()
time_utc = np.uint64(0)
class GpsData(MsgpackMixin):
time_stamp = np.uint64(0)
gnss = GnssReport()
is_valid = False
class DistanceSensorData(MsgpackMixin):
time_stamp = np.uint64(0)
distance = 0.0
min_distance = 0.0
max_distance = 0.0
relative_pose = Pose()
class Box2D(MsgpackMixin):
min = Vector2r()
max = Vector2r()
class Box3D(MsgpackMixin):
min = Vector3r()
max = Vector3r()
class DetectionInfo(MsgpackMixin):
name = ''
geo_point = GeoPoint()
box2D = Box2D()
box3D = Box3D()
relative_pose = Pose()
class PIDGains():
"""
Struct to store values of PID gains. Used to transmit controller gain values while instantiating
AngleLevel/AngleRate/Velocity/PositionControllerGains objects.
Attributes:
kP (float): Proportional gain
kI (float): Integrator gain
kD (float): Derivative gain
"""
def __init__(self, kp, ki, kd):
self.kp = kp
self.ki = ki
self.kd = kd
def to_list(self):
return [self.kp, self.ki, self.kd]
class AngleRateControllerGains():
"""
Struct to contain controller gains used by angle rate PID controller
Attributes:
roll_gains (PIDGains): kP, kI, kD for roll axis
pitch_gains (PIDGains): kP, kI, kD for pitch axis
yaw_gains (PIDGains): kP, kI, kD for yaw axis
"""
def __init__(self, roll_gains = PIDGains(0.25, 0, 0),
pitch_gains = PIDGains(0.25, 0, 0),
yaw_gains = PIDGains(0.25, 0, 0)):
self.roll_gains = roll_gains
self.pitch_gains = pitch_gains
self.yaw_gains = yaw_gains
def to_lists(self):
return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]
class AngleLevelControllerGains():
"""
Struct to contain controller gains used by angle level PID controller
Attributes:
roll_gains (PIDGains): kP, kI, kD for roll axis
pitch_gains (PIDGains): kP, kI, kD for pitch axis
yaw_gains (PIDGains): kP, kI, kD for yaw axis
"""
def __init__(self, roll_gains = PIDGains(2.5, 0, 0),
pitch_gains = PIDGains(2.5, 0, 0),
yaw_gains = PIDGains(2.5, 0, 0)):
self.roll_gains = roll_gains
self.pitch_gains = pitch_gains
self.yaw_gains = yaw_gains
def to_lists(self):
return [self.roll_gains.kp, self.pitch_gains.kp, self.yaw_gains.kp], [self.roll_gains.ki, self.pitch_gains.ki, self.yaw_gains.ki], [self.roll_gains.kd, self.pitch_gains.kd, self.yaw_gains.kd]
class VelocityControllerGains():
"""
Struct to contain controller gains used by velocity PID controller
Attributes:
x_gains (PIDGains): kP, kI, kD for X axis
y_gains (PIDGains): kP, kI, kD for Y axis
z_gains (PIDGains): kP, kI, kD for Z axis
"""
def __init__(self, x_gains = PIDGains(0.2, 0, 0),
y_gains = PIDGains(0.2, 0, 0),
z_gains = PIDGains(2.0, 2.0, 0)):
self.x_gains = x_gains
self.y_gains = y_gains
self.z_gains = z_gains
def to_lists(self):
return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]
class PositionControllerGains():
"""
Struct to contain controller gains used by position PID controller
Attributes:
x_gains (PIDGains): kP, kI, kD for X axis
y_gains (PIDGains): kP, kI, kD for Y axis
z_gains (PIDGains): kP, kI, kD for Z axis
"""
def __init__(self, x_gains = PIDGains(0.25, 0, 0),
y_gains = PIDGains(0.25, 0, 0),
z_gains = PIDGains(0.25, 0, 0)):
self.x_gains = x_gains
self.y_gains = y_gains
self.z_gains = z_gains
def to_lists(self):
return [self.x_gains.kp, self.y_gains.kp, self.z_gains.kp], [self.x_gains.ki, self.y_gains.ki, self.z_gains.ki], [self.x_gains.kd, self.y_gains.kd, self.z_gains.kd]
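# --- Illustrative sketch (hypothetical helper, added for reference) ---
# Shows how the gain structs above flatten into the (kp, ki, kd) lists that
# to_lists() produces; the multirotor client's gain-setter APIs (not included
# in this excerpt) consume these lists.
def _example_velocity_gains():
    gains = VelocityControllerGains(
        x_gains=PIDGains(0.2, 0.0, 0.0),
        y_gains=PIDGains(0.2, 0.0, 0.0),
        z_gains=PIDGains(2.0, 2.0, 0.0))
    kp, ki, kd = gains.to_lists()  # ([0.2, 0.2, 2.0], [0.0, 0.0, 2.0], [0.0, 0.0, 0.0])
    return kp, ki, kd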
class MeshPositionVertexBuffersResponse(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
vertices = 0.0
indices = 0.0
name = ''
# File: superboySB/SBDrone_deprecated/src/HITL/airsim/client.py
from __future__ import print_function
from .utils import *
from .types import *
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
import time
import math
import logging
class VehicleClient:
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
if (ip == ""):
ip = "127.0.0.1"
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = timeout_value, pack_encoding = 'utf-8', unpack_encoding = 'utf-8')
#----------------------------------- Common vehicle APIs ---------------------------------------------
def reset(self):
"""
Reset the vehicle to its original starting state
Note that you must call `enableApiControl` and `armDisarm` again after the call to reset
"""
self.client.call('reset')
def ping(self):
"""
If the connection is established then this call will return true, otherwise it will block until timeout
Returns:
bool:
"""
return self.client.call('ping')
def getClientVersion(self):
return 1 # sync with C++ client
def getServerVersion(self):
return self.client.call('getServerVersion')
def getMinRequiredServerVersion(self):
return 1 # sync with C++ client
def getMinRequiredClientVersion(self):
return self.client.call('getMinRequiredClientVersion')
#basic flight control
def enableApiControl(self, is_enabled, vehicle_name = ''):
"""
Enables or disables API control for vehicle corresponding to vehicle_name
Args:
is_enabled (bool): True to enable, False to disable API control
vehicle_name (str, optional): Name of the vehicle to send this command to
"""
self.client.call('enableApiControl', is_enabled, vehicle_name)
def isApiControlEnabled(self, vehicle_name = ''):
"""
Returns true if API control is established.
If false (which is the default) then API calls will be ignored. After a successful call to `enableApiControl`, `isApiControlEnabled` should return true.
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
bool: If API control is enabled
"""
return self.client.call('isApiControlEnabled', vehicle_name)
def armDisarm(self, arm, vehicle_name = ''):
"""
Arms or disarms vehicle
Args:
arm (bool): True to arm, False to disarm the vehicle
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
bool: Success
"""
return self.client.call('armDisarm', arm, vehicle_name)
def simPause(self, is_paused):
"""
Pauses simulation
Args:
is_paused (bool): True to pause the simulation, False to release
"""
self.client.call('simPause', is_paused)
def simIsPause(self):
"""
Returns true if the simulation is paused
Returns:
bool: If the simulation is paused
"""
return self.client.call("simIsPaused")
def simContinueForTime(self, seconds):
"""
Continue the simulation for the specified number of seconds
Args:
seconds (float): Time to run the simulation for
"""
self.client.call('simContinueForTime', seconds)
def simContinueForFrames(self, frames):
"""
Continue (or resume if paused) the simulation for the specified number of frames, after which the simulation will be paused.
Args:
frames (int): Frames to run the simulation for
"""
self.client.call('simContinueForFrames', frames)
def getHomeGeoPoint(self, vehicle_name = ''):
"""
Get the Home location of the vehicle
Args:
vehicle_name (str, optional): Name of vehicle to get home location of
Returns:
GeoPoint: Home location of the vehicle
"""
return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint', vehicle_name))
def confirmConnection(self):
"""
Checks the state of the connection and reports it in the console so the user can see the connection progress.
"""
if self.ping():
print("Connected!")
else:
print("Ping returned false!")
server_ver = self.getServerVersion()
client_ver = self.getClientVersion()
server_min_ver = self.getMinRequiredServerVersion()
client_min_ver = self.getMinRequiredClientVersion()
ver_info = "Client Ver:" + str(client_ver) + " (Min Req: " + str(client_min_ver) + \
"), Server Ver:" + str(server_ver) + " (Min Req: " + str(server_min_ver) + ")"
if server_ver < server_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim server is of older version and not supported by this client. Please upgrade!")
elif client_ver < client_min_ver:
print(ver_info, file=sys.stderr)
print("AirSim client is of older version and not supported by this server. Please upgrade!")
else:
print(ver_info)
print('')
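# Illustrative usage sketch (comments only, hypothetical): the typical
# bootstrap sequence before issuing commands, assuming a simulator is
# listening on the default RPC port (41451):
#
#     client = VehicleClient()       # or MultirotorClient() for drones
#     client.confirmConnection()     # prints version info / "Connected!"
#     client.enableApiControl(True)
#     client.armDisarm(True)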
def simSetLightIntensity(self, light_name, intensity):
"""
Change intensity of named light
Args:
light_name (str): Name of light to change
intensity (float): New intensity value
Returns:
bool: True if successful, otherwise False
"""
return self.client.call("simSetLightIntensity", light_name, intensity)
def simSwapTextures(self, tags, tex_id = 0, component_id = 0, material_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
tags (str): string of "," or ", " delimited tags to identify on which actors to perform the swap
tex_id (int, optional): indexes the array of textures assigned to each actor undergoing a swap
If out-of-bounds for some object's texture set, it will be taken modulo the number of textures that were available
component_id (int, optional):
material_id (int, optional):
Returns:
list[str]: List of objects which matched the provided tags and had the texture swap performed
"""
return self.client.call("simSwapTextures", tags, tex_id, component_id, material_id)
def simSetObjectMaterial(self, object_name, material_name, component_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
object_name (str): name of object to set material for
material_name (str): name of material to set for object
component_id (int, optional) : index of material elements
Returns:
bool: True if material was set
"""
return self.client.call("simSetObjectMaterial", object_name, material_name, component_id)
def simSetObjectMaterialFromTexture(self, object_name, texture_path, component_id = 0):
"""
Runtime Swap Texture API
See https://microsoft.github.io/AirSim/retexturing/ for details
Args:
object_name (str): name of object to set material for
texture_path (str): path to texture to set for object
component_id (int, optional) : index of material elements
Returns:
bool: True if material was set
"""
return self.client.call("simSetObjectMaterialFromTexture", object_name, texture_path, component_id)
# time-of-day control
def simSetTimeOfDay(self, is_enabled, start_datetime = "", is_start_datetime_dst = False, celestial_clock_speed = 1, update_interval_secs = 60, move_sun = True):
"""
Control the position of Sun in the environment
Sun's position is computed using the coordinates specified in `OriginGeopoint` in settings for the date-time specified in the argument,
else if the string is empty, current date & time is used
Args:
is_enabled (bool): True to enable time-of-day effect, False to reset the position to original
start_datetime (str, optional): Date & Time in %Y-%m-%d %H:%M:%S format, e.g. `2018-02-12 15:20:00`
is_start_datetime_dst (bool, optional): True to adjust for Daylight Savings Time
celestial_clock_speed (float, optional): Run celestial clock faster or slower than simulation clock
E.g. Value 100 means for every 1 second of simulation clock, Sun's position is advanced by 100 seconds
so Sun will move in sky much faster
update_interval_secs (float, optional): Interval to update the Sun's position
move_sun (bool, optional): Whether or not to move the Sun
"""
self.client.call('simSetTimeOfDay', is_enabled, start_datetime, is_start_datetime_dst, celestial_clock_speed, update_interval_secs, move_sun)
#weather
def simEnableWeather(self, enable):
"""
Enable Weather effects. Needs to be called before using `simSetWeatherParameter` API
Args:
enable (bool): True to enable, False to disable
"""
self.client.call('simEnableWeather', enable)
def simSetWeatherParameter(self, param, val):
"""
Enable various weather effects
Args:
param (WeatherParameter): Weather effect to be enabled
val (float): Intensity of the effect, Range 0-1
"""
self.client.call('simSetWeatherParameter', param, val)
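# Illustrative usage sketch (comments only, hypothetical), assuming `client`
# is a connected VehicleClient; weather must be enabled before individual
# effects take hold:
#
#     client.simEnableWeather(True)
#     client.simSetWeatherParameter(WeatherParameter.Rain, 0.75)  # intensity 0-1
#     client.simSetWeatherParameter(WeatherParameter.Fog, 0.25)
#     client.simEnableWeather(False)  # reset all effects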
#camera control
#simGetImage returns compressed png in array of bytes
#image_type uses one of the ImageType members
def simGetImage(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Get a single image
Returns bytes of png format image which can be dumped into a binary file to create a .png image
`string_to_uint8_array()` can be used to convert into a NumPy uint8 array
See https://microsoft.github.io/AirSim/image_apis/ for details
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Name of the vehicle with the camera
external (bool, optional): Whether the camera is an External Camera
Returns:
Binary string literal of compressed png image
"""
#todo : in future remove below, it's only for compatibility to pre v1.2
camera_name = str(camera_name)
#because this method returns std::vector < uint8>, msgpack decides to encode it as a string unfortunately.
result = self.client.call('simGetImage', camera_name, image_type, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simGetImages(self, requests, vehicle_name = '', external = False):
"""
Get multiple images
See https://microsoft.github.io/AirSim/image_apis/ for details and examples
Args:
requests (list[ImageRequest]): Images required
vehicle_name (str, optional): Name of vehicle associated with the camera
external (bool, optional): Whether the camera is an External Camera
Returns:
list[ImageResponse]:
"""
responses_raw = self.client.call('simGetImages', requests, vehicle_name, external)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
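# Illustrative usage sketch (comments only, hypothetical), assuming `client`
# is a connected VehicleClient: request an uncompressed RGB scene image and
# reshape the raw bytes into a NumPy image array.
#
#     responses = client.simGetImages([
#         ImageRequest('0', ImageType.Scene, pixels_as_float=False, compress=False)])
#     rgb = responses[0]
#     img1d = np.frombuffer(rgb.image_data_uint8, dtype=np.uint8)
#     img_rgb = img1d.reshape(rgb.height, rgb.width, 3)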
#CinemAirSim
def simGetPresetLensSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetPresetLensSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simGetLensSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetLensSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetPresetLensSettings(self, preset_lens_settings, camera_name, vehicle_name = '', external = False):
self.client.call("simSetPresetLensSettings", preset_lens_settings, camera_name, vehicle_name, external)
def simGetPresetFilmbackSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetPresetFilmbackSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetPresetFilmbackSettings(self, preset_filmback_settings, camera_name, vehicle_name = '', external = False):
self.client.call("simSetPresetFilmbackSettings", preset_filmback_settings, camera_name, vehicle_name, external)
def simGetFilmbackSettings(self, camera_name, vehicle_name = '', external = False):
result = self.client.call('simGetFilmbackSettings', camera_name, vehicle_name, external)
if (result == "" or result == "\0"):
return None
return result
def simSetFilmbackSettings(self, sensor_width, sensor_height, camera_name, vehicle_name = '', external = False):
return self.client.call("simSetFilmbackSettings", sensor_width, sensor_height, camera_name, vehicle_name, external)
def simGetFocalLength(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocalLength", camera_name, vehicle_name, external)
def simSetFocalLength(self, focal_length, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocalLength", focal_length, camera_name, vehicle_name, external)
def simEnableManualFocus(self, enable, camera_name, vehicle_name = '', external = False):
self.client.call("simEnableManualFocus", enable, camera_name, vehicle_name, external)
def simGetFocusDistance(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocusDistance", camera_name, vehicle_name, external)
def simSetFocusDistance(self, focus_distance, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocusDistance", focus_distance, camera_name, vehicle_name, external)
def simGetFocusAperture(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetFocusAperture", camera_name, vehicle_name, external)
def simSetFocusAperture(self, focus_aperture, camera_name, vehicle_name = '', external = False):
self.client.call("simSetFocusAperture", focus_aperture, camera_name, vehicle_name, external)
def simEnableFocusPlane(self, enable, camera_name, vehicle_name = '', external = False):
self.client.call("simEnableFocusPlane", enable, camera_name, vehicle_name, external)
def simGetCurrentFieldOfView(self, camera_name, vehicle_name = '', external = False):
return self.client.call("simGetCurrentFieldOfView", camera_name, vehicle_name, external)
#End CinemAirSim
def simTestLineOfSightToPoint(self, point, vehicle_name = ''):
"""
Returns whether the target point is visible from the perspective of the inputted vehicle
Args:
point (GeoPoint): target point
vehicle_name (str, optional): Name of vehicle
Returns:
[bool]: Success
"""
return self.client.call('simTestLineOfSightToPoint', point, vehicle_name)
def simTestLineOfSightBetweenPoints(self, point1, point2):
"""
Returns whether the target point is visible from the perspective of the source point
Args:
point1 (GeoPoint): source point
point2 (GeoPoint): target point
Returns:
[bool]: Success
"""
return self.client.call('simTestLineOfSightBetweenPoints', point1, point2)
def simGetWorldExtents(self):
"""
Returns a list of GeoPoints representing the minimum and maximum extents of the world
Returns:
list[GeoPoint]
"""
responses_raw = self.client.call('simGetWorldExtents')
return [GeoPoint.from_msgpack(response_raw) for response_raw in responses_raw]
def simRunConsoleCommand(self, command):
"""
Allows the client to execute a command in Unreal's native console, via an API.
Affords access to the countless built-in commands such as "stat unit", "stat fps", "open [map]", adjust any config settings, etc. etc.
Allows the user to create bespoke APIs very easily, by adding a custom event to the level blueprint, and then calling the console command "ce MyEventName [args]". No recompilation of AirSim needed!
Args:
command ([string]): Desired Unreal Engine Console command to run
Returns:
[bool]: Success
"""
return self.client.call('simRunConsoleCommand', command)
#gets the static meshes in the unreal scene
def simGetMeshPositionVertexBuffers(self):
"""
Returns the static meshes that make up the scene
See https://microsoft.github.io/AirSim/meshes/ for details and how to use this
Returns:
list[MeshPositionVertexBuffersResponse]:
"""
responses_raw = self.client.call('simGetMeshPositionVertexBuffers')
return [MeshPositionVertexBuffersResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def simGetCollisionInfo(self, vehicle_name = ''):
"""
Args:
vehicle_name (str, optional): Name of the Vehicle to get the info of
Returns:
CollisionInfo:
"""
return CollisionInfo.from_msgpack(self.client.call('simGetCollisionInfo', vehicle_name))
def simSetVehiclePose(self, pose, ignore_collision, vehicle_name = ''):
"""
Set the pose of the vehicle
If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values
Args:
pose (Pose): Desired Pose of the vehicle
ignore_collision (bool): Whether to ignore any collision or not
vehicle_name (str, optional): Name of the vehicle to move
"""
self.client.call('simSetVehiclePose', pose, ignore_collision, vehicle_name)
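# Illustrative usage sketch (comments only, hypothetical): teleport the
# vehicle while keeping its current orientation by passing NaN components,
# as described in the docstring above.
#
#     pose = Pose(Vector3r(10.0, 0.0, -5.0), Quaternionr.nanQuaternionr())
#     client.simSetVehiclePose(pose, ignore_collision=True)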
def simGetVehiclePose(self, vehicle_name = ''):
"""
The position inside the returned Pose is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('simGetVehiclePose', vehicle_name)
return Pose.from_msgpack(pose)
def simSetTraceLine(self, color_rgba, thickness=1.0, vehicle_name = ''):
"""
Modify the color and thickness of the line when Tracing is enabled
Tracing can be enabled by pressing T in the Editor or setting `EnableTrace` to `True` in the Vehicle Settings
Args:
color_rgba (list): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of the line
vehicle_name (string, optional): Name of the vehicle to set Trace line values for
"""
self.client.call('simSetTraceLine', color_rgba, thickness, vehicle_name)
def simGetObjectPose(self, object_name):
"""
The position inside the returned Pose is in the world frame
Args:
object_name (str): Object to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('simGetObjectPose', object_name)
return Pose.from_msgpack(pose)
def simSetObjectPose(self, object_name, pose, teleport = True):
"""
Set the pose of the object(actor) in the environment
The specified actor must have Mobility set to movable, otherwise there will be undefined behaviour.
See https://www.unrealengine.com/en-US/blog/moving-physical-objects for details on how to set Mobility and the effect of Teleport parameter
Args:
object_name (str): Name of the object(actor) to move
pose (Pose): Desired Pose of the object
teleport (bool, optional): Whether to move the object immediately without affecting its velocity
Returns:
bool: If the move was successful
"""
return self.client.call('simSetObjectPose', object_name, pose, teleport)
def simGetObjectScale(self, object_name):
"""
Gets scale of an object in the world
Args:
object_name (str): Object to get the scale of
Returns:
airsim.Vector3r: Scale
"""
scale = self.client.call('simGetObjectScale', object_name)
return Vector3r.from_msgpack(scale)
def simSetObjectScale(self, object_name, scale_vector):
"""
Sets scale of an object in the world
Args:
object_name (str): Object to set the scale of
scale_vector (airsim.Vector3r): Desired scale of object
Returns:
bool: True if scale change was successful
"""
return self.client.call('simSetObjectScale', object_name, scale_vector)
def simListSceneObjects(self, name_regex = '.*'):
"""
Lists the objects present in the environment
Default behaviour is to list all objects; a regex can be used to return a smaller list of matching objects or actors
Args:
name_regex (str, optional): String to match actor names against, e.g. "Cylinder.*"
Returns:
list[str]: List containing all the names
"""
return self.client.call('simListSceneObjects', name_regex)
def simLoadLevel(self, level_name):
"""
Loads a level specified by its name
Args:
level_name (str): Name of the level to load
Returns:
bool: True if the level was successfully loaded
"""
return self.client.call('simLoadLevel', level_name)
def simListAssets(self):
"""
Lists all the assets present in the Asset Registry
Returns:
list[str]: Names of all the assets
"""
return self.client.call('simListAssets')
def simSpawnObject(self, object_name, asset_name, pose, scale, physics_enabled=False, is_blueprint=False):
"""Spawned selected object in the world
Args:
object_name (str): Desired name of new object
asset_name (str): Name of asset(mesh) in the project database
pose (airsim.Pose): Desired pose of object
scale (airsim.Vector3r): Desired scale of object
physics_enabled (bool, optional): Whether to enable physics for the object
is_blueprint (bool, optional): Whether to spawn a blueprint or an actor
Returns:
str: Name of spawned object, in case it had to be modified
"""
return self.client.call('simSpawnObject', object_name, asset_name, pose, scale, physics_enabled, is_blueprint)
def simDestroyObject(self, object_name):
"""Removes selected object from the world
Args:
object_name (str): Name of object to be removed
Returns:
bool: True if object is queued up for removal
"""
return self.client.call('simDestroyObject', object_name)
def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
"""
Set segmentation ID for specific objects
See https://microsoft.github.io/AirSim/image_apis/#segmentation for details
Args:
mesh_name (str): Name of the mesh to set the ID of (supports regex)
object_id (int): Object ID to be set, range 0-255
RBG values for IDs can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt
is_name_regex (bool, optional): Whether the mesh name is a regex
Returns:
bool: If the mesh was found
"""
return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)
def simGetSegmentationObjectID(self, mesh_name):
"""
Returns Object ID for the given mesh name
Mapping of Object IDs to RGB values can be seen at https://microsoft.github.io/AirSim/seg_rgbs.txt
Args:
mesh_name (str): Name of the mesh to get the ID of
"""
return self.client.call('simGetSegmentationObjectID', mesh_name)
def simAddDetectionFilterMeshName(self, camera_name, image_type, mesh_name, vehicle_name = '', external = False):
"""
Add mesh name to detect, in wildcard format
For example: simAddDetectionFilterMeshName("Car_*") will detect all instances named "Car_*"
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
mesh_name (str): mesh name in wild card format
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simAddDetectionFilterMeshName', camera_name, image_type, mesh_name, vehicle_name, external)
def simSetDetectionFilterRadius(self, camera_name, image_type, radius_cm, vehicle_name = '', external = False):
"""
Set detection radius for all cameras
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
radius_cm (int): Radius in [cm]
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simSetDetectionFilterRadius', camera_name, image_type, radius_cm, vehicle_name, external)
def simClearDetectionMeshNames(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Clear all mesh names from detection filter
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simClearDetectionMeshNames', camera_name, image_type, vehicle_name, external)
def simGetDetections(self, camera_name, image_type, vehicle_name = '', external = False):
"""
Get current detections
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
image_type (ImageType): Type of image required
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
DetectionInfo array
"""
responses_raw = self.client.call('simGetDetections', camera_name, image_type, vehicle_name, external)
return [DetectionInfo.from_msgpack(response_raw) for response_raw in responses_raw]
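# Illustrative usage sketch (comments only, hypothetical), assuming a mesh
# whose name matches "Car_*" exists in the scene: configure a detection
# filter on a camera and read back DetectionInfo results.
#
#     client.simSetDetectionFilterRadius('0', ImageType.Scene, 200 * 100)  # 200 m, in cm
#     client.simAddDetectionFilterMeshName('0', ImageType.Scene, 'Car_*')
#     detections = client.simGetDetections('0', ImageType.Scene)
#     client.simClearDetectionMeshNames('0', ImageType.Scene)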
def simPrintLogMessage(self, message, message_param = "", severity = 0):
"""
Prints the specified message in the simulator's window.
If message_param is supplied, it is printed next to the message. If this API is then called again with the same message value
but a different message_param, the previous line is overwritten with the new one instead of a new line being added to the display.
For example, `simPrintLogMessage("Iteration: ", to_string(i))` keeps updating the same line on display when the API is called with different values of i.
Valid values of the severity parameter are 0 to 3 inclusive, corresponding to different colors.
Args:
message (str): Message to be printed
message_param (str, optional): Parameter to be printed next to the message
severity (int, optional): Range 0-3, inclusive, corresponding to the severity of the message
"""
self.client.call('simPrintLogMessage', message, message_param, severity)
def simGetCameraInfo(self, camera_name, vehicle_name = '', external=False):
"""
Get details about the camera
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
CameraInfo:
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
return CameraInfo.from_msgpack(self.client.call('simGetCameraInfo', str(camera_name), vehicle_name, external))
def simGetDistortionParams(self, camera_name, vehicle_name = '', external = False):
"""
Get camera distortion parameters
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
Returns:
List (float): List of distortion parameter values corresponding to K1, K2, K3, P1, P2 respectively.
"""
return self.client.call('simGetDistortionParams', str(camera_name), vehicle_name, external)
def simSetDistortionParams(self, camera_name, distortion_params, vehicle_name = '', external = False):
"""
Set camera distortion parameters
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
distortion_params (dict): Dictionary of distortion param names and corresponding values
{"K1": 0.0, "K2": 0.0, "K3": 0.0, "P1": 0.0, "P2": 0.0}
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
for param_name, value in distortion_params.items():
self.simSetDistortionParam(camera_name, param_name, value, vehicle_name, external)
def simSetDistortionParam(self, camera_name, param_name, value, vehicle_name = '', external = False):
"""
Set single camera distortion parameter
Args:
camera_name (str): Name of the camera, for backwards compatibility, ID numbers such as 0,1,etc. can also be used
param_name (str): Name of distortion parameter
value (float): Value of distortion parameter
vehicle_name (str, optional): Vehicle which the camera is associated with
external (bool, optional): Whether the camera is an External Camera
"""
self.client.call('simSetDistortionParam', str(camera_name), param_name, value, vehicle_name, external)
def simSetCameraPose(self, camera_name, pose, vehicle_name = '', external = False):
"""
- Control the pose of a selected camera
Args:
camera_name (str): Name of the camera to be controlled
pose (Pose): Pose representing the desired position and orientation of the camera
vehicle_name (str, optional): Name of vehicle which the camera corresponds to
external (bool, optional): Whether the camera is an External Camera
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
self.client.call('simSetCameraPose', str(camera_name), pose, vehicle_name, external)
def simSetCameraFov(self, camera_name, fov_degrees, vehicle_name = '', external = False):
"""
- Control the field of view of a selected camera
Args:
camera_name (str): Name of the camera to be controlled
fov_degrees (float): Value of field of view in degrees
vehicle_name (str, optional): Name of vehicle which the camera corresponds to
external (bool, optional): Whether the camera is an External Camera
"""
#TODO : below str() conversion is only needed for legacy reason and should be removed in future
self.client.call('simSetCameraFov', str(camera_name), fov_degrees, vehicle_name, external)
def simGetGroundTruthKinematics(self, vehicle_name = ''):
"""
Get Ground truth kinematics of the vehicle
The position inside the returned KinematicsState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
KinematicsState: Ground truth of the vehicle
"""
kinematics_state = self.client.call('simGetGroundTruthKinematics', vehicle_name)
return KinematicsState.from_msgpack(kinematics_state)
simGetGroundTruthKinematics.__annotations__ = {'return': KinematicsState}
def simSetKinematics(self, state, ignore_collision, vehicle_name = ''):
"""
Set the kinematics state of the vehicle
If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values
Args:
state (KinematicsState): Desired KinematicsState of the vehicle
ignore_collision (bool): Whether to ignore any collision or not
vehicle_name (str, optional): Name of the vehicle to move
"""
self.client.call('simSetKinematics', state, ignore_collision, vehicle_name)
def simGetGroundTruthEnvironment(self, vehicle_name = ''):
"""
Get ground truth environment state
The position inside the returned EnvironmentState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of the vehicle
Returns:
EnvironmentState: Ground truth environment state
"""
env_state = self.client.call('simGetGroundTruthEnvironment', vehicle_name)
return EnvironmentState.from_msgpack(env_state)
simGetGroundTruthEnvironment.__annotations__ = {'return': EnvironmentState}
#sensor APIs
def getImuData(self, imu_name = '', vehicle_name = ''):
"""
Args:
imu_name (str, optional): Name of IMU to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
ImuData:
"""
return ImuData.from_msgpack(self.client.call('getImuData', imu_name, vehicle_name))
def getBarometerData(self, barometer_name = '', vehicle_name = ''):
"""
Args:
barometer_name (str, optional): Name of Barometer to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
BarometerData:
"""
return BarometerData.from_msgpack(self.client.call('getBarometerData', barometer_name, vehicle_name))
def getMagnetometerData(self, magnetometer_name = '', vehicle_name = ''):
"""
Args:
magnetometer_name (str, optional): Name of Magnetometer to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
MagnetometerData:
"""
return MagnetometerData.from_msgpack(self.client.call('getMagnetometerData', magnetometer_name, vehicle_name))
def getGpsData(self, gps_name = '', vehicle_name = ''):
"""
Args:
gps_name (str, optional): Name of GPS to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
GpsData:
"""
return GpsData.from_msgpack(self.client.call('getGpsData', gps_name, vehicle_name))
def getDistanceSensorData(self, distance_sensor_name = '', vehicle_name = ''):
"""
Args:
distance_sensor_name (str, optional): Name of Distance Sensor to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
DistanceSensorData:
"""
return DistanceSensorData.from_msgpack(self.client.call('getDistanceSensorData', distance_sensor_name, vehicle_name))
def getLidarData(self, lidar_name = '', vehicle_name = ''):
"""
Args:
lidar_name (str, optional): Name of Lidar to get data from, specified in settings.json
vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
Returns:
LidarData:
"""
return LidarData.from_msgpack(self.client.call('getLidarData', lidar_name, vehicle_name))
def simGetLidarSegmentation(self, lidar_name = '', vehicle_name = ''):
"""
NOTE: Deprecated API, use `getLidarData()` API instead
Returns Segmentation ID of each point's collided object in the last Lidar update
Args:
lidar_name (str, optional): Name of Lidar sensor
vehicle_name (str, optional): Name of the vehicle with the sensor
Returns:
list[int]: Segmentation IDs of the objects
"""
logging.warning("simGetLidarSegmentation API is deprecated, use getLidarData() API instead")
return self.getLidarData(lidar_name, vehicle_name).segmentation
#Plotting APIs
def simFlushPersistentMarkers(self):
"""
Clear any persistent markers - those plotted with setting `is_persistent=True` in the APIs below
"""
self.client.call('simFlushPersistentMarkers')
def simPlotPoints(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], size = 10.0, duration = -1.0, is_persistent = False):
"""
Plot a list of 3D points in World NED frame
Args:
points (list[Vector3r]): List of Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
size (float, optional): Size of plotted point
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotPoints', points, color_rgba, size, duration, is_persistent)
def simPlotLineStrip(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a line strip in World NED frame, defined from points[0] to points[1], points[1] to points[2], ... , points[n-2] to points[n-1]
Args:
points (list[Vector3r]): List of 3D locations of line start and end points, specified as Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotLineStrip', points, color_rgba, thickness, duration, is_persistent)
def simPlotLineList(self, points, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a line strip in World NED frame, defined from points[0] to points[1], points[2] to points[3], ... , points[n-2] to points[n-1]
Args:
points (list[Vector3r]): List of 3D locations of line start and end points, specified as Vector3r objects. Must be even
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotLineList', points, color_rgba, thickness, duration, is_persistent)
def simPlotArrows(self, points_start, points_end, color_rgba=[1.0, 0.0, 0.0, 1.0], thickness = 5.0, arrow_size = 2.0, duration = -1.0, is_persistent = False):
"""
Plots a list of arrows in World NED frame, defined from points_start[0] to points_end[0], points_start[1] to points_end[1], ... , points_start[n-1] to points_end[n-1]
Args:
points_start (list[Vector3r]): List of 3D start positions of the arrows, specified as Vector3r objects
points_end (list[Vector3r]): List of 3D end positions of the arrows, specified as Vector3r objects
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
thickness (float, optional): Thickness of line
arrow_size (float, optional): Size of arrow head
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotArrows', points_start, points_end, color_rgba, thickness, arrow_size, duration, is_persistent)
def simPlotStrings(self, strings, positions, scale = 5, color_rgba=[1.0, 0.0, 0.0, 1.0], duration = -1.0):
"""
Plots a list of strings at desired positions in World NED frame.
Args:
strings (list[String], optional): List of strings to plot
positions (list[Vector3r]): List of positions where the strings should be plotted. Should be in one-to-one correspondence with the strings' list
scale (float, optional): Font scale of the plotted strings
color_rgba (list, optional): desired RGBA values from 0.0 to 1.0
duration (float, optional): Duration (seconds) to plot for
"""
self.client.call('simPlotStrings', strings, positions, scale, color_rgba, duration)
def simPlotTransforms(self, poses, scale = 5.0, thickness = 5.0, duration = -1.0, is_persistent = False):
"""
Plots a list of transforms in World NED frame.
Args:
poses (list[Pose]): List of Pose objects representing the transforms to plot
scale (float, optional): Length of transforms' axes
thickness (float, optional): Thickness of transforms' axes
duration (float, optional): Duration (seconds) to plot for
is_persistent (bool, optional): If set to True, the desired object will be plotted for infinite time.
"""
self.client.call('simPlotTransforms', poses, scale, thickness, duration, is_persistent)
def simPlotTransformsWithNames(self, poses, names, tf_scale = 5.0, tf_thickness = 5.0, text_scale = 10.0, text_color_rgba = [1.0, 0.0, 0.0, 1.0], duration = -1.0):
"""
Plots a list of transforms with their names in World NED frame.
Args:
poses (list[Pose]): List of Pose objects representing the transforms to plot
names (list[string]): List of strings with one-to-one correspondence to list of poses
tf_scale (float, optional): Length of transforms' axes
tf_thickness (float, optional): Thickness of transforms' axes
text_scale (float, optional): Font scale of transform name
text_color_rgba (list, optional): desired RGBA values from 0.0 to 1.0 for the transform name
duration (float, optional): Duration (seconds) to plot for
"""
self.client.call('simPlotTransformsWithNames', poses, names, tf_scale, tf_thickness, text_scale, text_color_rgba, duration)
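# Illustrative usage sketch (comments only, hypothetical): plot a few debug
# markers in the World NED frame; persistent markers stay until
# simFlushPersistentMarkers() is called.
#
#     pts = [Vector3r(0, 0, -2), Vector3r(5, 0, -2), Vector3r(5, 5, -2)]
#     client.simPlotPoints(pts, color_rgba=[0.0, 1.0, 0.0, 1.0], size=15.0, is_persistent=True)
#     client.simPlotLineStrip(pts, thickness=3.0, duration=10.0)
#     client.simFlushPersistentMarkers()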
def cancelLastTask(self, vehicle_name = ''):
"""
Cancel previous Async task
Args:
vehicle_name (str, optional): Name of the vehicle
"""
self.client.call('cancelLastTask', vehicle_name)
#Recording APIs
def startRecording(self):
"""
Start Recording
Recording will be done according to the settings
"""
self.client.call('startRecording')
def stopRecording(self):
"""
Stop Recording
"""
self.client.call('stopRecording')
def isRecording(self):
"""
Whether Recording is running or not
Returns:
bool: True if Recording, else False
"""
return self.client.call('isRecording')
def simSetWind(self, wind):
"""
Set simulated wind, in World frame, NED direction, m/s
Args:
wind (Vector3r): Wind, in World frame, NED direction, in m/s
"""
self.client.call('simSetWind', wind)
def simCreateVoxelGrid(self, position, x, y, z, res, of):
"""
Construct and save a binvox-formatted voxel grid of environment
Args:
position (Vector3r): Position around which voxel grid is centered in m
x, y, z (int): Size of each voxel grid dimension in m
res (float): Resolution of voxel grid in m
of (str): Name of output file to save voxel grid as
Returns:
bool: True if output written to file successfully, else False
"""
return self.client.call('simCreateVoxelGrid', position, x, y, z, res, of)
#Add new vehicle via RPC
def simAddVehicle(self, vehicle_name, vehicle_type, pose, pawn_path = ""):
"""
Create vehicle at runtime
Args:
vehicle_name (str): Name of the vehicle being created
vehicle_type (str): Type of vehicle, e.g. "simpleflight"
pose (Pose): Initial pose of the vehicle
pawn_path (str, optional): Vehicle blueprint path, default empty which uses the default blueprint for the vehicle type
Returns:
bool: Whether vehicle was created
"""
return self.client.call('simAddVehicle', vehicle_name, vehicle_type, pose, pawn_path)
def listVehicles(self):
"""
Lists the names of current vehicles
Returns:
list[str]: List containing names of all vehicles
"""
return self.client.call('listVehicles')
def getSettingsString(self):
"""
Fetch the settings text being used by AirSim
Returns:
str: Settings text in JSON format
"""
return self.client.call('getSettingsString')
#----------------------------------- Multirotor APIs ---------------------------------------------
class MultirotorClient(VehicleClient, object):
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
super(MultirotorClient, self).__init__(ip, port, timeout_value)
def takeoffAsync(self, timeout_sec = 20, vehicle_name = ''):
"""
Takeoff vehicle to 3m above ground. Vehicle should not be moving when this API is used
Args:
timeout_sec (int, optional): Timeout for the vehicle to reach desired altitude
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('takeoff', timeout_sec, vehicle_name)
def landAsync(self, timeout_sec = 60, vehicle_name = ''):
"""
Land the vehicle
Args:
timeout_sec (int, optional): Timeout for the vehicle to land
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('land', timeout_sec, vehicle_name)
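    # Usage sketch (illustrative, not part of the API): the *Async methods return a future, so a
    # typical blocking flight sequence looks like the following. The `client` object below is a
    # hypothetical, already-connected MultirotorClient.
    #
    #   client = MultirotorClient()
    #   client.confirmConnection()
    #   client.enableApiControl(True)
    #   client.armDisarm(True)
    #   client.takeoffAsync().join()   # block until the takeoff finishes
    #   client.landAsync().join()      # block until the landing finishes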
def goHomeAsync(self, timeout_sec = 3e+38, vehicle_name = ''):
"""
Return vehicle to Home i.e. Launch location
Args:
timeout_sec (int, optional): Timeout for the vehicle to reach desired altitude
vehicle_name (str, optional): Name of the vehicle to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('goHome', timeout_sec, vehicle_name)
#APIs for control
def moveByVelocityBodyFrameAsync(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in the X axis of the vehicle's local NED frame.
vy (float): desired velocity in the Y axis of the vehicle's local NED frame.
vz (float): desired velocity in the Z axis of the vehicle's local NED frame.
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocityBodyFrame', vx, vy, vz, duration, drivetrain, yaw_mode, vehicle_name)
def moveByVelocityZBodyFrameAsync(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in the X axis of the vehicle's local NED frame
vy (float): desired velocity in the Y axis of the vehicle's local NED frame
z (float): desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocityZBodyFrame', vx, vy, z, duration, drivetrain, yaw_mode, vehicle_name)
def moveByAngleZAsync(self, pitch, roll, z, yaw, duration, vehicle_name = ''):
logging.warning("moveByAngleZAsync API is deprecated, use moveByRollPitchYawZAsync() API instead")
return self.client.call_async('moveByRollPitchYawZ', roll, -pitch, -yaw, z, duration, vehicle_name)
def moveByAngleThrottleAsync(self, pitch, roll, throttle, yaw_rate, duration, vehicle_name = ''):
logging.warning("moveByAngleThrottleAsync API is deprecated, use moveByRollPitchYawrateThrottleAsync() API instead")
return self.client.call_async('moveByRollPitchYawrateThrottle', roll, -pitch, -yaw_rate, throttle, duration, vehicle_name)
def moveByVelocityAsync(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
Args:
vx (float): desired velocity in world (NED) X axis
vy (float): desired velocity in world (NED) Y axis
vz (float): desired velocity in world (NED) Z axis
duration (float): Desired amount of time (seconds), to send this command for
drivetrain (DrivetrainType, optional):
yaw_mode (YawMode, optional):
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode, vehicle_name)
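    # Usage sketch (illustrative): with an assumed, already-connected `client`, fly north at 2 m/s
    # for 3 seconds in the world NED frame (NED z points down, so a negative vz would climb):
    #
    #   client.moveByVelocityAsync(2.0, 0.0, 0.0, 3.0).join()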
def moveByVelocityZAsync(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
return self.client.call_async('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode, vehicle_name)
def moveOnPathAsync(self, path, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveOnPath', path, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToPositionAsync(self, x, y, z, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToPosition', x, y, z, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToGPSAsync(self, latitude, longitude, altitude, velocity, timeout_sec = 3e+38, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(),
lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToGPS', latitude, longitude, altitude, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToZAsync(self, z, velocity, timeout_sec = 3e+38, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1, vehicle_name = ''):
return self.client.call_async('moveToZ', z, velocity, timeout_sec, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveByManualAsync(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), vehicle_name = ''):
"""
- Read current RC state and use it to control the vehicles.
        The parameters set up constraints on velocity and minimum altitude while flying. If the RC state is detected to violate these constraints,
        that RC state is ignored.
Args:
            vx_max (float): max velocity allowed in x direction
            vy_max (float): max velocity allowed in y direction
            z_min (float): min z allowed for vehicle position
duration (float): after this duration vehicle would switch back to non-manual mode
drivetrain (DrivetrainType): when ForwardOnly, vehicle rotates itself so that its front is always facing the direction of travel. If MaxDegreeOfFreedom then it doesn't do that (crab-like movement)
yaw_mode (YawMode): Specifies if vehicle should face at given angle (is_rate=False) or should be rotating around its axis at given rate (is_rate=True)
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode, vehicle_name)
def rotateToYawAsync(self, yaw, timeout_sec = 3e+38, margin = 5, vehicle_name = ''):
return self.client.call_async('rotateToYaw', yaw, timeout_sec, margin, vehicle_name)
def rotateByYawRateAsync(self, yaw_rate, duration, vehicle_name = ''):
return self.client.call_async('rotateByYawRate', yaw_rate, duration, vehicle_name)
def hoverAsync(self, vehicle_name = ''):
return self.client.call_async('hover', vehicle_name)
def moveByRC(self, rcdata = RCData(), vehicle_name = ''):
return self.client.call('moveByRC', rcdata, vehicle_name)
    #low-level control API
def moveByMotorPWMsAsync(self, front_right_pwm, rear_left_pwm, front_left_pwm, rear_right_pwm, duration, vehicle_name = ''):
"""
- Directly control the motors using PWM values
Args:
front_right_pwm (float): PWM value for the front right motor (between 0.0 to 1.0)
rear_left_pwm (float): PWM value for the rear left motor (between 0.0 to 1.0)
front_left_pwm (float): PWM value for the front left motor (between 0.0 to 1.0)
rear_right_pwm (float): PWM value for the rear right motor (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByMotorPWMs', front_right_pwm, rear_left_pwm, front_left_pwm, rear_right_pwm, duration, vehicle_name)
def moveByRollPitchYawZAsync(self, roll, pitch, yaw, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
- Roll angle, pitch angle, and yaw angle set points are given in **radians**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
yaw (float): Desired yaw angle, in radians.
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawZ', roll, -pitch, -yaw, z, duration, vehicle_name)
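    # Usage sketch (illustrative): with an assumed, already-connected `client`, pitch forward by
    # 0.1 rad for one second while holding an altitude of 2 m (z = -2 in the local NED frame):
    #
    #   client.moveByRollPitchYawZAsync(roll=0.0, pitch=0.1, yaw=0.0, z=-2.0, duration=1.0).join()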
def moveByRollPitchYawThrottleAsync(self, roll, pitch, yaw, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
- Roll angle, pitch angle, and yaw angle are given in **degrees** when using PX4 and in **radians** when using SimpleFlight, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle.
pitch (float): Desired pitch angle.
yaw (float): Desired yaw angle.
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawThrottle', roll, -pitch, -yaw, throttle, duration, vehicle_name)
def moveByRollPitchYawrateThrottleAsync(self, roll, pitch, yaw_rate, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
        - Roll angle and pitch angle set points are given in **radians**, and the yaw rate set point in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
            yaw_rate (float): Desired yaw rate, in radians per second.
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawrateThrottle', roll, -pitch, -yaw_rate, throttle, duration, vehicle_name)
def moveByRollPitchYawrateZAsync(self, roll, pitch, yaw_rate, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
        - Roll angle and pitch angle set points are given in **radians**, and the yaw rate set point in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll (float): Desired roll angle, in radians.
pitch (float): Desired pitch angle, in radians.
            yaw_rate (float): Desired yaw rate, in radians per second.
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByRollPitchYawrateZ', roll, -pitch, -yaw_rate, z, duration, vehicle_name)
def moveByAngleRatesZAsync(self, roll_rate, pitch_rate, yaw_rate, z, duration, vehicle_name = ''):
"""
- z is given in local NED frame of the vehicle.
        - Roll rate, pitch rate, and yaw rate set points are given in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll_rate (float): Desired roll rate, in radians / second
pitch_rate (float): Desired pitch rate, in radians / second
yaw_rate (float): Desired yaw rate, in radians / second
z (float): Desired Z value (in local NED frame of the vehicle)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByAngleRatesZ', roll_rate, -pitch_rate, -yaw_rate, z, duration, vehicle_name)
def moveByAngleRatesThrottleAsync(self, roll_rate, pitch_rate, yaw_rate, throttle, duration, vehicle_name = ''):
"""
- Desired throttle is between 0.0 to 1.0
        - Roll rate, pitch rate, and yaw rate set points are given in **radians per second**, in the body frame.
- The body frame follows the Front Left Up (FLU) convention, and right-handedness.
- Frame Convention:
- X axis is along the **Front** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **roll** angle.
| Hence, rolling with a positive angle is equivalent to translating in the **right** direction, w.r.t. our FLU body frame.
- Y axis is along the **Left** direction of the quadrotor.
| Clockwise rotation about this axis defines a positive **pitch** angle.
| Hence, pitching with a positive angle is equivalent to translating in the **front** direction, w.r.t. our FLU body frame.
- Z axis is along the **Up** direction.
| Clockwise rotation about this axis defines a positive **yaw** angle.
                | Hence, yawing with a positive angle is equivalent to rotating towards the **left** direction w.r.t. our FLU body frame, i.e. anticlockwise in the body XY / FL plane.
Args:
roll_rate (float): Desired roll rate, in radians / second
pitch_rate (float): Desired pitch rate, in radians / second
yaw_rate (float): Desired yaw rate, in radians / second
throttle (float): Desired throttle (between 0.0 to 1.0)
duration (float): Desired amount of time (seconds), to send this command for
vehicle_name (str, optional): Name of the multirotor to send this command to
Returns:
msgpackrpc.future.Future: future. call .join() to wait for method to finish. Example: client.METHOD().join()
"""
return self.client.call_async('moveByAngleRatesThrottle', roll_rate, -pitch_rate, -yaw_rate, throttle, duration, vehicle_name)
def setAngleRateControllerGains(self, angle_rate_gains=AngleRateControllerGains(), vehicle_name = ''):
"""
        - Modifying these gains will have an effect on *ALL* move*() APIs.
            This is because any velocity setpoint is converted to an angle level setpoint, which is tracked with an angle level controller.
            That angle level setpoint is itself tracked with an angle rate controller.
- This function should only be called if the default angle rate control PID gains need to be modified.
Args:
angle_rate_gains (AngleRateControllerGains):
- Correspond to the roll, pitch, yaw axes, defined in the body frame.
- Pass AngleRateControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setAngleRateControllerGains', *(angle_rate_gains.to_lists()+(vehicle_name,)))
def setAngleLevelControllerGains(self, angle_level_gains=AngleLevelControllerGains(), vehicle_name = ''):
"""
- Sets angle level controller gains (used by any API setting angle references - for ex: moveByRollPitchYawZAsync(), moveByRollPitchYawThrottleAsync(), etc)
- Modifying these gains will also affect the behaviour of moveByVelocityAsync() API.
This is because the AirSim flight controller will track velocity setpoints by converting them to angle set points.
- This function should only be called if the default angle level control PID gains need to be modified.
- Passing AngleLevelControllerGains() sets gains to default airsim values.
Args:
angle_level_gains (AngleLevelControllerGains):
- Correspond to the roll, pitch, yaw axes, defined in the body frame.
- Pass AngleLevelControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setAngleLevelControllerGains', *(angle_level_gains.to_lists()+(vehicle_name,)))
def setVelocityControllerGains(self, velocity_gains=VelocityControllerGains(), vehicle_name = ''):
"""
- Sets velocity controller gains for moveByVelocityAsync().
- This function should only be called if the default velocity control PID gains need to be modified.
- Passing VelocityControllerGains() sets gains to default airsim values.
Args:
velocity_gains (VelocityControllerGains):
- Correspond to the world X, Y, Z axes.
- Pass VelocityControllerGains() to reset gains to default recommended values.
                - Modifying velocity controller gains will have an effect on the behaviour of moveOnSplineAsync() and moveOnSplineVelConstraintsAsync(), as they both use velocity control to track the trajectory.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setVelocityControllerGains', *(velocity_gains.to_lists()+(vehicle_name,)))
def setPositionControllerGains(self, position_gains=PositionControllerGains(), vehicle_name = ''):
"""
        Sets position controller gains for moveToPositionAsync().
This function should only be called if the default position control PID gains need to be modified.
Args:
position_gains (PositionControllerGains):
- Correspond to the X, Y, Z axes.
- Pass PositionControllerGains() to reset gains to default recommended values.
vehicle_name (str, optional): Name of the multirotor to send this command to
"""
self.client.call('setPositionControllerGains', *(position_gains.to_lists()+(vehicle_name,)))
#query vehicle state
def getMultirotorState(self, vehicle_name = ''):
"""
The position inside the returned MultirotorState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Vehicle to get the state of
Returns:
MultirotorState:
"""
return MultirotorState.from_msgpack(self.client.call('getMultirotorState', vehicle_name))
getMultirotorState.__annotations__ = {'return': MultirotorState}
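    # Usage sketch (illustrative): with an assumed, already-connected `client`, read the estimated
    # kinematics and landed state out of the returned MultirotorState.
    #
    #   state = client.getMultirotorState()
    #   position = state.kinematics_estimated.position          # Vector3r, start-point frame
    #   airborne = (state.landed_state == LandedState.Flying)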
#query rotor states
def getRotorStates(self, vehicle_name = ''):
"""
        Used to obtain the current state of all of a multirotor's rotors. The state includes the speeds,
        thrusts and torques of all rotors.
Args:
vehicle_name (str, optional): Vehicle to get the rotor state of
Returns:
RotorStates: Containing a timestamp and the speed, thrust and torque of all rotors.
"""
return RotorStates.from_msgpack(self.client.call('getRotorStates', vehicle_name))
getRotorStates.__annotations__ = {'return': RotorStates}
#----------------------------------- Car APIs ---------------------------------------------
class CarClient(VehicleClient, object):
def __init__(self, ip = "", port = 41451, timeout_value = 3600):
super(CarClient, self).__init__(ip, port, timeout_value)
def setCarControls(self, controls, vehicle_name = ''):
"""
Control the car using throttle, steering, brake, etc.
Args:
controls (CarControls): Struct containing control values
vehicle_name (str, optional): Name of vehicle to be controlled
"""
self.client.call('setCarControls', controls, vehicle_name)
def getCarState(self, vehicle_name = ''):
"""
The position inside the returned CarState is in the frame of the vehicle's starting point
Args:
vehicle_name (str, optional): Name of vehicle
Returns:
CarState:
"""
state_raw = self.client.call('getCarState', vehicle_name)
return CarState.from_msgpack(state_raw)
def getCarControls(self, vehicle_name=''):
"""
Args:
vehicle_name (str, optional): Name of vehicle
Returns:
CarControls:
"""
controls_raw = self.client.call('getCarControls', vehicle_name)
return CarControls.from_msgpack(controls_raw) | 76,649 | Python | 46.285626 | 211 | 0.64434 |
superboySB/SBDrone_deprecated/src/HITL/toturials/deprecated/flightmare/README.md | # SBDrone (Flightmare)
Use sim-to-real RL to obtain a perception-aware velocity controller. These are notes for running the code on x86_64 machines.
# Configure the environment
## Install dependencies
```sh
sudo apt-get update && sudo apt-get install -y --no-install-recommends build-essential cmake libzmqpp-dev libopencv-dev libgoogle-glog-dev protobuf-compiler ros-$ROS_DISTRO-octomap-msgs ros-$ROS_DISTRO-octomap-ros ros-$ROS_DISTRO-joy python3-vcstool python-catkin-tools git python3-pip lsb-core vim gedit locate wget desktop-file-utils python3-empy gcc g++ cmake git gnuplot doxygen graphviz software-properties-common apt-transport-https curl libqglviewer-dev-qt5 libzmqpp-dev libeigen3-dev libglfw3-dev libglm-dev libvulkan1 vulkan-utils gdb libsdl-image1.2-dev libsdl-dev ros-melodic-octomap-mapping libomp-dev libompl-dev ompl-demos && curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - && sudo add-apt-repository "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" && sudo apt update && sudo apt install code -y && sudo pip3 install catkin-tools numpy -i https://pypi.tuna.tsinghua.edu.cn/simple
```
## Install Open3D
```sh
tar -C ~/ -zxvf ~/dependencies/Open3D.tgz && cd ~/Open3D/ && util/scripts/install-deps-ubuntu.sh assume-yes && mkdir build && cd build && cmake -DBUILD_SHARED_LIBS=ON .. && make -j16 && sudo make install
```
## Install cv_bridge
```sh
mkdir -p ~/cv_bridge_ws/src && tar -C ~/cv_bridge_ws/src/ -zxvf ~/dependencies/vision_opencv.tgz && apt-cache show ros-melodic-cv-bridge | grep Version && cd ~/cv_bridge_ws/ && catkin config --install && catkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so && catkin build && source install/setup.bash --extend
```
## Compile our project
**Every time you change the code on another machine**, you can delete the project and start over by:
```sh
cd ~ && git clone https://github.com/superboySB/flightmare_ws.git
```
```sh
echo "export FLIGHTMARE_PATH=~/flightmare_ws/src/flightmare" >> ~/.bashrc && source ~/.bashrc
```
Download the Flightmare Unity Binary **RPG_Flightmare.tar.xz** for rendering from the [Releases](https://github.com/uzh-rpg/flightmare/releases) and extract it into the /home/qiyuan/flightmare_ws/src/flightmare/flightrender/
```sh
cd ~/flightmare_ws/ && catkin init && catkin config --extend /opt/ros/melodic && catkin config --merge-devel && catkin config --cmake-args -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=-fdiagnostics-color && catkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so && catkin build
```
## Install Python Package: flightlib + flightrl
flightlib
```sh
sudo pip3 install --upgrade pip && pip3 install tensorflow-gpu==1.14 markupsafe scikit-build -i https://pypi.tuna.tsinghua.edu.cn/simple && cd ~/flightmare_ws/src/flightmare/flightlib && pip3 install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
```
flightrl (main)
```sh
cd ~/flightmare_ws/src/flightmare/flightrl && pip3 install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
```
# Basic Usage with ROS
## Launch Flightmare (using Gazebo-based dynamics)
In this example, we show how to use the [RotorS](https://github.com/ethz-asl/rotors_simulator) for the quadrotor dynamics modelling, [rpg_quadrotor_control](https://github.com/uzh-rpg/rpg_quadrotor_control) for model-based controller, and Flightmare for image rendering.
```sh
cd ~/flightmare_ws && source ./devel/setup.bash && roslaunch flightros rotors_gazebo.launch
```
We hope this example can serve as a starting point for many other applications. For example, Flightmare can be used with other multirotor models that come with RotorS, such as the AscTec Hummingbird, the AscTec Pelican, or the AscTec Firefly. The default controller in [rpg_quadrotor_control](https://github.com/uzh-rpg/rpg_quadrotor_control) is a PID controller. Users have the option to use more advanced controllers in this framework, such as [Perception-Aware Model Predictive Control](https://github.com/uzh-rpg/rpg_mpc).
# Basic Usage with Python
## Train neural network controller using PPO
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 1
```
## Test a pre-trained neural network controller
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 0
```
## With Unity Rendering
To enable Unity for visualization, double-click the extracted executable file RPG_Flightmare.x86_64
```sh
~/flightmare_ws/src/flightmare/flightrender/RPG_Flightmare.x86_64
```
and then test a pre-trained controller
```sh
cd ~/flightmare_ws/examples && python3 run_drone_control.py --train 0 --render 1
```
| 5,099 | Markdown | 59.714285 | 954 | 0.751324 |
superboySB/SBDrone_deprecated/src/HITL/toturials/deprecated/use_mavros/README.md | # Tutorials of mavros and px4
How to send topic-based control to a PX4 drone in AirSim using MAVROS.
## Install mavros from source
Building from source works the same way as in the single-drone tutorial: compile inside the "build" container first, then start the "runtime" container as follows:
```sh
docker run -itd --privileged --env=LOCAL_USER_ID="$(id -u)" --env=PX4_SIM_HOST_ADDR=172.16.13.104 -v /home/wangchao/daizipeng/SBDrone:/src:rw -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=:0 --network=host --name=mypx4-0 mypx4_image:v1 /bin/bash
```
Here, `--env=PX4_SIM_HOST_ADDR=172.16.13.104` adds the `PX4_SIM_HOST_ADDR` environment variable to the container, specifying the address of the remote AirSim host, and the value after `--name` sets the name of this container.
## Start the mavros services step by step
On the Windows machine, first check AirSim's settings.json, launch one of the AirSim maps, and leave it waiting for connections. Then log into the container:
```sh
docker exec -it --user $(id -u) mypx4-0 /bin/bash
```
Open a terminal and run two PX4 instances; this only counts as successful once QGC (GPS lock) related messages show up in AirSim:
```sh
bash /src/Scripts/run_airsim_sitl.sh 0
bash /src/Scripts/run_airsim_sitl.sh 1
```
Note that you need to run the following every time before using ROS-related commands:
```sh
source /opt/ros/melodic/setup.bash
```
Open a terminal and run the mavros services. The first port specifies the receiving port on the local host (127.0.0.1) (udp_onboard_payload_port_remote), and the second port specifies the sending port on the flight controller (udp_onboard_payload_port_local). Both can be matched against the MAVLink onboard UDP ports shown in the logs of the previous terminal.
```sh
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14280
roslaunch mavros px4.launch fcu_url:=udp://:[email protected]:14281
```
## Manually control the PX4 drone in AirSim via mavros topics (somewhat limited to version v1.12.1)
Following this [tutorial](https://www.youtube.com/watch?v=ZonkdMcwXH4), open a terminal and send service calls to PX4 through mavros to control the drone. The commands below can be tried in order, with the following results:
```sh
# Issue the takeoff command; at this point the drone cannot take off yet
rosservice call /mavros/cmd/takeoff "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
# Arm the drone; now it can take off
rosservice call /mavros/cmd/arming "value: true"
# The drone takes off
rosservice call /mavros/cmd/arming "value: true"
# Land the drone
rosservice call /mavros/cmd/land "{min_pitch: 0.0, yaw: 0.0, latitude: 0.0, longitude: 0.0, altitude: 0.0}"
```
You can also publish mavros topics to PX4. The following runs the position controller from a new terminal:
```sh
# Publish the position controller topic command
rostopic pub /mavros/setpoint_position/local geometry_msgs/PoseStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
pose:
position:
x: 1.0
y: 0.0
z: 2.0
orientation:
x: 0.0
y: 0.0
z: 0.0
w: 0.0" -r 20
```
Then switch to another terminal and set the flight mode
```sh
# This service switches the flight controller (e.g. PX4) into a specific flight mode. Here we use 'OFFBOARD' mode, which allows the flight controller to accept flight commands from an external computer.
rosservice call /mavros/set_mode "base_mode: 0
custom_mode: 'OFFBOARD'"
# Arm the drone so the commands are executed
rosservice call /mavros/cmd/arming "value: true"
# You can keep sending other position controller topic commands
```
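The same OFFBOARD workflow can also be scripted instead of typed by hand. The sketch below is a minimal, illustrative rospy example (the node name and setpoint values are placeholders), assuming the mavros instance launched above runs under its default `/mavros` namespace:
```python
#!/usr/bin/env python3
# Minimal OFFBOARD sketch: stream position setpoints, switch to OFFBOARD, then arm.
import rospy
from geometry_msgs.msg import PoseStamped
from mavros_msgs.srv import CommandBool, SetMode

rospy.init_node("offboard_example")
setpoint_pub = rospy.Publisher("/mavros/setpoint_position/local", PoseStamped, queue_size=10)
set_mode = rospy.ServiceProxy("/mavros/set_mode", SetMode)
arming = rospy.ServiceProxy("/mavros/cmd/arming", CommandBool)

pose = PoseStamped()
pose.pose.position.x, pose.pose.position.y, pose.pose.position.z = 1.0, 0.0, 2.0

rate = rospy.Rate(20)           # PX4 rejects OFFBOARD without a steady setpoint stream
for _ in range(100):            # pre-publish some setpoints before switching modes
    setpoint_pub.publish(pose)
    rate.sleep()

set_mode(base_mode=0, custom_mode="OFFBOARD")
arming(value=True)

while not rospy.is_shutdown():  # keep streaming, otherwise PX4 drops out of OFFBOARD
    setpoint_pub.publish(pose)
    rate.sleep()
```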
The following is a circle-flying demo with the velocity controller:
```sh
rostopic pub /mavros/setpoint_velocity/cmd_vel geometry_msgs/TwistStamped "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
twist:
linear:
x: 1.0
y: 0.0
z: 0.0
angular:
x: 0.0
y: 0.0
z: 1.0" -r 20
```
| 2,690 | Markdown | 26.742268 | 247 | 0.693309 |
superboySB/SBDrone_deprecated/src/HITL/toturials/2_rl_single_px4_drone/README.md | # Notes for re-implementing paper "PRL4AirSim"
An attempt to do reinforcement learning with PX4, starting from a single drone.
## Requirements
Set up the required environment:
```sh
docker build -t mypx4_image:v1 .
docker run -itd --privileged -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all --user=user --env=PX4_SIM_HOST_ADDR=172.16.13.104 --network=host --name=mypx4-dev mypx4_image:v1 /bin/bash
docker exec -it --user=user mypx4-dev /bin/bash
git clone https://github.com/superboySB/SBDrone && cd SBDrone && pip install -r requirements.txt
```
```sh
bash /home/user/PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 1
```
Alternatively, start a single PX4 SITL instance directly:
```sh
/home/user/PX4-Autopilot/build/px4_sitl_default/bin/px4 -i 0 -d /home/user/PX4-Autopilot/build/px4_sitl_default/etc >out.log 2>err.log &
```
## Troubleshooting
### 1. If the Docker image cannot be pulled, switch to a machine with a better network connection.
```sh
docker save > <image-name>.tar <repository>:<tag>
docker load < <image-name>.tar
```
### 2. Changing the AirSim screen resolution
https://blog.csdn.net/qq_33727884/article/details/89487292
### 3. Commands typed by Jianfei (teacher)
```sh
mavlink status
listener manual_control_setpoint -r 10
listener input_rc
``` | 1,038 | Markdown | 24.974999 | 202 | 0.725434 |
superboySB/SBDrone_deprecated/src/HITL/toturials/3_rl_multiple_px4_drones/README.md | # Notes for re-implementing paper "PRL4AirSim"
Reproducing the paper PRL4AirSim.
## Requirements
The binary shipped with the original paper was compiled from a Windows editor project, but only a Linux build has been open-sourced, so for now the whole project assumes a Linux machine as the host machine.
## Install
```sh
docker build -t mypx4_image:v1 .
docker run -itd --privileged -v /tmp/.X11-unix:/tmp/.X11-unix:ro -e DISPLAY=$DISPLAY --gpus all --user=user --env=PX4_SIM_HOST_ADDR=172.16.13.104 --network=host --name=mypx4-dev mypx4_image:v1 /bin/bash
docker exec -it --user=user mypx4-dev /bin/bash
bash PX4-Autopilot/Tools/simulation/sitl_multiple_run.sh 2
cd PRL4AirSim && pip install -r requirements.txt
```
## Troubleshooting
### 1. If the Docker image cannot be pulled, switch to a machine with a better network connection.
```sh
docker save > <image-name>.tar <repository>:<tag>
docker load < <image-name>.tar
```
### 2. If you use the original AirSim and run into UE4.27 failing to run the Blocks example
| 782 | Markdown | 22.727272 | 202 | 0.742967 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/PyClient.py | import Utils as Utils
import DQNTrainer as DQNTrainer
import datetime
import time
import Simulation as Simulation
import argparse
if __name__ == "__main__":
"""
Model Server port 29000
UE Server port 29001
"""
parser = argparse.ArgumentParser(description="PyClient",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("UE_Port")
parser.add_argument("UE_Address")
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
trainer_ip_address = '127.0.0.1' #os.environ['BUFFER_SERVER_IP']
#trainer_port = int(29000) #int(os.environ['BUFFER_SERVER_PORT'])
storage_port = int(arguments["storage_port"])
ue_ip_address = arguments["UE_Address"] #os.environ['UE_SERVER_IP']
#ue_ip_address = str(arguments["IP_Address"])
ue_port = int(arguments["UE_Port"]) #int(os.environ['UE_SERVER_PORT'])
client, model_server = Utils.connectClient(trainer_ip_address=trainer_ip_address, ue_ip_address=ue_ip_address, trainer_port=storage_port, ue_port=ue_port)
times = []
## Setup Environment
image_shape = (2, 32, 32)
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
print("start time: ", current_time)
agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
#print("loaded best model")
#agent.load('{}/BestModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
run_name = now.strftime("%Y_%m_%d_%Hh%Mm%Ss")
simulation = Simulation.Sim(image_shape=Utils.getConfig()['state_space'], num_drones=Utils.getConfig()['num_drones'])
train = Utils.getConfig()['from_artifact'] == ''
start = (time.perf_counter() / 3600)
Utils.getModelServer().call("startSimulation")
while simulation.episodes < Utils.getConfig()['max_episodes']:
finished = simulation.tick(agent)
end = datetime.datetime.now()
current_time = end.strftime("%H:%M:%S")
print("End time: ", current_time)
| 2,618 | Python | 38.681818 | 158 | 0.612299 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Storage.py | import msgpackrpc #install as admin: pip install msgpack-rpc-python
#import distributed.model.DQNTrainer as DQNTrainer
#https://linuxtut.com/en/70b626ca3ac6fbcdf939/
import numpy as np
import torch
import pathlib
import wandb
import DQNTrainer as DQNTrainer
import datetime
import time
import Utils as Utils
from collections import deque
import ReplayMemory as ReplayMemory
class Storage(object):
def __init__(self):
self.run_name = datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
self.run = wandb.init(
project="drone",
config=Utils.getConfig(),
name=self.run_name,
)
self.total_episodes = 0
self.start_time = None
self.agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
self.start_time = time.perf_counter()
def pushMemory(self, state, action, next_state, reward, not_done):
self.agent.memory.push(Utils.convertStateDicToNumpyDic(state), action, Utils.convertStateDicToNumpyDic(next_state), reward, not_done)
if (len(self.agent.memory) % 100 == 0):
wandb.log({"metric/Observations" : self.agent.memory.pushCounter},
step=self.total_episodes)
if not len(self.agent.memory) == self.agent.memory.maxSize:
print(len(self.agent.memory))
def getMemoryPushCounter(self):
return self.agent.memory.pushCounter
def startSimulation(self):
self.start_time = (time.perf_counter() / 3600)
wandb.log({"metric/HoursRun" : 0,
"metric/Observations" : self.agent.memory.pushCounter},
step=self.total_episodes)
print("============ START SIMULATION ===========")
def getEpsilon(self):
return self.agent.epsilon
def finishEpisode(self, finalDistance, totalReward):
self.total_episodes += 1
self.agent.decrement_epsilon()
wandb.log({
"metric/Distance From Goal": finalDistance,
"metric/Total Reward" : totalReward,
"metric/Wall-Time /h" : (time.perf_counter()-self.start_time) / 3600.0,
"metric/Epsilon" : self.agent.epsilon
}, step=self.total_episodes)
if self.total_episodes % 1000 == 0 and self.total_episodes != 0:
print("saving model parameters in wandb")
artifact = wandb.Artifact('dqn_3D_{}_EP_{}'.format(self.run_name, self.total_episodes), type='model', description='Episode {}'.format(self.total_episodes))
artifact.add_file('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
self.run.log_artifact(artifact)
def setNetworkTrainIteration(self, trainIteration):
wandb.log({
"metric/Train Iteration": trainIteration
}, step=self.total_episodes)
def sampleFromStorage(self):
if len(self.agent.memory) >= self.agent.replayMemory_size or len(self.agent.memory) >= self.agent.batch_size:
sample = self.agent.memory.sample(self.agent.batch_size)
batch = ReplayMemory.Transition(*zip(*sample))
state = [Utils.convertStateDicToListDic(i) for i in batch.state]
action = [int(i) for i in batch.action]
next_state = [Utils.convertStateDicToListDic(i) for i in batch.next_state]
reward = [float(i) for i in batch.reward]
not_done = [int(i) for i in batch.not_done]
return state, \
action, \
next_state, \
reward, \
not_done
else:
return None, None, None, None, None
def confirmConnection(self):
return 'Storage Server Connected!'
def testSampleFromStorage():
storage_server = Storage()
for i in range(50):
storage_server.agent.memory.push({'image': np.zeros(shape=(32, 32)),
'position': np.zeros(shape=(3,))},
1,
{'image': np.zeros(shape=(32, 32)),
'position': np.zeros(shape=(3,))},
0.1,
1)
storage_server.sampleFromStorage()
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Storage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
storage_server = Storage()
server = msgpackrpc.Server(storage_server)
server.listen(msgpackrpc.Address("127.0.0.1", int(arguments["storage_port"])))
print("========== STARTING STORAGE SERVER ============")
server.start()
print("========== FINISH STORAGE SERVER ============")
storage_server.run.finish()
| 5,554 | Python | 41.083333 | 167 | 0.567699 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DQNTrainer.py | import numpy as np
import torch
#import distributed.distributed_22_06_02.ReplayMemory as ReplayMemory
#import distributed.distributed_22_06_02.model.DQNetwork as DQNetwork
import ReplayMemory as ReplayMemory
import DQNetwork as DQNetwork
class DQNTrainer(object):
def __init__(self, image_input_dims : np.array,
n_actions : int,
replayMemory_size : int,
batch_size : int,
learningRate : float = 0.01,
discount_factor : float = 0.99,
epsilon : float = 1.0,
replace_target_count_episode : int = 100,
save_model_count_episode : int = 250,
checkpoint_episode : int = 250,
checkpoint_file : str = 'model_saves/dqn',
number_dimensions : int = 2):
self.image_input_dims = image_input_dims
self.n_actions = n_actions
self.discount_factor = discount_factor
self.epsilon = epsilon
self.replace_target_count_episode = replace_target_count_episode
self.save_model_count_episode = save_model_count_episode
self.network = DQNetwork.DQNetwork(learningRate, self.n_actions, image_input_dims)
self.target_network = DQNetwork.DQNetwork(learningRate, self.n_actions, image_input_dims)
self.batch_size = batch_size
self.memory = ReplayMemory.ReplayMemory(replayMemory_size)
self.replayMemory_size = replayMemory_size
self.checkpoint_episode = checkpoint_episode
self.checkpoint_file = checkpoint_file
def load(self, state_dict):
self.network.load_state_dict(state_dict=torch.load(state_dict))
self.target_network.load_state_dict(state_dict=torch.load(state_dict))
print("Loaded from state dictionary")
# Epsilon Greedy action selection.
def choose_action(self, observation : dict):
maxValue = None
# Expecting (Batch, Channels, Height, Width)
image = torch.tensor(np.reshape(np.array(observation['image']), (1, *self.image_input_dims)), dtype=torch.float).to(self.network.device)
velocity = torch.tensor(np.array(observation['velocity']).reshape((1, 3)), dtype=torch.float).to(self.network.device)
actions = self.network.forward(image, velocity)
if np.random.random() > self.epsilon:
action = torch.argmax(actions).item()
else:
action = np.random.choice([i for i in range(self.n_actions)])
#action = torch.argmax(actions).item()
maxValue = torch.max(actions).item()
#self.decrement_epsilon()
return action, maxValue
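    # Illustrative call (assuming the (2, 32, 32) depth-image state space used elsewhere in this repo):
    #
    #   obs = {'image': np.zeros((2, 32, 32)), 'velocity': np.zeros(3)}
    #   action, max_q = trainer.choose_action(obs)   # `trainer` is a hypothetical DQNTrainer instance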
def learn(self, transitions):
self.network.optimizer.zero_grad()
self.memory.pushCounter += 1
if self.memory.pushCounter % self.replace_target_count_episode == 0:
print("Transfer weights to target network at step {}".format(self.memory.pushCounter))
self.target_network.load_state_dict(self.network.state_dict())
batch = ReplayMemory.Transition(*zip(*transitions))
state = (torch.tensor(np.array([i[b'image'].reshape(*self.image_input_dims) for i in batch.state])).to(self.network.device).float(),
torch.tensor(np.array([i[b'velocity'] for i in batch.state])).to(self.network.device).float())
next_state = (torch.tensor(np.array([i[b'image'].reshape(*self.image_input_dims) for i in batch.next_state])).to(self.network.device).float(),
torch.tensor(np.array([i[b'velocity'] for i in batch.next_state])).to(self.network.device).float())
actions = torch.tensor(batch.action).to(self.network.device)
rewards = torch.tensor(batch.reward).to(self.network.device)
not_done = torch.tensor(batch.not_done).to(self.network.device)
indices = np.arange(self.batch_size)
# https://en.wikipedia.org/wiki/Q-learning
# Old quality value
Q_old = self.network.forward(*state)[indices, actions]
Q_target = rewards + self.target_network.forward(*next_state).max(dim=1)[0] * self.discount_factor * not_done
loss = self.network.loss(Q_old.double(), Q_target.double()).to(self.network.device)
loss.backward()
self.network.optimizer.step()
def decrement_epsilon(self):
#if self.memory.pushCounter < self.replayMemory_size and self.memory.pushCounter > self.replayMemory_size * 0.2 * 0.99:
if self.memory.pushCounter > self.replayMemory_size:
self.epsilon = max(0, 1. - ((self.memory.pushCounter - self.replayMemory_size) / self.replayMemory_size)) | 4,616 | Python | 47.599999 | 150 | 0.647964 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Start.py | import os
import json
import pathlib
import time
import Utils
import subprocess
import atexit
homeDir = str(pathlib.Path.home())
projectName = Utils.getConfig()['projectName']
envProcesses = int(Utils.getConfig()['envProcesses'])
storage_port = int(Utils.getConfig()['storage_port'])
headless = bool(Utils.getConfig()['headless'])
def changeUEIPJson(port):
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "r") as jsonFile:
data = json.load(jsonFile)
data["ApiServerPort"] = port
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "w") as jsonFile:
json.dump(data, jsonFile, indent=4)
# os.system('gnome-terminal -- python Storage.py {}'.format(storage_port))
# time.sleep(5)
# os.system('gnome-terminal -- python Trainer.py {}'.format(storage_port))
storage_procress = subprocess.Popen(['python3','Storage.py',f"{storage_port}"],shell=False, bufsize=0)
atexit.register(storage_procress.terminate)
time.sleep(5)
trainer_procress = subprocess.Popen(['python3','Trainer.py',f"{storage_port}"],shell=False,bufsize=0)
atexit.register(trainer_procress.terminate)
for i in range(envProcesses):
port = storage_port + i + 1
changeUEIPJson(port)
if headless:
# os.system('gnome-terminal -- ./UEBinary/{projectName}.sh -RenderOffscreen -windowed -NoVSync'.format(projectName=projectName))
ue_procress = subprocess.Popen([f'./UEBinary/{projectName}.sh','-RenderOffscreen','-windowed','-NoVSync'],shell=False,bufsize=0)
atexit.register(ue_procress.terminate)
else:
windowX = 1000 * i
windowY = 1000
# os.system('gnome-terminal -- ./UEBinary/{projectName}.sh -windowed -WinX={WinX} -WinY={WinY} -NoVSync'.format(
# projectName=projectName,
# WinX=windowX,
# WinY=windowY))
        ue_procress = subprocess.Popen([f'./UEBinary/{projectName}.sh','-windowed',f'-WinX={windowX}',f'-WinY={windowY}','-NoVSync'],shell=False,bufsize=0)  # '-windowed' matches the UE flag used in the headless branch
atexit.register(ue_procress.terminate)
time.sleep(4)
time.sleep(5)
for i in range(envProcesses):
UE_port = storage_port + i + 1
UE_Address = "127.0.0.1"
# os.system('gnome-terminal -- python PyClient.py {UE_port} {UE_Address} {storage_port}'.format(UE_port=UE_port, UE_Address="127.0.0.1", storage_port=storage_port))
agent_procress = subprocess.Popen(['python3','PyClient.py',f'{UE_port}',f'{UE_Address}',f'{storage_port}'],shell=False, bufsize=0)
atexit.register(agent_procress.terminate) | 2,533 | Python | 39.222222 | 168 | 0.686932 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Utils.py | import airsim
import numpy as np
import cv2 as cv
import msgpackrpc
import json
config = json.load(open("config.json", "r"))
print(config)
client = None
model_server = None
def connectClient(trainer_ip_address, ue_ip_address, trainer_port = 29000, ue_port = 41451):
global client, model_server
try:
client = airsim.MultirotorClient(ip=ue_ip_address, port=ue_port)
client.confirmConnection()
except Exception as e:
print("Cannot Connect to Multirotor Client, please ensure Unreal Engine is running with AirSim plugin")
print("Ip address = {} and port {}".format(ue_ip_address, ue_port))
print(e)
exit(1)
try:
model_server = msgpackrpc.Client(msgpackrpc.Address(trainer_ip_address, trainer_port))
print(model_server.call("confirmConnection"))
except Exception as e:
        print("Cannot connect to the model server, please ensure the storage server (Storage.py) is running")
print("Ip address = {} and port {}".format(trainer_ip_address, trainer_port))
print(e)
exit(1)
return client, model_server
def getClient() -> airsim.MultirotorClient:
return client
def getModelServer() -> msgpackrpc.Client:
return model_server
def getConfig():
return config
def convertStateDicToListDic(state):
listState = {}
for key in state:
listState[key] = state[key].tolist()
#print(listState)
return listState
def convertStateDicToNumpyDic(state):
listState = {}
for key in state:
listState[key] = np.array(state[key])
#print(listState)
return listState
# API call in AirSim can sometimes be broken depending on version, easier to call using RPC directly
def fixed_simGetImages(requests, vehicle_name = '', external : bool = False):
responses_raw = getClient().client.call('simGetImages', requests, vehicle_name, external)
return [airsim.ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def handleImage(droneName : str, cameraName : str, imageType : airsim.ImageType) -> np.array:
if (imageType == airsim.ImageType.Scene):
imageRequests = [airsim.ImageRequest(cameraName, imageType, False, False)]
imageResponses = fixed_simGetImages(imageRequests, droneName, False)
        image1d = np.frombuffer(imageResponses[0].image_data_uint8, dtype=np.uint8)  # np.fromstring is deprecated
imageRGB = image1d.reshape((imageResponses[0].height, imageResponses[0].width, 3))
return imageRGB
elif (imageType == airsim.ImageType.DepthPlanar or imageType == airsim.ImageType.DepthVis or imageType == airsim.ImageType.DepthPerspective):
imageResponses = fixed_simGetImages([airsim.ImageRequest(cameraName, airsim.ImageType.DepthPlanar, True, True)], droneName, False)
imageDepth = airsim.list_to_2d_float_array(imageResponses[0].image_data_float, imageResponses[0].width, imageResponses[0].height)
return imageDepth
else:
print("NOT CODED THE HANDLING OF THIS IMAGE TYPE YET")
return np.array([])
def showRGBImage(droneName : str):
image = handleImage(droneName, 'scene_cam', airsim.ImageType.Scene)
cv.imshow("RGB image", image)
cv.waitKey(0)
def showDepthImage(droneName : str):
imageResponses = fixed_simGetImages([airsim.ImageRequest('depth_cam', airsim.ImageType.DepthPlanar, True, True)], droneName, False)
imageDepth = airsim.list_to_2d_float_array(imageResponses[0].image_data_float, imageResponses[0].width,
imageResponses[0].height)
cv.imshow("depth image", imageDepth)
cv.waitKey(0)
def convert_pos_UE_to_AS(origin_UE : np.array, pos_UE : np.array):
    pos = np.zeros(3, dtype=float)  # np.float was removed from recent NumPy releases
pos[0] = pos_UE[0] - origin_UE[0]
pos[1] = pos_UE[1] - origin_UE[1]
pos[2] = - pos_UE[2] + origin_UE[2]
return pos / 100
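# Worked example (illustrative): with origin_UE = [0.0, 0.0, 910.0] and pos_UE = [9030.0, -6760.0, 1000.0]
# (both in UE centimetres), the result is [(9030 - 0), (-6760 - 0), (-1000 + 910)] / 100
# = [90.3, -67.6, -0.9] in AirSim NED metres: centimetres become metres and z is flipped to point down.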
| 3,819 | Python | 39.210526 | 145 | 0.686305 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DQNetwork.py | import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn.functional as functional
import numpy as np
class DQNetwork(nn.Module):
def __init__(self, learningRate: float, num_actions: int, image_input_dims: tuple):
super(DQNetwork, self).__init__()
self.learningRate = learningRate
self.num_actions = num_actions
self.image_input_dims = image_input_dims
self.maxpooling = nn.MaxPool2d((2, 2), stride=2)
self.image_conv1 = nn.Conv2d(image_input_dims[0], 16, kernel_size=(6, 6), stride=(2, 2))
self.image_conv2 = nn.Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
self.vel_fc1 = nn.Linear(3, 16)
conv_output_dim = self.calculate_conv_output_dims()
self.out_fc1 = nn.Linear(conv_output_dim + 16, 16)
self.out_fc2 = nn.Linear(16, num_actions)
self.optimizer = optim.RMSprop(self.parameters(), lr=learningRate)
self.loss = nn.MSELoss()
self.device = torch.device('cuda:0')
self.to(self.device)
def calculate_conv_output_dims(self):
state = torch.zeros(1, *self.image_input_dims).float()
        print("input state:", state.size())
x = self.maxpooling(functional.relu(self.image_conv1(state)))
print("layer 1", x.size())
x = self.maxpooling(functional.relu(self.image_conv2(x)))
print("layer 2", x.size())
return int(np.prod(x.size()))
def forward(self, image : torch.tensor, velocity : torch.tensor):
image = self.maxpooling(functional.relu(self.image_conv1(image)))
image = self.maxpooling(functional.relu(self.image_conv2(image)))
image_flattened = image.view(image.size()[0], -1)
velocity = functional.relu(self.vel_fc1(velocity))
concatinated_tensor = torch.cat((image_flattened, velocity), 1)
x = functional.relu(self.out_fc1(concatinated_tensor))
x = self.out_fc2(x)
return x
def test(self):
print("Testing network")
image = torch.zeros(1, *self.image_input_dims).float().to(self.device)
velocity = torch.zeros((1, 3)).float().to(self.device)
print("Input shapes: [image]: {} [velocity]: {}".format(image.size(), velocity.size()))
output = self.forward(image, velocity)
print("Output: {}".format(output))
if __name__ == "__main__":
print("test")
model = DQNetwork(learningRate=0.001, num_actions=2, image_input_dims=(2, 64, 64))
print("total parameters: ", sum(p.numel() for p in model.parameters()))
print("total trainable parameters: ", sum(p.numel() for p in model.parameters() if p.requires_grad))
print("total data points: ", (10 * 32 * 5000) / 30)
model.test()
| 2,727 | Python | 38.536231 | 104 | 0.627429 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Trainer.py | import msgpackrpc #install as admin: pip install msgpack-rpc-python
#import distributed.model.DQNTrainer as DQNTrainer
#https://linuxtut.com/en/70b626ca3ac6fbcdf939/
import torch
import pathlib
import DQNTrainer as DQNTrainer
import datetime
import time
import Utils as Utils
from collections import deque
import ReplayMemory as ReplayMemory
import os
from os.path import exists
class Trainer(object):
def __init__(self):
self.total_episodes = 0
self.start_time = None
self.agent = DQNTrainer.DQNTrainer(image_input_dims=Utils.getConfig()['state_space'],
n_actions=Utils.getConfig()['action_space'],
replayMemory_size=Utils.getConfig()['buffer_Size'],
batch_size=Utils.getConfig()['batch_size'],
learningRate=Utils.getConfig()['learning_rate'],
discount_factor=Utils.getConfig()['discount_factor'],
epsilon=1.0,
replace_target_count_episode=Utils.getConfig()['replace_target_count_episode'])
def confirmConnection(self):
return 'Model Server Connected!'
def learn(self):
return
def saveModel(self):
return
def testSampleFromStorageTrainer():
import Storage
import numpy as np
storage_server = Storage.Storage()
for i in range(50):
storage_server.agent.memory.push({'image': np.zeros(shape=(2, 32, 32)),
'velocity': np.zeros(shape=(3,))},
1,
{'image': np.zeros(shape=(2, 32, 32)),
'velocity': np.zeros(shape=(3,))},
0.1,
1)
state, action, next_state, reward, not_done = storage_server.sampleFromStorage()
transitions = []
for i in range(len(state)):
transition = ReplayMemory.Transition(Utils.convertStateDicToNumpyDic(state[i]),
action[i],
Utils.convertStateDicToNumpyDic(next_state[i]),
reward[i],
not_done[i])
transitions.append(transition)
trainer = Trainer()
trainer.agent.learn(transitions)
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Storage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("storage_port")
args = parser.parse_args()
arguments = vars(args)
run_tests = False
if run_tests:
testSampleFromStorageTrainer()
print("========== STARTING TRAINING CLIENT ============")
trainer = Trainer()
try:
model_server = msgpackrpc.Client(msgpackrpc.Address("127.0.0.1", int(arguments["storage_port"])))
print(model_server.call("confirmConnection"))
except Exception as e:
        print("Cannot connect to the model server, please ensure the storage server (Storage.py) is running")
print("Ip address = {} and port {}".format("127.0.0.1", int(arguments["storage_port"])))
print(e)
exit(1)
trainIteration = 0
previous_time = time.perf_counter()
while True:
state, action, next_state, reward, not_done = model_server.call("sampleFromStorage")
if state == None:
print("Waiting for transitions")
time.sleep(2)
else:
transitions = []
for i in range(len(state)):
transition = ReplayMemory.Transition(Utils.convertStateDicToNumpyDic(state[i]),
action[i],
Utils.convertStateDicToNumpyDic(next_state[i]),
reward[i],
not_done[i])
transitions.append(transition)
trainer.agent.learn(transitions)
trainIteration += 1
if trainIteration % 200 == 0:
model_server.call("setNetworkTrainIteration", trainIteration)
print("Saving model")
#torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
print("train iteration ", trainIteration, time.perf_counter() - previous_time)
if exists('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve())):
os.rename('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
os.rename('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
else:
torch.save(trainer.agent.network.state_dict(), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
previous_time = time.perf_counter()
| 5,394 | Python | 43.221311 | 150 | 0.531516 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Start_UEEditor.py | import os
import json
import pathlib
import time
import Utils
UEEditor_port = 29001
storage_port = 29000
def changeUEIPJson(port):
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "r") as jsonFile:
data = json.load(jsonFile)
data["ApiServerPort"] = port
with open(str(pathlib.Path.home()) + "/Documents/AirSim/settings.json", "w") as jsonFile:
json.dump(data, jsonFile, indent=4)
changeUEIPJson(UEEditor_port)
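# Launch order (as wired below): the Storage server first, then the AirSim PyClient
# pointed at the UE editor instance, then the Trainer, each in its own gnome-terminal.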
os.system('gnome-terminal -- python Storage.py {}'.format(storage_port))
time.sleep(10)
os.system('gnome-terminal -- python PyClient.py {UE_port} {UE_Address} {storage_port}'.format(UE_port=UEEditor_port, UE_Address="127.0.0.1", storage_port=storage_port))
os.system('gnome-terminal -- python Trainer.py {}'.format(storage_port)) | 807 | Python | 30.076922 | 168 | 0.714994 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/Simulation.py | import Utils as Utils
import airsim
import numpy as np
import time
import DroneObj as DroneObj
import random
import argparse
from os.path import exists
import os
import pathlib
beforeTime = None
afterTime = None
class Sim(object):
def __init__(self, image_shape, num_drones):
self.image_shape = image_shape
self.origin_UE = np.array([0.0, 0.0, 910.0])
self.areans_train_long = np.array([
# Using larger environment
#[Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([41156.0, 20459.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([56206.0, 21019.0, 1000.0]))]
# Using smaller environment
[Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([9030.0, -6760.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([14060.0, -6760.0, 1000.0]))]
])
self.areans = self.areans_train_long
self.droneObjects = [DroneObj.DroneObject(i) for i in range(num_drones)]
self.episodes = 0
self.model_download_at_episode = 0
self.numImagesSent = 0
#TODO: HyperParameters
self.step_length = 0.25
self.constant_x_vel = 1.0
self.constant_z_pos = Utils.convert_pos_UE_to_AS(origin_UE=self.origin_UE, pos_UE=[8600.0, -4160.0, 1510.0])[2]
self.actionTime = 1.0
self.resetBatch()
def gatherAllObservations(self):
useNewMethod = True
nonResetingDrones = []
for droneObject in self.droneObjects:
if droneObject.reseting == False:
nonResetingDrones.append(droneObject)
if len(nonResetingDrones) == 0:
return
if useNewMethod:
requests = [airsim.ImageRequest('depth_cam_{}'.format(droneObject.droneId), airsim.ImageType.DepthPlanar, True, True) for droneObject in nonResetingDrones]
names = [droneObject.droneName for droneObject in nonResetingDrones]
beforeTime = time.perf_counter()
responses_raw = Utils.getClient().client.call('simGetBatchImages', requests, names)
afterTime = time.perf_counter()
print("Gather images: ", afterTime - beforeTime)
responses = [airsim.ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
imageDepths = [airsim.list_to_2d_float_array(responses[i].image_data_float, responses[i].width, responses[i].height) for i in range(len(responses))]
else:
beforeTime = time.perf_counter()
responses_raw = [Utils.getClient().client.call('simGetImages',
[airsim.ImageRequest('depth_cam_{}'.format(droneObject.droneId), airsim.ImageType.DepthPlanar, True, True)],
'Drone{}'.format(droneObject.droneId),
False) for droneObject in nonResetingDrones]
afterTime = time.perf_counter()
print("Gather images (old method): ", afterTime - beforeTime)
responses = [airsim.ImageResponse.from_msgpack(response_raw[0]) for response_raw in responses_raw]
imageDepths = [airsim.list_to_2d_float_array(responses[i].image_data_float, responses[i].width, responses[i].height) for i in range(len(responses))]
for i, droneObject in enumerate(nonResetingDrones):
imageDepth = imageDepths[i]
if (imageDepth.size == 0):
print("Image size is 0")
imageDepth = np.ones(shape=(self.image_shape[1], self.image_shape[2])) * 30
maxDistance = 50
imageDepth[imageDepth > maxDistance] = maxDistance
imageDepth = imageDepth.astype(np.uint8)
if droneObject.currentStep == 0:
droneObject.previous_depth_image = imageDepth
stacked_images = np.array([imageDepth, droneObject.previous_depth_image])
multirotorState = Utils.getClient().getMultirotorState(droneObject.droneName)
velocity = multirotorState.kinematics_estimated.linear_velocity.to_numpy_array()
droneObject.previous_depth_image = imageDepth
droneObject.previousState = droneObject.currentState
droneObject.currentState = {'image': stacked_images, 'velocity': velocity}
droneObject.currentStatePos = multirotorState.kinematics_estimated.position.to_numpy_array()
def doActionBatch(self):
droneNames = []
vx_vec = []
vy_vec = []
z_vec = []
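        # Action mapping: 0 -> nudge lateral velocity by +step_length, 1 -> nudge by
        # -step_length, anything else keeps the current lateral velocity; forward
        # x-velocity and altitude are held constant (zeroed for resetting drones).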
for droneObject in self.droneObjects:
droneNames.append(droneObject.droneName)
quad_vel = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.linear_velocity
y_val_offset = 0
if droneObject.currentAction == 0:
y_val_offset = self.step_length
elif droneObject.currentAction == 1:
y_val_offset = -self.step_length
vx_vec.append(self.constant_x_vel if droneObject.reseting == False else 0)
vy_vec.append(quad_vel.y_val + y_val_offset if droneObject.reseting == False else 0)
z_vec.append(self.constant_z_pos)
droneObject.currentStep += 1
Utils.getClient().simPause(False)
Utils.getClient().client.call_async('moveByVelocityZBatch', vx_vec, vy_vec, z_vec, self.actionTime, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(), droneNames).join()
Utils.getClient().simPause(True)
def randomPoseInArena(self):
width = 1600 // 100
min = -(width // 2)
max = (width // 2)
return random.uniform(min, max)
def resetBatch(self):
windows = False
# Size difference: -7710.0, -6070.0
Utils.getClient().simPause(False)
Utils.getClient().reset()
time.sleep(5) if windows else time.sleep(0.25)
randomArenas = np.random.randint(len(self.areans), size=len(self.droneObjects))
for i in range(len(self.droneObjects)):
self.droneObjects[i].currentArena = randomArenas[i]
# airsim.Quaternionr(0.0, 0.0, 1.0, 0.0) = 180 degrees
poses = [airsim.Pose(airsim.Vector3r(self.areans[droneObject.currentArena][0][0],
self.areans[droneObject.currentArena][0][1] + self.randomPoseInArena(),
self.areans[droneObject.currentArena][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)) for droneObject in self.droneObjects]
Utils.getClient().client.call('simSetVehiclePoseBatch', poses, [droneObject.droneName for droneObject in self.droneObjects])
time.sleep(5) if windows else time.sleep(0.25)
for droneObject in self.droneObjects:
Utils.getClient().armDisarm(True, droneObject.droneName)
Utils.getClient().enableApiControl(True, droneObject.droneName)
Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName)
if windows: time.sleep(1)
# Move up 3m
time.sleep(5) if windows else time.sleep(0.25)
for droneObject in self.droneObjects:
quad_position = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position
#Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName).join()
#Utils.getClient().hoverAsync(vehicle_name=droneObject.droneName).join()
Utils.getClient().moveToPositionAsync(quad_position.x_val, quad_position.y_val, self.constant_z_pos, 3.0, vehicle_name=droneObject.droneName)
droneObject.currentStep = 0
currentPos_x_AS = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position.to_numpy_array()[0]
droneObject.distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
droneObject.reseting = False
droneObject.currentTotalReward = 0
if windows: time.sleep(1)
#time.sleep(5)
self.gatherAllObservations()
time.sleep(5) if windows else time.sleep(0.25)
Utils.getClient().simPause(True)
self.episodes += 1
def calculateReward(self, droneObject : DroneObj):
image = droneObject.currentState['image']
currentPos_x_AS = Utils.getClient().getMultirotorState(droneObject.droneName).kinematics_estimated.position.to_numpy_array()[0]
distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
collisionInfo = Utils.getClient().simGetCollisionInfo(droneObject.droneName)
hasCollided = collisionInfo.has_collided or image.min() < 0.55
if droneObject.currentStep < 2:
hasCollided = False
done = 0
reward_States = {
"Collided": 0,
"Won": 0,
"approaching_collision": 0,
"constant_reward" : 0,
"max_actions" : 0,
"goal_distance" : 0,
}
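        # Reward shaping: a constant per-step "goal_distance" bonus, a large collision
        # penalty, termination once within 5 m of the goal (the win bonus is currently
        # commented out), and a penalty if the episode exceeds 400 steps.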
reward_States["goal_distance"] = 3.0
if hasCollided:
done = 1
reward_States["Collided"] = -100
elif distanceFromGoal <= 5:
done = 1
#reward_States["Won"] = 100
elif droneObject.currentStep > 400:
done = 1
reward_States["max_actions"] = -10
reward = sum(reward_States.values())
droneObject.distanceFromGoal = distanceFromGoal
droneObject.currentTotalReward += reward
return reward, done
def resetStep(self, droneObject : DroneObj):
if droneObject.reseting == True:
if droneObject.resetTick == 0 and time.perf_counter() - droneObject.resetingTime > 1:
print("RESETING DRONE ", droneObject.droneId, print("len "), len(self.droneObjects))
randomArena = np.random.randint(len(self.areans), size=(1,))[0]
droneObject.currentArena = randomArena
Utils.getClient().client.call_async("resetVehicle", droneObject.droneName, airsim.Pose(airsim.Vector3r(self.areans[droneObject.currentArena][0][0],
self.areans[droneObject.currentArena][0][1] + self.randomPoseInArena(),
self.areans[droneObject.currentArena][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)))
droneObject.resetTick = 1
droneObject.resetingTime = time.perf_counter()
if droneObject.resetTick == 1 and time.perf_counter() - droneObject.resetingTime > 1:
Utils.getClient().armDisarm(True, droneObject.droneName)
Utils.getClient().enableApiControl(True, droneObject.droneName)
Utils.getClient().takeoffAsync(vehicle_name=droneObject.droneName)
droneObject.resetingTime = droneObject.resetingTime
droneObject.resetTick = 3
if droneObject.resetTick == 3 and time.perf_counter() - droneObject.resetingTime > 2:
droneObject.reseting = False
droneObject.resetTick = 0
state = Utils.getClient().getMultirotorState(droneObject.droneName)
quad_position = state.kinematics_estimated.position
Utils.getClient().moveToPositionAsync(quad_position.x_val, quad_position.y_val, self.constant_z_pos, 3.0, vehicle_name=droneObject.droneName)
currentPos_x_AS = state.kinematics_estimated.position.to_numpy_array()[0]
droneObject.distanceFromGoal = abs(currentPos_x_AS - self.areans[droneObject.currentArena][1][0])
droneObject.currentStep = 0
droneObject.currentTotalReward = 0
self.episodes += 1
def tick(self, agent):
for droneObject in self.droneObjects:
if droneObject.currentStatePos[0] < 5:
droneObject.reseting = True
self.resetStep(droneObject)
if droneObject.reseting == False:
maxAction, _ = agent.choose_action(droneObject.currentState)
droneObject.currentAction = maxAction
self.doActionBatch()
self.gatherAllObservations()
loadDQNFile = False
for droneObject in self.droneObjects:
if droneObject.reseting == False:
self.numImagesSent += 1
reward, done = self.calculateReward(droneObject)
Utils.getModelServer().call_async("pushMemory",
Utils.convertStateDicToListDic(droneObject.previousState),
int(droneObject.currentAction), #was considered np.int rather than int.
Utils.convertStateDicToListDic(droneObject.currentState),
reward,
1 - int(done))
if done:
Utils.getModelServer().call_async("finishEpisode", droneObject.distanceFromGoal, droneObject.currentTotalReward)
droneObject.reseting = True
droneObject.resetingTime = time.perf_counter()
agent.epsilon = Utils.getModelServer().call("getEpsilon")
agent.memory.pushCounter = Utils.getModelServer().call("getMemoryPushCounter")
loadDQNFile = True
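        # The rename-to-dqn_read.pth / rename-back sequence below acts as a crude file
        # lock so the checkpoint is not loaded while the Trainer process is rewriting it.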
if loadDQNFile and exists('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve())):
try:
os.rename('{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
agent.load('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()))
os.rename('{}/ModelSaves/dqn_read.pth'.format(pathlib.Path().resolve()), '{}/ModelSaves/dqn.pth'.format(pathlib.Path().resolve()))
except:
print("issue reading file")
print("NumImagesSent: ", self.numImagesSent)
finished = True
for droneObject in self.droneObjects:
if droneObject.reseting == False:
finished = False
        finished = False  # NOTE: hard-coded override - the completion check above is disabled, so tick() always returns False
return finished
#libUE4Editor-AirSim.so!_ZNSt3__110__function6__funcIZN3rpc6detail10dispatcher4bindIZN3msr6airlib22MultirotorRpcLibServerC1EPNS7_11ApiProviderENS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEtE4$_14EEvRKSG_T_RKNS3_4tags14nonvoid_resultERKNSL_11nonzero_argEEUlRKN14clmdep_msgpack2v26objectEE_NSE_ISX_EEFNS_10unique_ptrINSS_2v113object_handleENS_14default_deleteIS11_EEEESW_EEclESW_() | 15,043 | Python | 46.457413 | 392 | 0.605797 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/DroneObj.py | import time
import numpy as np
class DroneObject(object):
def __init__(self, droneId):
self.droneId = droneId
self.droneName = 'Drone{}'.format(droneId)
self.currentArena = None
self.currentStep = 0
self.droneSpawnOffset = np.array([0, 0 * droneId, 0])
self.previous_depth_image = None
self.currentState = None
self.currentStatePos = None # Used to create the value heat map
self.previousState = None
self.currentAction = None
self.currentTotalReward = 0
self.distanceFromGoal = None
self.reseting = True
self.reseting_API = False
self.reseting_API_2 = False
self.resetTick = 0
self.resetingTime = time.perf_counter()
def getCurrentArena(self):
return -1 if self.currentArena == None else self.currentArena | 864 | Python | 28.827585 | 71 | 0.630787 |
superboySB/SBDrone_deprecated/src/HITL/sbrl/ReplayMemory.py | import random
from collections import namedtuple, deque
#state_image, state_velocity, action, next_state_image, next_state_velocity, reward, not_done
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'not_done'))
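# Illustrative example: a transition stores dict observations plus scalars, e.g.
# Transition({'image': img, 'velocity': vel}, action, next_state_dict, reward, not_done).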
class ReplayMemory(object):
def __init__(self, maxSize : int):
self.maxSize = maxSize
self.pushCounter = 0
self.memory = deque([], maxlen=self.maxSize)
def push(self, *args):
"""Save transition"""
self.memory.append(Transition(*args))
self.pushCounter += 1
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory) | 705 | Python | 31.090908 | 94 | 0.648227 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/__init__.py | """
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
"""
from .graph import Graph
from .ros2_camera import ROS2Camera
from .ros2_tf import ROS2Tf
from .ros2_odometry import ROS2Odometry
from .ros2_lidar import ROS2Lidar | 254 | Python | 27.33333 | 82 | 0.771654 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_odometry.py | """
| File: ros2_odometry.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Tf"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid, set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
class ROS2Odometry(Graph):
"""The class that implements the ROS2 Odometry graph. This class inherits the base class Graph.
"""
def __init__(self, config: dict = {}):
"""Initialize the ROS2 Odometry class
Args:
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Odometry - it can be empty or only have some of the parameters used by the ROS2Odometry.
Examples:
The dictionary default parameters are
>>> {"odom_topic": "odom", # String for odometry topic
>>> "publish_odom_to_base_tf": True, # Enable tf broadcaster for odom_frame->base_frame transform
>>> "publish_map_to_odom_tf": True, # Enable tf broadcaster for map_frame->odom_frame transform
>>> "map_frame": "map", # String name for the map_frame
>>> "odom_frame": "odom", # String name for the odom_frame
>>> "base_frame": "base_link"} # String name for the base_frame
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Odometry")
# Process the config dictionary
self._odom_topic = config.get("odom_topic", "odom")
self._publish_odom_to_base_tf = config.get("publish_map_to_odom_tf", True)
self._publish_map_to_odom_tf = config.get("publish_map_to_odom_tf", True)
self._map_frame = config.get("map_frame", "map")
self._odom_frame = config.get("odom_frame", "odom")
self._base_frame = config.get("base_frame", "base_link")
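        # Usage sketch (assuming a spawned Pegasus vehicle): ROS2Odometry({"odom_topic": "odom"})
        # followed by initialize(vehicle) publishes nav_msgs/Odometry on /<vehicle_name>/odom
        # plus the optional odom->base_link and map->odom transforms.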
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
# Create the graph under vehicle with graph name odom_pub and allow only one per vehicle.
graph_path = f"{vehicle.prim_path}/odom_pub"
if is_prim_path_valid(graph_path):
carb.log_warn(f"ROS2 Odometry Graph for vehicle {vehicle.vehicle_name} already exists")
return
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a graph edit configuration with transform tree publishers
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_playback_tick", "omni.graph.action.OnPlaybackTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("isaac_compute_odometry", "omni.isaac.core_nodes.IsaacComputeOdometry"),
("publish_odometry", "omni.isaac.ros2_bridge.ROS2PublishOdometry")
],
keys.CONNECT: [
("on_playback_tick.outputs:tick", "isaac_compute_odometry.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_odometry.inputs:timeStamp"),
("isaac_compute_odometry.outputs:execOut", "publish_odometry.inputs:execIn"),
("isaac_compute_odometry.outputs:linearVelocity", "publish_odometry.inputs:linearVelocity"),
("isaac_compute_odometry.outputs:orientation", "publish_odometry.inputs:orientation"),
("isaac_compute_odometry.outputs:position", "publish_odometry.inputs:position")
],
keys.SET_VALUES: [
("publish_odometry.inputs:odomFrameId", self._odom_frame),
("publish_odometry.inputs:chassisFrameId", self._base_frame),
("publish_odometry.inputs:nodeNamespace", self._namespace),
("publish_odometry.inputs:topicName", self._odom_topic)
]
}
# Create odom_frame->base_frame publisher
if self._publish_odom_to_base_tf:
graph_config[keys.CREATE_NODES] += [
("publish_odom_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree")
]
graph_config[keys.CONNECT] += [
("on_playback_tick.outputs:tick", "publish_odom_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_odom_transform_tree.inputs:timeStamp"),
("isaac_compute_odometry.outputs:orientation", "publish_odom_transform_tree.inputs:rotation"),
("isaac_compute_odometry.outputs:position", "publish_odom_transform_tree.inputs:translation")
]
graph_config[keys.SET_VALUES] += [
("publish_odom_transform_tree.inputs:parentFrameId", self._odom_frame),
("publish_odom_transform_tree.inputs:childFrameId", self._base_frame)
]
# Create map_frame->odom_frame publisher
# Because there is no drift or pose jumps in simulated odometry, map_frame->base_frame == odom_frame->base_frame
        if self._publish_map_to_odom_tf:
graph_config[keys.CREATE_NODES] += [
("publish_map_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree")
]
graph_config[keys.CONNECT] += [
("on_playback_tick.outputs:tick", "publish_map_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_map_transform_tree.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_map_transform_tree.inputs:parentFrameId", self._map_frame),
("publish_map_transform_tree.inputs:childFrameId", self._odom_frame)
]
        # Create the odometry graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Set the odometry chassis prim, which should be the vehicle prim path
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_compute_odometry"),
attribute="inputs:chassisPrim",
target_prim_paths=[vehicle.prim_path]
)
        # Run the odometry graph once so the ROS publishers are created
og.Controller.evaluate_sync(graph)
        # Also initialize the Super class with the updated prim path (only the odometry graph path)
super().initialize(graph_path)
@property
def odometry_topic(self) -> str:
"""
(str) Path to the odometry topic.
Returns:
Odometry topic name (str)
"""
return f"{self._namespace}/{self._odom_topic}"
| 7,131 | Python | 45.921052 | 185 | 0.603422 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_tf.py | """
| File: ros2_tf.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Tf"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid, set_targets
from omni.isaac.core.prims import XFormPrim
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
class ROS2Tf(Graph):
"""The class that implements the ROS2 TF graph. This class inherits the base class Graph.
"""
def __init__(self):
"""Initialize the ROS2 TF class
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Tf")
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
# The vehicle uses body instead of standardized base_link,
# so we need to create the base_link and connect the body to it
base_link_xform_path = f"{vehicle.prim_path}/body/base_link"
XFormPrim(
prim_path=base_link_xform_path
)
# Create the graph under vehicle with graph name tf and allow only one per vehicle.
graph_path = f"{vehicle.prim_path}/tf_pub"
if is_prim_path_valid(graph_path):
carb.log_warn(f"ROS2 TF Graph for vehicle {vehicle.vehicle_name} already exists")
return
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a graph edit configuration with transform tree publishers
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_playback_tick", "omni.graph.action.OnPlaybackTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("publish_transform_tree", "omni.isaac.ros2_bridge.ROS2PublishTransformTree")
],
keys.CONNECT: [
("on_playback_tick.outputs:tick", "publish_transform_tree.inputs:execIn"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_transform_tree.inputs:timeStamp")
],
keys.SET_VALUES: [
("publish_transform_tree.inputs:nodeNamespace", self._namespace)
]
}
        # Create the TF graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Set the parent frame, it should be the base_link
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/publish_transform_tree"),
attribute="inputs:parentPrim",
target_prim_paths=[base_link_xform_path]
)
# Create list of target prims, which will contain articulation root
# and all sensors with frame_path filled
target_prim_paths = [vehicle.prim_path]
for sensor in vehicle._sensors:
if len(sensor.frame_path) and is_prim_path_valid(sensor.frame_path):
target_prim_paths.append(sensor.frame_path)
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/publish_transform_tree"),
attribute="inputs:targetPrims",
target_prim_paths=target_prim_paths
)
        # Run the TF graph once so the ROS publishers are created
og.Controller.evaluate_sync(graph)
        # Also initialize the Super class with the updated prim path (only the TF graph path)
super().initialize(graph_path)
| 3,809 | Python | 35.634615 | 112 | 0.618273 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/graphs/ros2_lidar.py | """
| File: ros2_lidar.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
"""
__all__ = ["ROS2Lidar"]
import carb
from omni.isaac.core.utils import stage
import omni.graph.core as og
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.prims import set_targets
from pegasus.simulator.logic.graphs import Graph
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class ROS2Lidar(Graph):
"""The class that implements the ROS2 Lidar graph. This class inherits the base class Graph.
"""
def __init__(self, lidar_prim_path: str, config: dict = {}):
"""Initialize the ROS2 Lidar class
Args:
lidar_prim_path (str): Path to the lidar prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the ROS2Lidar - it can be empty or only have some of the parameters used by the ROS2Lidar.
Examples:
The dictionary default parameters are
>>> {"publish_scan": False, # publish scanner data as sensor_msgs/LaserScan (requires high_lod turned off)
>>> "publish_point_cloud": True} # publish scanner data as sensor_msgs/PointCloud2 (for 2D data, requires high_lod turned on)
"""
# Initialize the Super class "object" attribute
super().__init__(graph_type="ROS2Lidar")
# Save lidar path, frame id and ros topic name
self._lidar_prim_path = lidar_prim_path
self._frame_id = lidar_prim_path.rpartition("/")[-1] # frame_id of the lidar is the last prim path part after `/`
self._base_topic = ""
# Process the config dictionary
self._publish_scan = config.get("publish_scan", False)
self._publish_point_cloud = config.get("publish_point_cloud", True)
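        # Usage sketch: ROS2Lidar("body/lidar", {"publish_scan": True}) followed by
        # initialize(vehicle) publishes on /<vehicle_name>/<lidar_name>/scan and
        # /<vehicle_name>/<lidar_name>/point_cloud, depending on the flags above.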
def initialize(self, vehicle: Vehicle):
"""Method that initializes the graph of the lidar.
Args:
vehicle (Vehicle): The vehicle that this graph is attached to.
"""
self._namespace = f"/{vehicle.vehicle_name}"
self._base_topic = f"/{self._frame_id}"
        # Set the prim_path for the lidar
if self._lidar_prim_path[0] != '/':
self._lidar_prim_path = f"{vehicle.prim_path}/{self._lidar_prim_path}"
# Check if the prim path is valid
if not is_prim_path_valid(self._lidar_prim_path):
carb.log_error(f"Cannot create ROS2 Lidar graph, the lidar prim path \"{self._lidar_prim_path}\" is not valid")
return
        # Set the prim path for the lidar graph
graph_path = f"{self._lidar_prim_path}_pub"
# Graph configuration
graph_specs = {
"graph_path": graph_path,
"evaluator_name": "execution",
}
# Creating a default graph edit configuration
keys = og.Controller.Keys
graph_config = {
keys.CREATE_NODES: [
("on_tick", "omni.graph.action.OnTick"),
("isaac_read_simulation_time", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
],
keys.CONNECT: [],
keys.SET_VALUES: [],
}
# Add laser scan publishing to the graph
if self._publish_scan:
graph_config[keys.CREATE_NODES] += [
("isaac_read_lidar_beams", "omni.isaac.range_sensor.IsaacReadLidarBeams"),
("publish_laser_scan", "omni.isaac.ros2_bridge.ROS2PublishLaserScan")
]
graph_config[keys.CONNECT] += [
("on_tick.outputs:tick", "isaac_read_lidar_beams.inputs:execIn"),
("isaac_read_lidar_beams.outputs:execOut", "publish_laser_scan.inputs:execIn"),
("isaac_read_lidar_beams.outputs:azimuthRange", "publish_laser_scan.inputs:azimuthRange"),
("isaac_read_lidar_beams.outputs:depthRange", "publish_laser_scan.inputs:depthRange"),
("isaac_read_lidar_beams.outputs:horizontalFov", "publish_laser_scan.inputs:horizontalFov"),
("isaac_read_lidar_beams.outputs:horizontalResolution", "publish_laser_scan.inputs:horizontalResolution"),
("isaac_read_lidar_beams.outputs:intensitiesData", "publish_laser_scan.inputs:intensitiesData"),
("isaac_read_lidar_beams.outputs:linearDepthData", "publish_laser_scan.inputs:linearDepthData"),
("isaac_read_lidar_beams.outputs:numCols", "publish_laser_scan.inputs:numCols"),
("isaac_read_lidar_beams.outputs:numRows", "publish_laser_scan.inputs:numRows"),
("isaac_read_lidar_beams.outputs:rotationRate", "publish_laser_scan.inputs:rotationRate"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_laser_scan.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_laser_scan.inputs:frameId", self._frame_id),
("publish_laser_scan.inputs:nodeNamespace", self._namespace),
("publish_laser_scan.inputs:topicName", f"{self._base_topic}/scan")
]
# Add point cloud publishing to the graph
if self._publish_point_cloud:
graph_config[keys.CREATE_NODES] += [
("isaac_read_lidar_point_cloud", "omni.isaac.range_sensor.IsaacReadLidarPointCloud"),
("publish_point_cloud", "omni.isaac.ros2_bridge.ROS2PublishPointCloud")
]
graph_config[keys.CONNECT] += [
("on_tick.outputs:tick", "isaac_read_lidar_point_cloud.inputs:execIn"),
("isaac_read_lidar_point_cloud.outputs:execOut", "publish_point_cloud.inputs:execIn"),
("isaac_read_lidar_point_cloud.outputs:pointCloudData", "publish_point_cloud.inputs:pointCloudData"),
("isaac_read_simulation_time.outputs:simulationTime", "publish_point_cloud.inputs:timeStamp")
]
graph_config[keys.SET_VALUES] += [
("publish_point_cloud.inputs:frameId", self._frame_id),
("publish_point_cloud.inputs:nodeNamespace", self._namespace),
("publish_point_cloud.inputs:topicName", f"{self._base_topic}/point_cloud")
]
        # Create the lidar graph
(graph, _, _, _) = og.Controller.edit(
graph_specs,
graph_config
)
# Connect lidar to the graphs
if self._publish_scan:
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_read_lidar_beams"),
attribute="inputs:lidarPrim",
target_prim_paths=[self._lidar_prim_path]
)
if self._publish_point_cloud:
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(f"{graph_path}/isaac_read_lidar_point_cloud"),
attribute="inputs:lidarPrim",
target_prim_paths=[self._lidar_prim_path]
)
# Run the ROS Lidar graph once to generate ROS publishers in SDGPipeline
og.Controller.evaluate_sync(graph)
# Also initialize the Super class with updated prim path (only lidar graph path)
super().initialize(graph_path)
def laser_scan_topic(self) -> str:
"""
Returns:
(str) Lidar laser scan topic name if exists, else empty string
"""
return f"{self._namespace}{self._base_topic}/scan" if self._publish_scan else ""
    def point_cloud_topic(self) -> str:
"""
Returns:
(str) Lidar point cloud topic name if exists, else empty string
"""
return f"{self._namespace}{self._base_topic}/point_cloud" if self._publish_point_cloud else "" | 7,872 | Python | 45.863095 | 179 | 0.607342 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/camera.py | """
| File: camera.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
| Description: Creates or connects to a Camera prim for higher level functionality
"""
__all__ = ["Camera"]
import carb
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.sensor import Camera as CameraPrim
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class Camera(Sensor):
"""The class that implements the Camera sensor. This class inherits the base class Sensor.
"""
def __init__(self, camera_prim_path: str, config: dict = {}):
"""Initialize the Camera class
Args:
camera_prim_path (str): Path to the camera prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the Camera - it can be empty or only have some of the parameters used by the Camera.
Examples:
The dictionary default parameters are
>>> {"position": [0.0, 0.0, 0.0], # Meters
>>> "orientation": [0.0, 0.0, 0.0, 1.0], # Quaternion [qx, qy, qz, qw]
>>> "focal_length": 24.0, # Millimeters
>>> "focus_distance", 400.0, # Stage units
>>> "resolution": [640, 480], # Pixels
>>> "set_projection_type": "pinhole", # pinhole, fisheyeOrthographic, fisheyeEquidistant, fisheyeEquisolid, fisheyePolynomial, fisheyeSpherical
>>> "update_rate": 30.0, # Hz
>>> "overwrite_params": False} # Overwrite params if the camera prim already exists
"""
# Initialize the Super class "object" attribute
# update_rate not necessary
super().__init__(sensor_type="Camera", update_rate=config.get("update_rate", 30.0))
# Save the id of the sensor
self._camera_prim_path = camera_prim_path
self._frame_id = camera_prim_path.rpartition("/")[-1] # frame_id of the camera is the last prim path part after `/`
# Reference to the actual camera object. This is set when the camera is initialized
self.camera = None
# Get the position of the camera relative to the vehicle
self._position = np.array(config.get("position", [0.0, 0.0, 0.0]))
self._orientation = np.array(config.get("orientation", [0.0, 0.0, 0.0, 1.0])) # Quaternion [qx, qy, qz, qw]
# Get the camera parameters
self._focal_length = config.get("focal_length", 24.0)
self._focus_distance = config.get("focus_distance", 400.0)
self._clipping_range = config.get("clipping_range", [0.05, 1000000.0])
self._resolution = config.get("resolution", [640, 480])
self._set_projection_type = config.get("set_projection_type", "pinhole")
        self._horizontal_aperture = config.get("horizontal_aperture", 20.9550)
self._vertical_aperture = config.get("vertical_aperture", 15.2908)
self._overwrite = config.get("overwrite_params", False)
# Save the current state of the camera sensor
self._state = {
"frame_id": self._frame_id
}
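        # Usage sketch: Camera("body/camera", {"resolution": [1280, 720]}) creates (or
        # reuses) the camera prim under the vehicle once initialize(vehicle) is called;
        # frames are then accessed through the wrapped Isaac Sim Camera object.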
def initialize(self, vehicle: Vehicle):
"""Method that initializes the action graph of the camera. It also initalizes the sensor latitude, longitude and
altitude attributes as well as the vehicle that the sensor is attached to.
Args:
vehicle (Vehicle): The vehicle that this sensor is attached to.
"""
# Set the prim path for the camera
if self._camera_prim_path[0] != '/':
self._camera_prim_path = f"{vehicle.prim_path}/{self._camera_prim_path}"
        # Global prim paths (starting with '/') are used as-is
# Create camera prim
if not is_prim_path_valid(self._camera_prim_path) or self._overwrite:
self.camera = CameraPrim(
prim_path=self._camera_prim_path,
frequency=self._update_rate,
resolution=self._resolution,
translation=np.array(self._position),
orientation=[self._orientation[3], self._orientation[0], self._orientation[1], self._orientation[2]]
)
# Set camera parameters
self.camera.set_focal_length(self._focal_length)
self.camera.set_focus_distance(self._focus_distance)
self.camera.set_clipping_range(self._clipping_range[0], self._clipping_range[1])
self.camera.set_projection_type(self._set_projection_type)
            self.camera.set_horizontal_aperture(self._horizontal_aperture)
self.camera.set_vertical_aperture(self._vertical_aperture)
else:
self.camera = CameraPrim(
prim_path=self._camera_prim_path,
frequency=self._update_rate,
resolution=self._resolution
)
# Set the sensor's frame path
self.frame_path = self._camera_prim_path
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""
Args:
state (State): The current state of the vehicle. UNUSED IN THIS SENSOR
dt (float): The time elapsed between the previous and current function calls (s). UNUSED IN THIS SENSOR
Returns:
None
"""
return None | 5,720 | Python | 44.404762 | 173 | 0.61049 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/lidar.py | """
| File: lidar.py
| License: BSD-3-Clause. Copyright (c) 2023, Micah Nye. All rights reserved.
| Description: Creates a lidar sensor
"""
__all__ = ["Lidar"]
from omni.usd import get_context
from omni.isaac.range_sensor import _range_sensor
import omni.isaac.RangeSensorSchema as RangeSensorSchema
from pxr import Sdf, Gf
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
from pegasus.simulator.logic.vehicles import Vehicle
import numpy as np
class Lidar(Sensor):
"""The class that implements the Lidar sensor. This class inherits the base class Sensor.
"""
def __init__(self, prim_path: str, config: dict = {}):
"""Initialize the Camera class
Args:
prim_path (str): Path to the lidar prim. Global path when it starts with `/`, else local to vehicle prim path
config (dict): A Dictionary that contains all the parameters for configuring the lidar - it can be empty or only have some of the parameters used by the lidar.
Examples:
The dictionary default parameters are
>>> {"position": [0.0, 0.0, 0.0], # Meters
>>> "yaw_offset": 0.0, # Degrees
>>> "rotation_rate": 20.0, # Hz
>>> "horizontal_fov": 360.0, # Degrees
>>> "horizontal_resolution": 1.0, # Degrees
>>> "vertical_fov": 10.0, # Degrees
>>> "vertical_resolution": 1.0, # Degrees
>>> "min_range": 0.4, # Meters
>>> "max_range": 100.0, # Meters
>>> "high_lod": True, # High level of detail (True - draw all rays, False - draw horizontal rays)
>>> "draw_points": False, # Draw lidar points where they hit an object
>>> "draw_lines": False, # Draw lidar ray lines
>>> "fill_state: False} # Fill state with sensor data
"""
# Initialize the Super class "object" attribute
# update_rate not necessary
super().__init__(sensor_type="Lidar", update_rate=config.get("rotation_rate", 20.0))
# Save the id of the sensor
self._prim_path = prim_path
        self._frame_id = prim_path.rpartition("/")[-1] # frame_id of the lidar is the last prim path part after `/`
# The extension acquires the LIDAR interface at startup. It will be released during extension shutdown. We
# create a LIDAR prim using our schema, and then we interact with / query that prim using the python API found
# in lidar/bindings
self._li = _range_sensor.acquire_lidar_sensor_interface()
self.lidar = None
# Get the lidar position relative to its parent prim
self._position = np.array(config.get("position", [0.0, 0.0, 0.0]))
# Get the lidar parameters
self._yaw_offset = config.get("yaw_offset", 0.0)
self._rotation_rate = config.get("rotation_rate", 20.0)
self._horizontal_fov = config.get("horizontal_fov", 360.0)
self._horizontal_resolution = config.get("horizontal_resolution", 1.0)
self._vertical_fov = config.get("vertical_fov", 10.0)
self._vertical_resolution = config.get("vertical_resolution", 1.0)
self._min_range = config.get("min_range", 0.4)
self._max_range = config.get("max_range", 100.0)
self._high_lod = config.get("high_lod", True)
self._draw_points = config.get("draw_points", False)
self._draw_lines = config.get("draw_lines", False)
# Save the current state of the range sensor
self._fill_state = config.get("fill_state", False)
if self._fill_state:
self._state = {
"frame_id": self._frame_id,
"depth": None,
"zenith": None,
"azimuth": None
}
else:
self._state = None
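        # Usage sketch: Lidar("body/lidar", {"draw_lines": True, "fill_state": True})
        # defines a RangeSensorSchema.Lidar prim on initialize(vehicle); with fill_state
        # set, depth/zenith/azimuth arrays are exposed through the state property.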
def initialize(self, vehicle: Vehicle):
"""Method that initializes the lidar sensor. It also initalizes the sensor latitude, longitude and
altitude attributes as well as the vehicle that the sensor is attached to.
Args:
vehicle (Vehicle): The vehicle that this sensor is attached to.
"""
        # Set the prim path for the lidar; global prim paths (starting with '/') are used as-is
        if self._prim_path[0] != '/':
            self._prim_path = f"{vehicle.prim_path}/{self._prim_path}"
# create the LIDAR. Before we can set any attributes on our LIDAR, we must first create the prim using our
# LIDAR schema, and then populate it with the parameters we will be manipulating. If you try to manipulate
# a parameter before creating it, you will get a runtime error
stage = get_context().get_stage()
self.lidar = RangeSensorSchema.Lidar.Define(stage, Sdf.Path(self._prim_path))
# Set lidar parameters
self.lidar.AddTranslateOp().Set(Gf.Vec3f(*self._position))
self.lidar.CreateYawOffsetAttr().Set(self._yaw_offset)
self.lidar.CreateRotationRateAttr().Set(self._rotation_rate)
self.lidar.CreateHorizontalFovAttr().Set(self._horizontal_fov)
self.lidar.CreateHorizontalResolutionAttr().Set(self._horizontal_resolution)
self.lidar.CreateVerticalFovAttr().Set(self._vertical_fov)
self.lidar.CreateVerticalResolutionAttr().Set(self._vertical_resolution)
self.lidar.CreateMinRangeAttr().Set(self._min_range)
self.lidar.CreateMaxRangeAttr().Set(self._max_range)
self.lidar.CreateHighLodAttr().Set(self._high_lod)
self.lidar.CreateDrawPointsAttr().Set(self._draw_points)
self.lidar.CreateDrawLinesAttr().Set(self._draw_lines)
# Set the sensor's frame path
self.frame_path = self._prim_path
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
def update(self, state: State, dt: float):
"""
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor) or None
"""
# Add the values to the dictionary and return it
if self._fill_state:
self._state = {
"frame_id": self._frame_id,
"depth": self._li.get_depth_data(self._prim_path),
"zenith": self._li.get_zenith_data(self._prim_path),
"azimuth": self._li.get_azimuth_data(self._prim_path),
}
return self._state
| 6,991 | Python | 45.613333 | 171 | 0.595766 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/pegasus/simulator/logic/sensors/vision.py | """
| File: vision.py
| License: BSD-3-Clause. Copyright (c) 2023, Marcelo Jacinto. All rights reserved.
| Description: Simulates a visual odometry. Based on the implementation provided in PX4 stil_gazebo (https://github.com/PX4/PX4-SITL_gazebo) by Amy Wagoner and Nuno Marques
"""
__all__ = ["Vision"]
import numpy as np
from scipy.spatial.transform import Rotation
from pegasus.simulator.logic.state import State
from pegasus.simulator.logic.sensors import Sensor
class Vision(Sensor):
"""The class that implements a Vision sensor. This class inherits the base class Sensor.
"""
def __init__(self, config={}):
"""Initialize the Vision class.
Args:
config (dict): A Dictionary that contains all the parameters for configuring the Vision - it can be empty or only have some of the parameters used by the Vision.
Examples:
The dictionary default parameters are
>>> {"reset_counter": 0,
>>> "vision_random_walk": 0.1, # (m/s) / sqrt(hz)
>>> "vision_noise_density": 0.01, # (m) / sqrt(hz)
>>> "vision_correlation_time": 60, # s
>>> "update_rate": 30.0 # Hz
>>> }
"""
# Initialize the Super class "object" attributes
super().__init__(sensor_type="Vision", update_rate=config.get("update_rate", 30.0))
# Define the Vision simulated/fixed values
self._reset_counter = config.get("reset_counter", 0)
# Parameters for Vision random walk
self._random_walk = np.array([0.0, 0.0, 0.0])
self._vision_random_walk = config.get("vision_random_walk", 0.1)
# Parameters for Vision position and linear/angular velocity noise
self._noise_pos = np.array([0.0, 0.0, 0.0])
self._noise_linvel = np.array([0.0, 0.0, 0.0])
self._noise_angvel = np.array([0.0, 0.0, 0.0])
self._vision_noise_density = config.get("vision_noise_density", 0.01)
# Parameters for Vision bias
self._bias = np.array([0.0, 0.0, 0.0])
self._vision_correlation_time = config.get("vision_correlation_time", 60.0)
# Position covariance is constant, so prepare it in advance
self._vision_covariance = np.array(
[self._vision_noise_density * self._vision_noise_density if i in [0, 6, 11, 15, 18, 20] else 0.0 for i in range(21)],
dtype=float)
        # Save the current state measured by the vision sensor (and initialize at the origin)
self._state = {
"x": 0.0,
"y": 0.0,
"z": 0.0,
"roll": 0.0,
"pitch": 0.0,
"yaw": 0.0,
"covariance": self._vision_covariance,
"reset_counter": self._reset_counter,
}
@property
def state(self):
"""
(dict) The 'state' of the sensor, i.e. the data produced by the sensor at any given point in time
"""
return self._state
@Sensor.update_at_rate
    def update(self, state: State, dt: float):
"""Method that implements the logic of a visual odometry.
Args:
state (State): The current state of the vehicle.
dt (float): The time elapsed between the previous and current function calls (s).
Returns:
(dict) A dictionary containing the current state of the sensor (the data produced by the sensor)
"""
# Update noise parameters
self._random_walk[0] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk[1] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._random_walk[2] = self._vision_random_walk * np.sqrt(dt) * np.random.randn()
self._noise_pos[0] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_pos[1] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_pos[2] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[0] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[1] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
self._noise_linvel[2] = self._vision_noise_density * np.sqrt(dt) * np.random.randn()
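        # The angular-velocity noise below is a discretized first-order Gauss-Markov
        # (exponentially correlated) process: n[k+1] = exp(-dt/tau) * n[k] + sigma_d * sqrt(dt) * w[k],
        # with w[k] ~ N(0, 1), mirroring the PX4 SITL Gazebo implementation this sensor is based on.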
tau_g = self._vision_correlation_time
sigma_g_d = 1 / np.sqrt(dt) * self._vision_noise_density
sigma_b_g = self._vision_random_walk
sigma_b_g_d = np.sqrt(-sigma_b_g * sigma_b_g * tau_g / 2.0 * (np.exp(-2.0 * dt / tau_g) - 1.0))
phi_g_d = np.exp(-1.0 / tau_g * dt)
self._noise_angvel[0] = phi_g_d * self._noise_angvel[0] + sigma_b_g_d * np.sqrt(dt) * np.random.randn() # self._noise_angvel[0] might need to be 0.0
self._noise_angvel[1] = phi_g_d * self._noise_angvel[1] + sigma_b_g_d * np.sqrt(dt) * np.random.randn()
self._noise_angvel[2] = phi_g_d * self._noise_angvel[2] + sigma_b_g_d * np.sqrt(dt) * np.random.randn()
# Perform Vision bias integration
self._bias[0] = (
self._bias[0] + self._random_walk[0] * dt - self._bias[0] / self._vision_correlation_time
)
self._bias[1] = (
self._bias[1] + self._random_walk[1] * dt - self._bias[1] / self._vision_correlation_time
)
self._bias[2] = (
self._bias[2] + self._random_walk[2] * dt - self._bias[2] / self._vision_correlation_time
)
# Get resulting values
position: np.ndarray = state.get_position_ned() + self._noise_pos + self._bias
orientation: np.ndarray = Rotation.from_quat(state.get_attitude_ned_frd()).as_euler('xyz', degrees=False)
linear_velocity: np.ndarray = state.get_linear_velocity_ned() + self._noise_linvel
angular_velocity: np.ndarray = state.get_angular_velocity_frd() + self._noise_angvel
self._state = {
"x": position[0],
"y": position[1],
"z": position[2],
"roll": orientation[0],
"pitch": orientation[1],
"yaw": orientation[2],
"covariance": self._vision_covariance,
"reset_counter": self._reset_counter,
}
return self._state
| 6,195 | Python | 42.943262 | 173 | 0.580791 |
superboySB/SBDrone_deprecated/extensions/pegasus.simulator/config/configs.yaml | global_coordinates:
altitude: 90.0
latitude: 38.736832
longitude: -9.137977
px4_dir: ~/PX4-Autopilot
| 107 | YAML | 16.999997 | 24 | 0.728972 |
superboySB/SBDrone_deprecated/tests/test_api_control.py | # ready to run example: PythonClient/multirotor/hello_drone.py
import airsim
import os
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
# Async methods returns Future. Call join() to wait for task to complete.
client.takeoffAsync()
state = client.getMultirotorState(vehicle_name = 'UAV_0')
client.landAsync().join()
# take images
# responses = client.simGetImages([
# airsim.ImageRequest("0", airsim.ImageType.DepthVis),
# airsim.ImageRequest("1", airsim.ImageType.DepthPlanar, True)])
# print('Retrieved images: %d', len(responses))
# # do something with the images
# for response in responses:
# if response.pixels_as_float:
# print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
# airsim.write_pfm('./py1.pfm', airsim.get_pfm_array(response))
# else:
# print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
# airsim.write_file('./py1.png', response.image_data_uint8) | 1,100 | Python | 32.363635 | 91 | 0.708182 |
superboySB/SBDrone_deprecated/tests/test_manual_control.py | """
For connecting to the AirSim drone environment and testing API functionality
"""
import airsim
import os
import tempfile
import pprint
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True,vehicle_name='UAV_1')
state = client.getMultirotorState(vehicle_name='UAV_1')
s = pprint.pformat(state)
print("state: %s" % s)
client.takeoffAsync(timeout_sec = 20, vehicle_name = 'UAV_1')
# client.moveByManualAsync(vx_max = 1E6, vy_max = 1E6, z_min = -1E6, duration = 1, vehicle_name='UAV_1') # control via manual stick inputs
# airsim.wait_key('Manual mode is setup. Press any key to send RC data to takeoff')
# This command keeps being applied until it is overridden by the next command
client.moveByRC(rcdata = airsim.RCData(pitch = 1, throttle = 0.5, is_initialized = True, is_valid = True), vehicle_name='UAV_1')
client.moveByRC(rcdata = airsim.RCData(pitch = 0, throttle = 0.1, is_initialized = True, is_valid = True), vehicle_name='UAV_1')
| 975 | Python | 32.655171 | 128 | 0.732308 |
superboySB/SBDrone_deprecated/tests/test_get_state.py | import airsim
import time
# this script moves the drone to a location, then resets it thousands of times
# purpose of this script is to stress test reset API
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="172.16.13.104",port=41451)
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
for idx in range(3000):
# client.moveToPositionAsync(0, 0, -10, 5).join()
# client.reset()
# client.enableApiControl(True)
print(client.getMultirotorState())
print("%d" % idx)
time.sleep(1)
# that's enough fun for now. let's quit cleanly
client.enableApiControl(False)
| 635 | Python | 24.439999 | 76 | 0.733858 |
superboySB/SBDrone_deprecated/tests/test_functions.py | import time
import airsim
import numpy as np
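# UE4 world coordinates are in centimeters (Z up); AirSim uses NED meters. The helper
# below subtracts the player-start origin, flips the Z axis and converts cm -> m.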
def convert_pos_UE_to_AS(origin_UE : np.array, pos_UE : np.array):
pos = np.zeros(3, dtype=float)
pos[0] = pos_UE[0] - origin_UE[0]
pos[1] = pos_UE[1] - origin_UE[1]
pos[2] = - pos_UE[2] + origin_UE[2]
return pos / 100
droneName = "Drone0"
origin_UE = np.array([0.0, 0.0, 910.0])
areans_train_long = np.array([
# Using larger environment
# [Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([41156.0, 20459.0, 1000.0])), Utils.convert_pos_UE_to_AS(self.origin_UE, np.array([56206.0, 21019.0, 1000.0]))]
# Using smaller environment
[convert_pos_UE_to_AS(origin_UE, np.array([8430.0, -6760.0, 1000.0])),
convert_pos_UE_to_AS(origin_UE, np.array([14060.0, -6760.0, 1000.0]))]
])
client = airsim.MultirotorClient(ip="172.16.13.104")
client.confirmConnection()
client.reset()
client.enableApiControl(True, vehicle_name = droneName)
client.armDisarm(True, vehicle_name = droneName)
client.takeoffAsync(vehicle_name=droneName)
time.sleep(10)
client.client.call_async("resetVehicle", droneName,
airsim.Pose(airsim.Vector3r(areans_train_long[0][0][0],
areans_train_long[0][0][1],
areans_train_long[0][0][2]),
airsim.Quaternionr(0.0, 0.0, 0.0, 0.0)))
| 1,450 | Python | 35.274999 | 170 | 0.577931 |
superboySB/SBDrone_deprecated/tests/test_subprocress.py | import subprocess
# Define the shell command to execute
command = "ls -l" # "ls -l" is used as an example; replace it with any other shell command
# Execute the command
try:
    # Use subprocess.run() to execute the command
    # capture_output=True captures both stdout and stderr
    result = subprocess.run(command, shell=True, text=True, capture_output=True)
    # Print the result of the command
    print("stdout:")
    print(result.stdout)
    print("\nstderr:")
    print(result.stderr)
    print("\nreturn code:", result.returncode)
except subprocess.CalledProcessError as e:
print("命令执行出错:", e) | 464 | Python | 20.136363 | 80 | 0.68319 |
AshisGhosh/roboai/docker-compose.yml | services:
isaac-sim:
build:
context: .
dockerfile: ./isaac_sim/Dockerfile
volumes:
- /tmp/.X11-unix:/tmp/.X11-unix
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw
- ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw
- ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw
- ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw
- ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw
- ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
- ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw
- ~/docker/isaac-sim/documents:/root/Documents:rw
- ./isaac_sim/isaac_sim:/isaac-sim/roboai/
- ./shared:/isaac-sim/roboai/shared
- ./isaac_sim/humble_ws/src:/isaac-sim/humble_ws/src
- ./isaac_sim/bin:/isaac-sim/roboai/bin
environment:
- DISPLAY=${DISPLAY}
- XAUTHORITY=/root/.Xauthority
- ACCEPT_EULA=Y
- PRIVACY_CONSENT=Y
- ROS_DOMAIN_ID=${ROS_DOMAIN_ID:-0}
entrypoint: /bin/bash -c "while true; do sleep 30; done"
network_mode: host
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
franka:
image: franka_isaac_moveit_tutorial
build:
context: ./franka_moveit
dockerfile: Dockerfile
stdin_open: true
tty: true
network_mode: host
ipc: host
privileged: true
environment:
- ROS_DOMAIN_ID=${ROS_DOMAIN_ID:-0}
- DISPLAY=${DISPLAY}
- QT_X11_NO_MITSHM=1
volumes:
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
- ./franka_moveit/config:/root/ws_moveit/src/moveit2_tutorials/doc/how_to_guides/isaac_panda/config
command: ros2 launch moveit2_tutorials isaac_demo.launch.py
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
grasp-server:
build:
context: ./grasping/grasp_server
dockerfile: Dockerfile
volumes:
- ./grasping/grasp_server:/app
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ./shared:/app/shared
environment:
- DISPLAY=${DISPLAY}
- XAUTHORITY=/root/.Xauthority
command: poetry run uvicorn app.main:app --host 0.0.0.0 --port 8005 --reload
network_mode: host
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
ollama-server:
image: ollama/ollama:latest
volumes:
- ~/.cache/ollama:/root/.ollama
ports:
- 11434:11434
roboai:
build:
context: .
dockerfile: ./roboai/Dockerfile
volumes:
- ./roboai:/app
- /run/user/1000/gdm/Xauthority:/root/.Xauthority:rw
- ./shared:/app/shared
- ~/.burr:/root/.burr
environment:
- DISPLAY=${DISPLAY}
- MUJOCO_GL=osmesa
- XAUTHORITY=/root/.Xauthority
# command: python -u -m roboai.roboai
command: bash -c "python -m streamlit run roboai/streamlit_app.py --server.headless true --server.port=8501 --server.address=0.0.0.0 & burr --no-open"
# command: /bin/bash -c "while true; do sleep 30; done"
network_mode: host
roboai-demo:
extends: roboai
command: python -u -m roboai.roboai_demo
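# Typical usage (sketch): `docker compose up isaac-sim roboai` brings up the simulator
# container and the RoboAI app; the GPU reservations above require the NVIDIA
# container toolkit on the host.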
| 3,455 | YAML | 30.706422 | 154 | 0.597685 |
AshisGhosh/roboai/README.md | # RoboAI: Playground + Framework for applying LLM/VLMs to Robots in Sim
### Update Videos:
* **May 27 2024** - [VIDEO](https://www.youtube.com/watch?v=ycvPWq4JfEI) - Robot learning task relevant information and factoring that in the plan -- integrated with [OmniGibson](https://behavior.stanford.edu/omnigibson/) from Stanford/NVIDIA
* **May 8 2024** - [VIDEO](https://www.youtube.com/watch?v=sg3PTz5q6kc) - Robot going from plain text to grasping attempt -- integrated with ROS2, MoveIt2, a grasping model and Isaac Sim.
## Simulation Frameworks
### MuJoCo & Robosuite
[Mujoco](https://mujoco.org/) is Google DeepMind's physics simulation.
[Robosuite](https://robosuite.ai/) is a modular framework built on top of MuJoCo.
In the `/robosim` folder you'll find a Robosuite/MuJoCo sim environment:
* Focused on Panda arm grasping objects in pick and place environment
* Camera views to focus on objects
* Markers to indicate robot goal and grasp targets
* Simple API to control the robot
### Isaac Sim
[Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html) is NVIDIA's robot simulation powered by GPUs.
Isaac Sim offers advanced tooling as well as close to real rendering. This was adopted to better test vision models.
Isaac Sim does not support external async frameworks as well; development toward supporting this in the project is still in progress and may need some re-architecting.
The simulation:
* Focuses on the Panda arm on a table with objects to grasp
* Cameras for different views
* Initial work on Markers - rendering/material support is still WIP
## Models & LLM Framework
The high-level goal is to be able to command a robot to complete a long-horizon task with natural language.
An example would be to "clear the messy table".
### LLMs
LLMs are used in the planning layer. Once the scene is understood, an LLM is used (either iteratively or with CoT/ToT) to generate a robot-affordable plan.
Currently focused on free models hosted on [openrouter.ai](https://openrouter.ai).
### VLMs
VLMs are an extremely fast-changing space. Current work is focused on:
* [moondream2](https://huggingface.co/vikhyatk/moondream2)
* [VILA-2.7b](https://huggingface.co/Efficient-Large-Model/VILA-2.7b) -- inference running on a Jetson Orin Nano (not in this repo) using [NanoLLM](https://dusty-nv.github.io/NanoLLM/index.html) | 2,351 | Markdown | 43.377358 | 242 | 0.762654 |
AshisGhosh/roboai/model_server/pyproject.toml | [tool.poetry]
name = "model-server"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.1"
uvicorn = "^0.29.0"
transformers = "^4.39.3"
timm = "^0.9.16"
einops = "^0.7.0"
python-multipart = "^0.0.9"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 399 | TOML | 18.047618 | 46 | 0.651629 |
AshisGhosh/roboai/model_server/model_server/hf_cerule.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceCerule:
def __init__(self):
self.model_id = "Tensoic/Cerule-v0.1"
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, trust_remote_code=True
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
def encode_image(self, image):
start_encode = time.time()
encoded_image = self.model.encode_image(image)
log.info(f"Image encoded in {time.time() - start_encode} seconds.")
return encoded_image
def answer_question(self, enc_image, question):
start_model = time.time()
answer = self.model.answer_question(enc_image, question, self.tokenizer)
log.info(f"Answered question in {time.time() - start_model} seconds.")
return answer
def answer_question_from_image(self, image, question):
enc_image = self.encode_image(image)
return self.answer_question(enc_image, question)
if __name__ == "__main__":
model = HuggingFaceCerule()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
enc_image = model.encode_image(image)
question = "Describe this image."
print(model.answer_question(enc_image, question))
| 1,609 | Python | 31.857142 | 80 | 0.674953 |
AshisGhosh/roboai/model_server/model_server/hf_idefics.py | # Load model directly
from transformers import AutoProcessor, AutoModelForVision2Seq
from transformers.image_utils import load_image
import torch
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
class HuggingFaceIdefics:
def __init__(self):
model_load_start = time.time()
self.processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
self.model = AutoModelForVision2Seq.from_pretrained(
"HuggingFaceM4/idefics2-8b"
).to(DEVICE)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
def answer_question_from_image(self, image, question):
        # Build the chat prompt from the caller-supplied question; the caller-supplied
        # image is passed to the processor below.
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": question},
                ],
            },
        ]
prompt = self.processor.apply_chat_template(
messages, add_generation_prompt=True
)
        inputs = self.processor(text=prompt, images=[image], return_tensors="pt")
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
start_time = time.time()
generated_ids = self.model.generate(**inputs, max_new_tokens=500)
log.info(f"Generated in {time.time() - start_time} seconds.")
start_time = time.time()
generated_texts = self.processor.batch_decode(
generated_ids, skip_special_tokens=True
)
log.info(f"Decoded in {time.time() - start_time} seconds.")
return generated_texts
if __name__ == "__main__":
log.info("Loading model...")
model = HuggingFaceIdefics()
log.info("Model loaded.")
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
question = "Describe this image."
log.info("Answering question...")
log.info(model.answer_question_from_image(image, question))
| 2,195 | Python | 31.294117 | 83 | 0.617768 |
AshisGhosh/roboai/model_server/model_server/hf_moondream2.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceMoonDream2:
def __init__(self):
self.model_id = "vikhyatk/moondream2"
self.revision = "2024-04-02"
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, trust_remote_code=True, revision=self.revision
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_id, revision=self.revision
)
def encode_image(self, image):
start_encode = time.time()
encoded_image = self.model.encode_image(image)
log.info(f"Image encoded in {time.time() - start_encode} seconds.")
return encoded_image
def answer_question(self, enc_image, question):
start_model = time.time()
answer = self.model.answer_question(enc_image, question, self.tokenizer)
log.info(f"Answered question in {time.time() - start_model} seconds.")
return answer
def answer_question_from_image(self, image, question):
enc_image = self.encode_image(image)
return self.answer_question(enc_image, question)
if __name__ == "__main__":
model = HuggingFaceMoonDream2()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
enc_image = model.encode_image(image)
question = "Describe this image."
log.info(model.answer_question(enc_image, question))
| 1,727 | Python | 32.230769 | 80 | 0.670527 |
AshisGhosh/roboai/model_server/model_server/hf_nanollava.py | from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import time
import torch
import logging
log = logging.getLogger("model-server")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
class HuggingFaceNanoLLaVA:
def __init__(self):
torch.set_default_device("cpu")
model_load_start = time.time()
self.model = AutoModelForCausalLM.from_pretrained(
"qnguyen3/nanoLLaVA",
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True,
)
log.info(f"Model loaded in {time.time() - model_load_start} seconds.")
self.tokenizer = AutoTokenizer.from_pretrained(
"qnguyen3/nanoLLaVA", trust_remote_code=True
)
def process_image(self, image):
start_process = time.time()
        image_tensor = self.model.process_images([image], self.model.config).to(
            dtype=self.model.dtype
        )
log.info(f"Image processed in {time.time() - start_process} seconds.")
return image_tensor
def answer_question(self, image_tensor, prompt):
messages = [{"role": "user", "content": f"<image>\n{prompt}"}]
text = self.tokenizer.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
text_chunks = [
self.tokenizer(chunk).input_ids for chunk in text.split("<image>")
]
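        # -200 is the LLaVA-style image placeholder token id; the image features are
        # spliced in at this position during generation.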
input_ids = torch.tensor(
text_chunks[0] + [-200] + text_chunks[1], dtype=torch.long
).unsqueeze(0)
start_model = time.time()
        output_ids = self.model.generate(
            input_ids, images=image_tensor, max_new_tokens=2048, use_cache=True
        )[0]
log.info(f"Answered question in {time.time() - start_model} seconds.")
output = self.tokenizer.decode(
output_ids[input_ids.shape[1] :], skip_special_tokens=True
).strip()
return output
if __name__ == "__main__":
model = HuggingFaceNanoLLaVA()
img_path = "/app/shared/data/test2.png"
image = Image.open(img_path)
    image_tensor = model.process_image(image)
question = "Describe this image."
print(model.answer_question(image_tensor, question))
| 2,292 | Python | 32.231884 | 79 | 0.623473 |
AshisGhosh/roboai/model_server/model_server/hf_mxbai_embed.py | from sentence_transformers import SentenceTransformer
class HuggingFaceMXBaiEmbedLarge:
def __init__(self):
self.model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
def embed(self, text):
return self.model.encode(text).tolist()
| 269 | Python | 25.999997 | 78 | 0.724907 |
AshisGhosh/roboai/model_server/app/main.py | #!/usr/bin/python -u
import io
import logging
from PIL import Image
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from model_server.hf_moondream2 import HuggingFaceMoonDream2
from model_server.hf_mxbai_embed import HuggingFaceMXBaiEmbedLarge
moondream = HuggingFaceMoonDream2()
mxbai_embed = HuggingFaceMXBaiEmbedLarge()
logging.basicConfig(level=logging.INFO)
# Create FastAPI instance
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the model server."}
@app.post("/answer_question")
async def answer_question(file: UploadFile = File(...), question: str = ""):
# Read the image file
image_bytes = await file.read()
# Convert bytes to a file-like object
image_stream = io.BytesIO(image_bytes)
# Use PIL to open the image
image = Image.open(image_stream)
# Perform object detection
result = moondream.answer_question_from_image(image, question)
# Return the result
return JSONResponse(content={"result": result})
@app.post("/embed")
async def embed(text: str = ""):
# Perform embedding
result = mxbai_embed.embed(text)
# Return the result
return JSONResponse(content={"embedding": result})
| 1,753 | Python | 25.179104 | 76 | 0.715345 |
AshisGhosh/roboai/robosuite/robosim/pyproject.toml | [tool.poetry]
name = "robosim"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.8"
fastapi = "^0.110.0"
uvicorn = "^0.29.0"
python-multipart = "^0.0.9"
h5py = "^3.10.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 350 | TOML | 17.473683 | 46 | 0.657143 |
AshisGhosh/roboai/robosuite/robosim/robosim/camera.py | import numpy as np
from robosuite.utils.camera_utils import (
get_camera_intrinsic_matrix,
get_camera_extrinsic_matrix,
get_camera_transform_matrix,
get_real_depth_map,
transform_from_pixels_to_world,
)
import logging
log = logging.getLogger("robosim robot camera")
log.setLevel(logging.DEBUG)
class Camera:
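    # Thin wrapper around robosuite's camera utilities: caches the intrinsic,
    # extrinsic, and pixel-to-world transform matrices for a named camera and
    # converts pixel + depth observations into world coordinates.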
def __init__(self, env, name, camera_height=480, camera_width=640):
self.env = env
self.name = name
self.camera_height = camera_height
self.camera_width = camera_width
log.debug(f"Getting intrinsic matrix for {name}")
self.intrinsic_matrix = get_camera_intrinsic_matrix(
env.sim, name, camera_height, camera_width
)
log.debug(f"Getting extrinsic matrix for {name}")
self.extrinsic_matrix = get_camera_extrinsic_matrix(env.sim, name)
log.debug(f"Getting transform matrix for {name}")
self.transform_matrix = get_camera_transform_matrix(
env.sim, name, camera_height, camera_width
)
log.debug(f"Getting camera to world transform for {name}")
self.camera_to_world_transform = np.linalg.inv(self.transform_matrix)
log.debug(f"Camera initialized for {name}")
def get_world_coords_from_pixels(self, pixels, depth):
# cv2.imshow("Depth", depth)
# cv2.waitKey(0)
log.debug(
f"Getting world coordinates from pixels {pixels} and depth {depth.shape}"
)
real_depth_map = get_real_depth_map(self.env.sim, depth)
log.debug(f"Real depth map: {real_depth_map.shape}")
log.debug(
f"pixels leading shape: depth map leading shape -- {pixels.shape[:-1]} -- {real_depth_map.shape[:-3]}"
)
return transform_from_pixels_to_world(
pixels, real_depth_map, self.camera_to_world_transform
)
def pixel_to_world(self, pixel):
depth = self.env._get_observations()["robot0_eye_in_hand_depth"][::-1]
return self.get_world_coords_from_pixels(np.array(pixel), depth)
| 2,055 | Python | 37.074073 | 114 | 0.640876 |
AshisGhosh/roboai/robosuite/robosim/robosim/grasp_handler.py | import base64
import numpy as np
from PIL import Image
import cv2
from enum import Enum
from robosim.camera import Camera
from shared.utils.grasp_client import _check_server, _get_grasp_from_image
from shared.utils.robotic_grasping_client import _get_grasps_from_rgb_and_depth
import shared.utils.llm_utils as llm_utils
import logging
log = logging.getLogger("robosim robot grasp")
log.setLevel(logging.DEBUG)
class GraspMethod(Enum):
GRASP_DET_SEG = "grasp_det_seg"
GR_CONVNET = "gr_convnet"
class Grasp:
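    # Converts a rotated bounding box (r_bbox) returned by a grasp detector into a
    # world-frame grasp position and yaw angle, using the eye-in-hand camera's depth
    # image and transform matrices.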
def __init__(
self, r_bbox, image, depth, env, bbox=None, cls=None, cls_name=None, score=None
):
log.debug("Initializing Grasp object.")
self.r_bbox = r_bbox
self.image = image
self.depth = depth
self.env = env
log.debug("Initializing camera")
self.camera = Camera(self.env, "robot0_eye_in_hand")
self.cls = cls
self.cls_name = cls_name
self.score = score
self.bbox = bbox
        self.approach_poses = []
self.grasp_pose = None
self.retract_poses = []
log.debug(f"Generated grasp for {self}")
def generate_grasp_sequence(self):
log.info(f"Generating grasp sequence for {self}")
self.grasp_pose = self.get_grasp_pose_from_r_bbox()
        return self.approach_poses, self.grasp_pose, self.retract_poses
def get_grasp_pose_from_r_bbox(self):
# Get the center of the bounding box
log.debug(f"Getting grasp pose from r_bbox: {self.r_bbox}")
# pixels work in y, x not x, y
center = (
int(np.mean([coord[1] for coord in self.r_bbox])),
int(np.mean([coord[0] for coord in self.r_bbox])),
)
log.debug(f"Center of the bounding box: {center}")
# Get the world coordinates of the center
log.debug(f"{np.array(center).shape} -- {np.array(self.depth).shape}")
world_coords = self.camera.get_world_coords_from_pixels(
np.array(center), np.array(self.depth)
)
log.debug(f"World coordinates of the center: {world_coords}")
        self.grasp_position = world_coords
# Get grasp orientation
# Get the angle from the bounding box
pt1 = self.r_bbox[0]
pt2 = self.r_bbox[1]
angle = np.arctan2(pt2[1] - pt1[1], pt2[0] - pt1[0]) + np.pi / 2
self.grasp_orientation = angle
log.debug(f"Grasp orientation: {angle}")
return world_coords, angle
def __str__(self):
return f"Grasp: {self.cls_name} with score {self.score} at r_bbox {self.r_bbox}"
class GraspHandler:
def __init__(self, robot):
self.robot = robot
self.env = robot.robosim.env
async def get_grasps_from_image(self, image: Image, visualize=True):
res = await _get_grasp_from_image(image)
if visualize:
self.show_image(res["image"])
return res["result"]
async def get_grasp_image(self) -> Image:
# turn off marker visualization
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = 0
im = self.env._get_observations()["robot0_eye_in_hand_image"]
img = Image.fromarray(im[::-1])
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img
async def get_grasp_image_and_depth(self):
# turn off marker visualization
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[
self.robot.robosim.env.sim.model.site_name2id(marker)
][3] = 0
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()
img = Image.fromarray(im["robot0_eye_in_hand_image"][::-1])
depth = im["robot0_eye_in_hand_depth"][::-1]
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img, depth
async def get_grasp_image_and_depth_image(self):
img, depth = await self.get_grasp_image_and_depth()
squeezed_depth = np.squeeze(depth)
normalized_depth = (
(squeezed_depth - np.min(squeezed_depth))
/ (np.max(squeezed_depth) - np.min(squeezed_depth))
* 255
)
depth_uint8 = normalized_depth.astype(np.uint8)
depth_image = Image.fromarray(depth_uint8)
return img, depth_image, depth
async def get_grasp(self, obj_name, method=GraspMethod.GRASP_DET_SEG):
if method == GraspMethod.GRASP_DET_SEG:
log.debug("Getting grasp from grasp_det_seg...")
return await self.get_grasp_grasp_det_seg(obj_name)
elif method == GraspMethod.GR_CONVNET:
log.debug("Getting grasp from grasp convnet...")
return await self.get_grasp_gr_convnet(obj_name)
else:
raise ValueError(f"Invalid grasp method: {method}")
async def get_grasp_grasp_det_seg(self, obj_name):
# return await self.get_grasps()
log.debug("Getting grasp image and depth...")
img, depth = await self.get_grasp_image_and_depth()
log.debug("Getting grasp from image...")
grasps = await self.get_grasps_from_image(img)
if len(grasps) == 0:
log.error("No grasps found.")
return None
candidate_objs = [obj["cls_name"].replace("_", " ") for obj in grasps]
log.info(f"Getting closest object to '{obj_name}' from {candidate_objs}")
closest_obj = await llm_utils.get_closest_text(obj_name, candidate_objs)
log.info(f"Closest object: {closest_obj}")
grasp = grasps[candidate_objs.index(closest_obj)]
g_obj = Grasp(
cls=grasp["cls"],
cls_name=grasp["cls_name"],
score=grasp["obj"],
bbox=grasp["bbox"],
r_bbox=grasp["r_bbox"],
image=img,
depth=depth,
env=self.robot.robosim.env,
)
return grasp, g_obj.generate_grasp_sequence()
async def get_grasp_gr_convnet(self, obj_name):
log.debug("Getting grasp image and depth...")
img, depth_image, depth = await self.get_grasp_image_and_depth_image()
log.debug("Getting grasp from image...")
grasps = await _get_grasps_from_rgb_and_depth(img, depth_image)
grasp = grasps[0]
log.debug(f"r_bbox: {grasp['r_bbox']}")
g_obj = Grasp(
cls=None,
cls_name=None,
score=None,
bbox=None,
r_bbox=grasp["r_bbox"],
image=img,
depth=depth,
env=self.robot.robosim.env,
)
return grasp, g_obj.generate_grasp_sequence()
async def check_server(self):
return await _check_server()
def show_image(self, base64_image):
image_bytes = base64.b64decode(base64_image)
nparr = np.frombuffer(image_bytes, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Display the image using OpenCV
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 7,655 | Python | 33.486486 | 88 | 0.585761 |
AshisGhosh/roboai/robosuite/robosim/robosim/robosim.py | import numpy as np
from dataclasses import dataclass
from enum import Enum
import asyncio
import copy
from PIL import Image
import httpx
import robosuite as suite
from robosuite import load_controller_config
from robosuite.wrappers import VisualizationWrapper
from robosuite.utils.transform_utils import mat2quat, euler2mat
from robosuite.utils.camera_utils import CameraMover
from robosuite.utils.mjcf_utils import new_body, new_site
from robosim.task import TaskFactory, TaskClass, TaskStatus
from robosim.robot import Robot
from robosim.grasp_handler import Camera
import shared.utils.gradio_client as gradio
from shared.utils.model_server_client import _answer_question_from_image
import shared.utils.replicate_client as replicate # noqa: F401
import logging
log = logging.getLogger("robosim")
log.setLevel(logging.INFO)
class ControllerType(Enum):
JOINT_VELOCITY = 1
OSC_POSE = 2
@dataclass
class OSCControlStep:
dx: float = 0
dy: float = 0
dz: float = 0
droll: float = 0
dpitch: float = 0
dyaw: float = 0
gripper: float = 0
def to_list(self):
return [
self.dx,
self.dy,
self.dz,
self.droll,
self.dpitch,
self.dyaw,
self.gripper,
]
class RoboSim:
def __init__(self, controller_type=ControllerType.OSC_POSE):
self.controller_type = controller_type
self.env = None
self.task_factory = TaskFactory()
self.tasks = []
self.current_task = None
self._last_task = None
self._last_task_finish_status = None
self.render_task = None
self.execute_async_task = None
self.__close_renderer_flag = asyncio.Event()
self.__executing_async = asyncio.Event()
self.__pause_execution = asyncio.Event()
self.__stop_execution = asyncio.Event()
self.__getting_image = asyncio.Event()
self.__getting_image_ts = None
def setup(self):
self.env = self.setup_env()
self.setup_markers()
self.setup_cameras()
self.robot = Robot(self)
self.register_tasks()
# self.test_action([0,0,0,0,0,0,0,0])
def register_tasks(self):
self.task_factory = TaskFactory()
self.task_factory.register_task(self.robot.go_to_position)
self.task_factory.register_task(self.robot.go_to_relative_position)
self.task_factory.register_task(self.robot.go_to_orientation)
self.task_factory.register_task(self.robot.go_to_pick_center)
self.task_factory.register_task(self.robot.go_to_object)
self.task_factory.register_task(self.robot.get_grasp, TaskClass.DATA_TASK)
self.task_factory.register_task(self.robot.go_to_grasp_orientation)
self.task_factory.register_task(self.robot.go_to_grasp_position)
self.task_factory.register_task(self.robot.go_to_pose)
self.task_factory.register_task(self.robot.go_to_pre_grasp)
self.task_factory.register_task(self.add_grasp_marker, TaskClass.DATA_TASK)
self.task_factory.register_task(self.robot.close_gripper)
self.task_factory.register_task(self.robot.open_gripper)
self.task_factory.register_task(self.robot.go_to_drop)
def setup_env(self):
config = load_controller_config(
default_controller=self.controller_type.name
) # load default controller config
# create environment instance
env = suite.make(
env_name="PickPlace", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
controller_configs=config,
control_freq=20,
has_renderer=True,
render_camera="frontview",
# render_camera="robot0_eye_in_hand",
camera_names=["frontview", "agentview", "robot0_eye_in_hand"],
camera_heights=[672, 672, 480],
camera_widths=[672, 672, 640],
camera_depths=[False, False, True], # set to true for using depth sensor
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
# reset the environment
env.reset()
return env
def setup_cameras(self):
self.camera_mover = CameraMover(self.env, "agentview")
self.camera_mover.set_camera_pose(pos=[0.65, -0.25, 1.4])
log.info(f"Camera Pose: {self.camera_mover.get_camera_pose()}")
self.env.sim.forward()
self.env.sim.step()
self.env.step(np.zeros(self.env.action_dim))
def setup_markers(self):
self.markers = []
# self.add_marker([0.5, 0, 1.0], size=0.3, name="indicator_ball")
self.add_marker(
[0.5, 0, 2.0], size=0.05, name="grasp_marker", rgba=[0, 0, 1, 0.65]
)
self.add_marker(
[0.5, 0, 1.0], type="box", size=(0.03, 0.05, 0.1), name="gripper_goal"
)
def test_action(self, action, *args):
obs, reward, done, info = self.env.step(action)
def add_marker(
self, pos, type="sphere", size=0.03, name="indicator_ball", rgba=[1, 0, 0, 0.65]
):
indicator_config = {
"name": name,
"type": type,
"size": size,
"rgba": rgba,
"pos": pos,
}
self.markers.append(indicator_config)
self.env = VisualizationWrapper(self.env, self.markers)
self.env.sim.forward()
self.env.sim.step()
self.env.set_xml_processor(processor=None)
def _add_indicators(self, xml):
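        # Inject the configured marker bodies/sites into the MuJoCo XML so they can
        # be rendered and repositioned at runtime.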
import xml.etree.ElementTree as ET
root = ET.fromstring(xml)
worldbody = root.find("worldbody")
for indicator_config in self.markers:
config = copy.deepcopy(indicator_config)
indicator_body = new_body(
name=config["name"] + "_body", pos=config.pop("pos", (0, 0, 0))
)
indicator_body.append(new_site(**config))
worldbody.append(indicator_body)
xml = ET.tostring(root, encoding="utf8").decode("utf8")
return xml
async def add_grasp_marker(self, *args):
grasp_pos = self.robot.get_grasp_pose()[0]
self.add_marker(grasp_pos, name="grasp_marker")
self.env.render()
return f"Marker added at {grasp_pos}."
def reset(self):
self.env.reset()
self.setup_markers()
self.setup_cameras()
def move_gripper_goal_to_gripper(self):
gripper_pos = self.robot.get_gripper_position()
gripper_ori = mat2quat(self.robot.get_gripper_orientation())
self.move_marker(gripper_pos, gripper_ori, name="gripper_goal")
return f"Marker moved to gripper position: {gripper_pos}."
def move_marker(self, position=None, orientation=None, name="grasp_marker", *args):
if position is None and orientation is None:
raise ValueError("Either position or orientation must be provided.")
if position is not None:
self.env.sim.model.body_pos[
self.env.sim.model.body_name2id(name + "_body")
] = position
if orientation is not None:
if len(orientation) == 3:
base_orientation = np.array([np.pi, 0, np.pi / 2])
o = copy.deepcopy(orientation)
o = np.array(o) - base_orientation
orientation = base_orientation + [-o[1], o[2], -o[0]]
orientation = euler2mat(orientation)
orientation = mat2quat(orientation)
self.env.sim.model.body_quat[
self.env.sim.model.body_name2id(name + "_body")
] = orientation
self.env.sim.forward()
self.env.sim.step()
self.env.render()
resp = f"Marker {name} moved to {position} with orientation {orientation}."
log.debug(resp)
return resp
def pixel_to_marker(self, pixel, camera_name="robot0_eye_in_hand"):
if camera_name != "robot0_eye_in_hand":
raise NotImplementedError(
"pixel_to_marker only supports robot0_eye_in_hand currently."
)
camera = Camera(self.env, camera_name)
marker_pose = camera.pixel_to_world(pixel)
log.debug(f"Marker Pose: {marker_pose}")
self.move_marker(marker_pose)
return str(marker_pose)
def start(self):
log.info("Starting Robosuite Simulation...")
# self.env.reset()
self.reset()
self.env.render()
action = None
for i in range(1000):
action = self.check_for_action()
if action is None:
action = OSCControlStep().to_list()
obs, reward, done, info = self.env.step(
action
) # take action in the environment
self.env.render() # render on display
async def start_async(self):
if self.render_task is None or self.render_task.done():
self.__close_renderer_flag.clear()
# self.env.reset()
self.reset()
self.render_task = asyncio.create_task(self.render())
return True
async def render(self):
hz = 5
while not self.__close_renderer_flag.is_set(): # Use the Event for checking
if not self.__executing_async.is_set():
self.env.render()
await asyncio.sleep(1 / hz)
async def close_renderer(self):
self.__close_renderer_flag.set()
if self.render_task and not self.render_task.done():
await self.render_task
self.env.close_renderer()
return True
async def start_execution(self):
self.execute_async_task = asyncio.create_task(self.execute_async())
return True
async def execute_async(self):
if not self.render_task or self.render_task.done():
await self.start_async()
self.__pause_execution.clear()
self.__executing_async.set()
while self.tasks or self.current_task:
if self.__stop_execution.is_set():
self.__executing_async.clear()
return "Execution stopped."
if self.__pause_execution.is_set():
await self.manage_execution_delay()
continue
action = await self.check_for_action()
if action is None:
action = OSCControlStep().to_list()
obs, reward, done, info = self.env.step(action)
if self.__getting_image.is_set():
continue
else:
self.env.render()
await self.manage_execution_delay()
self.__executing_async.clear()
return "All tasks executed."
async def manage_execution_delay(self):
delay = 0.0
if self.__getting_image.is_set():
delay = 0.1
else:
if self.__getting_image_ts is not None:
current_time = asyncio.get_event_loop().time()
if current_time - self.__getting_image_ts < 1:
delay = 0.1
else:
self.__getting_image_ts = None
await asyncio.sleep(delay)
async def stop_execution(self):
log.info("Stopping execution...")
self.__stop_execution.set()
return True
async def pause_execution(self):
log.info("Pausing execution...")
self.__pause_execution.set()
return True
async def resume_execution(self):
log.info("Resuming execution...")
self.__pause_execution.clear()
self.__executing_async.set()
return True
async def check_for_action(self):
"""
Check if there is a task in the queue. If there is, execute it.
"""
if self.current_task is None and self.tasks:
self.current_task = self.tasks.pop(0)
log.info(f"Current Task: {self.current_task.name}")
if self.current_task:
if self.current_task.task_class != TaskClass.CONTROL_TASK:
log.info(f"Executing Task: {self.current_task.name}")
data = await self.current_task.execute()
log.info(f"Data: {data}")
if data is None:
self.finish_current_task(
status=TaskStatus.FAILED, status_msg="Task failed."
)
self.finish_current_task()
return OSCControlStep().to_list()
return await self.do_current_task()
return None
async def do_current_task(self):
"""
Execute the current task in the queue.
"""
action = self.current_task.execute()
log.debug(f"Action: {action}")
self.check_if_task_finished(action)
return action
def check_if_task_finished(self, action):
if action == OSCControlStep().to_list():
self.finish_current_task()
        elif not self.robot.is_gripper_moving(action):
self.finish_current_task(
status=TaskStatus.FAILED, status_msg="Gripper not moving."
)
def finish_current_task(self, status=TaskStatus.COMPLETED, status_msg=None):
"""
Finish the current task in the queue.
"""
log.info(
f"Task finished: {self.current_task.name} with status {status} and message {status_msg}."
)
self._last_task = self.current_task
self._last_task_finish_status = {"status": status, "status_msg": status_msg}
self.current_task = None
        # Robot's goal attributes are name-mangled (double underscore), so they must
        # be cleared via their mangled names from outside the Robot class.
        self.robot._Robot__goal_position = None
        self.robot._Robot__goal_orientation = None
def add_task(self, name, function, *args, **kwargs):
"""
Add a task to the queue.
"""
task = self.task_factory.create_task(function, name, *args, **kwargs)
self.tasks.append(task)
log.info(f"Task added: {task}")
def get_tasks(self):
return self.tasks
def clear_tasks(self):
self.tasks = []
if self.current_task:
self.finish_current_task()
async def get_image_realtime(
self, camera_name="agentview", width=512, height=512
) -> Image:
self.__getting_image.set()
self.__getting_image_ts = asyncio.get_event_loop().time()
im = self.env.sim.render(width=width, height=height, camera_name=camera_name)
img = Image.fromarray(im[::-1])
self.__getting_image.clear()
return img
async def get_image(self, camera_name="agentview") -> Image:
markers = [
"gripper0_grip_site",
"gripper0_grip_site_cylinder",
"gripper_goal",
"grasp_marker",
]
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = 0
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()[camera_name + "_image"]
img = Image.fromarray(im[::-1])
# turn on marker visualization
for marker in markers:
self.env.sim.model.site_rgba[self.env.sim.model.site_name2id(marker)][3] = (
0.25
)
return img
async def get_image_with_markers(self, camera_name="agentview") -> Image:
self.env.step(np.zeros(self.env.action_dim))
im = self.env._get_observations()[camera_name + "_image"]
img = Image.fromarray(im[::-1])
return img
def get_object_names(self):
return [obj.name for obj in self.env.objects]
def get_object_pose(self):
for obj in self.env.objects:
dist = self.env._gripper_to_target(
gripper=self.env.robots[0].gripper,
target=obj.root_body,
target_type="body",
return_distance=True,
)
log.info(f"Object {obj.name}: {dist}")
async def pick(self, object_name):
self.clear_tasks()
await self.start_async()
# self.add_task("go to object", "go_to_object", object_name)
self.add_task("go to pick center", "go_to_pick_center", "")
self.add_task("get grasp", "get_grasp", object_name)
await self.execute_async()
if self._last_task_finish_status["status"] == TaskStatus.FAILED:
retry_attempts = 3
for i in range(retry_attempts):
log.info(f"Retrying pick attempt {i+1}...")
self.add_task("go to pick center", "go_to_pick_center", "")
self.add_task("get grasp", "get_grasp", object_name)
if self._last_task_finish_status["status"] == TaskStatus.COMPLETED:
break
success, _ = await self.get_feedback("grasp-selection-feedback", object_name)
if not success:
log.info("Grasp selection feedback failed.")
return False
self.add_task("move to pre-grasp", "go_to_pre_grasp", "")
self.add_task("open gripper", "open_gripper", "")
self.add_task("go to grasp position", "go_to_grasp_position", "")
self.add_task("close gripper", "close_gripper", "")
await self.execute_async()
if self._last_task_finish_status["status"] != TaskStatus.COMPLETED:
log.info("Pick failed.")
return False
success, _ = await self.get_feedback("grasp-feedback", object_name)
async def get_feedback(self, feedback_type, object_name):
if feedback_type == "grasp-selection-feedback":
image = await self.get_image_with_markers()
question = f"Is the the blue sphere marker over the {object_name}?"
elif feedback_type == "grasp-feedback":
image = await self.get_image()
question = f"Is the object {object_name} grasped by the robot?"
log.info(f"Giving feedback for {feedback_type}...")
log.info(f"Question: {question}")
# output = await _answer_question_from_image(image, question)
try:
# output = gradio.moondream_answer_question_from_image(image, question)
# output = replicate.moondream_answer_question_from_image(image, question)
output = gradio.qwen_vl_max_answer_question_from_image(image, question)
except httpx.ConnectError as e:
log.error(f"Error connecting to the model server: {e}")
output = await _answer_question_from_image(image, question)
log.warn(output)
if "yes" in output["result"].lower():
return True, output
return False, output
if __name__ == "__main__":
sim = RoboSim()
sim.setup()
available_tasks = sim.task_factory.get_task_types()
log.info(f"Available Tasks: {available_tasks}")
sim.add_task("Position Check", "go_to_position", [-0.3, -0.3, 1])
sim.add_task("Relative Position Check", "go_to_relative_position", [0.3, 0.1, 0.1])
sim.add_task("Go to can", "go_to_object", "Can")
sim.start()
| 19,083 | Python | 35.489484 | 101 | 0.584394 |
AshisGhosh/roboai/robosuite/robosim/robosim/robot.py | import numpy as np
import copy
from robosim.grasp_handler import GraspHandler
from robosuite.utils.transform_utils import euler2mat, mat2euler, mat2quat
from robosuite.utils.sim_utils import get_contacts
import logging
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("robosim robot")
log.setLevel(logging.INFO)
class Robot:
def __init__(self, robosim):
self.robosim = robosim
self.env = robosim.env
self.grasp = GraspHandler(self)
self._last_position = None
self._last_orientation = None
self.__goal_position = None
self.__goal_orientation = None
self.__grasp_sequence = None
self.__gripper_contact_started = None
def go_to_position(self, position):
if self.__goal_position is None:
self.__goal_position = position
marker_orientation = (
self.__goal_orientation
if self.__goal_orientation is not None
else self.get_gripper_orientation_in_world_as_euler()
)
self.robosim.move_marker(
name="gripper_goal",
position=self.__goal_position,
orientation=marker_orientation,
)
if len(position) != 3:
raise ValueError("Position must be a 3D point.")
dist = self.distance_to_position(position)
# log.debug(f"Distance: {dist}")
action = self.simple_velocity_control(dist)
if action[:-1] == [0, 0, 0, 0, 0, 0]:
self.__goal_position = None
return action
def go_to_relative_position(self, position, frame="gripper"):
if len(position) != 3:
raise ValueError("Position must be a 3D point.")
if frame != "gripper":
raise NotImplementedError("Only gripper frame is supported for now.")
if self.__goal_position is None:
self.__goal_position = self.get_gripper_position() + np.array(position)
self.robosim.move_marker(
name="gripper_goal",
position=self.__goal_position,
orientation=mat2quat(self.get_gripper_orientation_in_world()),
)
dist = self.distance_to_position(self.__goal_position)
return self.simple_velocity_control(dist)
def go_to_pick_center(self, *args):
return self.go_to_pose(pose=[-0.02, -0.27, 1.1, 0, 0, 0])
def go_to_drop(self, *args):
return self.go_to_pose(pose=[0.1, -0.57, 1.1, 0, 0, 0])
def go_to_orientation(self, orientation, roll_only=False):
if len(orientation) != 3:
raise ValueError("Orientation must be a 3D rotation.")
dist = self.delta_to_orientation(orientation)
log.debug(f"Distance (orientation): {dist}")
if roll_only:
dist[0] = dist[1] = 0
log.debug(f"Distance (roll only): {dist}")
action = self.simple_orientation_control(dist)
if action[:-1] == [0, 0, 0, 0, 0, 0]:
self.__goal_orientation = None
return action
def go_to_pose(self, pose, gripper=0):
position = pose[:3]
orientation = pose[3:]
if self.__goal_position is None:
self.__goal_position = position
dist = self.distance_to_position(position)
position_action = self.simple_velocity_control(dist)[:3]
dist = self.delta_to_orientation(orientation)
orientation_action = self.simple_orientation_control(dist)[3:-1]
if (position_action == [0, 0, 0]) and (orientation_action == [0, 0, 0]):
self.__goal_position = None
self.__goal_orientation = None
action = [*position_action, *orientation_action, gripper]
log.debug(f"ACTION: {action}")
return action
async def get_grasp(self, object_name="Cereal", *args):
log.debug(f"Getting grasp for object: {object_name}")
grasp, grasp_sequence = await self.grasp.get_grasp(obj_name=object_name)
self.__grasp_sequence = grasp_sequence
self.robosim.move_marker(grasp_sequence[1][0])
return grasp
def get_grasp_sequence(self):
return self.__grasp_sequence
def get_grasp_pose(self):
return self.__grasp_sequence[1]
def go_to_grasp_orientation(self, *args):
grasp_pose = self.__grasp_sequence[1]
grasp_ori = [0, 0, grasp_pose[1]]
return self.go_to_orientation(grasp_ori)
def go_to_grasp_position(self, *args):
grasp_pose = copy.deepcopy(self.__grasp_sequence[1])
grasp_position = grasp_pose[0]
grasp_position[2] -= 0.01
return self.go_to_position(grasp_position)
def go_to_pre_grasp(self, *args):
grasp_pose = self.__grasp_sequence[1]
pre_grasp_pos = [grasp_pose[0][0], grasp_pose[0][1], 1.05]
pre_grasp_ori = [0, 0, grasp_pose[1]]
return self.go_to_pose([*pre_grasp_pos, *pre_grasp_ori])
def get_gripper_position(self):
gripper = self.env.robots[0].gripper
gripper_pos = copy.deepcopy(
self.env.sim.data.get_site_xpos(gripper.important_sites["grip_site"])
)
return gripper_pos
def get_gripper_orientation_in_world(self):
gripper_ori = self.robosim.env._eef_xmat
return gripper_ori
def get_gripper_orientation_in_world_as_euler(self):
gripper_ori = self.get_gripper_orientation_in_world()
gripper_ori = mat2euler(gripper_ori, axes="rxyz")
return gripper_ori
def is_gripper_moving(self, action):
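        # Keeps a rolling window of recent end-effector positions/orientations and
        # reports False once both deltas stay tiny, which upstream code treats as a
        # stalled (failed) motion.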
if action[-1]:
return True
if self._last_position is None:
self._last_position = [self.get_gripper_position()]
return True
if self._last_orientation is None:
self._last_orientation = [self.get_gripper_orientation_in_world_as_euler()]
return True
        if len(self._last_position) < 10:
            # Still warming up the history buffers; keep collecting samples.
            self._last_position.append(self.get_gripper_position())
            return True
        if len(self._last_orientation) < 10:
            self._last_orientation.append(
                self.get_gripper_orientation_in_world_as_euler()
            )
            return True
if len(self._last_position) > 10:
self._last_position.pop(0)
if len(self._last_orientation) > 10:
self._last_orientation.pop(0)
current_pos = self.get_gripper_position()
current_ori = self.get_gripper_orientation_in_world_as_euler()
delta_pos = np.linalg.norm(self._last_position[-1] - current_pos)
delta_ori = np.linalg.norm(
np.array(
[
self._get_closest_distance(a, b)
for a, b in zip(self._last_orientation[-1], current_ori)
]
)
)
self._last_position.append(current_pos)
self._last_orientation.append(current_ori)
log.info(f"Delta Position: {delta_pos}, Delta Orientation: {delta_ori}")
if delta_pos < 0.001 and delta_ori < 0.01:
return False
return True
def distance_to_position(self, position):
log.debug(f" Goal Position: {position}")
gripper_pos = self.get_gripper_position()
log.debug(f" Gripper Position: {gripper_pos}")
dist = position - gripper_pos
log.debug(f" Distance: {dist}")
return dist
def _get_closest_distance(self, a, b):
dist = np.remainder(a - b, 2 * np.pi)
if dist > np.pi:
dist -= 2 * np.pi
elif dist < -np.pi:
dist += 2 * np.pi
return dist
def delta_to_orientation(self, orientation):
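        # Compose the requested orientation with the gripper's calibrated rest
        # orientation, then compute the component-wise shortest angular distance to
        # the current end-effector orientation; the second and third components are
        # negated before being returned as the commanded delta.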
gripper_calibration_euler = [3.13, 0.14, -1.56]
gripper_calibration = euler2mat(gripper_calibration_euler)
gripper_calibration_quat = mat2quat(gripper_calibration)
log.debug("-----")
log.debug(f" request: {orientation}")
goal_mat = euler2mat(orientation)
goal_in_world_mat = np.dot(gripper_calibration, goal_mat)
goal_in_world_euler = mat2euler(goal_in_world_mat, axes="rxyz")
goal_in_world_quat = mat2quat(goal_in_world_mat)
current_gripper_ori_mat = self.robosim.env._eef_xmat
current_ori = mat2euler(current_gripper_ori_mat, axes="rxyz")
current_ori_quat = mat2quat(current_gripper_ori_mat)
actual_dist = np.array(
[
self._get_closest_distance(a, b)
for a, b in zip(goal_in_world_euler, current_ori)
]
)
dist = actual_dist
dist[1] *= -1
dist[2] *= -1
if self.__goal_orientation is None:
self.__goal_orientation = goal_in_world_euler
marker_position = (
self.__goal_position
if self.__goal_position is not None
else self.get_gripper_position()
)
self.robosim.move_marker(
name="gripper_goal",
orientation=goal_in_world_euler,
position=marker_position,
)
log.debug(f" Gripper Calibration: {gripper_calibration_euler}")
log.debug(f" Goal in world: {goal_in_world_euler}")
log.debug(f" Current in world: {current_ori}")
log.debug(" ")
log.debug(f" Gripper Calibration [quat]: {gripper_calibration_quat}")
log.debug(f" Goal in world [quat]: {goal_in_world_quat}")
log.debug(f" Current in world [quat]: {current_ori_quat}")
return dist
def go_to_object(self, target_obj_name="Can"):
obj = self.env.objects[self.env.object_to_id[target_obj_name.lower()]]
dist = self.env._gripper_to_target(
gripper=self.env.robots[0].gripper,
target=obj.root_body,
target_type="body",
)
return self.simple_velocity_control(dist)
def simple_velocity_control(self, dist):
euclidean_dist = np.linalg.norm(dist)
if euclidean_dist < 0.01:
return [0, 0, 0, 0, 0, 0, 0]
cartesian_velocities = dist / euclidean_dist
log.debug(f" Cartesian Velocities: {cartesian_velocities}")
action = [*cartesian_velocities, 0, 0, 0, 0]
log.debug(f"XYZ Action: {action}")
return action
def simple_orientation_control(self, orientation):
euclidean_dist = np.linalg.norm(orientation)
if euclidean_dist < 0.02:
return [0, 0, 0, 0, 0, 0, 0]
max_vel = 0.4
if euclidean_dist < 0.4:
max_vel = 0.1
if euclidean_dist < 0.2:
max_vel = 0.05
# if euclidean_dist < 0.05:
# max_vel = 0.02
cartesian_velocities = orientation / euclidean_dist
cartesian_velocities = np.clip(cartesian_velocities, -max_vel, max_vel)
for i in range(3):
if abs(orientation[i]) < 0.02: # ~ 1 degree threshold
cartesian_velocities[i] = 0
action = [0, 0, 0, *cartesian_velocities, 0]
log.debug(f"RPY Action: {action} (euclidean_dist: {euclidean_dist})")
return action
def close_gripper(self, *args):
# get current gripper position
gripper = self.env.robots[0].gripper
gripper_contacts = get_contacts(self.robosim.env.sim, gripper)
log.info(f"Gripper contacts: {gripper_contacts}")
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if self._is_gripper_closed():
return [0, 0, 0, 0, 0, 0, 0]
return [0, 0, 0, 0, 0, 0, 1]
def _is_gripper_closed(self):
gripper = self.env.robots[0].gripper
gripper_contacts = get_contacts(self.robosim.env.sim, gripper)
log.info(f"Gripper contacts: {gripper_contacts}")
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if gripper_contacts:
if self.__gripper_contact_started is None:
self.__gripper_contact_started = [
left_fingerpad_pos,
right_fingerpad_pos,
]
else:
if (
np.linalg.norm(
self.__gripper_contact_started[0] - left_fingerpad_pos
)
> 0.01
and np.linalg.norm(
self.__gripper_contact_started[1] - right_fingerpad_pos
)
> 0.01
):
return False
return True
else:
self.__gripper_contact_started = None
if distance < 0.01:
return True
return False
def open_gripper(self, *args):
gripper = self.env.robots[0].gripper
right_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["right_fingerpad"][0]
)
left_fingerpad_pos = self.env.sim.data.get_geom_xpos(
gripper.important_geoms["left_fingerpad"][0]
)
log.debug(f" Right fingerpad position: {right_fingerpad_pos}")
log.debug(f" Left fingerpad position: {left_fingerpad_pos}")
distance = np.linalg.norm(right_fingerpad_pos - left_fingerpad_pos)
log.debug(f" Distance: {distance}")
if distance > 0.08:
return [0, 0, 0, 0, 0, 0, 0]
return [0, 0, 0, 0, 0, 0, -1]
| 14,488 | Python | 36.246787 | 87 | 0.571162 |
AshisGhosh/roboai/robosuite/robosim/robosim/task.py | from enum import Enum
import logging
logging.basicConfig(level=logging.INFO)
class TaskClass(Enum):
CONTROL_TASK = 0
DATA_TASK = 1
ASYNC_DATA_TASK = 2
class TaskStatus(Enum):
PENDING = 0
RUNNING = 1
COMPLETED = 2
FAILED = 3
class TaskFactory:
def __init__(self):
self._creators = {}
def register_task(self, creator, task_class=TaskClass.CONTROL_TASK):
self.register_task_type(
creator.__name__,
lambda name, *args, **kwargs: Task(
name, creator, task_class, *args, **kwargs
),
)
def register_task_type(self, task_type, creator):
self._creators[task_type] = creator
def create_task(self, task_type, task_name=None, *args, **kwargs):
creator = self._creators.get(task_type)
if not creator:
raise ValueError(f"Task type {task_type} not registered.")
if task_name is not None:
# Use the provided task_name or fallback to a default naming convention
return creator(task_name, *args, **kwargs)
else:
return creator(task_type, *args, **kwargs)
def get_task_types(self):
return self._creators.keys()
class Task:
def __init__(self, name, function, task_class, *args, **kwargs):
self.name = name
self.function = function
self.task_class = task_class
self.args = args
self.kwargs = kwargs
def execute(self):
try:
return self.function(*self.args, **self.kwargs)
except Exception as e:
logging.error(f"Error executing task {self.name}: {e}")
def __str__(self):
return f"Task: {self.name}\n Function: {self.function}\n Args: {self.args}\n Kwargs: {self.kwargs}"
| 1,793 | Python | 26.181818 | 116 | 0.588957 |
AshisGhosh/roboai/robosuite/robosim/robosim/quick_start.py | import numpy as np
from dataclasses import dataclass
from enum import Enum
import robosuite as suite
from robosuite import load_controller_config
class ControllerType(Enum):
JOINT_VELOCITY = 1
OSC_POSE = 2
@dataclass
class OSCControlStep:
dx: float
dy: float
dz: float
droll: float
dpitch: float
dyaw: float
gripper: float
def to_list(self):
return [
self.dx,
self.dy,
self.dz,
self.droll,
self.dpitch,
self.dyaw,
self.gripper,
]
def dummy_joint_vel_control(action, env, step):
"""
Dummy control function for joint velocity control
"""
if action is None:
action = np.random.randn(env.robots[0].dof) # sample random action
for i, a in enumerate(action):
action[i] += 0.1 * np.sin(step / 100) # add some oscillation to the action
print(f"Action {i}: {action}")
return action
def dummy_osc_control(action, env, step):
"""
Dummy control function for OSC control
dx, dy, dz, droll, dpitch, dyaw, gripper
"""
if action is None:
action = OSCControlStep(0, 0, 0, 0, 0, 0, 0)
else:
action = OSCControlStep(*action)
action.dx = 0.1 * np.sin(step / 100)
action.dy = 0.1 * np.cos(step / 100)
action.dz = 0.1 * np.sin(step / 100)
action.droll = 0.1 * np.cos(step / 100)
action.dpitch = 0.1 * np.sin(step / 100)
action.dyaw = 0.1 * np.cos(step / 100)
action.gripper = 0.1 * np.sin(step / 100)
print(f"Action: {action.to_list()}")
return action.to_list()
class robosim:
def __init__(self, controller_type=ControllerType.OSC_POSE):
self.controller_type = controller_type
self.update_action = self.get_action_func()
def get_action_func(self):
if self.controller_type == ControllerType.JOINT_VELOCITY:
return dummy_joint_vel_control
elif self.controller_type == ControllerType.OSC_POSE:
return dummy_osc_control
else:
raise ValueError("Invalid controller type")
def start(self):
print("Starting Robosuite Simulation...")
config = load_controller_config(
default_controller=self.controller_type.name
) # load default controller config
# create environment instance
env = suite.make(
env_name="Lift", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
controller_configs=config,
control_freq=20,
has_renderer=True,
render_camera="frontview",
camera_names=["frontview", "agentview"],
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
# reset the environment
env.reset()
action = None
for i in range(1000):
action = self.update_action(action, env, i)
obs, reward, done, info = env.step(action) # take action in the environment
env.render() # render on display
if __name__ == "__main__":
sim = robosim()
sim.start()
| 3,246 | Python | 26.991379 | 88 | 0.587184 |
AshisGhosh/roboai/robosuite/robosim/app/main.py | #!/usr/bin/python -u
import io
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from robosim.robosim import RoboSim
import logging
logging.basicConfig(level=logging.DEBUG)
# Create FastAPI instance
app = FastAPI()
robosim = RoboSim()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
class Task(BaseModel):
name: str
type: str
args: list | str
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robosim server."}
@app.on_event("startup")
async def startup_event():
return robosim.setup()
@app.post("/get_feedback")
async def get_feedback():
return await robosim.get_feedback("grasp-selection-feedback", "cereal")
@app.post("/pick")
async def pick(object_name: str):
return await robosim.pick(object_name)
@app.post("/test")
async def test():
robosim.clear_tasks()
await add_task(Task(name="go to pick", type="go_to_pick_center", args=""))
await add_task(Task(name="get grasp", type="get_grasp", args="cereal"))
await add_task(Task(name="go to pre grasp", type="go_to_pre_grasp", args=""))
await add_task(Task(name="open gripper", type="open_gripper", args=""))
await add_task(Task(name="go to grasp pos", type="go_to_grasp_position", args=""))
await add_task(Task(name="close gripper", type="close_gripper", args=""))
await add_task(Task(name="go to pre grasp", type="go_to_pre_grasp", args=""))
await add_task(Task(name="go to drop", type="go_to_drop", args=""))
await add_task(Task(name="open gripper", type="open_gripper", args=""))
await robosim.start_execution()
return {"msg": "Test task added and execution started."}
@app.post("/start")
async def start():
return await robosim.start_async()
@app.post("/move_pose")
async def move_pose(pose: list[float]):
await add_task(Task(name="move pose", type="go_to_pose", args=pose))
await robosim.start_execution()
return {"msg": "Pose move task added and execution started."}
@app.post("/move_orientation")
async def move_orientation(orientation: list[float]):
await add_task(
Task(name="move orientation", type="go_to_orientation", args=orientation)
)
await robosim.start_execution()
return {"msg": "Orientation move task added and execution started."}
@app.post("/move_position")
async def move_position(position: list[float]):
await add_task(Task(name="move position", type="go_to_position", args=position))
await robosim.start_execution()
return {"msg": "Position move task added and execution started."}
@app.get("/move_gripper_goal_to_gripper")
async def move_gripper_goal_to_gripper():
return robosim.move_gripper_goal_to_gripper()
@app.get("/get_gripper_orientation")
async def get_gripper_orientation():
    # Robot only exposes a world-frame orientation accessor; use it here.
    return str(robosim.robot.get_gripper_orientation_in_world_as_euler())
@app.get("/get_gripper_orientation_in_world")
async def get_gripper_orientation_in_world():
return str(robosim.robot.get_gripper_orientation_in_world_as_euler())
@app.post("/pixel_to_marker")
async def pixel_to_marker(pixel: list[int]):
return robosim.pixel_to_marker(pixel)
@app.post("/add_marker")
async def add_marker(position: list[float]):
return robosim.add_marker(position)
@app.post("/move_marker")
async def move_marker(
name: str, position: list[float] | None, orientation: list[float] | None
):
return robosim.move_marker(name=name, position=position, orientation=orientation)
@app.get("/get_grasp_image")
async def get_grasp_image():
logging.info("Getting grasp image...")
img = await robosim.robot.grasp.get_grasp_image()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_grasp_image_and_depth")
async def get_grasp_image_and_depth():
logging.info("Getting grasp image and depth...")
img, depth = await robosim.robot.grasp.get_grasp_image_and_depth()
logging.debug("Image and depth received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_grasp_depth_image")
async def get_grasp_image_and_depth_image():
logging.info("Getting grasp image and depth...")
_img, depth = await robosim.robot.grasp.get_grasp_image_and_depth_image()
logging.debug("Image and depth received.")
buf_depth = io.BytesIO()
depth.save(buf_depth, format="PNG")
buf_depth.seek(0)
return StreamingResponse(buf_depth, media_type="image/png")
@app.get("/get_image")
async def get_image():
logging.info("Getting image...")
img = await robosim.get_image()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_image_with_markers")
async def get_image_with_markers():
logging.info("Getting image with markers...")
img = await robosim.get_image_with_markers()
logging.debug("Image received.")
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
logging.debug("Image saved. Ready to stream.")
return StreamingResponse(buf, media_type="image/png")
@app.post("/pause")
async def pause():
return await robosim.pause_execution()
@app.post("/resume")
async def resume():
return await robosim.resume_execution()
@app.post("/close")
async def close():
return await robosim.close_renderer()
@app.post("/execute_tasks")
async def execute_tasks():
return await robosim.execute_async()
@app.post("/add_task")
async def add_task(task: Task):
logging.info(f"Adding task: {task.name} of type {task.type} with args {task.args}")
try:
robosim.add_task(task.name, task.type, task.args)
return {"msg": "Task added"}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
@app.get("/get_tasks")
async def get_tasks():
tasks = [
Task(name=t.name, type=t.function.__name__, args=t.args)
for t in robosim.get_tasks()
]
return tasks
@app.get("/get_objects")
async def get_objects():
return robosim.get_object_names()
| 6,879 | Python | 27.312757 | 87 | 0.682657 |
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/robomimic_sim.py | import imageio
import numpy as np
import os
from copy import deepcopy
import asyncio
from PIL import Image
import torch
import robosuite
from robosuite import load_controller_config
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils
from robomimic.envs.env_base import EnvBase
from robomimic.algo import RolloutPolicy
import urllib.request
class RobomimicSim:
def __init__(self):
self.rollout_visualizing = False
self.rollout_task = None
self.render_task = None
self.close_renderer_flag = (
asyncio.Event()
) # Use an asyncio Event for better coordination
def setup(self):
        # Get pretrained checkpoint from the model zoo
        ckpt_path = "models/lift_ph_low_dim_epoch_1000_succ_100.pth"
        # Lift (Proficient Human)
        os.makedirs(os.path.dirname(ckpt_path), exist_ok=True)
        if not os.path.exists(ckpt_path):
            urllib.request.urlretrieve(
                "http://downloads.cs.stanford.edu/downloads/rt_benchmark/model_zoo/lift/bc_rnn/lift_ph_low_dim_epoch_1000_succ_100.pth",
                filename=ckpt_path,
            )
        assert os.path.exists(ckpt_path)
device = TorchUtils.get_torch_device(try_to_use_cuda=True)
# restore policy
policy, ckpt_dict = FileUtils.policy_from_checkpoint(
ckpt_path=ckpt_path, device=device, verbose=True
)
# create environment from saved checkpoint
env, _ = FileUtils.env_from_checkpoint(
ckpt_dict=ckpt_dict,
render=True,
render_offscreen=True, # render to RGB images for video
verbose=True,
)
self.policy = policy
self.env = env
def custom_env(self):
        controller_config = load_controller_config(
            default_controller="OSC_POSE"
        )  # load the default OSC_POSE controller config
# create environment from scratch
env = robosuite.make(
env_name="Lift", # try with other tasks like "Stack" and "Door"
robots="Panda", # try with other robots like "Sawyer" and "Jaco"
gripper_types="default",
            controller_configs=controller_config,
control_freq=20,
has_renderer=True,
render_camera="frontview",
camera_names=["frontview", "agentview"],
has_offscreen_renderer=True,
use_object_obs=False,
use_camera_obs=True,
)
self.env = env
async def start_rollout(self):
if self.rollout_task is None or self.rollout_task.done():
self.close_renderer_flag.clear()
await self.start_renderer()
self.rollout_task = asyncio.create_task(self.run())
async def close_renderer(self):
self.close_renderer_flag.set() # Signal to stop the tasks
if self.render_task and not self.render_task.done():
await self.render_task # Await the task to ensure it completes
if self.rollout_task and not self.rollout_task.done():
self.rollout_task.cancel() # Cancel rollout task as it might be waiting for external input
try:
await (
self.rollout_task
) # Attempt to await the task to handle any cleanup
except asyncio.CancelledError:
pass # Expected if the task was cancelled
self.env.base_env.close_renderer()
async def render(self):
hz = 5
while not self.close_renderer_flag.is_set(): # Use the Event for checking
if not self.rollout_visualizing:
self.env.render(mode="human", camera_name="frontview")
await asyncio.sleep(1 / hz)
async def start_renderer(self):
if self.render_task is None or self.render_task.done():
self.close_renderer_flag.clear()
self.env.reset()
print("Now starting renderer...")
self.render_task = asyncio.create_task(self.render())
return True
async def reset(self):
self.env.reset()
return True
async def rollout(
self,
policy,
env,
horizon,
render=False,
video_writer=None,
video_skip=5,
camera_names=None,
):
"""
Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video,
and returns the rollout trajectory.
Args:
policy (instance of RolloutPolicy): policy loaded from a checkpoint
env (instance of EnvBase): env loaded from a checkpoint or demonstration metadata
horizon (int): maximum horizon for the rollout
render (bool): whether to render rollout on-screen
video_writer (imageio writer): if provided, use to write rollout to video
video_skip (int): how often to write video frames
camera_names (list): determines which camera(s) are used for rendering. Pass more than
one to output a video with multiple camera views concatenated horizontally.
Returns:
stats (dict): some statistics for the rollout - such as return, horizon, and task success
"""
print("Rolling out policy...")
assert isinstance(env, EnvBase)
assert isinstance(policy, RolloutPolicy)
assert not (render and (video_writer is not None))
policy.start_episode()
# obs = env.reset()
# state_dict = env.get_state()
# # hack that is necessary for robosuite tasks for deterministic action playback
# obs = env.reset_to(state_dict)
obs = env.get_observation()
        video_count = 0  # video frame counter
        total_reward = 0.0
        step_i = 0  # defined up front so stats can be built even if the loop exits early
        success = False
        self.rollout_visualizing = True
try:
for step_i in range(horizon):
await asyncio.sleep(0) # Allow other tasks to run
if self.close_renderer_flag.is_set():
print("Stopping rollout due to renderer close request...")
break
# get action from policy
act = policy(ob=obs)
# play action
next_obs, r, done, _ = env.step(act)
# compute reward
total_reward += r
success = env.is_success()["task"]
# visualization
if render:
env.render(mode="human", camera_name=camera_names[0])
if video_writer is not None:
if video_count % video_skip == 0:
video_img = []
for cam_name in camera_names:
video_img.append(
env.render(
mode="rgb_array",
height=512,
width=512,
camera_name=cam_name,
)
)
video_img = np.concatenate(
video_img, axis=1
) # concatenate horizontally
video_writer.append_data(video_img)
video_count += 1
# break if done or if success
if done or success:
break
# update for next iter
obs = deepcopy(next_obs)
env.get_state()
except env.rollout_exceptions as e:
print("WARNING: got rollout exception {}".format(e))
self.rollout_visualizing = False
stats = dict(
Return=total_reward, Horizon=(step_i + 1), Success_Rate=float(success)
)
return stats
async def run(self):
rollout_horizon = 400
np.random.seed(0)
torch.manual_seed(0)
video_path = "output/rollout.mp4"
video_writer = imageio.get_writer(video_path, fps=20)
policy = self.policy
env = self.env
stats = await self.rollout(
policy=policy,
env=env,
horizon=rollout_horizon,
render=True,
# render=False,
# video_writer=video_writer,
# video_skip=5,
camera_names=["frontview", "agentview"],
)
print(stats)
video_writer.close()
def get_image(self):
img = self.env.render(
mode="rgb_array", height=512, width=512, camera_name="frontview"
)
img = Image.fromarray(img)
return img
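# A minimal driving sketch (not part of the original module): how the class above can be
# exercised outside the FastAPI app. The 15-second sleep is an arbitrary placeholder to
# let the rollout run before shutting the renderer down.
if __name__ == "__main__":
    async def _demo():
        sim = RobomimicSim()
        sim.setup()  # downloads/loads the checkpoint and builds the policy + env
        await sim.start_rollout()  # starts the renderer and rollout tasks
        await asyncio.sleep(15)  # let the policy act for a while
        await sim.close_renderer()  # signal both tasks to stop and clean up
    asyncio.run(_demo())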
| 8,536 | Python | 33.28514 | 132 | 0.554944 |
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/train_bc_rnn.py | """
WARNING: This script is only for instructive purposes, to point out different portions
of the config -- the preferred way to launch training runs is still with external
jsons and scripts/train.py (and optionally using scripts/hyperparameter_helper.py
to generate several config jsons by sweeping config settings). See the online
documentation for more information about launching training.
Example script for training a BC-RNN agent by manually setting portions of the config in
python code.
To see a quick training run, use the following command:
python train_bc_rnn.py --debug
To run a full length training run on your own dataset, use the following command:
python train_bc_rnn.py --dataset /path/to/dataset.hdf5 --output /path/to/output_dir
"""
import argparse
import robomimic.utils.torch_utils as TorchUtils
import robomimic.utils.test_utils as TestUtils
import robomimic.macros as Macros
from robomimic.config import config_factory
from robomimic.scripts.train import train
def robosuite_hyperparameters(config):
"""
Sets robosuite-specific hyperparameters.
Args:
config (Config): Config to modify
Returns:
Config: Modified config
"""
## save config - if and when to save checkpoints ##
config.experiment.save.enabled = (
True # whether model saving should be enabled or disabled
)
config.experiment.save.every_n_seconds = (
None # save model every n seconds (set to None to disable)
)
config.experiment.save.every_n_epochs = (
50 # save model every n epochs (set to None to disable)
)
config.experiment.save.epochs = [] # save model on these specific epochs
config.experiment.save.on_best_validation = (
False # save models that achieve best validation score
)
config.experiment.save.on_best_rollout_return = (
False # save models that achieve best rollout return
)
config.experiment.save.on_best_rollout_success_rate = (
True # save models that achieve best success rate
)
# epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used
config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps
config.experiment.validation_epoch_every_n_steps = (
10 # each validation epoch is 10 gradient steps
)
# envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset
config.experiment.env = None # no need to set this (unless you want to override)
config.experiment.additional_envs = (
None # additional environments that should get evaluated
)
## rendering config ##
config.experiment.render = False # render on-screen or not
config.experiment.render_video = True # render evaluation rollouts to videos
config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints
config.experiment.video_skip = (
5 # render video frame every n environment steps during rollout
)
## evaluation rollout config ##
config.experiment.rollout.enabled = True # enable evaluation rollouts
config.experiment.rollout.n = 50 # number of rollouts per evaluation
config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py)
config.experiment.rollout.rate = 50 # do rollouts every @rate epochs
config.experiment.rollout.warmstart = (
0 # number of epochs to wait before starting rollouts
)
config.experiment.rollout.terminate_on_success = (
True # end rollout early after task success
)
## dataset loader config ##
# num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets
config.train.num_data_workers = 0 # assume low-dim dataset
# One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is
# by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set
# to None to use no caching - in this case, every batch sample is retrieved via file i/o.
# You should almost never set this to None, even for large image datasets.
config.train.hdf5_cache_mode = "all"
config.train.hdf5_use_swmr = True # used for parallel data loading
# if true, normalize observations at train and test time, using the global mean and standard deviation
# of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs
# in utils/dataset.py for more information.
config.train.hdf5_normalize_obs = False # no obs normalization
# if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key"
config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split
config.train.hdf5_validation_filter_key = "valid"
# fetch sequences of length 10 from dataset for RNN training
config.train.seq_length = 10
# keys from hdf5 to load per demonstration, besides "obs" and "next_obs"
config.train.dataset_keys = (
"actions",
"rewards",
"dones",
)
# one of [None, "last"] - set to "last" to include goal observations in each batch
config.train.goal_mode = None # no need for goal observations
## learning config ##
config.train.cuda = True # try to use GPU (if present) or not
config.train.batch_size = 100 # batch size
config.train.num_epochs = 2000 # number of training epochs
config.train.seed = 1 # seed for training
### Observation Config ###
config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent
"robot0_eef_pos",
"robot0_eef_quat",
"robot0_gripper_qpos",
"object",
]
config.observation.modalities.obs.rgb = [] # no image observations
config.observation.modalities.goal.low_dim = [] # no low-dim goals
config.observation.modalities.goal.rgb = [] # no image goals
# observation encoder architecture - applies to all networks that take observation dicts as input
config.observation.encoder.rgb.core_class = "VisualCore"
config.observation.encoder.rgb.core_kwargs.feature_dimension = 64
config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations)
config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = (
False # kwargs for visual core
)
config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False
config.observation.encoder.rgb.core_kwargs.pool_class = (
"SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling)
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = (
32 # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = (
False # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = (
1.0 # Default arguments for "SpatialSoftmax"
)
config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = (
0.0 # Default arguments for "SpatialSoftmax"
)
# if you prefer to use pre-trained visual representations, uncomment the following lines
# R3M
# config.observation.encoder.rgb.core_kwargs.backbone_class = 'R3MConv' # R3M backbone for image observations (unused if no image observations)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.r3m_model_class = 'resnet18' # R3M model class (resnet18, resnet34, resnet50)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.freeze = True # whether to freeze network during training or allow finetuning
# config.observation.encoder.rgb.core_kwargs.pool_class = None # no pooling class for pretraining model
# MVP
# config.observation.encoder.rgb.core_kwargs.backbone_class = 'MVPConv' # MVP backbone for image observations (unused if no image observations)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.mvp_model_class = 'vitb-mae-egosoup' # MVP model class (vits-mae-hoi, vits-mae-in, vits-sup-in, vitb-mae-egosoup, vitl-256-mae-egosoup)
# config.observation.encoder.rgb.core_kwargs.backbone_kwargs.freeze = True # whether to freeze network during training or allow finetuning
# config.observation.encoder.rgb.core_kwargs.pool_class = None # no pooling class for pretraining model
# observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization
config.observation.encoder.rgb.obs_randomizer_class = None
# kwargs for observation randomizers (for the CropRandomizer, this is size and number of crops)
config.observation.encoder.rgb.obs_randomizer_kwargs.crop_height = 76
config.observation.encoder.rgb.obs_randomizer_kwargs.crop_width = 76
config.observation.encoder.rgb.obs_randomizer_kwargs.num_crops = 1
config.observation.encoder.rgb.obs_randomizer_kwargs.pos_enc = False
### Algo Config ###
# optimization parameters
config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate
config.algo.optim_params.policy.learning_rate.decay_factor = (
0.1 # factor to decay LR by (if epoch schedule non-empty)
)
config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
config.algo.optim_params.policy.regularization.L2 = (
0.00 # L2 regularization strength
)
# loss weights
config.algo.loss.l2_weight = 1.0 # L2 loss weight
config.algo.loss.l1_weight = 0.0 # L1 loss weight
config.algo.loss.cos_weight = 0.0 # cosine loss weight
# MLP network architecture (layers after observation encoder and RNN, if present)
config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output
# stochastic GMM policy
config.algo.gmm.enabled = (
True # enable GMM policy - policy outputs GMM action distribution
)
config.algo.gmm.num_modes = 5 # number of GMM modes
config.algo.gmm.min_std = 0.0001 # minimum std output from network
config.algo.gmm.std_activation = (
"softplus" # activation to use for std output from policy net
)
config.algo.gmm.low_noise_eval = True # low-std at test-time
# rnn policy config
config.algo.rnn.enabled = True # enable RNN policy
config.algo.rnn.horizon = (
10 # unroll length for RNN - should usually match train.seq_length
)
config.algo.rnn.hidden_dim = 400 # hidden dimension size
config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state
config.algo.rnn.kwargs.bidirectional = False # rnn kwargs
return config
def momart_hyperparameters(config):
"""
Sets momart-specific hyperparameters.
Args:
config (Config): Config to modify
Returns:
Config: Modified config
"""
## save config - if and when to save checkpoints ##
config.experiment.save.enabled = (
True # whether model saving should be enabled or disabled
)
config.experiment.save.every_n_seconds = (
None # save model every n seconds (set to None to disable)
)
config.experiment.save.every_n_epochs = (
3 # save model every n epochs (set to None to disable)
)
config.experiment.save.epochs = [] # save model on these specific epochs
config.experiment.save.on_best_validation = (
True # save models that achieve best validation score
)
config.experiment.save.on_best_rollout_return = (
False # save models that achieve best rollout return
)
config.experiment.save.on_best_rollout_success_rate = (
True # save models that achieve best success rate
)
# epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used
    config.experiment.epoch_every_n_steps = None  # use the full dataset size as one epoch
config.experiment.validation_epoch_every_n_steps = (
10 # each validation epoch is 10 gradient steps
)
# envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset
config.experiment.env = None # no need to set this (unless you want to override)
config.experiment.additional_envs = (
None # additional environments that should get evaluated
)
## rendering config ##
config.experiment.render = False # render on-screen or not
config.experiment.render_video = True # render evaluation rollouts to videos
config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints
config.experiment.video_skip = (
5 # render video frame every n environment steps during rollout
)
## evaluation rollout config ##
config.experiment.rollout.enabled = True # enable evaluation rollouts
config.experiment.rollout.n = 30 # number of rollouts per evaluation
config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout
config.experiment.rollout.rate = 3 # do rollouts every @rate epochs
config.experiment.rollout.warmstart = (
0 # number of epochs to wait before starting rollouts
)
config.experiment.rollout.terminate_on_success = (
True # end rollout early after task success
)
## dataset loader config ##
# num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets
    config.train.num_data_workers = 2  # momart includes image observations, so use 2 workers
# One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is
# by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set
# to None to use no caching - in this case, every batch sample is retrieved via file i/o.
# You should almost never set this to None, even for large image datasets.
config.train.hdf5_cache_mode = "low_dim"
config.train.hdf5_use_swmr = True # used for parallel data loading
# if true, normalize observations at train and test time, using the global mean and standard deviation
# of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs
# in utils/dataset.py for more information.
config.train.hdf5_normalize_obs = False # no obs normalization
# if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key"
config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split
config.train.hdf5_validation_filter_key = "valid"
    # fetch sequences of length 50 from dataset for RNN training
config.train.seq_length = 50
# keys from hdf5 to load per demonstration, besides "obs" and "next_obs"
config.train.dataset_keys = (
"actions",
"rewards",
"dones",
)
# one of [None, "last"] - set to "last" to include goal observations in each batch
    config.train.goal_mode = "last"  # include the final goal observation in each batch
## learning config ##
config.train.cuda = True # try to use GPU (if present) or not
config.train.batch_size = 4 # batch size
config.train.num_epochs = 31 # number of training epochs
config.train.seed = 1 # seed for training
### Observation Config ###
config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent
"proprio",
]
config.observation.modalities.obs.rgb = [
"rgb",
"rgb_wrist",
]
config.observation.modalities.obs.depth = [
"depth",
"depth_wrist",
]
config.observation.modalities.obs.scan = [
"scan",
]
config.observation.modalities.goal.low_dim = [] # no low-dim goals
config.observation.modalities.goal.rgb = [] # no rgb image goals
### Algo Config ###
# optimization parameters
config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate
config.algo.optim_params.policy.learning_rate.decay_factor = (
0.1 # factor to decay LR by (if epoch schedule non-empty)
)
config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
config.algo.optim_params.policy.regularization.L2 = (
0.00 # L2 regularization strength
)
# loss weights
config.algo.loss.l2_weight = 1.0 # L2 loss weight
config.algo.loss.l1_weight = 0.0 # L1 loss weight
config.algo.loss.cos_weight = 0.0 # cosine loss weight
# MLP network architecture (layers after observation encoder and RNN, if present)
config.algo.actor_layer_dims = (
300,
400,
) # MLP layers between RNN layer and action output
# stochastic GMM policy
config.algo.gmm.enabled = (
True # enable GMM policy - policy outputs GMM action distribution
)
config.algo.gmm.num_modes = 5 # number of GMM modes
config.algo.gmm.min_std = 0.01 # minimum std output from network
config.algo.gmm.std_activation = (
"softplus" # activation to use for std output from policy net
)
config.algo.gmm.low_noise_eval = True # low-std at test-time
# rnn policy config
config.algo.rnn.enabled = True # enable RNN policy
config.algo.rnn.horizon = (
50 # unroll length for RNN - should usually match train.seq_length
)
config.algo.rnn.hidden_dim = 1200 # hidden dimension size
config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state
config.algo.rnn.kwargs.bidirectional = False # rnn kwargs
return config
# Valid dataset types to use
DATASET_TYPES = {
"robosuite": {
"default_dataset_func": TestUtils.example_dataset_path,
"hp": robosuite_hyperparameters,
},
"momart": {
"default_dataset_func": TestUtils.example_momart_dataset_path,
"hp": momart_hyperparameters,
},
}
def get_config(
dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False
):
"""
Construct config for training.
Args:
dataset_type (str): Dataset type to use. Valid options are DATASET_TYPES. Default is "robosuite"
dataset_path (str or None): path to hdf5 dataset. Pass None to use a small default dataset.
output_dir (str): path to output folder, where logs, model checkpoints, and videos
will be written. If it doesn't exist, the directory will be created. Pass
None to use a default directory in /tmp.
debug (bool): if True, shrink training and rollout times to test a full training
run quickly.
"""
assert (
dataset_type in DATASET_TYPES
), f"Invalid dataset type. Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}"
# handle args
if dataset_path is None:
# small dataset with a handful of trajectories
dataset_path = DATASET_TYPES[dataset_type]["default_dataset_func"]()
if output_dir is None:
# default output directory created in /tmp
output_dir = TestUtils.temp_model_dir_path()
# make default BC config
config = config_factory(algo_name="bc")
### Experiment Config ###
config.experiment.name = (
f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files
)
config.experiment.validate = True # whether to do validation or not
config.experiment.logging.terminal_output_to_txt = (
False # whether to log stdout to txt file
)
config.experiment.logging.log_tb = True # enable tensorboard logging
### Train Config ###
config.train.data = dataset_path # path to hdf5 dataset
# Write all results to this directory. A new folder with the timestamp will be created
# in this directory, and it will contain three subfolders - "log", "models", and "videos".
# The "log" directory will contain tensorboard and stdout txt logs. The "models" directory
# will contain saved model checkpoints. The "videos" directory contains evaluation rollout
# videos.
config.train.output_dir = output_dir # path to output folder
# Load default hyperparameters based on dataset type
config = DATASET_TYPES[dataset_type]["hp"](config)
# maybe make training length small for a quick run
if debug:
# train and validate for 3 gradient steps per epoch, and 2 total epochs
config.experiment.epoch_every_n_steps = 3
config.experiment.validation_epoch_every_n_steps = 3
config.train.num_epochs = 2
# rollout and model saving every epoch, and make rollouts short
config.experiment.save.every_n_epochs = 1
config.experiment.rollout.rate = 1
config.experiment.rollout.n = 2
config.experiment.rollout.horizon = 10
return config
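# A small sketch (not part of the original script) of driving get_config() programmatically,
# e.g. from a notebook, instead of through the argparse entry point below. Leaving the
# dataset and output arguments at their defaults pulls the packaged example dataset and a
# /tmp output directory, as documented in get_config().
def example_quick_debug_run():
    config = get_config(dataset_type="robosuite", debug=True)
    device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda)
    train(config, device=device)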
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Dataset path
parser.add_argument(
"--dataset",
type=str,
default=None,
help="(optional) path to input hdf5 dataset to use in example script. If not provided, \
a default hdf5 packaged with the repository will be used.",
)
# Output dir
parser.add_argument(
"--output",
type=str,
default=None,
help="(optional) path to folder to use (or create) to output logs, model checkpoints, and rollout \
videos. If not provided, a folder in /tmp will be used.",
)
# debug flag for quick training run
parser.add_argument(
"--debug",
action="store_true",
help="set this flag to run a quick training run for debugging purposes",
)
# type
parser.add_argument(
"--dataset_type",
type=str,
default="robosuite",
choices=list(DATASET_TYPES.keys()),
help=f"Dataset type to use. This will determine the default hyperparameter settings to use for training."
f"Valid options are: {list(DATASET_TYPES.keys())}. Default is robosuite.",
)
args = parser.parse_args()
# Turn debug mode on possibly
if args.debug:
Macros.DEBUG = True
# config for training
config = get_config(
dataset_type=args.dataset_type,
dataset_path=args.dataset,
output_dir=args.output,
debug=args.debug,
)
# set torch device
device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda)
# run training
train(config, device=device)
| 23,496 | Python | 42.034798 | 208 | 0.682074 |
AshisGhosh/roboai/robosuite/robomimic_sim/robomimic_sim/run_policy.py | import imageio
import numpy as np
import os
from copy import deepcopy
import torch
import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils
from robomimic.envs.env_base import EnvBase
from robomimic.algo import RolloutPolicy
import urllib.request
# Get pretrained checkpoint from the model zoo
ckpt_path = "models/lift_ph_low_dim_epoch_1000_succ_100.pth"
# Lift (Proficient Human)
urllib.request.urlretrieve(
"http://downloads.cs.stanford.edu/downloads/rt_benchmark/model_zoo/lift/bc_rnn/lift_ph_low_dim_epoch_1000_succ_100.pth",
filename=ckpt_path,
)
assert os.path.exists(ckpt_path)
device = TorchUtils.get_torch_device(try_to_use_cuda=True)
# restore policy
policy, ckpt_dict = FileUtils.policy_from_checkpoint(
ckpt_path=ckpt_path, device=device, verbose=True
)
# create environment from saved checkpoint
env, _ = FileUtils.env_from_checkpoint(
ckpt_dict=ckpt_dict,
    render=True,  # render the rollout on-screen
render_offscreen=True, # render to RGB images for video
verbose=True,
)
def rollout(
policy,
env,
horizon,
render=False,
video_writer=None,
video_skip=5,
camera_names=None,
):
"""
Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video,
and returns the rollout trajectory.
Args:
policy (instance of RolloutPolicy): policy loaded from a checkpoint
env (instance of EnvBase): env loaded from a checkpoint or demonstration metadata
horizon (int): maximum horizon for the rollout
render (bool): whether to render rollout on-screen
video_writer (imageio writer): if provided, use to write rollout to video
video_skip (int): how often to write video frames
camera_names (list): determines which camera(s) are used for rendering. Pass more than
one to output a video with multiple camera views concatenated horizontally.
Returns:
stats (dict): some statistics for the rollout - such as return, horizon, and task success
"""
assert isinstance(env, EnvBase)
assert isinstance(policy, RolloutPolicy)
assert not (render and (video_writer is not None))
policy.start_episode()
obs = env.reset()
state_dict = env.get_state()
# hack that is necessary for robosuite tasks for deterministic action playback
obs = env.reset_to(state_dict)
    video_count = 0  # video frame counter
    total_reward = 0.0
    success = False  # defined up front in case the rollout raises before the first step finishes
if render:
env.render(mode="human", camera_name=camera_names[0])
try:
for step_i in range(horizon):
# get action from policy
act = policy(ob=obs)
# play action
next_obs, r, done, _ = env.step(act)
# compute reward
total_reward += r
success = env.is_success()["task"]
# visualization
if render:
env.render(mode="human", camera_name=camera_names[0])
if video_writer is not None:
if video_count % video_skip == 0:
video_img = []
for cam_name in camera_names:
video_img.append(
env.render(
mode="rgb_array",
height=512,
width=512,
camera_name=cam_name,
)
)
video_img = np.concatenate(
video_img, axis=1
) # concatenate horizontally
video_writer.append_data(video_img)
video_count += 1
# break if done or if success
if done or success:
break
# update for next iter
obs = deepcopy(next_obs)
state_dict = env.get_state()
except env.rollout_exceptions as e:
print("WARNING: got rollout exception {}".format(e))
stats = dict(Return=total_reward, Horizon=(step_i + 1), Success_Rate=float(success))
return stats
rollout_horizon = 400
np.random.seed(0)
torch.manual_seed(0)
video_path = "output/rollout.mp4"
video_writer = imageio.get_writer(video_path, fps=20)
stats = rollout(
policy=policy,
env=env,
horizon=rollout_horizon,
render=True,
# render=False,
# video_writer=video_writer,
# video_skip=5,
camera_names=["frontview", "agentview"],
)
print(stats)
video_writer.close()
| 4,566 | Python | 29.446666 | 124 | 0.613666 |
AshisGhosh/roboai/robosuite/robomimic_sim/app/main.py | #!/usr/bin/python -u
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
import io
from robomimic_sim.robomimic_sim import RobomimicSim
# Create FastAPI instance
app = FastAPI()
robosim = RobomimicSim()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robomimic server."}
@app.on_event("startup")
async def startup_event():
return robosim.setup()
@app.post("/run")
async def run():
print("Running robomimic simulation...")
return await robosim.start_rollout()
@app.post("/reset")
async def reset():
print("Resetting robomimic simulation...")
return await robosim.reset()
@app.post("/start_renderer")
async def start_renderer():
print("Starting robomimic simulation...")
return await robosim.start_renderer()
@app.post("/close_renderer")
async def close_renderer():
print("Closing robomimic simulation...")
return await robosim.close_renderer()
@app.get("/get_policy")
async def get_policy():
return repr(robosim.policy)
@app.get("/get_image")
async def get_image():
print("Getting image...")
img = robosim.get_image()
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
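# Launch sketch (assumptions: module path and port inferred from this file's location).
# The service is typically served with uvicorn from the package root, e.g.:
#   uvicorn app.main:app --host 0.0.0.0 --port 8000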
| 1,778 | Python | 21.518987 | 71 | 0.692351 |
AshisGhosh/roboai/agent_frameworks/crewai_roboai/pyproject.toml | [tool.poetry]
name = "crewai-roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = {extras = ["tools"], version = "^0.22.5"}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 330 | TOML | 19.687499 | 50 | 0.660606 |
AshisGhosh/roboai/agent_frameworks/crewai_roboai/crewai_roboai/test.py | from crewai import Crew, Process, Agent, Task
from crewai_tools import tool
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
@tool("Get objects on the table.")
def get_objects_on_the_table() -> str:
"""Get objects on the table"""
return "Milk, Cereal" # string to be sent back to the agent
# Define your agents
planner = Agent(
role="Planner",
goal="Create plans for robots.",
backstory="An experienced planner that breaks down tasks into steps for robots.",
tools=[],
verbose=True,
allow_delegation=False,
)
analyst = Agent(
role="Scene Analyzer",
goal="Identify objects in the scene.",
backstory="An experienced analyst that can identify objects in a scene.",
tools=[get_objects_on_the_table],
verbose=True,
allow_delegation=False,
)
# Define the tasks in sequence
planner_task = Task(
description="Create a plan for a robot to clear the table.",
agent=planner,
expected_output="List of steps for a robot.",
)
analysis_task = Task(
description="List the objects that are on the table",
agent=analyst,
expected_output="List of objects.",
)
# Form the crew with a sequential process
crew = Crew(
agents=[planner, analyst],
tasks=[analysis_task, planner_task],
process=Process.sequential,
verbose=2,
)
crew.kickoff()
| 1,360 | Python | 23.745454 | 85 | 0.691176 |
AshisGhosh/roboai/agent_frameworks/langroid_roboai/pyproject.toml | [tool.poetry]
name = "langroid-roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "<3.12,>=3.9.1"
langroid = {extras = ["litellm"], version = "^0.1.222"}
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 337 | TOML | 20.124999 | 55 | 0.667656 |
AshisGhosh/roboai/agent_frameworks/langroid_roboai/langroid_roboai/test.py | import langroid as lr
import langroid.language_models as lm
# set up LLM
llm_cfg = lm.OpenAIGPTConfig( # or OpenAIAssistant to use Assistant API
# any model served via an OpenAI-compatible API
# chat_model="litellm/openrouter/mistralai/mistral-7b-instruct:free"
chat_model="litellm/openrouter/huggingfaceh4/zephyr-7b-beta:free"
)
# # use LLM directly
# mdl = lm.OpenAIGPT(llm_cfg)
# response = mdl.chat("What is the capital of Ontario?", max_tokens=10)
# # use LLM in an Agent
# agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
# agent = lr.ChatAgent(agent_cfg)
# agent.llm_response("What is the capital of China?")
# response = agent.llm_response("And India?") # maintains conversation state
# wrap Agent in a Task to run interactive loop with user (or other agents)
# task = lr.Task(agent, name="Bot", system_message="You are a helpful assistant")
# task.run("Hello") # kick off with user saying "Hello"
# 2-Agent chat loop: Teacher Agent asks questions to Student Agent
agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
robot_agent = lr.ChatAgent(agent_cfg)
robot_task = lr.Task(
robot_agent,
name="Robot",
system_message="""
You are a robot and have a high level task.
You must ask the planner to break it down into steps you can do.
Your skills involve 'pick' and 'place' actions.
""",
# done_if_response=[Entity.LLM],
interactive=False,
)
planner_agent = lr.ChatAgent(agent_cfg)
planner_task = lr.Task(
planner_agent,
name="Planner",
system_message="""
Concisely return numbered steps of a plan for a robot.
The plan can only involve 'pick' and 'place' actions.
If the plan is valid, respond with 'DONE'.
""",
single_round=True,
interactive=False,
)
robot_task.add_sub_task(planner_task)
robot_task.run(
"The task is to clear the table, it has the following objects: 'Milk', 'Cereal', and a 'Can'."
)
| 1,912 | Python | 33.160714 | 98 | 0.695607 |
AshisGhosh/roboai/agent_frameworks/autogen_roboai/pyproject.toml | [tool.poetry]
name = "roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.10,<3.13"
pyautogen = "^0.2.21"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 293 | TOML | 17.374999 | 46 | 0.672355 |
AshisGhosh/roboai/agent_frameworks/autogen_roboai/autogen_roboai/test.py | import autogen
from autogen import AssistantAgent, UserProxyAgent
import tempfile
from autogen.coding import LocalCommandLineCodeExecutor
from typing_extensions import Annotated
import logging
logging.basicConfig(level=logging.INFO)
filter_dict = {"tags": ["zephyr"]}
config_list = autogen.config_list_from_json(
env_or_file="OAI_CONFIG_LIST", filter_dict=filter_dict
)
assert len(config_list) == 1
llm_config = {
"config_list": config_list,
"timeout": 120,
}
task = "Create a list of steps for a robot to clear the table."
# create an AssistantAgent instance named "assistant" with the LLM configuration.
assistant = AssistantAgent(
name="assistant",
llm_config=llm_config,
system_message="""
You are a helpful assistant who can break down tasks into steps.
Please help the user with their task.
Use the functions provided to learn more about the task.
Respond with 'TERMINATE' when you are done.
""",
)
# Create a temporary directory to store the code files.
temp_dir = tempfile.TemporaryDirectory()
# Create a local command line code executor.
executor = LocalCommandLineCodeExecutor(
timeout=10, # Timeout for each code execution in seconds.
work_dir=temp_dir.name, # Use the temporary directory to store the code files.
)
user_proxy = UserProxyAgent(
name="user_proxy",
human_input_mode="NEVER",
system_message="A proxy for the user for executing code.",
code_execution_config={"executor": executor},
is_termination_msg=lambda x: "content" in x
and x["content"] is not None
and "TERMINATE" in x["content"]
and "``" not in x["content"],
)
@user_proxy.register_for_execution()
@assistant.register_for_llm(
name="identify_objs_on_table",
description="Python function to get a list of objects on the table.",
)
def identify_objs_on_table(
message: Annotated[
str, "Message to ask the inspector for the objects on the table."
],
) -> str:
logging.info("Asked for objects.")
return "Milk, Cereal, a Can."
# inspector = AssistantAgent(
# name="inspector",
# llm_config=llm_config,
# system_message="You are an inspector who can identify objects in a scene. There is 'Milk', 'Cereal' and a 'Can' on the table. Please respond with 'TERMINATE' when you are done."
# )
# user_inspector = UserProxyAgent(
# name="user_inspector",
# human_input_mode="NEVER",
# is_termination_msg=lambda x: "content" in x
# and x["content"] is not None
# and "TERMINATE" in x["content"]
# )
# @user_inspector.register_for_execution()
# @inspector.register_for_llm(
# name="identify_objects",
# description="Identify objects in the scene.",
# )
# def identify_objects(message: Annotated[str, "Message to identify objects in the scene."]):
# return "Milk, Cereal, a Can."
user_proxy.initiate_chat(assistant, message=task)
# logging.info(f"Chat result: {chat_result}")
| 2,951 | Python | 29.122449 | 183 | 0.694341 |
AshisGhosh/roboai/franka_moveit/isaac_demo.launch.py | import os
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
from moveit_configs_utils import MoveItConfigsBuilder
def generate_launch_description():
# Command-line arguments
ros2_control_hardware_type = DeclareLaunchArgument(
"ros2_control_hardware_type",
default_value="isaac",
description="ROS2 control hardware interface type to use for the launch file -- possible values: [mock_components, isaac]",
)
moveit_config = (
MoveItConfigsBuilder("moveit_resources_panda")
.robot_description(
file_path="config/panda.urdf.xacro",
mappings={
"ros2_control_hardware_type": LaunchConfiguration(
"ros2_control_hardware_type"
)
},
)
.robot_description_semantic(file_path="config/panda.srdf")
.trajectory_execution(file_path="config/gripper_moveit_controllers.yaml")
.planning_scene_monitor(
publish_robot_description=True, publish_robot_description_semantic=True
)
.planning_pipelines(pipelines=["ompl", "pilz_industrial_motion_planner"])
.to_moveit_configs()
)
# Start the actual move_group node/action server
move_group_node = Node(
package="moveit_ros_move_group",
executable="move_group",
output="screen",
parameters=[moveit_config.to_dict()],
arguments=["--ros-args", "--log-level", "info"],
)
# RViz
rviz_config_file = os.path.join(
get_package_share_directory("moveit2_tutorials"),
"config",
"panda_moveit_config.rviz",
)
rviz_node = Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="log",
arguments=["-d", rviz_config_file],
parameters=[
moveit_config.robot_description,
moveit_config.robot_description_semantic,
moveit_config.robot_description_kinematics,
moveit_config.planning_pipelines,
moveit_config.joint_limits,
],
)
# Static TF
world2robot_tf_node = Node(
package="tf2_ros",
executable="static_transform_publisher",
name="static_transform_publisher",
output="log",
arguments=["--frame-id", "world", "--child-frame-id", "panda_link0"],
)
hand2camera_tf_node = Node(
package="tf2_ros",
executable="static_transform_publisher",
name="static_transform_publisher",
output="log",
arguments=[
"0.04",
"0.0",
"0.04",
"1.57",
"0.0",
"0.0",
"panda_hand",
"sim_camera",
],
)
# Publish TF
robot_state_publisher = Node(
package="robot_state_publisher",
executable="robot_state_publisher",
name="robot_state_publisher",
output="both",
parameters=[moveit_config.robot_description],
)
# ros2_control using FakeSystem as hardware
ros2_controllers_path = os.path.join(
get_package_share_directory("moveit_resources_panda_moveit_config"),
"config",
"ros2_controllers.yaml",
)
ros2_control_node = Node(
package="controller_manager",
executable="ros2_control_node",
parameters=[ros2_controllers_path],
remappings=[
("/controller_manager/robot_description", "/robot_description"),
],
output="screen",
)
joint_state_broadcaster_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=[
"joint_state_broadcaster",
"--controller-manager",
"/controller_manager",
],
)
panda_arm_controller_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["panda_arm_controller", "-c", "/controller_manager"],
)
panda_hand_controller_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["panda_hand_controller", "-c", "/controller_manager"],
)
return LaunchDescription(
[
ros2_control_hardware_type,
rviz_node,
world2robot_tf_node,
hand2camera_tf_node,
robot_state_publisher,
move_group_node,
ros2_control_node,
joint_state_broadcaster_spawner,
panda_arm_controller_spawner,
panda_hand_controller_spawner,
]
)
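# Usage sketch (assumption: this file is installed in a package named `franka_moveit`,
# inferred from its directory). The only declared launch argument is
# `ros2_control_hardware_type`, which defaults to "isaac":
#   ros2 launch franka_moveit isaac_demo.launch.py ros2_control_hardware_type:=mock_components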
| 4,745 | Python | 29.818182 | 131 | 0.589041 |
AshisGhosh/roboai/roboai/pyproject.toml | [tool.poetry]
name = "roboai"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
litellm = "^1.34.16"
langfuse = "^2.21.1"
burr = {version = "0.19.1", extras = ["start", "streamlit"]}
graphviz = "^0.20.3"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 388 | TOML | 19.473683 | 60 | 0.649485 |
AshisGhosh/roboai/roboai/roboai/roboai.py | import time
from typing import List, Optional, Tuple
from PIL import Image # noqa: F401
from enum import Enum
from uuid import uuid4
from burr.core import Application, ApplicationBuilder, State, default, when
from burr.core.action import action
from burr.lifecycle import LifecycleAdapter
from burr.tracking import LocalTrackingClient
from shared.utils.llm_utils import (
get_closest_text_sync as get_closest_text,
get_most_important_sync as get_most_important,
)
# from shared.utils.isaacsim_client import get_image as get_image_from_sim, pick, place # noqa: F401
from shared.utils.omnigibson_client import (
get_image as get_image_from_sim,
pick,
place,
navigate_to,
get_obj_in_hand,
wait_until_ready,
) # noqa: F401
from shared.utils.image_utils import pil_to_b64, b64_to_pil
from shared.utils.gradio_client import moondream_answer_question_from_image as moondream
from task import Task
from agent import Agent
from plans import PLANS
from skills import SKILLS
from semantic_locations import SEMANTIC_LOCATIONS
from role_context import ROBOT_CONTEXT, ROLE_CONTEXT, EMPLOYEE_HANDBOOK
from knowledge_base_utils import KnowledgeBase
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
knowledge_base = KnowledgeBase()
DEFAULT_MODEL = "openrouter/meta-llama/llama-3-8b-instruct:free"
# DEFAULT_MODEL = "openrouter/huggingfaceh4/zephyr-7b-beta:free"
# DEFAULT_MODEL = "ollama/llama3:latest"
# DEFAULT_MODEL = "ollama/phi3"
# CODING_MODEL = "ollama/codegemma:instruct"
CODING_MODEL = DEFAULT_MODEL
class LogColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def pick_mock(object_name: str):
    name = pick_mock.__name__
    print(f"Called {name}({object_name!r}) TEST MODE ENABLED")
    return True
def place_mock(location: str):
    name = place_mock.__name__
    print(f"Called {name}({location!r}) TEST MODE ENABLED")
    return True
def extract_code(raw_input, language="python"):
start_delimiter = f"```{language}"
if start_delimiter not in raw_input:
start_delimiter = "```"
code_start_index = raw_input.find(start_delimiter)
if code_start_index == -1:
code_start_index = 0
else:
code_start_index += len(start_delimiter)
end_delimiter = "```"
code_end_index = raw_input.find(end_delimiter, code_start_index)
if code_end_index == -1:
code_end_index = len(raw_input)
code = raw_input[code_start_index:code_end_index].strip()
log.debug(f"Extracted code: \n{code}")
return code
def exec_code(code, exec_vars, attempts=3):
success = False
history = []
for _ in range(attempts):
try:
if history:
                log.warning(
f"{LogColors.WARNING}Executing code, retry attempt {len(history)}{LogColors.ENDC}"
)
coder_task = Task(
f"""Given the following error, fix the syntax. Here is the error:
{history[-1][1]}\n{code}
Ensure any explanations are formatted as comments.
""",
expected_output_format="""
```python
# explanatations are only formatted as comments
my_variable = "proper_python_syntax"
my_list = ["proper_python_syntax"]
```
""",
)
coder_agent = Agent(
name="Coder",
model=CODING_MODEL,
system_message="You are an expert coder. Only return proper syntax.",
)
coder_task.add_solving_agent(coder_agent)
output = coder_task.run()
code = extract_code(output)
log.info(f"{LogColors.OKBLUE}Fixed code: \n{code}{LogColors.ENDC}")
exec(code, exec_vars)
success = True
break
except Exception as e:
log.error(f"Error executing code: {e}")
history.append((code, f"Error executing code: {e}"))
time.sleep(1)
return success
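# A minimal sketch (hypothetical LLM output) of how extract_code() and exec_code() chain
# together: pull the fenced block out of a raw completion, execute it against a shared
# variable dict, then read the produced names back out.
def _example_extract_and_exec():
    raw_output = "Sure, here you go:\n```python\nlist_of_steps = ['pick cup', 'place cup']\n```"
    snippet = extract_code(raw_output)
    exec_vars = {}
    if exec_code(snippet, exec_vars):
        return exec_vars.get("list_of_steps")
    return None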
@action(reads=["current_state"], writes=["chat_history", "prompt"])
def process_prompt(state: State, prompt: str) -> Tuple[dict, State]:
result = {"chat_item": {"role": "user", "content": prompt, "type": "text"}}
if state["current_state"] == "FAILED":
original_prompt = state["prompt"]
if isinstance(original_prompt, str):
prompt = [original_prompt, prompt]
        elif isinstance(original_prompt, list):
            original_prompt.append(prompt)
            prompt = original_prompt
log.info(f"{LogColors.OKCYAN}Prompt: {prompt}{LogColors.ENDC}")
return result, state.append(chat_history=result["chat_item"]).update(prompt=prompt)
class PromptType(Enum):
UNKNOWN = "unknown"
PERFORM_NEW_TASK = "perform new task"
RESPOND_TO_QUESTION = "respond to question"
UPDATE_KNOWLEDGE_BASE = "update knowledge base"
RETRY_EXISTING_TASK = "retry existing task"
MODIFY_EXISTING_TASK = "modify existing task"
@classmethod
def all_values(cls):
return [prompt.value for prompt in cls]
def __eq__(self, other):
if isinstance(other, str):
return self.value == other
if isinstance(other, PromptType):
return self.value == other.value
        return NotImplemented
    # defining __eq__ disables the inherited hash; restore it so members remain hashable
    __hash__ = Enum.__hash__
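# Illustration (not original code) of the string-friendly equality defined above, which
# parse_prompt relies on after get_closest_text returns a plain string:
#   >>> PromptType.PERFORM_NEW_TASK == "perform new task"
#   True
#   >>> PromptType.UNKNOWN == "perform new task"
#   False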
@action(reads=["prompt"], writes=["current_state", "prompt_cls", "task_state_idx"])
def parse_prompt(state: State) -> Tuple[dict, State]:
# determine the mode of the prompt
prompt = state["prompt"]
if state["current_state"] == "FAILED":
prompt = state["prompt"][-1]
options = PromptType.all_values()
classify_prompt = Task(
f"""Given the following prompt, classify the type of prompt it is:
{prompt}
Options: {options}
Examples
- Prompt: "Go to the kitchen and grab the items" Output: "perform new task"
- Prompt: "What skills do you have?" Output: "respond to question"
- Prompt: "Don't handle bottles." Output: "update knowledge base"
- Prompt: "Remember to ignore the red cup." Output: "update knowledge base"
- Prompt: "You can find the items in the kitchen." Output: "update knowledge base"
- Prompt: "The beer is in the fridge." Output: "update knowledge base"
- Prompt: "Beer goes in the fridge." Output: "update knowledge base"
- Prompt: "I want to retry the task" Output: "retry existing task"
- Prompt: "Don't pick up the items, just scan the scene" Output: "modify existing task"
""",
expected_output_format="A string respresenting the classification.",
)
classify_prompt_agent = Agent(
name="Update Classifier",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
classify_prompt.add_solving_agent(classify_prompt_agent)
output = classify_prompt.run()
prompt_cls = get_closest_text(output, options)
log.info(f"{LogColors.OKGREEN}Prompt classification: {prompt_cls}{LogColors.ENDC}")
task_state_idx = 0
if prompt_cls == PromptType.PERFORM_NEW_TASK:
current_state = "STARTING"
content = "Starting a new task..."
elif prompt_cls == PromptType.RESPOND_TO_QUESTION:
current_state = "PENDING"
content = "Responding to a question..."
elif prompt_cls == PromptType.UPDATE_KNOWLEDGE_BASE:
current_state = "PENDING"
content = "Updating knowledge..."
elif prompt_cls == PromptType.RETRY_EXISTING_TASK:
current_state = "RUNNING"
content = "Retrying the task..."
task_state_idx = state["task_state_idx"] - 1
if state["current_state"] != "FAILED":
current_state = "PENDING"
prompt_cls = PromptType.UNKNOWN
content = "There is no task to retry."
elif prompt_cls == PromptType.MODIFY_EXISTING_TASK:
current_state = "PENDING"
content = "Modifying existing task...\n\nThis feature is not supported yet."
else:
prompt_cls = PromptType.UNKNOWN
current_state = "PENDING"
content = "Unknown prompt. Please provide a valid prompt."
result = {
"current_state": current_state,
"prompt_cls": prompt_cls,
"task_state_idx": task_state_idx,
}
chat_item = {
"content": content,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=[])
def respond_to_question(state: State) -> Tuple[dict, State]:
answer_question = Task(
f"""
Given the following prompt, answer the question.
Here is the prompt: \n{state['prompt']}
Here is context:
{ROBOT_CONTEXT}
{ROLE_CONTEXT}
{EMPLOYEE_HANDBOOK}
Locations available: {list(SEMANTIC_LOCATIONS.keys())}
Skills available: {list(SKILLS.keys())}
Previous plans: {list(PLANS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
If you do not know the answer, return "I do not know."
""",
expected_output_format="A string representing the answer.",
)
answer_question_agent = Agent(
name="Answerer",
model=DEFAULT_MODEL,
system_message="""
You are an agent that answers questions.
""",
)
answer_question.add_solving_agent(answer_question_agent)
output = answer_question.run()
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return {}, state.append(chat_history=chat_item)
@action(reads=["prompt"], writes=[])
def update_knowledge_base(state: State) -> Tuple[dict, State]:
parse_knowledge = Task(
f"""Given the following text, extract the knowledge: \n{state['prompt']}
Do not include any other text that is not the extracted knowledge.
Examples:
- Text: "Don't handle bottles." Output: "Don't handle bottles"
- Text: "Remember to ignore the red cup." Output: "Ignore the red cup"
- Text: "If you're looking for the cheese, there's a very high chance you could find it in the kitchen." Output: "Cheese in the kitchen"
""",
expected_output_format="A string representing the knowledge.",
)
parse_knowledge_agent = Agent(
name="Knowledge Extractor",
model=DEFAULT_MODEL,
system_message="""
You are an agent that extracts knowledge.
""",
)
parse_knowledge.add_solving_agent(parse_knowledge_agent)
new_knowledge = parse_knowledge.run()
knowledge_tags = get_most_important(new_knowledge, 2)
    # normalize tags: lower case and strip surrounding whitespace
knowledge_tags = [tag.lower().strip() for tag in knowledge_tags]
knowledge_base.add_data(uuid4().hex, new_knowledge, knowledge_tags)
chat_item = {
"content": f"""Updated knowledge base with:
{new_knowledge}
Tags: {knowledge_tags}""",
"type": "text",
"role": "assistant",
}
return {}, state.append(chat_history=chat_item)
@action(reads=["prompt"], writes=["task"])
def determine_if_task_in_skill_library(state: State) -> Tuple[dict, State]:
closest_text = get_closest_text(state["prompt"], list(PLANS.keys()), threshold=0.75)
if closest_text:
result = {"task": closest_text}
content = f"Task determined to be **{result['task']}**"
else:
result = {"task": "unknown"}
content = f"Parsing unknown task... **{state['prompt']}**"
chat_item = {"role": "assistant", "content": content, "type": "text"}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=["task"])
def create_plan_for_unknown_task(state: State) -> Tuple[dict, State]:
result = {"task": state["prompt"]}
chat_item = {
"content": f"Creating plan for **{state['prompt']}**",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["prompt"], writes=["closest_plans"])
def get_closest_plans(state: State) -> Tuple[dict, State]:
closest_plans = get_closest_text(
state["prompt"], list(PLANS.keys()), k=2, threshold=0.75
)
chat_item = {
"content": f"Closest plans: {closest_plans}",
"type": "text",
"role": "assistant",
}
result = {"closest_plans": closest_plans}
return result, state.append(chat_history=chat_item).update(
closest_plans=closest_plans
)
@action(
reads=["prompt", "closest_plans"],
writes=[
"robot_context",
"role_context",
"employee_context",
],
)
def get_role_and_location_context(state: State) -> Tuple[dict, State]:
result = {
"robot_context": ROBOT_CONTEXT,
"role_context": ROLE_CONTEXT,
"employee_context": EMPLOYEE_HANDBOOK,
}
chat_item = {
"content": "Getting role and location context. Creating initial plan.",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=[
"prompt",
"closest_plans",
"robot_context",
"role_context",
"employee_context",
],
writes=["plan"],
)
def create_initial_plan(state: State) -> Tuple[dict, State]:
closest_plans = state["closest_plans"]
if closest_plans:
closest_plans = [f"{k}:\n {PLANS[k]}" for k in closest_plans]
closest_plans = "\n".join(closest_plans)
task = Task(
f"""
Given the following prompt and current robot state, return a simplified high level plan for a robot to perform.
Do not include steps related to confirming successful execution or getting feedback. Do not include steps that indicate to repeat steps.
Prompt:
{state['prompt']}
Current robot location: {state['location'] if 'location' in state else 'living room'}
Examples:
{closest_plans if closest_plans else "No examples available."}
If information is needed, use the skills to get observe or scan the scene.
Context:
Robot:
{state['robot_context']}
Role:
{state['role_context']}
Employee Handbook:
{state['employee_context']}
Here is a list of locations that the robot can go to:
{list(SEMANTIC_LOCATIONS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
""",
expected_output_format="A numbered list of steps.",
)
parser_agent = Agent(
name="Parser",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
task.add_solving_agent(parser_agent)
plan = task.run()
chat_item = {
"content": f"Initial plan: \n\n{plan}",
"type": "text",
"role": "assistant",
}
return {"plan": plan}, state.append(chat_history=chat_item).update(plan=plan)
@action(reads=["plan"], writes=["plan"])
def create_robot_grounded_plan(state: State) -> Tuple[dict, State]:
plan = state["plan"]
skills_verbose = "\n".join([f"{k} : {v['description']}" for k, v in SKILLS.items()])
robot_grounded_plan = Task(
f"""
Map and consolidate the following steps to the Available Robot Skills and locations.
{plan}
Try to match the number of steps. Do not add any additional steps.
Available Robot Skills:
{skills_verbose}
    If there is no match for that step, return 'False'. Be conservative in the matching. There shall only be one skill per step. Summarize whether the plan is feasible at the end.
Here is a list of locations that the robot can go to: {list(SEMANTIC_LOCATIONS.keys())}
If there are pick and place steps following an observation or scanning step, consolidate those steps into a rollout step for a pick and place plan.
""",
expected_output_format="A numbered list of steps mapped to single skill each or 'False' followed by a summary if the task is feasible.",
)
robot_grounded_agent = Agent(
name="Robot Grounded",
model=DEFAULT_MODEL,
system_message="""
You are an agent that grounds a set of actions to robot skills.
""",
)
robot_grounded_plan.add_solving_agent(robot_grounded_agent)
robot_grounded_plan_output = robot_grounded_plan.run()
chat_item = {
"content": f"**Robot grounded plan**: \n\n{robot_grounded_plan_output}",
"type": "text",
"role": "assistant",
}
return {"plan": robot_grounded_plan_output}, state.append(
chat_history=chat_item
).update(plan=robot_grounded_plan_output)
@action(reads=["plan"], writes=["feasible"])
def determine_if_plan_is_feasibile(state: State) -> Tuple[dict, State]:
robot_grounded_plan_output = state["plan"]
extract_feasibility = Task(
f"Given the following summary, return if the task is feasible. Summary: \n{robot_grounded_plan_output}",
expected_output_format="True or False. Do not add any other information.",
)
feasibility_agent = Agent(
name="Feasibility",
# model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
model=DEFAULT_MODEL,
system_message="""
You are a conservative agent that determines if a plan is feasible.
""",
)
extract_feasibility.add_solving_agent(feasibility_agent)
feasibility_output = extract_feasibility.run()
feasible = get_closest_text(feasibility_output, ["True", "False"])
    feasible = None if feasible is None else feasible == "True"
result = {
"task": robot_grounded_plan_output,
"feasible": feasible,
}
chat_item = {
"content": f"Feasible: `{result['feasible']}`",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["task", "feasible"], writes=["response", "current_state", "task"])
def convert_plan_to_steps(state: State) -> Tuple[dict, State]:
plan = state["task"]
plan_to_list_of_steps = Task(
f"""Given the following output, take the numbered list and return is as a python list assigned to `list_of_steps`.
Do not remove any relevant information. Include information about skills and locations into the correct list item.
Here is the plan:
{plan}
Here is an example:
Plan:
1. navigate to location, the location is the kitchen
2. scan the kitchen for relevant objects
3. roll out a plan to pick and place the objects
Output:
```python
list_of_steps = ["navigate to location, the location is the kitchen", "scan the kitchen for relevant objects", "roll out a plan to pick and place the objects"]
```
""",
expected_output_format="""
```python
list_of_steps = ["step 1", "step 2", "step 3"]
```
""",
)
plan_to_list_of_steps_agent = Agent(
name="Plan to List of Steps",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with only code.
""",
)
plan_to_list_of_steps.add_solving_agent(plan_to_list_of_steps_agent)
output = plan_to_list_of_steps.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
log.info(exec_vars.get("list_of_steps", None))
steps = exec_vars.get("list_of_steps", None)
content = "Steps:\n\n" + "\n".join(
[f"{i+1}. {step}" for i, step in enumerate(steps)]
)
except Exception as e:
log.error(f"Error executing code: {e}")
steps = None
content = "Failed to extract steps. Please check the plan and try again."
# formatted_steps = "\n".join([f"{i+1}. {step}" for i, step in enumerate(steps)])
feasible = state["feasible"]
current_state = "STARTING" if feasible else "DONE"
result = {
"response": {
"content": content,
"type": "text" if steps is not None else "error",
"role": "assistant",
},
"current_state": current_state,
"task": steps,
}
return result, state.append(chat_history=result["response"]).update(**result)
def get_closest_state_from_skills(step: str, skills: dict) -> str:
skill_descriptions = [s["description"] for s in skills.values()]
closest_description = get_closest_text(step, skill_descriptions)
state_idx = skill_descriptions.index(closest_description)
return list(skills.keys())[state_idx]
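# Illustrative sketch of how the helper above is used (the step text is hypothetical;
# SKILLS comes from skills.py and get_closest_text performs the embedding-based match):
#
#   get_closest_state_from_skills("go to the kitchen counter", SKILLS)
#   # -> "navigate to location"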
@action(
reads=["task"],
writes=["state_machine", "task_state", "task_state_idx", "current_state", "task"],
)
def create_state_machine(state: State) -> Tuple[dict, State]:
"""
Create a viable state machine for the task.
Every task requires:
* the robot and environment state
* ensuring the robot has the skills to perform the required steps
"""
task = state["task"]
if state["task"] == "What is on the table?":
result = {
"state_machine": [
"get_image",
"ask_vla",
"get_list_of_objects",
],
"task_state": "not_started",
"current_state": "RUNNING",
}
elif state["task"] == "Clear the table":
result = {
"state_machine": [
"get_image",
"ask_vla",
"get_list_of_objects",
"create_plan",
"code",
"execute_code",
],
"task_state": "not_started",
"current_state": "RUNNING",
}
elif state["task"] == "unknown":
result = {
"state_machine": "unknown",
"task_state": "unknown",
"current_state": "DONE",
}
else:
plan = state["task"]
state_machine = [get_closest_state_from_skills(step, SKILLS) for step in plan]
log.info(f"STATE_MACHINE:\n\n{state_machine}\n")
### Use symbolic logic to prune plan
# Ensure that there are only rollout steps after an observation step until the next observation or navigation step
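        # Worked example of the pruning below (hypothetical plan; skill names come from skills.py):
        #   ["navigate to location", "scan the scene", "pick object", "place in location"]
        # collapses to
        #   ["navigate to location", "scan the scene", "rollout pick and place plan"]
        # so that concrete pick/place steps are synthesized later from the scan results.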
observation_steps = ["scan the scene"]
observation_step_idxs = [
i for i, step in enumerate(state_machine) if step in observation_steps
]
pick_and_place_steps = [
"rollout pick and place plan",
"pick object",
"place in location",
]
pick_and_place_step_idxs = [
i for i, step in enumerate(state_machine) if step in pick_and_place_steps
]
if len(observation_step_idxs) > 0 and len(pick_and_place_step_idxs) > 0:
for i, observation_idx in enumerate(observation_step_idxs):
pick_and_place_exists = False
if observation_idx + 1 < len(state_machine):
while state_machine[observation_idx + 1] in pick_and_place_steps:
pick_and_place_exists = observation_idx + 1
state_machine.pop(observation_idx + 1)
task.pop(observation_idx + 1)
                        log.debug(f"Remaining steps after prune: {state_machine[observation_idx + 1 :]}")
if observation_idx + 1 >= len(state_machine):
break
if pick_and_place_exists:
state_machine.insert(
pick_and_place_exists, "rollout pick and place plan"
)
task.insert(pick_and_place_exists, "rollout pick and place plan")
log.info(f"UPDATED STATE_MACHINE (prune for rollout):\n\n{state_machine}\n")
# Consolidate adjacent roll out steps
rollout_steps = ["rollout pick and place plan"]
rollout_step_idxs = [
i for i, step in enumerate(state_machine) if step in rollout_steps
]
if len(rollout_step_idxs) > 1:
consolidated_state_machine = []
for i, s in enumerate(state_machine):
if s in rollout_steps:
if i > 0 and state_machine[i - 1] not in rollout_steps:
consolidated_state_machine.append("rollout pick and place plan")
else:
consolidated_state_machine.append(s)
state_machine = consolidated_state_machine
log.info(
f"UPDATED STATE_MACHINE (consolidate rollout steps):\n\n{state_machine}\n"
)
result = {
"state_machine": state_machine,
"task_state": "not_started",
"current_state": "RUNNING",
}
result["task"] = task
result["task_state_idx"] = 0
log.info(f"Task: {task}")
log.info(f"State machine: {state_machine}")
output = "Here is the consolidated task:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(task)])
output += "\n```"
output += "\n\n"
output += "Here is the state machine:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(state_machine)])
output += "\n```"
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["state_machine", "task_state_idx"],
writes=["task_state", "task_state_idx", "current_state", "state_machine", "task"],
)
def execute_state_machine(state: State) -> Tuple[dict, State]:
"""
State machine manages the execution of fully observable steps
"""
task = state["task"]
current_state = "RUNNING"
state_machine = state["state_machine"]
if state["task_state"] == "not_started":
task_state = state["state_machine"][0]
task_state_idx = state["task_state_idx"]
else:
task_state_idx = state["task_state_idx"] + 1
if task_state_idx < len(state["state_machine"]):
task_state = state["state_machine"][task_state_idx]
else:
task_state = "done"
current_state = "DONE"
result = {
"task_state": task_state,
"task_state_idx": task_state_idx,
"current_state": current_state,
"state_machine": state_machine,
"task": task,
}
if task_state_idx < len(state_machine):
content = f"Executing task: **{task[task_state_idx]}**\n\nTask state: `{task_state}`\n\nStep {task_state_idx+1} of {len(state_machine)}"
else:
content = f"Task completed: **{state['prompt']}**"
chat_item = {
"content": content,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["state_machine", "task", "task_state", "task_state_idx"], writes=["location"]
)
def navigate_to_location(state: State) -> Tuple[dict, State]:
step = state["task"][state["task_state_idx"]]
extract_location = Task(
f"""
Given the following step, extract the location (e.g. kitchen), item (e.g. sink) or destination and return it.
Here is the string to extract the location:
{step}
Examples:
- Text: "navigate to the kitchen" Output: "kitchen"
""",
expected_output_format="A string representing the location, item or destination.",
)
extract_location_agent = Agent(
name="Location Extractor",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds.
""",
)
extract_location.add_solving_agent(extract_location_agent)
output = extract_location.run()
try:
location = get_closest_text(output, list(SEMANTIC_LOCATIONS.keys()))
if not navigate_to(
SEMANTIC_LOCATIONS[location]["name"],
SEMANTIC_LOCATIONS[location]["location"],
):
raise Exception(f"Error navigating to location: {location}")
if not wait_until_ready():
raise Exception(f"Error navigating to location: {location}")
content = f"Navigating to location: **{location}**"
except Exception as e:
log.error(f"Error: {e}")
location = None
content = f"{e}"
result = {"location": location}
chat_item = {
"content": content,
"type": "text" if location is not None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["state_machine"], writes=[])
def scan_the_scene(state: State) -> Tuple[dict, State]:
result = {}
chat_item = {
"content": "Scanning the scene...",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["state_machine"], writes=["image"])
def get_image(state: State) -> Tuple[dict, State]:
try:
image = get_image_from_sim()
# image = Image.open("shared/data/test1.png")
image = pil_to_b64(image)
result = {"image": image}
chat_item = {
"content": image,
"type": "image",
"role": "assistant",
}
except Exception as e:
log.error(f"Error getting image: {e}")
result = {"image": None}
chat_item = {
"content": f"Error getting image: {e}",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["image"], writes=["vla_response"])
def ask_vla(
state: State, vla_prompt: str = "Describe the image."
) -> Tuple[dict, State]:
image = b64_to_pil(state["image"])
result = {"vla_response": moondream(image, vla_prompt)["result"]}
chat_item = {
"content": f"**Image Description:**:\n\n{result['vla_response']}",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["vla_response"], writes=["observations"])
def get_list_of_objects(state: State) -> Tuple[dict, State]:
task = Task(
f"""Given the following, return a list assigned to `objects_on_table` of the objects on the table. The table is not an object.
Summary:
{state['vla_response']}
Example:
Summary:
There is an object on the table called "Object 1", an object on the table called "Object 2", and an object on the table called "Object 3".
Output:
```
objects_on_table = ["Object 1", "Object 2", "Object 3"]
```
Don't use any functions, manually identify the objects on the table from the summary.
""",
expected_output_format="""
```
objects_on_table = ["Object 1", "Object 2", "Object 3"]
```
""",
)
analyzer_agent = Agent(
name="Analyzer",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with lists.
""",
)
task.add_solving_agent(analyzer_agent)
output = task.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
log.info(exec_vars.get("objects_on_table", None))
objects_on_table = exec_vars.get("objects_on_table", None)
observations = {"objects_on_table": objects_on_table}
    except Exception as e:
        log.error(f"Error executing code: {e}")
        # Fall back to empty results so the references below don't raise a NameError
        objects_on_table = None
        observations = {"objects_on_table": None}
result = {"observations": observations}
chat_item = {
"content": f"Objects on table: \n\n`{objects_on_table}`",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["observations", "task_state_idx", "location"],
writes=["state_machine", "task"],
)
def rollout_pick_and_place_plan(state: State) -> Tuple[dict, State]:
task_idx = state["task_state_idx"]
state_machine = state["state_machine"]
task = state["task"]
rollout_task = Task(
f"""Rollout a pick and place plan for the robot given the following objects:
{state['observations']}
The robot and the objects are at {state['location']}
Here are the locations the robot can go to:
{list(SEMANTIC_LOCATIONS.keys())}
Here is additional knowledge you've learned:
{knowledge_base.get_knowledge_as_string()}
Here is an example:
'{{'objects_on_table': ['cheese', 'milk', 'book']}}
The robot and the objects are at {{'counter'}}
Output:
```
pick_and_place_tasks = ["Pick cheese at counter and place at kitchen", "Pick milk at counter and place at kitchen", "Pick book at counter and place at shelf"]
```
Don't use any functions, manually synthesize the pick and place tasks from the summary.
Here is additional knowledge you've learned, factor this into the plan:
{knowledge_base.get_knowledge_as_string()}
""",
expected_output_format="""
```
pick_and_place_tasks = ["Pick object1 at location and place at destination", "Pick object2 at location and place at destination", "Pick object3 at location and place at destination"]
```
""",
)
planner_agent = Agent(
name="Planner",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with lists.
""",
)
rollout_task.add_solving_agent(planner_agent)
output = rollout_task.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
log.info(exec_vars.get("pick_and_place_tasks", None))
pick_and_place_tasks = exec_vars.get("pick_and_place_tasks", None)
task = task[: task_idx + 1] + pick_and_place_tasks + task[task_idx + 1 :]
state_machine = (
state_machine[: task_idx + 1]
+ ["pick_and_place"] * len(pick_and_place_tasks)
+ state_machine[task_idx + 1 :]
)
except Exception as e:
log.error(f"Error executing code: {e}")
raise NotImplementedError(
"error handling for Rollout pick and place plan not implemented"
)
result = {"state_machine": state_machine, "task": task}
log.info(f"Task: {task}")
log.info(f"State machine: {state_machine}")
output = "Here is the task:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(task)])
output += "\n```"
output += "\n\n"
output += "Here is the state machine:"
output += "\n\n"
output += "```\n"
output += "\n".join([f"{idx+1}. {step}" for idx, step in enumerate(state_machine)])
output += "\n```"
chat_item = {
"content": output,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(
reads=["task", "state_machine", "task_state_idx", "location"],
writes=["obj_to_grasp", "obj_location", "obj_destination"],
)
def pick_and_place(state: State) -> Tuple[dict, State]:
get_object = Task(
f"""Given the following, extract the object of interest as a string assigned to `obj_to_grasp`.
Here is the string to extract the object:
{state["task"][state["task_state_idx"]]}
Here is an example:
Here is the string to extract the object:
Pick cheese at counter and place in kitchen
Output:
```
obj_to_grasp = "cheese"
```
Don't use any functions. Manually identify the object from the summary.
""",
expected_output_format="""
```
obj_to_grasp = "object1"
```
""",
)
analyzer_agent = Agent(
name="Analyzer",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with variables.
""",
)
get_object.add_solving_agent(analyzer_agent)
output = get_object.run()
code = extract_code(output)
try:
exec_vars = {}
exec_code(code, exec_vars)
obj_to_grasp = exec_vars.get("obj_to_grasp", None)
    except Exception as e:
        log.error(f"Error executing code: {e}")
        # Fall back to None so the references below don't raise a NameError
        obj_to_grasp = None
# Assume object location is current unless previously stored
location = state["location"]
if "obj_location" in state:
location = state["obj_location"]
get_obj_destination = Task(
f"""Given the following, extract the destination as a string assigned to `obj_destination`.
Here is the string to extract the destination:
{state["task"][state["task_state_idx"]]}
Example:
Here is the string to extract the destination:
Pick cheese at counter and place in kitchen
Output: 'kitchen'
""",
expected_output_format="String representing the destination.",
)
get_obj_destination_agent = Agent(
name="Destination Extractor",
model=DEFAULT_MODEL,
system_message="""
You are a helpful agent that concisely responds with variables.
""",
)
get_obj_destination.add_solving_agent(get_obj_destination_agent)
output = get_obj_destination.run()
destination = get_closest_text(output, list(SEMANTIC_LOCATIONS.keys()))
result = {
"obj_to_grasp": obj_to_grasp,
"obj_location": location,
"obj_destination": destination,
}
chat_item = {
"content": f"Pick and place **{obj_to_grasp}** at **{location}**",
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_to_grasp", "obj_location"], writes=["location"])
def navigate_for_pick(state: State) -> Tuple[dict, State]:
location = state["obj_location"]
obj_to_grasp = state["obj_to_grasp"]
location = get_closest_text(location, list(SEMANTIC_LOCATIONS.keys()))
log.info(f"Pick and place {obj_to_grasp} at {location}")
try:
if state["location"] != location:
log.info(f"Changing location from {state['location']} to {location}")
if not navigate_to(
SEMANTIC_LOCATIONS[location]["name"],
SEMANTIC_LOCATIONS[location]["location"],
):
raise Exception(f"Error navigating to location: {location}")
if not wait_until_ready():
raise Exception(f"Error navigating to location: {location}")
content = f"Navigated to **{location}** to pick **{obj_to_grasp}**"
except Exception as e:
log.error(f"Error navigating to location: {e}")
location = None
content = f"{e}"
result = {"location": location}
chat_item = {
"content": content,
"type": "text" if location is not None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_to_grasp", "obj_location"], writes=["obj_in_hand", "obj_to_grasp"])
def pick_object(state: State) -> Tuple[dict, State]:
PICK_TIMEOUT = 30.0
obj_to_grasp = state["obj_to_grasp"]
print(f"Pick {obj_to_grasp}")
try:
if not pick(obj_to_grasp):
raise Exception(f"Error picking object: {obj_to_grasp}")
pick_start_time = time.time()
obj_in_hand = None
while not obj_in_hand and time.time() - pick_start_time < PICK_TIMEOUT:
obj_in_hand = get_obj_in_hand()
time.sleep(0.1)
if obj_in_hand:
print(f"Object in hand: {obj_in_hand}")
obj_to_grasp = None
if not wait_until_ready():
raise Exception(f"Error picking object: {obj_to_grasp}")
content = f"Picked **{obj_in_hand}**"
except Exception as e:
log.error(f"Error picking object: {e}")
obj_in_hand = None
content = f"{e}"
result = {"obj_in_hand": obj_in_hand, "obj_to_grasp": obj_to_grasp}
chat_item = {
"content": content,
"type": "text" if obj_in_hand else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_destination", "location"], writes=["location"])
def navigate_for_place(state: State) -> Tuple[dict, State]:
destination = state["obj_destination"]
try:
if state["location"] != destination:
log.info(f"Changing location from {state['location']} to {destination}")
if not navigate_to(
SEMANTIC_LOCATIONS[destination]["name"],
SEMANTIC_LOCATIONS[destination]["location"],
):
raise Exception(f"Error navigating to destination: {destination}")
if not wait_until_ready():
raise Exception(f"Error navigating to destination: {destination}")
content = f"Navigated to **{destination}** to place **{state['obj_in_hand']}**"
location = destination
except Exception as e:
log.error(f"Error navigating to destination: {e}")
location = None
content = f"{e}"
result = {"location": location}
chat_item = {
"content": content,
"type": "text",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["obj_in_hand"], writes=["obj_in_hand"])
def place_object(state: State) -> Tuple[dict, State]:
obj_to_place = state["obj_in_hand"]
try:
if not place(SEMANTIC_LOCATIONS[state["location"]]["name"]):
raise Exception(f"Error placing object: {obj_to_place}")
if not wait_until_ready():
raise Exception(f"Error placing object: {obj_to_place}")
obj_in_hand = None
content = f"Placed object **{obj_to_place}**"
except Exception as e:
log.error(f"Error placing object: {e}")
obj_in_hand = obj_to_place
content = f"{e}"
result = {"obj_in_hand": obj_in_hand}
chat_item = {
"content": content,
"type": "text" if obj_in_hand is None else "error",
"role": "assistant",
}
return result, state.append(chat_history=chat_item).update(**result)
@action(reads=["task", "safe"], writes=["response"])
def prompt_for_more(state: State) -> Tuple[dict, State]:
result = {
"response": {
"content": "None of the response modes I support apply to your question. Please clarify?",
"type": "text",
"role": "assistant",
}
}
return result, state.update(**result)
@action(
reads=["current_state"],
writes=["response", "current_state", "code_attempts"],
)
def create_error_response(state: State) -> Tuple[dict, State]:
content = "Could not complete the task."
if "task_state" in state:
content += f" I have failed on {state['task_state']}."
result = {
"response": {
"content": content,
"type": "error",
"role": "assistant",
},
"current_state": "FAILED",
"code_attempts": 0,
}
return result, state.append(chat_history=result["response"]).update(**result)
@action(reads=["current_state"], writes=["response", "current_state"])
def finish_and_score_task(state: State) -> Tuple[dict, State]:
response = {
"content": "I'm done. Goodbye!",
"type": "text",
"role": "assistant",
}
current_state = "PENDING"
result = {"response": response, "current_state": current_state}
return result, state.append(chat_history=result["response"]).update(**result)
@action(reads=["current_state"], writes=["chat_history", "current_state"])
def response(state: State) -> Tuple[dict, State]:
if state["current_state"] == "DONE":
current_state = "PENDING"
response = {
"content": "**Task execution successful :+1:**",
"type": "text",
"role": "assistant",
}
elif state["current_state"] == "FAILED":
current_state = "FAILED"
response = {
"content": "**Task execution failed**\nLet me know if you'd like me to retry or just give me a new task.",
"type": "error",
"role": "assistant",
}
else:
current_state = state["current_state"]
response = {
"content": "Ready for next prompt.",
"type": "text",
"role": "assistant",
}
result = {"chat_item": response, "current_state": current_state}
return result, state.append(chat_history=response).update(**result)
MAX_CODE_ATTEMPTS = 3
def base_application(
hooks: List[LifecycleAdapter],
app_id: str,
storage_dir: str,
project_id: str,
):
if hooks is None:
hooks = []
# we're initializing above so we can load from this as well
# we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})`
tracker = LocalTrackingClient(project=project_id, storage_dir=storage_dir)
sequence_id = None
return (
ApplicationBuilder()
.with_actions(
prompt=process_prompt,
parse_prompt=parse_prompt,
respond_to_question=respond_to_question,
update_knowledge_base=update_knowledge_base,
determine_if_task_in_skill_library=determine_if_task_in_skill_library,
create_plan_for_unknown_task=create_plan_for_unknown_task,
get_closest_plans=get_closest_plans,
get_role_and_location_context=get_role_and_location_context,
create_initial_plan=create_initial_plan,
create_robot_grounded_plan=create_robot_grounded_plan,
determine_if_plan_is_feasibile=determine_if_plan_is_feasibile,
convert_plan_to_steps=convert_plan_to_steps,
create_state_machine=create_state_machine,
execute_state_machine=execute_state_machine,
navigate_to_location=navigate_to_location,
scan_the_scene=scan_the_scene,
get_image=get_image,
ask_vla=ask_vla,
get_list_of_objects=get_list_of_objects,
rollout_pick_and_place_plan=rollout_pick_and_place_plan,
pick_and_place=pick_and_place,
navigate_for_pick=navigate_for_pick,
pick_object=pick_object,
navigate_for_place=navigate_for_place,
place_object=place_object,
finish_and_score_task=finish_and_score_task,
create_error_response=create_error_response,
prompt_for_more=prompt_for_more,
response=response,
)
.with_transitions(
("prompt", "parse_prompt", default),
(
"parse_prompt",
"determine_if_task_in_skill_library",
when(prompt_cls=PromptType.PERFORM_NEW_TASK),
),
(
"parse_prompt",
"respond_to_question",
when(prompt_cls=PromptType.RESPOND_TO_QUESTION),
),
(
"parse_prompt",
"update_knowledge_base",
when(prompt_cls=PromptType.UPDATE_KNOWLEDGE_BASE),
),
(
"parse_prompt",
"execute_state_machine",
when(prompt_cls=PromptType.RETRY_EXISTING_TASK),
),
("parse_prompt", "response", default),
("respond_to_question", "response", default),
("update_knowledge_base", "response", default),
(
"determine_if_task_in_skill_library",
"create_plan_for_unknown_task",
when(task="unknown"),
),
("create_plan_for_unknown_task", "get_closest_plans", default),
("get_closest_plans", "get_role_and_location_context", default),
("get_role_and_location_context", "create_initial_plan", default),
("create_initial_plan", "create_robot_grounded_plan", default),
("create_robot_grounded_plan", "determine_if_plan_is_feasibile", default),
(
"determine_if_plan_is_feasibile",
"create_error_response",
when(feasible=False),
),
("determine_if_plan_is_feasibile", "convert_plan_to_steps", default),
(
"convert_plan_to_steps",
"create_error_response",
when(task="unknown"),
),
("convert_plan_to_steps", "create_state_machine", default),
("determine_if_task_in_skill_library", "create_state_machine", default),
("create_state_machine", "execute_state_machine", default),
("create_state_machine", "prompt_for_more", when(state_machine="unknown")),
(
"execute_state_machine",
"navigate_to_location",
when(task_state="navigate to location"),
),
(
"execute_state_machine",
"scan_the_scene",
when(task_state="scan the scene"),
),
(
"execute_state_machine",
"rollout_pick_and_place_plan",
when(task_state="rollout pick and place plan"),
),
("rollout_pick_and_place_plan", "execute_state_machine", default),
(
"execute_state_machine",
"pick_and_place",
when(task_state="pick_and_place"),
),
("pick_and_place", "navigate_for_pick", default),
("navigate_for_pick", "create_error_response", when(location=None)),
("navigate_for_pick", "pick_object", default),
("pick_object", "create_error_response", when(obj_in_hand=None)),
("pick_object", "navigate_for_place", default),
("navigate_for_place", "place_object", default),
("place_object", "execute_state_machine", default),
("navigate_to_location", "create_error_response", when(location=None)),
("navigate_to_location", "execute_state_machine", default),
("scan_the_scene", "get_image", default),
("get_image", "create_error_response", when(image=None)),
("get_image", "ask_vla", default),
("ask_vla", "get_list_of_objects", default),
("get_list_of_objects", "execute_state_machine", default),
("execute_state_machine", "finish_and_score_task", when(task_state="done")),
("finish_and_score_task", "prompt", default),
("response", "prompt", when(current_state="PENDING")),
("response", "execute_state_machine", when(current_state="RUNNING")),
("prompt_for_more", "response", default),
("create_error_response", "response", default),
("response", "prompt", when(current_state="FAILED")),
)
# initializes from the tracking log if it does not already exist
.initialize_from(
tracker,
            resume_at_next_action=True,  # resume from the next action when prior state exists for this app
default_state={"chat_history": [], "current_state": "PENDING"},
default_entrypoint="prompt",
# fork_from_app_id="670b9f83-d0fa-49ce-b396-dcaba416edc8",
# fork_from_sequence_id=55,
)
.with_hooks(*hooks)
.with_tracker(tracker)
.with_identifiers(app_id=app_id, sequence_id=sequence_id)
.build()
)
def application(
app_id: Optional[str] = None,
project_id: str = "roboai",
storage_dir: Optional[str] = "~/.burr",
hooks: Optional[List[LifecycleAdapter]] = None,
) -> Application:
return base_application(hooks, app_id, storage_dir, project_id=project_id)
if __name__ == "__main__":
app = application()
# app.visualize(
# output_file_path="statemachine", include_conditions=False, view=False, format="png"
# )
app.run(halt_after=["response"])
| 53,152 | Python | 35.158503 | 194 | 0.582988 |
AshisGhosh/roboai/roboai/roboai/role_context.py | ROBOT_CONTEXT = """
You are creating a plan for a robot with 2 arms and a mobile base.
The arms can pick and place objects.
The mobile base can navigate to different locations in the environment. The robot can also call for support if it needs help, and it can update the plan if needed.
"""
ROLE_CONTEXT = """
The robot is in a house environment where it does useful tasks such as cleaning or fetching items.
"""
EMPLOYEE_HANDBOOK = """
This is the employee handbook for the robot. The following are basic instructions:
Cleaning:
- Remove objects and place them in the cabinet
Fetching:
- Towels can be found in the kitchen
"""
| 644 | Python | 31.249998 | 154 | 0.736025 |
AshisGhosh/roboai/roboai/roboai/roboai_demo.py | from pydantic import BaseModel
from typing import Callable
import base64 # noqa: F401
from PIL import Image # noqa: F401
from abc import ABC, abstractmethod
from roboai.agent import Agent
from roboai.task import Task
# from shared.utils.robosim_client import ( # noqa: F401
# get_objects_on_table,
# pick,
# place,
# get_image,
# get_grasp_image,
# )
from shared.utils.isaacsim_client import get_image, pick, place
from shared.utils.model_server_client import answer_question_from_image # noqa: F401
import shared.utils.gradio_client as gradio # noqa: F401
import shared.utils.replicate_client as replicate # noqa: F401
from shared.utils.llm_utils import get_closest_text_sync as get_closest_text
import gradio as gr
import logging
log = logging.getLogger("roboai")
log.setLevel(logging.DEBUG)
class Tool(BaseModel):
name: str
func: Callable
description: str
example: str
def extract_code(raw_input, language="python"):
start_delimiter = f"```{language}"
if start_delimiter not in raw_input:
start_delimiter = "```"
code_start_index = raw_input.find(start_delimiter)
if code_start_index == -1:
code_start_index = 0
else:
code_start_index += len(start_delimiter)
end_delimiter = "```"
code_end_index = raw_input.find(end_delimiter, code_start_index)
if code_end_index == -1:
code_end_index = len(raw_input)
code = raw_input[code_start_index:code_end_index].strip()
log.debug(f"Extracted code: \n{code}")
return code
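# Minimal usage sketch for extract_code (the raw LLM output below is illustrative):
#
#   raw = 'Here you go:\n```python\nobjects_on_table = ["cup", "plate"]\n```'
#   code = extract_code(raw)
#   # code == 'objects_on_table = ["cup", "plate"]'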
class RobotJob(ABC):
def __init__(self):
pass
@abstractmethod
def run(self):
pass
class ClearTableJob(RobotJob):
def __init__(self):
pass
def run(self, chat_history=None):
"""
Job to:
1. Understand the scene
2. Create a plan to clear the table
"""
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
chat_history[-1][1] += "Getting image...\n"
yield chat_history
im = get_image()
prompt = "What objects are on the table?"
if chat_history:
chat_history[-1][1] += "Asking VLA model...\n"
yield chat_history
output = gradio.moondream_answer_question_from_image(im, prompt)["result"]
if chat_history:
chat_history[-1][1] += f"Response:\n{output}\n"
yield chat_history
if chat_history:
chat_history[-1][1] += "Creating plan...\n"
yield chat_history
task = Task(
f"Given the following summary, return just a list in python of the objects on the table. The table is not an object. Summary: \n{output}",
expected_output_format="""
objects_on_table = ["Object 1", "Object 2", "Object 3"]
""",
)
analyzer_agent = Agent(
name="Analyzer",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a helpful agent that concisely responds with only code.
Use only the provided functions, do not add any extra code.
""",
)
task.add_solving_agent(analyzer_agent)
output = task.run()
# output = '```objects_on_table = ["Box of Cereal", "Carton of Milk", "Can of Soup"]```'
code = extract_code(output)
try:
exec_vars = {}
exec(code, exec_vars)
log.info(exec_vars.get("objects_on_table", None))
list_of_objects = exec_vars.get("objects_on_table", None)
except Exception as e:
log.error(f"Error executing code: {e}")
list_of_objects = None
if chat_history:
chat_history[-1][1] += f"Error executing code: {e}"
yield chat_history
return
plan_task = Task(
f"""Create a plan for a robot to remove the following objects from the table:
{list_of_objects}
Do not add any extra steps.
""",
# expected_output_format="""
# 1. pick object1
# 2. place object1
# 3. pick object2
# 4. place object2
# 5. pick object3
# 6. place object3
# """
expected_output_format="A numbered list of steps constrained to the provided functions.",
)
plan_task.register_tool(
name="pick",
func=pick,
description="Robot picks up the provided arg 'object_name'",
example='"pick_success = pick(object_name="Object 1")" --> Returns: True ',
)
plan_task.register_tool(
name="place",
func=place,
description="Robot places the provided arg 'object_name'",
example='"place_success = place(object_name="Object 1")" --> Returns: True ',
)
planner_agent = Agent(
name="Planner",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a planner that breaks down tasks into steps for robots.
            Create a concise set of steps that a robot can do.
Do not add any extra steps.
"""
+ plan_task.generate_tool_prompt(),
)
plan_task.add_solving_agent(planner_agent)
# log.info(plan_task)
output = plan_task.run()
log.info(output)
if chat_history:
chat_history[-1][1] += f"Response:\n{output}"
yield chat_history
if chat_history:
chat_history[-1][1] += "Converting plan to code...\n"
yield chat_history
plan_generated = True
code = extract_code(output)
exec_vars = plan_task.get_exec_vars()
try:
exec(code, exec_vars)
except Exception as e:
log.error(f"Error executing plan: {e}")
plan_generated = False
# Validate the plan?
# Execute the plan
if not plan_generated:
coder_task = Task(
f"""Return python code to execute the plan using only the provided functions.
{output}
"""
)
coder_task.register_tool(
name="pick",
func=pick,
description="Robot picks up the provided arg 'object_name'",
example='"pick_success = pick(object_name="Object 1")" --> Returns: True ',
)
coder_task.register_tool(
name="place",
func=place,
description="Robot places the provided arg 'object_name'",
example='"place_success = place(object_name="Object 1")" --> Returns: True ',
)
coder_agent = Agent(
name="Coder",
# model="ollama/gemma:7b",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""
You are a coder that writes concise and exact code to execute the plan.
Use only the provided functions.
"""
+ coder_task.generate_tool_prompt(),
)
coder_task.add_solving_agent(coder_agent)
log.info(coder_task)
output = coder_task.run()
if chat_history:
chat_history[-1][1] += f"Response:\n{output}\n"
yield chat_history
if chat_history:
chat_history[-1][1] += "Extracting and running code...\n"
yield chat_history
code = extract_code(output)
if chat_history:
chat_history[-1][1] += f"Response:\n```{code}\n```"
yield chat_history
try:
exec_vars = coder_task.get_exec_vars()
exec(code, exec_vars)
result = "Successful execution of plan."
except Exception as e:
log.error(f"Error executing code: {e}")
result = "Error executing plan."
finally:
if chat_history:
chat_history[-1][1] += f"\nResponse:\n**{result}**"
yield chat_history
class WhatIsOnTableJob(RobotJob):
image = None
def __init__(self):
self.image = get_image()
def get_image(self):
if not self.image:
self.image = get_image()
return self.image
def run(self, chat_history=None):
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
yield chat_history
chat_history[-1][1] += "Getting image...\n"
yield chat_history
im = get_image()
prompt = "What objects are on the table?"
if chat_history:
chat_history[-1][1] += "Asking VLA model...\n"
yield chat_history
output = gradio.moondream_answer_question_from_image(im, prompt)
if chat_history:
chat_history[-1][1] += f"Response:\n{output['result']}"
yield chat_history
return output["result"]
class TestJob(RobotJob):
def __init__(self):
pass
def run(self, chat_history=None):
responses = [
"I am a robot.",
"I can help you with tasks.",
"Ask me to do something",
]
if chat_history:
if not chat_history[-1][1]:
chat_history[-1][1] = ""
else:
chat_history[-1][1] += "\n"
yield chat_history
for response in responses:
chat_history[-1][1] += response
yield chat_history
def chat():
with gr.Blocks() as demo:
gr.Markdown("## RoboAI Chatbot")
chatbot = gr.Chatbot(height=700)
msg = gr.Textbox(placeholder="Ask me to do a task.", container=False, scale=7)
image_output = gr.Image(label="Response Image")
clear = gr.ClearButton([msg, chatbot]) # noqa: F841
current_task = [None]
def respond(message, chat_history):
nonlocal current_task
closest_text = get_closest_text(
message, ["Clear the table", "What is on the table?"]
)
image = None
if closest_text:
print(f"Closest text: {closest_text}")
current_task[0] = closest_text
chat_history.append((message, None))
return "", chat_history, image
def do_function(chat_history):
nonlocal current_task
if not current_task:
return "", chat_history, None
chat_history[-1][1] = f"**{current_task[0]}**"
yield chat_history
if current_task[0] == "What is on the table?":
job = WhatIsOnTableJob()
image = job.get_image()
yield from job.run(chat_history)
elif current_task[0] == "Clear the table":
job = ClearTableJob()
yield from job.run(chat_history)
image = WhatIsOnTableJob().get_image()
elif current_task[0] == "Test Job":
job = TestJob()
yield from job.run(chat_history)
else:
chat_history[-1][1] = "Sorry, I don't understand that command."
image = None
return None, chat_history, image
def get_image_output():
image_output = WhatIsOnTableJob().get_image()
return image_output
msg.submit(
respond, [msg, chatbot], [msg, chatbot, image_output], queue=False
).then(get_image_output, [], [image_output]).then(do_function, chatbot, chatbot)
demo.queue()
demo.launch()
if __name__ == "__main__":
chat()
| 12,136 | Python | 31.365333 | 150 | 0.528593 |
AshisGhosh/roboai/roboai/roboai/test.py | import logging
from litellm import completion
from roboai.agent import Agent
from roboai.task import Task
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("roboai")
log.setLevel(logging.DEBUG)
# litellm.success_callback = ["langfuse"]
# litellm.set_verbose=True
def test_task():
planner_agent = Agent(
name="Planner",
model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
system_message="""You are a planner that breaks down tasks into steps for robots.
Create a set of steps that a robot with wheels and one arm can do.
""",
)
# task_handler = Agent(
# name="Task Handler",
# model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
# system_message="""
# You are a task handler that can handle tasks for robots.
# """
# )
task_handler = "Create a plan to clear the table"
task = Task(task_handler, [planner_agent])
task.run()
def test():
messages = [{"content": "Hello, how are you?", "role": "user"}]
response = completion(
model="openrouter/huggingfaceh4/zephyr-7b-beta:free", messages=messages
)
print(response)
def test_agent():
agent = Agent(name="test", model="openrouter/huggingfaceh4/zephyr-7b-beta:free")
response = agent.chat("Hello, how are you?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
response = agent.chat("What is the capital of China?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
response = agent.chat("And India?")
print(response)
print(agent.get_last_response())
print(agent.get_last_response_obj())
agent.clear_messages()
print(agent.messages)
if __name__ == "__main__":
# test()
# test_agent()
test_task()
| 2,191 | Python | 27.467532 | 94 | 0.61068 |
AshisGhosh/roboai/roboai/roboai/server.py | import functools
import importlib
from typing import List, Literal
import pydantic
from fastapi import APIRouter
from burr.core import Application
"""This file represents a simple chatbot API backed with Burr.
We manage an application, write to it with post endpoints, and read with
get/ endpoints.
This demonstrates how you can build interactive web applications with Burr!
"""
# We're doing dynamic import because this lives within examples/ (and that module has dashes)
# navigate to the examples directory to read more about this!
chat_application = importlib.import_module(
"burr.examples.multi-modal-chatbot.application"
) # noqa: F401
# the app is commented out as we include the router.
# app = FastAPI()
router = APIRouter()
class ChatItem(pydantic.BaseModel):
"""Pydantic model for a chat item. This is used to render the chat history."""
content: str
type: Literal["image", "text", "code", "error"]
role: Literal["user", "assistant"]
@functools.lru_cache(maxsize=128)
def _get_application(project_id: str, app_id: str) -> Application:
"""Quick tool to get the application -- caches it"""
chat_app = chat_application.application(app_id=app_id, project_id=project_id)
return chat_app
@router.post("/response/{{project_id}}/{{app_id}}", response_model=List[ChatItem])
def chat_response(project_id: str, app_id: str, prompt: str) -> List[ChatItem]:
"""Chat response endpoint. User passes in a prompt and the system returns the
    full chat history, so it's easier to render.
:param project_id: Project ID to run
:param app_id: Application ID to run
:param prompt: Prompt to send to the chatbot
:return:
"""
burr_app = _get_application(project_id, app_id)
_, _, state = burr_app.run(halt_after=["response"], inputs=dict(prompt=prompt))
return state.get("chat_history", [])
@router.get("/response/{project_id}/{app_id}", response_model=List[ChatItem])
def chat_history(project_id: str, app_id: str) -> List[ChatItem]:
"""Endpoint to get chat history. Gets the application and returns the chat history from state.
:param project_id: Project ID
:param app_id: App ID.
:return: The list of chat items in the state
"""
chat_app = _get_application(project_id, app_id)
state = chat_app.state
return state.get("chat_history", [])
@router.post("/create/{project_id}/{app_id}", response_model=str)
async def create_new_application(project_id: str, app_id: str) -> str:
"""Endpoint to create a new application -- used by the FE when
the user types in a new App ID
:param project_id: Project ID
:param app_id: App ID
:return: The app ID
"""
# side-effect of this persists it -- see the application function for details
chat_application.application(app_id=app_id, project_id=project_id)
return app_id # just return it for now
# comment this back in for a standalone chatbot API
# app.include_router(router, prefix="/api/v0/chatbot")
| 2,984 | Python | 33.310344 | 98 | 0.704424 |
AshisGhosh/roboai/roboai/roboai/semantic_locations.py | COFFEE_TABLE = {
"coffee_table": {
"name": "coffee_table_fqluyq_0",
"location": "0.75 -1.1 3.14",
}
}
TABLE = {
"table": {
"name": "table",
"location": None,
}
}
SUPPLY_CABINET = {
"supply_cabinet": {"name": "bottom_cabinet_bamfsz_0", "location": "-1.0 -0.5 3.14"}
}
FRIDGE = {"fridge": {"name": "fridge_xyejdx_0", "location": "0.2 2.0 1.57"}}
TRASH_CAN = {"trash_can": {"name": "trash_can_zotrbg_0", "location": "0.0 2.4 3.14"}}
SEMANTIC_LOCATIONS = {**COFFEE_TABLE, **TABLE, **SUPPLY_CABINET, **FRIDGE}
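# Illustrative lookup (values above are sim-specific; the pose string is likely "x y yaw",
# which is an assumption): navigation actions resolve a semantic name to the underlying
# object name and pose, e.g.
#
#   SEMANTIC_LOCATIONS["fridge"]["name"]      # -> "fridge_xyejdx_0"
#   SEMANTIC_LOCATIONS["fridge"]["location"]  # -> "0.2 2.0 1.57"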
| 562 | Python | 22.458332 | 87 | 0.537367 |
AshisGhosh/roboai/roboai/roboai/streamlit_app.py | import time
from typing import Optional
import roboai as chatbot_application
import streamlit as st
from burr.integrations.streamlit import (
AppState,
Record,
get_state,
render_explorer,
set_slider_to_current,
update_state,
)
st.set_page_config(layout="wide")
st.markdown("This is a demo of RoboAI - LLM based planning for robots.")
def render_chat_message(record: Record):
# if record.action in ["prompt", "response"]:
recent_chat_message = record.state["chat_history"][-1]
content = recent_chat_message["content"]
content_type = recent_chat_message["type"]
role = recent_chat_message["role"]
with st.chat_message(role):
if content_type == "image":
st.image(content)
elif content_type == "code":
st.code(content)
elif content_type == "text":
st.write(content)
elif content_type == "error":
st.error(content)
def retrieve_state():
if "burr_state" not in st.session_state:
        # TODO -- enable usage of Hamilton. Currently it's not wiring in inputs,
        # but it should be easy enough.
state = AppState.from_empty(
app=chatbot_application.application(),
)
else:
state = get_state()
return state
def chatbot_step(app_state: AppState, prompt: Optional[str]) -> bool:
"""Pushes state forward for the chatbot. Returns whether or not to rerun the app.
:param app_state: State of the app
:param prompt: Prompt to set the chatbot to. If this is None it means it should continue and not be reset.
:return:
"""
inputs = None
if prompt is not None:
# We need to update
inputs = {"prompt": prompt}
# app_state.app.update_state(app_state.app.state.update(prompt=prompt))
st.session_state.running = True # set to running
    # if it's not running this is a no-op
if not st.session_state.get("running", False):
return False
application = app_state.app
step_output = application.step(inputs=inputs)
# if step_output is None:
# st.session_state.running = False
# return False
action, result, state = step_output
app_state.history.append(Record(state.get_all(), action.name, result))
set_slider_to_current()
if action.name in ["response", "finish_and_score_task"] and state[
"current_state"
] in ["PENDING", "FAILED"]:
# we've gotten to the end
st.session_state.running = False
return True
return True
def main():
st.title("RoboAI")
    app_state = retrieve_state()  # retrieve first so we can use it for the rest of the step
columns = st.columns(2)
with columns[0]:
prompt = st.chat_input(
"...", disabled=st.session_state.get("running", False), key="chat_input"
)
should_rerun = chatbot_step(app_state, prompt)
with st.container(height=850):
for item in app_state.history:
render_chat_message(item)
# wait for 0.1 seconds to allow the UI to update
time.sleep(0.1)
with columns[1]:
render_explorer(app_state)
update_state(app_state) # update so the next iteration knows what to do
if should_rerun:
st.rerun()
if __name__ == "__main__":
main()
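# To try this UI locally (a sketch; assumes streamlit and burr are installed and the
# roboai application module is importable from the working directory):
#
#   streamlit run streamlit_app.py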
| 3,307 | Python | 30.207547 | 110 | 0.625038 |
AshisGhosh/roboai/roboai/roboai/agent.py | import time
import logging
from litellm import completion
from dotenv import load_dotenv
logging.basicConfig(level=logging.WARN)
load_dotenv("shared/.env") # take environment variables from .env.
log = logging.getLogger("roboai")
log.setLevel(logging.INFO)
# litellm.success_callback = ["langfuse"]
class Agent:
def __init__(self, name, model, system_message="", base_url=None):
self.name = name
self.model = model
self._last_response = None
self._last_response_content = None
self.messages = []
self.system_message = system_message
self.set_system_message(system_message)
self.base_url = base_url
def chat(self, message):
self.messages.append({"content": message, "role": "user"})
completion_args = {
"model": self.model,
"messages": self.messages,
}
if self.base_url:
completion_args["base_url"] = self.base_url
response = completion(**completion_args)
self._last_response = response
self._last_response_content = response["choices"][0]["message"]["content"]
self.messages.append(
{"content": self._last_response_content, "role": "assistant"}
)
return self._last_response_content
def task_chat(self, messages):
completion_args = {
"model": self.model,
"messages": messages,
}
if self.base_url:
completion_args["base_url"] = self.base_url
start = time.time()
response = completion(**completion_args)
log.debug(f"Completion time: {time.time() - start}")
self._last_response = response
self._last_response_content = response["choices"][0]["message"]["content"]
return self._last_response_content
def get_last_response(self):
return self._last_response_content
def get_last_response_obj(self):
return self._last_response
def clear_messages(self):
self.messages = []
def set_system_message(self, message):
if not message:
            log.warning(f"System message for agent '{self.name}' is empty.")
return
system_message = None
for m in self.messages:
if m["role"] == "system":
system_message = m
break
if system_message:
system_message["content"] = message
else:
self.messages.append({"content": message, "role": "system"})
self.system_message = message
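# Minimal usage sketch (illustrative; the model name is an assumption and requires the
# matching API key in shared/.env):
#
#   agent = Agent(
#       name="Planner",
#       model="openrouter/huggingfaceh4/zephyr-7b-beta:free",
#       system_message="You are a planner that breaks tasks into short steps.",
#   )
#   print(agent.chat("How would you clear a table?"))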
| 2,547 | Python | 28.97647 | 82 | 0.591676 |
AshisGhosh/roboai/roboai/roboai/skills.py | SCAN_THE_SCENE = {
"scan the scene": {
"symbol": "scan_the_scene",
"description": """
        Can scan the scene to retrieve information.
Can also be used to identify objects in the scene.
Should already be at the required location.
""",
}
}
PICK_OBJECT = {
"pick object": {
"symbol": "pick",
"description": """
Can pick up an object in the scene.
Requires specifying the object.
""",
}
}
PLACE_IN_LOCATION = {
"place in location": {
"symbol": "place",
"description": """
Can place an object in the scene.
Requires already holding the object.
Requires specifying the location.
""",
}
}
NAVIGATE_TO_LOCATION = {
"navigate to location": {
"symbol": "navigate_to",
"description": """
Can navigate to a location in the scene.
Location can also be specified by an object.
""",
}
}
CALL_SUPPORT = {
"call support": {
"symbol": "call_support",
"description": """
Can call support for an issue in the scene.
""",
}
}
UPDATE_PLAN = {
"update plan": {
"symbol": "update_plan",
"description": """
Can update the plan to a new one.
""",
}
}
ROLLOUT_PICK_AND_PLACE_PLAN = {
"rollout pick and place plan": {
"symbol": "rollout",
"description": """
Given an observation or a scan step, can rollout a pick and place plan.
""",
}
}
SKILLS = {
**SCAN_THE_SCENE,
**PICK_OBJECT,
**PLACE_IN_LOCATION,
**NAVIGATE_TO_LOCATION,
**CALL_SUPPORT,
**UPDATE_PLAN,
**ROLLOUT_PICK_AND_PLACE_PLAN,
}
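# Illustrative lookup (the planner matches free-form step text against the "description"
# fields above and then uses the "symbol" when emitting code):
#
#   SKILLS["navigate to location"]["symbol"]  # -> "navigate_to"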
| 2,047 | Python | 24.28395 | 95 | 0.447484 |
AshisGhosh/roboai/roboai/roboai/task.py | import io
import base64
import logging
from pydantic import BaseModel
from typing import Callable
logging.basicConfig(level=logging.WARN)
log = logging.getLogger("roboai")
log.setLevel(logging.INFO)
class Tool(BaseModel):
name: str
func: Callable
description: str
example: str
def extract_code(raw_input, language="python"):
start_delimiter = f"```{language}\n"
end_delimiter = "\n```"
code_start_index = raw_input.find(start_delimiter) + len(start_delimiter)
code_end_index = raw_input.find(end_delimiter, code_start_index)
code = raw_input[code_start_index:code_end_index].strip()
return code
def str_from_messages(messages):
# Extract the text from the messages, ignore images
text = ""
for m in messages:
if isinstance(m["content"], str):
text += m["role"] + ": " + m["content"] + "\n"
else:
text += m["role"] + ": " + m["content"]["text"] + "\n"
return text
class Task:
def __init__(
self,
task_description,
solving_agents=None,
expected_output_format=None,
finish_when=None,
):
self.task_description = task_description
self.solving_agents = solving_agents if solving_agents else []
self.expected_output_format = expected_output_format
self.finish_when = finish_when
self.chat_messages = []
self.tools = []
def add_solving_agent(self, agent):
self.solving_agents.append(agent)
@property
def task_description_str(self):
if isinstance(self.task_description, list):
return self.task_description[0]["text"]
return self.task_description
def add_task_image(self, image):
try:
# Create a bytes buffer to hold the image data
buffer = io.BytesIO()
# Save the Pillow image object to the buffer in a specific format (e.g., JPEG)
image.save(buffer, format="JPEG")
# Seek to the start of the buffer
buffer.seek(0)
# Read the buffer content and encode it to Base64
image_str = base64.b64encode(buffer.read()).decode("utf-8")
# Format the Base64 string as a data URL, specifying the MIME type
# data_url = f"data:image/jpeg;base64,{image_str}"
data_url = image_str
# Update the task description with the text and the image data URL
self.task_description = [
{"type": "text", "text": self.task_description},
{"type": "image_url", "image_url": {"url": data_url}},
]
log.info("Task image added.")
except Exception as e:
log.error(f"Failed to add task image: {e}")
def register_tool(self, name, func, description, example):
self.tools.append(
Tool(name=name, func=func, description=description, example=example)
)
log.debug(f"Tool {name} added.")
def generate_tool_prompt(self):
tool_prompt = """
You can use the following python functions:
"""
for tool in self.tools:
tool_prompt += f"""'{tool.name}()'
Description: {tool.description}
Usage: {tool.example}
"""
return tool_prompt
def get_exec_vars(self):
exec_vars = {}
for tool in self.tools:
exec_vars[tool.name] = tool.func
return exec_vars
def get_exec_vars_serialized(self):
exec_vars = {}
for tool in self.tools:
exec_vars[tool.name] = tool.func.__name__
return exec_vars
def get_complete_prompt(self, agentid: int):
task_description = self.task_description
if self.expected_output_format:
task_description += f"""
Ensure your output follows the following format strictly: \n{self.expected_output_format}"""
prompt = f"""
{task_description}
"""
if self.tools:
prompt += self.generate_tool_prompt()
return prompt
def run(self):
task_description = self.task_description
if self.expected_output_format:
task_description += f"""
Ensure your output follows the following format strictly: \n{self.expected_output_format}"""
self.chat_messages.append(
{"task": {"content": task_description, "role": "user"}}
)
log.info(f"Task: '{self.task_description_str}'")
for agent in self.solving_agents:
response = self.task_chat(agent, self.chat_messages)
log.info(f"> AGENT '{agent.name}': {response}")
self.chat_messages.append(
{agent.name: {"content": response, "role": "assistant"}}
)
return next(iter(self.chat_messages[-1].values()))["content"]
def task_chat(self, agent, messages):
agent_messages = []
if agent.system_message:
agent_messages.append({"role": "system", "content": agent.system_message})
for m in messages:
if next(iter(m)) == "task":
agent_messages.append(m["task"])
elif next(iter(m)) in [a.name for a in self.solving_agents if a != agent]:
message = m[next(iter(m))]
message["role"] = "user"
agent_messages.append(message)
elif next(iter(m)) == agent.name:
message = m[next(iter(m))]
agent_messages.append(message)
log.debug(f"{str_from_messages(agent_messages)}")
response = agent.task_chat(agent_messages)
return response
def __str__(self):
task_info = f"Task: {self.task_description_str}"
if self.expected_output_format:
task_info += f"\n Expected Output Format: {self.expected_output_format}"
if self.solving_agents:
task_info += "\n Solving Agents:"
for a in self.solving_agents:
task_info += f"\n - {a.name}"
if self.tools:
task_info += "\n Registered Tools:"
for t in self.tools:
task_info += f"\n - {t.name}"
return task_info
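# Minimal usage sketch (illustrative; assumes an Agent from roboai.agent and a reachable model):
#
#   from roboai.agent import Agent
#
#   task = Task(
#       "List three steps to clear a table.",
#       expected_output_format="A numbered list of steps.",
#   )
#   task.add_solving_agent(Agent(name="Planner", model="openrouter/huggingfaceh4/zephyr-7b-beta:free"))
#   print(task.run())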
| 6,279 | Python | 33.31694 | 116 | 0.565058 |
AshisGhosh/roboai/roboai/roboai/plans.py | CLEAR_TABLE_PLAN = {
"clear the table": """
1. Navigate to the table
2. Scan the table for objects
3. Rollout pick and place plan to remove objects
"""
}
CLEAN_BATHROOM_PLAN = {
"clean the bathroom": """
Given the bathroom is dirty and has a toilet, sink, and shower
1. Spray the shower with cleaner
2. Spray the sink with cleaner
3. Spray the toilet with cleaner
4. Scrub the sink
5. Scrub the toilet
6. Scrub the shower
"""
}
PLANS = {
**CLEAR_TABLE_PLAN,
**CLEAN_BATHROOM_PLAN,
}
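# Illustrative note: get_closest_plans (in the application) matches the user prompt
# against these keys with an embedding similarity threshold, e.g.
#
#   get_closest_text("tidy up the dining table", list(PLANS.keys()), k=2, threshold=0.75)
#   # -> ["clear the table"]  (hypothetical result)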
| 597 | Python | 22.919999 | 70 | 0.577889 |
AshisGhosh/roboai/roboai/roboai/knowledge_base_utils.py | import os
import json
import pydantic
from datetime import datetime
class KnowledgeBaseItem(pydantic.BaseModel):
key: str
value: pydantic.Json
tags: list[str] = []
timestamp: str = pydantic.Field(default_factory=lambda: datetime.now().isoformat())
class KnowledgeBase:
def __init__(self, file_path: str = "/app/roboai/knowledge_base.json"):
self.file_path = file_path
self.data = self.load_data()
def load_data(self):
print("Current working directory:", os.getcwd())
print("Files in the directory:", os.listdir())
with open(self.file_path, "r") as f:
data = json.load(f)
return data
@property
def all_data(self):
return self.data
@property
def knowledge(self):
return [
KnowledgeBaseItem(
key=key,
value=value["value"],
tags=value["tags"],
timestamp=value["timestamp"],
)
for key, value in self.data.items()
]
def get_knowledge_as_string(self):
return "\n".join([f"{value['value']}" for value in self.data.values()])
def get_data(self, key: str):
return self.data.get(key, None)
def add_data(self, key: str, value, tags: list[str] = []):
self.data[key] = {
"value": value,
"tags": tags,
"timestamp": datetime.now().isoformat(),
}
self.save_data()
def save_data(self):
with open(self.file_path, "w") as f:
json.dump(self.data, f, indent=4)
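# --- Usage sketch (illustrative only) ---
# Assumes a JSON file with a top-level object already exists at the
# (hypothetical) path below; load_data has no fallback for a missing file.
#
#   kb = KnowledgeBase(file_path="/tmp/knowledge_base.json")
#   kb.add_data("sink_location", "The sink is to the right of the fridge.", tags=["scene"])
#   print(kb.get_knowledge_as_string())  # one line per stored value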
| 1,586 | Python | 25.898305 | 87 | 0.558008 |
AshisGhosh/roboai/omnigibson/roboai/tiago_primitives.yaml | env:
action_frequency: 30 # (int): environment executes action at the action_frequency rate
physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
automatic_reset: false # (bool): whether to automatic reset after an episode finishes
  flatten_action_space: false                     # (bool): whether to flatten the action space as a single 1D array
flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
use_external_obs: false # (bool): Whether to use external observations or not
initial_pos_z_offset: 0.1
external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation
render:
viewer_width: 1280
viewer_height: 720
scene:
type: InteractiveTraversableScene
scene_model: Rs_int
trav_map_resolution: 0.1
default_erosion_radius: 0.0
trav_map_with_objects: true
num_waypoints: 1
waypoint_resolution: 0.1
load_object_categories: null
not_load_object_categories: null
load_room_types: null
load_room_instances: null
load_task_relevant_only: true
seg_map_resolution: 0.1
scene_source: OG
include_robots: false
robots:
- type: Tiago
obs_modalities: ["rgb", "depth", "seg_instance", "seg_instance_id", "normal", "scan", "occupancy_grid"]
scale: 1.0
self_collisions: true
action_normalize: false
action_type: continuous
grasping_mode: physical
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: horizontal
controller_config:
base:
name: JointController
arm_left:
name: JointController
use_delta_commands: true
arm_right:
name: JointController
use_delta_commands: true
gripper_left:
name: JointController
motor_type: position
command_input_limits: [-1, 1]
command_output_limits: null
use_delta_commands: true
gripper_right:
name: JointController
motor_type: position
command_input_limits: [-1, 1]
command_output_limits: null
use_delta_commands: true
camera:
name: JointController
use_delta_commands: False
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 480
image_width: 640
objects: []
task:
type: DummyTask
# task:
# type: BehaviorTask
# activity_name: putting_away_Halloween_decorations
# activity_definition_id: 0
# activity_instance_id: 0
# predefined_problem: null
# online_object_sampling: false
# debug_object_sampling: null
# highlight_task_relevant_objects: false
# termination_config:
# max_steps: 500
# reward_config:
# r_potential: 1.0
scene_graph:
egocentric: true
full_obs: true
only_true: true
merge_parallel_edges: false | 3,215 | YAML | 32.5 | 307 | 0.66283 |
AshisGhosh/roboai/omnigibson/roboai/roboai.py | import os
import yaml
import numpy as np
import asyncio
import multiprocessing
import time
import omnigibson as og
from omnigibson.macros import gm # noqa F401
from omnigibson.action_primitives.starter_semantic_action_primitives import ( # noqa F401
StarterSemanticActionPrimitives,
StarterSemanticActionPrimitiveSet,
)
from omnigibson.action_primitives.symbolic_semantic_action_primitives import (
SymbolicSemanticActionPrimitives,
SymbolicSemanticActionPrimitiveSet,
)
from omnigibson.robots import Tiago
from .visualize_scene_graph import visualize_scene_graph, visualize_ascii_scene_graph # noqa F401
from .primitive_patches import _quick_settle_robot, _simplified_place_with_predicate
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import io
from PIL import Image
from starlette.responses import StreamingResponse
# gm.USE_GPU_DYNAMICS = True
# gm.ENABLE_FLATCACHE = True
class ActionHandler:
def __init__(self, env, controller, scene, task_queue):
self.env = env
self.controller = controller
self.scene = scene
self.actions = task_queue
self._last_camera_action = None
async def add_action(self, action: str):
"""
Add an action to the list of actions to be executed
"""
self.actions.put(action)
def execute_controller(self, ctrl_gen):
robot = self.env.robots[0]
for action in ctrl_gen:
state, reward, done, info = self.env.step(action)
self._last_camera_action = action[robot.controller_action_idx["camera"]]
def execute_action(self, action):
"""
Execute the action at the top of the list
"""
# robot = self.env.robots[0]
action, args = action[0], action[1:]
if action == "pick":
print(f"Attempting: 'pick' with args: {args}")
obj_name = args[0]
grasp_obj = self.scene.object_registry("name", obj_name)
# grasp_obj.disable_gravity()
# print(f"navigating to object {grasp_obj.name}")
self.controller._tracking_object = grasp_obj
# self.execute_controller(
# self.controller.apply_ref(
# SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
# grasp_obj,
# attempts=10,
# )
# )
print(f"grasping object {grasp_obj.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.GRASP, grasp_obj
)
)
print("Finished executing pick")
elif action == "place":
print(f"Attempting: 'place' with args: {args}")
obj_name = args[0]
if obj_name in ["None", "", None]:
obj_name = "table"
print(f"no object specified, defaulting to {obj_name}")
destination = self.scene.object_registry("name", obj_name)
# print(f"navigating to object {destination.name}")
self.controller._tracking_object = destination
# self.execute_controller(
# self.controller.apply_ref(
# SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, destination, attempts=10
# )
# )
print(f"placing object on top of {destination.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP,
destination,
attempts=1,
)
)
print("Finished executing place")
elif action == "navigate_to":
print(f"Attempting: 'navigate_to' with args: {args}")
obj_name = args[0]
obj = self.scene.object_registry("name", obj_name)
self.controller._tracking_object = obj
pose = args[1]
pose = [float(p) for p in pose.split(" ")]
print(f"navigating to object {obj.name}")
self.execute_controller(
self.controller.apply_ref("navigate_to_pose", pose, attempts=10)
)
print("Finished executing navigate_to")
elif action == "navigate_to_object":
print(f"Attempting: 'navigate_to_object' with args: {args}")
obj_name = args[0]
obj = self.scene.object_registry("name", obj_name)
self.controller._tracking_object = obj
print(f"navigating to object {obj.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, obj, attempts=10
)
)
print("Finished executing navigate_to_object")
elif action == "pick_test":
print("Executing pick")
grasp_obj = self.scene.object_registry("name", "black_cologne_bottle")
print(f"navigating to object {grasp_obj.name}")
self.controller._tracking_object = grasp_obj
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
grasp_obj,
attempts=10,
)
)
print(f"grasping object {grasp_obj.name}")
# self.execute_controller(self.controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj))
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.GRASP, grasp_obj
)
)
print("Finished executing pick")
elif action == "place_test":
print("Executing place")
table = self.scene.object_registry("name", "table")
# print(f"navigating to object {table.name}")
self.controller._tracking_object = table
# self.execute_controller(self.controller.apply_ref(SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO, table, attempts=10))
print(f"placing object on top of {table.name}")
# self.execute_controller(self.controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, table))
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, table
)
)
print("Finished executing place")
elif action == "navigate_to_coffee_table":
# print("Executing navigate_to_coffee_table")
coffee_table = self.scene.object_registry("name", "coffee_table_fqluyq_0")
self.controller._tracking_object = coffee_table
print(f"navigating to object {coffee_table.name}")
self.execute_controller(
self.controller.apply_ref(
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO,
coffee_table,
attempts=10,
)
)
print("Finished executing navigate_to_coffee_table")
elif action == "viz":
print("Visualizing scene graph")
graph = self.env.get_scene_graph()
print(graph)
visualize_ascii_scene_graph(self.scene, graph)
# visualize_scene_graph(self.scene, graph)
print("Finished visualizing scene graph")
def check_for_action(self):
"""
Check if there is an action to be executed
"""
if not self.actions.empty():
action = self.actions.get()
self.execute_action(action)
return True
action = np.zeros(self.env.robots[0].action_dim)
if self._last_camera_action is not None:
action[self.env.robots[0].controller_action_idx["camera"]] = (
self._last_camera_action
)
# print(f"ACTION - {action}")
state, reward, done, info = self.env.step(action)
# print(f"info: {info}")
return False
class SimWrapper:
def __init__(self, task_queue, image_queue, obj_in_hand_queue, ready_queue):
self.task_queue = task_queue
self.image_queue = image_queue
self.obj_in_hand_queue = obj_in_hand_queue
self.ready_queue = ready_queue
asyncio.run(self.run())
async def run(self):
"""
        Demonstrates how to use the symbolic action primitives to pick and place objects in the Rs_int scene.
        It loads Rs_int with a Tiago robot and executes actions queued from the FastAPI endpoints below.
"""
# Load the config
# config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
config_filename = os.path.join(
"/omnigibson-src/roboai", "tiago_primitives.yaml"
)
config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
config["scene"]["load_object_categories"] = [
"floors",
"ceilings",
"walls",
"coffee_table",
"bottom_cabinet",
"top_cabinet",
"floor_lamp",
"shelf",
"trash_can",
"counter_top",
"fridge",
"sink",
]
# # SHOW TRAVERSABLE AREA
# import matplotlib.pyplot as plt
# import cv2
# scene_model = "Rs_int"
# trav_map_size = 200
# trav_map_erosion = 2
# trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
# trav_map = np.array(trav_map.resize((trav_map_size, trav_map_size)))
# trav_map = cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion)))
# plt.figure(figsize=(12, 12))
# plt.imshow(trav_map)
# plt.title(f"Traversable area of {scene_model} scene")
# plt.show()
config["scene"]["not_load_object_categories"] = ["ceilings"]
config["objects"] = [
{
"type": "DatasetObject",
"name": "black_cologne_bottle",
"category": "bottle_of_cologne",
"model": "lyipur",
"position": [-0.3, -0.8, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "apple",
"category": "apple",
"model": "agveuv",
"position": [-0.3, -1.1, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "cleaner_bottle",
"category": "bottle_of_cleaner",
"model": "svzbeq",
"position": [-0.5, -0.8, 0.6],
"orientation": [0, 1, 0, 0],
},
{
"type": "DatasetObject",
"name": "tomato_can",
"category": "can_of_tomatoes",
"model": "ckdouu",
"position": [-0.6, -1.1, 0.5],
"orientation": [0, 0, 0, 1],
},
{
"type": "DatasetObject",
"name": "table",
"category": "breakfast_table",
"model": "rjgmmy",
"scale": [0.3, 0.3, 0.3],
"position": [-0.7, 0.5, 0.2],
"orientation": [0, 0, 0, 1],
},
]
# Load the environment
env = og.Environment(configs=config)
scene = env.scene
robot = env.robots[0]
print(type(robot))
print(robot.default_arm)
delattr(Tiago, "simplified_mesh_usd_path")
# del robot.simplified_mesh_usd_path
# print(robot.simplified_mesh_usd_path)
# Allow user to move camera more easily
og.sim.enable_viewer_camera_teleoperation()
cam = og.sim.viewer_camera
# Living Room View
# camera pose: array([0.92048866, -5.66129052, 5.39363818]), array([0.44288347, 0.04140454, 0.08336682, 0.89173419])
# cam.set_position_orientation(
# position=np.array([0.92048866, -5.66129052, 5.39363818]),
# orientation=np.array([0.44288347, 0.04140454, 0.08336682, 0.89173419]),
# )
# Living Room + Kitchen View
# cam pose: (array([2.78592041, 0.56388298, 7.03105183]), array([0.15355086, 0.15665731, 0.69675768, 0.68294169]))
cam.set_position_orientation(
position=np.array([2.78592041, 0.56388298, 7.03105183]),
orientation=np.array([0.15355086, 0.15665731, 0.69675768, 0.68294169]),
)
# controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
SymbolicSemanticActionPrimitives._place_with_predicate = (
_simplified_place_with_predicate
)
SymbolicSemanticActionPrimitives._settle_robot = _quick_settle_robot
controller = SymbolicSemanticActionPrimitives(env)
controller.controller_functions["navigate_to_pose"] = (
controller._navigate_to_pose
)
# Task queue
action_handler = ActionHandler(
env, controller, scene, task_queue=self.task_queue
)
if False:
print("\n\n####### TASK DATA #######\n")
task_str, _, _ = env.task.show_instruction()
print(task_str)
task_obs, _ = env.task._get_obs(env)
agent_pos = task_obs["agent.n.01_1_pos"]
print(task_obs)
print(env.task.object_scope)
for k, v in env.task.object_scope.items():
dist = np.linalg.norm(
np.array(task_obs[f"{k}_pos"]) - np.array(agent_pos)
)
print(f"{k}: {v.name} {v.category} {v.exists} {dist:.3f}")
print("\n#########################\n\n")
while True:
await asyncio.sleep(0.1)
obs, info = robot.get_obs()
# print(info["robot0:eyes:Camera:0"]["seg_semantic"])
# img = obs["robot0:eyes:Camera:0"]["rgb"]
# print(obs["robot0:eyes:Camera:0"].keys())
# print(f"seg_semantic: {obs['robot0:eyes:Camera:0']['seg_semantic'].shape}")
# print(f"seg_instance: {obs['robot0:eyes:Camera:0']['seg_instance'].shape}")
# print(scene.seg_map)
if self.image_queue.full():
self.image_queue.get()
self.image_queue.put(
(obs["robot0:eyes:Camera:0"], info["robot0:eyes:Camera:0"])
)
if self.obj_in_hand_queue.full():
self.obj_in_hand_queue.get()
obj_in_hand = controller._get_obj_in_hand()
if obj_in_hand is not None:
self.obj_in_hand_queue.put(obj_in_hand.name)
else:
self.obj_in_hand_queue.put(None)
if self.ready_queue.full():
self.ready_queue.get()
self.ready_queue.put(time.time())
action_handler.check_for_action()
# task_str, _, _ = env.task.show_instruction()
# print(task_str)
# if self.scene_graph_queue.full():
# self.scene_graph_queue.get()
# graph = env.get_scene_graph()
# self.scene_graph_queue.put(graph)
# current = robot.get_joint_positions(normalized=False)
# print(f"current: {current}")
# arm_left = robot._controllers["arm_left"]
# print(f"arm_left: {arm_left.control}")
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
task_queue = multiprocessing.Queue()
image_queue = multiprocessing.Queue(maxsize=1)
obj_in_hand_queue = multiprocessing.Queue(maxsize=1)
ready_queue = multiprocessing.Queue(maxsize=1)
sim = multiprocessing.Process(
target=SimWrapper, args=(task_queue, image_queue, obj_in_hand_queue, ready_queue)
)
@app.post("/add_action")
async def add_action(action: str):
action = action.split(",")
action = (action[0], *action[1:])
print(f"Adding action: {action}")
task_queue.put(action)
return {"action": action}
@app.get("/get_image")
async def get_image():
image, _ = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, _ = image_queue.get()
image = image["rgb"]
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_visible_objects")
async def get_visible_objects():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance"]
info = info["seg_instance"]
visible_objects = list(info.values())
# filter out background object keywords
background_obj_keywords = ["floor", "wall", "robot", "table"]
visible_objects = [
obj
for obj in visible_objects
if not any(keyword in obj.lower() for keyword in background_obj_keywords)
]
return {"objects": visible_objects}
@app.get("/get_obj_in_hand")
async def get_obj_in_hand():
obj_in_hand = obj_in_hand_queue.get()
return {"obj_in_hand": obj_in_hand}
@app.get("/get_is_ready")
async def get_is_ready():
READY_THRESHOLD = 1.0
ready = ready_queue.get()
if time.time() - ready > READY_THRESHOLD:
ready = False
return {"is_ready": ready}
@app.get("/wait_until_ready")
async def wait_until_ready():
READY_THRESHOLD = 1.0
ready = ready_queue.get()
is_ready = False
while not is_ready:
ready = ready_queue.get()
if time.time() - ready < READY_THRESHOLD:
is_ready = True
await asyncio.sleep(0.1)
return {"is_ready": is_ready}
@app.get("/get_semantic_segmentation")
async def get_semantic_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_semantic"]
info = info["seg_semantic"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_instance_segmentation")
async def get_instance_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance"]
info = info["seg_instance"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
@app.get("/get_id_instance_segmentation")
async def get_id_instance_segmentation():
image, info = image_queue.get()
await asyncio.sleep(2) # wait for the image to be updated
image, info = image_queue.get()
image = image["seg_instance_id"]
info = info["seg_instance_id"]
print(info)
img_array = image
img = Image.fromarray(img_array)
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return StreamingResponse(buf, media_type="image/png")
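# --- Client-side usage sketch (illustrative only) ---
# Driving the endpoints above from another process with httpx; host and port
# match the uvicorn.run() call below, and the object name is an assumption.
#
#   import httpx
#
#   httpx.post("http://localhost:8000/add_action", params={"action": "pick,apple"})
#   png_bytes = httpx.get("http://localhost:8000/get_image", timeout=30).content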
if __name__ == "__main__":
sim.start()
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| 19,970 | Python | 34.472469 | 132 | 0.565448 |
AshisGhosh/roboai/omnigibson/roboai/visualize_scene_graph.py | import networkx as nx
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from omnigibson.sensors import VisionSensor
def visualize_ascii_scene_graph(scene, G):
# def print_graph_ascii(G):
# for line in nx.generate_adjlist(G):
# print(line)
# # Example usage:
# print_graph_ascii(G)
nx.write_network_text(G)
def visualize_scene_graph(scene, G, show_window=True, realistic_positioning=False):
"""
Converts the graph into an image and shows it in a cv2 window if preferred.
Args:
show_window (bool): Whether a cv2 GUI window containing the visualization should be shown.
realistic_positioning (bool): Whether nodes should be positioned based on their position in the scene (if True)
or placed using a graphviz layout (neato) that makes it easier to read edges & find clusters.
"""
def _draw_graph():
nodes = list(G.nodes)
node_labels = {obj: obj.category for obj in nodes}
# colors = [
# "yellow" if obj.category == "agent"
# else (
# "green" if obj.states.get(object_states.ObjectsInFOVOfRobot, False)
# else "red" if object_states.ObjectsInFOVOfRobot in obj.states
# else "blue"
# )
# for obj in nodes
# ]
positions = (
{obj: (-pose[0][1], pose[0][0]) for obj, pose in G.nodes.data("pose")}
if realistic_positioning
else nx.nx_pydot.pydot_layout(G, prog="neato")
)
nx.drawing.draw_networkx(
G,
pos=positions,
labels=node_labels,
nodelist=nodes,
# node_color=colors,
font_size=4,
arrowsize=5,
node_size=150,
)
edge_labels = {
edge: ", ".join(
f"{state}={value}" for state, value in G.edges[edge]["states"]
)
for edge in G.edges
}
nx.drawing.draw_networkx_edge_labels(
G, pos=positions, edge_labels=edge_labels, font_size=4
)
# Prepare pyplot figure sized to match the robot video.
robot = scene.robots[0]
robot_camera_sensor = next(
s
for s in robot.sensors.values()
if isinstance(s, VisionSensor) and "rgb" in s.modalities
)
robot_view = (robot_camera_sensor.get_obs()[0]["rgb"][..., :3]).astype(np.uint8)
imgheight, imgwidth, _ = robot_view.shape
figheight = 4.8
figdpi = imgheight / figheight
figwidth = imgwidth / figdpi
# Draw the graph onto the figure.
fig = plt.figure(figsize=(figwidth, figheight), dpi=figdpi)
_draw_graph()
fig.canvas.draw()
# Convert the canvas to image
    graph_view = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated
graph_view = graph_view.reshape(fig.canvas.get_width_height()[::-1] + (3,))
assert graph_view.shape == robot_view.shape
plt.close(fig)
# Combine the two images side-by-side
img = np.hstack((robot_view, graph_view))
# Convert to BGR for cv2-based viewing.
if show_window:
import cv2
cv_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow("SceneGraph", cv_img)
cv2.waitKey(1)
return Image.fromarray(img).save(r"test.png")
| 3,343 | Python | 31.153846 | 119 | 0.591684 |
AshisGhosh/roboai/omnigibson/roboai/primitive_patches.py | import numpy as np
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError
def _simplified_place_with_predicate(
self, obj, predicate, near_poses=None, near_poses_threshold=None
):
"""
    Simplified patch: yields actions to release the object in hand, then teleports it to a predefined place location for the target object (navigation and pose sampling are skipped)
Args:
obj (StatefulObject): Object for robot to place the object in its hand on
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"You need to be grasping an object first to place it somewhere.",
)
# Find a spot to put it
# obj_pose = self._sample_pose_with_object_and_predicate(
# predicate,
# obj_in_hand,
# obj,
# near_poses=near_poses,
# near_poses_threshold=near_poses_threshold,
# )
place_locations = {
"table": {
"position": np.array([-0.7, 0.5, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"coffee_table_fqluyq_0": {
"position": np.array([-0.5, -1.1, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"bottom_cabinet_bamfsz_0": {
"position": np.array([-1.8, -0.5, 0.7]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"fridge_xyejdx_0": {
"position": np.array([0.2, 3.2, 1.0]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
"trash_can_zotrbg_0": {
"position": np.array([-1.8, 2.65, 0.5]),
"orientation": np.array([0.0, 0.0, 0.0, 1.0]),
},
}
obj_pose = place_locations[obj.name]
obj_pose = (obj_pose["position"], obj_pose["orientation"])
# Get close, release the object.
# yield from self._navigate_if_needed(obj, pose_on_obj=obj_pose)
yield from self._release()
# Actually move the object to the spot and step a bit to settle it.
obj_in_hand.set_position_orientation(*obj_pose)
# yield from self._settle_robot()
def _quick_settle_robot(self):
"""
Yields a no op action for a few steps to allow the robot and physics to settle
Returns:
np.array or None: Action array for one step for the robot to do nothing
"""
print("Settling robot")
for _ in range(10):
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
print("Settled robot")
| 2,741 | Python | 33.275 | 109 | 0.588107 |
AshisGhosh/roboai/shared/pyproject.toml | [tool.poetry]
name = "utils"
version = "0.1.0"
description = ""
authors = ["AshisGhosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "<3.13,>=3.10.0"
httpx = "^0.27.0"
python-dotenv = "^1.0.1"
litellm = "^1.34.22"
pillow = "^10.3.0"
opencv-python = "^4.9.0.80"
ollama = "^0.1.8"
gradio-client = "^0.16.1"
replicate = "^0.25.1"
gradio = "^4.29.0"
fastembed = "^0.2.7"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 489 | TOML | 18.599999 | 46 | 0.627812 |
AshisGhosh/roboai/shared/utils/isaacsim_client.py | import asyncio
from shared.utils.http_client import post_request, get_image_request
import io
from PIL import Image
from functools import wraps
SERVER_NAME = "http://localhost:8080"
async def _get_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_image")
return Image.open(io.BytesIO(img_data))
def get_image():
return asyncio.run(_get_image())
async def _add_task(task: str):
return await post_request(
f"{SERVER_NAME}/add_task",
params=task,
)
def add_task(task: str):
return asyncio.run(_add_task(task))
def add_test_mode(func):
@wraps(func)
def wrapper(*args, **kwargs):
if globals().get("test_mode", False):
print("TEST MODE ENABLED")
return True
else:
return func(*args, **kwargs)
return wrapper
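# --- Usage sketch (illustrative only) ---
# add_test_mode short-circuits a decorated call and returns True whenever a
# module-level `test_mode` flag is truthy. None of the functions below apply
# it yet; a hypothetical use would be:
#
#   @add_test_mode
#   def pick(object_name: str):
#       ...
#
#   import shared.utils.isaacsim_client as sim
#   sim.test_mode = True  # decorated calls now return True immediately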
def pick(object_name: str):
print(f"picking {object_name}")
task = {"task": "pick"}
add_task(task)
def place(object_name: str):
print(f"placing {object_name}")
print("placing object")
task = {"task": "place"}
print(f"Dummy task: {task}")
| 1,112 | Python | 19.611111 | 68 | 0.625 |
AshisGhosh/roboai/shared/utils/image_utils.py | import io
import base64
from PIL import Image
def pil_to_b64(image: Image) -> str:
"""Converts a PIL image to a base64 string."""
# Save the image to a bytes buffer
buffer = io.BytesIO()
image.save(
buffer, format=image.format
) # You can change the format to PNG or other supported formats
# Encode the buffer to base64
img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
# Optionally, prepend the URI scheme to make it ready for HTML or data transfer
img_base64 = f"data:image/jpeg;base64,{img_str}"
return img_base64
def b64_to_pil(image_b64: str) -> Image:
"""Converts a base64 string to a PIL image."""
# Remove the URI scheme
img_str = image_b64.split(",")[1]
# Decode the base64 string
img_bytes = base64.b64decode(img_str)
# Convert bytes to PIL image
image = Image.open(io.BytesIO(img_bytes))
return image
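# --- Usage sketch (illustrative only) ---
# Round trip through the two helpers above. pil_to_b64 relies on image.format,
# so the image should come from a file (Image.new() leaves .format as None);
# the file name is a placeholder.
#
#   original = Image.open("example.jpg")
#   encoded = pil_to_b64(original)   # "data:image/jpeg;base64,..."
#   restored = b64_to_pil(encoded)
#   assert restored.size == original.size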
| 914 | Python | 26.727272 | 83 | 0.665208 |
AshisGhosh/roboai/shared/utils/robotic_grasping_client.py | import io
import asyncio
from shared.utils.http_client import get_request, post_request
from typing import Any, Dict
from PIL import Image
import logging
log = logging.getLogger("robotic_grasping_client")
log.setLevel(logging.DEBUG)
SERVER_NAME = "http://localhost:8003"
async def _get_grasps_from_rgb_and_depth(
rgb_image: Image, depth_image: Image
) -> Dict[str, Any]:
log.debug("Getting grasp from GR-ConvNet")
timeout = 30.0
image_byte_array = io.BytesIO()
rgb_image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
log.debug("RGB image byte array saved")
depth_image_byte_array = io.BytesIO()
depth_image.save(depth_image_byte_array, format="JPEG")
depth_image_byte_array = depth_image_byte_array.getvalue()
log.debug("Depth image byte array saved")
files = {
"rgb_image": ("rgb_image.jpg", image_byte_array, "image/jpeg"),
"depth_image": ("depth_image.jpg", depth_image_byte_array, "image/jpeg"),
}
response = await post_request(
f"{SERVER_NAME}/get_grasps", files=files, timeout=timeout
)
return response
def get_grasps_from_rgb_and_depth(
rgb_image: Image, depth_image: Image
) -> Dict[str, Any]:
return asyncio.run(_get_grasps_from_rgb_and_depth(rgb_image, depth_image))
async def _check_server() -> str:
response = await get_request(f"{SERVER_NAME}/")
return response
| 1,432 | Python | 26.557692 | 81 | 0.682263 |
AshisGhosh/roboai/shared/utils/robosim_client.py | import asyncio
from shared.utils.http_client import get_request, post_request, get_image_request
import io
from PIL import Image
SERVER_NAME = "http://localhost:8000"
async def _get_objects_on_table() -> list[str]:
return await get_request(f"{SERVER_NAME}/get_objects")
def get_objects_on_table():
return asyncio.run(_get_objects_on_table())
async def _get_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_image")
return Image.open(io.BytesIO(img_data))
def get_image():
return asyncio.run(_get_image())
async def _get_grasp_image() -> Image:
img_data = await get_image_request(f"{SERVER_NAME}/get_grasp_image")
return Image.open(io.BytesIO(img_data))
def get_grasp_image():
return asyncio.run(_get_grasp_image())
async def _open_gripper():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "open gripper", "type": "open_gripper", "args": ""},
)
def open_gripper():
return asyncio.run(_open_gripper())
async def _close_gripper():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "close gripper", "type": "close_gripper", "args": ""},
)
def close_gripper():
return asyncio.run(_close_gripper())
async def _go_to_pick_center():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to pick center", "type": "go_to_pick_center", "args": ""},
)
async def _get_grasp(object_name: str):
return await post_request(
f"{SERVER_NAME}/add_task",
data={
"name": f"get grasp {object_name}",
"type": "get_grasp",
"args": object_name,
},
)
def get_grasp(object_name: str):
asyncio.run(_go_to_pick_center())
return asyncio.run(_get_grasp(object_name))
async def _go_to_pre_grasp():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to pre grasp", "type": "go_to_pre_grasp", "args": ""},
)
def go_to_pre_grasp():
return asyncio.run(_go_to_pre_grasp())
async def _go_to_grasp_position():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to grasp pos", "type": "go_to_grasp_position", "args": ""},
)
def go_to_grasp_position():
return asyncio.run(_go_to_grasp_position())
async def _go_to_drop():
return await post_request(
f"{SERVER_NAME}/add_task",
data={"name": "go to drop", "type": "go_to_drop", "args": ""},
)
def go_to_drop():
return asyncio.run(_go_to_drop())
def pick(object_name: str):
get_grasp(object_name)
go_to_pre_grasp()
open_gripper()
go_to_grasp_position()
close_gripper()
go_to_pre_grasp()
def place(object_name: str):
go_to_drop()
open_gripper()
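# --- Usage sketch (illustrative only) ---
# End-to-end flow against the robosim server; object names come from whatever
# the scene currently reports.
#
#   objects = get_objects_on_table()
#   if objects:
#       pick(objects[0])   # queues the grasp / pre-grasp / gripper tasks
#       place(objects[0])  # moves to the drop location and opens the gripper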
| 2,818 | Python | 21.552 | 85 | 0.606104 |
AshisGhosh/roboai/shared/utils/gradio_client.py | from PIL import Image
from typing import Dict, Any
import gradio_client
from gradio_client import Client
import time
def moondream_answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
client = Client("vikhyatk/moondream2")
# client = Client("Kartik2503/ImageToText")
image.save("/app/shared/data/tmp.png")
start_time = time.time()
result = client.predict(
gradio_client.file("/app/shared/data/tmp.png"),
question,
api_name="/answer_question",
)
print(f"[Gradio] Time taken: {time.time() - start_time}")
return {"result": result}
def qwen_vl_max_answer_question_from_image(
image: Image, question: str
) -> Dict[str, Any]:
client = Client("https://qwen-qwen-vl-max.hf.space/--replicas/fi9fr/")
image.save("/app/shared/data/tmp.png")
start_time = time.time()
# result = client.predict(
# fn_index=3
# )
# json_str = "/tmp/gradio/tmp0af5pyui.json"
# result = client.predict(
# json_str,
# img_path, # str (filepath on your computer (or URL) of file) in '📁 Upload (上传文件)' Uploadbutton component
# fn_index=5
# )
result = client.predict(
# json_str,
# "Hi",
fn_index=2
)
print(f"[Gradio] Time taken: {time.time() - start_time}")
return {"result": result}
| 1,334 | Python | 25.699999 | 111 | 0.622939 |
AshisGhosh/roboai/shared/utils/model_server_client.py | import io
import asyncio
from PIL import Image
from typing import Any, Dict
from shared.utils.http_client import post_request
import logging
log = logging.getLogger("model_server_client")
log.setLevel(logging.INFO)
SERVER_NAME = "http://localhost:8002"
async def _answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
timeout = 120.0
image_byte_array = io.BytesIO()
image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
files = {"file": ("image.jpg", image_byte_array, "image/jpeg")}
response = await post_request(
f"{SERVER_NAME}/answer_question",
files=files,
params={"question": question},
timeout=timeout,
)
return response
def answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
return asyncio.run(_answer_question_from_image(image, question))
async def _embed(text: str) -> Dict[str, Any]:
log.debug(f"async _embed call: Embedding text: {text}")
return await post_request(f"{SERVER_NAME}/embed", params={"text": text})
def embed(text: str) -> Dict[str, Any]:
log.debug(f"Embedding text: {text}")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(_embed(text))
loop.close()
return result
| 1,331 | Python | 26.749999 | 85 | 0.676183 |
AshisGhosh/roboai/shared/utils/http_client.py | import httpx
import logging
from typing import Any, Dict, Optional
from httpx import Timeout
from dotenv import load_dotenv
load_dotenv()
log = logging.getLogger("http_client")
log.setLevel(logging.DEBUG)
TIMEOUT_DEFAULT = 5.0
async def post_request(
url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
files: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
timeout: float = TIMEOUT_DEFAULT,
) -> Dict[str, Any]:
timeout = Timeout(timeout)
log.debug(f"Sending POST request to {url}:")
log.debug(f" headers: {headers}")
log.debug(f" params: {params}")
log.debug(f" data: {data}")
if files:
log.debug(f" files len: {len(files)}")
log.debug(f" timeout: {timeout}")
try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(
url,
params=params,
json=data,
files=files,
headers=headers,
timeout=timeout,
)
if response.status_code == 200:
response = response.json()
log.debug(f"Response: {response}")
return response
raise Exception(f"Error:{response.status_code}: {response.text}")
except httpx.ReadTimeout as e:
log.debug(
f"Timeout sending POST request to {url} with params: {params} and timeout: {timeout}: {e}"
)
return {
"success": False,
"text": f"httpx.ReadTimeout: Timeout sending POST request to {url} with params: {params} and timeout: {timeout}: {e}",
}
except Exception as e:
log.debug(
f"Error sending POST request to {url} with params: {params} and timeout: {timeout}: {e}"
)
return {
"success": False,
"text": f"Error sending POST request to {url} with params: {params} and timeout: {timeout}: {e}",
}
async def get_request(url: str, timeout: float = TIMEOUT_DEFAULT) -> Dict[str, Any]:
log.debug(f"Sending GET request to {url}")
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.get(url, timeout=timeout)
log.debug(response)
if response.status_code == 200:
log.debug(f"Response: {response.json()}")
return response.json()
else:
log.debug(f"Error: {response.text}")
return {"success": False, "text": f"{response.status}{response.text}"}
async def get_image_request(url: str, timeout: float = TIMEOUT_DEFAULT) -> bytes:
log.debug(f"Sending GET request to {url}")
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.get(url, timeout=timeout)
log.debug(response)
if response.status_code == 200:
image_data = bytearray()
for chunk in response.iter_bytes():
image_data += chunk
log.debug(f"Response: ({type(image_data)}) {image_data[:10]}")
return image_data
else:
log.debug(f"Error: {response.text}")
return b""
| 3,228 | Python | 32.989473 | 130 | 0.579926 |
AshisGhosh/roboai/shared/utils/grasp_client.py | import io
import asyncio
from shared.utils.http_client import get_request, post_request
from typing import Any, Dict
from PIL import Image
SERVER_NAME = "http://localhost:8005"
async def _get_grasp_from_image(image: Image) -> Dict[str, Any]:
timeout = 30.0
image_byte_array = io.BytesIO()
image.save(image_byte_array, format="JPEG")
image_byte_array = image_byte_array.getvalue()
files = {"file": ("image.jpg", image_byte_array, "image/jpeg")}
response = await post_request(f"{SERVER_NAME}/detect", files=files, timeout=timeout)
return response
def get_grasp_from_image(image: Image) -> Dict[str, Any]:
return asyncio.run(_get_grasp_from_image(image))
async def _check_server() -> str:
response = await get_request(f"{SERVER_NAME}/")
return response
| 801 | Python | 24.870967 | 88 | 0.692884 |
AshisGhosh/roboai/shared/utils/omnigibson_client.py | import asyncio
from shared.utils.http_client import post_request, get_image_request, get_request
import io
from PIL import Image
from shared.utils.llm_utils import get_closest_text_sync as get_closest_text
SERVER_NAME = "http://localhost:8000"
OMNIGIBSON_TIMEOUT = 30
async def _get_image() -> Image:
img_data = await get_image_request(
f"{SERVER_NAME}/get_image", timeout=OMNIGIBSON_TIMEOUT
)
return Image.open(io.BytesIO(img_data))
def get_image():
response = asyncio.run(_get_image())
if isinstance(response, dict):
return response.get("success", True)
return response
async def _get_visible_objects() -> dict:
return await get_request(
f"{SERVER_NAME}/get_visible_objects", timeout=OMNIGIBSON_TIMEOUT
)
def get_visible_objects():
response = asyncio.run(_get_visible_objects())
if "success" in response.keys():
return response.get("success", True)
return response["objects"]
async def _get_obj_in_hand() -> dict:
return await get_request(
f"{SERVER_NAME}/get_obj_in_hand", timeout=OMNIGIBSON_TIMEOUT
)
def get_obj_in_hand():
try:
response = asyncio.run(_get_obj_in_hand())
return response["obj_in_hand"]
except Exception as e:
print(f"Error getting object in hand: {e}")
return False
async def _wait_until_ready() -> dict:
await asyncio.sleep(1)
return await get_request(
f"{SERVER_NAME}/wait_until_ready", timeout=OMNIGIBSON_TIMEOUT
)
def wait_until_ready():
try:
response = asyncio.run(_wait_until_ready())
return response["is_ready"]
except Exception as e:
print(f"Error waiting until ready: {e}")
return False
async def _add_action(action: str):
return await post_request(
f"{SERVER_NAME}/add_action", params=action, timeout=OMNIGIBSON_TIMEOUT
)
def add_action(action: str):
response = asyncio.run(_add_action(action))
if isinstance(response, dict):
return response.get("success", True)
return response
def pick(object_name: str):
print(f"Attempting to pick {object_name}. Referencing against visible objects.")
objects = get_visible_objects()
object_name = get_closest_text(object_name, objects, threshold=0.2)
print(f"picking object {object_name}")
action = {"action": f"pick,{object_name}"}
return add_action(action)
def place(location: str):
print(f"placing object in {location}")
print("placing object")
action = {"action": f"place,{location}"}
return add_action(action)
def navigate_to(object_name: str, location: str = None):
print(f"navigating to {object_name}, {location}")
if location:
action = {"action": f"navigate_to,{object_name},{location}"}
else:
action = {"action": f"navigate_to_object,{object_name}"}
return add_action(action)
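# --- Usage sketch (illustrative only) ---
# A typical pick-and-place round trip against the simulation server above;
# object and location names are assumptions and must exist in the scene.
#
#   if wait_until_ready():
#       pick("cologne bottle")  # fuzzy-matched against get_visible_objects()
#       wait_until_ready()
#       place("table")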
| 2,883 | Python | 25.703703 | 84 | 0.660076 |
AshisGhosh/roboai/shared/utils/huggingface_client.py | import asyncio
from shared.utils.http_client import post_request
API_URL = "https://api-inference.huggingface.co/models/Efficient-Large-Model/VILA-2.7b"
headers = {"Authorization": "Bearer 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"}
async def _vila_query(text, image=None):
json = {"inputs": text}
response = await post_request(API_URL, headers=headers, data=json)
print(response)
return response
def vila_query(text, image=None):
return asyncio.run(_vila_query(text, image))
| 497 | Python | 28.294116 | 87 | 0.738431 |
AshisGhosh/roboai/shared/utils/replicate_client.py | from PIL import Image
from typing import Dict, Any
import replicate
import time
def moondream_answer_question_from_image(image: Image, question: str) -> Dict[str, Any]:
image.save("/app/shared/data/tmp.png")
image_handler = open("/app/shared/data/tmp.png", "rb")
input = {"image": image_handler, "prompt": question}
start_time = time.time()
output = replicate.run(
"lucataco/moondream2:392a53ac3f36d630d2d07ce0e78142acaccc338d6caeeb8ca552fe5baca2781e",
input=input,
)
output = "".join(output)
print(f"[Replicate] Time taken: {time.time() - start_time}")
return {"result": output}
| 636 | Python | 29.333332 | 95 | 0.683962 |
AshisGhosh/roboai/shared/utils/llm_utils.py | import litellm
import ollama
import logging
import numpy as np
from fastembed import TextEmbedding
import asyncio
from shared.utils.model_server_client import _embed
from dotenv import load_dotenv
load_dotenv("shared/.env") # take environment variables from .env.
log = logging.getLogger("llm_utils")
log.setLevel(logging.INFO)
def log_debug(msg):
log.debug(msg)
# print(msg)
def log_info(msg):
log.info(msg)
# print(msg)
async def get_embedding_sentence_transformers(text):
log_debug(f"Getting sentence_transformer/HF embedding for text: {text}")
response = await _embed(text)
return response["embedding"]
def get_embedding_ollama(text):
log_debug(f"Getting ollama embedding for text: {text}")
response = ollama.embeddings(model="mxbai-embed-large", prompt=text)
return response["embedding"]
def get_embedding_litellm(text):
log_debug(f"Getting litellm/HF embedding for text: {text}")
response = litellm.embedding(
model="huggingface/mixedbread-ai/mxbai-embed-large-v1", input=[text]
)
log_debug(f"Embedding received: {response}")
return response["data"][0]["embedding"]
global fast_embed_model
fast_embed_model = None
def get_embedding_fastembed(text):
global fast_embed_model
if not fast_embed_model:
fast_embed_model = TextEmbedding("mixedbread-ai/mxbai-embed-large-v1")
embed = list(fast_embed_model.embed(text))[0]
return embed
async def get_embedding(text):
log_debug(f"Getting embedding for text: {text}")
return get_embedding_fastembed(text)
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
dot_product = np.dot(v1, v2)
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
return dot_product / (norm_v1 * norm_v2)
async def get_closest_text(
text: str, text_list: list[str], k: int = 1, threshold: float = 0.5
) -> str:
log_info(f"Getting closest text for: '{text}' in list: {text_list}")
query_vector = await get_embedding(text)
log_debug(f"Query vector: {query_vector}")
vectors = [await get_embedding(text) for text in text_list]
similarities = [cosine_similarity(query_vector, vector) for vector in vectors]
log_debug(f"Similarities: {similarities}")
if k > 1:
closest_indices = np.argsort(similarities)[-k:]
log_info(f"Closest texts: {[text_list[i] for i in closest_indices]}")
return [text_list[i] for i in closest_indices]
closest_index = np.argmax(similarities)
if similarities[closest_index] < threshold:
log_info(f"Similarity below threshold: {similarities[closest_index]}")
return None
log_info(f"Closest text: {text_list[closest_index]}")
return text_list[closest_index]
def get_closest_text_sync(
text: str, text_list: list[str], k: int = 1, threshold: float = 0.5
):
return asyncio.run(get_closest_text(text, text_list, k, threshold))
async def get_most_important(texts: list[str] | str, k: int = 1):
log_info(f"Getting most important text from: {texts}")
if isinstance(texts, list):
texts = " ".join(texts)
texts_embedding = await get_embedding(texts)
texts = texts.split()
vectors = [await get_embedding(text) for text in texts]
similarities = [cosine_similarity(texts_embedding, vector) for vector in vectors]
log_debug(f"Similarities: {similarities}")
closest_indices = np.argsort(similarities)[-k:]
log_info(f"Closest texts: {[texts[i] for i in closest_indices]}")
return [texts[i] for i in closest_indices]
def get_most_important_sync(texts: list[str], k: int = 1):
return asyncio.run(get_most_important(texts, k))
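# --- Usage sketch (illustrative only) ---
# get_closest_text_sync embeds the query and every candidate with fastembed and
# returns the candidate with the highest cosine similarity, or None if the best
# score falls below `threshold`. The candidate strings are made up.
#
#   candidates = ["apple", "bottle_of_cologne", "tomato_can"]
#   match = get_closest_text_sync("cologne bottle", candidates, threshold=0.2)
#   # match should be "bottle_of_cologne"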
| 3,662 | Python | 29.781512 | 85 | 0.688422 |
AshisGhosh/roboai/grasping/scale_balanced_grasp/pyproject.toml | [tool.poetry]
name = "scale-balanced-grasp"
version = "0.1.0"
description = ""
authors = ["Ashis Ghosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 279 | TOML | 17.666665 | 47 | 0.691756 |
AshisGhosh/roboai/grasping/grasp_server/README.md | Download weights linked here: https://github.com/stefan-ainetter/grasp_det_seg_cnn/tree/1cfeb2f239e0745e127055ad597461f1585a7e94
Model Weights: https://files.icg.tugraz.at/d/10296a970cc242aa90ff/
ResNet101 Weights: https://files.icg.tugraz.at/d/1e84f72c1109485ba9f9/ | 269 | Markdown | 43.999993 | 128 | 0.828996 |
AshisGhosh/roboai/grasping/grasp_server/app/main.py | #!/usr/bin/python -u
import io
import base64
import numpy as np
import cv2
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from grasp_server.grasp_det_seg import GraspServer
import logging
logging.basicConfig(level=logging.INFO)
# Create GraspServer instance
grasp = GraspServer()
# Create FastAPI instance
app = FastAPI()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the grasp server."}
@app.post("/detect")
async def detect(file: UploadFile = File(...)):
# Read the image file
image_bytes = await file.read()
nparr = np.frombuffer(image_bytes, np.uint8)
# Decode the image
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Perform object detection
result, img = grasp.detect(image)
# Return the result
return JSONResponse(content={"result": result, "image": get_image_response(img)})
@app.post("/test")
async def test():
result, img = grasp.test_detect()
return JSONResponse(content={"result": result, "image": get_image_response(img)})
def get_image_response(image):
buf = io.BytesIO()
image.save(buf, format="JPEG")
buf.seek(0)
base64_image = base64.b64encode(buf.getvalue()).decode("utf-8")
return base64_image
# return StreamingResponse(buf, media_type="image/jpeg")
| 1,834 | Python | 23.797297 | 85 | 0.698473 |
AshisGhosh/roboai/grasping/grasp_server/grasp_server/test_grasp_det_seg.py | import argparse
import time
import os
import numpy as np
import scipy
import cv2
from functools import partial
import torch
import torch.optim as optim
import torch.utils.data as data
from torch import distributed
import grasp_det_seg.models as models
from grasp_det_seg.algos.detection import (
PredictionGenerator,
ProposalMatcher,
DetectionLoss,
)
from grasp_det_seg.algos.fpn import DetectionAlgoFPN, RPNAlgoFPN
from grasp_det_seg.algos.rpn import AnchorMatcher, ProposalGenerator, RPNLoss
from grasp_det_seg.algos.semantic_seg import SemanticSegAlgo, SemanticSegLoss
from grasp_det_seg.config import load_config
from grasp_det_seg.data_OCID import iss_collate_fn, OCIDTestDataset, OCIDTestTransform
from grasp_det_seg.data_OCID.OCID_class_dict import colors_list
from grasp_det_seg.data_OCID.sampler import DistributedARBatchSampler
from grasp_det_seg.models.det_seg import DetSegNet
from grasp_det_seg.modules.fpn import FPN, FPNBody
from grasp_det_seg.modules.heads import RPNHead, FPNROIHead, FPNSemanticHeadDeeplab
from grasp_det_seg.utils import logging
from grasp_det_seg.utils.meters import AverageMeter
from grasp_det_seg.utils.misc import (
config_to_string,
scheduler_from_config,
norm_act_from_config,
freeze_params,
NORM_LAYERS,
OTHER_LAYERS,
)
from grasp_det_seg.utils.parallel import DistributedDataParallel
from grasp_det_seg.utils.snapshot import resume_from_snapshot
parser = argparse.ArgumentParser(
description="OCID detection and segmentation test script"
)
parser.add_argument("--local_rank", type=int)
parser.add_argument(
"--log_dir", type=str, default=".", help="Write logs to the given directory"
)
parser.add_argument(
"config", metavar="FILE", type=str, help="Path to configuration file"
)
parser.add_argument("model", metavar="FILE", type=str, help="Path to model file")
parser.add_argument("data", metavar="DIR", type=str, help="Path to dataset")
parser.add_argument("out_dir", metavar="DIR", type=str, help="Path to output directory")
def save_param_file(writer, param_file):
data_sum = ""
with open(param_file) as fp:
Lines = fp.readlines()
for line in Lines:
data_sum += line + " \n"
writer.add_text("dataset_parameters", data_sum)
return
def ensure_dir(dir_path):
try:
os.mkdir(dir_path)
except FileExistsError:
pass
def Rotate2D(pts, cnt, ang):
    # Rotate 2D points `pts` about center `cnt` by `ang` degrees
    # (numpy replaces the scipy.dot/array/cos/sin aliases, which were removed from SciPy).
    ang = np.deg2rad(ang)
    rot = np.array([[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]])
    return np.dot(pts - cnt, rot) + cnt
def save_prediction_image(raw_pred, img_abs_path, img_root_path, im_size, out_dir):
num_classes_theta = 18
# grasp candidate confidence threshold
threshold = 0.06
iou_seg_threshold = 100 # in px
for i, (sem_pred, bbx_pred, cls_pred, obj_pred) in enumerate(
zip(
raw_pred["sem_pred"],
raw_pred["bbx_pred"],
raw_pred["cls_pred"],
raw_pred["obj_pred"],
)
):
item = os.path.join(img_root_path[i], img_abs_path[i])
im_size_ = im_size[i]
ensure_dir(out_dir)
seq_path, im_name = item.split(",")
sem_pred = np.asarray(sem_pred.detach().cpu().numpy(), dtype=np.uint8)
seg_mask_vis = np.zeros((im_size_[0], im_size_[1], 3))
cls_labels = np.unique(sem_pred)
img_path = os.path.join(img_root_path[i], seq_path, "rgb", im_name)
mask_path = os.path.join(
img_root_path[i], seq_path, "seg_mask_labeled_combi", im_name
)
img = cv2.imread(img_path)
img_best_boxes = np.copy(img)
mask_gt = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
for cnt, label in enumerate(cls_labels):
if label == 0:
continue
seg_mask_vis[sem_pred == label] = colors_list[label]
mask_per_label = np.zeros_like(sem_pred)
mask_per_label_gt = np.zeros_like(sem_pred)
mask_per_label[sem_pred == label] = 1
mask_per_label_gt[mask_gt == label] = 1
if sum(map(sum, mask_per_label)) < iou_seg_threshold:
continue
ensure_dir(out_dir)
out_path = os.path.join(out_dir, im_name[:-4] + ".png")
img_mask = img * 0.25 + seg_mask_vis * 0.75
if bbx_pred is None:
continue
anno_per_class_dir = os.path.join(
os.path.join(
img_root_path[i], seq_path, "Annotations_per_class", im_name[:-4]
)
)
for class_dir in os.listdir(anno_per_class_dir):
if not os.path.isdir(os.path.join(anno_per_class_dir, class_dir)):
continue
best_confidence = 0.0
r_bbox_best = None
for bbx_pred_i, cls_pred_i, obj_pred_i in zip(bbx_pred, cls_pred, obj_pred):
if obj_pred_i.item() > threshold:
pt1 = (int(bbx_pred_i[0]), int(bbx_pred_i[1]))
pt2 = (int(bbx_pred_i[2]), int(bbx_pred_i[3]))
cls = cls_pred_i.item()
if cls > 17:
assert False
theta = ((180 / num_classes_theta) * cls) + 5
                    pts = np.array(
[
[pt1[0], pt1[1]],
[pt2[0], pt1[1]],
[pt2[0], pt2[1]],
[pt1[0], pt2[1]],
]
)
                    cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
r_bbox_ = Rotate2D(pts, cnt, 90 - theta)
r_bbox_ = r_bbox_.astype("int16")
if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
continue
if sem_pred[int(cnt[1]), int(cnt[0])] == int(class_dir):
if obj_pred_i.item() >= best_confidence:
best_confidence = obj_pred_i.item()
r_bbox_best = r_bbox_
if r_bbox_best is not None:
cv2.line(
img_best_boxes,
tuple(r_bbox_best[0]),
tuple(r_bbox_best[1]),
(255, 0, 0),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[1]),
tuple(r_bbox_best[2]),
(0, 0, 255),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[2]),
tuple(r_bbox_best[3]),
(255, 0, 0),
2,
)
cv2.line(
img_best_boxes,
tuple(r_bbox_best[3]),
tuple(r_bbox_best[0]),
(0, 0, 255),
2,
)
res = np.hstack((img, img_best_boxes, img_mask))
scale_percent = 75 # percent of original size
width = int(res.shape[1] * scale_percent / 100)
height = int(res.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(res, dim, interpolation=cv2.INTER_AREA)
cv2.imwrite(out_path, resized)
return
def log_debug(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().debug(msg, *args, **kwargs)
def log_info(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().info(msg, *args, **kwargs)
def make_config(args):
log_debug("Loading configuration from %s", args.config)
conf = load_config(args.config, args.config)
log_debug("\n%s", config_to_string(conf))
return conf
def make_dataloader(args, config, rank, world_size):
config = config["dataloader"]
log_debug("Creating dataloaders for dataset in %s", args.data)
# Validation dataloader
val_tf = OCIDTestTransform(
config.getint("shortest_size"),
config.getint("longest_max_size"),
config.getstruct("rgb_mean"),
config.getstruct("rgb_std"),
)
config["root_path"] = "/app/data/OCID_grasp"
val_db = OCIDTestDataset(args.data, config["root_path"], config["test_set"], val_tf)
val_sampler = DistributedARBatchSampler(
val_db, config.getint("val_batch_size"), world_size, rank, False
)
val_dl = data.DataLoader(
val_db,
batch_sampler=val_sampler,
collate_fn=iss_collate_fn,
pin_memory=True,
num_workers=config.getint("num_workers"),
)
return val_dl
def make_model(config):
body_config = config["body"]
fpn_config = config["fpn"]
rpn_config = config["rpn"]
roi_config = config["roi"]
sem_config = config["sem"]
general_config = config["general"]
classes = {
"total": int(general_config["num_things"]) + int(general_config["num_stuff"]),
"stuff": int(general_config["num_stuff"]),
"thing": int(general_config["num_things"]),
"semantic": int(general_config["num_semantic"]),
}
# BN + activation
norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)
# Create backbone
log_debug("Creating backbone model %s", body_config["body"])
body_fn = models.__dict__["net_" + body_config["body"]]
body_params = (
body_config.getstruct("body_params") if body_config.get("body_params") else {}
)
body = body_fn(norm_act=norm_act_static, **body_params)
if body_config.get("weights"):
body_config["weights"] = "/app/data/weights/resnet101"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
body.load_state_dict(torch.load(body_config["weights"], map_location=device))
# Freeze parameters
for n, m in body.named_modules():
for mod_id in range(1, body_config.getint("num_frozen") + 1):
if ("mod%d" % mod_id) in n:
freeze_params(m)
body_channels = body_config.getstruct("out_channels")
# Create FPN
fpn_inputs = fpn_config.getstruct("inputs")
fpn = FPN(
[body_channels[inp] for inp in fpn_inputs],
fpn_config.getint("out_channels"),
fpn_config.getint("extra_scales"),
norm_act_static,
fpn_config["interpolation"],
)
body = FPNBody(body, fpn, fpn_inputs)
# Create RPN
proposal_generator = ProposalGenerator(
rpn_config.getfloat("nms_threshold"),
rpn_config.getint("num_pre_nms_train"),
rpn_config.getint("num_post_nms_train"),
rpn_config.getint("num_pre_nms_val"),
rpn_config.getint("num_post_nms_val"),
rpn_config.getint("min_size"),
)
anchor_matcher = AnchorMatcher(
rpn_config.getint("num_samples"),
rpn_config.getfloat("pos_ratio"),
rpn_config.getfloat("pos_threshold"),
rpn_config.getfloat("neg_threshold"),
rpn_config.getfloat("void_threshold"),
)
rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
rpn_algo = RPNAlgoFPN(
proposal_generator,
anchor_matcher,
rpn_loss,
rpn_config.getint("anchor_scale"),
rpn_config.getstruct("anchor_ratios"),
fpn_config.getstruct("out_strides"),
rpn_config.getint("fpn_min_level"),
rpn_config.getint("fpn_levels"),
)
rpn_head = RPNHead(
fpn_config.getint("out_channels"),
len(rpn_config.getstruct("anchor_ratios")),
1,
rpn_config.getint("hidden_channels"),
norm_act_dynamic,
)
# Create detection network
prediction_generator = PredictionGenerator(
roi_config.getfloat("nms_threshold"),
roi_config.getfloat("score_threshold"),
roi_config.getint("max_predictions"),
)
proposal_matcher = ProposalMatcher(
classes,
roi_config.getint("num_samples"),
roi_config.getfloat("pos_ratio"),
roi_config.getfloat("pos_threshold"),
roi_config.getfloat("neg_threshold_hi"),
roi_config.getfloat("neg_threshold_lo"),
roi_config.getfloat("void_threshold"),
)
roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
roi_size = roi_config.getstruct("roi_size")
roi_algo = DetectionAlgoFPN(
prediction_generator,
proposal_matcher,
roi_loss,
classes,
roi_config.getstruct("bbx_reg_weights"),
roi_config.getint("fpn_canonical_scale"),
roi_config.getint("fpn_canonical_level"),
roi_size,
roi_config.getint("fpn_min_level"),
roi_config.getint("fpn_levels"),
)
roi_head = FPNROIHead(
fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic
)
# Create semantic segmentation network
sem_loss = SemanticSegLoss(ohem=sem_config.getfloat("ohem"))
sem_algo = SemanticSegAlgo(sem_loss, classes["semantic"])
sem_head = FPNSemanticHeadDeeplab(
fpn_config.getint("out_channels"),
sem_config.getint("fpn_min_level"),
sem_config.getint("fpn_levels"),
classes["semantic"],
pooling_size=sem_config.getstruct("pooling_size"),
norm_act=norm_act_static,
)
# Create final network
return DetSegNet(
body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes
)
def make_optimizer(config, model, epoch_length):
body_config = config["body"]
opt_config = config["optimizer"]
sch_config = config["scheduler"]
# Gather parameters from the network
norm_parameters = []
other_parameters = []
for m in model.modules():
if any(isinstance(m, layer) for layer in NORM_LAYERS):
norm_parameters += [p for p in m.parameters() if p.requires_grad]
elif any(isinstance(m, layer) for layer in OTHER_LAYERS):
other_parameters += [p for p in m.parameters() if p.requires_grad]
assert len(norm_parameters) + len(other_parameters) == len(
[p for p in model.parameters() if p.requires_grad]
), "Not all parameters that require grad are accounted for in the optimizer"
# Set-up optimizer hyper-parameters
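    # Two parameter groups: norm-layer weights get lr 0 when BN is frozen and weight
    # decay only if "weight_decay_norm" is set; all other layers use the base settings.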
parameters = [
{
"params": norm_parameters,
"lr": opt_config.getfloat("lr")
if not body_config.getboolean("bn_frozen")
else 0.0,
"weight_decay": opt_config.getfloat("weight_decay")
if opt_config.getboolean("weight_decay_norm")
else 0.0,
},
{
"params": other_parameters,
"lr": opt_config.getfloat("lr"),
"weight_decay": opt_config.getfloat("weight_decay"),
},
]
optimizer = optim.SGD(
parameters,
momentum=opt_config.getfloat("momentum"),
nesterov=opt_config.getboolean("nesterov"),
)
scheduler = scheduler_from_config(sch_config, optimizer, epoch_length)
assert sch_config["update_mode"] in ("batch", "epoch")
batch_update = sch_config["update_mode"] == "batch"
total_epochs = sch_config.getint("epochs")
return optimizer, scheduler, batch_update, total_epochs
def test(model, dataloader, **varargs):
model.eval()
dataloader.batch_sampler.set_epoch(0)
data_time_meter = AverageMeter(())
batch_time_meter = AverageMeter(())
data_time = time.time()
for it, batch in enumerate(dataloader):
print("Batch no. : " + str(it))
with torch.no_grad():
# Extract data
img = batch["img"].cuda(device=varargs["device"], non_blocking=True)
# img = batch["img"]
abs_paths = batch["abs_path"]
root_paths = batch["root_path"]
im_size = batch["im_size"]
data_time_meter.update(torch.tensor(time.time() - data_time))
batch_time = time.time()
# Run network
_, pred, conf = model(img=img, do_loss=False, do_prediction=True)
# Update meters
batch_time_meter.update(torch.tensor(time.time() - batch_time))
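            # write out this batch's predictions via the injected save callback (save_prediction_image)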
varargs["save_function"](pred, abs_paths, root_paths, im_size)
data_time = time.time()
def test_model(model, img):
# img may be limited to 640x480
model.eval()
with torch.no_grad():
# Run network
_, pred, conf = model(img=img, do_loss=False, do_prediction=True)
def main(args):
# Adjust backend based on CUDA availability
backend = "nccl" if torch.cuda.is_available() else "gloo"
distributed.init_process_group(backend=backend, init_method="env://")
if torch.cuda.is_available():
device_id, device = args.local_rank, torch.device(f"cuda:{args.local_rank}")
torch.cuda.set_device(device_id)
else:
device_id, device = None, torch.device("cpu")
rank, world_size = distributed.get_rank(), distributed.get_world_size()
# Load configuration
config = make_config(args)
# Create dataloaders
test_dataloader = make_dataloader(args, config, rank, world_size)
# Create model
model = make_model(config)
log_debug("Loading snapshot from %s", args.model)
resume_from_snapshot(
model, args.model, ["body", "rpn_head", "roi_head", "sem_head"]
)
# Initialize GPU specific settings if a GPU is available
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = config["general"].getboolean("cudnn_benchmark")
if torch.cuda.is_available() and "cuda" in device.type:
model = model.cuda(device)
model = DistributedDataParallel(
model,
device_ids=[device_id],
output_device=device_id,
find_unused_parameters=True,
)
else:
# Adjust the model for CPU-based distributed computing if necessary
# Note: You might need to adjust this part based on your specific needs and setup
model = DistributedDataParallel(
model, device_ids=None, output_device=None, find_unused_parameters=True
)
save_function = partial(save_prediction_image, out_dir=args.out_dir)
test(
model,
test_dataloader,
device=device,
summary=None,
log_interval=config["general"].getint("log_interval"),
save_function=save_function,
)
if __name__ == "__main__":
main(parser.parse_args())
| 18,729 | Python | 32.808664 | 89 | 0.576966 |
AshisGhosh/roboai/grasping/grasp_server/grasp_server/grasp_det_seg.py | import numpy as np
import torch
from PIL import Image
import cv2
import scipy
import copy
import grasp_det_seg.models as models
from grasp_det_seg.modules.fpn import FPN, FPNBody
from grasp_det_seg.algos.rpn import ProposalGenerator, AnchorMatcher, RPNLoss
from grasp_det_seg.algos.fpn import RPNAlgoFPN, DetectionAlgoFPN
from grasp_det_seg.modules.heads import RPNHead, FPNROIHead, FPNSemanticHeadDeeplab
from grasp_det_seg.algos.detection import (
PredictionGenerator,
ProposalMatcher,
DetectionLoss,
)
from grasp_det_seg.algos.semantic_seg import SemanticSegLoss, SemanticSegAlgo
from grasp_det_seg.models.det_seg import DetSegNet
from grasp_det_seg.config import load_config
from grasp_det_seg.utils.misc import (
config_to_string,
norm_act_from_config,
freeze_params,
)
from grasp_det_seg.data_OCID import OCIDTestTransform
from grasp_det_seg.utils.parallel import PackedSequence
from grasp_det_seg.data_OCID.OCID_class_dict import cls_list, colors_list
from grasp_det_seg.utils.snapshot import resume_from_snapshot
import logging
def log_debug(msg, *args):
logging.getLogger().debug(msg, *args)
def log_info(msg, *args):
logging.getLogger().info(msg, *args)
def Rotate2D(pts, cnt, ang):
    # rotate 2D points `pts` about center `cnt` by `ang` degrees
    # (use NumPy directly; the scipy.* aliases of these functions are deprecated)
    ang = np.deg2rad(ang)
    return (
        np.dot(
            pts - cnt,
            np.array(
                [[np.cos(ang), np.sin(ang)], [-np.sin(ang), np.cos(ang)]]
            ),
        )
        + cnt
    )
def make_config(config_path):
log_debug("Loading configuration from %s", config_path)
conf = load_config(config_path, config_path)
log_debug("\n%s", config_to_string(conf))
return conf
def make_model(config):
body_config = config["body"]
fpn_config = config["fpn"]
rpn_config = config["rpn"]
roi_config = config["roi"]
sem_config = config["sem"]
general_config = config["general"]
classes = {
"total": int(general_config["num_things"]) + int(general_config["num_stuff"]),
"stuff": int(general_config["num_stuff"]),
"thing": int(general_config["num_things"]),
"semantic": int(general_config["num_semantic"]),
}
# BN + activation
norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)
# Create backbone
log_debug("Creating backbone model %s", body_config["body"])
# body_fn = models.__dict__["net_" + body_config["body"]]
body_fn = models.__dict__["net_resnet101"]
body_params = (
body_config.getstruct("body_params") if body_config.get("body_params") else {}
)
body = body_fn(norm_act=norm_act_static, **body_params)
# if body_config.get("weights"):
# body_config["weights"] = "/app/data/weights/resnet101"
# body.load_state_dict(torch.load(body_config["weights"], map_location="cpu"))
weights_path = "/app/data/weights/resnet101"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
body.load_state_dict(torch.load(weights_path, map_location=device))
# Freeze parameters
for n, m in body.named_modules():
for mod_id in range(1, body_config.getint("num_frozen") + 1):
if ("mod%d" % mod_id) in n:
freeze_params(m)
body_channels = body_config.getstruct("out_channels")
# Create FPN
fpn_inputs = fpn_config.getstruct("inputs")
fpn = FPN(
[body_channels[inp] for inp in fpn_inputs],
fpn_config.getint("out_channels"),
fpn_config.getint("extra_scales"),
norm_act_static,
fpn_config["interpolation"],
)
body = FPNBody(body, fpn, fpn_inputs)
# Create RPN
proposal_generator = ProposalGenerator(
rpn_config.getfloat("nms_threshold"),
rpn_config.getint("num_pre_nms_train"),
rpn_config.getint("num_post_nms_train"),
rpn_config.getint("num_pre_nms_val"),
rpn_config.getint("num_post_nms_val"),
rpn_config.getint("min_size"),
)
anchor_matcher = AnchorMatcher(
rpn_config.getint("num_samples"),
rpn_config.getfloat("pos_ratio"),
rpn_config.getfloat("pos_threshold"),
rpn_config.getfloat("neg_threshold"),
rpn_config.getfloat("void_threshold"),
)
rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
rpn_algo = RPNAlgoFPN(
proposal_generator,
anchor_matcher,
rpn_loss,
rpn_config.getint("anchor_scale"),
rpn_config.getstruct("anchor_ratios"),
fpn_config.getstruct("out_strides"),
rpn_config.getint("fpn_min_level"),
rpn_config.getint("fpn_levels"),
)
rpn_head = RPNHead(
fpn_config.getint("out_channels"),
len(rpn_config.getstruct("anchor_ratios")),
1,
rpn_config.getint("hidden_channels"),
norm_act_dynamic,
)
# Create detection network
prediction_generator = PredictionGenerator(
roi_config.getfloat("nms_threshold"),
roi_config.getfloat("score_threshold"),
roi_config.getint("max_predictions"),
)
proposal_matcher = ProposalMatcher(
classes,
roi_config.getint("num_samples"),
roi_config.getfloat("pos_ratio"),
roi_config.getfloat("pos_threshold"),
roi_config.getfloat("neg_threshold_hi"),
roi_config.getfloat("neg_threshold_lo"),
roi_config.getfloat("void_threshold"),
)
roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
roi_size = roi_config.getstruct("roi_size")
roi_algo = DetectionAlgoFPN(
prediction_generator,
proposal_matcher,
roi_loss,
classes,
roi_config.getstruct("bbx_reg_weights"),
roi_config.getint("fpn_canonical_scale"),
roi_config.getint("fpn_canonical_level"),
roi_size,
roi_config.getint("fpn_min_level"),
roi_config.getint("fpn_levels"),
)
roi_head = FPNROIHead(
fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic
)
# Create semantic segmentation network
sem_loss = SemanticSegLoss(ohem=sem_config.getfloat("ohem"))
sem_algo = SemanticSegAlgo(sem_loss, classes["semantic"])
sem_head = FPNSemanticHeadDeeplab(
fpn_config.getint("out_channels"),
sem_config.getint("fpn_min_level"),
sem_config.getint("fpn_levels"),
classes["semantic"],
pooling_size=sem_config.getstruct("pooling_size"),
norm_act=norm_act_static,
)
# Create final network
return DetSegNet(
body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes
)
def test(model, img, visualize=True, **varargs):
model.eval()
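    # fixed preprocessing: shortest side 480, longest side capped at 640, ImageNet mean/std normalization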
shortest_size = 480
longest_max_size = 640
rgb_mean = (0.485, 0.456, 0.406)
rgb_std = (0.229, 0.224, 0.225)
preprocess = OCIDTestTransform(
shortest_size=shortest_size,
longest_max_size=longest_max_size,
rgb_mean=rgb_mean,
rgb_std=rgb_std,
)
img_tensor, im_size = preprocess(img)
with torch.no_grad():
# Extract data
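        # wrap the single image tensor in a PackedSequence, the batch container the network expects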
packed_img = PackedSequence(img_tensor)
print(packed_img[0].shape)
# exit()
# Run network
_, pred, conf = model(img=packed_img, do_loss=False, do_prediction=True)
# Update meters
res = output_pred(pred, img, im_size, visualize)
return res
def output_pred(raw_pred, img, im_size_, visualize):
# https://github.com/stefan-ainetter/grasp_det_seg_cnn/blob/main/grasp_det_seg/data_OCID/OCID_class_dict.py
# ^ class_list and color_list
output = []
for i, (sem_pred, bbx_pred, cls_pred, obj_pred) in enumerate(
zip(
raw_pred["sem_pred"],
raw_pred["bbx_pred"],
raw_pred["cls_pred"],
raw_pred["obj_pred"],
)
):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sem_pred = sem_pred.to(device)
sem_pred = np.asarray(sem_pred.detach().cpu().numpy(), dtype=np.uint8)
# print(f"sem_pred: {sem_pred.shape}")
# print(f"bbx_pred: {bbx_pred.shape}")
# print(f"cls_pred: {cls_pred.shape}")
# print(f"obj_pred: {obj_pred.shape}")
seg_mask_vis = np.zeros((im_size_[0], im_size_[1], 3))
cls_labels = np.unique(sem_pred)
for cls in cls_labels:
seg_mask_vis[sem_pred == cls] = colors_list[cls]
mask_per_label = np.zeros_like(sem_pred)
mask_per_label[sem_pred == cls] = 1
iou_seg = np.sum(mask_per_label)
if iou_seg < 100:
continue
# cv2.imshow(f"Mask {cls_list[cls]}", mask_per_label.astype(np.uint8)*255)
# cv2.waitKey(0)
print(f"{cls_list[cls]} {sum(map(sum,mask_per_label))}")
# mask_per_label = mask_per_label.astype(np.uint8) * 255
try:
img_mask = img * 0.25 + seg_mask_vis * 0.75
except ValueError as e:
log_debug(f"Error: {e}")
img_mask = seg_mask_vis
img_mask = img_mask.astype(np.uint8) * 255
for cls in cls_labels:
if cls == 0:
continue
best_confidence = 0
bbox_best = None
r_bbox_best = None
print(f"Getting best for cls: {cls} {cls_list[cls]}")
for bbx_pred_i, cls_pred_i, obj_pred_i in zip(bbx_pred, cls_pred, obj_pred):
threshold = 0.06
cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
continue
actual_class = sem_pred[int(cnt[1]), int(cnt[0])]
if actual_class != cls:
continue
if obj_pred_i.item() > threshold:
# print(f"obj_pred_i: {obj_pred_i.item()}")
# print(f"cls_pred_i: {cls_pred_i} {cls_list[cls_pred_i.item()]}")
# print(f"bbx_pred_i: {bbx_pred_i}")
pt1 = (int(bbx_pred_i[0]), int(bbx_pred_i[1]))
pt2 = (int(bbx_pred_i[2]), int(bbx_pred_i[3]))
newcls = cls_pred_i.item()
if newcls > 17:
assert False
num_classes_theta = 18
# theta = ((180 / num_classes_theta) * newcls) + 5 # 5 degrees offset?
theta = (180 / num_classes_theta) * newcls
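                # each of the 18 orientation classes is a 10-degree step; the axis-aligned
                # box corners are rotated about the box center below to get the oriented grasp box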
pts = np.array(
[
[pt1[0], pt1[1]],
[pt2[0], pt1[1]],
[pt2[0], pt2[1]],
[pt1[0], pt2[1]],
]
)
cnt = np.array(
[
(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2,
]
)
r_bbox_ = Rotate2D(pts, cnt, 90 - theta)
r_bbox_ = r_bbox_.astype("int16")
# print(f"r_bbox_: {r_bbox_}")
# if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
# continue
# filter out gripper - any result with the center in the bottom 100 pixels
# TODO: find a better solution
if cnt[1] > im_size_[0] - 100:
continue
# if sem_pred[int(cnt[1]), int(cnt[0])] == cls:
# print(f"Seg class: {cls_list[sem_pred[int(cnt[1]), int(cnt[0])]]}")
if obj_pred_i.item() >= best_confidence:
best_confidence = obj_pred_i.item()
bbox_best = bbx_pred_i
r_bbox_best = copy.deepcopy(r_bbox_)
if bbox_best is not None:
res = {
"cls": cls,
"obj": best_confidence,
"bbox": bbox_best,
"r_bbox": r_bbox_best,
}
cnt = np.array(
[
(int(bbox_best[0]) + int(bbox_best[2])) / 2,
(int(bbox_best[1]) + int(bbox_best[3])) / 2,
]
)
print(
f"res {cls_list[cls]} | {cls_list[sem_pred[int(cnt[1]), int(cnt[0])]]}: {res}"
)
output.append(res)
pt1 = (int(bbox_best[0]), int(bbox_best[1]))
pt2 = (int(bbox_best[2]), int(bbox_best[3]))
# cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
cv2.putText(
img_mask,
cls_list[cls],
(int(bbox_best[0]), int(bbox_best[1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 255, 0),
2,
cv2.LINE_AA,
)
if r_bbox_best is not None:
cv2.line(
img_mask,
tuple(r_bbox_best[0]),
tuple(r_bbox_best[1]),
(255, 0, 0),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[1]),
tuple(r_bbox_best[2]),
(0, 0, 255),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[2]),
tuple(r_bbox_best[3]),
(255, 0, 0),
2,
)
cv2.line(
img_mask,
tuple(r_bbox_best[3]),
tuple(r_bbox_best[0]),
(0, 0, 255),
2,
)
# print(f"output: {output}")
# img_mask = (img * 0.25 + seg_mask_vis * 0.75)
# img_mask = img_mask.astype(np.uint8)*255
if visualize:
cv2.imshow("Image Mask", img_mask)
cv2.waitKey(0)
return output, img_mask
class GraspServer:
def __init__(self):
config_path = "/app/data/config/test.ini"
print(f"Loading configuration from {config_path}")
config = make_config(config_path)
print("Creating model...")
self.model = make_model(config)
weights_path = "/app/data/weights/model_last.pth.tar"
log_debug("Loading snapshot from %s", weights_path)
resume_from_snapshot(
self.model, weights_path, ["body", "rpn_head", "roi_head", "sem_head"]
)
self.visualize = False
def detect(self, img):
res, img = test(self.model, img, visualize=self.visualize)
# Convert to JSON serializable format
res_dict = []
for r in res:
res_dict.append(
{
"cls": int(r["cls"]),
"cls_name": cls_list[int(r["cls"])],
"obj": r["obj"],
"bbox": r["bbox"].tolist(),
"r_bbox": r["r_bbox"].tolist(),
}
)
return res_dict, Image.fromarray(img)
def detect_from_path(self, img_path):
img_bgr = cv2.imread(img_path)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
return self.detect(img_rgb)
def test_detect(self):
return self.detect_from_path(
"/app/data/OCID_grasp/ARID20/table/top/seq08/rgb/result_2018-08-21-14-44-31.png"
)
if __name__ == "__main__":
print("Testing Grasp_Det_Seg")
config_path = "/app/data/config/test.ini"
print(f"Loading configuration from {config_path}")
config = make_config(config_path)
print("Creating model...")
model = make_model(config)
weights_path = "/app/data/weights/model_last.pth.tar"
log_debug("Loading snapshot from %s", weights_path)
snapshot = resume_from_snapshot(
model, weights_path, ["body", "rpn_head", "roi_head", "sem_head"]
)
# rank, world_size = distributed.get_rank(), distributed.get_world_size()
# model = DistributedDataParallel(model, device_ids=None, output_device=None, find_unused_parameters=True)
print("Loading image...")
# img_path = "/app/data/OCID_grasp/ARID20/table/top/seq12/rgb/result_2018-08-21-16-53-16.png"
# img_path = "/app/data/OCID_grasp/ARID20/table/top/seq04/rgb/result_2018-08-21-12-13-01.png"
# img_path="/app/data/OCID_grasp/ARID20/table/top/seq08/rgb/result_2018-08-21-14-44-31.png"
img_path = "/app/data/test.png"
img_bgr = cv2.imread(img_path)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
# cv2.imshow("Image", img_rgb)
# cv2.waitKey(0)
print("Testing model...")
test(model, img_rgb)
| 17,224 | Python | 34.010163 | 111 | 0.521598 |
AshisGhosh/roboai/grasping/robotic_grasping_server/app/main.py | from PIL import Image
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from robotic_grasping_server.robotic_grasping import GraspGenerator
import logging
log = logging.getLogger("robotic_grasping_server app")
log.setLevel(logging.INFO)
app = FastAPI()
grasp = GraspGenerator(visualize=True)
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robotic grasping server."}
@app.on_event("startup")
async def startup_event():
log.info("Starting up the grasp server...")
grasp.load_model()
@app.post("/get_grasps")
async def get_grasps(
rgb_image: UploadFile = File(...), depth_image: UploadFile = File(...)
):
log.debug("Received get_grasp request.")
rgb_image = Image.open(rgb_image.file)
depth_image = Image.open(depth_image.file)
return grasp.run(rgb_image, depth_image)
| 1,325 | Python | 24.5 | 78 | 0.704906 |
AshisGhosh/roboai/grasping/robotic_grasping_server/robotic_grasping_server/robotic_grasping.py | import matplotlib.pyplot as plt
import numpy as np
import torch
from hardware.device import get_device
from inference.post_process import post_process_output
from utils.data.camera_data import CameraData
from utils.dataset_processing.grasp import detect_grasps
from utils.visualisation.plot import plot_grasp
from PIL import Image
class GraspGenerator:
def __init__(
self,
saved_model_path="/robotic-grasping/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98",
visualize=False,
force_cpu=False,
):
        self.saved_model_path = saved_model_path
self.model = None
self.device = get_device(force_cpu=force_cpu)
self.cam_data = CameraData(
include_depth=True, include_rgb=True, output_size=360
)
if visualize:
self.fig = plt.figure(figsize=(10, 10))
else:
self.fig = None
def load_model(self):
        # monkey patch: restore the np.float alias removed in newer NumPy versions,
        # which some dependencies still reference
        np.float = float
print("Loading model... ")
self.model = torch.load(self.saved_model_path, map_location=self.device)
self.model.to(self.device) # Ensure model parameters are on the correct device
def generate(self, rgb, depth):
x, depth_img, rgb_img = self.cam_data.get_data(rgb=rgb, depth=depth)
# Predict the grasp pose using the saved model
with torch.no_grad():
xc = x.to(self.device)
pred = self.model.predict(xc)
q_img, ang_img, width_img = post_process_output(
pred["pos"], pred["cos"], pred["sin"], pred["width"]
)
grasps = detect_grasps(q_img, ang_img, width_img)
for grasp in grasps:
print(grasp.as_gr)
if self.fig:
plot_grasp(
fig=self.fig,
rgb_img=self.cam_data.get_rgb(rgb, False),
grasps=grasps,
save=True,
)
return grasps
def run_test(self):
rgb = Image.open("shared/data/test_pair1_rgb.png")
rgb = np.array(rgb)
print(rgb.shape)
depth = Image.open("shared/data/test_pair1_depth.png")
depth = np.array(depth)
depth = np.expand_dims(depth, axis=2)
print(depth.shape)
self.generate(rgb, depth)
def run(self, rgb, depth):
rgb = np.array(rgb)
depth = np.array(depth)
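        # add an explicit channel axis so depth is (H, W, 1), as in run_test()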
depth = np.expand_dims(depth, axis=2)
grasps = self.generate(rgb, depth)
grasp_dict = []
        if grasps:
            print(grasps[0].as_gr)
for grasp in grasps:
r_bbox = [[pt[0], pt[1]] for pt in grasp.as_gr.points]
grasp_dict.append({"r_bbox": r_bbox})
return grasp_dict
if __name__ == "__main__":
np.float = float
generator = GraspGenerator(
saved_model_path="/robotic-grasping/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98",
visualize=True,
force_cpu=False,
)
generator.load_model()
generator.run_test()
| 3,082 | Python | 28.932039 | 123 | 0.594744 |
AshisGhosh/roboai/isaac_sim/pyproject.toml | [tool.poetry]
name = "isaac-sim"
version = "0.1.0"
description = ""
authors = ["Ashis Ghosh <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
fastapi = "^0.110.2"
uvicorn = "^0.29.0"
python-multipart = "^0.0.9"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| 337 | TOML | 17.777777 | 47 | 0.664688 |
AshisGhosh/roboai/isaac_sim/README.md | # README
## Launch Docker
`docker compose up isaac-sim`
Enter docker:
`docker exec -it roboai-isaac-sim-1 bash`
## Run python standalone (will launch sim as well)
`./python.sh roboai/robosim.py`
## Run jupyter
**Requires a local Nucleus server**
https://docs.omniverse.nvidia.com/nucleus/latest/workstation.html
`./jupyter_notebook.sh --allow-root roboai/test_nb.`
## Isaac Slow Loading Issue (v2023.1.0)
https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/issues/92#issuecomment-1797057491
The workaround is to patch `check_server()` in `nucleus.py`: raise the hang-detection window from 10 s to 20 s and add a configurable `timeout` argument. Change:
```
def check_server(server: str, path: str) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(10000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
```
to:
```
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(20000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
```
| 2,007 | Markdown | 27.685714 | 86 | 0.633284 |
AshisGhosh/roboai/isaac_sim/app/main.py | import nest_asyncio
import logging
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from roboai.robosim import SimManager
from roboai.standalone_stream_server import StreamServer
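# nest_asyncio patches asyncio to allow nested (re-entrant) event loops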
nest_asyncio.apply()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Create FastAPI instance
app = FastAPI()
robosim = SimManager()
ss = StreamServer()
# List of allowed origins (you can use '*' to allow all origins)
origins = [
"http://localhost:3000", # Allow your Next.js app
# Add any other origins as needed
]
# Add CORSMiddleware to the application
app.add_middleware(
CORSMiddleware,
allow_origins=origins, # List of allowed origins
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
# Example route
@app.get("/")
async def read_root():
return {"message": "Hello, FastAPI! This is the robosim server."}
@app.on_event("startup")
def startup_event():
pass
@app.post("/test")
async def test():
ss.start()
return True
@app.post("/start_sim")
async def start_sim():
# threading.Thread(target=robosim.start_sim).start()
robosim.start_sim(headless=True)
return True
@app.post("/run_sim")
async def run_sim():
return robosim.run_sim()
@app.post("/close_sim")
async def close_sim():
return await robosim.close_sim()
| 1,389 | Python | 19.144927 | 69 | 0.698344 |
AshisGhosh/roboai/isaac_sim/omniverse_patch/nucleus.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import json
import os
# python
import typing
from collections import namedtuple
from urllib.parse import urlparse
import carb
# omniverse
import omni.client
from omni.client._omniclient import CopyBehavior, Result
from omni.isaac.version import get_version
class Version(namedtuple("Version", "major minor patch")):
def __new__(cls, s):
return super().__new__(cls, *map(int, s.split(".")))
def __repr__(self):
return ".".join(map(str, self))
def get_url_root(url: str) -> str:
"""Get root from URL or path
Args:
url (str): full http or omniverse path
Returns:
str: Root path or URL or Nucleus server
"""
supported_list = ["omniverse", "http", "https"]
protocol = urlparse(url).scheme
if protocol not in supported_list:
carb.log_warn("Unable to find root for {}".format(url))
return ""
server = f"{protocol}://{urlparse(url).netloc}"
return server
def create_folder(server: str, path: str) -> bool:
"""Create a folder on server
Args:
server (str): Name of Nucleus server
path (str): Path to folder
Returns:
bool: True if folder is created successfully
"""
carb.log_info("Create {} folder on {} Server".format(path, server))
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result = omni.client.create_folder("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {} Server has {} folder created".format(server, path))
return True
else:
carb.log_warn(
"Failure: Server {} not able to create {} folder".format(server, path)
)
return False
def delete_folder(server: str, path: str) -> bool:
"""Remove folder and all of its contents
Args:
server (str): Name of Nucleus server
path (str): Path to folder
Returns:
bool: True if folder is deleted successfully
"""
carb.log_info("Cleanup {} folder on {} Server".format(path, server))
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result = omni.client.delete("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {} Server has {} folder deleted".format(server, path))
return True
else:
carb.log_warn(
"Failure: Server {} not able to delete {} folder".format(server, path)
)
return False
async def _list_files(url: str) -> typing.Tuple[str, typing.List]:
"""List files under a URL
Args:
url (str): URL of Nucleus server with path to folder
Returns:
root (str): Root of URL of Nucleus server
paths (typing.List): List of path to each file
"""
root, paths = await _collect_files(url)
return root, paths
async def download_assets_async(
src: str,
dst: str,
progress_callback,
concurrency: int = 10,
copy_behaviour: omni.client._omniclient.CopyBehavior = CopyBehavior.OVERWRITE,
copy_after_delete: bool = True,
timeout: float = 300.0,
) -> omni.client._omniclient.Result:
"""Download assets from S3 bucket
Args:
src (str): URL of S3 bucket as source
dst (str): URL of Nucleus server to copy assets to
progress_callback: Callback function to keep track of progress of copy
concurrency (int): Number of concurrent copy operations. Default value: 3
copy_behaviour (omni.client._omniclient.CopyBehavior): Behavior if the destination exists. Default value: OVERWRITE
copy_after_delete (bool): True if destination needs to be deleted before a copy. Default value: True
timeout (float): Default value: 300 seconds
Returns:
Result (omni.client._omniclient.Result): Result of copy
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
count = 0
result = Result.ERROR
if copy_after_delete and check_server(dst, ""):
carb.log_info("Deleting existing folder {}".format(dst))
delete_folder(dst, "")
sem = asyncio.Semaphore(concurrency)
carb.log_info("Listing {} ...".format(src))
root_source, paths = await _list_files("{}".format(src))
total = len(paths)
carb.log_info("Found {} files from {}".format(total, root_source))
for entry in reversed(paths):
count += 1
path = os.path.relpath(entry, root_source).replace("\\", "/")
carb.log_info(
"Downloading asset {} of {} from {}/{} to {}/{}".format(
count, total, root_source, path, dst, path
)
)
try:
async with sem:
result = await asyncio.wait_for(
omni.client.copy_async(
"{}/{}".format(root_source, path),
"{}/{}".format(dst, path),
copy_behaviour,
),
timeout=timeout,
)
if result != Result.OK:
carb.log_warn(f"Failed to copy {path} to {dst}.")
return Result.ERROR_ACCESS_LOST
except asyncio.CancelledError:
carb.log_warn("Assets download cancelled.")
return Result.ERROR
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
return Result.ERROR
progress_callback(count, total)
return result
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
# Increase hang detection timeout
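    # (patched vs. stock Isaac Sim 2023.1.0: timeout argument added and hang detection
    # raised to 20 s for remote servers; see isaac_sim/README.md)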
if "localhost" not in server:
omni.client.set_hang_detection_time_ms(20000)
result, _ = omni.client.stat("{}{}".format(server, path))
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
async def check_server_async(server: str, path: str, timeout: float = 10.0) -> bool:
"""Check a specific server for a path (asynchronous version).
Args:
server (str): Name of Nucleus server
path (str): Path to search
timeout (float): Default value: 10 seconds
Returns:
bool: True if folder is found
"""
carb.log_info("Checking path: {}{}".format(server, path))
try:
result, _ = await asyncio.wait_for(
omni.client.stat_async("{}{}".format(server, path)), timeout
)
if result == Result.OK:
carb.log_info("Success: {}{}".format(server, path))
return True
else:
carb.log_info("Failure: {}{} not accessible".format(server, path))
return False
except asyncio.TimeoutError:
carb.log_warn(f"check_server_async() timeout {timeout}")
return False
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
return False
def build_server_list() -> typing.List:
"""Return list with all known servers to check
Returns:
all_servers (typing.List): List of servers found
"""
mounted_drives = carb.settings.get_settings().get_settings_dictionary(
"/persistent/app/omniverse/mountedDrives"
)
all_servers = []
if mounted_drives is not None:
mounted_dict = json.loads(mounted_drives.get_dict())
for drive in mounted_dict.items():
all_servers.append(drive[1])
else:
carb.log_info("/persistent/app/omniverse/mountedDrives setting not found")
return all_servers
def find_nucleus_server(suffix: str) -> typing.Tuple[bool, str]:
"""Attempts to determine best Nucleus server to use based on existing mountedDrives setting and the
default server specified in json config at "/persistent/isaac/asset_root/". Call is blocking
Args:
suffix (str): Path to folder to search for. Default value: /Isaac
Returns:
bool: True if Nucleus server with suffix is found
url (str): URL of found Nucleus
"""
carb.log_warn("find_nucleus_server() is deprecated. Use get_assets_root_path().")
return False, ""
def get_server_path(suffix: str = "") -> typing.Union[str, None]:
"""Tries to find a Nucleus server with specific path
Args:
suffix (str): Path to folder to search for.
Returns:
url (str): URL of Nucleus server with path to folder.
Returns None if Nucleus server not found.
"""
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
server_root = get_url_root(default_asset_root)
if server_root:
result = check_server(server_root, suffix)
if result:
return server_root
carb.log_warn("Could not find Nucleus server with {} folder".format(suffix))
return None
async def get_server_path_async(suffix: str = "") -> typing.Union[str, None]:
"""Tries to find a Nucleus server with specific path (asynchronous version).
Args:
suffix (str): Path to folder to search for.
Returns:
url (str): URL of Nucleus server with path to folder.
Returns None if Nucleus server not found.
"""
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
server_root = get_url_root(default_asset_root)
if server_root:
result = await check_server_async(server_root, suffix)
if result:
return server_root
carb.log_warn("Could not find Nucleus server with {} folder".format(suffix))
return None
def verify_asset_root_path(path: str) -> typing.Tuple[omni.client.Result, str]:
"""Attempts to determine Isaac assets version and check if there are updates.
(asynchronous version)
Args:
path (str): URL or path of asset root to verify
Returns:
omni.client.Result: OK if Assets verified
ver (str): Version of Isaac Sim assets
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
ver_asset = Version("0.0.0")
version_core, _, _, _, _, _, _, _ = get_version()
ver_app = Version(version_core)
# Get asset version
carb.log_info(f"Verifying {path}")
try:
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
omni.client.push_base_url(f"{path}/")
file_path = omni.client.combine_with_base_url("version.txt")
# carb.log_warn(f"Looking for version file at: {file_path}")
result, _, file_content = omni.client.read_file(file_path)
if result != omni.client.Result.OK:
carb.log_info(f"Unable to find version file: {file_path}.")
else:
ver_asset = Version(memoryview(file_content).tobytes().decode())
except ValueError:
carb.log_info(f"Unable to parse version file: {file_path}.")
except UnicodeDecodeError:
carb.log_info(f"Unable to read version file: {file_path}.")
except Exception as ex:
carb.log_warn(f"Exception: {type(ex).__name__}")
# Compare versions
# carb.log_warn(f"ver_asset = {ver_asset.major}.{ver_asset.minor}.{ver_asset.patch}")
# carb.log_warn(f"ver_app = {ver_app.major}.{ver_app.minor}.{ver_app.patch}")
if ver_asset == Version("0.0.0"):
carb.log_info(f"Error verifying Isaac Sim assets at {path}")
return Result.ERROR_NOT_FOUND, ""
elif ver_asset.major != ver_app.major:
carb.log_info(
f"Unsupported version of Isaac Sim assets found at {path}: {ver_asset}"
)
return Result.ERROR_BAD_VERSION, ver_asset
elif ver_asset.minor != ver_app.minor:
carb.log_info(
f"Unsupported version of Isaac Sim assets found at {path}: {ver_asset}"
)
return Result.ERROR_BAD_VERSION, ver_asset
else:
return Result.OK, ver_asset
def get_full_asset_path(path: str) -> typing.Union[str, None]:
"""Tries to find the full asset path on connected servers
Args:
path (str): Path of asset from root to verify
Returns:
url (str): URL or full path to assets.
Returns None if assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/default setting
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = check_server(default_asset_root, path)
if result:
carb.log_info("Asset path found at {}{}".format(default_asset_root, path))
return default_asset_root + path
# 2 - Check mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, path)
if result:
carb.log_info("Asset path found at {}{}".format(server_name, path))
return server_name + path
carb.log_warn("Could not find assets path: {}".format(path))
return None
async def get_full_asset_path_async(path: str) -> typing.Union[str, None]:
"""Tries to find the full asset path on connected servers (asynchronous version).
Args:
path (str): Path of asset from root to verify
Returns:
url (str): URL or full path to assets.
Returns None if assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/default setting
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = await check_server_async(default_asset_root, path)
if result:
carb.log_info("Asset path found at {}{}".format(default_asset_root, path))
return default_asset_root + path
# 2 - Check mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = await check_server_async(server_name, path)
if result:
carb.log_info("Asset path found at {}{}".format(server_name, path))
return server_name + path
carb.log_warn("Could not find assets path: {}".format(path))
return None
def get_nvidia_asset_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the NVIDIA assets
Returns:
url (str): URL or root path to NVIDIA assets folder.
Returns None if NVIDIA assets not found.
"""
# 1 - Check /persistent/isaac/asset_root/nvidia setting
carb.log_info("Check /persistent/isaac/asset_root/nvidia setting")
nvidia_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/nvidia"
)
if nvidia_asset_root:
result = check_server(nvidia_asset_root, "")
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_asset_root))
return nvidia_asset_root
# 2 - Check root on /persistent/isaac/asset_root/nvidia and mountedDrives setting for /NVIDIA folder
nvidia_asset_path = "/NVIDIA"
server_root = get_url_root(nvidia_asset_path)
if server_root:
result = check_server(server_root, nvidia_asset_path)
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_asset_root))
return server_root + nvidia_asset_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, nvidia_asset_path)
if result:
carb.log_info("NVIDIA assets found at {}".format(server_name))
return server_name + nvidia_asset_path
# 3 - Check cloud for http://omniverse-content-production.s3-us-west-2.amazonaws.com folder
nvidia_assets_url = "http://omniverse-content-production.s3-us-west-2.amazonaws.com"
carb.log_info("Check {}".format(nvidia_assets_url))
if nvidia_assets_url:
result = check_server(nvidia_assets_url, "/Assets")
if result:
carb.log_info("NVIDIA assets found at {}".format(nvidia_assets_url))
return nvidia_assets_url
carb.log_warn("Could not find NVIDIA assets folder")
return None
def get_isaac_asset_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets
Returns:
url (str): URL or root path to Isaac Sim assets folder.
Returns None if Isaac Sim assets not found.
"""
_, _, version_major, version_minor, _, _, _, _ = get_version()
# 1 - Check /persistent/isaac/asset_root/isaac setting
carb.log_info("Check /persistent/isaac/asset_root/isaac setting")
isaac_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/isaac"
)
if isaac_asset_root:
result = check_server(isaac_asset_root, "")
if result:
result, ver_asset = verify_asset_root_path(isaac_asset_root)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, isaac_asset_root
)
)
return isaac_asset_root
# 2 - Check root on /persistent/isaac/asset_root/default and mountedDrives setting for /Isaac folder
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
isaac_path = "/Isaac"
server_root = get_url_root(isaac_asset_root)
if default_asset_root:
result = check_server(default_asset_root, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(default_asset_root + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, default_asset_root + isaac_path
)
)
return default_asset_root + isaac_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_name + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_name + isaac_path
)
)
return server_name + isaac_path
# 3 - Check root on /persistent/isaac/asset_root/default and mountedDrives setting for /NVIDIA/Assets/Isaac/{version_major}.{version_minor} folder
isaac_path = f"/NVIDIA/Assets/Isaac/{version_major}.{version_minor}"
server_root = get_url_root(isaac_asset_root)
if server_root:
result = check_server(server_root, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_root + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_root + isaac_path
)
)
return server_root + isaac_path
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
result = check_server(server_name, isaac_path)
if result:
result, ver_asset = verify_asset_root_path(server_name + isaac_path)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, server_name + isaac_path
)
)
return server_name + isaac_path
# 4 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assetsURL = carb.settings.get_settings().get_as_string(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Check {}".format(cloud_assetsURL))
if cloud_assetsURL:
result = check_server(cloud_assetsURL, "")
if result:
result, ver_asset = verify_asset_root_path(cloud_assetsURL)
if result is Result.OK:
carb.log_info(
"Isaac Sim assets version {} found at {}".format(
ver_asset, cloud_assetsURL
)
)
return cloud_assetsURL
carb.log_warn("Could not find Isaac Sim assets folder")
return None
def get_assets_root_path() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets on a Nucleus server
Returns:
url (str): URL of Nucleus server with root path to assets folder.
Returns None if Nucleus server not found.
"""
# get timeout
timeout = carb.settings.get_settings().get("/persistent/isaac/asset_root/timeout")
if not isinstance(timeout, (int, float)):
timeout = 10.0
# 1 - Check /persistent/isaac/asset_root/default setting
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = check_server(default_asset_root, "/Isaac", timeout)
if result:
result = check_server(default_asset_root, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(default_asset_root))
return default_asset_root
# 2 - Check root on mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
# carb.log_info("Found {}".format(server_name))
result = check_server(server_name, "/Isaac", timeout)
if result:
result = check_server(server_name, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(server_name))
return server_name
# 3 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assets_url = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Checking {}...".format(cloud_assets_url))
if cloud_assets_url:
result = check_server(cloud_assets_url, "/Isaac", timeout)
if result:
result = check_server(cloud_assets_url, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(cloud_assets_url))
return cloud_assets_url
carb.log_warn("Could not find assets root folder")
return None
async def get_assets_root_path_async() -> typing.Union[str, None]:
"""Tries to find the root path to the Isaac Sim assets on a Nucleus server (asynchronous version).
Returns:
url (str): URL of Nucleus server with root path to assets folder.
Returns None if Nucleus server not found.
"""
# get timeout
timeout = carb.settings.get_settings().get("/persistent/isaac/asset_root/timeout")
if not isinstance(timeout, (int, float)):
timeout = 10.0
# 1 - Check /persistent/isaac/asset_root/default setting
carb.log_info("Check /persistent/isaac/asset_root/default setting")
default_asset_root = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/default"
)
if default_asset_root:
result = await check_server_async(default_asset_root, "/Isaac", timeout)
if result:
result = await check_server_async(default_asset_root, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(default_asset_root))
return default_asset_root
# 2 - Check root on mountedDrives setting
connected_servers = build_server_list()
if len(connected_servers):
for server_name in connected_servers:
# carb.log_info("Found {}".format(server_name))
result = await check_server_async(server_name, "/Isaac", timeout)
if result:
result = await check_server_async(server_name, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(server_name))
return server_name
# 3 - Check cloud for /Assets/Isaac/{version_major}.{version_minor} folder
cloud_assets_url = carb.settings.get_settings().get(
"/persistent/isaac/asset_root/cloud"
)
carb.log_info("Checking {}...".format(cloud_assets_url))
if cloud_assets_url:
result = await check_server_async(cloud_assets_url, "/Isaac", timeout)
if result:
result = await check_server_async(cloud_assets_url, "/NVIDIA", timeout)
if result:
carb.log_info("Assets root found at {}".format(cloud_assets_url))
return cloud_assets_url
carb.log_warn("Could not find assets root folder")
return None
def get_assets_server() -> typing.Union[str, None]:
"""Tries to find a server with the Isaac Sim assets
Returns:
url (str): URL of Nucleus server with the Isaac Sim assets
Returns None if Nucleus server not found.
"""
carb.log_warn("get_assets_server() is deprecated. Use get_server_path().")
return None
async def _collect_files(url: str) -> typing.Tuple[str, typing.List]:
"""Collect files under a URL.
Args:
url (str): URL of Nucleus server with path to folder
Returns:
root (str): Root of URL of Nucleus server
paths (typing.List): List of path to each file
"""
paths = []
if await is_dir_async(url):
root = url + "/"
paths.extend(await recursive_list_folder(root))
return url, paths
else:
if await is_file_async(url):
root = os.path.dirname(url)
return root, [url]
async def is_dir_async(path: str) -> bool:
"""Check if path is a folder
Args:
path (str): Path to folder
Returns:
bool: True if path is a folder
"""
result, folder = await asyncio.wait_for(omni.client.list_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a folder: {result}")
return True if len(folder) > 0 else False
async def is_file_async(path: str) -> bool:
"""Check if path is a file
Args:
path (str): Path to file
Returns:
bool: True if path is a file
"""
result, file = await asyncio.wait_for(omni.client.stat_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a file: {result}")
return False if file.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0 else True
def is_file(path: str) -> bool:
"""Check if path is a file
Args:
path (str): Path to file
Returns:
bool: True if path is a file
"""
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
result, file = omni.client.stat(path)
if result != omni.client.Result.OK:
raise Exception(f"Failed to determine if {path} is a file: {result}")
return False if file.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0 else True
async def recursive_list_folder(path: str) -> typing.List:
"""Recursively list all files
Args:
path (str): Path to folder
Returns:
paths (typing.List): List of path to each file
"""
paths = []
files, dirs = await list_folder(path)
paths.extend(files)
tasks = []
for dir in dirs:
tasks.append(asyncio.create_task(recursive_list_folder(dir)))
results = await asyncio.gather(*tasks)
for result in results:
paths.extend(result)
return paths
async def list_folder(path: str) -> typing.Tuple[typing.List, typing.List]:
"""List files and sub-folders from root path
Args:
path (str): Path to root folder
Raises:
Exception: When unable to find files under the path.
Returns:
files (typing.List): List of path to each file
dirs (typing.List): List of path to each sub-folder
"""
# omni.client is a singleton, import locally to allow to run with multiprocessing
import omni.client
files = []
dirs = []
carb.log_info(f"Collecting files for {path}")
result, entries = await asyncio.wait_for(omni.client.list_async(path), timeout=10)
if result != omni.client.Result.OK:
raise Exception(f"Failed to list entries for {path}: {result}")
for entry in entries:
# Increase hang detection timeout
omni.client.set_hang_detection_time_ms(10000)
full_path = omni.client.combine_urls(path, entry.relative_path)
if entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN > 0:
dirs.append(full_path + "/")
else:
carb.log_info(f"Enqueuing {full_path} for processing")
files.append(full_path)
return files, dirs
| 30,457 | Python | 34.457509 | 150 | 0.61513 |
AshisGhosh/roboai/isaac_sim/humble_ws/src/robot_api/setup.py | from setuptools import find_packages, setup
package_name = "robot_api"
setup(
name=package_name,
version="0.0.0",
packages=find_packages(exclude=["test"]),
data_files=[
("share/ament_index/resource_index/packages", ["resource/" + package_name]),
("share/" + package_name, ["package.xml"]),
("share/" + package_name + "/config", ["config/moveit_franka_python.yaml"]),
],
install_requires=["setuptools"],
zip_safe=True,
maintainer="root",
maintainer_email="[email protected]",
description="TODO: Package description",
license="TODO: License declaration",
tests_require=["pytest"],
entry_points={
"console_scripts": [
"manipulation_example = robot_api.manipulation_example:main",
"manipulation = robot_api.manipulation:main",
"task_manager = robot_api.task_manager:main",
],
},
)
| 908 | Python | 30.344827 | 84 | 0.615639 |