file_path | content
---|---
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealBase.yaml
|
# See schema in factory_schema_config_base.py for descriptions of parameters.
defaults:
- _self_
mode:
export_scene: False
export_states: False
sim:
dt: 0.016667
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_damping: True
disable_franka_collisions: False
physx:
solver_type: ${....solver_type}
num_threads: ${....num_threads}
num_subscenes: ${....num_subscenes}
use_gpu: ${contains:"cuda",${....sim_device}}
num_position_iterations: 16
num_velocity_iterations: 0
contact_offset: 0.01
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 5.0
friction_offset_threshold: 0.01
friction_correlation_distance: 0.00625
max_gpu_contact_pairs: 6553600 # 50 * 128 * 1024
default_buffer_size_multiplier: 8.0
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
env:
env_spacing: 0.7
franka_depth: 0.37 # Franka origin 37 cm behind table midpoint
table_height: 1.04
franka_friction: 4.0
table_friction: 0.3
|
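The IndustRealBase config above leans on custom OmegaConf resolvers (`eq`, `contains`), with the `...`/`....` prefixes simply climbing parent nodes toward top-level keys such as `pipeline` and `sim_device`. Below is a minimal, illustrative sketch of how such resolvers can be registered and evaluated; the flat keys here are stand-ins, not the repo's actual launch code.

```python
# Sketch: registering the `eq` / `contains` resolvers used by the YAML above.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("eq", lambda a, b: str(a).lower() == str(b).lower())
OmegaConf.register_new_resolver(
    "contains", lambda needle, haystack: str(needle).lower() in str(haystack).lower()
)

cfg = OmegaConf.create({
    "pipeline": "gpu",
    "sim_device": "cuda:0",
    "sim": {
        "use_gpu_pipeline": "${eq:${pipeline},gpu}",
        "physx": {"use_gpu": "${contains:cuda,${sim_device}}"},
    },
})
print(OmegaConf.to_container(cfg, resolve=True))
# {'pipeline': 'gpu', 'sim_device': 'cuda:0',
#  'sim': {'use_gpu_pipeline': True, 'physx': {'use_gpu': True}}}
```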
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Ant.yaml
|
# used to create the object
name: Ant
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 1.0
controlFrequencyInv: 1 # 60 Hz
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.005
energyCost: 0.05
dofVelocityScale: 0.2
contactForceScale: 0.1
jointsAtLimitCost: 0.1
deathCost: -2.0
terminationHeight: 0.31
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
asset:
assetFileName: "mjcf/nv_ant.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
actor_params:
ant:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
|
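The `randomization_params` block in Ant.yaml drives Isaac Gym's domain randomization: `range` parameterizes a sampling distribution and `operation` says how the sample modifies the nominal value. The sketch below shows one plausible interpretation, assuming gaussian `range` means [mean, std] and uniform means [low, high] (check the repo's domain-randomization docs for the exact convention); `randomize_value` is a hypothetical helper, not the repo's API.

```python
import numpy as np

def randomize_value(nominal, spec, rng=None):
    """Apply one randomization_params entry to a nominal value (illustrative)."""
    if rng is None:
        rng = np.random.default_rng()
    lo, hi = spec["range"]
    if spec["distribution"] == "gaussian":
        sample = rng.normal(loc=lo, scale=hi)   # assumed: range = [mean, std]
    elif spec["distribution"] == "uniform":
        sample = rng.uniform(lo, hi)            # range = [low, high]
    else:
        raise ValueError(spec["distribution"])
    if spec["operation"] == "additive":
        return nominal + sample
    if spec["operation"] == "scaling":
        return nominal * sample
    raise ValueError(spec["operation"])

# e.g. the ant `mass` entry: scale nominal mass by a uniform factor in [0.5, 1.5]
mass_spec = {"range": [0.5, 1.5], "operation": "scaling", "distribution": "uniform"}
print(randomize_value(1.0, mass_spec))
```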
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FrankaCubeStack.yaml
|
# used to create the object
name: FrankaCubeStack
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 1.5
episodeLength: 300
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.25
startRotationNoise: 0.785
frankaPositionNoise: 0.0
frankaRotationNoise: 0.0
frankaDofNoise: 0.25
aggregateMode: 3
actionScale: 1.0
distRewardScale: 0.1
liftRewardScale: 1.5
alignRewardScale: 2.0
stackRewardScale: 16.0
controlType: osc # options are {joint_tor, osc}
asset:
assetRoot: "../../assets"
assetFileNameFranka: "urdf/franka_description/robots/franka_panda_gripper.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
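The `numEnvs: ${resolve_default:8192,${...num_envs}}` pattern above picks the command-line value when one is supplied and otherwise falls back to the default baked into the task file. A small sketch of that resolver's assumed behavior (the repo defines its own version in its launch script):

```python
from omegaconf import OmegaConf

OmegaConf.register_new_resolver(
    "resolve_default", lambda default, arg: default if arg in ("", None) else arg
)

base = {"env": {"numEnvs": "${resolve_default:8192,${num_envs}}"}}

no_override = OmegaConf.create({**base, "num_envs": ""})
print(no_override.env.numEnvs)   # 8192 -> falls back to the task default

overridden = OmegaConf.create({**base, "num_envs": 512})
print(overridden.env.numEnvs)    # 512  -> the override wins
```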
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHandDextremeManualDR.yaml
|
# used to create the object
name: AllegroHandManualDR
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 320 # Not used, but would be 8 sec if resetTime is not set
resetTime: 8 # Max time until reset, in seconds, if a goal wasn't achieved. Overrides episodeLength if it is > 0.
enableDebugVis: False
aggregateMode: 1
#clipObservations: 5.0
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
use_capped_dof_control: False
max_dof_radians_per_second: 3.1415
# This is to generate correct random goals
apply_random_quat: True
actionsMovingAverage:
range: [0.15, 0.35]
schedule_steps: 1000_000
schedule_freq: 500 # schedule every 500 steps for stability
controlFrequencyInv: 2 # 2 -> 30 Hz; 3 -> 20 Hz
cubeObsDelayProb: 0.3
maxObjectSkipObs: 2
# Action Delay related
# Right now the schedule steps are so large that
# the latency virtually never changes.
# Our best seed came out of this config file,
# so we are keeping it as is for now and will revisit it in the future.
actionDelayProbMax: 0.3
actionLatencyMax: 15
actionLatencyScheduledSteps: 10_000_000
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.03
resetPositionNoiseZ: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.5
resetDofVelRandomInterval: 0.0
startObjectPoseDY: -0.19
startObjectPoseDZ: 0.06
# Random forces applied to the object
forceScale: 2.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
# Random Adversarial Perturbations
random_network_adversary:
enable: True
prob: 0.15
weight_sample_freq: 1000 # steps
# Provide random cube observations to model pose jumps in the real world
random_cube_observation:
enable: True
prob: 0.3
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0001
actionDeltaPenaltyScale: -0.01
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
objectType: "block" # can be block, egg or pen
observationType: "no_vel" #"full_state" # can be "no_vel", "full_state"
asymmetric_observations: True
successTolerance: 0.4
printNumSuccesses: False
maxConsecutiveSuccesses: 50
asset:
assetFileName: "urdf/kuka_allegro_description/allegro_touch_sensor.urdf"
assetFileNameBlock: "urdf/objects/cube_multicolor_allegro.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: True
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
observations:
# There is a hidden variable `apply_white_noise_prob` which is set to 0.5
# so that the observation noise is added only 50% of the time.
dof_pos:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
object_pose_cam:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
goal_pose:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
goal_relative_rot_cam:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
last_actions:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .02] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.5]
operation: "additive"
distribution: "gaussian"
actor_params:
hand:
color: True
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 2.0]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.2, 1.2] #[0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.4]
operation: "additive"
distribution: "uniform"
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.2, 1.2] #[0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.4]
operation: "additive"
distribution: "uniform"
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1.0 #1000.0
default_buffer_size_multiplier: 75.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
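The `actionsMovingAverage` block above smooths policy actions with a per-env coefficient sampled from `range` and slowly rescheduled. A sketch of the assumed semantics (an exponential moving average over actions); the class, names, and shapes here are illustrative, not the repo's implementation.

```python
import torch

class ActionEMA:
    """Per-env exponential moving average over policy actions (illustrative)."""

    def __init__(self, num_envs: int, num_dofs: int, lo: float = 0.15, hi: float = 0.35):
        self.alpha = torch.empty(num_envs, 1).uniform_(lo, hi)  # coefficient from `range`
        self.prev = torch.zeros(num_envs, num_dofs)

    def __call__(self, action: torch.Tensor) -> torch.Tensor:
        self.prev = self.alpha * action + (1.0 - self.alpha) * self.prev
        return self.prev

ema = ActionEMA(num_envs=4, num_dofs=16)
applied = ema(torch.randn(4, 16).clamp_(-1.0, 1.0))  # smoothed action actually applied
```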
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/HumanoidAMP.yaml
|
# used to create the object
name: HumanoidAMP
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 300
cameraFollow: True # if the camera follows humanoid or not
enableDebugVis: False
pdControl: True
powerScale: 1.0
controlFrequencyInv: 2 # 30 Hz
stateInit: "Random"
hybridInitProb: 0.5
numAMPObsSteps: 2
localRootObs: False
contactBodies: ["right_foot", "left_foot"]
terminationHeight: 0.5
enableEarlyTermination: True
# animation files to learn from
# these motions should use hyperparameters from HumanoidAMPPPO.yaml
#motion_file: "amp_humanoid_walk.npy"
motion_file: "amp_humanoid_run.npy"
#motion_file: "amp_humanoid_dance.npy"
# these motions should use hyperparameters from HumanoidAMPPPOLowGP.yaml
#motion_file: "amp_humanoid_hop.npy"
#motion_file: "amp_humanoid_backflip.npy"
asset:
assetFileName: "mjcf/amp_humanoid.xml"
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 2 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
actor_params:
humanoid:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
rigid_shape_properties:
friction:
num_buckets: 500
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
restitution:
range: [0., 0.7]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
|
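The HumanoidAMP timing fields above combine as follows: `sim.dt` times `controlFrequencyInv` gives the policy period, and `episodeLength` is counted in policy steps. A quick worked check:

```python
# Illustrative arithmetic for the HumanoidAMP timing fields above.
sim_dt = 0.0166              # sim.dt, ~1/60 s
control_frequency_inv = 2    # policy acts every 2 sim steps
episode_length = 300         # env.episodeLength, in policy steps

policy_dt = sim_dt * control_frequency_inv    # ~0.033 s -> ~30 Hz policy
episode_seconds = episode_length * policy_dt  # ~10 s per episode
print(policy_dt, episode_seconds)
```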
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AnymalTerrain.yaml
|
# used to create the object
name: AnymalTerrain
physics_engine: 'physx'
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
numObservations: 188
numActions: 12
envSpacing: 3. # [m]
enableDebugVis: False
terrain:
terrainType: trimesh # none, plane, or trimesh
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
# rough terrain only:
curriculum: true
maxInitMapLevel: 0
mapLength: 8.
mapWidth: 8.
numLevels: 10
numTerrains: 20
# terrain types: [smooth slope, rough slope, stairs up, stairs down, discrete]
terrainProportions: [0.1, 0.1, 0.35, 0.25, 0.2]
# tri mesh only:
slopeTreshold: 0.5
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
# train
linear_x: [-1., 1.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-3.14, 3.14] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 80.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
# action scale: target angle = actionScale * action + defaultAngle
actionScale: 0.5
# decimation: Number of control action updates @ sim DT per policy DT
decimation: 4
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
urdfAsset:
file: "urdf/anymal_c/urdf/anymal_minimal.urdf"
footName: SHANK # SHANK if collapsing fixed joint, FOOT otherwise
kneeName: THIGH
collapseFixedJoints: True
fixBaseLink: false
defaultDofDriveMode: 4 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 4 effort)
learn:
allowKneeContacts: true
# rewards
terminalReward: 0.0
linearVelocityXYRewardScale: 1.0
linearVelocityZRewardScale: -4.0
angularVelocityXYRewardScale: -0.05
angularVelocityZRewardScale: 0.5
orientationRewardScale: -0. #-1.
torqueRewardScale: -0.00002 # -0.000025
jointAccRewardScale: -0.0005 # -0.0025
baseHeightRewardScale: -0.0 #5
feetAirTimeRewardScale: 1.0
kneeCollisionRewardScale: -0.25
feetStumbleRewardScale: -0. #-2.0
actionRateRewardScale: -0.01
# cosmetics
hipRewardScale: -0. #25
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
heightMeasurementScale: 5.0
# noise
addNoise: true
noiseLevel: 1.0 # scales other values
dofPositionNoise: 0.01
dofVelocityNoise: 1.5
linearVelocityNoise: 0.1
angularVelocityNoise: 0.2
gravityNoise: 0.05
heightMeasurementNoise: 0.06
#randomization
randomizeFriction: true
frictionRange: [0.5, 1.25]
pushRobots: true
pushInterval_s: 15
# episode length in seconds
episodeLength_s: 20
# viewer cam:
viewer:
refEnv: 0
pos: [0, 0, 10] # [m]
lookat: [1., 1, 9] # [m]
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.005
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 1
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
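The AnymalTerrain `control` block documents its own law: target angle = actionScale * action + defaultAngle, with PD gains `stiffness`/`damping` and `decimation` control updates at sim dt per policy dt. The sketch below just writes that out; `sim_step` and the tensor names are placeholders, not the repo's API.

```python
import torch

stiffness, damping = 80.0, 2.0   # [N*m/rad], [N*m*s/rad] from the config above
action_scale, decimation = 0.5, 4

def policy_step(action, dof_pos, dof_vel, default_dof_pos, sim_step):
    """One policy step: PD torques applied `decimation` times (illustrative)."""
    target = action_scale * action + default_dof_pos   # target angle formula from the config
    for _ in range(decimation):                        # control updates @ sim dt per policy dt
        torque = stiffness * (target - dof_pos) - damping * dof_vel
        dof_pos, dof_vel = sim_step(torque)            # advance physics one sim dt
    return dof_pos, dof_vel
```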
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/BallBalance.yaml
|
# used to create the object
name: BallBalance
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.0
maxEpisodeLength: 500
actionSpeedScale: 20
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01
substeps: 1
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHandLSTM_Big.yaml
|
defaults:
- AllegroHandLSTM
- _self_
|
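The two-line AllegroHandLSTM_Big.yaml above is pure Hydra composition: `AllegroHandLSTM` is loaded first, and this file's own keys (merged at the `_self_` position) override it. Conceptually this is an ordered merge; the snippet below illustrates the override order with hypothetical values, not the real contents of either file.

```python
from omegaconf import OmegaConf

parent = OmegaConf.create({"env": {"numEnvs": 16384, "episodeLength": 320}})
child = OmegaConf.create({"env": {"numEnvs": 32768}})   # hypothetical override

merged = OmegaConf.merge(parent, child)                 # later entries win
print(merged.env.numEnvs, merged.env.episodeLength)     # 32768 320
```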
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvGears.yaml
|
# See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
- /factory_schema_config_env
env:
env_name: 'IndustRealEnvGears'
gears_lateral_offset: 0.1 # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate
gears_friction: 0.5 # coefficient of friction associated with gears
base_friction: 0.5 # coefficient of friction associated with base plate
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHandLSTM.yaml
|
# used to create the object
name: AllegroHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 320 # Not used, but would be 8 sec if resetTime is not set
resetTime: 16 # Max time until reset, in seconds, if a goal wasn't achieved. Overrides episodeLength if it is > 0.
enableDebugVis: False
aggregateMode: 1
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
use_capped_dof_control: False
max_dof_radians_per_second: 3.1415
# This is to generate correct random goals
apply_random_quat: False
actionsMovingAverage:
range: [0.15, 0.35]
schedule_steps: 1000_000
#schedule_steps: 300_000
schedule_freq: 500 # schedule every 500 steps for stability
controlFrequencyInv: 2 # 2 -> 30 Hz; 3 -> 20 Hz
cubeObsDelayProb: 0.3
maxObjectSkipObs: 2
# Action Delay related
# Right now the schedule steps are so large that
# the latency virtually never changes.
# Our best seed came out of this config file,
# so we are keeping it as is for now and will revisit it in the future.
actionDelayProbMax: 0.3
actionLatencyMax: 15
actionLatencyScheduledSteps: 10_000_000
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.03
resetPositionNoiseZ: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.5
resetDofVelRandomInterval: 0.0
startObjectPoseDY: -0.19
startObjectPoseDZ: 0.06
# Random forces applied to the object
forceScale: 2.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
# Random Adversarial Perturbations
random_network_adversary:
enable: True
prob: 0.15
weight_sample_freq: 1000 # steps
# Provide random cube observations to model pose jumps in the real world
random_cube_observation:
enable: True
prob: 0.3
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0001
actionDeltaPenaltyScale: -0.01
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
objectType: "block" # can be block, egg or pen
observationType: "no_vel" #"full_state" # can be "no_vel", "full_state"
asymmetric_observations: True
successTolerance: 0.4
printNumSuccesses: False
maxConsecutiveSuccesses: 50
asset:
assetFileName: "urdf/kuka_allegro_description/allegro.urdf"
# assetFileNameBlock: "urdf/objects/cube_multicolor_dextreme.urdf"
# assetFileNameBlock: "urdf/objects/cube_multicolor_allegro.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: True
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
observations:
# There is a hidden variable `apply_white_noise_prob` which is set to 0.5
# so that the observation noise is added only 50% of the time.
dof_pos:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
object_pose_cam:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
goal_pose:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
goal_relative_rot_cam:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
last_actions:
range: [0, .005] # range for the white noise
range_correlated: [0, .01 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .02] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.5]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
#rest_offset:
#range: [0, 0.007]
#operation: "additive"
#distribution: "uniform"
#schedule: "linear"
#schedule_steps: 6000
actor_params:
hand:
# scale:
# range: [0.95, 1.05]
# operation: "scaling"
# distribution: "uniform"
# setup_only: True
color: True
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 2.0]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.2, 1.2] #[0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.4]
operation: "additive"
distribution: "uniform"
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.2, 1.2] #[0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
restitution:
num_buckets: 100
range: [0.0, 0.4]
operation: "additive"
distribution: "uniform"
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1.0 #1000.0
default_buffer_size_multiplier: 75.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryBase.yaml
|
# See schema in factory_schema_config_base.py for descriptions of parameters.
defaults:
- _self_
#- /factory_schema_config_base
mode:
export_scene: False
export_states: False
sim:
dt: 0.016667
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_damping: True
physx:
solver_type: ${....solver_type}
num_threads: ${....num_threads}
num_subscenes: ${....num_subscenes}
use_gpu: ${contains:"cuda",${....sim_device}}
num_position_iterations: 16
num_velocity_iterations: 0
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 5.0
friction_offset_threshold: 0.01
friction_correlation_distance: 0.00625
max_gpu_contact_pairs: 1048576 # 1024 * 1024
default_buffer_size_multiplier: 8.0
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
env:
env_spacing: 0.5
franka_depth: 0.5
table_height: 0.4
franka_friction: 1.0
table_friction: 0.3
|
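The `contact_collection` integers that recur throughout these files map onto the PhysX contact-reporting modes named in the inline comments. A tiny readability helper, with names assumed to mirror the isaacgym `gymapi` enum rather than imported from it:

```python
# Mapping taken directly from the inline comments in the configs above.
CONTACT_COLLECTION = {
    0: "CC_NEVER",         # don't collect contact info
    1: "CC_LAST_SUBSTEP",  # collect only contacts on the last substep
    2: "CC_ALL_SUBSTEPS",  # flagged as broken in these configs - do not use
}

def describe_contact_collection(cfg_value: int) -> str:
    return CONTACT_COLLECTION.get(cfg_value, "unknown")

print(describe_contact_collection(1))  # CC_LAST_SUBSTEP
```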
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Humanoid.yaml
|
# used to create the object
name: Humanoid
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 1000
enableDebugVis: False
clipActions: 1.0
powerScale: 1.0
# reward parameters
headingWeight: 0.5
upWeight: 0.1
# cost parameters
actionsCost: 0.01
energyCost: 0.05
dofVelocityScale: 0.1
angularVelocityScale: 0.25
contactForceScale: 0.01
jointsAtLimitCost: 0.25
deathCost: -1.0
terminationHeight: 0.8
asset:
assetFileName: "mjcf/nv_humanoid.xml"
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
actor_params:
humanoid:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
rigid_shape_properties:
friction:
num_buckets: 500
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
restitution:
range: [0., 0.7]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
|
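The friction entries above carry a `num_buckets` field: rather than sampling a fresh friction value per shape (which would multiply PhysX materials), a fixed pool of `num_buckets` values is sampled and each shape draws from that pool. The sketch below shows that assumed mechanism; the function name and defaults are hypothetical.

```python
import numpy as np

def bucketed_friction(nominal, num_shapes, num_buckets=500, low=0.7, high=1.3, rng=None):
    """Sample a pool of friction values and assign one per shape (illustrative)."""
    if rng is None:
        rng = np.random.default_rng()
    buckets = nominal * rng.uniform(low, high, size=num_buckets)  # "scaling" operation
    return rng.choice(buckets, size=num_shapes)                   # shapes share bucket values

print(bucketed_friction(nominal=1.0, num_shapes=8))
```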
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvGears.yaml
|
# See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
env:
env_name: 'FactoryEnvGears'
tight_or_loose: loose # use assets with loose (maximal clearance) or tight (minimal clearance) shafts
gears_lateral_offset: 0.1 # Y-axis offset of gears before initial reset to prevent initial interpenetration with base plate
gears_density: 1000.0 # density of gears
base_density: 2700.0 # density of base plate
gears_friction: 0.3 # coefficient of friction associated with gears
base_friction: 0.3 # coefficient of friction associated with base plate
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AllegroHand.yaml
|
# used to create the object
name: AllegroHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 600
enableDebugVis: False
aggregateMode: 1
clipObservations: 5.0
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 1.0
controlFrequencyInv: 2 # 30 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
startObjectPoseDY: -0.19
startObjectPoseDZ: 0.06
# Random forces applied to the object
forceScale: 0.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
objectType: "block" # can be block, egg or pen
observationType: "full_state" # can be "no_vel", "full_state"
asymmetric_observations: False
successTolerance: 0.1
printNumSuccesses: False
maxConsecutiveSuccesses: 0
asset:
assetFileName: "urdf/kuka_allegro_description/allegro_touch_sensor.urdf"
assetFileNameBlock: "urdf/objects/cube_multicolor_allegro.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: False
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
range_correlated: [0, .001 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .015] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
actor_params:
hand:
color: True
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
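The observation randomization in AllegroHand.yaml distinguishes `range` (white noise, redrawn every step) from `range_correlated` (a noise term held fixed and refreshed only every `frequency` simulation steps). A sketch of that assumed semantics, treating the gaussian ranges as [mean, std]; the class and shapes are illustrative.

```python
import torch

class ObsNoise:
    """White + correlated observation noise (illustrative semantics)."""

    def __init__(self, shape, white_std=0.002, corr_std=0.001, frequency=720):
        self.white_std, self.corr_std, self.frequency = white_std, corr_std, frequency
        self.corr = torch.zeros(shape)
        self.step = 0

    def __call__(self, obs: torch.Tensor) -> torch.Tensor:
        if self.step % self.frequency == 0:
            # correlated term refreshed with freq `frequency`
            self.corr = torch.randn_like(obs) * self.corr_std
        self.step += 1
        return obs + torch.randn_like(obs) * self.white_std + self.corr

noisy = ObsNoise(shape=(4, 50))(torch.zeros(4, 50))
```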
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPick.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltPick
physics_engine: ${..physics_engine}
sim:
disable_gravity: False
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 20
numActions: 12
close_and_lift: True # close gripper and lift after last step of episode
num_gripper_move_sim_steps: 20 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 25 # number of timesteps to reserve for closing gripper after last step of episode
num_gripper_lift_sim_steps: 25 # number of timesteps to reserve for lift after last step of episode
randomize:
franka_arm_initial_dof_pos: [0.3413, -0.8011, -0.0670, -1.8299, 0.0266, 1.0185, 1.0927]
fingertip_midpoint_pos_initial: [0.0, -0.2, 0.2] # initial position of hand above table
fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on hand position
fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
nut_pos_xy_initial: [0.0, -0.3] # initial XY position of nut on table
nut_pos_xy_initial_noise: [0.1, 0.1] # noise on nut position
bolt_pos_xy_initial: [0.0, 0.0] # initial position of bolt on table
bolt_pos_xy_noise: [0.1, 0.1] # noise on bolt position
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 100
success_bonus: 0.0 # bonus if nut has been lifted
ctrl:
ctrl_type: joint_space_id # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [50, 50]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [1, 1, 1, 1, 1, 1]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
|
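The `rl` block of FactoryTaskNutBoltPick scores progress with keypoints: `num_keypoints` points spread along a line of length `keypoint_scale`, a reward scaled by `keypoint_reward_scale`, and an optional `action_penalty_scale` term. The sketch below is a hypothetical version of such a reward; the actual keypoint placement and shaping live in the task class.

```python
import torch

def keypoint_reward(keypoints_a, keypoints_b, keypoint_reward_scale=1.0,
                    action=None, action_penalty_scale=0.0):
    """Negative summed distance between corresponding keypoints (illustrative)."""
    dist = torch.norm(keypoints_a - keypoints_b, dim=-1).sum(dim=-1)
    reward = -keypoint_reward_scale * dist
    if action is not None:
        reward = reward - action_penalty_scale * torch.norm(action, dim=-1) ** 2
    return reward

kp_gripper = torch.zeros(128, 4, 3)        # num_envs x num_keypoints x 3
kp_nut = torch.rand(128, 4, 3) * 0.5
print(keypoint_reward(kp_gripper, kp_nut).shape)  # torch.Size([128])
```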
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/HumanoidSAC.yaml
|
# used to create the object
defaults:
- Humanoid
- _self_
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:64,${...num_envs}}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskInsertion.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskInsertion
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
joint_noise: 0.0 # noise on Franka DOF positions [deg]
initial_state: random # initialize plugs in random state or goal state {random, goal}
plug_bias_y: -0.1 # if random, Y-axis offset of plug during each reset to prevent initial interpenetration with socket
plug_bias_z: 0.0 # if random, Z-axis offset of plug during each reset to prevent initial interpenetration with ground plane
plug_noise_xy: 0.05 # if random, XY-axis noise on plug position during each reset
rl:
max_episode_length: 1024
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvInsertion.yaml
|
# See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
env:
env_name: 'FactoryEnvInsertion'
desired_subassemblies: ['round_peg_hole_4mm_loose',
'round_peg_hole_8mm_loose',
'round_peg_hole_12mm_loose',
'round_peg_hole_16mm_loose',
'rectangular_peg_hole_4mm_loose',
'rectangular_peg_hole_8mm_loose',
'rectangular_peg_hole_12mm_loose',
'rectangular_peg_hole_16mm_loose']
plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket
# Subassembly options:
# {round_peg_hole_4mm_tight, round_peg_hole_4mm_loose,
# round_peg_hole_8mm_tight, round_peg_hole_8mm_loose,
# round_peg_hole_12mm_tight, round_peg_hole_12mm_loose,
# round_peg_hole_16mm_tight, round_peg_hole_16mm_loose,
# rectangular_peg_hole_4mm_tight, rectangular_peg_hole_4mm_loose,
# rectangular_peg_hole_8mm_tight, rectangular_peg_hole_8mm_loose,
# rectangular_peg_hole_12mm_tight, rectangular_peg_hole_12mm_loose,
# rectangular_peg_hole_16mm_tight, rectangular_peg_hole_16mm_loose,
# bnc, dsub, usb}
#
# NOTE: BNC, D-sub, and USB are currently unavailable while we await approval from manufacturers.
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Ingenuity.yaml
|
# used to create the object
name: Ingenuity
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 2.5
maxEpisodeLength: 2000
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 6
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Quadcopter.yaml
|
# used to create the object
name: Quadcopter
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
envSpacing: 1.25
maxEpisodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Trifinger.yaml
|
name: Trifinger
physics_engine: ${..physics_engine}
env:
aggregate_mode: True
control_decimation: 1
envSpacing: 1.0
numEnvs: ${resolve_default:16384,${...num_envs}}
episodeLength: 750
clipObservations: 5.0
clipActions: 1.0
task_difficulty: 4
enable_ft_sensors: false
asymmetric_obs: true
normalize_obs: true
apply_safety_damping: true
command_mode: torque
normalize_action: true
cube_obs_keypoints: true
reset_distribution:
object_initial_state:
type: random
robot_initial_state:
dof_pos_stddev: 0.4
dof_vel_stddev: 0.2
type: default
reward_terms:
finger_move_penalty:
activate: true
weight: -0.5
finger_reach_object_rate:
activate: true
norm_p: 2
weight: -250
object_dist:
activate: false
weight: 2000
object_rot:
activate: false
weight: 2000
keypoints_dist:
activate: true
weight: 2000
termination_conditions:
success:
orientation_tolerance: 0.4
position_tolerance: 0.02
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.02
substeps: 4
up_axis: z
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity:
- 0.0
- 0.0
- -9.81
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.5
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: True
randomization_params:
frequency: 750 # Define how many simulation steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
range_correlated: [0, .000 ] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "constant" is to turn on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .02]
range_correlated: [0, .01] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
actor_params:
robot:
color: True
dof_properties:
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
object:
scale:
range: [0.97, 1.03]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by ``min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
table:
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskNutBoltPlace.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskNutBoltPlace
physics_engine: ${..physics_engine}
sim:
disable_gravity: True
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 27
numActions: 12
num_gripper_move_sim_steps: 40 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 50 # number of timesteps to reserve for closing gripper onto nut during each reset
randomize:
franka_arm_initial_dof_pos: [0.00871, -0.10368, -0.00794, -1.49139, -0.00083, 1.38774, 0.7861]
fingertip_midpoint_pos_initial: [0.0, 0.0, 0.2] # initial position of midpoint between fingertips above table
fingertip_midpoint_pos_noise: [0.2, 0.2, 0.1] # noise on fingertip pos
fingertip_midpoint_rot_initial: [3.1416, 0, 3.1416] # initial rotation of fingertips (Euler)
fingertip_midpoint_rot_noise: [0.3, 0.3, 1] # noise on rotation
nut_noise_pos_in_gripper: [0.0, 0.0, 0.01] # noise on nut position within gripper
nut_noise_rot_in_gripper: 0.0 # noise on nut rotation within gripper
  bolt_pos_xy_initial: [0.0, 0.0] # initial XY position of bolt on table
  bolt_pos_xy_noise: [0.1, 0.1] # noise on bolt position
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
add_obs_bolt_tip_pos: False # add observation of bolt tip position
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 200
close_error_thresh: 0.1 # threshold below which nut is considered close enough to bolt
success_bonus: 0.0 # bonus if nut is close enough to bolt
ctrl:
ctrl_type: joint_space_id # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [100, 100]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [1, 1, 1, 1, 1, 1]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/HumanoidAMPHands.yaml
|
# used to create the object
name: HumanoidAMP
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5
episodeLength: 300
cameraFollow: True # if the camera follows humanoid or not
enableDebugVis: False
pdControl: True
powerScale: 1.0
controlFrequencyInv: 2 # 30 Hz
stateInit: "Random"
hybridInitProb: 0.5
numAMPObsSteps: 2
localRootObs: False
contactBodies: ["right_foot", "left_foot", "right_hand", "left_hand"]
terminationHeight: 0.5
enableEarlyTermination: True
# animation files to learn from
motion_file: "amp_humanoid_cartwheel.npy"
asset:
assetFileName: "mjcf/amp_humanoid.xml"
plane:
staticFriction: 1.0
dynamicFriction: 1.0
restitution: 0.0
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 10.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
# specify which attributes to randomize for each actor type and property
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
actor_params:
humanoid:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
rigid_shape_properties:
friction:
num_buckets: 500
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
restitution:
range: [0., 0.7]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryTaskGears.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
# - /factory_schema_config_task
name: FactoryTaskGears
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:128,${...num_envs}}
numObservations: 32
numActions: 12
randomize:
joint_noise: 0.0 # noise on Franka DOF positions [deg]
initial_state: random # initialize gears in random state or goal state {random, goal}
gears_bias_y: -0.1 # if random, Y-axis offset of gears during each reset to prevent initial interpenetration with base plate
gears_bias_z: 0.0 # if random, Z-axis offset of gears during each reset to prevent initial interpenetration with ground plane
gears_noise_xy: 0.05 # if random, XY-axis noise on gears during each reset
rl:
max_episode_length: 1024
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealEnvPegs.yaml
|
# See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
- /factory_schema_config_env
env:
env_name: 'IndustRealEnvPegs'
desired_subassemblies: ['round_peg_hole_8mm',
'round_peg_hole_12mm',
'round_peg_hole_16mm',
'rectangular_peg_hole_8mm',
'rectangular_peg_hole_12mm',
'rectangular_peg_hole_16mm']
plug_lateral_offset: 0.1 # Y-axis offset of plug before initial reset to prevent initial interpenetration with socket
# Density and friction values are specified in industreal_asset_info_pegs.yaml
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/ShadowHand.yaml
|
# used to create the object
name: ShadowHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 600
enableDebugVis: False
aggregateMode: 1
clipObservations: 5.0
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 1.0
controlFrequencyInv: 1 # 60 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
# Random forces applied to the object
forceScale: 0.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
# reward -> dictionary
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: 0.0
objectType: "block" # can be block, egg or pen
observationType: "full_state" # can be "openai", "full_no_vel", "full", "full_state"
asymmetric_observations: False
successTolerance: 0.1
printNumSuccesses: False
maxConsecutiveSuccesses: 0
asset:
assetFileName: "mjcf/open_ai_assets/hand/shadow_hand.xml"
assetFileNameBlock: "urdf/objects/cube_multicolor.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: False
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
range_correlated: [0, .001] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
      # schedule: "linear" # "linear" will linearly interpolate between no rand and max rand; "constant" turns on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .015] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
actor_params:
hand:
color: True
tendon_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
          # schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
            # schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealTaskGearsInsert.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
# - /factory_schema_config_task
name: IndustRealTaskGearsInsert
physics_engine: ${..physics_engine}
env:
numEnvs: 128
numObservations: 24
numStates: 47
numActions: 6
gear_medium_pos_offset: [-0.05, -0.02, 0.03]
num_gripper_move_sim_steps: 120 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 60 # number of timesteps to reserve for closing gripper onto gear during each reset
base_pos_obs_noise: [0.001, 0.001, 0.0]
base_rot_obs_noise: [0.0, 0.0, 0.0]
randomize:
franka_arm_initial_dof_pos: [-1.7574766278484677, 0.8403247702305783, 2.015877580177467, -2.0924931236718334, -0.7379389376686856, 1.6256438760537268, 1.2689337870766628]
fingertip_centered_pos_initial: [0.0, 0.0, 0.2] # initial position of midpoint between fingertips above table
fingertip_centered_pos_noise: [0.0, 0.0, 0.0] # noise on fingertip pos
fingertip_centered_rot_initial: [3.141593, 0.0, 0.0] # initial rotation of fingertips (Euler)
fingertip_centered_rot_noise: [0.0, 0.0, 0.0] # noise on fingertip rotation
base_pos_xy_initial: [0.5, 0.0, 1.0781] # initial position of gear base on table
base_pos_xy_noise: [0.1, 0.1, 0.0381] # noise on gear base position
base_pos_z_noise_bounds: [0.0, 0.05] # noise on gear base offset from table
gear_pos_xyz_noise: [0.01, 0.01, 0.0] # noise on gear position
gear_rot_noise: 0.0872665 # noise on gear rotation
rl:
pos_action_scale: [0.01, 0.01, 0.01]
rot_action_scale: [0.01, 0.01, 0.01]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
unidirectional_rot: True # constrain Franka Z-rot to be unidirectional
unidirectional_force: False # constrain Franka Z-force to be unidirectional (useful for debugging)
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
max_episode_length: 128
# SAPU
interpen_thresh: 0.001 # max allowed interpenetration between gear and shaft
# SDF-Based Reward
sdf_reward_scale: 10.0
sdf_reward_num_samples: 5000
# SBC
initial_max_disp: 0.01 # max initial downward displacement of gear at beginning of curriculum
curriculum_success_thresh: 0.6 # success rate threshold for increasing curriculum difficulty
curriculum_failure_thresh: 0.3 # success rate threshold for decreasing curriculum difficulty
curriculum_height_step: [-0.005, 0.002] # how much to increase max initial downward displacement after hitting success or failure thresh
curriculum_height_bound: [-0.005, 0.015] # max initial downward displacement of gear at hardest and easiest stages of curriculum
# Success bonus
close_error_thresh: 0.1 # threshold distance below which gear is considered close to shaft
success_height_thresh: 0.01 # threshold distance below which gear is considered successfully inserted
engagement_bonus: 10.0 # bonus if gear is engaged (partially inserted) with shaft
ctrl:
ctrl_type: task_space_impedance # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [300, 300, 600, 50, 50, 50]
task_deriv_gains: [34, 34, 34, 1.4, 1.4, 1.4]
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [20, 20, 100, 0, 0, 100]
task_deriv_gains: [1, 1, 1, 1, 1, 1]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 0, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/FactoryEnvNutBolt.yaml
|
# See schema in factory_schema_config_env.py for descriptions of common parameters.
defaults:
- FactoryBase
- _self_
- /factory_schema_config_env
sim:
disable_franka_collisions: False
disable_nut_collisions: False
disable_bolt_collisions: False
env:
env_name: 'FactoryEnvNutBolt'
desired_subassemblies: ['nut_bolt_m16_tight', 'nut_bolt_m16_loose']
nut_lateral_offset: 0.1 # Y-axis offset of nut before initial reset to prevent initial interpenetration with bolt
nut_bolt_density: 7850.0
nut_bolt_friction: 0.3
# Subassembly options:
# {nut_bolt_m4_tight, nut_bolt_m4_loose,
# nut_bolt_m8_tight, nut_bolt_m8_loose,
# nut_bolt_m12_tight, nut_bolt_m12_loose,
# nut_bolt_m16_tight, nut_bolt_m16_loose,
# nut_bolt_m20_tight, nut_bolt_m20_loose}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/ShadowHandTest.yaml
|
# used to create the object
name: ShadowHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:256,${...num_envs}}
envSpacing: 0.75
episodeLength: 1600 # 80 sec
  resetTime: 80 # Max time until reset if the goal wasn't achieved, in seconds; if > 0, this overrides episodeLength
enableDebugVis: False
aggregateMode: 1
clipObservations: 5.0
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 0.3
controlFrequencyInv: 3 # 20 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
# Random forces applied to the object
forceScale: 0.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: -50.0
objectType: "block" # can be block, egg or pen
observationType: "openai" # can be "openai", "full_no_vel", "full", "full_state"
asymmetric_observations: True
successTolerance: 0.4
printNumSuccesses: True
maxConsecutiveSuccesses: 50
averFactor: 0.1 # running mean factor for consecutive successes calculation
asset:
assetRoot: "../assets"
assetFileName: "mjcf/open_ai_assets/hand/shadow_hand.xml"
assetFileNameBlock: "urdf/objects/cube_multicolor.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: True
randomization_params:
frequency: 480000 # Define how many simulation steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
      range_correlated: [0, .001] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
schedule: "constant" # "constant" is to turn on noise after `schedule_steps` num steps
schedule_steps: 1
actions:
range: [0., .05]
range_correlated: [0, .015] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
schedule: "constant" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 1
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "constant" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 1
actor_params:
hand:
color: True
tendon_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
num_subscenes: ${....num_subscenes}
default_buffer_size_multiplier: 5.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/AntSAC.yaml
|
# used to create the object
defaults:
- Ant
- _self_
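# Note on composition (Hydra defaults above): this file inherits everything from Ant.yaml,
# and with `_self_` listed last the values below take precedence, so only numEnvs is
# overridden here; `resolve_default` presumably falls back to 64 whenever num_envs is not
# passed on the command line.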
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:64,${...num_envs}}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Cartpole.yaml
|
# used to create the object
name: Cartpole
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:512,${...num_envs}}
envSpacing: 4.0
resetDist: 3.0
maxEffort: 400.0
clipObservations: 5.0
clipActions: 1.0
asset:
assetRoot: "../../assets"
assetFileName: "urdf/cartpole.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.0166 # 1/60 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
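    # Hedged note on the interpolation syntax used throughout these configs: `contains`,
    # `eq`, and `resolve_default` are custom OmegaConf resolvers registered by IsaacGymEnvs
    # rather than built-in Hydra functions, and a relative reference such as ${....sim_device}
    # climbs out of sim.physx up to the top-level launch config where sim_device is defined.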
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 2.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/IndustRealTaskPegsInsert.yaml
|
# See schema in factory_schema_config_task.py for descriptions of common parameters.
defaults:
- IndustRealBase
- _self_
# - /factory_schema_config_task
name: IndustRealTaskPegsInsert
physics_engine: ${..physics_engine}
env:
numEnvs: 128
numObservations: 24
numStates: 47
numActions: 6
socket_base_height: 0.003
num_gripper_move_sim_steps: 120 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 60 # number of timesteps to reserve for closing gripper onto plug during each reset
socket_pos_obs_noise: [0.001, 0.001, 0.0]
socket_rot_obs_noise: [0.0, 0.0, 0.0]
randomize:
franka_arm_initial_dof_pos: [-1.7574766278484677, 0.8403247702305783, 2.015877580177467, -2.0924931236718334, -0.7379389376686856, 1.6256438760537268, 1.2689337870766628] # initial joint angles after reset; FrankX home pose
fingertip_centered_pos_initial: [0.0, 0.0, 0.2] # initial position of midpoint between fingertips above table
fingertip_centered_pos_noise: [0.0, 0.0, 0.0] # noise on fingertip pos
fingertip_centered_rot_initial: [3.141593, 0.0, 0.0] # initial rotation of fingertips (Euler)
fingertip_centered_rot_noise: [0.0, 0.0, 0.0] # noise on fingertip rotation
socket_pos_xy_initial: [0.5, 0.0] # initial position of socket on table
socket_pos_xy_noise: [0.1, 0.1] # noise on socket position
socket_pos_z_noise_bounds: [0.0, 0.05] # noise on socket offset from table
socket_rot_noise: [0.0, 0.0, 0.0872665] # noise on socket rotation
plug_pos_xy_noise: [0.01, 0.01] # noise on plug position
rl:
pos_action_scale: [0.01, 0.01, 0.01]
rot_action_scale: [0.01, 0.01, 0.01]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
unidirectional_rot: True # constrain Franka Z-rot to be unidirectional
unidirectional_force: False # constrain Franka Z-force to be unidirectional (useful for debugging)
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
max_episode_length: 256
# SAPU
interpen_thresh: 0.001 # SAPU: max allowed interpenetration between plug and socket
# SDF-Based Reward
sdf_reward_scale: 10.0
sdf_reward_num_samples: 1000
# SBC
initial_max_disp: 0.01 # max initial downward displacement of plug at beginning of curriculum
curriculum_success_thresh: 0.75 # success rate threshold for increasing curriculum difficulty
curriculum_failure_thresh: 0.5 # success rate threshold for decreasing curriculum difficulty
curriculum_height_step: [-0.005, 0.003] # how much to increase max initial downward displacement after hitting success or failure thresh
curriculum_height_bound: [-0.01, 0.01] # max initial downward displacement of plug at hardest and easiest stages of curriculum
# Success bonus
close_error_thresh: 0.15 # threshold below which plug is considered close to socket
success_height_thresh: 0.003 # threshold distance below which plug is considered successfully inserted
engagement_bonus: 10.0 # bonus if plug is engaged (partially inserted) with socket
ctrl:
ctrl_type: task_space_impedance # {gym_default,
# joint_space_ik, joint_space_id,
# task_space_impedance, operational_space_motion,
# open_loop_force, closed_loop_force,
# hybrid_force_motion}
all:
jacobian_type: geometric
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [2, 2]
gym_default:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
gripper_prop_gains: [500, 500]
gripper_deriv_gains: [20, 20]
joint_space_ik:
ik_method: dls
joint_prop_gains: [1, 1, 1, 1, 1, 1, 1]
joint_deriv_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
joint_space_id:
ik_method: dls
joint_prop_gains: [40, 40, 40, 40, 40, 40, 40]
joint_deriv_gains: [8, 8, 8, 8, 8, 8, 8]
task_space_impedance:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [300, 300, 300, 50, 50, 50]
task_deriv_gains: [34, 34, 34, 1.4, 1.4, 1.4]
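    # Hedged observation: 34 is close to 2 * sqrt(300), so the translational axes are
    # roughly critically damped for a unit-mass system, while the rotational pair
    # (50, 1.4) is far more lightly damped.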
operational_space_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [60, 60, 60, 5, 5, 5]
task_deriv_gains: [15.5, 15.5, 15.5, 4.5, 4.5, 4.5]
open_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
closed_loop_force:
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
hybrid_force_motion:
motion_ctrl_axes: [1, 1, 1, 1, 1, 1]
task_prop_gains: [40, 40, 40, 40, 40, 40]
task_deriv_gains: [8, 8, 8, 8, 8, 8]
force_ctrl_axes: [0, 0, 1, 0, 0, 0]
wrench_prop_gains: [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/ShadowHandOpenAI_FF.yaml
|
# used to create the object
name: ShadowHand
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:16384,${...num_envs}}
envSpacing: 0.75
episodeLength: 160 # Not used, but would be 8 sec if resetTime is not set
  resetTime: 8 # Max time until reset, in seconds, if a goal wasn't achieved; overrides episodeLength if it is > 0
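  # Worked check from the values in this file: dt = 1/60 s with controlFrequencyInv = 3
  # gives a 20 Hz control rate, so resetTime = 8 s corresponds to 8 * 20 = 160 control
  # steps, matching the episodeLength above.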
enableDebugVis: False
aggregateMode: 1
clipObservations: 5.0
clipActions: 1.0
stiffnessScale: 1.0
forceLimitScale: 1.0
useRelativeControl: False
dofSpeedScale: 20.0
actionsMovingAverage: 0.3
controlFrequencyInv: 3 # 20 Hz
startPositionNoise: 0.01
startRotationNoise: 0.0
resetPositionNoise: 0.01
resetRotationNoise: 0.0
resetDofPosRandomInterval: 0.2
resetDofVelRandomInterval: 0.0
# Random forces applied to the object
forceScale: 1.0
forceProbRange: [0.001, 0.1]
forceDecay: 0.99
forceDecayInterval: 0.08
distRewardScale: -10.0
rotRewardScale: 1.0
rotEps: 0.1
actionPenaltyScale: -0.0002
reachGoalBonus: 250
fallDistance: 0.24
fallPenalty: -50.0
objectType: "block" # can be block, egg or pen
observationType: "openai" # can be "openai", "full_no_vel", "full","full_state"
asymmetric_observations: True
successTolerance: 0.4
printNumSuccesses: False
maxConsecutiveSuccesses: 50
averFactor: 0.1 # running mean factor for consecutive successes calculation
asset:
assetRoot: "../assets"
assetFileName: "mjcf/open_ai_assets/hand/shadow_hand.xml"
assetFileNameBlock: "urdf/objects/cube_multicolor.urdf"
assetFileNameEgg: "mjcf/open_ai_assets/hand/egg.xml"
assetFileNamePen: "mjcf/open_ai_assets/hand/pen.xml"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
task:
randomize: True
randomization_params:
frequency: 720 # Define how many simulation steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
      range_correlated: [0, .001] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
      # schedule: "linear" # "linear" will linearly interpolate between no rand and max rand; "constant" turns on noise after `schedule_steps` num steps
# schedule_steps: 40000
actions:
range: [0., .05]
range_correlated: [0, .015] # range for correlated noise, refreshed with freq `frequency`
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 40000
actor_params:
hand:
color: True
tendon_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
dof_properties:
damping:
range: [0.3, 3.0]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
stiffness:
range: [0.75, 1.5]
operation: "scaling"
distribution: "loguniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
object:
scale:
range: [0.95, 1.05]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
          # schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
            # schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
rigid_shape_properties:
friction:
num_buckets: 250
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
# schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
# schedule_steps: 30000
sim:
dt: 0.01667 # 1/60
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 8
num_velocity_iterations: 0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_offset: 0.002
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 1000.0
default_buffer_size_multiplier: 5.0
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/Anymal.yaml
|
# used to create the object
name: Anymal
physics_engine: ${..physics_engine}
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 4. # [m]
clipObservations: 5.0
clipActions: 1.0
plane:
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
baseInitState:
pos: [0.0, 0.0, 0.62] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
randomCommandVelocityRanges:
linear_x: [-2., 2.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-1., 1.] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 85.0 # [N*m/rad]
damping: 2.0 # [N*m*s/rad]
actionScale: 0.5
controlFrequencyInv: 1 # 60 Hz
defaultJointAngles: # = target angles when action = 0.0
LF_HAA: 0.03 # [rad]
LH_HAA: 0.03 # [rad]
RF_HAA: -0.03 # [rad]
RH_HAA: -0.03 # [rad]
LF_HFE: 0.4 # [rad]
LH_HFE: -0.4 # [rad]
RF_HFE: 0.4 # [rad]
RH_HFE: -0.4 # [rad]
LF_KFE: -0.8 # [rad]
LH_KFE: 0.8 # [rad]
RF_KFE: -0.8 # [rad]
RH_KFE: 0.8 # [rad]
urdfAsset:
collapseFixedJoints: True
fixBaseLink: False
defaultDofDriveMode: 4 # see GymDofDriveModeFlags (0 is none, 1 is pos tgt, 2 is vel tgt, 4 effort)
learn:
# rewards
linearVelocityXYRewardScale: 1.0
angularVelocityZRewardScale: 0.5
torqueRewardScale: -0.000025
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
# episode length in seconds
episodeLength_s: 50
# viewer cam:
viewer:
refEnv: 0
pos: [0, 0, 4] # [m]
lookat: [1., 1, 3.3] # [m]
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.02
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 1
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 5.0
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 1 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
task:
randomize: False
randomization_params:
frequency: 600 # Define how many environment steps between generating new randomizations
observations:
range: [0, .002] # range for the white noise
operation: "additive"
distribution: "gaussian"
actions:
range: [0., .02]
operation: "additive"
distribution: "gaussian"
sim_params:
gravity:
range: [0, 0.4]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
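        # Worked example of the linear schedule: after 1500 environment steps the sampled
        # gravity offset is applied at min(1500, 3000) / 3000 = 0.5 of its full strength;
        # from step 3000 onward it is applied at full strength.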
actor_params:
anymal:
color: True
rigid_body_properties:
mass:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
setup_only: True # Property will only be randomized once before simulation is started. See Domain Randomization Documentation for more info.
schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
schedule_steps: 3000
rigid_shape_properties:
friction:
num_buckets: 500
range: [0.7, 1.3]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
restitution:
range: [0., 0.7]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
dof_properties:
damping:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
stiffness:
range: [0.5, 1.5]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
lower:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
upper:
range: [0, 0.01]
operation: "additive"
distribution: "gaussian"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 3000
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/ShadowHandOpenAI_LSTM.yaml
|
# specifies what the config is when running `ShadowHandOpenAI` in LSTM mode
defaults:
- ShadowHandOpenAI_FF
- _self_
env:
numEnvs: ${resolve_default:8192,${...num_envs}}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/regrasping.yaml
|
subtask: "regrasping"
episodeLength: 300
# requires holding a grasp for a whole second, thus trained policies develop a robust grasp
successSteps: 30
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/throw.yaml
|
subtask: "throw"
episodeLength: 300
forceScale: 0.0 # random forces don't allow us to throw precisely so we turn them off
# curriculum not needed - if we hit a bin, that's good!
successTolerance: 0.075
targetSuccessTolerance: 0.075
# adds a small pause every time we hit a target
successSteps: 5
# throwing big objects is hard and they don't fit in the bin, so focus on randomized but smaller objects
withSmallCuboids: True
withBigCuboids: False
withSticks: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/task/env/reorientation.yaml
|
# reorientation is a default task
subtask: "reorientation"
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FactoryTaskGearsPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskGears,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:8192,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 32
minibatch_size: 512 # batch size = num_envs * horizon_length; minibatch_size = batch_size / num_minibatches
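    # Worked example of the comment above, assuming the default of 128 envs from
    # FactoryTaskGears.yaml: batch size = 128 * 32 (horizon_length) = 4096, giving
    # 4096 / 512 = 8 minibatches; overriding num_envs changes this ratio.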
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_FFPPO.yaml
|
# specifies what the default training mode is when
# running `ShadowHandOpenAI_FF` (version with DR and asymmetric observations and feedforward network)
# (currently defaults to asymmetric training)
defaults:
- ShadowHandPPOAsymm
- _self_
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AnymalTerrainPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
# rnn:
# name: lstm
# units: 128
# layers: 1
# before_mlp: True
# concat_input: True
# layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AnymalTerrain,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
ppo: True
multi_gpu: ${....multi_gpu}
mixed_precision: True
normalize_input: True
normalize_value: True
normalize_advantage: True
value_bootstrap: True
clip_actions: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.001
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 24
minibatch_size: 16384
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_len: 4 # only for rnn
bounds_loss_coef: 0.
max_epochs: ${resolve_default:1500,${....max_iterations}}
save_best_after: 100
score_to_win: 20000
save_frequency: 50
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaTwoArmsLSTMPPO.yaml
|
defaults:
- AllegroKukaLSTMPPO
- _self_
# TODO: try bigger network for two hands?
params:
network:
mlp:
units: [768, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 768
layers: 1
before_mlp: True
layer_norm: True
config:
name: ${resolve_default:AllegroKukaTwoArmsLSTMPPO,${....experiment}}
minibatch_size: 32768
mini_epochs: 2
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/HumanoidPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Humanoid,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
ppo: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroHandDextremeManualDRPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
inputs:
dof_pos_randomized: { }
object_pose_cam_randomized: { }
goal_pose_randomized: { }
goal_relative_rot_cam_randomized: { }
last_actions_randomized: { }
mlp:
units: [256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 512
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroHandManualDRAsymmLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:50000,${....max_iterations}}
save_best_after: 200
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 16
bounds_loss_coef: 0.0001
zero_rnn_on_done: False
central_value_config:
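      # Asymmetric actor-critic note (inferred from the two input lists in this file): the
      # value network below consumes privileged, non-randomized state such as object_pose,
      # object_vels, ft_states, and gravity_vec, while the policy inputs above are limited
      # to the randomized observations.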
minibatch_size: 16384
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
inputs:
dof_pos: { }
dof_vel: { }
dof_force: { }
object_pose: { }
object_pose_cam_randomized: { }
object_vels: { }
goal_pose: { }
goal_relative_rot: {}
last_actions: { }
ft_force_torques: {}
gravity_vec: {}
ft_states: {}
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
player:
deterministic: True
use_vecenv: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/IndustRealTaskPegsInsertPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 256
layers: 2
before_mlp: True
concat_input: True
layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:IndustRealTaskPegsInsert,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-3
lr_schedule: linear
schedule_type: standard
kl_threshold: 0.016
score_to_win: 200000
max_epochs: ${resolve_default:8192,${....max_iterations}}
save_best_after: 10
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 8192 # batch size = num_envs * horizon_length; minibatch_size = batch_size / num_minibatches
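    # Worked example: numEnvs is fixed at 128 in IndustRealTaskPegsInsert.yaml and
    # horizon_length is 128, so batch size = 128 * 128 = 16384, i.e. 16384 / 8192 = 2
    # minibatches per mini-epoch.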
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 8
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 256
mini_epochs: 4
learning_rate: 1e-3
lr_schedule: linear
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
# rnn:
# name: lstm
# units: 256
# layers: 2
# before_mlp: True
# concat_input: True
# layer_norm: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandPPOAsymmLSTM.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:ShadowHandAsymmLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 500
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
player:
#render: True
deterministic: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FactoryTaskInsertionPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskInsertion,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:8192,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 32
minibatch_size: 512 # batch size = num_envs * horizon_length; minibatch_size = batch_size / num_minibatches
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroHandDextremeADRPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
inputs:
dof_pos_randomized: {}
object_pose_cam_randomized: { }
goal_pose: { }
goal_relative_rot_cam_randomized: { }
last_actions: { }
mlp:
units: [512, 512]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroHandADRAsymmLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: linear #adaptive
schedule_type: standard
kl_threshold: 0.01
score_to_win: 1000000
max_epochs: ${resolve_default:1000_000,${....max_iterations}}
save_best_after: 10000
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.002
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 16
bound_loss_type: regularization
bounds_loss_coef: 0.005
zero_rnn_on_done: False
# optimize summaries to prevent tf.event files from growing to gigabytes
force_interval_writer: True
central_value_config:
minibatch_size: 16384
mini_epochs: 4
learning_rate: 5e-5
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
inputs:
dof_pos: { }
dof_vel: { }
dof_force: { }
object_pose: { }
object_pose_cam_randomized: { }
object_vels: { }
goal_pose: { }
goal_relative_rot: {}
last_actions: { }
stochastic_delay_params: { }
affine_params: { }
cube_random_params: {}
hand_random_params: {}
ft_force_torques: {}
gravity_vec: {}
ft_states: {}
rot_dist: {}
rb_forces: {}
mlp:
units: [1024, 512]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 2048
layers: 1
before_mlp: True
layer_norm: True
player:
deterministic: True
use_vecenv: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [1024, 1024, 512, 512]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroKukaPPO,${....experiment}}
# full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
normalize_advantage: True
reward_shaper:
scale_value: 0.01
num_actors: ${....task.env.numEnvs}
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 1000000
max_epochs: 100000
max_frames: 10_000_000_000
save_best_after: 100
save_frequency: 5000
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.1
minibatch_size: 32768
mini_epochs: 4
critic_coef: 4.0
clip_value: True
horizon_length: 16
seq_length: 16
# SampleFactory currently gives better results without bounds loss but I don't think this loss matters too much
# bounds_loss_coef: 0.0
bounds_loss_coef: 0.0001
# optimize summaries to prevent tf.event files from growing to gigabytes
defer_summaries_sec: ${if:${....pbt},240,5}
summaries_interval_sec_min: ${if:${....pbt},60,5}
summaries_interval_sec_max: 300
player:
#render: True
deterministic: False # be careful there's a typo in older versions of rl_games in this parameter name ("determenistic")
games_num: 100000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandPPOAsymm.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 400, 200, 100]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:ShadowHandAsymm,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
num_actors: ${....task.env.numEnvs}
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 500
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 8
minibatch_size: 16384
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 16384
mini_epochs: 8
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
player:
#render: True
deterministic: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:ShadowHand,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:5000,${....max_iterations}}
save_best_after: 100
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 8
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
#render: True
deterministic: True
games_num: 100000
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroHandLSTM_BigPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: complex_net
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
inputs:
dof_pos_offset_randomized: {}
object_pose_delayed_randomized: {}
goal_pose: {}
goal_relative_rot_delayed_randomized: {}
last_actions: {}
mlp:
units: [512]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 1024
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroHandAsymmLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
use_smooth_clamp: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:1000000,${....max_iterations}}
save_best_after: 200
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 16
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 16384 #32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
use_smooth_clamp: True
network:
name: complex_net
central_value: True
inputs:
dof_pos: {}
dof_pos_offset_randomized: {}
dof_vel: {}
dof_torque: {}
object_pose: {}
object_vels: {}
goal_pose: {}
goal_relative_rot: {}
object_pose_delayed_randomized: {}
goal_relative_rot_delayed_randomized: {}
object_obs_delayed_age: {}
# ft_states: {}
# ft_force_torques: {}
last_actions: {}
mlp:
units: [512, 512, 256, 128] #[256] #
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
# rnn:
# name: lstm
# units: 512
# layers: 1
# before_mlp: True
# layer_norm: True
player:
deterministic: True
use_vecenv: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroHandLSTMPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
inputs:
dof_pos_randomized: { }
object_pose_cam_randomized: { }
goal_pose_randomized: { }
goal_relative_rot_cam_randomized: { }
last_actions_randomized: { }
mlp:
units: [256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 512
layers: 1
before_mlp: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroHandAsymmLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: False # TODO: enable bootstrap?
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:50000,${....max_iterations}}
save_best_after: 200
save_frequency: 500
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 4
critic_coef: 4
clip_value: True
seq_length: 16
bounds_loss_coef: 0.0001
# optimize summaries to prevent tf.event files from growing to gigabytes
defer_summaries_sec: ${if:${....pbt},150,5}
summaries_interval_sec_min: ${if:${....pbt},20,5}
summaries_interval_sec_max: 100
central_value_config:
minibatch_size: 16384 #32768
mini_epochs: 4
learning_rate: 1e-4
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
inputs:
dof_pos: { }
dof_vel: { }
dof_force: { }
object_pose: { }
object_pose_cam_randomized: { }
object_vels: { }
goal_pose: { }
goal_relative_rot: {}
last_actions: { }
ft_force_torques: {}
gravity_vec: {}
ft_states: {}
mlp:
units: [512, 256, 128] #[256] #
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 512
layers: 1
before_mlp: True
layer_norm: True
player:
deterministic: True
use_vecenv: True
games_num: 1000000
print_stats: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/HumanoidAMPPPO.yaml
|
params:
seed: ${...seed}
algo:
name: amp_continuous
model:
name: continuous_amp
network:
name: amp
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: -2.9
fixed_sigma: True
learn_sigma: False
mlp:
units: [1024, 512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
disc:
units: [1024, 512]
activation: relu
initializer:
name: default
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:HumanoidAMP,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
ppo: True
multi_gpu: ${....multi_gpu}
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-5
lr_schedule: constant
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:5000,${....max_iterations}}
save_best_after: 100
save_frequency: 50
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 6
critic_coef: 5
clip_value: False
seq_len: 4
bounds_loss_coef: 10
amp_obs_demo_buffer_size: 200000
amp_replay_buffer_size: 1000000
amp_replay_keep_prob: 0.01
amp_batch_size: 512
amp_minibatch_size: 4096
disc_coef: 5
disc_logit_reg: 0.05
disc_grad_penalty: 5
disc_reward_scale: 2
disc_weight_decay: 0.0001
normalize_amp_input: True
task_reward_w: 0.0
disc_reward_w: 1.0
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AnymalTerrainPPO_LSTM.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [512] #, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 256 #128
layers: 1
before_mlp: False #True
concat_input: True
layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AnymalTerrain,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
normalize_advantage: True
value_bootstrap: True
clip_actions: False
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.001
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 24
minibatch_size: 16384
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_len: 4 # only for rnn
bounds_loss_coef: 0.
max_epochs: ${resolve_default:750,${....max_iterations}}
save_best_after: 100
score_to_win: 20000
save_frequency: 50
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FrankaCubeStackPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaCubeStack,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.008
score_to_win: 10000
max_epochs: ${resolve_default:10000,${....max_iterations}}
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 16384
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/HumanoidSAC.yaml
|
params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: ${resolve_default:HumanoidSAC,${....experiment}}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: 50000
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAI_LSTMPPO.yaml
|
# specifies what the default training mode is when
# running `ShadowHandOpenAI_LSTM` (version with DR and asymmetric observations, and LSTM)
defaults:
- ShadowHandPPOAsymmLSTM
- _self_
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/IngenuityPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ingenuity,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/QuadcopterPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Quadcopter,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-3
lr_schedule: adaptive
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 50
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 8
minibatch_size: 16384
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FactoryTaskNutBoltScrewPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltScrew,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1.0e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1024,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 512 # batch size = num_envs * horizon_length; minibatch_size = batch_size / num_minibatches
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandPPOLSTM.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 256
layers: 1
before_mlp: False
concat_input: True
layer_norm: True
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:ShadowHandLSTM,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
save_best_after: 500
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 32
minibatch_size: 16384
mini_epochs: ${resolve_default:4,${....max_iterations}}
critic_coef: 4
clip_value: False
seq_len: 4
bounds_loss_coef: 0.0001
player:
#render: True
deterministic: True
games_num: 100000
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/BallBalancePPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:BallBalance,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:250,${....max_iterations}}
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FactoryTaskNutBoltPlacePPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPlace,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1024,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 120
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/IndustRealTaskGearsInsertPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 256
layers: 2
before_mlp: True
concat_input: True
layer_norm: False
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:IndustRealTaskGearsInsert,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.998
tau: 0.95
learning_rate: 1e-4
lr_schedule: linear
schedule_type: standard
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:8192,${....max_iterations}}
save_best_after: 50
save_frequency: 1000
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 128
minibatch_size: 8 # batch size = num_envs * horizon_length; minibatch_size = batch_size / num_minibatches
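# e.g. if numEnvs were 4 (illustrative value only, set via task.env.numEnvs): batch size = 4 * 128 = 512, num_minibatches = 512 / 8 = 64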
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: 8
mini_epochs: 4
learning_rate: 1e-3
lr_schedule: linear
kl_threshold: 0.016
clip_value: True
normalize_input: True
truncate_grads: True
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AntPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Ant,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:500,${....max_iterations}}
save_best_after: 200
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 4
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FrankaCabinetPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:FrankaCabinet,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 10000
max_epochs: ${resolve_default:1500,${....max_iterations}}
save_best_after: 200
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AntSAC.yaml
|
params:
seed: ${...seed}
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: ${resolve_default:AntSAC,${....experiment}}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
normalize_input: True
reward_shaper:
scale_value: 1.0
max_epochs: 20000
num_steps_per_episode: 8
save_best_after: 100
save_frequency: 1000
gamma: 0.99
init_alpha: 1.0
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
num_warmup_steps: 10
replay_buffer_size: 1000000
num_actors: ${....task.env.numEnvs}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroKukaLSTMPPO.yaml
|
defaults:
- AllegroKukaPPO
- _self_
params:
network:
mlp:
units: [768, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
rnn:
name: lstm
units: 768
layers: 1
before_mlp: True
layer_norm: True
config:
name: ${resolve_default:AllegroKukaLSTMPPO,${....experiment}}
minibatch_size: 32768
mini_epochs: 2
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AllegroHandPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:AllegroHand,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
lr_schedule: adaptive
schedule_type: standard
kl_threshold: 0.016
score_to_win: 100000
max_epochs: ${resolve_default:5000,${....max_iterations}}
save_best_after: 500
save_frequency: 200
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 8
minibatch_size: 32768
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
#render: True
deterministic: True
games_num: 100000
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/TrifingerPPO.yaml
|
asymmetric_obs: true
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: false
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: true
mlp:
units: [256, 256, 128, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Trifinger,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: true
mixed_precision: false
normalize_input: true
normalize_value: true
reward_shaper:
scale_value: 0.01
normalize_advantage: true
gamma: 0.99
tau: 0.95
learning_rate: 0.0003
lr_schedule: constant
use_experimental_cv: true
schedule_type: standard
kl_threshold: 0.016
score_to_win: 500000
max_epochs: ${resolve_default:20000,${....max_iterations}}
save_best_after: 100
save_frequency: 100
print_stats: true
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: true
e_clip: 0.2
horizon_length: 8
minibatch_size: ${.num_actors}
mini_epochs: 4
critic_coef: 4
clip_value: true
seq_len: 4
bounds_loss_coef: 0.0001
central_value_config:
minibatch_size: ${..num_actors}
mini_epochs: ${..mini_epochs}
learning_rate: 0.0005
lr_schedule: linear
schedule_type: standard
kl_threshold: 0.016
clip_value: true
normalize_input: true
truncate_grads: true
network:
name: actor_critic
central_value: true
mlp:
units: [512, 512, 256, 128]
activation: elu
d2rl: false
initializer:
name: default
regularizer:
name: None
player:
deterministic: true
games_num: 1000000
print_stats: false
num_actors: ${....task.env.numEnvs}
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/AnymalPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0. # std = 1.
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Anymal,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
e_clip: 0.2
entropy_coef: 0.0
learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
lr_schedule: adaptive
kl_threshold: 0.008 # target kl for adaptive lr
truncate_grads: True
grad_norm: 1.
horizon_length: 24
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: True
seq_len: 4 # only for rnn
bounds_loss_coef: 0.001
max_epochs: ${resolve_default:1000,${....max_iterations}}
save_best_after: 200
score_to_win: 20000
save_frequency: 50
print_stats: True
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/CartpolePPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:Cartpole,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:100,${....max_iterations}}
save_best_after: 50
save_frequency: 25
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 16
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/ShadowHandOpenAIPPO.yaml
|
# specifies what the default training mode is when
# running `ShadowHandOpenAI` (version with DR and asymmetric observations)
# (currently defaults to asymmetric training)
defaults:
- ShadowHandPPOAsymm
- _self_
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/FactoryTaskNutBoltPickPPO.yaml
|
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
name: ${resolve_default:FactoryTaskNutBoltPick,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
multi_gpu: ${....multi_gpu}
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
lr_schedule: fixed
schedule_type: standard
kl_threshold: 0.016
score_to_win: 20000
max_epochs: ${resolve_default:1024,${....max_iterations}}
save_best_after: 50
save_frequency: 100
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 120
minibatch_size: 512
mini_epochs: 8
critic_coef: 2
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/train/HumanoidAMPPPOLowGP.yaml
|
params:
seed: ${...seed}
algo:
name: amp_continuous
model:
name: continuous_amp
network:
name: amp
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: -2.9
fixed_sigma: True
learn_sigma: False
mlp:
units: [1024, 512]
activation: relu
d2rl: False
initializer:
name: default
regularizer:
name: None
disc:
units: [1024, 512]
activation: relu
initializer:
name: default
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
name: ${resolve_default:HumanoidAMP,${....experiment}}
full_experiment_name: ${.name}
env_name: rlgpu
ppo: True
multi_gpu: ${....multi_gpu}
mixed_precision: False
normalize_input: True
normalize_value: True
value_bootstrap: True
num_actors: ${....task.env.numEnvs}
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-5
lr_schedule: constant
kl_threshold: 0.008
score_to_win: 20000
max_epochs: ${resolve_default:5000,${....max_iterations}}
save_best_after: 100
save_frequency: 50
print_stats: True
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: False
e_clip: 0.2
horizon_length: 16
minibatch_size: 32768
mini_epochs: 6
critic_coef: 5
clip_value: False
seq_len: 4
bounds_loss_coef: 10
amp_obs_demo_buffer_size: 200000
amp_replay_buffer_size: 1000000
amp_replay_keep_prob: 0.01
amp_batch_size: 512
amp_minibatch_size: 4096
disc_coef: 5
disc_logit_reg: 0.05
disc_grad_penalty: 0.2
disc_reward_scale: 2
disc_weight_decay: 0.0001
normalize_amp_input: True
task_reward_w: 0.0
disc_reward_w: 1.0
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/pbt_default.yaml
|
defaults:
- mutation: default_mutation
enabled: True
policy_idx: 0 # policy index in a population: should always be specified explicitly! Each run in a population should have a unique idx from [0..N-1]
num_policies: 8 # total number of policies in the population, the total number of learners. Override through CLI!
workspace: "pbt_workspace" # suffix of the workspace dir name inside train_dir, used to distinguish different PBT runs with the same experiment name. Recommended to specify a unique name
# special mode that enables PBT features for debugging even if only one policy is present. Never enable in actual experiments
dbg_mode: False
# PBT hyperparams
interval_steps: 10000000 # Interval in env steps between PBT iterations (checkpointing, mutation, etc.)
start_after: 10000000 # Start PBT after this many env frames are collected; this applies to all experiment restarts, i.e. when we resume training after the weights are mutated
initial_delay: 20000000 # This is a separate delay for when we're just starting the training session. It makes sense to give policies a bit more time to develop different behaviors
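# Together with the check in PbtAlgoObserver.after_steps (isaacgymenvs/pbt/pbt.py), this means a policy's weights cannot be replaced before initial_delay env frames in a fresh session, and after any restart not before another start_after frames have been collected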
# Fraction of the underperforming policies whose weights are to be replaced by better performing policies
# This is rounded up, i.e. for 8 policies and fraction 0.3 we replace ceil(0.3*8)=3 worst policies
replace_fraction_worst: 0.125
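# With the defaults in this file (num_policies=8, fraction 0.125) that is ceil(0.125 * 8) = 1 policy replaced per PBT iteration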
# Fraction of agents used to sample weights from when we replace an underperforming agent
# This is also rounded up
replace_fraction_best: 0.3
# Replace an underperforming policy only if its reward is lower by at least this fraction of standard deviation
# within the population.
replace_threshold_frac_std: 0.5
# Replace an underperforming policy only if its reward is lower by at least this fraction of the absolute value
# of the objective of a better policy
replace_threshold_frac_absolute: 0.05
# Probability to mutate a certain parameter
mutation_rate: 0.15
# min and max values for the mutation of a parameter
# The mutation is performed by multiplying or dividing (randomly) the parameter value by a value sampled from [change_min, change_max]
change_min: 1.1
change_max: 1.5
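# Illustrative example: a learning_rate of 1e-4 mutated once is either scaled up into [1.1e-4, 1.5e-4] or scaled down into [~6.7e-5, ~9.1e-5]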
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/no_pbt.yaml
|
enabled: False
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/ant_mutation.yaml
|
task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
train.params.config.tau: "mutate_discount"
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/humanoid_mutation.yaml
|
task.env.headingWeight: "mutate_float"
task.env.upWeight: "mutate_float"
task.env.fingertipDeltaRewScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/default_mutation.yaml
|
train.params.config.reward_shaper.scale_value: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_hand_mutation.yaml
|
task.env.dist_reward_scale: "mutate_float"
task.env.rot_reward_scale: "mutate_float"
task.env.rot_eps: "mutate_float"
task.env.reach_goal_bonus: "mutate_float"
# Could be additionally mutated
#task.env.actionPenaltyScale: "mutate_float"
#task.env.actionDeltaPenaltyScale: "mutate_float"
#task.env.startObjectPoseDY: "mutate_float"
#task.env.startObjectPoseDZ: "mutate_float"
#task.env.fallDistance: "mutate_float"
train.params.config.learning_rate: "mutate_float"
train.params.config.grad_norm: "mutate_float"
train.params.config.entropy_coef: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/cfg/pbt/mutation/allegro_kuka_mutation.yaml
|
task.env.distRewardScale: "mutate_float"
task.env.rotRewardScale: "mutate_float"
task.env.actionPenaltyScale: "mutate_float"
task.env.liftingRewScale: "mutate_float"
task.env.liftingBonus: "mutate_float"
task.env.liftingBonusThreshold: "mutate_float"
task.env.keypointRewScale: "mutate_float"
task.env.distanceDeltaRewScale: "mutate_float"
task.env.reachGoalBonus: "mutate_float"
task.env.kukaActionsPenaltyScale: "mutate_float"
task.env.allegroActionsPenaltyScale: "mutate_float"
task.env.fallDistance: "mutate_float"
# Could be additionally mutated
#train.params.config.learning_rate: "mutate_float"
#train.params.config.entropy_coef: "mutate_float" # this is 0, no reason to mutate
train.params.config.grad_norm: "mutate_float"
train.params.config.critic_coef: "mutate_float"
train.params.config.bounds_loss_coef: "mutate_float"
train.params.config.kl_threshold: "mutate_float"
train.params.config.e_clip: "mutate_eps_clip"
train.params.config.mini_epochs: "mutate_mini_epochs"
train.params.config.gamma: "mutate_discount"
# These would require special mutation rules
# 'train.params.config.steps_num': 8
# 'train.params.config.minibatch_size': 256
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/__init__.py
| |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/pbt.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import os
import random
import shutil
import sys
import time
from os.path import join
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import yaml
from omegaconf import DictConfig
from rl_games.algos_torch.torch_ext import safe_filesystem_op, safe_save
from rl_games.common.algo_observer import AlgoObserver
from isaacgymenvs.pbt.mutation import mutate
from isaacgymenvs.utils.reformat import omegaconf_to_dict
from isaacgymenvs.utils.utils import flatten_dict, project_tmp_dir, safe_ensure_dir_exists
# placeholder value for the target objective when it is not yet known
_UNINITIALIZED_VALUE = float(-1e9)
def _checkpnt_name(iteration):
return f"{iteration:06d}.yaml"
def _model_checkpnt_name(iteration):
return f"{iteration:06d}.pth"
def _flatten_params(params: Dict, prefix="", separator=".") -> Dict:
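# e.g. {"train": {"params": {"config": {"gamma": 0.99}}}} -> {"train.params.config.gamma": 0.99}, matching the key format used in cfg/pbt/mutation/*.yaml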
all_params = flatten_dict(params, prefix, separator)
return all_params
def _filter_params(params: Dict, params_to_mutate: Dict) -> Dict:
filtered_params = dict()
for key, value in params.items():
if key in params_to_mutate:
if isinstance(value, str):
try:
# trying to convert values such as "1e-4" to floats because yaml fails to recognize them as such
float_value = float(value)
value = float_value
except ValueError:
pass
filtered_params[key] = value
return filtered_params
class PbtParams:
def __init__(self, cfg: DictConfig):
params: Dict = omegaconf_to_dict(cfg)
pbt_params = params["pbt"]
self.replace_fraction_best = pbt_params["replace_fraction_best"]
self.replace_fraction_worst = pbt_params["replace_fraction_worst"]
self.replace_threshold_frac_std = pbt_params["replace_threshold_frac_std"]
self.replace_threshold_frac_absolute = pbt_params["replace_threshold_frac_absolute"]
self.mutation_rate = pbt_params["mutation_rate"]
self.change_min = pbt_params["change_min"]
self.change_max = pbt_params["change_max"]
self.task_name = params["task"]["name"]
self.dbg_mode = pbt_params["dbg_mode"]
self.policy_idx = pbt_params["policy_idx"]
self.num_policies = pbt_params["num_policies"]
self.num_envs = params["task"]["env"]["numEnvs"]
self.workspace = pbt_params["workspace"]
self.interval_steps = pbt_params["interval_steps"]
self.start_after_steps = pbt_params["start_after"]
self.initial_delay_steps = pbt_params["initial_delay"]
self.params_to_mutate = pbt_params["mutation"]
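# flatten the full Hydra config and keep only the keys listed in the mutation config; these flattened key-value pairs are the mutation candidates for this policy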
mutable_params = _flatten_params(params)
self.mutable_params = _filter_params(mutable_params, self.params_to_mutate)
self.with_wandb = params["wandb_activate"]
RLAlgo = Any # just for readability
def _restart_process_with_new_params(
policy_idx: int,
new_params: Dict,
restart_from_checkpoint: Optional[str],
experiment_name: Optional[str],
algo: Optional[RLAlgo],
with_wandb: bool,
) -> None:
cli_args = sys.argv
modified_args = [cli_args[0]] # initialize with path to the Python script
for arg in cli_args[1:]:
if "=" not in arg:
modified_args.append(arg)
else:
assert "=" in arg
arg_name, arg_value = arg.split("=")
if arg_name in new_params or arg_name in [
"checkpoint",
"+full_experiment_name",
"hydra.run.dir",
"++pbt_restart",
]:
# skip this parameter, it will be added later!
continue
modified_args.append(f"{arg_name}={arg_value}")
modified_args.append(f"hydra.run.dir={os.getcwd()}")
modified_args.append(f"++pbt_restart=True")
if experiment_name is not None:
modified_args.append(f"+full_experiment_name={experiment_name}")
if restart_from_checkpoint is not None:
modified_args.append(f"checkpoint={restart_from_checkpoint}")
# add all the new (possibly mutated) parameters
for param, value in new_params.items():
modified_args.append(f"{param}={value}")
if algo is not None:
algo.writer.flush()
algo.writer.close()
if with_wandb:
try:
import wandb
wandb.run.finish()
except Exception as exc:
print(f"Policy {policy_idx}: Exception {exc} in wandb.run.finish()")
return
print(f"Policy {policy_idx}: Restarting self with args {modified_args}", flush=True)
os.execv(sys.executable, ["python3"] + modified_args)
def initial_pbt_check(cfg: DictConfig):
assert cfg.pbt.enabled
if hasattr(cfg, "pbt_restart") and cfg.pbt_restart:
print(f"PBT job restarted from checkpoint, keep going...")
return
print("PBT run without 'pbt_restart=True' - must be the very start of the experiment!")
print("Mutating initial set of hyperparameters!")
pbt_params = PbtParams(cfg)
new_params = mutate(
pbt_params.mutable_params,
pbt_params.params_to_mutate,
pbt_params.mutation_rate,
pbt_params.change_min,
pbt_params.change_max,
)
_restart_process_with_new_params(pbt_params.policy_idx, new_params, None, None, None, False)
class PbtAlgoObserver(AlgoObserver):
def __init__(self, cfg: DictConfig):
super().__init__()
self.pbt_params: PbtParams = PbtParams(cfg)
self.policy_idx: int = self.pbt_params.policy_idx
self.num_envs: int = self.pbt_params.num_envs
self.pbt_num_policies: int = self.pbt_params.num_policies
self.algo: Optional[RLAlgo] = None
self.pbt_workspace_dir = self.curr_policy_workspace_dir = None
self.pbt_iteration = -1 # dummy value, stands for "not initialized"
self.initial_env_frames = -1 # env frames at the beginning of the experiment, can be > 0 if we resume
self.finished_agents = set()
self.last_target_objectives = [_UNINITIALIZED_VALUE] * self.pbt_params.num_envs
self.curr_target_objective_value: float = _UNINITIALIZED_VALUE
self.target_objective_known = False # switch to true when we have enough data to calculate target objective
# keep track of objective values in the current iteration
# we use best value reached in the current iteration to decide whether to be replaced by another policy
# this reduces the noisiness of evolutionary pressure by reducing the number of situations where a policy
# gets replaced just due to a random minor dip in performance
self.best_objective_curr_iteration: Optional[float] = None
self.experiment_start = time.time()
self.with_wandb = self.pbt_params.with_wandb
def after_init(self, algo):
self.algo = algo
self.pbt_workspace_dir = join(algo.train_dir, self.pbt_params.workspace)
self.curr_policy_workspace_dir = self._policy_workspace_dir(self.pbt_params.policy_idx)
os.makedirs(self.curr_policy_workspace_dir, exist_ok=True)
def process_infos(self, infos, done_indices):
if "true_objective" in infos:
done_indices_lst = done_indices.squeeze(-1).tolist()
self.finished_agents.update(done_indices_lst)
for done_idx in done_indices_lst:
true_objective_value = infos["true_objective"][done_idx].item()
self.last_target_objectives[done_idx] = true_objective_value
# the objective is considered known once every env has reported at least one finished episode
self.target_objective_known = len(self.finished_agents) >= self.pbt_params.num_envs
if self.target_objective_known:
self.curr_target_objective_value = float(np.mean(self.last_target_objectives))
else:
# environment does not specify "true objective", use regular reward
# in this case, be careful not to include reward shaping coefficients into the mutation config
self.target_objective_known = self.algo.game_rewards.current_size >= self.algo.games_to_track
if self.target_objective_known:
self.curr_target_objective_value = float(self.algo.mean_rewards)
if self.target_objective_known:
if (
self.best_objective_curr_iteration is None
or self.curr_target_objective_value > self.best_objective_curr_iteration
):
print(
f"Policy {self.policy_idx}: New best objective value {self.curr_target_objective_value} in iteration {self.pbt_iteration}"
)
self.best_objective_curr_iteration = self.curr_target_objective_value
def after_steps(self):
if self.pbt_iteration == -1:
self.pbt_iteration = self.algo.frame // self.pbt_params.interval_steps
self.initial_env_frames = self.algo.frame
print(
f"Policy {self.policy_idx}: PBT init. Env frames: {self.algo.frame}, pbt_iteration: {self.pbt_iteration}"
)
env_frames: int = self.algo.frame
iteration = env_frames // self.pbt_params.interval_steps
print(
f"Policy {self.policy_idx}: Env frames {env_frames}, iteration {iteration}, self iteration {self.pbt_iteration}"
)
if iteration <= self.pbt_iteration:
return
if not self.target_objective_known:
# not enough data yet to calculate avg true_objective
print(
f"Policy {self.policy_idx}: Not enough episodes finished, wait for more data ({len(self.finished_agents)}/{self.num_envs})..."
)
return
assert self.curr_target_objective_value != _UNINITIALIZED_VALUE
assert self.best_objective_curr_iteration is not None
best_objective_curr_iteration: float = self.best_objective_curr_iteration
# reset for the next iteration
self.best_objective_curr_iteration = None
self.target_objective_known = False
sec_since_experiment_start = time.time() - self.experiment_start
pbt_start_after_sec = 1 if self.pbt_params.dbg_mode else 30
if sec_since_experiment_start < pbt_start_after_sec:
print(
f"Policy {self.policy_idx}: Not enough time passed since experiment start {sec_since_experiment_start}"
)
return
print(f"Policy {self.policy_idx}: New pbt iteration {iteration}!")
self.pbt_iteration = iteration
try:
self._save_pbt_checkpoint()
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when saving PBT checkpoint!")
return
try:
checkpoints = self._load_population_checkpoints()
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when loading checkpoints!")
return
try:
self._cleanup(checkpoints)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} during cleanup!")
policies = list(range(self.pbt_num_policies))
target_objectives = []
for p in policies:
if checkpoints[p] is None:
target_objectives.append(_UNINITIALIZED_VALUE)
else:
target_objectives.append(checkpoints[p]["true_objective"])
policies_sorted = sorted(zip(target_objectives, policies), reverse=True)
objectives = [objective for objective, p in policies_sorted]
best_objective = objectives[0]
policies_sorted = [p for objective, p in policies_sorted]
best_policy = policies_sorted[0]
self._maybe_save_best_policy(best_objective, best_policy, checkpoints[best_policy])
objectives_filtered = [o for o in objectives if o > _UNINITIALIZED_VALUE]
try:
self._pbt_summaries(self.pbt_params.mutable_params, best_objective)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!")
return
if (
env_frames - self.initial_env_frames < self.pbt_params.start_after_steps
or env_frames < self.pbt_params.initial_delay_steps
):
print(
f"Policy {self.policy_idx}: Not enough experience collected to replace weights. "
f"Giving this policy more time to adjust to the latest parameters... "
f"env_frames={env_frames} started_at={self.initial_env_frames} "
f"restart_delay={self.pbt_params.start_after_steps} initial_delay={self.pbt_params.initial_delay_steps}"
)
return
replace_worst = math.ceil(self.pbt_params.replace_fraction_worst * self.pbt_num_policies)
replace_best = math.ceil(self.pbt_params.replace_fraction_best * self.pbt_num_policies)
best_policies = policies_sorted[:replace_best]
worst_policies = policies_sorted[-replace_worst:]
print(f"Policy {self.policy_idx}: PBT best_policies={best_policies}, worst_policies={worst_policies}")
if self.policy_idx not in worst_policies and not self.pbt_params.dbg_mode:
# don't touch the policies that are doing okay
print(f"Current policy {self.policy_idx} is doing well, not among the worst_policies={worst_policies}")
return
if best_objective_curr_iteration is not None and not self.pbt_params.dbg_mode:
if best_objective_curr_iteration >= min(objectives[:replace_best]):
print(
f"Policy {self.policy_idx}: best_objective={best_objective_curr_iteration} "
f"is better than some of the top policies {objectives[:replace_best]}. "
f"This policy should keep training for now, it is doing okay."
)
return
if len(objectives_filtered) <= max(2, self.pbt_num_policies // 2) and not self.pbt_params.dbg_mode:
print(f"Policy {self.policy_idx}: Not enough data to start PBT, {objectives_filtered}")
return
print(f"Current policy {self.policy_idx} is among the worst_policies={worst_policies}, consider replacing weights")
print(
f"Policy {self.policy_idx} objective: {self.curr_target_objective_value}, best_objective={best_objective} (best_policy={best_policy})."
)
replacement_policy_candidate = random.choice(best_policies)
candidate_objective = checkpoints[replacement_policy_candidate]["true_objective"]
targ_objective_value = self.curr_target_objective_value
objective_delta = candidate_objective - targ_objective_value
num_outliers = int(math.floor(0.2 * len(objectives_filtered)))
print(f"Policy {self.policy_idx} num outliers: {num_outliers}")
if len(objectives_filtered) > num_outliers:
objectives_filtered_sorted = sorted(objectives_filtered)
# remove the worst policies from the std calculation, this will allow us to keep improving even if 1-2 policies
# crashed and can't keep improving. Otherwise, std value will be too large.
objectives_std = np.std(objectives_filtered_sorted[num_outliers:])
else:
objectives_std = np.std(objectives_filtered)
objective_threshold = self.pbt_params.replace_threshold_frac_std * objectives_std
absolute_threshold = self.pbt_params.replace_threshold_frac_absolute * abs(candidate_objective)
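# Editorial note (hypothetical numbers): with replace_threshold_frac_std=0.5,
# objectives_std=100, candidate_objective=5000 and replace_threshold_frac_absolute=0.05,
# the two thresholds are 50 and 250, so the weights are replaced only if the candidate
# beats this policy by more than 250 -- the delta must clear both thresholds at once.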
if objective_delta > objective_threshold and objective_delta > absolute_threshold:
# replace this policy with a candidate
replacement_policy = replacement_policy_candidate
print(f"Replacing underperforming policy {self.policy_idx} with {replacement_policy}")
else:
print(
f"Policy {self.policy_idx}: Difference in objective value ({candidate_objective} vs {targ_objective_value}) is not sufficient to justify replacement,"
f"{objective_delta}, {objectives_std}, {objective_threshold}, {absolute_threshold}"
)
# replacing with "self": keep the weights but mutate the hyperparameters
replacement_policy = self.policy_idx
# Decided to replace the policy weights!
# we can either copy parameters from the checkpoint we're restarting from, or keep our parameters and
# further mutate them.
if random.random() < 0.5:
new_params = checkpoints[replacement_policy]["params"]
else:
new_params = self.pbt_params.mutable_params
new_params = mutate(
new_params,
self.pbt_params.params_to_mutate,
self.pbt_params.mutation_rate,
self.pbt_params.change_min,
self.pbt_params.change_max,
)
experiment_name = checkpoints[self.policy_idx]["experiment_name"]
try:
self._pbt_summaries(new_params, best_objective)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when writing summaries!")
return
try:
restart_checkpoint = os.path.abspath(checkpoints[replacement_policy]["checkpoint"])
# delete previous tempdir to make sure we don't grow too big
checkpoint_tmp_dir = join(project_tmp_dir(), f"{experiment_name}_p{self.policy_idx}")
if os.path.isdir(checkpoint_tmp_dir):
shutil.rmtree(checkpoint_tmp_dir)
checkpoint_tmp_dir = safe_ensure_dir_exists(checkpoint_tmp_dir)
restart_checkpoint_tmp = join(checkpoint_tmp_dir, os.path.basename(restart_checkpoint))
# copy the checkpoint file to the temp dir to make sure it does not get deleted while we're restarting
shutil.copyfile(restart_checkpoint, restart_checkpoint_tmp)
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when copying checkpoint file for restart")
# perhaps checkpoint file was deleted before we could make a copy. Abort the restart.
return
# try to load the checkpoint file and if it fails, abandon the restart
try:
self._rewrite_checkpoint(restart_checkpoint_tmp, env_frames)
except Exception as exc:
# this should happen infrequently so should not affect training in any significant way
print(
f"Policy {self.policy_idx}: Exception {exc} when loading checkpoint file for restart."
f"Aborting restart. Continue training with the existing set of weights!"
)
return
print(
f"Policy {self.policy_idx}: Preparing to restart the process with mutated parameters! "
f"Checkpoint {restart_checkpoint_tmp}"
)
_restart_process_with_new_params(
self.policy_idx, new_params, restart_checkpoint_tmp, experiment_name, self.algo, self.with_wandb
)
def _rewrite_checkpoint(self, restart_checkpoint_tmp: str, env_frames: int) -> None:
state = torch.load(restart_checkpoint_tmp)
print(f"Policy {self.policy_idx}: restarting from checkpoint {restart_checkpoint_tmp}, {state['frame']}")
print(f"Replacing {state['frame']} with {env_frames}...")
state["frame"] = env_frames
pbt_history = state.get("pbt_history", [])
print(f"PBT history: {pbt_history}")
pbt_history.append((self.policy_idx, env_frames, self.curr_target_objective_value))
state["pbt_history"] = pbt_history
torch.save(state, restart_checkpoint_tmp)
print(f"Policy {self.policy_idx}: checkpoint rewritten to {restart_checkpoint_tmp}!")
def _save_pbt_checkpoint(self):
"""Save PBT-specific information including iteration number, policy index and hyperparameters."""
checkpoint_file = join(self.curr_policy_workspace_dir, _model_checkpnt_name(self.pbt_iteration))
algo_state = self.algo.get_full_state_weights()
safe_save(algo_state, checkpoint_file)
pbt_checkpoint_file = join(self.curr_policy_workspace_dir, _checkpnt_name(self.pbt_iteration))
pbt_checkpoint = {
"iteration": self.pbt_iteration,
"true_objective": self.curr_target_objective_value,
"frame": self.algo.frame,
"params": self.pbt_params.mutable_params,
"checkpoint": os.path.abspath(checkpoint_file),
"pbt_checkpoint": os.path.abspath(pbt_checkpoint_file),
"experiment_name": self.algo.experiment_name,
}
with open(pbt_checkpoint_file, "w") as fobj:
print(f"Policy {self.policy_idx}: Saving {pbt_checkpoint_file}...")
yaml.dump(pbt_checkpoint, fobj)
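# Editorial note: the resulting YAML roughly looks like this (illustrative values):
#   iteration: 42
#   true_objective: 317.5
#   frame: 210000000
#   params: {learning_rate: 0.0003, ...}
#   checkpoint: /abs/path/to/<workspace>/000/<model checkpoint for iteration 42>
#   pbt_checkpoint: /abs/path/to/<workspace>/000/<this yaml file>
#   experiment_name: <experiment name>
# The exact file names come from _model_checkpnt_name / _checkpnt_name.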
def _policy_workspace_dir(self, policy_idx):
return join(self.pbt_workspace_dir, f"{policy_idx:03d}")
def _load_population_checkpoints(self):
"""
Load checkpoints for other policies in the population.
Pick the newest checkpoint, but not newer than our current iteration.
"""
checkpoints = dict()
for policy_idx in range(self.pbt_num_policies):
checkpoints[policy_idx] = None
policy_workspace_dir = self._policy_workspace_dir(policy_idx)
if not os.path.isdir(policy_workspace_dir):
continue
pbt_checkpoint_files = [f for f in os.listdir(policy_workspace_dir) if f.endswith(".yaml")]
pbt_checkpoint_files.sort(reverse=True)
for pbt_checkpoint_file in pbt_checkpoint_files:
iteration_str = pbt_checkpoint_file.split(".")[0]
iteration = int(iteration_str)
if iteration <= self.pbt_iteration:
with open(join(policy_workspace_dir, pbt_checkpoint_file), "r") as fobj:
print(f"Policy {self.policy_idx}: Loading policy-{policy_idx} {pbt_checkpoint_file}")
checkpoints[policy_idx] = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader)
break
else:
# print(f'Policy {self.policy_idx}: Ignoring {pbt_checkpoint_file} because it is newer than our current iteration')
pass
assert self.policy_idx in checkpoints.keys()
return checkpoints
def _maybe_save_best_policy(self, best_objective, best_policy_idx: int, best_policy_checkpoint):
# make a directory containing the best policy checkpoints using safe_filesystem_op
best_policy_workspace_dir = join(self.pbt_workspace_dir, f"best{self.policy_idx}")
safe_filesystem_op(os.makedirs, best_policy_workspace_dir, exist_ok=True)
best_objective_so_far = _UNINITIALIZED_VALUE
best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir) if f.endswith(".yaml")]
best_policy_checkpoint_files.sort(reverse=True)
if best_policy_checkpoint_files:
with open(join(best_policy_workspace_dir, best_policy_checkpoint_files[0]), "r") as fobj:
best_policy_checkpoint_so_far = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader)
best_objective_so_far = best_policy_checkpoint_so_far["true_objective"]
if best_objective_so_far >= best_objective:
# don't save the checkpoint if it is worse than the best checkpoint so far
return
print(f"Policy {self.policy_idx}: New best objective: {best_objective}!")
# save the best policy checkpoint to this folder
best_policy_checkpoint_name = f"{self.pbt_params.task_name}_best_obj_{best_objective:015.5f}_iter_{self.pbt_iteration:04d}_policy{best_policy_idx:03d}_frame{self.algo.frame}"
# copy the checkpoint file to the best policy directory
try:
shutil.copy(
best_policy_checkpoint["checkpoint"],
join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.pth"),
)
shutil.copy(
best_policy_checkpoint["pbt_checkpoint"],
join(best_policy_workspace_dir, f"{best_policy_checkpoint_name}.yaml"),
)
# cleanup older best policy checkpoints, we want to keep only N latest files
best_policy_checkpoint_files = [f for f in os.listdir(best_policy_workspace_dir)]
best_policy_checkpoint_files.sort(reverse=True)
n_to_keep = 6
for best_policy_checkpoint_file in best_policy_checkpoint_files[n_to_keep:]:
os.remove(join(best_policy_workspace_dir, best_policy_checkpoint_file))
except Exception as exc:
print(f"Policy {self.policy_idx}: Exception {exc} when copying best checkpoint!")
# no big deal if this fails, hopefully the next time we will succeed
return
def _pbt_summaries(self, params, best_objective):
for param, value in params.items():
self.algo.writer.add_scalar(f"pbt/{param}", value, self.algo.frame)
self.algo.writer.add_scalar(f"pbt/00_best_objective", best_objective, self.algo.frame)
self.algo.writer.flush()
def _cleanup(self, checkpoints):
iterations = []
for policy_idx, checkpoint in checkpoints.items():
if checkpoint is None:
iterations.append(0)
else:
iterations.append(checkpoint["iteration"])
oldest_iteration = sorted(iterations)[0]
cleanup_threshold = oldest_iteration - 20
print(
f"Policy {self.policy_idx}: Oldest iteration in population is {oldest_iteration}, removing checkpoints older than {cleanup_threshold} iteration"
)
pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir)]
for f in pbt_checkpoint_files:
if "." in f:
iteration_idx = int(f.split(".")[0])
if iteration_idx <= cleanup_threshold:
print(f"Policy {self.policy_idx}: PBT cleanup: removing checkpoint {f}")
# we catch all exceptions in this function so no need to use safe_filesystem_op
os.remove(join(self.curr_policy_workspace_dir, f))
# Sometimes, one of the PBT processes can get stuck, or crash, or be scheduled significantly later on Slurm
# or a similar cluster management system.
# In that case, we will accumulate a lot of older checkpoints. In order to keep the number of older checkpoints
# under control (to avoid running out of disk space) we implement the following logic:
# when we have more than N checkpoints, we remove them one at a time, each time deleting the checkpoint whose
# removal keeps the remaining ones most evenly spaced (see _delete_old_checkpoint below). This caps the max amount
# of disk space used, and still allows older policies to participate in PBT
max_old_checkpoints = 25
while True:
pbt_checkpoint_files = [f for f in os.listdir(self.curr_policy_workspace_dir) if f.endswith(".yaml")]
if len(pbt_checkpoint_files) <= max_old_checkpoints:
break
if not self._delete_old_checkpoint(pbt_checkpoint_files):
break
def _delete_old_checkpoint(self, pbt_checkpoint_files: List[str]) -> bool:
"""
Delete the checkpoint whose removal creates the smallest gap between its neighbours,
keeping the remaining checkpoints as evenly spaced as possible.
Never delete any of the last N checkpoints.
"""
pbt_checkpoint_files.sort()
n_latest_to_keep = 10
candidates = pbt_checkpoint_files[:-n_latest_to_keep]
num_candidates = len(candidates)
if num_candidates < 3:
return False
def _iter(f):
return int(f.split(".")[0])
best_gap = 1e9
best_candidate = 1
for i in range(1, num_candidates - 1):
prev_iteration = _iter(candidates[i - 1])
next_iteration = _iter(candidates[i + 1])
# gap between the neighbours if we delete the i-th candidate
gap = next_iteration - prev_iteration
if gap < best_gap:
best_gap = gap
best_candidate = i
# delete the best candidate
best_candidate_file = candidates[best_candidate]
files_to_remove = [best_candidate_file, _model_checkpnt_name(_iter(best_candidate_file))]
for file_to_remove in files_to_remove:
print(
f"Policy {self.policy_idx}: PBT cleanup old checkpoints, removing checkpoint {file_to_remove} (best gap {best_gap})"
)
os.remove(join(self.curr_policy_workspace_dir, file_to_remove))
return True
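# --- Editorial sketch (appended for exposition, not part of the original file) ---
# Standalone illustration of the gap heuristic used by _delete_old_checkpoint above:
# among the deletable candidates, remove the checkpoint whose removal creates the
# smallest gap between its neighbours, keeping the surviving checkpoints as evenly
# spaced (in PBT iterations) as possible.
def _demo_pick_checkpoint_to_delete(iterations: List[int]):
    best_gap, best_idx = float("inf"), 1
    for i in range(1, len(iterations) - 1):
        gap = iterations[i + 1] - iterations[i - 1]  # gap left behind if element i is removed
        if gap < best_gap:
            best_gap, best_idx = gap, i
    return iterations[best_idx], best_gap
# Example: _demo_pick_checkpoint_to_delete([3, 4, 5, 9, 15]) returns (4, 2),
# i.e. deleting iteration 4 leaves neighbours 3 and 5 only 2 apart.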
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/mutation.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import random
def mutate_float(x, change_min=1.1, change_max=1.5):
perturb_amount = random.uniform(change_min, change_max)
# mutation direction
new_value = x / perturb_amount if random.random() < 0.5 else x * perturb_amount
return new_value
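# Worked example (editorial note): for x = 3e-4 with change_min=1.1 and change_max=1.5,
# the factor is drawn from [1.1, 1.5] and applied either as a division or a
# multiplication, so the result lands in roughly [2.0e-4, 2.7e-4] or [3.3e-4, 4.5e-4].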
def mutate_float_min_1(x, **kwargs):
new_value = mutate_float(x, **kwargs)
new_value = max(1.0, new_value)
return new_value
def mutate_eps_clip(x, **kwargs):
new_value = mutate_float(x, **kwargs)
new_value = max(0.01, new_value)
new_value = min(0.3, new_value)
return new_value
def mutate_mini_epochs(x, **kwargs):
change_amount = 1
new_value = x + change_amount if random.random() < 0.5 else x - change_amount
new_value = max(1, new_value)
new_value = min(8, new_value)
return new_value
def mutate_discount(x, **kwargs):
"""Special mutation func for parameters such as gamma (discount factor)."""
inv_x = 1.0 - x
# very conservative, large changes in gamma can lead to very different critic estimates
new_inv_x = mutate_float(inv_x, change_min=1.1, change_max=1.2)
new_value = 1.0 - new_inv_x
return new_value
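# Worked example (editorial note): for gamma = 0.99, inv_x = 0.01 is scaled by a factor
# in [1.1, 1.2] (up or down), so the mutated gamma stays within roughly [0.988, 0.9917]
# -- a deliberately small perturbation.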
def get_mutation_func(mutation_func_name):
try:
func = eval(mutation_func_name)
except Exception as exc:
print(f'Exception {exc} while trying to find the mutation func {mutation_func_name}.')
raise Exception(f'Could not find mutation func {mutation_func_name}')
return func
def mutate(params, mutations, mutation_rate, pbt_change_min, pbt_change_max):
mutated_params = copy.deepcopy(params)
for param, param_value in params.items():
# toss a coin whether we perturb the parameter at all
if random.random() > mutation_rate:
continue
mutation_func_name = mutations[param]
mutation_func = get_mutation_func(mutation_func_name)
mutated_value = mutation_func(param_value, change_min=pbt_change_min, change_max=pbt_change_max)
mutated_params[param] = mutated_value
print(f'Param {param} mutated to value {mutated_value}')
return mutated_params
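# --- Editorial usage sketch (not part of the original module) ---
# Demonstrates how `mutate` perturbs a hyperparameter dict. The parameter names and the
# name->mutation-function mapping below are illustrative assumptions, not the project's
# actual PBT defaults.
if __name__ == '__main__':
    example_params = {'learning_rate': 3e-4, 'gamma': 0.99, 'mini_epochs': 4}
    example_mutations = {
        'learning_rate': 'mutate_float',
        'gamma': 'mutate_discount',
        'mini_epochs': 'mutate_mini_epochs',
    }
    mutated = mutate(
        example_params, example_mutations,
        mutation_rate=0.5, pbt_change_min=1.1, pbt_change_max=1.5,
    )
    print(f'Original params: {example_params}')
    print(f'Mutated params: {mutated}')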
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/ant_pbt.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version
_env = 'ant'
_name = f'{_env}_{version}'
_iterations = 10000
_pbt_num_policies = 3
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
_experiments = [
Experiment(
f'{_name}',
f'python -m isaacgymenvs.train task=Ant headless=True '
f'max_iterations={_iterations} num_envs=2048 seed=-1 train.params.config.save_frequency=2000 '
f'wandb_activate={_wandb_activate} wandb_group={_wandb_group} wandb_entity={_wandb_entity} wandb_project={_wandb_project} '
f'pbt=pbt_default pbt.num_policies={_pbt_num_policies} pbt.workspace=workspace_{_name} '
f'pbt.initial_delay=10000000 pbt.interval_steps=5000000 pbt.start_after=10000000 pbt/mutation=ant_mutation',
_params.generate_params(randomize=False),
),
]
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=_experiments, experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
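# Editorial note (hedged): this module only declares RUN_DESCRIPTION; the launcher in
# isaacgymenvs.pbt.launcher expands the ParamGrid into _pbt_num_policies command lines
# (one per pbt.policy_idx value) and runs them in parallel. The policies then coordinate
# through the shared pbt.workspace directory rather than through the launcher itself.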
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_two_arms_reorientation'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_params = ParamGrid([
('seed', seeds(8)),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'python -m isaacgymenvs.train ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaTwoArmsLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_throw_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_throw'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_params = ParamGrid([
('seed', seeds(8)),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'python -m isaacgymenvs.train seed=-1 ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaLSTM task/env=throw ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/run_utils.py
|
import random
from typing import List
# Versioning -- you can change this number and keep a changelog below to keep track of your experiments as you go.
version = "v1"
def seeds(num_seeds) -> List[int]:
return [random.randrange(1000000, 9999999) for _ in range(num_seeds)]
default_num_frames: int = 10_000_000_000
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_reorientation'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
_params = ParamGrid([
('seed', seeds(8)),
])
cli = f'python -m isaacgymenvs.train seed=-1 ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_pbt_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
env = 'allegro_kuka_two_arms_regrasp'
_pbt_num_policies = 8
_name = f'{env}_{version}_pbt_{_pbt_num_policies}p'
_wandb_group = f'pbt_{_name}'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=regrasping task.env.episodeLength=400 wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/__init__.py
| |
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_regrasping_pbt_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
_pbt_num_policies = 8
_name = f'{kuka_env}_regrasp_{version}_pbt_{_pbt_num_policies}p'
_wandb_group = f'pbt_{_name}'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=regrasping wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_regrasping_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_two_arms_regrasp'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_params = ParamGrid([
('seed', seeds(8)),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'python -m isaacgymenvs.train seed=-1 ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaTwoArmsLSTM task/env=regrasping ' \
f'task.env.episodeLength=400 ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_lstm_8gpu.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_reorientation'
_num_gpus = 8
_frames = default_num_frames * _num_gpus
_name = f'{kuka_env}_{version}_{_num_gpus}gpu'
_params = ParamGrid([
('seed', seeds(1)),
])
_wandb_activate = True
_wandb_group = f'rlgames_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'train.py multi_gpu=True ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_regrasping_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.run_utils import version, seeds, default_num_frames
kuka_env = 'allegro_kuka_regrasp'
_frames = default_num_frames
_name = f'{kuka_env}_{version}'
_params = ParamGrid([
('seed', seeds(8)),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = f'python -m isaacgymenvs.train seed=-1 ' \
f'train.params.config.max_frames={_frames} headless=True ' \
f'task=AllegroKukaLSTM task/env=regrasping ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_reorientation_pbt_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
_pbt_num_policies = 8
_name = f'{kuka_env}_manip_{version}_pbt_{_pbt_num_policies}p'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = kuka_base_cli + f' task=AllegroKukaLSTM task/env=reorientation ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_throw_pbt_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_env, kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
_pbt_num_policies = 8
_name = f'{kuka_env}_throw_{version}_pbt_{_pbt_num_policies}p'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
_wandb_activate = True
_wandb_group = f'pbt_{_name}'
_wandb_entity = 'your_wandb_entity'
_wandb_project = 'your_wandb_project'
cli = kuka_base_cli + \
f' task=AllegroKukaLSTM ' \
f'task/env=throw wandb_activate=True pbt.num_policies={_pbt_num_policies} ' \
f'wandb_project={_wandb_project} wandb_entity={_wandb_entity} wandb_activate={_wandb_activate} wandb_group={_wandb_group}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|
NVIDIA-Omniverse/IsaacGymEnvs/isaacgymenvs/pbt/experiments/allegro_kuka_two_arms_reorientation_pbt_lstm.py
|
from isaacgymenvs.pbt.launcher.run_description import ParamGrid, RunDescription, Experiment
from isaacgymenvs.pbt.experiments.allegro_kuka_pbt_base import kuka_base_cli
from isaacgymenvs.pbt.experiments.run_utils import version
env = 'allegro_kuka_two_arms_reorientation'
_pbt_num_policies = 8
_name = f'{env}_{version}_pbt_{_pbt_num_policies}p'
_wandb_group = f'pbt_{_name}'
_params = ParamGrid([
('pbt.policy_idx', list(range(_pbt_num_policies))),
])
cli = kuka_base_cli + f' task=AllegroKukaTwoArmsLSTM task/env=reorientation wandb_activate=True wandb_group={_wandb_group} pbt.num_policies={_pbt_num_policies}'
RUN_DESCRIPTION = RunDescription(
f'{_name}',
experiments=[Experiment(f'{_name}', cli, _params.generate_params(randomize=False))],
experiment_arg_name='experiment', experiment_dir_arg_name='hydra.run.dir',
param_prefix='', customize_experiment_name=False,
)
|