# rpl_peg_in_hole_act / config.yaml
resume: false
device: cuda
use_amp: false
seed: 0
dataset_repo_id: iantc104/rpl_peg_in_hole
video_backend: pyav
training:
  offline_steps: 100000
  num_workers: 4
  batch_size: 8
  eval_freq: 20000
  log_freq: 200
  save_checkpoint: true
  save_freq: 20000
  online_steps: 0
  online_rollout_n_episodes: 1
  online_rollout_batch_size: 1
  online_steps_between_rollouts: 1
  online_sampling_ratio: 0.5
  online_env_seed: null
  online_buffer_capacity: null
  online_buffer_seed_size: 0
  do_online_rollout_async: false
  image_transforms:
    enable: false
    max_num_transforms: 3
    random_order: false
    brightness:
      weight: 1
      min_max:
        - 0.8
        - 1.2
    contrast:
      weight: 1
      min_max:
        - 0.8
        - 1.2
    saturation:
      weight: 1
      min_max:
        - 0.5
        - 1.5
    hue:
      weight: 1
      min_max:
        - -0.05
        - 0.05
    sharpness:
      weight: 1
      min_max:
        - 0.8
        - 1.2
  lr: 1.0e-05
  lr_backbone: 1.0e-05
  weight_decay: 0.0001
  grad_clip_norm: 10
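  # Note: delta_timestamps below lists, per data key, the relative timestamps (in
  # seconds, with 0.0 being the current frame) at which samples are loaded; for
  # "action" these are the targets supervising the predicted action chunk.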
  delta_timestamps:
    action:
      - 0.0
      - 0.02
      - 0.04
      - 0.06
      - 0.08
      - 0.1
      - 0.12
      - 0.14
      - 0.16
      - 0.18
      - 0.2
      - 0.22
      - 0.24
      - 0.26
      - 0.28
      - 0.3
      - 0.32
      - 0.34
      - 0.36
      - 0.38
      - 0.4
      - 0.42
      - 0.44
      - 0.46
      - 0.48
      - 0.5
      - 0.52
      - 0.54
      - 0.56
      - 0.58
      - 0.6
      - 0.62
      - 0.64
      - 0.66
      - 0.68
      - 0.7
      - 0.72
      - 0.74
      - 0.76
      - 0.78
      - 0.8
      - 0.82
      - 0.84
      - 0.86
      - 0.88
      - 0.9
      - 0.92
      - 0.94
      - 0.96
      - 0.98
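  # 50 offsets spaced 0.02 s apart (= 1 / fps at fps 50), i.e. one full
  # chunk_size of 50 future actions per observation.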
eval:
  n_episodes: 50
  batch_size: 10
  use_async_envs: false
wandb:
  enable: true
  disable_artifact: false
  project: rpl
  notes: ''
fps: 50
env:
  name: rpl
  task: PegInHoleEnv-v0
  state_dim: 16
  action_dim: 10
  fps: ${fps}
  episode_length: 300
  gym:
    render_mode: rgb_array
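  # ${fps} resolves to the top-level fps (50) via interpolation; assuming
  # episode_length is counted in environment steps, 300 steps is roughly 6 s
  # per episode at that rate.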
override_dataset_stats:
  observation.images.scene:
    mean:
      - - - 0.485
      - - - 0.456
      - - - 0.406
    std:
      - - - 0.229
      - - - 0.224
      - - - 0.225
  observation.images.wrist:
    mean:
      - - - 0.485
      - - - 0.456
      - - - 0.406
    std:
      - - - 0.229
      - - - 0.224
      - - - 0.225
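# The per-channel mean/std above (nested to shape (c, 1, 1)) are the standard
# ImageNet normalization statistics, consistent with the pretrained ResNet-18
# backbone configured under policy below.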
policy:
  name: act
  n_obs_steps: 1
  chunk_size: 50
  n_action_steps: 50
  input_shapes:
    observation.images.scene:
      - 3
      - 480
      - 640
    observation.images.wrist:
      - 3
      - 480
      - 640
    observation.state:
      - ${env.state_dim}
  output_shapes:
    action:
      - ${env.action_dim}
  input_normalization_modes:
    observation.images.scene: mean_std
    observation.images.wrist: mean_std
    observation.state: mean_std
  output_normalization_modes:
    action: mean_std
  vision_backbone: resnet18
  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
  replace_final_stride_with_dilation: false
  pre_norm: false
  dim_model: 512
  n_heads: 8
  dim_feedforward: 3200
  feedforward_activation: relu
  n_encoder_layers: 4
  n_decoder_layers: 1
  use_vae: true
  latent_dim: 32
  n_vae_encoder_layers: 4
  temporal_ensemble_coeff: null
  dropout: 0.1
  kl_weight: 10.0
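  # With n_action_steps equal to chunk_size (50) and temporal_ensemble_coeff: null,
  # each predicted 50-action chunk should be executed in full before the next
  # inference call, with no temporal ensembling applied.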