{
"beta_end": 0.02,
"beta_schedule": "squaredcos_cap_v2",
"beta_start": 0.0001,
"causal_attn": true,
"clip_sample": true,
"clip_sample_range": 1.0,
"crop_is_random": true,
"crop_shape": [
84,
84
],
"diffusion_step_embed_dim": 256,
"do_mask_loss_for_padding": false,
"down_dims": [
512,
1024,
2048
],
"horizon": 10,
"input_normalization_modes": {
"observation.image": "mean_std",
"observation.state": "min_max"
},
"input_shapes": {
"observation.image": [
3,
96,
96
],
"observation.state": [
2
]
},
"kernel_size": 5,
"n_action_steps": 8,
"n_cond_layers": 0,
"n_groups": 8,
"n_head": 4,
"n_layer": 8,
"n_obs_steps": 2,
"noise_scheduler_type": "DDPM",
"num_inference_steps": null,
"num_train_timesteps": 100,
"output_normalization_modes": {
"action": "min_max"
},
"output_shapes": {
"action": [
2
]
},
"p_drop_attn": 0.3,
"p_drop_emb": 0.0,
"prediction_type": "epsilon",
"pretrained_backbone_weights": null,
"spatial_softmax_num_keypoints": 32,
"use_film_scale_modulation": true,
"use_group_norm": true,
"use_transformer": true,
"vision_backbone": "resnet18"
}
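
The key names above follow the conventions of a diffusion-policy configuration, and the DDPM-related fields happen to match the constructor arguments of `diffusers.DDPMScheduler` one-to-one. The sketch below is an illustration only, not part of this repository: it assumes the file is saved locally as `config.json` and that the noise-scheduler fields are consumed via `diffusers`.

```python
import json

from diffusers import DDPMScheduler  # assumed consumer; key names match its arguments

# Load the configuration (local path "config.json" is an assumption).
with open("config.json") as f:
    cfg = json.load(f)

# Consistency check for this particular file: the 2 observation steps plus
# the 8 executed action steps fit inside the 10-step prediction horizon.
assert cfg["n_obs_steps"] + cfg["n_action_steps"] <= cfg["horizon"]

# Forward the DDPM fields; each keyword below is a real DDPMScheduler argument.
scheduler = DDPMScheduler(
    num_train_timesteps=cfg["num_train_timesteps"],  # 100 diffusion steps
    beta_start=cfg["beta_start"],
    beta_end=cfg["beta_end"],
    beta_schedule=cfg["beta_schedule"],              # "squaredcos_cap_v2"
    clip_sample=cfg["clip_sample"],
    clip_sample_range=cfg["clip_sample_range"],
    prediction_type=cfg["prediction_type"],          # predict noise ("epsilon")
)

print(scheduler.config.num_train_timesteps)  # -> 100
```

Note that `num_inference_steps` is `null`; downstream code commonly interprets this as falling back to `num_train_timesteps` at inference time, though that behavior depends on the consuming implementation.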