cadene HF staff committed on
Commit
2f3c60a
·
verified ·
1 Parent(s): ece0315

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +9 -0
  2. config.json +50 -0
  3. config.yaml +161 -0
  4. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - model_hub_mixin
4
+ - pytorch_model_hub_mixin
5
+ ---
6
+
7
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
8
+ - Library: [More Information Needed]
9
+ - Docs: [More Information Needed]
config.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "action_chunk_size": 5,
3
+ "bet_softmax_temperature": 0.1,
4
+ "crop_is_random": false,
5
+ "crop_shape": null,
6
+ "dropout": 0.1,
7
+ "gpt_block_size": 500,
8
+ "gpt_hidden_dim": 512,
9
+ "gpt_input_dim": 512,
10
+ "gpt_n_head": 8,
11
+ "gpt_n_layer": 8,
12
+ "gpt_output_dim": 512,
13
+ "input_normalization_modes": {
14
+ "observation.images.laptop": "mean_std",
15
+ "observation.state": "min_max"
16
+ },
17
+ "input_shapes": {
18
+ "observation.images.laptop": [
19
+ 3,
20
+ 480,
21
+ 640
22
+ ],
23
+ "observation.state": [
24
+ 6
25
+ ]
26
+ },
27
+ "mlp_hidden_dim": 1024,
28
+ "n_action_pred_token": 7,
29
+ "n_obs_steps": 5,
30
+ "n_vqvae_training_steps": 20000,
31
+ "offset_loss_weight": 10000.0,
32
+ "output_normalization_modes": {
33
+ "action": "min_max"
34
+ },
35
+ "output_shapes": {
36
+ "action": [
37
+ 6
38
+ ]
39
+ },
40
+ "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
41
+ "primary_code_loss_weight": 5.0,
42
+ "secondary_code_loss_weight": 0.5,
43
+ "sequentially_select": false,
44
+ "spatial_softmax_num_keypoints": 512,
45
+ "use_group_norm": false,
46
+ "vision_backbone": "resnet18",
47
+ "vqvae_embedding_dim": 256,
48
+ "vqvae_enc_hidden_dim": 128,
49
+ "vqvae_n_embed": 16
50
+ }
config.yaml ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ resume: false
2
+ device: cuda
3
+ use_amp: false
4
+ seed: 100000
5
+ dataset_repo_id: lerobot/koch_pick_place_lego
6
+ video_backend: pyav
7
+ training:
8
+ offline_steps: 80000
9
+ online_steps: 0
10
+ online_steps_between_rollouts: 1
11
+ online_sampling_ratio: 0.5
12
+ online_env_seed: ???
13
+ eval_freq: -1
14
+ log_freq: 200
15
+ save_checkpoint: true
16
+ save_freq: 10000
17
+ num_workers: 4
18
+ batch_size: 8
19
+ image_transforms:
20
+ enable: false
21
+ max_num_transforms: 3
22
+ random_order: false
23
+ brightness:
24
+ weight: 1
25
+ min_max:
26
+ - 0.8
27
+ - 1.2
28
+ contrast:
29
+ weight: 1
30
+ min_max:
31
+ - 0.8
32
+ - 1.2
33
+ saturation:
34
+ weight: 1
35
+ min_max:
36
+ - 0.5
37
+ - 1.5
38
+ hue:
39
+ weight: 1
40
+ min_max:
41
+ - -0.05
42
+ - 0.05
43
+ sharpness:
44
+ weight: 1
45
+ min_max:
46
+ - 0.8
47
+ - 1.2
48
+ grad_clip_norm: 10
49
+ lr: 0.0001
50
+ lr_scheduler: cosine
51
+ lr_warmup_steps: 2000
52
+ adam_betas:
53
+ - 0.95
54
+ - 0.999
55
+ adam_eps: 1.0e-08
56
+ adam_weight_decay: 1.0e-06
57
+ vqvae_lr: 0.001
58
+ n_vqvae_training_steps: 20000
59
+ bet_weight_decay: 0.0002
60
+ bet_learning_rate: 5.5e-05
61
+ bet_betas:
62
+ - 0.9
63
+ - 0.999
64
+ delta_timestamps:
65
+ observation.images.laptop:
66
+ - -0.13333333333333333
67
+ - -0.1
68
+ - -0.06666666666666667
69
+ - -0.03333333333333333
70
+ - 0.0
71
+ observation.state:
72
+ - -0.13333333333333333
73
+ - -0.1
74
+ - -0.06666666666666667
75
+ - -0.03333333333333333
76
+ - 0.0
77
+ action:
78
+ - -0.13333333333333333
79
+ - -0.1
80
+ - -0.06666666666666667
81
+ - -0.03333333333333333
82
+ - 0.0
83
+ - 0.03333333333333333
84
+ - 0.06666666666666667
85
+ - 0.1
86
+ - 0.13333333333333333
87
+ - 0.16666666666666666
88
+ - 0.2
89
+ - 0.23333333333333334
90
+ - 0.26666666666666666
91
+ - 0.3
92
+ - 0.3333333333333333
93
+ eval:
94
+ n_episodes: 50
95
+ batch_size: 50
96
+ use_async_envs: false
97
+ wandb:
98
+ enable: true
99
+ disable_artifact: false
100
+ project: lerobot
101
+ notes: ''
102
+ fps: 30
103
+ env:
104
+ name: real_world
105
+ task: null
106
+ state_dim: 6
107
+ action_dim: 6
108
+ fps: ${fps}
109
+ override_dataset_stats:
110
+ observation.images.laptop:
111
+ mean:
112
+ - - - 0.485
113
+ - - - 0.456
114
+ - - - 0.406
115
+ std:
116
+ - - - 0.229
117
+ - - - 0.224
118
+ - - - 0.225
119
+ policy:
120
+ name: vqbet
121
+ n_obs_steps: 5
122
+ n_action_pred_token: 7
123
+ action_chunk_size: 5
124
+ input_shapes:
125
+ observation.images.laptop:
126
+ - 3
127
+ - 480
128
+ - 640
129
+ observation.state:
130
+ - ${env.state_dim}
131
+ output_shapes:
132
+ action:
133
+ - ${env.action_dim}
134
+ input_normalization_modes:
135
+ observation.images.laptop: mean_std
136
+ observation.state: min_max
137
+ output_normalization_modes:
138
+ action: min_max
139
+ vision_backbone: resnet18
140
+ pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
141
+ crop_is_random: false
142
+ spatial_softmax_num_keypoints: 512
143
+ use_group_norm: false
144
+ crop_shape: null
145
+ n_vqvae_training_steps: ${training.n_vqvae_training_steps}
146
+ vqvae_n_embed: 16
147
+ vqvae_embedding_dim: 256
148
+ vqvae_enc_hidden_dim: 128
149
+ gpt_block_size: 500
150
+ gpt_input_dim: 512
151
+ gpt_output_dim: 512
152
+ gpt_n_layer: 8
153
+ gpt_n_head: 8
154
+ gpt_hidden_dim: 512
155
+ dropout: 0.1
156
+ mlp_hidden_dim: 1024
157
+ offset_loss_weight: 10000.0
158
+ primary_code_loss_weight: 5.0
159
+ secondary_code_loss_weight: 0.5
160
+ bet_softmax_temperature: 0.1
161
+ sequentially_select: false
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4816775810888625f2a4ab62fbee3a48c7acaf581379d406599aa36dfbdb2e4a
3
+ size 166675178