Tags: Feature Extraction · Transformers · Safetensors · openvla · custom_code
File size: 3,553 bytes
{
  "arch_specifier": "no-align+fused-gelu-mlp",
  "architectures": [
    "OpenVLAForActionPrediction"
  ],
  "auto_map": {
    "AutoConfig": "configuration_prismatic.OpenVLAConfig",
    "AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
  },
  "hf_llm_id": "meta-llama/Llama-2-7b-hf",
  "image_resize_strategy": "resize-naive",
  "image_sizes": [
    224,
    224
  ],
  "llm_backbone_id": "llama2-7b-pure",
  "llm_max_length": 2048,
  "model_type": "openvla",
  "n_action_bins": 256,
  "norm_stats": {
    "fractal20220817_data": {
      "action": {
        "mask": [
          true,
          true,
          true,
          true,
          true,
          true,
          false
        ],
        "max": [
          2.9984593391418457,
          22.09052848815918,
          2.7507524490356445,
          1.570636510848999,
          1.5321086645126343,
          1.5691522359848022,
          1.0
        ],
        "mean": [
          0.006987581495195627,
          0.006265914998948574,
          -0.01262515690177679,
          0.04333311691880226,
          -0.005756211932748556,
          0.0009130308171734214,
          0.5354204773902893
        ],
        "min": [
          -2.0204520225524902,
          -5.497899532318115,
          -2.031663417816162,
          -1.569917917251587,
          -1.569892168045044,
          -1.570419430732727,
          0.0
        ],
        "q01": [
          -0.22453527510166169,
          -0.14820013284683228,
          -0.231589707583189,
          -0.3517994859814644,
          -0.4193011274933815,
          -0.43643461108207704,
          0.0
        ],
        "q99": [
          0.17824687153100965,
          0.14938379630446405,
          0.21842354819178575,
          0.5892666035890578,
          0.35272657424211445,
          0.44796681255102094,
          1.0
        ],
        "std": [
          0.06921170651912689,
          0.05970961973071098,
          0.07353077083826065,
          0.1561051607131958,
          0.13164445757865906,
          0.1459379941225052,
          0.4971102774143219
        ]
      },
      "num_trajectories": 87212,
      "num_transitions": 3786400,
      "proprio": {
        "max": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ],
        "mean": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ],
        "min": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ],
        "q01": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ],
        "q99": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ],
        "std": [
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0,
          0.0
        ]
      }
    }
  },
  "output_projector_states": false,
  "pad_to_multiple_of": 64,
  "pad_token_id": 32000,
  "text_config": {
    "model_type": "llama",
    "pad_token_id": 32000,
    "torch_dtype": "bfloat16",
    "vocab_size": 32064
  },
  "timm_model_ids": [
    "vit_large_patch14_reg4_dinov2.lvd142m",
    "vit_so400m_patch14_siglip_224"
  ],
  "timm_override_act_layers": [
    null,
    null
  ],
  "torch_dtype": "bfloat16",
  "transformers_version": "4.40.1",
  "use_fused_vision_backbone": true,
  "vision_backbone_id": "dinosiglip-vit-so-224px"
}
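
Because "auto_map" routes AutoConfig and AutoModelForVision2Seq to custom classes (configuration_prismatic.OpenVLAConfig and modeling_prismatic.OpenVLAForActionPrediction), loading this checkpoint requires trust_remote_code=True. A minimal loading sketch, assuming the repo id is "openvla/openvla-7b" (the repo id is not stated in this file) and that the bfloat16 dtype recorded in the config is the intended one:

import torch
from transformers import AutoModelForVision2Seq, AutoProcessor

MODEL_ID = "openvla/openvla-7b"  # assumption: the hosting repo id is not stated in this file

# trust_remote_code=True is required because "auto_map" points at custom
# classes (configuration_prismatic.OpenVLAConfig and
# modeling_prismatic.OpenVLAForActionPrediction) shipped alongside the weights.
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForVision2Seq.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in the config
    trust_remote_code=True,
)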
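
The "norm_stats" block records per-dimension action statistics for the fractal20220817_data dataset (87,212 trajectories; 3,786,400 transitions). A hedged sketch of how such statistics are typically consumed: predicted actions, discretized into n_action_bins = 256 bins over [-1, 1], are mapped back to the dataset's scale using the 1st/99th-percentile bounds, while dimensions whose "mask" entry is false (here the seventh, the gripper) are passed through unnormalized. The function name is illustrative, not taken from the repo's code.

import numpy as np

def unnormalize_action(normalized, action_stats):
    """Map a normalized action in [-1, 1] back to dataset scale.

    Illustrative helper (not from the repo): inverts
    x_norm = 2 * (x - q01) / (q99 - q01) - 1, skipping masked-out dims.
    """
    q01 = np.asarray(action_stats["q01"])
    q99 = np.asarray(action_stats["q99"])
    mask = np.asarray(action_stats["mask"], dtype=bool)
    raw = 0.5 * (np.asarray(normalized) + 1.0) * (q99 - q01) + q01
    return np.where(mask, raw, normalized)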