{
  "num_layers": 32,
  "num_heads": 8,
  "d_model": 256,
  "T": 12,
  "S": 256,
  "image_vocab_size": 262144,
  "use_mup": false,
  "num_factored_vocabs": 2,
  "qkv_bias": true,
  "proj_bias": true,
  "use_actions": true,
  "action_network": "conconcat+modulatecat",
  "attn_drop": 0.1,
  "qk_norm": false,
  "mlp_ratio": 4.0,
  "mlp_drop": 0.05,
  "mlp_bias": false,
  "patch_size": 2
}