Upload SAE blocks.24.hook_mlp_out
blocks.24.hook_mlp_out/cfg.json
ADDED
@@ -0,0 +1 @@
+{"architecture": "topk", "d_in": 3072, "d_sae": 49152, "dtype": "float32", "device": "cuda", "model_name": "meta-llama/Llama-3.2-3B-Instruct", "hook_name": "blocks.24.hook_mlp_out", "hook_layer": 24, "hook_head_index": null, "activation_fn_str": "topk", "activation_fn_kwargs": {"k": 100}, "apply_b_dec_to_input": true, "finetuning_scaling_factor": false, "sae_lens_training_version": "4.4.5", "prepend_bos": true, "dataset_path": "roneneldan/TinyStories", "dataset_trust_remote_code": true, "context_size": 256, "normalize_activations": "none", "neuronpedia_id": null, "model_from_pretrained_kwargs": {"center_writing_weights": false}, "seqpos_slice": [null], "l1_coefficient": 4, "lp_norm": 1, "use_ghost_grads": false, "normalize_sae_decoder": false, "noise_scale": 0.0, "decoder_orthogonal_init": false, "init_encoder_as_decoder_transpose": true, "mse_loss_normalization": null, "decoder_heuristic_init": true, "scale_sparsity_penalty_by_decoder_norm": true, "jumprelu_init_threshold": 0.001, "jumprelu_bandwidth": 0.001}
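The config describes a TopK sparse autoencoder (k=100) trained with SAE Lens 4.4.5 on meta-llama/Llama-3.2-3B-Instruct activations at blocks.24.hook_mlp_out, with d_in 3072 and d_sae 49152 (a 16x expansion). As a minimal sketch of how such a directory could be loaded, assuming a SAE Lens version where SAE.load_from_pretrained is available and using a hypothetical repo id (the actual repository this commit was pushed to is not shown here):

from huggingface_hub import snapshot_download
from sae_lens import SAE

# Hypothetical repo id -- replace with the repository this commit belongs to.
local_dir = snapshot_download(repo_id="your-org/llama-3.2-3b-saes")

# SAE.load_from_pretrained reads cfg.json and sae_weights.safetensors
# from the given directory.
sae = SAE.load_from_pretrained(f"{local_dir}/blocks.24.hook_mlp_out", device="cuda")

print(sae.cfg.d_in, sae.cfg.d_sae)  # expected: 3072 49152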
blocks.24.hook_mlp_out/sae_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b57b80dfd92a7a37f12fc893e400fe63279e71a5645e5ee0af790f85b3299316
+size 1208168776
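This file is a Git LFS pointer rather than the weights themselves; the ~1.2 GB size is consistent with two 3072 x 49152 float32 matrices (encoder and decoder) plus bias vectors. A minimal sketch for checking a downloaded copy against the pointer's oid and size, using only the values shown above:

import hashlib
import os

path = "blocks.24.hook_mlp_out/sae_weights.safetensors"
expected_oid = "b57b80dfd92a7a37f12fc893e400fe63279e71a5645e5ee0af790f85b3299316"
expected_size = 1208168776

# Size check is cheap and catches truncated downloads.
assert os.path.getsize(path) == expected_size, "size mismatch"

# The LFS oid is the SHA-256 of the file contents; hash in 1 MiB chunks.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "checksum mismatch"
print("weights verified")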