huypn16 commited on
Commit
58f6430
1 Parent(s): 8055dd0

Update layers.8/cfg.json

Browse files
Files changed (1) hide show
  1. layers.8/cfg.json +9 -13
layers.8/cfg.json CHANGED
@@ -1,28 +1,24 @@
1
  {
2
- "normalize_decoder": true,
3
- "num_latents": 0,
4
- "k": 64,
5
- "multi_topk": false,
6
  "layer": 8,
7
- "device": "cuda",
8
  "activation_fn_str": "relu",
9
  "d_sae": 24576,
10
- "d_in": 1536,
11
- "architecture": "topk",
12
  "apply_b_dec_to_input": false,
13
  "finetuning_scaling_factor": false,
14
  "context_size": 1024,
15
- "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
16
- "hook_name": "blocks.8.hook_sae_acts_topk",
17
- "hook_layer": 8,
18
  "hook_head_index": null,
19
  "prepend_bos": true,
20
- "dataset_path": "",
21
- "dataset_trust_remote_code": false,
22
  "normalize_activations": "none",
23
  "dtype": "float32",
24
  "sae_lens_training_version": "eleuther",
25
- "activation_fn_kwargs": {},
26
  "neuronpedia_id": null,
27
  "model_from_pretrained_kwargs": {}
28
  }
 
1
  {
2
+ "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
3
+ "architecture": "topk",
4
+ "hook_name": "blocks.8.hook_sae_acts_topk",
5
+ "hook_layer": 8,
6
  "layer": 8,
7
+ "k": 64,
8
  "activation_fn_str": "relu",
9
  "d_sae": 24576,
10
+ "d_in": 1536,
11
+ "multi_topk": false,
12
+ "device": "cuda",
13
  "apply_b_dec_to_input": false,
14
  "finetuning_scaling_factor": false,
15
  "context_size": 1024,
16
  "hook_head_index": null,
17
  "prepend_bos": true,
18
  "normalize_activations": "none",
19
  "dtype": "float32",
20
  "sae_lens_training_version": "eleuther",
21
  "neuronpedia_id": null,
22
+ "activation_fn_kwargs": {},
23
  "model_from_pretrained_kwargs": {}
24
  }