{
    "model_class_name": "HookedViT",
    "model_name": "open-clip:laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K",
    "hook_point": "blocks.8.hook_resid_post",
    "hook_point_layer": 8,
    "layer_subtype": "hook_resid_post",
    "hook_point_head_index": null,
    "context_size": 50,
    "use_cached_activations": false,
    "use_patches_only": false,
    "cached_activations_path": "activations/_network_scratch_s_sonia.joseph_datasets_kaggle_datasets/open-clip:laion_CLIP-ViT-B-32-DataComp.XL-s13B-b90K/blocks.9.hook_mlp_out",
    "d_in": 768,
    "activation_fn_str": "relu",
    "activation_fn_kwargs": {},
    "cls_token_only": false,
    "max_grad_norm": 1.0,
    "initialization_method": "encoder_transpose_decoder",
    "normalize_activations": null,
    "n_batches_in_buffer": 20,
    "store_batch_size": 32,
    "num_workers": 16,
    "num_epochs": 10,
    "total_training_images": 13000000,
    "total_training_tokens": 50000000,
    "image_size": 224,
    "device": {
        "__type__": "torch.device",
        "value": "cuda"
    },
    "seed": 42,
    "dtype": {
        "__type__": "torch.dtype",
        "value": "torch.float32"
    },
    "architecture": "gated",
    "sparsity_loss": "l1",
    "verbose": false,
    "b_dec_init_method": "geometric_median",
    "expansion_factor": 64,
    "from_pretrained_path": null,
    "d_sae": 49152,
    "l1_coefficient": 1.2,
    "lp_norm": 1,
    "lr": 0.0002,
    "lr_scheduler_name": "cosineannealingwarmup",
    "lr_warm_up_steps": 200,
    "beta1": 0.9,
    "beta2": 0.999,
    "train_batch_size": 4096,
    "dataset_name": "imagenet1k",
    "dataset_path": "data/ImageNet-complete/",
    "dataset_train_path": "data/ImageNet-complete/train",
    "dataset_val_path": "data/ImageNet-complete/val",
    "use_ghost_grads": false,
    "feature_sampling_window": 1000,
    "dead_feature_window": 5000,
    "dead_feature_threshold": 1e-08,
    "log_to_wandb": true,
    "wandb_project": "lorenz_clip_b_l0",
    "wandb_entity": null,
    "wandb_log_frequency": 100,
    "n_validation_runs": 4,
    "n_checkpoints": 10,
    "checkpoint_path": "models/sae/clip_B_gated_l0/e54ce3c9-lorenz_clip_b_l0-lorenz_clip_b_l0"
}