sgraham committed on
Commit 56099b5 · verified · 1 Parent(s): 58e1450

Pushed the IDEFICS2 model fine-tuned on some archaeological context sheets, just to figure out the workflow.
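A minimal sketch, for context, of how a checkpoint folder like the ones in this commit can be pushed to a Hub model repo with `huggingface_hub`; the repo id and local folder path below are placeholders, not values taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()

# Upload one checkpoint directory to a model repo on the Hub.
# Both the repo id and the local folder path are placeholders.
api.upload_folder(
    repo_id="sgraham/idefics2-context-sheets",
    repo_type="model",
    folder_path="./checkpoint-30",
    path_in_repo="checkpoint-30",
    commit_message="Pushed the IDEFICS2 fine-tuned model",
)
```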

checkpoint-25/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be9e155a00a091ac429c9d7a002d77384488d3de0efeaae002cad2bbc9c46207
+oid sha256:f3b8a5cd7034273b7b144bcbfa022ca6b5184d724c03e821d8683fffa686153d
 size 93378688
checkpoint-25/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b5cec25c51806ee812922158a4f65e6cf97e564598eb8475b8f6b140257657cd
+oid sha256:009570f31da8c655af3b904517b62088a73c25662cda765d5a13ba70c08c0037
 size 48071944
checkpoint-25/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a05716f05747fb967268b6b65bee49602693de25bf95ae9c915a7eff48a89265
+oid sha256:58d35264248af70bc04142557841b4f17281f57af4edd5637dfec599681b99ca
 size 1064
checkpoint-25/trainer_state.json CHANGED
@@ -10,60 +10,60 @@
   "log_history": [
     {
       "epoch": 0.7017543859649122,
-      "grad_norm": 86.28084564208984,
-      "learning_rate": 0.00017600000000000002,
-      "loss": 3.9501,
+      "grad_norm": 44.80005645751953,
+      "learning_rate": 0.00017333333333333334,
+      "loss": 2.0998,
       "step": 5
     },
     {
       "epoch": 1.280701754385965,
-      "grad_norm": 109.0323486328125,
-      "learning_rate": 0.000144,
-      "loss": 2.0948,
+      "grad_norm": 901.4852294921875,
+      "learning_rate": 0.00016,
+      "loss": 1.986,
       "step": 10
     },
     {
       "epoch": 1.280701754385965,
-      "eval_loss": 0.6712124943733215,
-      "eval_runtime": 31.8555,
-      "eval_samples_per_second": 0.722,
-      "eval_steps_per_second": 0.377,
+      "eval_loss": 0.7075792551040649,
+      "eval_runtime": 31.4718,
+      "eval_samples_per_second": 0.731,
+      "eval_steps_per_second": 0.381,
       "step": 10
     },
     {
       "epoch": 1.9824561403508771,
-      "grad_norm": 51.63697052001953,
-      "learning_rate": 0.00010400000000000001,
-      "loss": 2.2882,
+      "grad_norm": 42.54853439331055,
+      "learning_rate": 0.00012666666666666666,
+      "loss": 2.1037,
       "step": 15
     },
     {
       "epoch": 2.56140350877193,
-      "grad_norm": 53.5184326171875,
-      "learning_rate": 6.400000000000001e-05,
-      "loss": 1.3999,
+      "grad_norm": 84.47293853759766,
+      "learning_rate": 9.333333333333334e-05,
+      "loss": 1.2837,
       "step": 20
     },
     {
       "epoch": 2.56140350877193,
-      "eval_loss": 0.5531365871429443,
-      "eval_runtime": 31.7285,
-      "eval_samples_per_second": 0.725,
-      "eval_steps_per_second": 0.378,
+      "eval_loss": 0.6040331125259399,
+      "eval_runtime": 31.5373,
+      "eval_samples_per_second": 0.729,
+      "eval_steps_per_second": 0.381,
       "step": 20
     },
     {
       "epoch": 3.1403508771929824,
-      "grad_norm": 20.981565475463867,
-      "learning_rate": 3.2000000000000005e-05,
-      "loss": 1.3415,
+      "grad_norm": 52.28364181518555,
+      "learning_rate": 6e-05,
+      "loss": 1.1705,
       "step": 25
     }
   ],
   "logging_steps": 5,
-  "max_steps": 25,
+  "max_steps": 30,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 4,
+  "num_train_epochs": 5,
   "save_steps": 25,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -72,7 +72,7 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
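The substantive change here is the schedule: max_steps goes from 25 to 30, num_train_epochs from 4 to 5, and should_training_stop from true back to false; the per-step losses also change, which suggests the run was re-launched with the longer schedule rather than resumed. A minimal sketch of TrainingArguments matching the new schedule (the output_dir is a placeholder, and the base learning rate of 2e-4 is inferred from the logged linear decay):

```python
from transformers import Trainer, TrainingArguments

# Hypothetical arguments mirroring the new trainer state: 30 max steps over
# 5 epochs, logging every 5 steps, evaluating every 10, saving every 25,
# batch size 2, and a ~2e-4 base learning rate with the default linear decay.
args = TrainingArguments(
    output_dir="idefics2-context-sheets",  # placeholder
    max_steps=30,
    num_train_epochs=5,
    per_device_train_batch_size=2,
    learning_rate=2e-4,
    logging_steps=5,
    eval_strategy="steps",
    eval_steps=10,
    save_steps=25,
)

# `model`, `train_ds` and `eval_ds` stand in for the PEFT-wrapped IDEFICS2
# model and the context-sheet datasets; they are not defined in this commit.
trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
trainer.train()
```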
checkpoint-25/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4f668871e21e05e5e1625fecbdb54559b75b1aff22294454ac78ae0c7e83537
+oid sha256:5d18e7524883970e68cdada6591481ad12d536248a71cc627706abc935569009
 size 5304
checkpoint-30/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "HuggingFaceM4/idefics2-8b",
+  "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": "gaussian",
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 8,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": ".*(text_model|modality_projection|perceiver_resampler).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$",
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
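For reference, a minimal sketch of the `peft.LoraConfig` that would produce an adapter_config.json like the one above and attach it to the IDEFICS2 base model; the rank, alpha, dropout, init, and target-module regex come from the file, while the dtype and everything else are assumptions:

```python
import torch
from peft import LoraConfig, get_peft_model
from transformers import Idefics2ForConditionalGeneration

# LoRA settings mirroring checkpoint-30/adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.1,
    init_lora_weights="gaussian",
    target_modules=".*(text_model|modality_projection|perceiver_resampler).*"
                   "(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$",
)

model = Idefics2ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    torch_dtype=torch.bfloat16,  # assumption: the dtype is not recorded in the adapter config
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```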
checkpoint-30/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8cdf564080619dd5da832a2e6dddc7ddeb9b49dcfdb9471eb1888410a03eac5
+size 93378688
checkpoint-30/generation_config.json ADDED
@@ -0,0 +1,18 @@
+{
+  "_from_model_config": true,
+  "bad_words_ids": [
+    [
+      32000
+    ],
+    [
+      32001
+    ]
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": [
+    2,
+    32002
+  ],
+  "pad_token_id": 0,
+  "transformers_version": "4.48.0.dev0"
+}
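A minimal sketch of loading this adapter on top of the base model for inference; the local checkpoint path, the image file, and the prompt are placeholders:

```python
import torch
from PIL import Image
from peft import PeftModel
from transformers import AutoProcessor, Idefics2ForConditionalGeneration

base_id = "HuggingFaceM4/idefics2-8b"
processor = AutoProcessor.from_pretrained(base_id)
base = Idefics2ForConditionalGeneration.from_pretrained(base_id, torch_dtype=torch.bfloat16)

# Attach the fine-tuned LoRA weights; the path is a placeholder for wherever
# this repo's checkpoint-30 directory lives locally.
model = PeftModel.from_pretrained(base, "checkpoint-30")

# Placeholder input: one context-sheet scan plus a transcription prompt.
image = Image.open("context_sheet.jpg")
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Transcribe this context sheet."},
    ]}
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt")

generated = model.generate(**inputs, max_new_tokens=256)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])
```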
checkpoint-30/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a59487f60933ab4844b5854bfc6f13394aa385968d6d81c9c53278ef2d2d9ea5
+size 48071944
checkpoint-30/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dcba77c57f5eab0141fde363e0cf9700ae7efb305f3d3dfc25572c16c1f4e0b
+size 14244
checkpoint-30/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:174c77dd52478115dc3752b25cb54f830116ea3fe612fb9bdb81ac01b6d182be
+size 1064
checkpoint-30/trainer_state.json ADDED
@@ -0,0 +1,99 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.8421052631578947,
+  "eval_steps": 10,
+  "global_step": 30,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.7017543859649122,
+      "grad_norm": 44.80005645751953,
+      "learning_rate": 0.00017333333333333334,
+      "loss": 2.0998,
+      "step": 5
+    },
+    {
+      "epoch": 1.280701754385965,
+      "grad_norm": 901.4852294921875,
+      "learning_rate": 0.00016,
+      "loss": 1.986,
+      "step": 10
+    },
+    {
+      "epoch": 1.280701754385965,
+      "eval_loss": 0.7075792551040649,
+      "eval_runtime": 31.4718,
+      "eval_samples_per_second": 0.731,
+      "eval_steps_per_second": 0.381,
+      "step": 10
+    },
+    {
+      "epoch": 1.9824561403508771,
+      "grad_norm": 42.54853439331055,
+      "learning_rate": 0.00012666666666666666,
+      "loss": 2.1037,
+      "step": 15
+    },
+    {
+      "epoch": 2.56140350877193,
+      "grad_norm": 84.47293853759766,
+      "learning_rate": 9.333333333333334e-05,
+      "loss": 1.2837,
+      "step": 20
+    },
+    {
+      "epoch": 2.56140350877193,
+      "eval_loss": 0.6040331125259399,
+      "eval_runtime": 31.5373,
+      "eval_samples_per_second": 0.729,
+      "eval_steps_per_second": 0.381,
+      "step": 20
+    },
+    {
+      "epoch": 3.1403508771929824,
+      "grad_norm": 52.28364181518555,
+      "learning_rate": 6e-05,
+      "loss": 1.1705,
+      "step": 25
+    },
+    {
+      "epoch": 3.8421052631578947,
+      "grad_norm": 16.730497360229492,
+      "learning_rate": 2.6666666666666667e-05,
+      "loss": 1.2169,
+      "step": 30
+    },
+    {
+      "epoch": 3.8421052631578947,
+      "eval_loss": 0.5927651524543762,
+      "eval_runtime": 31.4582,
+      "eval_samples_per_second": 0.731,
+      "eval_steps_per_second": 0.381,
+      "step": 30
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 30,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 25,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2043610494331296.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-30/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d18e7524883970e68cdada6591481ad12d536248a71cc627706abc935569009
+size 5304