AkashDataScience committed on
Commit
e59dc29
·
1 Parent(s): 22641d7

Changed model

Files changed (23)
  1. app.py +1 -1
  2. checkpoint_dir/adapter_config.json +2 -2
  3. checkpoint_dir/adapter_model.safetensors +1 -1
  4. checkpoint_dir/all_results.json +11 -11
  5. checkpoint_dir/{checkpoint-100 → checkpoint-60}/README.md +0 -0
  6. checkpoint_dir/{checkpoint-100 → checkpoint-60}/adapter_config.json +2 -2
  7. checkpoint_dir/{checkpoint-100 → checkpoint-60}/adapter_model.safetensors +1 -1
  8. checkpoint_dir/{checkpoint-100 → checkpoint-60}/added_tokens.json +0 -0
  9. checkpoint_dir/{checkpoint-100 → checkpoint-60}/optimizer.pt +1 -1
  10. checkpoint_dir/{checkpoint-100 → checkpoint-60}/rng_state.pth +1 -1
  11. checkpoint_dir/{checkpoint-100 → checkpoint-60}/scheduler.pt +1 -1
  12. checkpoint_dir/{checkpoint-100 → checkpoint-60}/special_tokens_map.json +0 -0
  13. checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer.json +0 -0
  14. checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer.model +0 -0
  15. checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer_config.json +0 -0
  16. checkpoint_dir/{checkpoint-100 → checkpoint-60}/trainer_state.json +14 -28
  17. checkpoint_dir/{checkpoint-100 → checkpoint-60}/training_args.bin +1 -1
  18. checkpoint_dir/eval_results.json +6 -6
  19. checkpoint_dir/runs/{Sep15_06-41-15_be30e98dc4c6/events.out.tfevents.1726383163.be30e98dc4c6.4371.0 → Sep16_05-36-31_3ea85fc78173/events.out.tfevents.1726465646.3ea85fc78173.1553.0} +2 -2
  20. checkpoint_dir/runs/{Sep15_06-41-15_be30e98dc4c6/events.out.tfevents.1726392217.be30e98dc4c6.4371.1 → Sep16_05-36-31_3ea85fc78173/events.out.tfevents.1726469017.3ea85fc78173.1553.1} +1 -1
  21. checkpoint_dir/train_results.json +6 -6
  22. checkpoint_dir/trainer_state.json +21 -35
  23. checkpoint_dir/training_args.bin +1 -1
app.py CHANGED
@@ -14,7 +14,7 @@ model_kwargs = dict(
 )
 base_model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
 
-new_model = "checkpoint_dir/checkpoint-100" # change to the path where your model is saved
+new_model = "checkpoint_dir/checkpoint-60" # change to the path where your model is saved
 
 model = PeftModel.from_pretrained(base_model, new_model)
 model = model.merge_and_unload()
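For context, the app.py change only repoints the adapter path; the surrounding load-and-merge flow is unchanged. Below is a minimal sketch of that flow, assuming a causal-LM base checkpoint. The base model id and the model_kwargs values are placeholders, since the diff does not show how app.py defines them.

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

checkpoint_path = "base-model-id"  # placeholder; app.py sets its own base checkpoint
model_kwargs = dict(torch_dtype=torch.float32, device_map="cpu")  # assumed kwargs, not shown in this diff

base_model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)

new_model = "checkpoint_dir/checkpoint-60"  # the adapter checkpoint this commit switches to
model = PeftModel.from_pretrained(base_model, new_model)  # attach the LoRA adapter
model = model.merge_and_unload()  # fold the adapter weights into the base model for plain inference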
checkpoint_dir/adapter_config.json CHANGED
@@ -20,8 +20,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
-    "qkv_proj"
+    "qkv_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
checkpoint_dir/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:387c9fad15944c3910ba4774e7ba6a23445d76fcbbb3977a6b8d0c740763aacf
+oid sha256:eaf6e67d0958c4e912553fe4b1102fec6fc5aa6174c61ff7f17fc19b55e37a58
 size 37766064
checkpoint_dir/all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-  "epoch": 0.01725327812284334,
-  "eval_loss": 1.4359806776046753,
-  "eval_runtime": 4457.9597,
-  "eval_samples": 3500,
-  "eval_samples_per_second": 0.272,
-  "eval_steps_per_second": 0.068,
-  "total_flos": 9171802836172800.0,
-  "train_loss": 1.4125469398498536,
-  "train_runtime": 4596.9179,
-  "train_samples_per_second": 0.087,
-  "train_steps_per_second": 0.022
+  "epoch": 0.010351966873706004,
+  "eval_loss": 1.48798406124115,
+  "eval_runtime": 847.6662,
+  "eval_samples": 700,
+  "eval_samples_per_second": 0.289,
+  "eval_steps_per_second": 0.073,
+  "total_flos": 5503081701703680.0,
+  "train_loss": 1.4797897656758627,
+  "train_runtime": 2523.2494,
+  "train_samples_per_second": 0.095,
+  "train_steps_per_second": 0.024
 }
checkpoint_dir/{checkpoint-100 → checkpoint-60}/README.md RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/adapter_config.json RENAMED
@@ -20,8 +20,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
-    "qkv_proj"
+    "qkv_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
checkpoint_dir/{checkpoint-100 → checkpoint-60}/adapter_model.safetensors RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:387c9fad15944c3910ba4774e7ba6a23445d76fcbbb3977a6b8d0c740763aacf
+oid sha256:eaf6e67d0958c4e912553fe4b1102fec6fc5aa6174c61ff7f17fc19b55e37a58
 size 37766064
checkpoint_dir/{checkpoint-100 → checkpoint-60}/added_tokens.json RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6438fc8c8808bb136aef6e9783a1d257bae39790db6744709c35ee4dd14c4611
+oid sha256:ed74ef148c4c92c935d30471a2e767464433b631c4930075e2229fee27108027
 size 75605242
checkpoint_dir/{checkpoint-100 → checkpoint-60}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cbd578982392ecbe96fefb2b34d0f45951e4c538b31cdb7a93fccf52dc6b0dc
+oid sha256:26d10d8762f000ca0c31d718d394e7cdf5310597fd0da5cdcb8408cf8a9abfe9
 size 14180
checkpoint_dir/{checkpoint-100 → checkpoint-60}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2c4a11c3ec7ace2e963dc6e2b0b5b6372cc0250cefb36d5f7289475908638cb
+oid sha256:ebca7978291eb340009ddb4652f35ff3bf53d4582f90ef15e09ee69170f6af0a
 size 1064
checkpoint_dir/{checkpoint-100 → checkpoint-60}/special_tokens_map.json RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer.json RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer.model RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/tokenizer_config.json RENAMED
File without changes
checkpoint_dir/{checkpoint-100 → checkpoint-60}/trainer_state.json RENAMED
@@ -1,54 +1,40 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.01725327812284334,
+  "epoch": 0.010351966873706004,
   "eval_steps": 500,
-  "global_step": 100,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.003450655624568668,
-      "grad_norm": 0.2711855471134186,
-      "learning_rate": 5e-06,
-      "loss": 1.4262,
+      "grad_norm": 0.7515325546264648,
+      "learning_rate": 4.665063509461098e-06,
+      "loss": 1.4845,
       "step": 20
     },
     {
       "epoch": 0.006901311249137336,
-      "grad_norm": 0.47000059485435486,
-      "learning_rate": 4.267766952966369e-06,
-      "loss": 1.3739,
+      "grad_norm": 0.6366741061210632,
+      "learning_rate": 1.852952387243698e-06,
+      "loss": 1.4653,
       "step": 40
     },
     {
       "epoch": 0.010351966873706004,
-      "grad_norm": 0.2519361078739166,
-      "learning_rate": 2.5e-06,
-      "loss": 1.458,
-      "step": 60
-    },
-    {
-      "epoch": 0.013802622498274672,
-      "grad_norm": 0.20868921279907227,
-      "learning_rate": 7.322330470336314e-07,
-      "loss": 1.397,
-      "step": 80
-    },
-    {
-      "epoch": 0.01725327812284334,
-      "grad_norm": 0.38277438282966614,
+      "grad_norm": 0.38952797651290894,
       "learning_rate": 0.0,
-      "loss": 1.4076,
-      "step": 100
+      "loss": 1.4896,
+      "step": 60
     }
   ],
   "logging_steps": 20,
-  "max_steps": 100,
+  "max_steps": 60,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 100,
+  "save_steps": 60,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -61,7 +47,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9171802836172800.0,
+  "total_flos": 5503081701703680.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
checkpoint_dir/{checkpoint-100 → checkpoint-60}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20e9e09e2e72e8760b2f7a6a8339880a8159d2f89dcb91f882053f5ec4a71902
+oid sha256:36cc7dd510b77261c992baebefd980593dd7994f54f7011cf92230e5a9c2dca0
 size 5432
checkpoint_dir/eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "epoch": 0.01725327812284334,
-  "eval_loss": 1.4359806776046753,
-  "eval_runtime": 4457.9597,
-  "eval_samples": 3500,
-  "eval_samples_per_second": 0.272,
-  "eval_steps_per_second": 0.068
+  "epoch": 0.010351966873706004,
+  "eval_loss": 1.48798406124115,
+  "eval_runtime": 847.6662,
+  "eval_samples": 700,
+  "eval_samples_per_second": 0.289,
+  "eval_steps_per_second": 0.073
 }
checkpoint_dir/runs/{Sep15_06-41-15_be30e98dc4c6/events.out.tfevents.1726383163.be30e98dc4c6.4371.0 → Sep16_05-36-31_3ea85fc78173/events.out.tfevents.1726465646.3ea85fc78173.1553.0} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53833131cb770530912d2ef3946b501015b4ef1f0ea73142374125c489efec1a
-size 6969
+oid sha256:17e4f8b0090231d7fe4690e3db88ce3fd056a8c9eb64e374b3b236da8ccbb543
+size 7035
checkpoint_dir/runs/{Sep15_06-41-15_be30e98dc4c6/events.out.tfevents.1726392217.be30e98dc4c6.4371.1 → Sep16_05-36-31_3ea85fc78173/events.out.tfevents.1726469017.3ea85fc78173.1553.1} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:caf58e6d887ef44b4c8c61ef1a283ff8eb5015ae878f3cbb2da6d1fa935aa252
+oid sha256:4cda15103f42ec617f74b5cadba1ac418a3f37f9ca5da18994c158c26044a8f7
 size 354
checkpoint_dir/train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "epoch": 0.01725327812284334,
-  "total_flos": 9171802836172800.0,
-  "train_loss": 1.4125469398498536,
-  "train_runtime": 4596.9179,
-  "train_samples_per_second": 0.087,
-  "train_steps_per_second": 0.022
+  "epoch": 0.010351966873706004,
+  "total_flos": 5503081701703680.0,
+  "train_loss": 1.4797897656758627,
+  "train_runtime": 2523.2494,
+  "train_samples_per_second": 0.095,
+  "train_steps_per_second": 0.024
 }
checkpoint_dir/trainer_state.json CHANGED
@@ -1,63 +1,49 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.01725327812284334,
+  "epoch": 0.010351966873706004,
   "eval_steps": 500,
-  "global_step": 100,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.003450655624568668,
-      "grad_norm": 0.2711855471134186,
-      "learning_rate": 5e-06,
-      "loss": 1.4262,
+      "grad_norm": 0.7515325546264648,
+      "learning_rate": 4.665063509461098e-06,
+      "loss": 1.4845,
       "step": 20
     },
     {
       "epoch": 0.006901311249137336,
-      "grad_norm": 0.47000059485435486,
-      "learning_rate": 4.267766952966369e-06,
-      "loss": 1.3739,
+      "grad_norm": 0.6366741061210632,
+      "learning_rate": 1.852952387243698e-06,
+      "loss": 1.4653,
       "step": 40
     },
     {
       "epoch": 0.010351966873706004,
-      "grad_norm": 0.2519361078739166,
-      "learning_rate": 2.5e-06,
-      "loss": 1.458,
-      "step": 60
-    },
-    {
-      "epoch": 0.013802622498274672,
-      "grad_norm": 0.20868921279907227,
-      "learning_rate": 7.322330470336314e-07,
-      "loss": 1.397,
-      "step": 80
-    },
-    {
-      "epoch": 0.01725327812284334,
-      "grad_norm": 0.38277438282966614,
+      "grad_norm": 0.38952797651290894,
       "learning_rate": 0.0,
-      "loss": 1.4076,
-      "step": 100
+      "loss": 1.4896,
+      "step": 60
     },
     {
-      "epoch": 0.01725327812284334,
-      "step": 100,
-      "total_flos": 9171802836172800.0,
-      "train_loss": 1.4125469398498536,
-      "train_runtime": 4596.9179,
-      "train_samples_per_second": 0.087,
-      "train_steps_per_second": 0.022
+      "epoch": 0.010351966873706004,
+      "step": 60,
+      "total_flos": 5503081701703680.0,
+      "train_loss": 1.4797897656758627,
+      "train_runtime": 2523.2494,
+      "train_samples_per_second": 0.095,
+      "train_steps_per_second": 0.024
     }
   ],
   "logging_steps": 20,
-  "max_steps": 100,
+  "max_steps": 60,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 100,
+  "save_steps": 60,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -70,7 +56,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9171802836172800.0,
+  "total_flos": 5503081701703680.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
checkpoint_dir/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20e9e09e2e72e8760b2f7a6a8339880a8159d2f89dcb91f882053f5ec4a71902
+oid sha256:36cc7dd510b77261c992baebefd980593dd7994f54f7011cf92230e5a9c2dca0
 size 5432