Ahatsham committed · verified
Commit a1dc70a · 1 Parent(s): da6760f

Model save
README.md ADDED
@@ -0,0 +1,77 @@
+ ---
+ base_model: meta-llama/Meta-Llama-3-8B
+ library_name: peft
+ license: llama3
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Output_llama2_70-15-15_new
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Output_llama2_70-15-15_new
+
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6310
+ - Accuracy: 0.6474
+ - Precision: 0.6760
+ - Recall: 0.6474
+ - F1: 0.6376
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | No log | 1.0 | 46 | 0.7791 | 0.4808 | 0.2311 | 0.4808 | 0.3122 |
+ | No log | 2.0 | 92 | 0.7556 | 0.4872 | 0.7519 | 0.4872 | 0.3262 |
+ | No log | 3.0 | 138 | 0.7071 | 0.5064 | 0.5708 | 0.5064 | 0.4086 |
+ | No log | 4.0 | 184 | 0.7045 | 0.5 | 0.5549 | 0.5 | 0.3971 |
+ | No log | 5.0 | 230 | 0.6714 | 0.5705 | 0.6227 | 0.5705 | 0.5340 |
+ | No log | 6.0 | 276 | 0.6976 | 0.4936 | 0.5303 | 0.4936 | 0.3932 |
+ | No log | 7.0 | 322 | 0.6453 | 0.6603 | 0.6906 | 0.6603 | 0.6507 |
+ | No log | 8.0 | 368 | 0.6640 | 0.5769 | 0.6429 | 0.5769 | 0.5354 |
+ | No log | 9.0 | 414 | 0.6460 | 0.6154 | 0.6643 | 0.6154 | 0.5928 |
+ | No log | 10.0 | 460 | 0.6310 | 0.6474 | 0.6760 | 0.6474 | 0.6376 |
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.41.2
+ - Pytorch 2.3.1+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.19.1
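The card above documents a PEFT adapter for meta-llama/Meta-Llama-3-8B evaluated with classification metrics. A minimal loading sketch follows, assuming the adapter is published under a repo path matching the model name and that the head is a two-label sequence classifier; neither is stated in the card, so both are assumptions.

```python
# Hypothetical usage sketch: the adapter repo id and the binary classification
# head (num_labels=2) are assumptions, not taken from the card.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-8B"
adapter_id = "Ahatsham/Output_llama2_70-15-15_new"  # assumed repo path

tokenizer = AutoTokenizer.from_pretrained(base_id)
tokenizer.pad_token = "<|end_of_text|>"  # mirrors the tokenizer change in this commit

base = AutoModelForSequenceClassification.from_pretrained(
    base_id,
    num_labels=2,                # assumption: two classes
    torch_dtype=torch.bfloat16,
)
base.config.pad_token_id = tokenizer.pad_token_id

model = PeftModel.from_pretrained(base, adapter_id)  # attach the saved adapter weights
model.eval()

inputs = tokenizer("Example text to classify.", return_tensors="pt")
with torch.no_grad():
    predicted_class = model(**inputs).logits.argmax(dim=-1).item()
```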
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd9699fb1d45c7ebc6f9f10f31db2b5656a06c1cc7aa55d67d95e5fa1b1ca7c7
+ oid sha256:e107e3c746763fc822ee728479f4440ed39dc0e64cdc37fe10f99651e23267c9
  size 54593240
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 2.488087619686564e+17,
+   "train_loss": 0.5811269345490829,
+   "train_runtime": 16870.9489,
+   "train_samples": 730,
+   "train_samples_per_second": 0.433,
+   "train_steps_per_second": 0.027
+ }
runs/Dec18_13-29-32_LCEEE-HAL/events.out.tfevents.1734550172.LCEEE-HAL.1086441.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b14beea7e1cef9f3bf77da962e5e3ca400c90cab04e182553e064b53526777e6
- size 9660
+ oid sha256:ee607ee86e8a870237c0836f98e80ce80a8713bf8a3602af4db976118d94113b
+ size 10486
tokenizer.json CHANGED
@@ -6,7 +6,14 @@
     "strategy": "LongestFirst",
     "stride": 0
   },
-  "padding": null,
+  "padding": {
+    "strategy": "BatchLongest",
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 128001,
+    "pad_type_id": 0,
+    "pad_token": "<|end_of_text|>"
+  },
   "added_tokens": [
     {
       "id": 128000,
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 2.488087619686564e+17,
+   "train_loss": 0.5811269345490829,
+   "train_runtime": 16870.9489,
+   "train_samples": 730,
+   "train_samples_per_second": 0.433,
+   "train_steps_per_second": 0.027
+ }
trainer_state.json ADDED
@@ -0,0 +1,171 @@
+ {
+   "best_metric": 0.6602564102564102,
+   "best_model_checkpoint": "Output_llama2_70-15-15_new/checkpoint-322",
+   "epoch": 10.0,
+   "eval_steps": 500,
+   "global_step": 460,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.4807692307692308,
+       "eval_f1": 0.3121878121878122,
+       "eval_loss": 0.779107391834259,
+       "eval_precision": 0.23113905325443787,
+       "eval_recall": 0.4807692307692308,
+       "eval_runtime": 111.4069,
+       "eval_samples_per_second": 1.4,
+       "eval_steps_per_second": 0.09,
+       "step": 46
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.48717948717948717,
+       "eval_f1": 0.3262093156048617,
+       "eval_loss": 0.755558967590332,
+       "eval_precision": 0.7518610421836228,
+       "eval_recall": 0.48717948717948717,
+       "eval_runtime": 111.1784,
+       "eval_samples_per_second": 1.403,
+       "eval_steps_per_second": 0.09,
+       "step": 92
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5064102564102564,
+       "eval_f1": 0.40855426407209083,
+       "eval_loss": 0.7070600390434265,
+       "eval_precision": 0.5707901253675902,
+       "eval_recall": 0.5064102564102564,
+       "eval_runtime": 111.2413,
+       "eval_samples_per_second": 1.402,
+       "eval_steps_per_second": 0.09,
+       "step": 138
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.5,
+       "eval_f1": 0.39713058754635955,
+       "eval_loss": 0.7044801115989685,
+       "eval_precision": 0.5548682087143626,
+       "eval_recall": 0.5,
+       "eval_runtime": 111.1012,
+       "eval_samples_per_second": 1.404,
+       "eval_steps_per_second": 0.09,
+       "step": 184
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.5705128205128205,
+       "eval_f1": 0.5339812912754868,
+       "eval_loss": 0.6713973879814148,
+       "eval_precision": 0.622663378087679,
+       "eval_recall": 0.5705128205128205,
+       "eval_runtime": 111.3456,
+       "eval_samples_per_second": 1.401,
+       "eval_steps_per_second": 0.09,
+       "step": 230
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.4935897435897436,
+       "eval_f1": 0.3931920371648725,
+       "eval_loss": 0.6975941061973572,
+       "eval_precision": 0.530316514471444,
+       "eval_recall": 0.4935897435897436,
+       "eval_runtime": 111.0983,
+       "eval_samples_per_second": 1.404,
+       "eval_steps_per_second": 0.09,
+       "step": 276
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.6602564102564102,
+       "eval_f1": 0.6507378243632287,
+       "eval_loss": 0.6453418135643005,
+       "eval_precision": 0.6906434911242604,
+       "eval_recall": 0.6602564102564102,
+       "eval_runtime": 111.1451,
+       "eval_samples_per_second": 1.404,
+       "eval_steps_per_second": 0.09,
+       "step": 322
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.5769230769230769,
+       "eval_f1": 0.5353708791208791,
+       "eval_loss": 0.6639875769615173,
+       "eval_precision": 0.6429280397022332,
+       "eval_recall": 0.5769230769230769,
+       "eval_runtime": 111.2239,
+       "eval_samples_per_second": 1.403,
+       "eval_steps_per_second": 0.09,
+       "step": 368
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.6153846153846154,
+       "eval_f1": 0.5928187429481648,
+       "eval_loss": 0.645951509475708,
+       "eval_precision": 0.6643282486336569,
+       "eval_recall": 0.6153846153846154,
+       "eval_runtime": 111.2453,
+       "eval_samples_per_second": 1.402,
+       "eval_steps_per_second": 0.09,
+       "step": 414
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.6474358974358975,
+       "eval_f1": 0.6375581196222184,
+       "eval_loss": 0.6310403347015381,
+       "eval_precision": 0.6760355029585798,
+       "eval_recall": 0.6474358974358975,
+       "eval_runtime": 111.4617,
+       "eval_samples_per_second": 1.4,
+       "eval_steps_per_second": 0.09,
+       "step": 460
+     },
+     {
+       "epoch": 10.0,
+       "step": 460,
+       "total_flos": 2.488087619686564e+17,
+       "train_loss": 0.5811269345490829,
+       "train_runtime": 16870.9489,
+       "train_samples_per_second": 0.433,
+       "train_steps_per_second": 0.027
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 460,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 10,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2.488087619686564e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
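The trainer_state.json above records per-epoch evaluation, 460 steps over 10 epochs, and an EarlyStoppingCallback with patience 10 that never triggered (counter 0). The sketch below is a rough reconstruction of the corresponding training arguments; `metric_for_best_model="accuracy"` is an inference from `best_metric` matching the epoch-7 eval accuracy, and the model/dataset wiring is omitted.

```python
# Minimal sketch of the configuration implied by trainer_state.json:
# per-epoch evaluation and saving, 10 epochs, early stopping with patience 10.
from transformers import TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="Output_llama2_70-15-15_new",
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=10,
    lr_scheduler_type="linear",
    seed=42,
    eval_strategy="epoch",             # one eval entry per epoch, as in the log above
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",  # assumption, see lead-in
    greater_is_better=True,
)

early_stop = EarlyStoppingCallback(
    early_stopping_patience=10,
    early_stopping_threshold=0.0,
)

# These would then be passed to Trainer(model=..., args=args, callbacks=[early_stop])
# together with the datasets and a compute_metrics function returning
# accuracy/precision/recall/f1.
```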