chansung committed on
Commit
ff019e6
1 Parent(s): a9c3d0c

Model save

Files changed (4)
  1. README.md +69 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +176 -0
README.md ADDED
@@ -0,0 +1,69 @@
---
library_name: peft
license: apache-2.0
base_model: mistralai/Mistral-7B-v0.3
tags:
- trl
- sft
- generated_from_trainer
datasets:
- generator
model-index:
- name: mistral7b-milora-classification-11-v1
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# mistral7b-milora-classification-11-v1

This model is a fine-tuned version of [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3) on the generator dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4541
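
As a quick way to try the adapter, here is a minimal inference sketch. The repo id below is an assumption based on the model name above; point it at wherever this adapter is actually hosted. The tokenizer is loaded from the base model.

```python
# Minimal usage sketch (not part of the original training code).
# The adapter id below is an assumption; adjust to the real repo id or local path.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "mistral7b-milora-classification-11-v1"  # assumed repo id
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.3")

prompt = "Classify the following text:\n..."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```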

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the sketch after this list for one way to express them in code):
- learning_rate: 0.0002
- train_batch_size: 14
- eval_batch_size: 14
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 2
- total_train_batch_size: 224
- total_eval_batch_size: 112
- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
+
56
+ ### Training results
57
+
58
+ | Training Loss | Epoch | Step | Validation Loss |
59
+ |:-------------:|:------:|:----:|:---------------:|
60
+ | 1.4574 | 0.9943 | 87 | 1.4541 |
61
+
62
+
63
+ ### Framework versions
64
+
65
+ - PEFT 0.13.3.dev0
66
+ - Transformers 4.46.2
67
+ - Pytorch 2.3.1+cu121
68
+ - Datasets 3.1.0
69
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9942857142857143,
    "total_flos": 4.260877023880151e+17,
    "train_loss": 1.5436431375043145,
    "train_runtime": 332.2798,
    "train_samples": 51241,
    "train_samples_per_second": 58.947,
    "train_steps_per_second": 0.262
}
train_results.json ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9942857142857143,
    "total_flos": 4.260877023880151e+17,
    "train_loss": 1.5436431375043145,
    "train_runtime": 332.2798,
    "train_samples": 51241,
    "train_samples_per_second": 58.947,
    "train_steps_per_second": 0.262
}
trainer_state.json ADDED
@@ -0,0 +1,176 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9942857142857143,
  "eval_steps": 500,
  "global_step": 87,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011428571428571429,
      "grad_norm": 1.4554669857025146,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 1.9343,
      "step": 1
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 1.3781572580337524,
      "learning_rate": 0.00011111111111111112,
      "loss": 1.8872,
      "step": 5
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 1.2489840984344482,
      "learning_rate": 0.00019991889981715698,
      "loss": 1.8455,
      "step": 10
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 0.9914447665214539,
      "learning_rate": 0.0001970941817426052,
      "loss": 1.7457,
      "step": 15
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.7855111360549927,
      "learning_rate": 0.00019034504346103823,
      "loss": 1.6664,
      "step": 20
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.8774387240409851,
      "learning_rate": 0.00017994427634035015,
      "loss": 1.5641,
      "step": 25
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 0.5209712982177734,
      "learning_rate": 0.00016631226582407952,
      "loss": 1.512,
      "step": 30
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.4458630383014679,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.4799,
      "step": 35
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.42980432510375977,
      "learning_rate": 0.00013166679938014726,
      "loss": 1.4725,
      "step": 40
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 0.4260351359844208,
      "learning_rate": 0.0001120536680255323,
      "loss": 1.4651,
      "step": 45
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.3732384741306305,
      "learning_rate": 9.195334312832742e-05,
      "loss": 1.4722,
      "step": 50
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 0.3934707045555115,
      "learning_rate": 7.217825360835473e-05,
      "loss": 1.471,
      "step": 55
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.4875844717025757,
      "learning_rate": 5.3527682795623146e-05,
      "loss": 1.4555,
      "step": 60
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 0.36814966797828674,
      "learning_rate": 3.675546244046228e-05,
      "loss": 1.4407,
      "step": 65
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.4143487513065338,
      "learning_rate": 2.2539503817234553e-05,
      "loss": 1.4469,
      "step": 70
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.3434593081474304,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 1.4406,
      "step": 75
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.39424604177474976,
      "learning_rate": 3.948188836862776e-06,
      "loss": 1.442,
      "step": 80
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 0.4415414333343506,
      "learning_rate": 3.2426918657900704e-07,
      "loss": 1.4574,
      "step": 85
    },
    {
      "epoch": 0.9942857142857143,
      "eval_loss": 1.4540868997573853,
      "eval_runtime": 1.2938,
      "eval_samples_per_second": 150.714,
      "eval_steps_per_second": 1.546,
      "step": 87
    },
    {
      "epoch": 0.9942857142857143,
      "step": 87,
      "total_flos": 4.260877023880151e+17,
      "train_loss": 1.5436431375043145,
      "train_runtime": 332.2798,
      "train_samples_per_second": 58.947,
      "train_steps_per_second": 0.262
    }
  ],
  "logging_steps": 5,
  "max_steps": 87,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.260877023880151e+17,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}
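
The `log_history` array above holds one record per logging step, plus a final eval record and a train summary record. A small sketch, assuming the file is saved locally as `trainer_state.json`, for pulling out the logged loss curve:

```python
# Sketch: print (step, learning rate, loss) from log_history.
# Eval and summary entries have no "loss" key and are skipped.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")
```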