sedrickkeh committed (verified) · Commit 522a600 · 1 Parent(s): feb2482

End of training

README.md CHANGED
@@ -4,6 +4,7 @@ license: llama3.1
 base_model: meta-llama/Meta-Llama-3.1-8B
 tags:
 - llama-factory
+- full
 - generated_from_trainer
 model-index:
 - name: top_1_ranking_stackexchange
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # top_1_ranking_stackexchange
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) on the mlfoundations-dev/top_1_ranking_stackexchange dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.5613
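Since the updated card names the base model and training dataset, a minimal usage sketch may help; the Hub repo id below is an assumption inferred from the model name in the card, not something confirmed by this diff:

```python
# Minimal sketch of loading this fine-tuned checkpoint with transformers.
# The repo id is assumed from the model-index name; substitute the actual path if it differs.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "mlfoundations-dev/top_1_ranking_stackexchange"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="auto", device_map="auto")

prompt = "How do I reverse a linked list in Python?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```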
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 2.9925187032418954,
+    "eval_loss": 0.5612708926200867,
+    "eval_runtime": 55.4018,
+    "eval_samples_per_second": 24.331,
+    "eval_steps_per_second": 0.397,
+    "total_flos": 251046207160320.0,
+    "train_loss": 0.5803195349375407,
+    "train_runtime": 9079.4053,
+    "train_samples_per_second": 8.46,
+    "train_steps_per_second": 0.017
+}
eval_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 2.9925187032418954,
+    "eval_loss": 0.5612708926200867,
+    "eval_runtime": 55.4018,
+    "eval_samples_per_second": 24.331,
+    "eval_steps_per_second": 0.397
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 2.9925187032418954,
+    "total_flos": 251046207160320.0,
+    "train_loss": 0.5803195349375407,
+    "train_runtime": 9079.4053,
+    "train_samples_per_second": 8.46,
+    "train_steps_per_second": 0.017
+}
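For context, the throughput figures above roughly imply the dataset size and effective batch size; a quick sketch using only the numbers logged in train_results.json (all values are rounded, so the results are approximate):

```python
# Rough sanity check derived only from the logged training metrics.
import json

with open("train_results.json") as f:
    train = json.load(f)

total_samples = train["train_samples_per_second"] * train["train_runtime"]  # ~76.8k samples seen
samples_per_epoch = total_samples / train["epoch"]                          # ~25.7k examples per epoch
steps_per_epoch = 150 / 3                                                   # global_step / num_train_epochs
effective_batch = samples_per_epoch / steps_per_epoch                       # ~512 samples per optimizer step

print(f"~{samples_per_epoch:,.0f} samples/epoch, ~{effective_batch:.0f} samples per optimizer step")
```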
trainer_state.json ADDED
@@ -0,0 +1,171 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.9925187032418954,
+  "eval_steps": 500,
+  "global_step": 150,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.19950124688279303,
+      "grad_norm": 59.38634222354722,
+      "learning_rate": 5e-06,
+      "loss": 0.812,
+      "step": 10
+    },
+    {
+      "epoch": 0.39900249376558605,
+      "grad_norm": 2.1960032800772877,
+      "learning_rate": 5e-06,
+      "loss": 0.6885,
+      "step": 20
+    },
+    {
+      "epoch": 0.5985037406483791,
+      "grad_norm": 7.000153159965406,
+      "learning_rate": 5e-06,
+      "loss": 0.6424,
+      "step": 30
+    },
+    {
+      "epoch": 0.7980049875311721,
+      "grad_norm": 7.945600307179786,
+      "learning_rate": 5e-06,
+      "loss": 0.6225,
+      "step": 40
+    },
+    {
+      "epoch": 0.9975062344139651,
+      "grad_norm": 1.4092966622843572,
+      "learning_rate": 5e-06,
+      "loss": 0.6165,
+      "step": 50
+    },
+    {
+      "epoch": 0.9975062344139651,
+      "eval_loss": 0.6011843085289001,
+      "eval_runtime": 55.5185,
+      "eval_samples_per_second": 24.28,
+      "eval_steps_per_second": 0.396,
+      "step": 50
+    },
+    {
+      "epoch": 1.1970074812967582,
+      "grad_norm": 1.3488536657915955,
+      "learning_rate": 5e-06,
+      "loss": 0.6217,
+      "step": 60
+    },
+    {
+      "epoch": 1.3965087281795512,
+      "grad_norm": 0.7654234458916354,
+      "learning_rate": 5e-06,
+      "loss": 0.555,
+      "step": 70
+    },
+    {
+      "epoch": 1.5960099750623442,
+      "grad_norm": 1.1374130411278827,
+      "learning_rate": 5e-06,
+      "loss": 0.5496,
+      "step": 80
+    },
+    {
+      "epoch": 1.7955112219451372,
+      "grad_norm": 0.6833448776058244,
+      "learning_rate": 5e-06,
+      "loss": 0.5567,
+      "step": 90
+    },
+    {
+      "epoch": 1.9950124688279303,
+      "grad_norm": 0.5921564717905348,
+      "learning_rate": 5e-06,
+      "loss": 0.5416,
+      "step": 100
+    },
+    {
+      "epoch": 1.9950124688279303,
+      "eval_loss": 0.5647123456001282,
+      "eval_runtime": 55.3866,
+      "eval_samples_per_second": 24.338,
+      "eval_steps_per_second": 0.397,
+      "step": 100
+    },
+    {
+      "epoch": 2.1945137157107233,
+      "grad_norm": 0.7816435404426337,
+      "learning_rate": 5e-06,
+      "loss": 0.5452,
+      "step": 110
+    },
+    {
+      "epoch": 2.3940149625935163,
+      "grad_norm": 0.8083163191936461,
+      "learning_rate": 5e-06,
+      "loss": 0.4933,
+      "step": 120
+    },
+    {
+      "epoch": 2.5935162094763093,
+      "grad_norm": 0.7453739368674692,
+      "learning_rate": 5e-06,
+      "loss": 0.4865,
+      "step": 130
+    },
+    {
+      "epoch": 2.7930174563591024,
+      "grad_norm": 0.6095605072913075,
+      "learning_rate": 5e-06,
+      "loss": 0.4884,
+      "step": 140
+    },
+    {
+      "epoch": 2.9925187032418954,
+      "grad_norm": 0.722389875412173,
+      "learning_rate": 5e-06,
+      "loss": 0.4848,
+      "step": 150
+    },
+    {
+      "epoch": 2.9925187032418954,
+      "eval_loss": 0.5612708926200867,
+      "eval_runtime": 55.0401,
+      "eval_samples_per_second": 24.491,
+      "eval_steps_per_second": 0.4,
+      "step": 150
+    },
+    {
+      "epoch": 2.9925187032418954,
+      "step": 150,
+      "total_flos": 251046207160320.0,
+      "train_loss": 0.5803195349375407,
+      "train_runtime": 9079.4053,
+      "train_samples_per_second": 8.46,
+      "train_steps_per_second": 0.017
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 150,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 251046207160320.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
training_eval_loss.png ADDED
training_loss.png ADDED
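The two loss plots committed above can be regenerated from the `log_history` in trainer_state.json; a minimal sketch (matplotlib assumed available, styling and filenames are my own and not meant to match the originals exactly):

```python
# Sketch: rebuild training/eval loss curves from trainer_state.json's log_history.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training losses and the three end-of-epoch eval losses.
train_pts = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_pts = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train_pts), label="train loss")
plt.plot(*zip(*eval_pts), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("training_loss.png")
```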