Model save
- README.md +13 -17
- all_results.json +4 -9
- train_results.json +4 -4
- trainer_state.json +45 -45
README.md
CHANGED
@@ -3,15 +3,11 @@ library_name: transformers
 license: apache-2.0
 base_model: alignment-handbook/zephyr-7b-sft-full
 tags:
-- alignment-handbook
-- trl
-- sft
-- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
--
+- generator
 model-index:
 - name: zephyr-7b-sft-full
   results: []
@@ -22,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-sft-full
 
-This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the
+This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
+- Loss: 0.1468
 
 ## Model description
 
@@ -60,16 +56,16 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.
-| 1.
-| 1.
-| 1.
-|
-|
-|
-|
-|
-| 0.
+| 1.0647        | 1.0   | 1    | 1.0615          |
+| 1.0647        | 2.0   | 2    | 1.1875          |
+| 1.0647        | 3.0   | 3    | 0.9856          |
+| 1.0647        | 4.0   | 4    | 0.6196          |
+| 0.969         | 5.0   | 5    | 0.4429          |
+| 0.969         | 6.0   | 6    | 0.3590          |
+| 0.969         | 7.0   | 7    | 0.2403          |
+| 0.969         | 8.0   | 8    | 0.1860          |
+| 0.969         | 9.0   | 9    | 0.1558          |
+| 0.2782        | 10.0  | 10   | 0.1468          |
 
 
 ### Framework versions
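The updated card ends at the framework-versions section and carries no usage example. Below is a minimal, hypothetical sketch of loading a checkpoint like this with transformers; the repo id is a placeholder, and the chat template is assumed to be inherited from the base zephyr SFT tokenizer.

```python
# Hypothetical usage sketch for a checkpoint like this one.
# "your-username/zephyr-7b-sft-full" is a placeholder repo id, not the actual repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/zephyr-7b-sft-full"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # requires accelerate
)

# Format the prompt through the tokenizer's chat template (assumed to be
# carried over from alignment-handbook/zephyr-7b-sft-full).
messages = [{"role": "user", "content": "Summarize what supervised fine-tuning does."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```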
all_results.json
CHANGED
@@ -1,14 +1,9 @@
 {
     "epoch": 10.0,
-    "eval_loss": 0.41154685616493225,
-    "eval_runtime": 2.677,
-    "eval_samples": 848,
-    "eval_samples_per_second": 18.678,
-    "eval_steps_per_second": 0.374,
     "total_flos": 8375186227200.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second":
+    "train_loss": 0.6331865787506104,
+    "train_runtime": 307.8989,
+    "train_samples": 424,
+    "train_samples_per_second": 0.617,
     "train_steps_per_second": 0.032
 }
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 10.0,
     "total_flos": 8375186227200.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second":
+    "train_loss": 0.6331865787506104,
+    "train_runtime": 307.8989,
+    "train_samples": 424,
+    "train_samples_per_second": 0.617,
     "train_steps_per_second": 0.032
 }
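all_results.json and train_results.json share the same flat key/value layout shown above. A minimal sketch of reading them back, assuming the files sit in the checkpoint/repo root:

```python
# Sketch: read the run-level metrics saved alongside the checkpoint.
# Paths are assumed to be relative to the repo/checkpoint root.
import json
from pathlib import Path

for name in ("all_results.json", "train_results.json"):
    path = Path(name)
    if not path.exists():
        continue
    metrics = json.loads(path.read_text())
    print(f"{name}: epoch={metrics['epoch']}, "
          f"train_loss={metrics.get('train_loss')}, "
          f"train_runtime={metrics.get('train_runtime')}s")
```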
trainer_state.json
CHANGED
@@ -10,112 +10,112 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm":
+      "grad_norm": 25.3735801083628,
       "learning_rate": 1e-05,
-      "loss": 1.
+      "loss": 1.0647,
       "step": 1
     },
     {
       "epoch": 1.0,
-      "eval_loss": 1.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
+      "eval_loss": 1.061476469039917,
+      "eval_runtime": 2.8031,
+      "eval_samples_per_second": 6.778,
       "eval_steps_per_second": 0.357,
       "step": 1
     },
     {
       "epoch": 2.0,
-      "eval_loss": 1.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 1.1875081062316895,
+      "eval_runtime": 2.6772,
+      "eval_samples_per_second": 7.097,
+      "eval_steps_per_second": 0.374,
       "step": 2
     },
     {
       "epoch": 3.0,
-      "eval_loss":
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 0.985565185546875,
+      "eval_runtime": 2.6682,
+      "eval_samples_per_second": 7.121,
+      "eval_steps_per_second": 0.375,
       "step": 3
     },
     {
       "epoch": 4.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 0.6195911169052124,
+      "eval_runtime": 2.6829,
+      "eval_samples_per_second": 7.082,
+      "eval_steps_per_second": 0.373,
       "step": 4
     },
     {
       "epoch": 5.0,
-      "grad_norm":
+      "grad_norm": 13.767739837044488,
       "learning_rate": 5.8682408883346535e-06,
-      "loss":
+      "loss": 0.969,
       "step": 5
     },
     {
       "epoch": 5.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
+      "eval_loss": 0.4429333806037903,
+      "eval_runtime": 2.6868,
+      "eval_samples_per_second": 7.072,
       "eval_steps_per_second": 0.372,
       "step": 5
     },
     {
       "epoch": 6.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 0.3589998185634613,
+      "eval_runtime": 2.6805,
+      "eval_samples_per_second": 7.088,
+      "eval_steps_per_second": 0.373,
       "step": 6
     },
     {
       "epoch": 7.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
+      "eval_loss": 0.24026596546173096,
+      "eval_runtime": 2.671,
+      "eval_samples_per_second": 7.113,
       "eval_steps_per_second": 0.374,
       "step": 7
     },
     {
       "epoch": 8.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 0.18603171408176422,
+      "eval_runtime": 2.7105,
+      "eval_samples_per_second": 7.01,
+      "eval_steps_per_second": 0.369,
       "step": 8
     },
     {
       "epoch": 9.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
+      "eval_loss": 0.15581761300563812,
+      "eval_runtime": 2.6748,
+      "eval_samples_per_second": 7.103,
       "eval_steps_per_second": 0.374,
       "step": 9
     },
     {
       "epoch": 10.0,
-      "grad_norm":
+      "grad_norm": 2.4836614664887184,
       "learning_rate": 0.0,
-      "loss": 0.
+      "loss": 0.2782,
       "step": 10
     },
     {
       "epoch": 10.0,
-      "eval_loss": 0.
-      "eval_runtime": 2.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 0.
+      "eval_loss": 0.14684411883354187,
+      "eval_runtime": 2.6767,
+      "eval_samples_per_second": 7.098,
+      "eval_steps_per_second": 0.374,
       "step": 10
     },
     {
       "epoch": 10.0,
       "step": 10,
       "total_flos": 8375186227200.0,
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second":
+      "train_loss": 0.6331865787506104,
+      "train_runtime": 307.8989,
+      "train_samples_per_second": 0.617,
       "train_steps_per_second": 0.032
     }
   ],
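The log_history list above interleaves training-loss entries (with grad_norm and learning_rate) and eval entries, keyed by step. A small sketch that rebuilds the README's loss table from it, assuming trainer_state.json sits in the current directory:

```python
# Sketch: rebuild the README's loss table from trainer_state.json's log_history.
# Assumes trainer_state.json is in the current directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_loss_by_step = {}  # step -> training loss logged at that step
eval_rows = []

for entry in state["log_history"]:
    if "loss" in entry:
        train_loss_by_step[entry["step"]] = entry["loss"]
    if "eval_loss" in entry:
        eval_rows.append(entry)

print("| Training Loss | Epoch | Step | Validation Loss |")
print("|:-------------:|:-----:|:----:|:---------------:|")
last_train = None
for row in eval_rows:
    # The card's table appears to repeat the most recent logged training loss.
    last_train = train_loss_by_step.get(row["step"], last_train)
    print(f"| {last_train} | {row['epoch']} | {row['step']} | {row['eval_loss']:.4f} |")
```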