cutelemonlili committed · verified
Commit 89ffc00 · Parent: a467795

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,63 @@
+ ---
+ library_name: peft
+ license: other
+ base_model: Qwen/Qwen2.5-14B-Instruct
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ model-index:
+ - name: omni_training_less_than_5
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # omni_training_less_than_5
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-14B-Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) on the omni_training_less_than_5 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.7308
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 2
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - total_train_batch_size: 8
+ - total_eval_batch_size: 4
+ - optimizer: adamw_torch (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.12.0
+ - Transformers 4.46.1
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
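
As a usage note: a minimal sketch of loading this adapter for inference with the framework versions listed above. The adapter repo id `cutelemonlili/omni_training_less_than_5` is an assumption inferred from the committer and adapter name; only the base model id is confirmed by the card.

```python
# Sketch: load the frozen base model, then attach this LoRA adapter with PEFT.
# The adapter id below is assumed, not confirmed by the model card.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-14B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "cutelemonlili/omni_training_less_than_5")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-14B-Instruct")
```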
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-14B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
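
The config above maps one-to-one onto `peft.LoraConfig`. A sketch of recreating it in code (field names as in PEFT 0.12), e.g. to rerun the same fine-tune:

```python
from peft import LoraConfig

# Mirror of adapter_config.json: rank-8 LoRA (alpha 16, no dropout) on every
# attention and MLP projection of the Qwen2.5 decoder blocks.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```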
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a16e04a6a4be274ef21044caf2572f11049dc7fc5e3aaab52509e8c825f7a979
+ size 68902296
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 2.0,
+   "eval_loss": 0.7307779788970947,
+   "eval_runtime": 1.5534,
+   "eval_samples_per_second": 1.288,
+   "eval_steps_per_second": 0.644,
+   "total_flos": 167148584435712.0,
+   "train_loss": 0.7704400447281924,
+   "train_runtime": 273.3504,
+   "train_samples_per_second": 1.266,
+   "train_steps_per_second": 0.161
+ }
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 2.0,
+   "eval_loss": 0.7307779788970947,
+   "eval_runtime": 1.5534,
+   "eval_samples_per_second": 1.288,
+   "eval_steps_per_second": 0.644
+ }
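
Assuming `eval_loss` is the usual mean token cross-entropy in nats, the implied evaluation perplexity is exp(0.7308) ≈ 2.08:

```python
import math

# Perplexity implied by the reported eval loss.
print(math.exp(0.7307779788970947))  # ≈ 2.08
```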
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
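
The `chat_template` above is Qwen's ChatML format. A sketch of rendering it through `apply_chat_template` (tokenizer taken from the base model; the message content is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-14B-Instruct")
messages = [{"role": "user", "content": "Hello!"}]
# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n".
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```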
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 167148584435712.0,
+   "train_loss": 0.7704400447281924,
+   "train_runtime": 273.3504,
+   "train_samples_per_second": 1.266,
+   "train_steps_per_second": 0.161
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,45 @@
+ {"current_steps": 1, "total_steps": 44, "loss": 0.8818, "lr": 2e-05, "epoch": 0.045454545454545456, "percentage": 2.27, "elapsed_time": "0:00:08", "remaining_time": "0:05:57"}
+ {"current_steps": 2, "total_steps": 44, "loss": 0.876, "lr": 4e-05, "epoch": 0.09090909090909091, "percentage": 4.55, "elapsed_time": "0:00:18", "remaining_time": "0:06:31"}
+ {"current_steps": 3, "total_steps": 44, "loss": 1.2233, "lr": 6e-05, "epoch": 0.13636363636363635, "percentage": 6.82, "elapsed_time": "0:00:24", "remaining_time": "0:05:31"}
+ {"current_steps": 4, "total_steps": 44, "loss": 1.019, "lr": 8e-05, "epoch": 0.18181818181818182, "percentage": 9.09, "elapsed_time": "0:00:29", "remaining_time": "0:04:57"}
+ {"current_steps": 5, "total_steps": 44, "loss": 0.7696, "lr": 0.0001, "epoch": 0.22727272727272727, "percentage": 11.36, "elapsed_time": "0:00:35", "remaining_time": "0:04:35"}
+ {"current_steps": 6, "total_steps": 44, "loss": 0.9575, "lr": 9.983786540671051e-05, "epoch": 0.2727272727272727, "percentage": 13.64, "elapsed_time": "0:00:40", "remaining_time": "0:04:19"}
+ {"current_steps": 7, "total_steps": 44, "loss": 0.8849, "lr": 9.935251313189564e-05, "epoch": 0.3181818181818182, "percentage": 15.91, "elapsed_time": "0:00:46", "remaining_time": "0:04:06"}
+ {"current_steps": 8, "total_steps": 44, "loss": 0.8554, "lr": 9.85470908713026e-05, "epoch": 0.36363636363636365, "percentage": 18.18, "elapsed_time": "0:00:52", "remaining_time": "0:03:54"}
+ {"current_steps": 9, "total_steps": 44, "loss": 0.7842, "lr": 9.742682209735727e-05, "epoch": 0.4090909090909091, "percentage": 20.45, "elapsed_time": "0:00:57", "remaining_time": "0:03:44"}
+ {"current_steps": 10, "total_steps": 44, "loss": 0.7777, "lr": 9.599897218294122e-05, "epoch": 0.45454545454545453, "percentage": 22.73, "elapsed_time": "0:01:03", "remaining_time": "0:03:35"}
+ {"current_steps": 11, "total_steps": 44, "loss": 0.7611, "lr": 9.42728012826605e-05, "epoch": 0.5, "percentage": 25.0, "elapsed_time": "0:01:08", "remaining_time": "0:03:26"}
+ {"current_steps": 12, "total_steps": 44, "loss": 0.7816, "lr": 9.225950427718975e-05, "epoch": 0.5454545454545454, "percentage": 27.27, "elapsed_time": "0:01:14", "remaining_time": "0:03:18"}
+ {"current_steps": 13, "total_steps": 44, "loss": 0.7748, "lr": 8.997213817017507e-05, "epoch": 0.5909090909090909, "percentage": 29.55, "elapsed_time": "0:01:20", "remaining_time": "0:03:11"}
+ {"current_steps": 14, "total_steps": 44, "loss": 0.7604, "lr": 8.742553740855506e-05, "epoch": 0.6363636363636364, "percentage": 31.82, "elapsed_time": "0:01:25", "remaining_time": "0:03:03"}
+ {"current_steps": 15, "total_steps": 44, "loss": 0.7948, "lr": 8.463621767547998e-05, "epoch": 0.6818181818181818, "percentage": 34.09, "elapsed_time": "0:01:31", "remaining_time": "0:02:56"}
+ {"current_steps": 16, "total_steps": 44, "loss": 0.7387, "lr": 8.162226877976887e-05, "epoch": 0.7272727272727273, "percentage": 36.36, "elapsed_time": "0:01:37", "remaining_time": "0:02:49"}
+ {"current_steps": 17, "total_steps": 44, "loss": 0.8671, "lr": 7.840323733655778e-05, "epoch": 0.7727272727272727, "percentage": 38.64, "elapsed_time": "0:01:42", "remaining_time": "0:02:42"}
+ {"current_steps": 18, "total_steps": 44, "loss": 0.6836, "lr": 7.500000000000001e-05, "epoch": 0.8181818181818182, "percentage": 40.91, "elapsed_time": "0:01:48", "remaining_time": "0:02:36"}
+ {"current_steps": 19, "total_steps": 44, "loss": 0.7574, "lr": 7.143462807015271e-05, "epoch": 0.8636363636363636, "percentage": 43.18, "elapsed_time": "0:01:53", "remaining_time": "0:02:29"}
+ {"current_steps": 20, "total_steps": 44, "loss": 0.6944, "lr": 6.773024435212678e-05, "epoch": 0.9090909090909091, "percentage": 45.45, "elapsed_time": "0:01:59", "remaining_time": "0:02:23"}
+ {"current_steps": 21, "total_steps": 44, "loss": 0.7105, "lr": 6.391087319582264e-05, "epoch": 0.9545454545454546, "percentage": 47.73, "elapsed_time": "0:02:05", "remaining_time": "0:02:17"}
+ {"current_steps": 22, "total_steps": 44, "loss": 0.7271, "lr": 6.0001284688802226e-05, "epoch": 1.0, "percentage": 50.0, "elapsed_time": "0:02:10", "remaining_time": "0:02:10"}
+ {"current_steps": 23, "total_steps": 44, "loss": 0.7422, "lr": 5.602683401276615e-05, "epoch": 1.0454545454545454, "percentage": 52.27, "elapsed_time": "0:02:16", "remaining_time": "0:02:04"}
+ {"current_steps": 24, "total_steps": 44, "loss": 0.7493, "lr": 5.201329700547076e-05, "epoch": 1.0909090909090908, "percentage": 54.55, "elapsed_time": "0:02:22", "remaining_time": "0:01:58"}
+ {"current_steps": 25, "total_steps": 44, "loss": 0.7524, "lr": 4.798670299452926e-05, "epoch": 1.1363636363636362, "percentage": 56.82, "elapsed_time": "0:02:27", "remaining_time": "0:01:52"}
+ {"current_steps": 26, "total_steps": 44, "loss": 0.7569, "lr": 4.397316598723385e-05, "epoch": 1.1818181818181819, "percentage": 59.09, "elapsed_time": "0:02:33", "remaining_time": "0:01:46"}
+ {"current_steps": 27, "total_steps": 44, "loss": 0.74, "lr": 3.9998715311197785e-05, "epoch": 1.2272727272727273, "percentage": 61.36, "elapsed_time": "0:02:38", "remaining_time": "0:01:40"}
+ {"current_steps": 28, "total_steps": 44, "loss": 0.7329, "lr": 3.608912680417737e-05, "epoch": 1.2727272727272727, "percentage": 63.64, "elapsed_time": "0:02:44", "remaining_time": "0:01:34"}
+ {"current_steps": 29, "total_steps": 44, "loss": 0.6354, "lr": 3.226975564787322e-05, "epoch": 1.3181818181818181, "percentage": 65.91, "elapsed_time": "0:02:50", "remaining_time": "0:01:28"}
+ {"current_steps": 30, "total_steps": 44, "loss": 0.686, "lr": 2.8565371929847284e-05, "epoch": 1.3636363636363638, "percentage": 68.18, "elapsed_time": "0:02:55", "remaining_time": "0:01:22"}
+ {"current_steps": 31, "total_steps": 44, "loss": 0.7447, "lr": 2.500000000000001e-05, "epoch": 1.4090909090909092, "percentage": 70.45, "elapsed_time": "0:03:01", "remaining_time": "0:01:16"}
+ {"current_steps": 32, "total_steps": 44, "loss": 0.679, "lr": 2.1596762663442218e-05, "epoch": 1.4545454545454546, "percentage": 72.73, "elapsed_time": "0:03:06", "remaining_time": "0:01:10"}
+ {"current_steps": 33, "total_steps": 44, "loss": 0.716, "lr": 1.837773122023114e-05, "epoch": 1.5, "percentage": 75.0, "elapsed_time": "0:03:12", "remaining_time": "0:01:04"}
+ {"current_steps": 34, "total_steps": 44, "loss": 0.7334, "lr": 1.536378232452003e-05, "epoch": 1.5454545454545454, "percentage": 77.27, "elapsed_time": "0:03:18", "remaining_time": "0:00:58"}
+ {"current_steps": 35, "total_steps": 44, "loss": 0.684, "lr": 1.257446259144494e-05, "epoch": 1.5909090909090908, "percentage": 79.55, "elapsed_time": "0:03:23", "remaining_time": "0:00:52"}
+ {"current_steps": 36, "total_steps": 44, "loss": 0.7261, "lr": 1.0027861829824952e-05, "epoch": 1.6363636363636362, "percentage": 81.82, "elapsed_time": "0:03:29", "remaining_time": "0:00:46"}
+ {"current_steps": 37, "total_steps": 44, "loss": 0.7938, "lr": 7.740495722810271e-06, "epoch": 1.6818181818181817, "percentage": 84.09, "elapsed_time": "0:03:34", "remaining_time": "0:00:40"}
+ {"current_steps": 38, "total_steps": 44, "loss": 0.7025, "lr": 5.727198717339511e-06, "epoch": 1.7272727272727273, "percentage": 86.36, "elapsed_time": "0:03:40", "remaining_time": "0:00:34"}
+ {"current_steps": 39, "total_steps": 44, "loss": 0.7592, "lr": 4.001027817058789e-06, "epoch": 1.7727272727272727, "percentage": 88.64, "elapsed_time": "0:03:46", "remaining_time": "0:00:28"}
+ {"current_steps": 40, "total_steps": 44, "loss": 0.7231, "lr": 2.573177902642726e-06, "epoch": 1.8181818181818183, "percentage": 90.91, "elapsed_time": "0:03:51", "remaining_time": "0:00:23"}
+ {"current_steps": 41, "total_steps": 44, "loss": 0.6872, "lr": 1.4529091286973995e-06, "epoch": 1.8636363636363638, "percentage": 93.18, "elapsed_time": "0:03:57", "remaining_time": "0:00:17"}
+ {"current_steps": 42, "total_steps": 44, "loss": 0.696, "lr": 6.474868681043578e-07, "epoch": 1.9090909090909092, "percentage": 95.45, "elapsed_time": "0:04:03", "remaining_time": "0:00:11"}
+ {"current_steps": 43, "total_steps": 44, "loss": 0.6727, "lr": 1.6213459328950352e-07, "epoch": 1.9545454545454546, "percentage": 97.73, "elapsed_time": "0:04:08", "remaining_time": "0:00:05"}
+ {"current_steps": 44, "total_steps": 44, "loss": 0.7056, "lr": 0.0, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:04:14", "remaining_time": "0:00:00"}
+ {"current_steps": 44, "total_steps": 44, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:04:33", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,350 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "eval_steps": 200,
+   "global_step": 44,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.045454545454545456,
+       "grad_norm": 0.20162349585089198,
+       "learning_rate": 2e-05,
+       "loss": 0.8818,
+       "step": 1
+     },
+     {
+       "epoch": 0.09090909090909091,
+       "grad_norm": 0.1953500581220745,
+       "learning_rate": 4e-05,
+       "loss": 0.876,
+       "step": 2
+     },
+     {
+       "epoch": 0.13636363636363635,
+       "grad_norm": 0.5228028889794835,
+       "learning_rate": 6e-05,
+       "loss": 1.2233,
+       "step": 3
+     },
+     {
+       "epoch": 0.18181818181818182,
+       "grad_norm": 0.3168539123432193,
+       "learning_rate": 8e-05,
+       "loss": 1.019,
+       "step": 4
+     },
+     {
+       "epoch": 0.22727272727272727,
+       "grad_norm": 0.2448592330495161,
+       "learning_rate": 0.0001,
+       "loss": 0.7696,
+       "step": 5
+     },
+     {
+       "epoch": 0.2727272727272727,
+       "grad_norm": 0.5022417968409909,
+       "learning_rate": 9.983786540671051e-05,
+       "loss": 0.9575,
+       "step": 6
+     },
+     {
+       "epoch": 0.3181818181818182,
+       "grad_norm": 0.3690185930765724,
+       "learning_rate": 9.935251313189564e-05,
+       "loss": 0.8849,
+       "step": 7
+     },
+     {
+       "epoch": 0.36363636363636365,
+       "grad_norm": 0.3481259765917461,
+       "learning_rate": 9.85470908713026e-05,
+       "loss": 0.8554,
+       "step": 8
+     },
+     {
+       "epoch": 0.4090909090909091,
+       "grad_norm": 0.2491302807616705,
+       "learning_rate": 9.742682209735727e-05,
+       "loss": 0.7842,
+       "step": 9
+     },
+     {
+       "epoch": 0.45454545454545453,
+       "grad_norm": 0.2553944608600153,
+       "learning_rate": 9.599897218294122e-05,
+       "loss": 0.7777,
+       "step": 10
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 0.16006256910442612,
+       "learning_rate": 9.42728012826605e-05,
+       "loss": 0.7611,
+       "step": 11
+     },
+     {
+       "epoch": 0.5454545454545454,
+       "grad_norm": 0.15558254774359165,
+       "learning_rate": 9.225950427718975e-05,
+       "loss": 0.7816,
+       "step": 12
+     },
+     {
+       "epoch": 0.5909090909090909,
+       "grad_norm": 0.18832677655086638,
+       "learning_rate": 8.997213817017507e-05,
+       "loss": 0.7748,
+       "step": 13
+     },
+     {
+       "epoch": 0.6363636363636364,
+       "grad_norm": 0.09681390727181982,
+       "learning_rate": 8.742553740855506e-05,
+       "loss": 0.7604,
+       "step": 14
+     },
+     {
+       "epoch": 0.6818181818181818,
+       "grad_norm": 0.07979864307427108,
+       "learning_rate": 8.463621767547998e-05,
+       "loss": 0.7948,
+       "step": 15
+     },
+     {
+       "epoch": 0.7272727272727273,
+       "grad_norm": 0.0751068874132372,
+       "learning_rate": 8.162226877976887e-05,
+       "loss": 0.7387,
+       "step": 16
+     },
+     {
+       "epoch": 0.7727272727272727,
+       "grad_norm": 0.09818843566208878,
+       "learning_rate": 7.840323733655778e-05,
+       "loss": 0.8671,
+       "step": 17
+     },
+     {
+       "epoch": 0.8181818181818182,
+       "grad_norm": 0.12433711342571083,
+       "learning_rate": 7.500000000000001e-05,
+       "loss": 0.6836,
+       "step": 18
+     },
+     {
+       "epoch": 0.8636363636363636,
+       "grad_norm": 0.09336807665626587,
+       "learning_rate": 7.143462807015271e-05,
+       "loss": 0.7574,
+       "step": 19
+     },
+     {
+       "epoch": 0.9090909090909091,
+       "grad_norm": 0.10857087455570018,
+       "learning_rate": 6.773024435212678e-05,
+       "loss": 0.6944,
+       "step": 20
+     },
+     {
+       "epoch": 0.9545454545454546,
+       "grad_norm": 0.09987180851168877,
+       "learning_rate": 6.391087319582264e-05,
+       "loss": 0.7105,
+       "step": 21
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 0.12403936967122979,
+       "learning_rate": 6.0001284688802226e-05,
+       "loss": 0.7271,
+       "step": 22
+     },
+     {
+       "epoch": 1.0454545454545454,
+       "grad_norm": 0.11169323633585997,
+       "learning_rate": 5.602683401276615e-05,
+       "loss": 0.7422,
+       "step": 23
+     },
+     {
+       "epoch": 1.0909090909090908,
+       "grad_norm": 0.08664401280752744,
+       "learning_rate": 5.201329700547076e-05,
+       "loss": 0.7493,
+       "step": 24
+     },
+     {
+       "epoch": 1.1363636363636362,
+       "grad_norm": 0.08676810328328194,
+       "learning_rate": 4.798670299452926e-05,
+       "loss": 0.7524,
+       "step": 25
+     },
+     {
+       "epoch": 1.1818181818181819,
+       "grad_norm": 0.08590933006332456,
+       "learning_rate": 4.397316598723385e-05,
+       "loss": 0.7569,
+       "step": 26
+     },
+     {
+       "epoch": 1.2272727272727273,
+       "grad_norm": 0.08858606445370859,
+       "learning_rate": 3.9998715311197785e-05,
+       "loss": 0.74,
+       "step": 27
+     },
+     {
+       "epoch": 1.2727272727272727,
+       "grad_norm": 0.07666247259553818,
+       "learning_rate": 3.608912680417737e-05,
+       "loss": 0.7329,
+       "step": 28
+     },
+     {
+       "epoch": 1.3181818181818181,
+       "grad_norm": 0.07618604122498249,
+       "learning_rate": 3.226975564787322e-05,
+       "loss": 0.6354,
+       "step": 29
+     },
+     {
+       "epoch": 1.3636363636363638,
+       "grad_norm": 0.08140024777764204,
+       "learning_rate": 2.8565371929847284e-05,
+       "loss": 0.686,
+       "step": 30
+     },
+     {
+       "epoch": 1.4090909090909092,
+       "grad_norm": 0.08978876050933346,
+       "learning_rate": 2.500000000000001e-05,
+       "loss": 0.7447,
+       "step": 31
+     },
+     {
+       "epoch": 1.4545454545454546,
+       "grad_norm": 0.0716318283002434,
+       "learning_rate": 2.1596762663442218e-05,
+       "loss": 0.679,
+       "step": 32
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 0.10585147220057532,
+       "learning_rate": 1.837773122023114e-05,
+       "loss": 0.716,
+       "step": 33
+     },
+     {
+       "epoch": 1.5454545454545454,
+       "grad_norm": 0.08349343850144146,
+       "learning_rate": 1.536378232452003e-05,
+       "loss": 0.7334,
+       "step": 34
+     },
+     {
+       "epoch": 1.5909090909090908,
+       "grad_norm": 0.07246804816219393,
+       "learning_rate": 1.257446259144494e-05,
+       "loss": 0.684,
+       "step": 35
+     },
+     {
+       "epoch": 1.6363636363636362,
+       "grad_norm": 0.08656458135317226,
+       "learning_rate": 1.0027861829824952e-05,
+       "loss": 0.7261,
+       "step": 36
+     },
+     {
+       "epoch": 1.6818181818181817,
+       "grad_norm": 0.07432309009864736,
+       "learning_rate": 7.740495722810271e-06,
+       "loss": 0.7938,
+       "step": 37
+     },
+     {
+       "epoch": 1.7272727272727273,
+       "grad_norm": 0.06644380159937287,
+       "learning_rate": 5.727198717339511e-06,
+       "loss": 0.7025,
+       "step": 38
+     },
+     {
+       "epoch": 1.7727272727272727,
+       "grad_norm": 0.07589928426253979,
+       "learning_rate": 4.001027817058789e-06,
+       "loss": 0.7592,
+       "step": 39
+     },
+     {
+       "epoch": 1.8181818181818183,
+       "grad_norm": 0.1400960157888807,
+       "learning_rate": 2.573177902642726e-06,
+       "loss": 0.7231,
+       "step": 40
+     },
+     {
+       "epoch": 1.8636363636363638,
+       "grad_norm": 0.07387468065399092,
+       "learning_rate": 1.4529091286973995e-06,
+       "loss": 0.6872,
+       "step": 41
+     },
+     {
+       "epoch": 1.9090909090909092,
+       "grad_norm": 0.0855342727329305,
+       "learning_rate": 6.474868681043578e-07,
+       "loss": 0.696,
+       "step": 42
+     },
+     {
+       "epoch": 1.9545454545454546,
+       "grad_norm": 0.0985088722005277,
+       "learning_rate": 1.6213459328950352e-07,
+       "loss": 0.6727,
+       "step": 43
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.07968270308548887,
+       "learning_rate": 0.0,
+       "loss": 0.7056,
+       "step": 44
+     },
+     {
+       "epoch": 2.0,
+       "step": 44,
+       "total_flos": 167148584435712.0,
+       "train_loss": 0.7704400447281924,
+       "train_runtime": 273.3504,
+       "train_samples_per_second": 1.266,
+       "train_steps_per_second": 0.161
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 44,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 167148584435712.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecc80db74aacbbe132f654f49994d63c9f8651b65cf58b6dfdd357f0b975c2da
+ size 7224
training_loss.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff