Go4miii committed on
Commit fa4b296 · 1 Parent(s): 8a1a52a

commit from root

Baichuan-13B-Chat-lora-Task/README.md DELETED
@@ -1,9 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
- ### Framework versions
-
-
- - PEFT 0.5.0
 
Baichuan-13B-Chat-lora-Task/adapter_config.json DELETED
@@ -1,20 +0,0 @@
- {
-   "auto_mapping": null,
-   "base_model_name_or_path": "baichuan-inc/Baichuan-13B-Chat",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "lora_alpha": 32.0,
-   "lora_dropout": 0.1,
-   "modules_to_save": null,
-   "peft_type": "LORA",
-   "r": 8,
-   "revision": null,
-   "target_modules": [
-     "W_pack"
-   ],
-   "task_type": "CAUSAL_LM"
- }
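
The config above describes a rank-8 LoRA adapter (alpha 32, dropout 0.1) applied to the fused W_pack projection of baichuan-inc/Baichuan-13B-Chat. As a minimal sketch, assuming a local checkout of this folder and a recent peft/transformers install (neither is stated in the commit), such an adapter is typically attached like this:

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Base model named in adapter_config.json; fp16 and trust_remote_code are assumptions.
base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan-13B-Chat",
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
# Hypothetical local path to this adapter folder (not taken from the commit).
model = PeftModel.from_pretrained(base, "Baichuan-13B-Chat-lora-Task")
model.eval()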
 
Baichuan-13B-Chat-lora-Task/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:84594037c53e9a300bbdebbc3534f704f22f429b6b912fe36e32aeec5a928e3a
- size 26243422
 
Baichuan-13B-Chat-lora-Task/all_results.json DELETED
@@ -1,11 +0,0 @@
- {
-   "epoch": 2.0,
-   "eval_loss": 0.41885045170783997,
-   "eval_runtime": 69.6555,
-   "eval_samples_per_second": 15.821,
-   "eval_steps_per_second": 1.594,
-   "train_loss": 0.46561298847898175,
-   "train_runtime": 35477.4141,
-   "train_samples_per_second": 6.149,
-   "train_steps_per_second": 0.077
- }
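
Since eval_loss above is a mean cross-entropy in nats, it can be read as a perplexity; the conversion below is a small derived check, not a number reported in the commit:

import math

eval_loss = 0.41885045170783997  # value from all_results.json
print(f"eval perplexity ≈ {math.exp(eval_loss):.3f}")  # ≈ 1.520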
 
Baichuan-13B-Chat-lora-Task/eval_results.json DELETED
@@ -1,7 +0,0 @@
- {
-   "epoch": 2.0,
-   "eval_loss": 0.41885045170783997,
-   "eval_runtime": 69.6555,
-   "eval_samples_per_second": 15.821,
-   "eval_steps_per_second": 1.594
- }
 
Baichuan-13B-Chat-lora-Task/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
- {
-   "bos_token": {
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "eos_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "pad_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   }
- }
 
Baichuan-13B-Chat-lora-Task/tokenization_baichuan.py DELETED
@@ -1,232 +0,0 @@
- # Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
-
- import os
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple
-
- import sentencepiece as spm
- from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
- from transformers.utils import logging
-
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
-
- PRETRAINED_VOCAB_FILES_MAP = {
-     "vocab_file": {},
-     "tokenizer_file": {},
- }
- PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
-
-
- class BaichuanTokenizer(PreTrainedTokenizer):
-     """
-     Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
-     model_input_names = ["input_ids", "attention_mask"]
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token="<unk>",
-         bos_token="<s>",
-         eos_token="</s>",
-         pad_token=None,
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=True,
-         add_eos_token=False,
-         clean_up_tokenization_spaces=False,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
-         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
-         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
-         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             add_bos_token=add_bos_token,
-             add_eos_token=add_eos_token,
-             sp_model_kwargs=self.sp_model_kwargs,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             **kwargs,
-         )
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-
-     def __getstate__(self):
-         state = self.__dict__.copy()
-         state["sp_model"] = None
-         return state
-
-     def __setstate__(self, d):
-         self.__dict__ = d
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(self.vocab_file)
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def _tokenize(self, text):
-         """Returns a tokenized string."""
-         return self.sp_model.encode(text, out_type=str)
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) in an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) in a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (string) in a single string."""
-         current_sub_tokens = []
-         out_string = ""
-         prev_is_special = False
-         for i, token in enumerate(tokens):
-             # make sure that special tokens are not decoded using sentencepiece model
-             if token in self.all_special_tokens:
-                 if not prev_is_special and i != 0:
-                     out_string += " "
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         return out_string
-
-     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         """
-         Save the vocabulary and special tokens file to a directory.
-
-         Args:
-             save_directory (`str`):
-                 The directory in which to save the vocabulary.
-
-         Returns:
-             `Tuple(str)`: Paths to the files saved.
-         """
-         if not os.path.isdir(save_directory):
-             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-             return
-         out_vocab_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-         )
-
-         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-         elif not os.path.isfile(self.vocab_file):
-             with open(out_vocab_file, "wb") as fi:
-                 content_spiece_model = self.sp_model.serialized_model_proto()
-                 fi.write(content_spiece_model)
-
-         return (out_vocab_file,)
-
-     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-         output = bos_token_id + token_ids_0 + eos_token_id
-
-         if token_ids_1 is not None:
-             output = output + bos_token_id + token_ids_1 + eos_token_id
-
-         return output
-
-     def get_special_tokens_mask(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-     ) -> List[int]:
-         """
-         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` method.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-             )
-
-         bos_token_id = [1] if self.add_bos_token else []
-         eos_token_id = [1] if self.add_eos_token else []
-
-         if token_ids_1 is None:
-             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
-         return (
-             bos_token_id
-             + ([0] * len(token_ids_0))
-             + eos_token_id
-             + bos_token_id
-             + ([0] * len(token_ids_1))
-             + eos_token_id
-         )
-
-     def create_token_type_ids_from_sequences(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
-         sequence pair mask has the following format:
-
-         ```
-         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
-         | first sequence | second sequence |
-         ```
-
-         if token_ids_1 is None, only returns the first portion of the mask (0s).
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of ids.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
-         """
-         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
-
-         if token_ids_1 is not None:
-             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
-
-         return output
-
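
For reference, a minimal usage sketch of the BaichuanTokenizer defined above, assuming the module sits next to the tokenizer.model file from this same folder, that sentencepiece is installed, and that a transformers release from around the time of this commit (PEFT 0.5.0 era) is used; none of this is stated in the commit:

# Hypothetical paths; adjust to wherever the deleted files are checked out.
from tokenization_baichuan import BaichuanTokenizer

tok = BaichuanTokenizer(vocab_file="tokenizer.model")
enc = tok("Hello, world!")  # returns input_ids and attention_mask
print(enc["input_ids"])
print(tok.decode(enc["input_ids"], skip_special_tokens=True))  # round-trips back to the text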
 
Baichuan-13B-Chat-lora-Task/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f7d1ab69d25c74644af5c5e4dcd1cc6e96d33783dbd257b6bdea55b643c72813
- size 1136765
 
Baichuan-13B-Chat-lora-Task/tokenizer_config.json DELETED
@@ -1,48 +0,0 @@
- {
-   "add_bos_token": false,
-   "add_eos_token": false,
-   "auto_map": {
-     "AutoTokenizer": [
-       "tokenization_baichuan.BaichuanTokenizer",
-       null
-     ]
-   },
-   "bos_token": {
-     "__type": "AddedToken",
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "clean_up_tokenization_spaces": false,
-   "eos_token": {
-     "__type": "AddedToken",
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "model_max_length": 4096,
-   "pad_token": {
-     "__type": "AddedToken",
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   },
-   "padding_side": "right",
-   "sp_model_kwargs": {},
-   "split_special_tokens": false,
-   "tokenizer_class": "BaichuanTokenizer",
-   "unk_token": {
-     "__type": "AddedToken",
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": true
-   }
- }
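
Because the auto_map entry above routes AutoTokenizer to tokenization_baichuan.BaichuanTokenizer, the folder is normally loaded through transformers with remote code enabled. A sketch, where the local path is an assumption rather than something recorded in the commit:

from transformers import AutoTokenizer

# Hypothetical local path to this (now deleted) folder.
tok = AutoTokenizer.from_pretrained(
    "Baichuan-13B-Chat-lora-Task",
    use_fast=False,
    trust_remote_code=True,
)
print(type(tok).__name__)  # expected: BaichuanTokenizer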
 
Baichuan-13B-Chat-lora-Task/train_results.json DELETED
@@ -1,7 +0,0 @@
- {
-   "epoch": 2.0,
-   "train_loss": 0.46561298847898175,
-   "train_runtime": 35477.4141,
-   "train_samples_per_second": 6.149,
-   "train_steps_per_second": 0.077
- }
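
As a quick consistency check (derived here, not reported in the commit), the throughput figures above line up with the 2,726 total optimization steps logged in trainer_log.jsonl:

train_runtime = 35477.4141   # seconds, from train_results.json
steps_per_sec = 0.077
samples_per_sec = 6.149

print(round(train_runtime * steps_per_sec))    # ≈ 2732, close to the 2726 logged steps
print(round(train_runtime * samples_per_sec))  # ≈ 218151 samples processed over 2 epochs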
 
Baichuan-13B-Chat-lora-Task/trainer_log.jsonl DELETED
@@ -1,287 +0,0 @@
1
- {"current_steps": 10, "total_steps": 2726, "loss": 1.5452, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.999865525734509e-05, "epoch": 0.01, "percentage": 0.37, "elapsed_time": "0:02:32", "remaining_time": "11:29:51"}
2
- {"current_steps": 20, "total_steps": 2726, "loss": 1.3291, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9994621174046976e-05, "epoch": 0.01, "percentage": 0.73, "elapsed_time": "0:04:36", "remaining_time": "10:23:36"}
3
- {"current_steps": 30, "total_steps": 2726, "loss": 1.1048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9986985231938546e-05, "epoch": 0.02, "percentage": 1.1, "elapsed_time": "0:06:36", "remaining_time": "9:54:30"}
4
- {"current_steps": 40, "total_steps": 2726, "loss": 0.9155, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.99760306731191e-05, "epoch": 0.03, "percentage": 1.47, "elapsed_time": "0:08:47", "remaining_time": "9:50:47"}
5
- {"current_steps": 50, "total_steps": 2726, "loss": 0.8856, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9961758952505326e-05, "epoch": 0.04, "percentage": 1.83, "elapsed_time": "0:10:46", "remaining_time": "9:36:34"}
6
- {"current_steps": 60, "total_steps": 2726, "loss": 0.8052, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9944171965578836e-05, "epoch": 0.04, "percentage": 2.2, "elapsed_time": "0:13:03", "remaining_time": "9:40:26"}
7
- {"current_steps": 70, "total_steps": 2726, "loss": 0.7332, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.992327204813435e-05, "epoch": 0.05, "percentage": 2.57, "elapsed_time": "0:15:01", "remaining_time": "9:30:01"}
8
- {"current_steps": 80, "total_steps": 2726, "loss": 0.6982, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.989906197596955e-05, "epoch": 0.06, "percentage": 2.93, "elapsed_time": "0:17:01", "remaining_time": "9:22:57"}
9
- {"current_steps": 90, "total_steps": 2726, "loss": 0.6811, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.987154496451635e-05, "epoch": 0.07, "percentage": 3.3, "elapsed_time": "0:18:51", "remaining_time": "9:12:22"}
10
- {"current_steps": 100, "total_steps": 2726, "loss": 0.6323, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.984072466841389e-05, "epoch": 0.07, "percentage": 3.67, "elapsed_time": "0:21:04", "remaining_time": "9:13:18"}
11
- {"current_steps": 110, "total_steps": 2726, "loss": 0.6234, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.981016546765289e-05, "epoch": 0.08, "percentage": 4.04, "elapsed_time": "0:23:02", "remaining_time": "9:07:57"}
12
- {"current_steps": 120, "total_steps": 2726, "loss": 0.6125, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.977308057009269e-05, "epoch": 0.09, "percentage": 4.4, "elapsed_time": "0:25:11", "remaining_time": "9:07:04"}
13
- {"current_steps": 130, "total_steps": 2726, "loss": 0.5977, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.97327054653146e-05, "epoch": 0.1, "percentage": 4.77, "elapsed_time": "0:27:29", "remaining_time": "9:08:54"}
14
- {"current_steps": 140, "total_steps": 2726, "loss": 0.6179, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.968904551569013e-05, "epoch": 0.1, "percentage": 5.14, "elapsed_time": "0:29:47", "remaining_time": "9:10:22"}
15
- {"current_steps": 150, "total_steps": 2726, "loss": 0.5879, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9642106519863544e-05, "epoch": 0.11, "percentage": 5.5, "elapsed_time": "0:31:46", "remaining_time": "9:05:40"}
16
- {"current_steps": 160, "total_steps": 2726, "loss": 0.5472, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.959189471198171e-05, "epoch": 0.12, "percentage": 5.87, "elapsed_time": "0:33:38", "remaining_time": "8:59:28"}
17
- {"current_steps": 170, "total_steps": 2726, "loss": 0.602, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.953841676086613e-05, "epoch": 0.12, "percentage": 6.24, "elapsed_time": "0:35:33", "remaining_time": "8:54:43"}
18
- {"current_steps": 180, "total_steps": 2726, "loss": 0.5773, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9481679769127275e-05, "epoch": 0.13, "percentage": 6.6, "elapsed_time": "0:37:48", "remaining_time": "8:54:48"}
19
- {"current_steps": 190, "total_steps": 2726, "loss": 0.5615, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9421691272221167e-05, "epoch": 0.14, "percentage": 6.97, "elapsed_time": "0:39:45", "remaining_time": "8:50:36"}
20
- {"current_steps": 200, "total_steps": 2726, "loss": 0.5704, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.935845923744865e-05, "epoch": 0.15, "percentage": 7.34, "elapsed_time": "0:41:50", "remaining_time": "8:48:29"}
21
- {"current_steps": 200, "total_steps": 2726, "loss": null, "eval_loss": 0.5564362406730652, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.15, "percentage": 7.34, "elapsed_time": "0:41:50", "remaining_time": "8:48:29"}
22
- {"current_steps": 210, "total_steps": 2726, "loss": 0.5803, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9291992062897183e-05, "epoch": 0.15, "percentage": 7.7, "elapsed_time": "0:44:59", "remaining_time": "8:59:06"}
23
- {"current_steps": 220, "total_steps": 2726, "loss": 0.5655, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.922229857632545e-05, "epoch": 0.16, "percentage": 8.07, "elapsed_time": "0:46:42", "remaining_time": "8:52:03"}
24
- {"current_steps": 230, "total_steps": 2726, "loss": 0.5769, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9149388033990966e-05, "epoch": 0.17, "percentage": 8.44, "elapsed_time": "0:49:02", "remaining_time": "8:52:13"}
25
- {"current_steps": 240, "total_steps": 2726, "loss": 0.5862, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9073270119420635e-05, "epoch": 0.18, "percentage": 8.8, "elapsed_time": "0:51:04", "remaining_time": "8:49:08"}
26
- {"current_steps": 250, "total_steps": 2726, "loss": 0.5506, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.899395494212471e-05, "epoch": 0.18, "percentage": 9.17, "elapsed_time": "0:53:13", "remaining_time": "8:47:07"}
27
- {"current_steps": 260, "total_steps": 2726, "loss": 0.5183, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.891145303625408e-05, "epoch": 0.19, "percentage": 9.54, "elapsed_time": "0:55:40", "remaining_time": "8:48:05"}
28
- {"current_steps": 270, "total_steps": 2726, "loss": 0.575, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.882577535920121e-05, "epoch": 0.2, "percentage": 9.9, "elapsed_time": "0:58:06", "remaining_time": "8:48:33"}
29
- {"current_steps": 280, "total_steps": 2726, "loss": 0.5359, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8736933290144815e-05, "epoch": 0.21, "percentage": 10.27, "elapsed_time": "1:00:24", "remaining_time": "8:47:41"}
30
- {"current_steps": 290, "total_steps": 2726, "loss": 0.5302, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8644938628538606e-05, "epoch": 0.21, "percentage": 10.64, "elapsed_time": "1:02:22", "remaining_time": "8:43:57"}
31
- {"current_steps": 300, "total_steps": 2726, "loss": 0.5399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8549803592544076e-05, "epoch": 0.22, "percentage": 11.01, "elapsed_time": "1:04:13", "remaining_time": "8:39:25"}
32
- {"current_steps": 310, "total_steps": 2726, "loss": 0.5379, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.845154081740783e-05, "epoch": 0.23, "percentage": 11.37, "elapsed_time": "1:06:13", "remaining_time": "8:36:11"}
33
- {"current_steps": 320, "total_steps": 2726, "loss": 0.5299, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.835016335378343e-05, "epoch": 0.23, "percentage": 11.74, "elapsed_time": "1:08:08", "remaining_time": "8:32:17"}
34
- {"current_steps": 330, "total_steps": 2726, "loss": 0.535, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8245684665998073e-05, "epoch": 0.24, "percentage": 12.11, "elapsed_time": "1:10:23", "remaining_time": "8:31:06"}
35
- {"current_steps": 340, "total_steps": 2726, "loss": 0.5145, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.813811863026436e-05, "epoch": 0.25, "percentage": 12.47, "elapsed_time": "1:12:33", "remaining_time": "8:29:13"}
36
- {"current_steps": 350, "total_steps": 2726, "loss": 0.5126, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.802747953283732e-05, "epoch": 0.26, "percentage": 12.84, "elapsed_time": "1:14:40", "remaining_time": "8:26:55"}
37
- {"current_steps": 360, "total_steps": 2726, "loss": 0.5012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.791378206811704e-05, "epoch": 0.26, "percentage": 13.21, "elapsed_time": "1:16:47", "remaining_time": "8:24:39"}
38
- {"current_steps": 370, "total_steps": 2726, "loss": 0.5373, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7797041336696995e-05, "epoch": 0.27, "percentage": 13.57, "elapsed_time": "1:19:07", "remaining_time": "8:23:47"}
39
- {"current_steps": 380, "total_steps": 2726, "loss": 0.4778, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.767727284335852e-05, "epoch": 0.28, "percentage": 13.94, "elapsed_time": "1:21:18", "remaining_time": "8:21:59"}
40
- {"current_steps": 390, "total_steps": 2726, "loss": 0.4894, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.755449249501155e-05, "epoch": 0.29, "percentage": 14.31, "elapsed_time": "1:23:03", "remaining_time": "8:17:32"}
41
- {"current_steps": 400, "total_steps": 2726, "loss": 0.4986, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7428716598581934e-05, "epoch": 0.29, "percentage": 14.67, "elapsed_time": "1:25:10", "remaining_time": "8:15:20"}
42
- {"current_steps": 400, "total_steps": 2726, "loss": null, "eval_loss": 0.4967592656612396, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.29, "percentage": 14.67, "elapsed_time": "1:25:10", "remaining_time": "8:15:20"}
43
- {"current_steps": 410, "total_steps": 2726, "loss": 0.4928, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.729996185884571e-05, "epoch": 0.3, "percentage": 15.04, "elapsed_time": "1:28:14", "remaining_time": "8:18:25"}
44
- {"current_steps": 420, "total_steps": 2726, "loss": 0.4807, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.716824537621042e-05, "epoch": 0.31, "percentage": 15.41, "elapsed_time": "1:30:05", "remaining_time": "8:14:40"}
45
- {"current_steps": 430, "total_steps": 2726, "loss": 0.4734, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.703358464444397e-05, "epoch": 0.32, "percentage": 15.77, "elapsed_time": "1:32:27", "remaining_time": "8:13:42"}
46
- {"current_steps": 440, "total_steps": 2726, "loss": 0.4603, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.689599754835122e-05, "epoch": 0.32, "percentage": 16.14, "elapsed_time": "1:34:29", "remaining_time": "8:10:56"}
47
- {"current_steps": 450, "total_steps": 2726, "loss": 0.4661, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6755502361398616e-05, "epoch": 0.33, "percentage": 16.51, "elapsed_time": "1:36:31", "remaining_time": "8:08:14"}
48
- {"current_steps": 460, "total_steps": 2726, "loss": 0.5072, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6612117743287234e-05, "epoch": 0.34, "percentage": 16.87, "elapsed_time": "1:38:36", "remaining_time": "8:05:42"}
49
- {"current_steps": 470, "total_steps": 2726, "loss": 0.5056, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.646586273747452e-05, "epoch": 0.34, "percentage": 17.24, "elapsed_time": "1:40:39", "remaining_time": "8:03:07"}
50
- {"current_steps": 480, "total_steps": 2726, "loss": 0.4391, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.631675676864503e-05, "epoch": 0.35, "percentage": 17.61, "elapsed_time": "1:43:01", "remaining_time": "8:02:03"}
51
- {"current_steps": 490, "total_steps": 2726, "loss": 0.483, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6164819640130595e-05, "epoch": 0.36, "percentage": 17.98, "elapsed_time": "1:45:28", "remaining_time": "8:01:17"}
52
- {"current_steps": 500, "total_steps": 2726, "loss": 0.4774, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.601007153128014e-05, "epoch": 0.37, "percentage": 18.34, "elapsed_time": "1:47:35", "remaining_time": "7:59:00"}
53
- {"current_steps": 510, "total_steps": 2726, "loss": 0.4848, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5852532994779606e-05, "epoch": 0.37, "percentage": 18.71, "elapsed_time": "1:49:45", "remaining_time": "7:56:55"}
54
- {"current_steps": 520, "total_steps": 2726, "loss": 0.5099, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5692224953922266e-05, "epoch": 0.38, "percentage": 19.08, "elapsed_time": "1:51:38", "remaining_time": "7:53:35"}
55
- {"current_steps": 530, "total_steps": 2726, "loss": 0.4851, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5529168699829805e-05, "epoch": 0.39, "percentage": 19.44, "elapsed_time": "1:53:42", "remaining_time": "7:51:09"}
56
- {"current_steps": 540, "total_steps": 2726, "loss": 0.498, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.536338588862459e-05, "epoch": 0.4, "percentage": 19.81, "elapsed_time": "1:55:51", "remaining_time": "7:48:59"}
57
- {"current_steps": 550, "total_steps": 2726, "loss": 0.5398, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.519489853855341e-05, "epoch": 0.4, "percentage": 20.18, "elapsed_time": "1:58:05", "remaining_time": "7:47:11"}
58
- {"current_steps": 560, "total_steps": 2726, "loss": 0.4566, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.50237290270632e-05, "epoch": 0.41, "percentage": 20.54, "elapsed_time": "1:59:55", "remaining_time": "7:43:50"}
59
- {"current_steps": 570, "total_steps": 2726, "loss": 0.4942, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.484990008782893e-05, "epoch": 0.42, "percentage": 20.91, "elapsed_time": "2:01:51", "remaining_time": "7:40:56"}
60
- {"current_steps": 580, "total_steps": 2726, "loss": 0.4625, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.467343480773433e-05, "epoch": 0.43, "percentage": 21.28, "elapsed_time": "2:04:02", "remaining_time": "7:38:55"}
61
- {"current_steps": 590, "total_steps": 2726, "loss": 0.4703, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.44943566238056e-05, "epoch": 0.43, "percentage": 21.64, "elapsed_time": "2:06:02", "remaining_time": "7:36:17"}
62
- {"current_steps": 600, "total_steps": 2726, "loss": 0.464, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.431268932009865e-05, "epoch": 0.44, "percentage": 22.01, "elapsed_time": "2:08:29", "remaining_time": "7:35:16"}
63
- {"current_steps": 600, "total_steps": 2726, "loss": null, "eval_loss": 0.46821752190589905, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.44, "percentage": 22.01, "elapsed_time": "2:08:29", "remaining_time": "7:35:16"}
64
- {"current_steps": 610, "total_steps": 2726, "loss": 0.4739, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.412845702454024e-05, "epoch": 0.45, "percentage": 22.38, "elapsed_time": "2:11:46", "remaining_time": "7:37:07"}
65
- {"current_steps": 620, "total_steps": 2726, "loss": 0.4529, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.394168420572349e-05, "epoch": 0.45, "percentage": 22.74, "elapsed_time": "2:14:01", "remaining_time": "7:35:15"}
66
- {"current_steps": 630, "total_steps": 2726, "loss": 0.4433, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3752395669658086e-05, "epoch": 0.46, "percentage": 23.11, "elapsed_time": "2:16:41", "remaining_time": "7:34:47"}
67
- {"current_steps": 640, "total_steps": 2726, "loss": 0.459, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.356061655647571e-05, "epoch": 0.47, "percentage": 23.48, "elapsed_time": "2:18:53", "remaining_time": "7:32:41"}
68
- {"current_steps": 650, "total_steps": 2726, "loss": 0.4877, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.336637233709107e-05, "epoch": 0.48, "percentage": 23.84, "elapsed_time": "2:20:42", "remaining_time": "7:29:22"}
69
- {"current_steps": 660, "total_steps": 2726, "loss": 0.4676, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.316968880981903e-05, "epoch": 0.48, "percentage": 24.21, "elapsed_time": "2:22:37", "remaining_time": "7:26:26"}
70
- {"current_steps": 670, "total_steps": 2726, "loss": 0.4782, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2970592096948236e-05, "epoch": 0.49, "percentage": 24.58, "elapsed_time": "2:24:37", "remaining_time": "7:23:48"}
71
- {"current_steps": 680, "total_steps": 2726, "loss": 0.4778, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.276910864127168e-05, "epoch": 0.5, "percentage": 24.94, "elapsed_time": "2:26:47", "remaining_time": "7:21:39"}
72
- {"current_steps": 690, "total_steps": 2726, "loss": 0.454, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.25652652025748e-05, "epoch": 0.51, "percentage": 25.31, "elapsed_time": "2:28:42", "remaining_time": "7:18:47"}
73
- {"current_steps": 700, "total_steps": 2726, "loss": 0.4399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.235908885408133e-05, "epoch": 0.51, "percentage": 25.68, "elapsed_time": "2:30:54", "remaining_time": "7:16:47"}
74
- {"current_steps": 710, "total_steps": 2726, "loss": 0.5106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.215060697885767e-05, "epoch": 0.52, "percentage": 26.05, "elapsed_time": "2:32:57", "remaining_time": "7:14:19"}
75
- {"current_steps": 720, "total_steps": 2726, "loss": 0.4683, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1939847266176e-05, "epoch": 0.53, "percentage": 26.41, "elapsed_time": "2:35:12", "remaining_time": "7:12:26"}
76
- {"current_steps": 730, "total_steps": 2726, "loss": 0.4896, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.172683770783677e-05, "epoch": 0.54, "percentage": 26.78, "elapsed_time": "2:37:07", "remaining_time": "7:09:37"}
77
- {"current_steps": 740, "total_steps": 2726, "loss": 0.4633, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1511606594451016e-05, "epoch": 0.54, "percentage": 27.15, "elapsed_time": "2:39:14", "remaining_time": "7:07:20"}
78
- {"current_steps": 750, "total_steps": 2726, "loss": 0.4486, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1294182511682946e-05, "epoch": 0.55, "percentage": 27.51, "elapsed_time": "2:41:16", "remaining_time": "7:04:53"}
79
- {"current_steps": 760, "total_steps": 2726, "loss": 0.4706, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1074594336453384e-05, "epoch": 0.56, "percentage": 27.88, "elapsed_time": "2:43:11", "remaining_time": "7:02:09"}
80
- {"current_steps": 770, "total_steps": 2726, "loss": 0.4797, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.085287123310455e-05, "epoch": 0.56, "percentage": 28.25, "elapsed_time": "2:44:59", "remaining_time": "6:59:07"}
81
- {"current_steps": 780, "total_steps": 2726, "loss": 0.4251, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.062904264952657e-05, "epoch": 0.57, "percentage": 28.61, "elapsed_time": "2:47:04", "remaining_time": "6:56:49"}
82
- {"current_steps": 790, "total_steps": 2726, "loss": 0.4687, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0403138313246435e-05, "epoch": 0.58, "percentage": 28.98, "elapsed_time": "2:49:15", "remaining_time": "6:54:47"}
83
- {"current_steps": 800, "total_steps": 2726, "loss": 0.48, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.017518822747976e-05, "epoch": 0.59, "percentage": 29.35, "elapsed_time": "2:51:34", "remaining_time": "6:53:03"}
84
- {"current_steps": 800, "total_steps": 2726, "loss": null, "eval_loss": 0.4554018974304199, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.59, "percentage": 29.35, "elapsed_time": "2:51:34", "remaining_time": "6:53:03"}
85
- {"current_steps": 810, "total_steps": 2726, "loss": 0.4662, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.994522266714594e-05, "epoch": 0.59, "percentage": 29.71, "elapsed_time": "2:54:51", "remaining_time": "6:53:36"}
86
- {"current_steps": 820, "total_steps": 2726, "loss": 0.4837, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9713272174847246e-05, "epoch": 0.6, "percentage": 30.08, "elapsed_time": "2:57:25", "remaining_time": "6:52:24"}
87
- {"current_steps": 830, "total_steps": 2726, "loss": 0.5014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.947936755681229e-05, "epoch": 0.61, "percentage": 30.45, "elapsed_time": "2:59:33", "remaining_time": "6:50:10"}
88
- {"current_steps": 840, "total_steps": 2726, "loss": 0.4764, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.92435398788046e-05, "epoch": 0.62, "percentage": 30.81, "elapsed_time": "3:01:44", "remaining_time": "6:48:02"}
89
- {"current_steps": 850, "total_steps": 2726, "loss": 0.4587, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9005820461996604e-05, "epoch": 0.62, "percentage": 31.18, "elapsed_time": "3:03:52", "remaining_time": "6:45:49"}
90
- {"current_steps": 860, "total_steps": 2726, "loss": 0.484, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.876624087880979e-05, "epoch": 0.63, "percentage": 31.55, "elapsed_time": "3:06:00", "remaining_time": "6:43:35"}
91
- {"current_steps": 870, "total_steps": 2726, "loss": 0.4285, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.85248329487214e-05, "epoch": 0.64, "percentage": 31.91, "elapsed_time": "3:08:03", "remaining_time": "6:41:10"}
92
- {"current_steps": 880, "total_steps": 2726, "loss": 0.4742, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.828162873403843e-05, "epoch": 0.65, "percentage": 32.28, "elapsed_time": "3:10:09", "remaining_time": "6:38:53"}
93
- {"current_steps": 890, "total_steps": 2726, "loss": 0.4533, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.803666053563926e-05, "epoch": 0.65, "percentage": 32.65, "elapsed_time": "3:12:20", "remaining_time": "6:36:48"}
94
- {"current_steps": 900, "total_steps": 2726, "loss": 0.4395, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.778996088868365e-05, "epoch": 0.66, "percentage": 33.02, "elapsed_time": "3:14:34", "remaining_time": "6:34:47"}
95
- {"current_steps": 910, "total_steps": 2726, "loss": 0.4519, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.754156255829167e-05, "epoch": 0.67, "percentage": 33.38, "elapsed_time": "3:16:42", "remaining_time": "6:32:33"}
96
- {"current_steps": 920, "total_steps": 2726, "loss": 0.4684, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.7291498535191996e-05, "epoch": 0.67, "percentage": 33.75, "elapsed_time": "3:18:41", "remaining_time": "6:30:02"}
97
- {"current_steps": 930, "total_steps": 2726, "loss": 0.4259, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.703980203134029e-05, "epoch": 0.68, "percentage": 34.12, "elapsed_time": "3:20:24", "remaining_time": "6:27:00"}
98
- {"current_steps": 940, "total_steps": 2726, "loss": 0.4576, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.678650647550822e-05, "epoch": 0.69, "percentage": 34.48, "elapsed_time": "3:22:27", "remaining_time": "6:24:40"}
99
- {"current_steps": 950, "total_steps": 2726, "loss": 0.446, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6531645508843636e-05, "epoch": 0.7, "percentage": 34.85, "elapsed_time": "3:24:21", "remaining_time": "6:22:01"}
100
- {"current_steps": 960, "total_steps": 2726, "loss": 0.4439, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6275252980402544e-05, "epoch": 0.7, "percentage": 35.22, "elapsed_time": "3:26:18", "remaining_time": "6:19:31"}
101
- {"current_steps": 970, "total_steps": 2726, "loss": 0.476, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.601736294265354e-05, "epoch": 0.71, "percentage": 35.58, "elapsed_time": "3:28:32", "remaining_time": "6:17:31"}
102
- {"current_steps": 980, "total_steps": 2726, "loss": 0.4153, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5758009646955115e-05, "epoch": 0.72, "percentage": 35.95, "elapsed_time": "3:30:27", "remaining_time": "6:14:57"}
103
- {"current_steps": 990, "total_steps": 2726, "loss": 0.4444, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5497227539006614e-05, "epoch": 0.73, "percentage": 36.32, "elapsed_time": "3:32:34", "remaining_time": "6:12:46"}
104
- {"current_steps": 1000, "total_steps": 2726, "loss": 0.4395, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.523505125427341e-05, "epoch": 0.73, "percentage": 36.68, "elapsed_time": "3:34:52", "remaining_time": "6:10:52"}
105
- {"current_steps": 1000, "total_steps": 2726, "loss": null, "eval_loss": 0.44357746839523315, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.73, "percentage": 36.68, "elapsed_time": "3:34:52", "remaining_time": "6:10:52"}
106
- {"current_steps": 1010, "total_steps": 2726, "loss": 0.4387, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.497151561338678e-05, "epoch": 0.74, "percentage": 37.05, "elapsed_time": "3:38:14", "remaining_time": "6:10:46"}
107
- {"current_steps": 1020, "total_steps": 2726, "loss": 0.4559, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.470665561751928e-05, "epoch": 0.75, "percentage": 37.42, "elapsed_time": "3:40:04", "remaining_time": "6:08:04"}
108
- {"current_steps": 1030, "total_steps": 2726, "loss": 0.4349, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.444050644373611e-05, "epoch": 0.76, "percentage": 37.78, "elapsed_time": "3:42:11", "remaining_time": "6:05:50"}
109
- {"current_steps": 1040, "total_steps": 2726, "loss": 0.4661, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.417310344032309e-05, "epoch": 0.76, "percentage": 38.15, "elapsed_time": "3:44:11", "remaining_time": "6:03:26"}
110
- {"current_steps": 1050, "total_steps": 2726, "loss": 0.4945, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.390448212209191e-05, "epoch": 0.77, "percentage": 38.52, "elapsed_time": "3:46:22", "remaining_time": "6:01:20"}
111
- {"current_steps": 1060, "total_steps": 2726, "loss": 0.4399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3634678165663325e-05, "epoch": 0.78, "percentage": 38.88, "elapsed_time": "3:48:36", "remaining_time": "5:59:18"}
112
- {"current_steps": 1070, "total_steps": 2726, "loss": 0.4399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.336372740472877e-05, "epoch": 0.78, "percentage": 39.25, "elapsed_time": "3:50:31", "remaining_time": "5:56:47"}
113
- {"current_steps": 1080, "total_steps": 2726, "loss": 0.4251, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.309166582529114e-05, "epoch": 0.79, "percentage": 39.62, "elapsed_time": "3:52:21", "remaining_time": "5:54:08"}
114
- {"current_steps": 1090, "total_steps": 2726, "loss": 0.4385, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.281852956088537e-05, "epoch": 0.8, "percentage": 39.99, "elapsed_time": "3:54:18", "remaining_time": "5:51:40"}
115
- {"current_steps": 1100, "total_steps": 2726, "loss": 0.4566, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.254435488777941e-05, "epoch": 0.81, "percentage": 40.35, "elapsed_time": "3:56:22", "remaining_time": "5:49:24"}
116
- {"current_steps": 1110, "total_steps": 2726, "loss": 0.4352, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.226917822015623e-05, "epoch": 0.81, "percentage": 40.72, "elapsed_time": "3:58:33", "remaining_time": "5:47:19"}
117
- {"current_steps": 1120, "total_steps": 2726, "loss": 0.4617, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.199303610527749e-05, "epoch": 0.82, "percentage": 41.09, "elapsed_time": "4:00:57", "remaining_time": "5:45:30"}
118
- {"current_steps": 1130, "total_steps": 2726, "loss": 0.476, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1715965218629595e-05, "epoch": 0.83, "percentage": 41.45, "elapsed_time": "4:02:46", "remaining_time": "5:42:54"}
119
- {"current_steps": 1140, "total_steps": 2726, "loss": 0.4322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.143800235905268e-05, "epoch": 0.84, "percentage": 41.82, "elapsed_time": "4:04:46", "remaining_time": "5:40:32"}
120
- {"current_steps": 1150, "total_steps": 2726, "loss": 0.4736, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.115918444385315e-05, "epoch": 0.84, "percentage": 42.19, "elapsed_time": "4:06:57", "remaining_time": "5:38:26"}
121
- {"current_steps": 1160, "total_steps": 2726, "loss": 0.4322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0879548503900665e-05, "epoch": 0.85, "percentage": 42.55, "elapsed_time": "4:09:21", "remaining_time": "5:36:37"}
122
- {"current_steps": 1170, "total_steps": 2726, "loss": 0.4152, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0599131678709836e-05, "epoch": 0.86, "percentage": 42.92, "elapsed_time": "4:11:20", "remaining_time": "5:34:16"}
123
- {"current_steps": 1180, "total_steps": 2726, "loss": 0.4187, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.031797121150764e-05, "epoch": 0.87, "percentage": 43.29, "elapsed_time": "4:13:18", "remaining_time": "5:31:52"}
124
- {"current_steps": 1190, "total_steps": 2726, "loss": 0.4331, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0036104444286954e-05, "epoch": 0.87, "percentage": 43.65, "elapsed_time": "4:15:22", "remaining_time": "5:29:37"}
125
- {"current_steps": 1200, "total_steps": 2726, "loss": 0.45, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9753568812847065e-05, "epoch": 0.88, "percentage": 44.02, "elapsed_time": "4:17:56", "remaining_time": "5:28:00"}
126
- {"current_steps": 1200, "total_steps": 2726, "loss": null, "eval_loss": 0.43782830238342285, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 0.88, "percentage": 44.02, "elapsed_time": "4:17:56", "remaining_time": "5:28:00"}
127
- {"current_steps": 1210, "total_steps": 2726, "loss": 0.4457, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9470401841821686e-05, "epoch": 0.89, "percentage": 44.39, "elapsed_time": "4:21:17", "remaining_time": "5:27:22"}
128
- {"current_steps": 1220, "total_steps": 2726, "loss": 0.4667, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9186641139695108e-05, "epoch": 0.89, "percentage": 44.75, "elapsed_time": "4:23:21", "remaining_time": "5:25:05"}
129
- {"current_steps": 1230, "total_steps": 2726, "loss": 0.4516, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8902324393807333e-05, "epoch": 0.9, "percentage": 45.12, "elapsed_time": "4:25:38", "remaining_time": "5:23:05"}
130
- {"current_steps": 1240, "total_steps": 2726, "loss": 0.4695, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.861748936534867e-05, "epoch": 0.91, "percentage": 45.49, "elapsed_time": "4:27:43", "remaining_time": "5:20:50"}
131
- {"current_steps": 1250, "total_steps": 2726, "loss": 0.45, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8332173884344477e-05, "epoch": 0.92, "percentage": 45.85, "elapsed_time": "4:30:07", "remaining_time": "5:18:58"}
132
- {"current_steps": 1260, "total_steps": 2726, "loss": 0.4246, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8046415844630857e-05, "epoch": 0.92, "percentage": 46.22, "elapsed_time": "4:32:04", "remaining_time": "5:16:33"}
133
- {"current_steps": 1270, "total_steps": 2726, "loss": 0.4449, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7760253198821822e-05, "epoch": 0.93, "percentage": 46.59, "elapsed_time": "4:34:02", "remaining_time": "5:14:10"}
134
- {"current_steps": 1280, "total_steps": 2726, "loss": 0.4217, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7473723953268687e-05, "epoch": 0.94, "percentage": 46.96, "elapsed_time": "4:36:21", "remaining_time": "5:12:11"}
135
- {"current_steps": 1290, "total_steps": 2726, "loss": 0.4495, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7186866163012232e-05, "epoch": 0.95, "percentage": 47.32, "elapsed_time": "4:38:18", "remaining_time": "5:09:48"}
136
- {"current_steps": 1300, "total_steps": 2726, "loss": 0.4523, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6899717926728535e-05, "epoch": 0.95, "percentage": 47.69, "elapsed_time": "4:40:31", "remaining_time": "5:07:43"}
137
- {"current_steps": 1310, "total_steps": 2726, "loss": 0.4523, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6612317381668915e-05, "epoch": 0.96, "percentage": 48.06, "elapsed_time": "4:42:52", "remaining_time": "5:05:45"}
138
- {"current_steps": 1320, "total_steps": 2726, "loss": 0.45, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.632470269859478e-05, "epoch": 0.97, "percentage": 48.42, "elapsed_time": "4:44:43", "remaining_time": "5:03:16"}
139
- {"current_steps": 1330, "total_steps": 2726, "loss": 0.4371, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.603691207670803e-05, "epoch": 0.98, "percentage": 48.79, "elapsed_time": "4:46:50", "remaining_time": "5:01:04"}
140
- {"current_steps": 1340, "total_steps": 2726, "loss": 0.4221, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5748983738577653e-05, "epoch": 0.98, "percentage": 49.16, "elapsed_time": "4:49:12", "remaining_time": "4:59:08"}
141
- {"current_steps": 1350, "total_steps": 2726, "loss": 0.4565, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5460955925063268e-05, "epoch": 0.99, "percentage": 49.52, "elapsed_time": "4:51:37", "remaining_time": "4:57:14"}
142
- {"current_steps": 1360, "total_steps": 2726, "loss": 0.3892, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5172866890236203e-05, "epoch": 1.0, "percentage": 49.89, "elapsed_time": "4:53:48", "remaining_time": "4:55:05"}
143
- {"current_steps": 1370, "total_steps": 2726, "loss": 0.3973, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.48847548962988e-05, "epoch": 1.0, "percentage": 50.26, "elapsed_time": "4:56:07", "remaining_time": "4:53:06"}
144
- {"current_steps": 1380, "total_steps": 2726, "loss": 0.4151, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4596658208502713e-05, "epoch": 1.01, "percentage": 50.62, "elapsed_time": "4:58:09", "remaining_time": "4:50:48"}
145
- {"current_steps": 1390, "total_steps": 2726, "loss": 0.3839, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4308615090066735e-05, "epoch": 1.02, "percentage": 50.99, "elapsed_time": "5:00:25", "remaining_time": "4:48:44"}
146
- {"current_steps": 1400, "total_steps": 2726, "loss": 0.4062, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4020663797094864e-05, "epoch": 1.03, "percentage": 51.36, "elapsed_time": "5:02:30", "remaining_time": "4:46:31"}
147
- {"current_steps": 1400, "total_steps": 2726, "loss": null, "eval_loss": 0.43284282088279724, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.03, "percentage": 51.36, "elapsed_time": "5:02:30", "remaining_time": "4:46:31"}
148
- {"current_steps": 1410, "total_steps": 2726, "loss": 0.4003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.373284257349544e-05, "epoch": 1.03, "percentage": 51.72, "elapsed_time": "5:05:58", "remaining_time": "4:45:34"}
149
- {"current_steps": 1420, "total_steps": 2726, "loss": 0.4447, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3445189645901806e-05, "epoch": 1.04, "percentage": 52.09, "elapsed_time": "5:08:26", "remaining_time": "4:43:40"}
150
- {"current_steps": 1430, "total_steps": 2726, "loss": 0.4565, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3157743218595247e-05, "epoch": 1.05, "percentage": 52.46, "elapsed_time": "5:10:20", "remaining_time": "4:41:15"}
151
- {"current_steps": 1440, "total_steps": 2726, "loss": 0.4265, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.287054146843097e-05, "epoch": 1.06, "percentage": 52.82, "elapsed_time": "5:12:13", "remaining_time": "4:38:50"}
152
- {"current_steps": 1450, "total_steps": 2726, "loss": 0.4056, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2583622539767668e-05, "epoch": 1.06, "percentage": 53.19, "elapsed_time": "5:14:21", "remaining_time": "4:36:38"}
153
- {"current_steps": 1460, "total_steps": 2726, "loss": 0.4074, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2297024539401463e-05, "epoch": 1.07, "percentage": 53.56, "elapsed_time": "5:16:26", "remaining_time": "4:34:23"}
154
- {"current_steps": 1470, "total_steps": 2726, "loss": 0.4281, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2010785531504716e-05, "epoch": 1.08, "percentage": 53.93, "elapsed_time": "5:18:46", "remaining_time": "4:32:22"}
155
- {"current_steps": 1480, "total_steps": 2726, "loss": 0.4663, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.172494353257066e-05, "epoch": 1.09, "percentage": 54.29, "elapsed_time": "5:21:04", "remaining_time": "4:30:18"}
156
- {"current_steps": 1490, "total_steps": 2726, "loss": 0.4281, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1439536506364274e-05, "epoch": 1.09, "percentage": 54.66, "elapsed_time": "5:23:36", "remaining_time": "4:28:26"}
157
- {"current_steps": 1500, "total_steps": 2726, "loss": 0.4111, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1154602358880122e-05, "epoch": 1.1, "percentage": 55.03, "elapsed_time": "5:25:52", "remaining_time": "4:26:20"}
158
- {"current_steps": 1510, "total_steps": 2726, "loss": 0.4187, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0870178933307948e-05, "epoch": 1.11, "percentage": 55.39, "elapsed_time": "5:27:49", "remaining_time": "4:23:59"}
159
- {"current_steps": 1520, "total_steps": 2726, "loss": 0.3964, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0586304005006585e-05, "epoch": 1.11, "percentage": 55.76, "elapsed_time": "5:30:05", "remaining_time": "4:21:54"}
160
- {"current_steps": 1530, "total_steps": 2726, "loss": 0.4331, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.030301527648684e-05, "epoch": 1.12, "percentage": 56.13, "elapsed_time": "5:32:15", "remaining_time": "4:19:43"}
161
- {"current_steps": 1540, "total_steps": 2726, "loss": 0.4432, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0020350372404102e-05, "epoch": 1.13, "percentage": 56.49, "elapsed_time": "5:34:15", "remaining_time": "4:17:25"}
162
- {"current_steps": 1550, "total_steps": 2726, "loss": 0.4056, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9738346834561254e-05, "epoch": 1.14, "percentage": 56.86, "elapsed_time": "5:36:18", "remaining_time": "4:15:09"}
163
- {"current_steps": 1560, "total_steps": 2726, "loss": 0.4354, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.945704211692262e-05, "epoch": 1.14, "percentage": 57.23, "elapsed_time": "5:38:26", "remaining_time": "4:12:57"}
164
- {"current_steps": 1570, "total_steps": 2726, "loss": 0.4309, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9176473580639538e-05, "epoch": 1.15, "percentage": 57.59, "elapsed_time": "5:40:42", "remaining_time": "4:10:52"}
165
- {"current_steps": 1580, "total_steps": 2726, "loss": 0.4058, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8896678489088304e-05, "epoch": 1.16, "percentage": 57.96, "elapsed_time": "5:42:44", "remaining_time": "4:08:35"}
166
- {"current_steps": 1590, "total_steps": 2726, "loss": 0.4319, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8617694002921064e-05, "epoch": 1.17, "percentage": 58.33, "elapsed_time": "5:44:54", "remaining_time": "4:06:25"}
167
- {"current_steps": 1600, "total_steps": 2726, "loss": 0.4267, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8339557175130383e-05, "epoch": 1.17, "percentage": 58.69, "elapsed_time": "5:46:47", "remaining_time": "4:04:03"}
168
- {"current_steps": 1600, "total_steps": 2726, "loss": null, "eval_loss": 0.42894992232322693, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.17, "percentage": 58.69, "elapsed_time": "5:46:47", "remaining_time": "4:04:03"}
169
- {"current_steps": 1610, "total_steps": 2726, "loss": 0.3921, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8062304946128073e-05, "epoch": 1.18, "percentage": 59.06, "elapsed_time": "5:50:11", "remaining_time": "4:02:44"}
170
- {"current_steps": 1620, "total_steps": 2726, "loss": 0.4206, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7785974138839018e-05, "epoch": 1.19, "percentage": 59.43, "elapsed_time": "5:52:10", "remaining_time": "4:00:26"}
171
- {"current_steps": 1630, "total_steps": 2726, "loss": 0.4356, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7510601453810594e-05, "epoch": 1.2, "percentage": 59.79, "elapsed_time": "5:54:17", "remaining_time": "3:58:13"}
172
- {"current_steps": 1640, "total_steps": 2726, "loss": 0.4167, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.723622346433828e-05, "epoch": 1.2, "percentage": 60.16, "elapsed_time": "5:56:29", "remaining_time": "3:56:03"}
173
- {"current_steps": 1650, "total_steps": 2726, "loss": 0.4233, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6962876611608262e-05, "epoch": 1.21, "percentage": 60.53, "elapsed_time": "5:58:26", "remaining_time": "3:53:45"}
174
- {"current_steps": 1660, "total_steps": 2726, "loss": 0.4176, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6690597199857523e-05, "epoch": 1.22, "percentage": 60.9, "elapsed_time": "6:00:33", "remaining_time": "3:51:32"}
175
- {"current_steps": 1670, "total_steps": 2726, "loss": 0.4241, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6419421391552142e-05, "epoch": 1.22, "percentage": 61.26, "elapsed_time": "6:02:41", "remaining_time": "3:49:20"}
176
- {"current_steps": 1680, "total_steps": 2726, "loss": 0.4524, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6149385202584423e-05, "epoch": 1.23, "percentage": 61.63, "elapsed_time": "6:04:45", "remaining_time": "3:47:06"}
177
- {"current_steps": 1690, "total_steps": 2726, "loss": 0.4647, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5880524497489474e-05, "epoch": 1.24, "percentage": 62.0, "elapsed_time": "6:07:03", "remaining_time": "3:45:01"}
178
- {"current_steps": 1700, "total_steps": 2726, "loss": 0.4036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5612874984681923e-05, "epoch": 1.25, "percentage": 62.36, "elapsed_time": "6:09:03", "remaining_time": "3:42:44"}
179
- {"current_steps": 1710, "total_steps": 2726, "loss": 0.4148, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.534647221171334e-05, "epoch": 1.25, "percentage": 62.73, "elapsed_time": "6:11:22", "remaining_time": "3:40:38"}
180
- {"current_steps": 1720, "total_steps": 2726, "loss": 0.408, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5081351560551021e-05, "epoch": 1.26, "percentage": 63.1, "elapsed_time": "6:13:19", "remaining_time": "3:38:21"}
181
- {"current_steps": 1730, "total_steps": 2726, "loss": 0.457, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4817548242878759e-05, "epoch": 1.27, "percentage": 63.46, "elapsed_time": "6:15:23", "remaining_time": "3:36:07"}
182
- {"current_steps": 1740, "total_steps": 2726, "loss": 0.4193, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.45550972954203e-05, "epoch": 1.28, "percentage": 63.83, "elapsed_time": "6:17:21", "remaining_time": "3:33:50"}
183
- {"current_steps": 1750, "total_steps": 2726, "loss": 0.4149, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4294033575285914e-05, "epoch": 1.28, "percentage": 64.2, "elapsed_time": "6:19:35", "remaining_time": "3:31:42"}
184
- {"current_steps": 1760, "total_steps": 2726, "loss": 0.4032, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4034391755342972e-05, "epoch": 1.29, "percentage": 64.56, "elapsed_time": "6:21:52", "remaining_time": "3:29:35"}
185
- {"current_steps": 1770, "total_steps": 2726, "loss": 0.4078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3776206319610823e-05, "epoch": 1.3, "percentage": 64.93, "elapsed_time": "6:23:56", "remaining_time": "3:27:22"}
186
- {"current_steps": 1780, "total_steps": 2726, "loss": 0.4123, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3519511558680892e-05, "epoch": 1.31, "percentage": 65.3, "elapsed_time": "6:26:23", "remaining_time": "3:25:21"}
187
- {"current_steps": 1790, "total_steps": 2726, "loss": 0.4089, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3264341565162422e-05, "epoch": 1.31, "percentage": 65.66, "elapsed_time": "6:28:17", "remaining_time": "3:23:02"}
188
- {"current_steps": 1800, "total_steps": 2726, "loss": 0.383, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3010730229154445e-05, "epoch": 1.32, "percentage": 66.03, "elapsed_time": "6:30:17", "remaining_time": "3:20:47"}
189
- {"current_steps": 1800, "total_steps": 2726, "loss": null, "eval_loss": 0.4250344932079315, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.32, "percentage": 66.03, "elapsed_time": "6:30:17", "remaining_time": "3:20:47"}
190
- {"current_steps": 1810, "total_steps": 2726, "loss": 0.3936, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2758711233744783e-05, "epoch": 1.33, "percentage": 66.4, "elapsed_time": "6:33:42", "remaining_time": "3:19:14"}
191
- {"current_steps": 1820, "total_steps": 2726, "loss": 0.4127, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2508318050536421e-05, "epoch": 1.33, "percentage": 66.76, "elapsed_time": "6:35:33", "remaining_time": "3:16:54"}
192
- {"current_steps": 1830, "total_steps": 2726, "loss": 0.4015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2259583935202062e-05, "epoch": 1.34, "percentage": 67.13, "elapsed_time": "6:37:28", "remaining_time": "3:14:36"}
193
- {"current_steps": 1840, "total_steps": 2726, "loss": 0.4203, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2012541923067244e-05, "epoch": 1.35, "percentage": 67.5, "elapsed_time": "6:39:29", "remaining_time": "3:12:21"}
194
- {"current_steps": 1850, "total_steps": 2726, "loss": 0.4367, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.176722482472286e-05, "epoch": 1.36, "percentage": 67.87, "elapsed_time": "6:41:25", "remaining_time": "3:10:04"}
195
- {"current_steps": 1860, "total_steps": 2726, "loss": 0.4233, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1523665221667398e-05, "epoch": 1.36, "percentage": 68.23, "elapsed_time": "6:43:12", "remaining_time": "3:07:44"}
196
- {"current_steps": 1870, "total_steps": 2726, "loss": 0.4405, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1281895461979732e-05, "epoch": 1.37, "percentage": 68.6, "elapsed_time": "6:45:16", "remaining_time": "3:05:31"}
197
- {"current_steps": 1880, "total_steps": 2726, "loss": 0.4379, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.104194765602281e-05, "epoch": 1.38, "percentage": 68.97, "elapsed_time": "6:47:24", "remaining_time": "3:03:20"}
198
- {"current_steps": 1890, "total_steps": 2726, "loss": 0.4146, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0803853672178946e-05, "epoch": 1.39, "percentage": 69.33, "elapsed_time": "6:49:44", "remaining_time": "3:01:14"}
199
- {"current_steps": 1900, "total_steps": 2726, "loss": 0.4438, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0567645132617316e-05, "epoch": 1.39, "percentage": 69.7, "elapsed_time": "6:51:53", "remaining_time": "2:59:03"}
200
- {"current_steps": 1910, "total_steps": 2726, "loss": 0.3915, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0333353409094015e-05, "epoch": 1.4, "percentage": 70.07, "elapsed_time": "6:54:02", "remaining_time": "2:56:53"}
201
- {"current_steps": 1920, "total_steps": 2726, "loss": 0.4063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0101009618785528e-05, "epoch": 1.41, "percentage": 70.43, "elapsed_time": "6:56:21", "remaining_time": "2:54:47"}
202
- {"current_steps": 1930, "total_steps": 2726, "loss": 0.3974, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.870644620155877e-06, "epoch": 1.42, "percentage": 70.8, "elapsed_time": "6:58:32", "remaining_time": "2:52:37"}
203
- {"current_steps": 1940, "total_steps": 2726, "loss": 0.4015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.642289008858244e-06, "epoch": 1.42, "percentage": 71.17, "elapsed_time": "7:00:49", "remaining_time": "2:50:30"}
204
- {"current_steps": 1950, "total_steps": 2726, "loss": 0.4264, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.41597311367142e-06, "epoch": 1.43, "percentage": 71.53, "elapsed_time": "7:02:59", "remaining_time": "2:48:19"}
205
- {"current_steps": 1960, "total_steps": 2726, "loss": 0.4334, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.191726992471725e-06, "epoch": 1.44, "percentage": 71.9, "elapsed_time": "7:05:22", "remaining_time": "2:46:14"}
206
- {"current_steps": 1970, "total_steps": 2726, "loss": 0.4197, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.969580428240903e-06, "epoch": 1.44, "percentage": 72.27, "elapsed_time": "7:07:11", "remaining_time": "2:43:56"}
207
- {"current_steps": 1980, "total_steps": 2726, "loss": 0.422, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.74956292511056e-06, "epoch": 1.45, "percentage": 72.63, "elapsed_time": "7:09:23", "remaining_time": "2:41:46"}
208
- {"current_steps": 1990, "total_steps": 2726, "loss": 0.4077, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.531703704443575e-06, "epoch": 1.46, "percentage": 73.0, "elapsed_time": "7:11:33", "remaining_time": "2:39:36"}
209
- {"current_steps": 2000, "total_steps": 2726, "loss": 0.4334, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.316031700953086e-06, "epoch": 1.47, "percentage": 73.37, "elapsed_time": "7:13:58", "remaining_time": "2:37:31"}
210
- {"current_steps": 2000, "total_steps": 2726, "loss": null, "eval_loss": 0.4206145107746124, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.47, "percentage": 73.37, "elapsed_time": "7:13:58", "remaining_time": "2:37:31"}
211
- {"current_steps": 2010, "total_steps": 2726, "loss": 0.4171, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.102575558859612e-06, "epoch": 1.47, "percentage": 73.73, "elapsed_time": "7:17:28", "remaining_time": "2:35:50"}
212
- {"current_steps": 2020, "total_steps": 2726, "loss": 0.3957, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.891363628086671e-06, "epoch": 1.48, "percentage": 74.1, "elapsed_time": "7:19:27", "remaining_time": "2:33:35"}
213
- {"current_steps": 2030, "total_steps": 2726, "loss": 0.4174, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.682423960495517e-06, "epoch": 1.49, "percentage": 74.47, "elapsed_time": "7:21:25", "remaining_time": "2:31:20"}
214
- {"current_steps": 2040, "total_steps": 2726, "loss": 0.4187, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.475784306159478e-06, "epoch": 1.5, "percentage": 74.83, "elapsed_time": "7:23:33", "remaining_time": "2:29:09"}
215
- {"current_steps": 2050, "total_steps": 2726, "loss": 0.3775, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.271472109678379e-06, "epoch": 1.5, "percentage": 75.2, "elapsed_time": "7:25:41", "remaining_time": "2:26:58"}
216
- {"current_steps": 2060, "total_steps": 2726, "loss": 0.4019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.0695145065334585e-06, "epoch": 1.51, "percentage": 75.57, "elapsed_time": "7:27:36", "remaining_time": "2:24:42"}
217
- {"current_steps": 2070, "total_steps": 2726, "loss": 0.4143, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.869938319483471e-06, "epoch": 1.52, "percentage": 75.94, "elapsed_time": "7:29:45", "remaining_time": "2:22:31"}
218
- {"current_steps": 2080, "total_steps": 2726, "loss": 0.3853, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.67277005500222e-06, "epoch": 1.53, "percentage": 76.3, "elapsed_time": "7:31:46", "remaining_time": "2:20:18"}
219
- {"current_steps": 2090, "total_steps": 2726, "loss": 0.4403, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.478035899758139e-06, "epoch": 1.53, "percentage": 76.67, "elapsed_time": "7:33:54", "remaining_time": "2:18:07"}
220
- {"current_steps": 2100, "total_steps": 2726, "loss": 0.3888, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.285761717136335e-06, "epoch": 1.54, "percentage": 77.04, "elapsed_time": "7:36:12", "remaining_time": "2:15:59"}
221
- {"current_steps": 2110, "total_steps": 2726, "loss": 0.4123, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.095973043803577e-06, "epoch": 1.55, "percentage": 77.4, "elapsed_time": "7:38:32", "remaining_time": "2:13:52"}
222
- {"current_steps": 2120, "total_steps": 2726, "loss": 0.4035, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.908695086316701e-06, "epoch": 1.55, "percentage": 77.77, "elapsed_time": "7:40:48", "remaining_time": "2:11:43"}
223
- {"current_steps": 2130, "total_steps": 2726, "loss": 0.4219, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.723952717774763e-06, "epoch": 1.56, "percentage": 78.14, "elapsed_time": "7:42:48", "remaining_time": "2:09:30"}
224
- {"current_steps": 2140, "total_steps": 2726, "loss": 0.4209, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.54177047451562e-06, "epoch": 1.57, "percentage": 78.5, "elapsed_time": "7:44:53", "remaining_time": "2:07:18"}
225
- {"current_steps": 2150, "total_steps": 2726, "loss": 0.4286, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.362172552857128e-06, "epoch": 1.58, "percentage": 78.87, "elapsed_time": "7:47:02", "remaining_time": "2:05:07"}
226
- {"current_steps": 2160, "total_steps": 2726, "loss": 0.4134, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.18518280588354e-06, "epoch": 1.58, "percentage": 79.24, "elapsed_time": "7:49:04", "remaining_time": "2:02:54"}
227
- {"current_steps": 2170, "total_steps": 2726, "loss": 0.4005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.010824740277501e-06, "epoch": 1.59, "percentage": 79.6, "elapsed_time": "7:51:12", "remaining_time": "2:00:44"}
228
- {"current_steps": 2180, "total_steps": 2726, "loss": 0.3973, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.83912151319802e-06, "epoch": 1.6, "percentage": 79.97, "elapsed_time": "7:53:12", "remaining_time": "1:58:31"}
229
- {"current_steps": 2190, "total_steps": 2726, "loss": 0.3959, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6700959292048875e-06, "epoch": 1.61, "percentage": 80.34, "elapsed_time": "7:55:29", "remaining_time": "1:56:22"}
230
- {"current_steps": 2200, "total_steps": 2726, "loss": 0.4017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.503770437229918e-06, "epoch": 1.61, "percentage": 80.7, "elapsed_time": "7:57:38", "remaining_time": "1:54:12"}
231
- {"current_steps": 2200, "total_steps": 2726, "loss": null, "eval_loss": 0.41970890760421753, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.61, "percentage": 80.7, "elapsed_time": "7:57:38", "remaining_time": "1:54:12"}
232
- {"current_steps": 2210, "total_steps": 2726, "loss": 0.4013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.340167127595407e-06, "epoch": 1.62, "percentage": 81.07, "elapsed_time": "8:00:51", "remaining_time": "1:52:16"}
233
- {"current_steps": 2220, "total_steps": 2726, "loss": 0.4069, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.179307729080256e-06, "epoch": 1.63, "percentage": 81.44, "elapsed_time": "8:03:00", "remaining_time": "1:50:05"}
234
- {"current_steps": 2230, "total_steps": 2726, "loss": 0.3857, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.021213606034064e-06, "epoch": 1.64, "percentage": 81.8, "elapsed_time": "8:05:05", "remaining_time": "1:47:53"}
235
- {"current_steps": 2240, "total_steps": 2726, "loss": 0.4262, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8659057555396645e-06, "epoch": 1.64, "percentage": 82.17, "elapsed_time": "8:07:03", "remaining_time": "1:45:40"}
236
- {"current_steps": 2250, "total_steps": 2726, "loss": 0.3953, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.713404804624418e-06, "epoch": 1.65, "percentage": 82.54, "elapsed_time": "8:09:07", "remaining_time": "1:43:28"}
237
- {"current_steps": 2260, "total_steps": 2726, "loss": 0.4231, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5637310075206544e-06, "epoch": 1.66, "percentage": 82.91, "elapsed_time": "8:11:23", "remaining_time": "1:41:19"}
238
- {"current_steps": 2270, "total_steps": 2726, "loss": 0.407, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.416904242975635e-06, "epoch": 1.66, "percentage": 83.27, "elapsed_time": "8:13:23", "remaining_time": "1:39:06"}
239
- {"current_steps": 2280, "total_steps": 2726, "loss": 0.4254, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2729440116113843e-06, "epoch": 1.67, "percentage": 83.64, "elapsed_time": "8:15:40", "remaining_time": "1:36:57"}
240
- {"current_steps": 2290, "total_steps": 2726, "loss": 0.4054, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.131869433334725e-06, "epoch": 1.68, "percentage": 84.01, "elapsed_time": "8:17:43", "remaining_time": "1:34:45"}
241
- {"current_steps": 2300, "total_steps": 2726, "loss": 0.3801, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9936992447979068e-06, "epoch": 1.69, "percentage": 84.37, "elapsed_time": "8:20:05", "remaining_time": "1:32:37"}
242
- {"current_steps": 2310, "total_steps": 2726, "loss": 0.4335, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8584517969101054e-06, "epoch": 1.69, "percentage": 84.74, "elapsed_time": "8:21:46", "remaining_time": "1:30:21"}
243
- {"current_steps": 2320, "total_steps": 2726, "loss": 0.412, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7261450524001807e-06, "epoch": 1.7, "percentage": 85.11, "elapsed_time": "8:23:37", "remaining_time": "1:28:08"}
244
- {"current_steps": 2330, "total_steps": 2726, "loss": 0.4456, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.596796583430969e-06, "epoch": 1.71, "percentage": 85.47, "elapsed_time": "8:25:39", "remaining_time": "1:25:56"}
245
- {"current_steps": 2340, "total_steps": 2726, "loss": 0.3972, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.470423569265462e-06, "epoch": 1.72, "percentage": 85.84, "elapsed_time": "8:27:30", "remaining_time": "1:23:43"}
246
- {"current_steps": 2350, "total_steps": 2726, "loss": 0.3935, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.34704279398516e-06, "epoch": 1.72, "percentage": 86.21, "elapsed_time": "8:29:12", "remaining_time": "1:21:28"}
247
- {"current_steps": 2360, "total_steps": 2726, "loss": 0.3974, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2266706442609226e-06, "epoch": 1.73, "percentage": 86.57, "elapsed_time": "8:31:13", "remaining_time": "1:19:17"}
248
- {"current_steps": 2370, "total_steps": 2726, "loss": 0.4157, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.109323107176578e-06, "epoch": 1.74, "percentage": 86.94, "elapsed_time": "8:33:31", "remaining_time": "1:17:08"}
249
- {"current_steps": 2380, "total_steps": 2726, "loss": 0.4379, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9950157681056318e-06, "epoch": 1.75, "percentage": 87.31, "elapsed_time": "8:35:40", "remaining_time": "1:14:58"}
250
- {"current_steps": 2390, "total_steps": 2726, "loss": 0.4035, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8837638086413063e-06, "epoch": 1.75, "percentage": 87.67, "elapsed_time": "8:37:35", "remaining_time": "1:12:46"}
251
- {"current_steps": 2400, "total_steps": 2726, "loss": 0.4455, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7755820045802145e-06, "epoch": 1.76, "percentage": 88.04, "elapsed_time": "8:39:45", "remaining_time": "1:10:35"}
252
- {"current_steps": 2400, "total_steps": 2726, "loss": null, "eval_loss": 0.41885045170783997, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.76, "percentage": 88.04, "elapsed_time": "8:39:45", "remaining_time": "1:10:35"}
253
- {"current_steps": 2410, "total_steps": 2726, "loss": 0.4532, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6704847239599364e-06, "epoch": 1.77, "percentage": 88.41, "elapsed_time": "8:43:09", "remaining_time": "1:08:35"}
254
- {"current_steps": 2420, "total_steps": 2726, "loss": 0.4213, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5684859251507394e-06, "epoch": 1.77, "percentage": 88.77, "elapsed_time": "8:45:22", "remaining_time": "1:06:25"}
255
- {"current_steps": 2430, "total_steps": 2726, "loss": 0.3873, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4695991550017164e-06, "epoch": 1.78, "percentage": 89.14, "elapsed_time": "8:47:29", "remaining_time": "1:04:15"}
256
- {"current_steps": 2440, "total_steps": 2726, "loss": 0.4163, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.373837547041576e-06, "epoch": 1.79, "percentage": 89.51, "elapsed_time": "8:49:41", "remaining_time": "1:02:05"}
257
- {"current_steps": 2450, "total_steps": 2726, "loss": 0.3946, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2812138197343392e-06, "epoch": 1.8, "percentage": 89.88, "elapsed_time": "8:51:48", "remaining_time": "0:59:54"}
258
- {"current_steps": 2460, "total_steps": 2726, "loss": 0.41, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1917402747901152e-06, "epoch": 1.8, "percentage": 90.24, "elapsed_time": "8:53:44", "remaining_time": "0:57:42"}
259
- {"current_steps": 2470, "total_steps": 2726, "loss": 0.4126, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.105428795531327e-06, "epoch": 1.81, "percentage": 90.61, "elapsed_time": "8:55:59", "remaining_time": "0:55:33"}
260
- {"current_steps": 2480, "total_steps": 2726, "loss": 0.4505, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0222908453143804e-06, "epoch": 1.82, "percentage": 90.98, "elapsed_time": "8:58:09", "remaining_time": "0:53:22"}
261
- {"current_steps": 2490, "total_steps": 2726, "loss": 0.4303, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.423374660072065e-07, "epoch": 1.83, "percentage": 91.34, "elapsed_time": "9:00:00", "remaining_time": "0:51:10"}
262
- {"current_steps": 2500, "total_steps": 2726, "loss": 0.4179, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.655792765227405e-07, "epoch": 1.83, "percentage": 91.71, "elapsed_time": "9:01:58", "remaining_time": "0:48:59"}
263
- {"current_steps": 2510, "total_steps": 2726, "loss": 0.4376, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.920264714085828e-07, "epoch": 1.84, "percentage": 92.08, "elapsed_time": "9:04:05", "remaining_time": "0:46:49"}
264
- {"current_steps": 2520, "total_steps": 2726, "loss": 0.4069, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.216888194930272e-07, "epoch": 1.85, "percentage": 92.44, "elapsed_time": "9:06:08", "remaining_time": "0:44:38"}
265
- {"current_steps": 2530, "total_steps": 2726, "loss": 0.3887, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.545756625876031e-07, "epoch": 1.86, "percentage": 92.81, "elapsed_time": "9:08:12", "remaining_time": "0:42:28"}
266
- {"current_steps": 2540, "total_steps": 2726, "loss": 0.3972, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.906959142463947e-07, "epoch": 1.86, "percentage": 93.18, "elapsed_time": "9:10:08", "remaining_time": "0:40:17"}
267
- {"current_steps": 2550, "total_steps": 2726, "loss": 0.4078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.300580585821696e-07, "epoch": 1.87, "percentage": 93.54, "elapsed_time": "9:12:17", "remaining_time": "0:38:07"}
268
- {"current_steps": 2560, "total_steps": 2726, "loss": 0.4467, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7267014913956463e-07, "epoch": 1.88, "percentage": 93.91, "elapsed_time": "9:14:11", "remaining_time": "0:35:56"}
269
- {"current_steps": 2570, "total_steps": 2726, "loss": 0.4567, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1853980782549097e-07, "epoch": 1.88, "percentage": 94.28, "elapsed_time": "9:16:09", "remaining_time": "0:33:45"}
270
- {"current_steps": 2580, "total_steps": 2726, "loss": 0.4494, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6767422389682173e-07, "epoch": 1.89, "percentage": 94.64, "elapsed_time": "9:18:36", "remaining_time": "0:31:36"}
271
- {"current_steps": 2590, "total_steps": 2726, "loss": 0.4344, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.2008015300555306e-07, "epoch": 1.9, "percentage": 95.01, "elapsed_time": "9:20:48", "remaining_time": "0:29:26"}
272
- {"current_steps": 2600, "total_steps": 2726, "loss": 0.4101, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.757639163015774e-07, "epoch": 1.91, "percentage": 95.38, "elapsed_time": "9:22:52", "remaining_time": "0:27:16"}
273
- {"current_steps": 2600, "total_steps": 2726, "loss": null, "eval_loss": 0.41886425018310547, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 1.91, "percentage": 95.38, "elapsed_time": "9:22:52", "remaining_time": "0:27:16"}
274
- {"current_steps": 2610, "total_steps": 2726, "loss": 0.4058, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.347313995931466e-07, "epoch": 1.91, "percentage": 95.74, "elapsed_time": "9:26:02", "remaining_time": "0:25:09"}
275
- {"current_steps": 2620, "total_steps": 2726, "loss": 0.414, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9698805256513908e-07, "epoch": 1.92, "percentage": 96.11, "elapsed_time": "9:28:14", "remaining_time": "0:22:59"}
276
- {"current_steps": 2630, "total_steps": 2726, "loss": 0.4254, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6253888805527474e-07, "epoch": 1.93, "percentage": 96.48, "elapsed_time": "9:30:26", "remaining_time": "0:20:49"}
277
- {"current_steps": 2640, "total_steps": 2726, "loss": 0.4017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3138848138835313e-07, "epoch": 1.94, "percentage": 96.85, "elapsed_time": "9:32:34", "remaining_time": "0:18:39"}
278
- {"current_steps": 2650, "total_steps": 2726, "loss": 0.4258, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0354096976856186e-07, "epoch": 1.94, "percentage": 97.21, "elapsed_time": "9:34:48", "remaining_time": "0:16:29"}
279
- {"current_steps": 2660, "total_steps": 2726, "loss": 0.4098, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.900005173002712e-08, "epoch": 1.95, "percentage": 97.58, "elapsed_time": "9:37:06", "remaining_time": "0:14:19"}
280
- {"current_steps": 2670, "total_steps": 2726, "loss": 0.42, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.776898664557051e-08, "epoch": 1.96, "percentage": 97.95, "elapsed_time": "9:39:14", "remaining_time": "0:12:08"}
281
- {"current_steps": 2680, "total_steps": 2726, "loss": 0.3886, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.985059429383875e-08, "epoch": 1.97, "percentage": 98.31, "elapsed_time": "9:41:31", "remaining_time": "0:09:58"}
282
- {"current_steps": 2690, "total_steps": 2726, "loss": 0.4276, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5247254484794813e-08, "epoch": 1.97, "percentage": 98.68, "elapsed_time": "9:43:48", "remaining_time": "0:07:48"}
283
- {"current_steps": 2700, "total_steps": 2726, "loss": 0.4006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3960906743634706e-08, "epoch": 1.98, "percentage": 99.05, "elapsed_time": "9:45:40", "remaining_time": "0:05:38"}
284
- {"current_steps": 2710, "total_steps": 2726, "loss": 0.4243, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.993050053204607e-09, "epoch": 1.99, "percentage": 99.41, "elapsed_time": "9:47:47", "remaining_time": "0:03:28"}
285
- {"current_steps": 2720, "total_steps": 2726, "loss": 0.4439, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3447426549129117e-09, "epoch": 1.99, "percentage": 99.78, "elapsed_time": "9:50:02", "remaining_time": "0:01:18"}
286
- {"current_steps": 2726, "total_steps": 2726, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "9:51:16", "remaining_time": "0:00:00"}
287
- {"current_steps": 111, "total_steps": 111, "loss": null, "eval_loss": 0.41885045170783997, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "9:52:25", "remaining_time": "0:00:00"}
 
Baichuan-13B-Chat-lora-Task/trainer_state.json DELETED
@@ -1,1761 +0,0 @@
1
- {
2
- "best_metric": 0.41885045170783997,
3
- "best_model_checkpoint": "output/Baichuan-13B-Chat_lora_wqs_nlp/checkpoint-2400",
4
- "epoch": 1.9992665933259994,
5
- "global_step": 2726,
6
- "is_hyper_param_search": false,
7
- "is_local_process_zero": true,
8
- "is_world_process_zero": true,
9
- "log_history": [
10
- {
11
- "epoch": 0.01,
12
- "learning_rate": 4.999865525734509e-05,
13
- "loss": 1.5452,
14
- "step": 10
15
- },
16
- {
17
- "epoch": 0.01,
18
- "learning_rate": 4.9994621174046976e-05,
19
- "loss": 1.3291,
20
- "step": 20
21
- },
22
- {
23
- "epoch": 0.02,
24
- "learning_rate": 4.9986985231938546e-05,
25
- "loss": 1.1048,
26
- "step": 30
27
- },
28
- {
29
- "epoch": 0.03,
30
- "learning_rate": 4.99760306731191e-05,
31
- "loss": 0.9155,
32
- "step": 40
33
- },
34
- {
35
- "epoch": 0.04,
36
- "learning_rate": 4.9961758952505326e-05,
37
- "loss": 0.8856,
38
- "step": 50
39
- },
40
- {
41
- "epoch": 0.04,
42
- "learning_rate": 4.9944171965578836e-05,
43
- "loss": 0.8052,
44
- "step": 60
45
- },
46
- {
47
- "epoch": 0.05,
48
- "learning_rate": 4.992327204813435e-05,
49
- "loss": 0.7332,
50
- "step": 70
51
- },
52
- {
53
- "epoch": 0.06,
54
- "learning_rate": 4.989906197596955e-05,
55
- "loss": 0.6982,
56
- "step": 80
57
- },
58
- {
59
- "epoch": 0.07,
60
- "learning_rate": 4.987154496451635e-05,
61
- "loss": 0.6811,
62
- "step": 90
63
- },
64
- {
65
- "epoch": 0.07,
66
- "learning_rate": 4.984072466841389e-05,
67
- "loss": 0.6323,
68
- "step": 100
69
- },
70
- {
71
- "epoch": 0.08,
72
- "learning_rate": 4.981016546765289e-05,
73
- "loss": 0.6234,
74
- "step": 110
75
- },
76
- {
77
- "epoch": 0.09,
78
- "learning_rate": 4.977308057009269e-05,
79
- "loss": 0.6125,
80
- "step": 120
81
- },
82
- {
83
- "epoch": 0.1,
84
- "learning_rate": 4.97327054653146e-05,
85
- "loss": 0.5977,
86
- "step": 130
87
- },
88
- {
89
- "epoch": 0.1,
90
- "learning_rate": 4.968904551569013e-05,
91
- "loss": 0.6179,
92
- "step": 140
93
- },
94
- {
95
- "epoch": 0.11,
96
- "learning_rate": 4.9642106519863544e-05,
97
- "loss": 0.5879,
98
- "step": 150
99
- },
100
- {
101
- "epoch": 0.12,
102
- "learning_rate": 4.959189471198171e-05,
103
- "loss": 0.5472,
104
- "step": 160
105
- },
106
- {
107
- "epoch": 0.12,
108
- "learning_rate": 4.953841676086613e-05,
109
- "loss": 0.602,
110
- "step": 170
111
- },
112
- {
113
- "epoch": 0.13,
114
- "learning_rate": 4.9481679769127275e-05,
115
- "loss": 0.5773,
116
- "step": 180
117
- },
118
- {
119
- "epoch": 0.14,
120
- "learning_rate": 4.9421691272221167e-05,
121
- "loss": 0.5615,
122
- "step": 190
123
- },
124
- {
125
- "epoch": 0.15,
126
- "learning_rate": 4.935845923744865e-05,
127
- "loss": 0.5704,
128
- "step": 200
129
- },
130
- {
131
- "epoch": 0.15,
132
- "eval_loss": 0.5564362406730652,
133
- "eval_runtime": 72.9469,
134
- "eval_samples_per_second": 15.107,
135
- "eval_steps_per_second": 1.522,
136
- "step": 200
137
- },
138
- {
139
- "epoch": 0.15,
140
- "learning_rate": 4.9291992062897183e-05,
141
- "loss": 0.5803,
142
- "step": 210
143
- },
144
- {
145
- "epoch": 0.16,
146
- "learning_rate": 4.922229857632545e-05,
147
- "loss": 0.5655,
148
- "step": 220
149
- },
150
- {
151
- "epoch": 0.17,
152
- "learning_rate": 4.9149388033990966e-05,
153
- "loss": 0.5769,
154
- "step": 230
155
- },
156
- {
157
- "epoch": 0.18,
158
- "learning_rate": 4.9073270119420635e-05,
159
- "loss": 0.5862,
160
- "step": 240
161
- },
162
- {
163
- "epoch": 0.18,
164
- "learning_rate": 4.899395494212471e-05,
165
- "loss": 0.5506,
166
- "step": 250
167
- },
168
- {
169
- "epoch": 0.19,
170
- "learning_rate": 4.891145303625408e-05,
171
- "loss": 0.5183,
172
- "step": 260
173
- },
174
- {
175
- "epoch": 0.2,
176
- "learning_rate": 4.882577535920121e-05,
177
- "loss": 0.575,
178
- "step": 270
179
- },
180
- {
181
- "epoch": 0.21,
182
- "learning_rate": 4.8736933290144815e-05,
183
- "loss": 0.5359,
184
- "step": 280
185
- },
186
- {
187
- "epoch": 0.21,
188
- "learning_rate": 4.8644938628538606e-05,
189
- "loss": 0.5302,
190
- "step": 290
191
- },
192
- {
193
- "epoch": 0.22,
194
- "learning_rate": 4.8549803592544076e-05,
195
- "loss": 0.5399,
196
- "step": 300
197
- },
198
- {
199
- "epoch": 0.23,
200
- "learning_rate": 4.845154081740783e-05,
201
- "loss": 0.5379,
202
- "step": 310
203
- },
204
- {
205
- "epoch": 0.23,
206
- "learning_rate": 4.835016335378343e-05,
207
- "loss": 0.5299,
208
- "step": 320
209
- },
210
- {
211
- "epoch": 0.24,
212
- "learning_rate": 4.8245684665998073e-05,
213
- "loss": 0.535,
214
- "step": 330
215
- },
216
- {
217
- "epoch": 0.25,
218
- "learning_rate": 4.813811863026436e-05,
219
- "loss": 0.5145,
220
- "step": 340
221
- },
222
- {
223
- "epoch": 0.26,
224
- "learning_rate": 4.802747953283732e-05,
225
- "loss": 0.5126,
226
- "step": 350
227
- },
228
- {
229
- "epoch": 0.26,
230
- "learning_rate": 4.791378206811704e-05,
231
- "loss": 0.5012,
232
- "step": 360
233
- },
234
- {
235
- "epoch": 0.27,
236
- "learning_rate": 4.7797041336696995e-05,
237
- "loss": 0.5373,
238
- "step": 370
239
- },
240
- {
241
- "epoch": 0.28,
242
- "learning_rate": 4.767727284335852e-05,
243
- "loss": 0.4778,
244
- "step": 380
245
- },
246
- {
247
- "epoch": 0.29,
248
- "learning_rate": 4.755449249501155e-05,
249
- "loss": 0.4894,
250
- "step": 390
251
- },
252
- {
253
- "epoch": 0.29,
254
- "learning_rate": 4.7428716598581934e-05,
255
- "loss": 0.4986,
256
- "step": 400
257
- },
258
- {
259
- "epoch": 0.29,
260
- "eval_loss": 0.4967592656612396,
261
- "eval_runtime": 70.0948,
262
- "eval_samples_per_second": 15.722,
263
- "eval_steps_per_second": 1.584,
264
- "step": 400
265
- },
266
- {
267
- "epoch": 0.3,
268
- "learning_rate": 4.729996185884571e-05,
269
- "loss": 0.4928,
270
- "step": 410
271
- },
272
- {
273
- "epoch": 0.31,
274
- "learning_rate": 4.716824537621042e-05,
275
- "loss": 0.4807,
276
- "step": 420
277
- },
278
- {
279
- "epoch": 0.32,
280
- "learning_rate": 4.703358464444397e-05,
281
- "loss": 0.4734,
282
- "step": 430
283
- },
284
- {
285
- "epoch": 0.32,
286
- "learning_rate": 4.689599754835122e-05,
287
- "loss": 0.4603,
288
- "step": 440
289
- },
290
- {
291
- "epoch": 0.33,
292
- "learning_rate": 4.6755502361398616e-05,
293
- "loss": 0.4661,
294
- "step": 450
295
- },
296
- {
297
- "epoch": 0.34,
298
- "learning_rate": 4.6612117743287234e-05,
299
- "loss": 0.5072,
300
- "step": 460
301
- },
302
- {
303
- "epoch": 0.34,
304
- "learning_rate": 4.646586273747452e-05,
305
- "loss": 0.5056,
306
- "step": 470
307
- },
308
- {
309
- "epoch": 0.35,
310
- "learning_rate": 4.631675676864503e-05,
311
- "loss": 0.4391,
312
- "step": 480
313
- },
314
- {
315
- "epoch": 0.36,
316
- "learning_rate": 4.6164819640130595e-05,
317
- "loss": 0.483,
318
- "step": 490
319
- },
320
- {
321
- "epoch": 0.37,
322
- "learning_rate": 4.601007153128014e-05,
323
- "loss": 0.4774,
324
- "step": 500
325
- },
326
- {
327
- "epoch": 0.37,
328
- "learning_rate": 4.5852532994779606e-05,
329
- "loss": 0.4848,
330
- "step": 510
331
- },
332
- {
333
- "epoch": 0.38,
334
- "learning_rate": 4.5692224953922266e-05,
335
- "loss": 0.5099,
336
- "step": 520
337
- },
338
- {
339
- "epoch": 0.39,
340
- "learning_rate": 4.5529168699829805e-05,
341
- "loss": 0.4851,
342
- "step": 530
343
- },
344
- {
345
- "epoch": 0.4,
346
- "learning_rate": 4.536338588862459e-05,
347
- "loss": 0.498,
348
- "step": 540
349
- },
350
- {
351
- "epoch": 0.4,
352
- "learning_rate": 4.519489853855341e-05,
353
- "loss": 0.5398,
354
- "step": 550
355
- },
356
- {
357
- "epoch": 0.41,
358
- "learning_rate": 4.50237290270632e-05,
359
- "loss": 0.4566,
360
- "step": 560
361
- },
362
- {
363
- "epoch": 0.42,
364
- "learning_rate": 4.484990008782893e-05,
365
- "loss": 0.4942,
366
- "step": 570
367
- },
368
- {
369
- "epoch": 0.43,
370
- "learning_rate": 4.467343480773433e-05,
371
- "loss": 0.4625,
372
- "step": 580
373
- },
374
- {
375
- "epoch": 0.43,
376
- "learning_rate": 4.44943566238056e-05,
377
- "loss": 0.4703,
378
- "step": 590
379
- },
380
- {
381
- "epoch": 0.44,
382
- "learning_rate": 4.431268932009865e-05,
383
- "loss": 0.464,
384
- "step": 600
385
- },
386
- {
387
- "epoch": 0.44,
388
- "eval_loss": 0.46821752190589905,
389
- "eval_runtime": 69.9505,
390
- "eval_samples_per_second": 15.754,
391
- "eval_steps_per_second": 1.587,
392
- "step": 600
393
- },
394
- {
395
- "epoch": 0.45,
396
- "learning_rate": 4.412845702454024e-05,
397
- "loss": 0.4739,
398
- "step": 610
399
- },
400
- {
401
- "epoch": 0.45,
402
- "learning_rate": 4.394168420572349e-05,
403
- "loss": 0.4529,
404
- "step": 620
405
- },
406
- {
407
- "epoch": 0.46,
408
- "learning_rate": 4.3752395669658086e-05,
409
- "loss": 0.4433,
410
- "step": 630
411
- },
412
- {
413
- "epoch": 0.47,
414
- "learning_rate": 4.356061655647571e-05,
415
- "loss": 0.459,
416
- "step": 640
417
- },
418
- {
419
- "epoch": 0.48,
420
- "learning_rate": 4.336637233709107e-05,
421
- "loss": 0.4877,
422
- "step": 650
423
- },
424
- {
425
- "epoch": 0.48,
426
- "learning_rate": 4.316968880981903e-05,
427
- "loss": 0.4676,
428
- "step": 660
429
- },
430
- {
431
- "epoch": 0.49,
432
- "learning_rate": 4.2970592096948236e-05,
433
- "loss": 0.4782,
434
- "step": 670
435
- },
436
- {
437
- "epoch": 0.5,
438
- "learning_rate": 4.276910864127168e-05,
439
- "loss": 0.4778,
440
- "step": 680
441
- },
442
- {
443
- "epoch": 0.51,
444
- "learning_rate": 4.25652652025748e-05,
445
- "loss": 0.454,
446
- "step": 690
447
- },
448
- {
449
- "epoch": 0.51,
450
- "learning_rate": 4.235908885408133e-05,
451
- "loss": 0.4399,
452
- "step": 700
453
- },
454
- {
455
- "epoch": 0.52,
456
- "learning_rate": 4.215060697885767e-05,
457
- "loss": 0.5106,
458
- "step": 710
459
- },
460
- {
461
- "epoch": 0.53,
462
- "learning_rate": 4.1939847266176e-05,
463
- "loss": 0.4683,
464
- "step": 720
465
- },
466
- {
467
- "epoch": 0.54,
468
- "learning_rate": 4.172683770783677e-05,
469
- "loss": 0.4896,
470
- "step": 730
471
- },
472
- {
473
- "epoch": 0.54,
474
- "learning_rate": 4.1511606594451016e-05,
475
- "loss": 0.4633,
476
- "step": 740
477
- },
478
- {
479
- "epoch": 0.55,
480
- "learning_rate": 4.1294182511682946e-05,
481
- "loss": 0.4486,
482
- "step": 750
483
- },
484
- {
485
- "epoch": 0.56,
486
- "learning_rate": 4.1074594336453384e-05,
487
- "loss": 0.4706,
488
- "step": 760
489
- },
490
- {
491
- "epoch": 0.56,
492
- "learning_rate": 4.085287123310455e-05,
493
- "loss": 0.4797,
494
- "step": 770
495
- },
496
- {
497
- "epoch": 0.57,
498
- "learning_rate": 4.062904264952657e-05,
499
- "loss": 0.4251,
500
- "step": 780
501
- },
502
- {
503
- "epoch": 0.58,
504
- "learning_rate": 4.0403138313246435e-05,
505
- "loss": 0.4687,
506
- "step": 790
507
- },
508
- {
509
- "epoch": 0.59,
510
- "learning_rate": 4.017518822747976e-05,
511
- "loss": 0.48,
512
- "step": 800
513
- },
514
- {
515
- "epoch": 0.59,
516
- "eval_loss": 0.4554018974304199,
517
- "eval_runtime": 70.2392,
518
- "eval_samples_per_second": 15.689,
519
- "eval_steps_per_second": 1.58,
520
- "step": 800
521
- },
522
- {
523
- "epoch": 0.59,
524
- "learning_rate": 3.994522266714594e-05,
525
- "loss": 0.4662,
526
- "step": 810
527
- },
528
- {
529
- "epoch": 0.6,
530
- "learning_rate": 3.9713272174847246e-05,
531
- "loss": 0.4837,
532
- "step": 820
533
- },
534
- {
535
- "epoch": 0.61,
536
- "learning_rate": 3.947936755681229e-05,
537
- "loss": 0.5014,
538
- "step": 830
539
- },
540
- {
541
- "epoch": 0.62,
542
- "learning_rate": 3.92435398788046e-05,
543
- "loss": 0.4764,
544
- "step": 840
545
- },
546
- {
547
- "epoch": 0.62,
548
- "learning_rate": 3.9005820461996604e-05,
549
- "loss": 0.4587,
550
- "step": 850
551
- },
552
- {
553
- "epoch": 0.63,
554
- "learning_rate": 3.876624087880979e-05,
555
- "loss": 0.484,
556
- "step": 860
557
- },
558
- {
559
- "epoch": 0.64,
560
- "learning_rate": 3.85248329487214e-05,
561
- "loss": 0.4285,
562
- "step": 870
563
- },
564
- {
565
- "epoch": 0.65,
566
- "learning_rate": 3.828162873403843e-05,
567
- "loss": 0.4742,
568
- "step": 880
569
- },
570
- {
571
- "epoch": 0.65,
572
- "learning_rate": 3.803666053563926e-05,
573
- "loss": 0.4533,
574
- "step": 890
575
- },
576
- {
577
- "epoch": 0.66,
578
- "learning_rate": 3.778996088868365e-05,
579
- "loss": 0.4395,
580
- "step": 900
581
- },
582
- {
583
- "epoch": 0.67,
584
- "learning_rate": 3.754156255829167e-05,
585
- "loss": 0.4519,
586
- "step": 910
587
- },
588
- {
589
- "epoch": 0.67,
590
- "learning_rate": 3.7291498535191996e-05,
591
- "loss": 0.4684,
592
- "step": 920
593
- },
594
- {
595
- "epoch": 0.68,
596
- "learning_rate": 3.703980203134029e-05,
597
- "loss": 0.4259,
598
- "step": 930
599
- },
600
- {
601
- "epoch": 0.69,
602
- "learning_rate": 3.678650647550822e-05,
603
- "loss": 0.4576,
604
- "step": 940
605
- },
606
- {
607
- "epoch": 0.7,
608
- "learning_rate": 3.6531645508843636e-05,
609
- "loss": 0.446,
610
- "step": 950
611
- },
612
- {
613
- "epoch": 0.7,
614
- "learning_rate": 3.6275252980402544e-05,
615
- "loss": 0.4439,
616
- "step": 960
617
- },
618
- {
619
- "epoch": 0.71,
620
- "learning_rate": 3.601736294265354e-05,
621
- "loss": 0.476,
622
- "step": 970
623
- },
624
- {
625
- "epoch": 0.72,
626
- "learning_rate": 3.5758009646955115e-05,
627
- "loss": 0.4153,
628
- "step": 980
629
- },
630
- {
631
- "epoch": 0.73,
632
- "learning_rate": 3.5497227539006614e-05,
633
- "loss": 0.4444,
634
- "step": 990
635
- },
636
- {
637
- "epoch": 0.73,
638
- "learning_rate": 3.523505125427341e-05,
639
- "loss": 0.4395,
640
- "step": 1000
641
- },
642
- {
643
- "epoch": 0.73,
644
- "eval_loss": 0.44357746839523315,
645
- "eval_runtime": 70.1513,
646
- "eval_samples_per_second": 15.709,
647
- "eval_steps_per_second": 1.582,
648
- "step": 1000
649
- },
650
- {
651
- "epoch": 0.74,
652
- "learning_rate": 3.497151561338678e-05,
653
- "loss": 0.4387,
654
- "step": 1010
655
- },
656
- {
657
- "epoch": 0.75,
658
- "learning_rate": 3.470665561751928e-05,
659
- "loss": 0.4559,
660
- "step": 1020
661
- },
662
- {
663
- "epoch": 0.76,
664
- "learning_rate": 3.444050644373611e-05,
665
- "loss": 0.4349,
666
- "step": 1030
667
- },
668
- {
669
- "epoch": 0.76,
670
- "learning_rate": 3.417310344032309e-05,
671
- "loss": 0.4661,
672
- "step": 1040
673
- },
674
- {
675
- "epoch": 0.77,
676
- "learning_rate": 3.390448212209191e-05,
677
- "loss": 0.4945,
678
- "step": 1050
679
- },
680
- {
681
- "epoch": 0.78,
682
- "learning_rate": 3.3634678165663325e-05,
683
- "loss": 0.4399,
684
- "step": 1060
685
- },
686
- {
687
- "epoch": 0.78,
688
- "learning_rate": 3.336372740472877e-05,
689
- "loss": 0.4399,
690
- "step": 1070
691
- },
692
- {
693
- "epoch": 0.79,
694
- "learning_rate": 3.309166582529114e-05,
695
- "loss": 0.4251,
696
- "step": 1080
697
- },
698
- {
699
- "epoch": 0.8,
700
- "learning_rate": 3.281852956088537e-05,
701
- "loss": 0.4385,
702
- "step": 1090
703
- },
704
- {
705
- "epoch": 0.81,
706
- "learning_rate": 3.254435488777941e-05,
707
- "loss": 0.4566,
708
- "step": 1100
709
- },
710
- {
711
- "epoch": 0.81,
712
- "learning_rate": 3.226917822015623e-05,
713
- "loss": 0.4352,
714
- "step": 1110
715
- },
716
- {
717
- "epoch": 0.82,
718
- "learning_rate": 3.199303610527749e-05,
719
- "loss": 0.4617,
720
- "step": 1120
721
- },
722
- {
723
- "epoch": 0.83,
724
- "learning_rate": 3.1715965218629595e-05,
725
- "loss": 0.476,
726
- "step": 1130
727
- },
728
- {
729
- "epoch": 0.84,
730
- "learning_rate": 3.143800235905268e-05,
731
- "loss": 0.4322,
732
- "step": 1140
733
- },
734
- {
735
- "epoch": 0.84,
736
- "learning_rate": 3.115918444385315e-05,
737
- "loss": 0.4736,
738
- "step": 1150
739
- },
740
- {
741
- "epoch": 0.85,
742
- "learning_rate": 3.0879548503900665e-05,
743
- "loss": 0.4322,
744
- "step": 1160
745
- },
746
- {
747
- "epoch": 0.86,
748
- "learning_rate": 3.0599131678709836e-05,
749
- "loss": 0.4152,
750
- "step": 1170
751
- },
752
- {
753
- "epoch": 0.87,
754
- "learning_rate": 3.031797121150764e-05,
755
- "loss": 0.4187,
756
- "step": 1180
757
- },
758
- {
759
- "epoch": 0.87,
760
- "learning_rate": 3.0036104444286954e-05,
761
- "loss": 0.4331,
762
- "step": 1190
763
- },
764
- {
765
- "epoch": 0.88,
766
- "learning_rate": 2.9753568812847065e-05,
767
- "loss": 0.45,
768
- "step": 1200
769
- },
770
- {
771
- "epoch": 0.88,
772
- "eval_loss": 0.43782830238342285,
773
- "eval_runtime": 70.0939,
774
- "eval_samples_per_second": 15.722,
775
- "eval_steps_per_second": 1.584,
776
- "step": 1200
777
- },
778
- {
779
- "epoch": 0.89,
780
- "learning_rate": 2.9470401841821686e-05,
781
- "loss": 0.4457,
782
- "step": 1210
783
- },
784
- {
785
- "epoch": 0.89,
786
- "learning_rate": 2.9186641139695108e-05,
787
- "loss": 0.4667,
788
- "step": 1220
789
- },
790
- {
791
- "epoch": 0.9,
792
- "learning_rate": 2.8902324393807333e-05,
793
- "loss": 0.4516,
794
- "step": 1230
795
- },
796
- {
797
- "epoch": 0.91,
798
- "learning_rate": 2.861748936534867e-05,
799
- "loss": 0.4695,
800
- "step": 1240
801
- },
802
- {
803
- "epoch": 0.92,
804
- "learning_rate": 2.8332173884344477e-05,
805
- "loss": 0.45,
806
- "step": 1250
807
- },
808
- {
809
- "epoch": 0.92,
810
- "learning_rate": 2.8046415844630857e-05,
811
- "loss": 0.4246,
812
- "step": 1260
813
- },
814
- {
815
- "epoch": 0.93,
816
- "learning_rate": 2.7760253198821822e-05,
817
- "loss": 0.4449,
818
- "step": 1270
819
- },
820
- {
821
- "epoch": 0.94,
822
- "learning_rate": 2.7473723953268687e-05,
823
- "loss": 0.4217,
824
- "step": 1280
825
- },
826
- {
827
- "epoch": 0.95,
828
- "learning_rate": 2.7186866163012232e-05,
829
- "loss": 0.4495,
830
- "step": 1290
831
- },
832
- {
833
- "epoch": 0.95,
834
- "learning_rate": 2.6899717926728535e-05,
835
- "loss": 0.4523,
836
- "step": 1300
837
- },
838
- {
839
- "epoch": 0.96,
840
- "learning_rate": 2.6612317381668915e-05,
841
- "loss": 0.4523,
842
- "step": 1310
843
- },
844
- {
845
- "epoch": 0.97,
846
- "learning_rate": 2.632470269859478e-05,
847
- "loss": 0.45,
848
- "step": 1320
849
- },
850
- {
851
- "epoch": 0.98,
852
- "learning_rate": 2.603691207670803e-05,
853
- "loss": 0.4371,
854
- "step": 1330
855
- },
856
- {
857
- "epoch": 0.98,
858
- "learning_rate": 2.5748983738577653e-05,
859
- "loss": 0.4221,
860
- "step": 1340
861
- },
862
- {
863
- "epoch": 0.99,
864
- "learning_rate": 2.5460955925063268e-05,
865
- "loss": 0.4565,
866
- "step": 1350
867
- },
868
- {
869
- "epoch": 1.0,
870
- "learning_rate": 2.5172866890236203e-05,
871
- "loss": 0.3892,
872
- "step": 1360
873
- },
874
- {
875
- "epoch": 1.0,
876
- "learning_rate": 2.48847548962988e-05,
877
- "loss": 0.3973,
878
- "step": 1370
879
- },
880
- {
881
- "epoch": 1.01,
882
- "learning_rate": 2.4596658208502713e-05,
883
- "loss": 0.4151,
884
- "step": 1380
885
- },
886
- {
887
- "epoch": 1.02,
888
- "learning_rate": 2.4308615090066735e-05,
889
- "loss": 0.3839,
890
- "step": 1390
891
- },
892
- {
893
- "epoch": 1.03,
894
- "learning_rate": 2.4020663797094864e-05,
895
- "loss": 0.4062,
896
- "step": 1400
897
- },
898
- {
899
- "epoch": 1.03,
900
- "eval_loss": 0.43284282088279724,
901
- "eval_runtime": 70.1658,
902
- "eval_samples_per_second": 15.706,
903
- "eval_steps_per_second": 1.582,
904
- "step": 1400
905
- },
906
- {
907
- "epoch": 1.03,
908
- "learning_rate": 2.373284257349544e-05,
909
- "loss": 0.4003,
910
- "step": 1410
911
- },
912
- {
913
- "epoch": 1.04,
914
- "learning_rate": 2.3445189645901806e-05,
915
- "loss": 0.4447,
916
- "step": 1420
917
- },
918
- {
919
- "epoch": 1.05,
920
- "learning_rate": 2.3157743218595247e-05,
921
- "loss": 0.4565,
922
- "step": 1430
923
- },
924
- {
925
- "epoch": 1.06,
926
- "learning_rate": 2.287054146843097e-05,
927
- "loss": 0.4265,
928
- "step": 1440
929
- },
930
- {
931
- "epoch": 1.06,
932
- "learning_rate": 2.2583622539767668e-05,
933
- "loss": 0.4056,
934
- "step": 1450
935
- },
936
- {
937
- "epoch": 1.07,
938
- "learning_rate": 2.2297024539401463e-05,
939
- "loss": 0.4074,
940
- "step": 1460
941
- },
942
- {
943
- "epoch": 1.08,
944
- "learning_rate": 2.2010785531504716e-05,
945
- "loss": 0.4281,
946
- "step": 1470
947
- },
948
- {
949
- "epoch": 1.09,
950
- "learning_rate": 2.172494353257066e-05,
951
- "loss": 0.4663,
952
- "step": 1480
953
- },
954
- {
955
- "epoch": 1.09,
956
- "learning_rate": 2.1439536506364274e-05,
957
- "loss": 0.4281,
958
- "step": 1490
959
- },
960
- {
961
- "epoch": 1.1,
962
- "learning_rate": 2.1154602358880122e-05,
963
- "loss": 0.4111,
964
- "step": 1500
965
- },
966
- {
967
- "epoch": 1.11,
968
- "learning_rate": 2.0870178933307948e-05,
969
- "loss": 0.4187,
970
- "step": 1510
971
- },
972
- {
973
- "epoch": 1.11,
974
- "learning_rate": 2.0586304005006585e-05,
975
- "loss": 0.3964,
976
- "step": 1520
977
- },
978
- {
979
- "epoch": 1.12,
980
- "learning_rate": 2.030301527648684e-05,
981
- "loss": 0.4331,
982
- "step": 1530
983
- },
984
- {
985
- "epoch": 1.13,
986
- "learning_rate": 2.0020350372404102e-05,
987
- "loss": 0.4432,
988
- "step": 1540
989
- },
990
- {
991
- "epoch": 1.14,
992
- "learning_rate": 1.9738346834561254e-05,
993
- "loss": 0.4056,
994
- "step": 1550
995
- },
996
- {
997
- "epoch": 1.14,
998
- "learning_rate": 1.945704211692262e-05,
999
- "loss": 0.4354,
1000
- "step": 1560
1001
- },
1002
- {
1003
- "epoch": 1.15,
1004
- "learning_rate": 1.9176473580639538e-05,
1005
- "loss": 0.4309,
1006
- "step": 1570
1007
- },
1008
- {
1009
- "epoch": 1.16,
1010
- "learning_rate": 1.8896678489088304e-05,
1011
- "loss": 0.4058,
1012
- "step": 1580
1013
- },
1014
- {
1015
- "epoch": 1.17,
1016
- "learning_rate": 1.8617694002921064e-05,
1017
- "loss": 0.4319,
1018
- "step": 1590
1019
- },
1020
- {
1021
- "epoch": 1.17,
1022
- "learning_rate": 1.8339557175130383e-05,
1023
- "loss": 0.4267,
1024
- "step": 1600
1025
- },
1026
- {
1027
- "epoch": 1.17,
1028
- "eval_loss": 0.42894992232322693,
1029
- "eval_runtime": 70.251,
1030
- "eval_samples_per_second": 15.687,
1031
- "eval_steps_per_second": 1.58,
1032
- "step": 1600
1033
- },
1034
- {
1035
- "epoch": 1.18,
1036
- "learning_rate": 1.8062304946128073e-05,
1037
- "loss": 0.3921,
1038
- "step": 1610
1039
- },
1040
- {
1041
- "epoch": 1.19,
1042
- "learning_rate": 1.7785974138839018e-05,
1043
- "loss": 0.4206,
1044
- "step": 1620
1045
- },
1046
- {
1047
- "epoch": 1.2,
1048
- "learning_rate": 1.7510601453810594e-05,
1049
- "loss": 0.4356,
1050
- "step": 1630
1051
- },
1052
- {
1053
- "epoch": 1.2,
1054
- "learning_rate": 1.723622346433828e-05,
1055
- "loss": 0.4167,
1056
- "step": 1640
1057
- },
1058
- {
1059
- "epoch": 1.21,
1060
- "learning_rate": 1.6962876611608262e-05,
1061
- "loss": 0.4233,
1062
- "step": 1650
1063
- },
1064
- {
1065
- "epoch": 1.22,
1066
- "learning_rate": 1.6690597199857523e-05,
1067
- "loss": 0.4176,
1068
- "step": 1660
1069
- },
1070
- {
1071
- "epoch": 1.22,
1072
- "learning_rate": 1.6419421391552142e-05,
1073
- "loss": 0.4241,
1074
- "step": 1670
1075
- },
1076
- {
1077
- "epoch": 1.23,
1078
- "learning_rate": 1.6149385202584423e-05,
1079
- "loss": 0.4524,
1080
- "step": 1680
1081
- },
1082
- {
1083
- "epoch": 1.24,
1084
- "learning_rate": 1.5880524497489474e-05,
1085
- "loss": 0.4647,
1086
- "step": 1690
1087
- },
1088
- {
1089
- "epoch": 1.25,
1090
- "learning_rate": 1.5612874984681923e-05,
1091
- "loss": 0.4036,
1092
- "step": 1700
1093
- },
1094
- {
1095
- "epoch": 1.25,
1096
- "learning_rate": 1.534647221171334e-05,
1097
- "loss": 0.4148,
1098
- "step": 1710
1099
- },
1100
- {
1101
- "epoch": 1.26,
1102
- "learning_rate": 1.5081351560551021e-05,
1103
- "loss": 0.408,
1104
- "step": 1720
1105
- },
1106
- {
1107
- "epoch": 1.27,
1108
- "learning_rate": 1.4817548242878759e-05,
1109
- "loss": 0.457,
1110
- "step": 1730
1111
- },
1112
- {
1113
- "epoch": 1.28,
1114
- "learning_rate": 1.45550972954203e-05,
1115
- "loss": 0.4193,
1116
- "step": 1740
1117
- },
1118
- {
1119
- "epoch": 1.28,
1120
- "learning_rate": 1.4294033575285914e-05,
1121
- "loss": 0.4149,
1122
- "step": 1750
1123
- },
1124
- {
1125
- "epoch": 1.29,
1126
- "learning_rate": 1.4034391755342972e-05,
1127
- "loss": 0.4032,
1128
- "step": 1760
1129
- },
1130
- {
1131
- "epoch": 1.3,
1132
- "learning_rate": 1.3776206319610823e-05,
1133
- "loss": 0.4078,
1134
- "step": 1770
1135
- },
1136
- {
1137
- "epoch": 1.31,
1138
- "learning_rate": 1.3519511558680892e-05,
1139
- "loss": 0.4123,
1140
- "step": 1780
1141
- },
1142
- {
1143
- "epoch": 1.31,
1144
- "learning_rate": 1.3264341565162422e-05,
1145
- "loss": 0.4089,
1146
- "step": 1790
1147
- },
1148
- {
1149
- "epoch": 1.32,
1150
- "learning_rate": 1.3010730229154445e-05,
1151
- "loss": 0.383,
1152
- "step": 1800
1153
- },
1154
- {
1155
- "epoch": 1.32,
1156
- "eval_loss": 0.4250344932079315,
1157
- "eval_runtime": 70.4814,
1158
- "eval_samples_per_second": 15.635,
1159
- "eval_steps_per_second": 1.575,
1160
- "step": 1800
1161
- },
1162
- {
1163
- "epoch": 1.33,
1164
- "learning_rate": 1.2758711233744783e-05,
1165
- "loss": 0.3936,
1166
- "step": 1810
1167
- },
1168
- {
1169
- "epoch": 1.33,
1170
- "learning_rate": 1.2508318050536421e-05,
1171
- "loss": 0.4127,
1172
- "step": 1820
1173
- },
1174
- {
1175
- "epoch": 1.34,
1176
- "learning_rate": 1.2259583935202062e-05,
1177
- "loss": 0.4015,
1178
- "step": 1830
1179
- },
1180
- {
1181
- "epoch": 1.35,
1182
- "learning_rate": 1.2012541923067244e-05,
1183
- "loss": 0.4203,
1184
- "step": 1840
1185
- },
1186
- {
1187
- "epoch": 1.36,
1188
- "learning_rate": 1.176722482472286e-05,
1189
- "loss": 0.4367,
1190
- "step": 1850
1191
- },
1192
- {
1193
- "epoch": 1.36,
1194
- "learning_rate": 1.1523665221667398e-05,
1195
- "loss": 0.4233,
1196
- "step": 1860
1197
- },
1198
- {
1199
- "epoch": 1.37,
1200
- "learning_rate": 1.1281895461979732e-05,
1201
- "loss": 0.4405,
1202
- "step": 1870
1203
- },
1204
- {
1205
- "epoch": 1.38,
1206
- "learning_rate": 1.104194765602281e-05,
1207
- "loss": 0.4379,
1208
- "step": 1880
1209
- },
1210
- {
1211
- "epoch": 1.39,
1212
- "learning_rate": 1.0803853672178946e-05,
1213
- "loss": 0.4146,
1214
- "step": 1890
1215
- },
1216
- {
1217
- "epoch": 1.39,
1218
- "learning_rate": 1.0567645132617316e-05,
1219
- "loss": 0.4438,
1220
- "step": 1900
1221
- },
1222
- {
1223
- "epoch": 1.4,
1224
- "learning_rate": 1.0333353409094015e-05,
1225
- "loss": 0.3915,
1226
- "step": 1910
1227
- },
1228
- {
1229
- "epoch": 1.41,
1230
- "learning_rate": 1.0101009618785528e-05,
1231
- "loss": 0.4063,
1232
- "step": 1920
1233
- },
1234
- {
1235
- "epoch": 1.42,
1236
- "learning_rate": 9.870644620155877e-06,
1237
- "loss": 0.3974,
1238
- "step": 1930
1239
- },
1240
- {
1241
- "epoch": 1.42,
1242
- "learning_rate": 9.642289008858244e-06,
1243
- "loss": 0.4015,
1244
- "step": 1940
1245
- },
1246
- {
1247
- "epoch": 1.43,
1248
- "learning_rate": 9.41597311367142e-06,
1249
- "loss": 0.4264,
1250
- "step": 1950
1251
- },
1252
- {
1253
- "epoch": 1.44,
1254
- "learning_rate": 9.191726992471725e-06,
1255
- "loss": 0.4334,
1256
- "step": 1960
1257
- },
1258
- {
1259
- "epoch": 1.44,
1260
- "learning_rate": 8.969580428240903e-06,
1261
- "loss": 0.4197,
1262
- "step": 1970
1263
- },
1264
- {
1265
- "epoch": 1.45,
1266
- "learning_rate": 8.74956292511056e-06,
1267
- "loss": 0.422,
1268
- "step": 1980
1269
- },
1270
- {
1271
- "epoch": 1.46,
1272
- "learning_rate": 8.531703704443575e-06,
1273
- "loss": 0.4077,
1274
- "step": 1990
1275
- },
1276
- {
1277
- "epoch": 1.47,
1278
- "learning_rate": 8.316031700953086e-06,
1279
- "loss": 0.4334,
1280
- "step": 2000
1281
- },
1282
- {
1283
- "epoch": 1.47,
1284
- "eval_loss": 0.4206145107746124,
1285
- "eval_runtime": 70.0533,
1286
- "eval_samples_per_second": 15.731,
1287
- "eval_steps_per_second": 1.585,
1288
- "step": 2000
1289
- },
1290
- {
1291
- "epoch": 1.47,
1292
- "learning_rate": 8.102575558859612e-06,
1293
- "loss": 0.4171,
1294
- "step": 2010
1295
- },
1296
- {
1297
- "epoch": 1.48,
1298
- "learning_rate": 7.891363628086671e-06,
1299
- "loss": 0.3957,
1300
- "step": 2020
1301
- },
1302
- {
1303
- "epoch": 1.49,
1304
- "learning_rate": 7.682423960495517e-06,
1305
- "loss": 0.4174,
1306
- "step": 2030
1307
- },
1308
- {
1309
- "epoch": 1.5,
1310
- "learning_rate": 7.475784306159478e-06,
1311
- "loss": 0.4187,
1312
- "step": 2040
1313
- },
1314
- {
1315
- "epoch": 1.5,
1316
- "learning_rate": 7.271472109678379e-06,
1317
- "loss": 0.3775,
1318
- "step": 2050
1319
- },
1320
- {
1321
- "epoch": 1.51,
1322
- "learning_rate": 7.0695145065334585e-06,
1323
- "loss": 0.4019,
1324
- "step": 2060
1325
- },
1326
- {
1327
- "epoch": 1.52,
1328
- "learning_rate": 6.869938319483471e-06,
1329
- "loss": 0.4143,
1330
- "step": 2070
1331
- },
1332
- {
1333
- "epoch": 1.53,
1334
- "learning_rate": 6.67277005500222e-06,
1335
- "loss": 0.3853,
1336
- "step": 2080
1337
- },
1338
- {
1339
- "epoch": 1.53,
1340
- "learning_rate": 6.478035899758139e-06,
1341
- "loss": 0.4403,
1342
- "step": 2090
1343
- },
1344
- {
1345
- "epoch": 1.54,
1346
- "learning_rate": 6.285761717136335e-06,
1347
- "loss": 0.3888,
1348
- "step": 2100
1349
- },
1350
- {
1351
- "epoch": 1.55,
1352
- "learning_rate": 6.095973043803577e-06,
1353
- "loss": 0.4123,
1354
- "step": 2110
1355
- },
1356
- {
1357
- "epoch": 1.55,
1358
- "learning_rate": 5.908695086316701e-06,
1359
- "loss": 0.4035,
1360
- "step": 2120
1361
- },
1362
- {
1363
- "epoch": 1.56,
1364
- "learning_rate": 5.723952717774763e-06,
1365
- "loss": 0.4219,
1366
- "step": 2130
1367
- },
1368
- {
1369
- "epoch": 1.57,
1370
- "learning_rate": 5.54177047451562e-06,
1371
- "loss": 0.4209,
1372
- "step": 2140
1373
- },
1374
- {
1375
- "epoch": 1.58,
1376
- "learning_rate": 5.362172552857128e-06,
1377
- "loss": 0.4286,
1378
- "step": 2150
1379
- },
1380
- {
1381
- "epoch": 1.58,
1382
- "learning_rate": 5.18518280588354e-06,
1383
- "loss": 0.4134,
1384
- "step": 2160
1385
- },
1386
- {
1387
- "epoch": 1.59,
1388
- "learning_rate": 5.010824740277501e-06,
1389
- "loss": 0.4005,
1390
- "step": 2170
1391
- },
1392
- {
1393
- "epoch": 1.6,
1394
- "learning_rate": 4.83912151319802e-06,
1395
- "loss": 0.3973,
1396
- "step": 2180
1397
- },
1398
- {
1399
- "epoch": 1.61,
1400
- "learning_rate": 4.6700959292048875e-06,
1401
- "loss": 0.3959,
1402
- "step": 2190
1403
- },
1404
- {
1405
- "epoch": 1.61,
1406
- "learning_rate": 4.503770437229918e-06,
1407
- "loss": 0.4017,
1408
- "step": 2200
1409
- },
1410
- {
1411
- "epoch": 1.61,
1412
- "eval_loss": 0.41970890760421753,
1413
- "eval_runtime": 69.9019,
1414
- "eval_samples_per_second": 15.765,
1415
- "eval_steps_per_second": 1.588,
1416
- "step": 2200
1417
- },
1418
- {
1419
- "epoch": 1.62,
1420
- "learning_rate": 4.340167127595407e-06,
1421
- "loss": 0.4013,
1422
- "step": 2210
1423
- },
1424
- {
1425
- "epoch": 1.63,
1426
- "learning_rate": 4.179307729080256e-06,
1427
- "loss": 0.4069,
1428
- "step": 2220
1429
- },
1430
- {
1431
- "epoch": 1.64,
1432
- "learning_rate": 4.021213606034064e-06,
1433
- "loss": 0.3857,
1434
- "step": 2230
1435
- },
1436
- {
1437
- "epoch": 1.64,
1438
- "learning_rate": 3.8659057555396645e-06,
1439
- "loss": 0.4262,
1440
- "step": 2240
1441
- },
1442
- {
1443
- "epoch": 1.65,
1444
- "learning_rate": 3.713404804624418e-06,
1445
- "loss": 0.3953,
1446
- "step": 2250
1447
- },
1448
- {
1449
- "epoch": 1.66,
1450
- "learning_rate": 3.5637310075206544e-06,
1451
- "loss": 0.4231,
1452
- "step": 2260
1453
- },
1454
- {
1455
- "epoch": 1.66,
1456
- "learning_rate": 3.416904242975635e-06,
1457
- "loss": 0.407,
1458
- "step": 2270
1459
- },
1460
- {
1461
- "epoch": 1.67,
1462
- "learning_rate": 3.2729440116113843e-06,
1463
- "loss": 0.4254,
1464
- "step": 2280
1465
- },
1466
- {
1467
- "epoch": 1.68,
1468
- "learning_rate": 3.131869433334725e-06,
1469
- "loss": 0.4054,
1470
- "step": 2290
1471
- },
1472
- {
1473
- "epoch": 1.69,
1474
- "learning_rate": 2.9936992447979068e-06,
1475
- "loss": 0.3801,
1476
- "step": 2300
1477
- },
1478
- {
1479
- "epoch": 1.69,
1480
- "learning_rate": 2.8584517969101054e-06,
1481
- "loss": 0.4335,
1482
- "step": 2310
1483
- },
1484
- {
1485
- "epoch": 1.7,
1486
- "learning_rate": 2.7261450524001807e-06,
1487
- "loss": 0.412,
1488
- "step": 2320
1489
- },
1490
- {
1491
- "epoch": 1.71,
1492
- "learning_rate": 2.596796583430969e-06,
1493
- "loss": 0.4456,
1494
- "step": 2330
1495
- },
1496
- {
1497
- "epoch": 1.72,
1498
- "learning_rate": 2.470423569265462e-06,
1499
- "loss": 0.3972,
1500
- "step": 2340
1501
- },
1502
- {
1503
- "epoch": 1.72,
1504
- "learning_rate": 2.34704279398516e-06,
1505
- "loss": 0.3935,
1506
- "step": 2350
1507
- },
1508
- {
1509
- "epoch": 1.73,
1510
- "learning_rate": 2.2266706442609226e-06,
1511
- "loss": 0.3974,
1512
- "step": 2360
1513
- },
1514
- {
1515
- "epoch": 1.74,
1516
- "learning_rate": 2.109323107176578e-06,
1517
- "loss": 0.4157,
1518
- "step": 2370
1519
- },
1520
- {
1521
- "epoch": 1.75,
1522
- "learning_rate": 1.9950157681056318e-06,
1523
- "loss": 0.4379,
1524
- "step": 2380
1525
- },
1526
- {
1527
- "epoch": 1.75,
1528
- "learning_rate": 1.8837638086413063e-06,
1529
- "loss": 0.4035,
1530
- "step": 2390
1531
- },
1532
- {
1533
- "epoch": 1.76,
1534
- "learning_rate": 1.7755820045802145e-06,
1535
- "loss": 0.4455,
1536
- "step": 2400
1537
- },
1538
- {
1539
- "epoch": 1.76,
1540
- "eval_loss": 0.41885045170783997,
1541
- "eval_runtime": 70.0957,
1542
- "eval_samples_per_second": 15.721,
1543
- "eval_steps_per_second": 1.584,
1544
- "step": 2400
1545
- },
1546
- {
1547
- "epoch": 1.77,
1548
- "learning_rate": 1.6704847239599364e-06,
1549
- "loss": 0.4532,
1550
- "step": 2410
1551
- },
1552
- {
1553
- "epoch": 1.77,
1554
- "learning_rate": 1.5684859251507394e-06,
1555
- "loss": 0.4213,
1556
- "step": 2420
1557
- },
1558
- {
1559
- "epoch": 1.78,
1560
- "learning_rate": 1.4695991550017164e-06,
1561
- "loss": 0.3873,
1562
- "step": 2430
1563
- },
1564
- {
1565
- "epoch": 1.79,
1566
- "learning_rate": 1.373837547041576e-06,
1567
- "loss": 0.4163,
1568
- "step": 2440
1569
- },
1570
- {
1571
- "epoch": 1.8,
1572
- "learning_rate": 1.2812138197343392e-06,
1573
- "loss": 0.3946,
1574
- "step": 2450
1575
- },
1576
- {
1577
- "epoch": 1.8,
1578
- "learning_rate": 1.1917402747901152e-06,
1579
- "loss": 0.41,
1580
- "step": 2460
1581
- },
1582
- {
1583
- "epoch": 1.81,
1584
- "learning_rate": 1.105428795531327e-06,
1585
- "loss": 0.4126,
1586
- "step": 2470
1587
- },
1588
- {
1589
- "epoch": 1.82,
1590
- "learning_rate": 1.0222908453143804e-06,
1591
- "loss": 0.4505,
1592
- "step": 2480
1593
- },
1594
- {
1595
- "epoch": 1.83,
1596
- "learning_rate": 9.423374660072065e-07,
1597
- "loss": 0.4303,
1598
- "step": 2490
1599
- },
1600
- {
1601
- "epoch": 1.83,
1602
- "learning_rate": 8.655792765227405e-07,
1603
- "loss": 0.4179,
1604
- "step": 2500
1605
- },
1606
- {
1607
- "epoch": 1.84,
1608
- "learning_rate": 7.920264714085828e-07,
1609
- "loss": 0.4376,
1610
- "step": 2510
1611
- },
1612
- {
1613
- "epoch": 1.85,
1614
- "learning_rate": 7.216888194930272e-07,
1615
- "loss": 0.4069,
1616
- "step": 2520
1617
- },
1618
- {
1619
- "epoch": 1.86,
1620
- "learning_rate": 6.545756625876031e-07,
1621
- "loss": 0.3887,
1622
- "step": 2530
1623
- },
1624
- {
1625
- "epoch": 1.86,
1626
- "learning_rate": 5.906959142463947e-07,
1627
- "loss": 0.3972,
1628
- "step": 2540
1629
- },
1630
- {
1631
- "epoch": 1.87,
1632
- "learning_rate": 5.300580585821696e-07,
1633
- "loss": 0.4078,
1634
- "step": 2550
1635
- },
1636
- {
1637
- "epoch": 1.88,
1638
- "learning_rate": 4.7267014913956463e-07,
1639
- "loss": 0.4467,
1640
- "step": 2560
1641
- },
1642
- {
1643
- "epoch": 1.88,
1644
- "learning_rate": 4.1853980782549097e-07,
1645
- "loss": 0.4567,
1646
- "step": 2570
1647
- },
1648
- {
1649
- "epoch": 1.89,
1650
- "learning_rate": 3.6767422389682173e-07,
1651
- "loss": 0.4494,
1652
- "step": 2580
1653
- },
1654
- {
1655
- "epoch": 1.9,
1656
- "learning_rate": 3.2008015300555306e-07,
1657
- "loss": 0.4344,
1658
- "step": 2590
1659
- },
1660
- {
1661
- "epoch": 1.91,
1662
- "learning_rate": 2.757639163015774e-07,
1663
- "loss": 0.4101,
1664
- "step": 2600
1665
- },
1666
- {
1667
- "epoch": 1.91,
1668
- "eval_loss": 0.41886425018310547,
1669
- "eval_runtime": 70.2179,
1670
- "eval_samples_per_second": 15.694,
1671
- "eval_steps_per_second": 1.581,
1672
- "step": 2600
1673
- },
1674
- {
1675
- "epoch": 1.91,
1676
- "learning_rate": 2.347313995931466e-07,
1677
- "loss": 0.4058,
1678
- "step": 2610
1679
- },
1680
- {
1681
- "epoch": 1.92,
1682
- "learning_rate": 1.9698805256513908e-07,
1683
- "loss": 0.414,
1684
- "step": 2620
1685
- },
1686
- {
1687
- "epoch": 1.93,
1688
- "learning_rate": 1.6253888805527474e-07,
1689
- "loss": 0.4254,
1690
- "step": 2630
1691
- },
1692
- {
1693
- "epoch": 1.94,
1694
- "learning_rate": 1.3138848138835313e-07,
1695
- "loss": 0.4017,
1696
- "step": 2640
1697
- },
1698
- {
1699
- "epoch": 1.94,
1700
- "learning_rate": 1.0354096976856186e-07,
1701
- "loss": 0.4258,
1702
- "step": 2650
1703
- },
1704
- {
1705
- "epoch": 1.95,
1706
- "learning_rate": 7.900005173002712e-08,
1707
- "loss": 0.4098,
1708
- "step": 2660
1709
- },
1710
- {
1711
- "epoch": 1.96,
1712
- "learning_rate": 5.776898664557051e-08,
1713
- "loss": 0.42,
1714
- "step": 2670
1715
- },
1716
- {
1717
- "epoch": 1.97,
1718
- "learning_rate": 3.985059429383875e-08,
1719
- "loss": 0.3886,
1720
- "step": 2680
1721
- },
1722
- {
1723
- "epoch": 1.97,
1724
- "learning_rate": 2.5247254484794813e-08,
1725
- "loss": 0.4276,
1726
- "step": 2690
1727
- },
1728
- {
1729
- "epoch": 1.98,
1730
- "learning_rate": 1.3960906743634706e-08,
1731
- "loss": 0.4006,
1732
- "step": 2700
1733
- },
1734
- {
1735
- "epoch": 1.99,
1736
- "learning_rate": 5.993050053204607e-09,
1737
- "loss": 0.4243,
1738
- "step": 2710
1739
- },
1740
- {
1741
- "epoch": 1.99,
1742
- "learning_rate": 1.3447426549129117e-09,
1743
- "loss": 0.4439,
1744
- "step": 2720
1745
- },
1746
- {
1747
- "epoch": 2.0,
1748
- "step": 2726,
1749
- "total_flos": 1.1289170586615415e+19,
1750
- "train_loss": 0.46561298847898175,
1751
- "train_runtime": 35477.4141,
1752
- "train_samples_per_second": 6.149,
1753
- "train_steps_per_second": 0.077
1754
- }
1755
- ],
1756
- "max_steps": 2726,
1757
- "num_train_epochs": 2,
1758
- "total_flos": 1.1289170586615415e+19,
1759
- "trial_name": null,
1760
- "trial_params": null
1761
- }
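
The log above is the tail of the deleted trainer state (presumably the Trainer's `trainer_state.json`): a `log_history` array with `loss`/`learning_rate` entries every 10 steps and an `eval_loss` record every 200 steps, closing with the run totals (`max_steps`, `total_flos`, final `train_loss`). The sketch below shows one way such a file could be parsed to re-plot the evaluation-loss curve, similar to the `training_eval_loss.png` that is also removed in this commit; the local file path and output name are assumptions, not part of the repository.

```python
import json

import matplotlib.pyplot as plt

# Assumed path: a local copy of the deleted trainer_state.json.
with open("Baichuan-13B-Chat-lora-Task/trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic evaluation records (those carrying "eval_loss").
eval_records = [e for e in state["log_history"] if "eval_loss" in e]
steps = [e["step"] for e in eval_records]
eval_loss = [e["eval_loss"] for e in eval_records]

plt.plot(steps, eval_loss, marker="o")
plt.xlabel("step")
plt.ylabel("eval_loss")
plt.title("Evaluation loss (reconstructed from trainer state)")
plt.savefig("training_eval_loss_reconstructed.png")  # hypothetical output name
```
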
Baichuan-13B-Chat-lora-Task/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:0037c3e69fa21891dd65687a3f79ad20a8abc0a75790c554b437378b35c59fe9
3
- size 4600
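
`training_args.bin` is tracked with Git LFS, so the diff removes only the three-line pointer (spec version, object `oid`, `size`) rather than the binary itself. By Hugging Face `Trainer` convention this file is the `TrainingArguments` object serialized with `torch.save`, so a retained local copy could be inspected as sketched below; the path is an assumption, unpickling requires `transformers` to be installed, and `weights_only=False` should only be used for files you trust.

```python
import torch

# Assumed path: a local copy of the deleted training_args.bin.
# The file is a pickled TrainingArguments object, so newer PyTorch versions
# need weights_only=False to unpickle it (only safe for trusted files).
args = torch.load(
    "Baichuan-13B-Chat-lora-Task/training_args.bin",
    weights_only=False,
)

# A few of the hyperparameters recorded for the run.
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```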
 
 
 
 
Baichuan-13B-Chat-lora-Task/training_eval_loss.png DELETED
Binary file (36.1 kB)
 
Baichuan-13B-Chat-lora-Task/training_loss.png DELETED
Binary file (39.2 kB)