saifrahmed committed on
Commit aef1cc0
Parent: 09e1a1f

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. README.md +152 -0
  2. adapter_config.json +34 -0
  3. adapter_model.bin +3 -0
  4. checkpoint-12/README.md +202 -0
  5. checkpoint-12/adapter_config.json +34 -0
  6. checkpoint-12/adapter_model.safetensors +3 -0
  7. checkpoint-12/optimizer.pt +3 -0
  8. checkpoint-12/rng_state.pth +3 -0
  9. checkpoint-12/scheduler.pt +3 -0
  10. checkpoint-12/special_tokens_map.json +24 -0
  11. checkpoint-12/tokenizer.model +3 -0
  12. checkpoint-12/tokenizer_config.json +44 -0
  13. checkpoint-12/trainer_state.json +157 -0
  14. checkpoint-12/training_args.bin +3 -0
  15. checkpoint-24/README.md +202 -0
  16. checkpoint-24/adapter_config.json +34 -0
  17. checkpoint-24/adapter_model.safetensors +3 -0
  18. checkpoint-24/optimizer.pt +3 -0
  19. checkpoint-24/rng_state.pth +3 -0
  20. checkpoint-24/scheduler.pt +3 -0
  21. checkpoint-24/special_tokens_map.json +24 -0
  22. checkpoint-24/tokenizer.model +3 -0
  23. checkpoint-24/tokenizer_config.json +44 -0
  24. checkpoint-24/trainer_state.json +273 -0
  25. checkpoint-24/training_args.bin +3 -0
  26. checkpoint-36/README.md +202 -0
  27. checkpoint-36/adapter_config.json +34 -0
  28. checkpoint-36/adapter_model.safetensors +3 -0
  29. checkpoint-36/optimizer.pt +3 -0
  30. checkpoint-36/rng_state.pth +3 -0
  31. checkpoint-36/scheduler.pt +3 -0
  32. checkpoint-36/special_tokens_map.json +24 -0
  33. checkpoint-36/tokenizer.model +3 -0
  34. checkpoint-36/tokenizer_config.json +44 -0
  35. checkpoint-36/trainer_state.json +389 -0
  36. checkpoint-36/training_args.bin +3 -0
  37. checkpoint-48/README.md +202 -0
  38. checkpoint-48/adapter_config.json +34 -0
  39. checkpoint-48/adapter_model.safetensors +3 -0
  40. checkpoint-48/optimizer.pt +3 -0
  41. checkpoint-48/rng_state.pth +3 -0
  42. checkpoint-48/scheduler.pt +3 -0
  43. checkpoint-48/special_tokens_map.json +24 -0
  44. checkpoint-48/tokenizer.model +3 -0
  45. checkpoint-48/tokenizer_config.json +44 -0
  46. checkpoint-48/trainer_state.json +505 -0
  47. checkpoint-48/training_args.bin +3 -0
  48. config.json +44 -0
  49. merged/config.json +29 -0
  50. merged/generation_config.json +8 -0
README.md ADDED
@@ -0,0 +1,152 @@
+ ---
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ library_name: peft
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: outputs/lora-out
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.1`
+ ```yaml
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ model_type: LlamaForCausalLM
+ tokenizer_type: LlamaTokenizer
+
+ load_in_8bit: true
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+ - path: mhenrichsen/alpaca_2k_test
+ type: alpaca
+ dataset_prepared_path:
+ val_set_size: 0.05
+ output_dir: ./outputs/lora-out
+
+ sequence_len: 4096
+ sample_packing: true
+ eval_sample_packing: false
+ pad_to_sequence_len: true
+
+ adapter: lora
+ lora_model_dir:
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 2
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch: 4
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
+
+ ```
+
+ </details><br>
+
+ # outputs/lora-out
+
+ This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the [mhenrichsen/alpaca_2k_test](https://huggingface.co/datasets/mhenrichsen/alpaca_2k_test) dataset (see the axolotl config above).
+ It achieves the following results on the evaluation set:
+ - Loss: 1.2115
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 4
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.4615 | 0.08 | 1 | 1.4899 |
+ | 1.3846 | 0.24 | 3 | 1.4859 |
+ | 1.3667 | 0.48 | 6 | 1.4399 |
+ | 1.267 | 0.72 | 9 | 1.3382 |
+ | 1.2276 | 0.96 | 12 | 1.2941 |
+ | 1.2515 | 1.16 | 15 | 1.2793 |
+ | 1.2275 | 1.4 | 18 | 1.2556 |
+ | 1.1351 | 1.64 | 21 | 1.2347 |
+ | 1.2701 | 1.88 | 24 | 1.2253 |
+ | 1.1487 | 2.08 | 27 | 1.2213 |
+ | 1.1518 | 2.32 | 30 | 1.2209 |
+ | 1.1942 | 2.56 | 33 | 1.2171 |
+ | 1.1122 | 2.8 | 36 | 1.2147 |
+ | 1.1513 | 3.04 | 39 | 1.2139 |
+ | 1.1887 | 3.24 | 42 | 1.2128 |
+ | 1.1011 | 3.48 | 45 | 1.2114 |
+ | 1.1887 | 3.72 | 48 | 1.2115 |
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.42.3
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
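
For readers who want to try the adapter, here is a minimal loading sketch (not part of the original card). The adapter repo id below is a hypothetical placeholder for the Hub id this folder was uploaded to, and the Alpaca-style prompt simply follows the `type: alpaca` dataset setting in the config above; `peft` and `transformers` are assumed installed.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
adapter_id = "your-username/your-lora-repo"  # hypothetical placeholder: substitute this repo's Hub id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights on top of the base model

prompt = "### Instruction:\nName three primary colors.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```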
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj",
+ "up_proj",
+ "gate_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
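
This config mirrors `lora_r: 32` and `lora_alpha: 16` from the training config; because `lora_target_linear: true` was set, every linear projection (q/k/v/o plus the MLP gate/up/down) is targeted. A short sketch for inspecting it with peft, assuming the adapter directory from this commit is available locally as `./adapter`:

```python
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("./adapter")  # reads adapter_config.json
print(cfg.r, cfg.lora_alpha)   # 32 16
print(cfg.lora_alpha / cfg.r)  # 0.5 -- the scaling applied to the low-rank update BA
```

With the standard LoRA formulation, the effective weight is W + (alpha/r)·BA, so this run scales the learned update by 0.5.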
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04765d043cca34767e6be85248961c2d2d756a590969242fed181e87e505d68f
+ size 101036698
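
This (like the other binary files in the commit) is a Git LFS pointer: the repo stores only the sha256 object id and byte size, and the ~101 MB payload lives in LFS storage. A sketch for verifying a downloaded payload against the pointer shown above:

```python
import hashlib

# Expected hash taken from the pointer's "oid sha256:" line above.
expected = "04765d043cca34767e6be85248961c2d2d756a590969242fed181e87e505d68f"

h = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
print("OK:", h.hexdigest())
```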
checkpoint-12/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-12/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj",
+ "up_proj",
+ "gate_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-12/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2851811fb072817c03540939a7171413f508e32a43b2da99b2cd036dfcc4127e
+ size 100966336
checkpoint-12/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91f91d2e08263a999235f281f300c3430b0a4cb752a269ac1501fb26b9bbf6ed
+ size 50916644
checkpoint-12/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35bb56ec4b2bbb057a5a3c1c341f4ef749ec10d715336336c3c6e1a4afccacd5
+ size 14244
checkpoint-12/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e535a15f440e3b1b4d1872998a3c1d64048b2d54e365eb59e3aa3a5899e46b5
+ size 1064
checkpoint-12/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-12/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-12/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+ }
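
Two details worth noting in this config: `pad_token` reuses `</s>` (the eos token doubles as padding, which is common for Llama tokenizers that ship without a dedicated pad token), and `model_max_length` is the "no limit recorded" sentinel value, the effective training length being the `sequence_len: 4096` from the axolotl config. A minimal sketch, assuming the checkpoint directory is available locally:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-12")
print(tok.pad_token, tok.eos_token)  # </s> </s> -- eos doubles as pad
print(tok.padding_side)              # right

batch = tok(["short", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)      # right-padded to the longest sequence in the batch
```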
checkpoint-12/trainer_state.json ADDED
@@ -0,0 +1,157 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.96,
+ "eval_steps": 3,
+ "global_step": 12,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17093713581562042,
+ "learning_rate": 2e-05,
+ "loss": 1.4615,
+ "step": 1
+ },
+ {
+ "epoch": 0.08,
+ "eval_loss": 1.4899382591247559,
+ "eval_runtime": 17.3107,
+ "eval_samples_per_second": 5.777,
+ "eval_steps_per_second": 2.888,
+ "step": 1
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.19339510798454285,
+ "learning_rate": 4e-05,
+ "loss": 1.4241,
+ "step": 2
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1669788658618927,
+ "learning_rate": 6e-05,
+ "loss": 1.3846,
+ "step": 3
+ },
+ {
+ "epoch": 0.24,
+ "eval_loss": 1.4858685731887817,
+ "eval_runtime": 17.4215,
+ "eval_samples_per_second": 5.74,
+ "eval_steps_per_second": 2.87,
+ "step": 3
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.14142441749572754,
+ "learning_rate": 8e-05,
+ "loss": 1.2219,
+ "step": 4
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.15717843174934387,
+ "learning_rate": 0.0001,
+ "loss": 1.3617,
+ "step": 5
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.16309261322021484,
+ "learning_rate": 0.00012,
+ "loss": 1.3667,
+ "step": 6
+ },
+ {
+ "epoch": 0.48,
+ "eval_loss": 1.439871072769165,
+ "eval_runtime": 17.6339,
+ "eval_samples_per_second": 5.671,
+ "eval_steps_per_second": 2.835,
+ "step": 6
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.15078029036521912,
+ "learning_rate": 0.00014,
+ "loss": 1.3008,
+ "step": 7
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.13603582978248596,
+ "learning_rate": 0.00016,
+ "loss": 1.3333,
+ "step": 8
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.1104956567287445,
+ "learning_rate": 0.00018,
+ "loss": 1.267,
+ "step": 9
+ },
+ {
+ "epoch": 0.72,
+ "eval_loss": 1.3381670713424683,
+ "eval_runtime": 17.2986,
+ "eval_samples_per_second": 5.781,
+ "eval_steps_per_second": 2.89,
+ "step": 9
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.09913735836744308,
+ "learning_rate": 0.0002,
+ "loss": 1.2946,
+ "step": 10
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.11903145164251328,
+ "learning_rate": 0.000199658449300667,
+ "loss": 1.2921,
+ "step": 11
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.11169299483299255,
+ "learning_rate": 0.00019863613034027224,
+ "loss": 1.2276,
+ "step": 12
+ },
+ {
+ "epoch": 0.96,
+ "eval_loss": 1.2940881252288818,
+ "eval_runtime": 17.4061,
+ "eval_samples_per_second": 5.745,
+ "eval_steps_per_second": 2.873,
+ "step": 12
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 48,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 12,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2500249176244224.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
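
This state file records optimizer step 12 of 48 (end of epoch 1, with checkpoints saved every 12 steps). A small sketch for summarizing such a file, assuming it is available locally at the path shown:

```python
import json

with open("checkpoint-12/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (have "loss") and eval entries (have "eval_loss")
train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

print(f"step {state['global_step']} / {state['max_steps']} "
      f"(epoch {state['epoch']:.2f} of {state['num_train_epochs']})")
print("last train loss:", train[-1]["loss"])       # 1.2276
print("last eval loss: ", evals[-1]["eval_loss"])  # 1.2940881252288818
```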
checkpoint-12/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b6f97a9990d98ad2e97e692ffff5d13e2f2644982eea955df1c8a971b386400
+ size 6008
checkpoint-24/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-24/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj",
+ "up_proj",
+ "gate_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-24/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ae7f7faeb13406f9baee68ce6df23340ed54f657ec8cf5499068465cafa61c9
+ size 100966336
checkpoint-24/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b50cd600842200d32a7ba4ccac017eeff6e48deefce732c4a1cb4e5a3e415c87
+ size 50916644
checkpoint-24/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e05ca4a3ea886c932e939cdd1e683db542bf08214470c9ad647abdc302b57add
+ size 14244
checkpoint-24/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fd2df94f244c74ef9128181bbcabe340233f441f19aa2de60f32d36a56a9cac
+ size 1064
checkpoint-24/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-24/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-24/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+ }
checkpoint-24/trainer_state.json ADDED
@@ -0,0 +1,273 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.88,
+ "eval_steps": 3,
+ "global_step": 24,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17093713581562042,
+ "learning_rate": 2e-05,
+ "loss": 1.4615,
+ "step": 1
+ },
+ {
+ "epoch": 0.08,
+ "eval_loss": 1.4899382591247559,
+ "eval_runtime": 17.3107,
+ "eval_samples_per_second": 5.777,
+ "eval_steps_per_second": 2.888,
+ "step": 1
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.19339510798454285,
+ "learning_rate": 4e-05,
+ "loss": 1.4241,
+ "step": 2
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1669788658618927,
+ "learning_rate": 6e-05,
+ "loss": 1.3846,
+ "step": 3
+ },
+ {
+ "epoch": 0.24,
+ "eval_loss": 1.4858685731887817,
+ "eval_runtime": 17.4215,
+ "eval_samples_per_second": 5.74,
+ "eval_steps_per_second": 2.87,
+ "step": 3
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.14142441749572754,
+ "learning_rate": 8e-05,
+ "loss": 1.2219,
+ "step": 4
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.15717843174934387,
+ "learning_rate": 0.0001,
+ "loss": 1.3617,
+ "step": 5
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.16309261322021484,
+ "learning_rate": 0.00012,
+ "loss": 1.3667,
+ "step": 6
+ },
+ {
+ "epoch": 0.48,
+ "eval_loss": 1.439871072769165,
+ "eval_runtime": 17.6339,
+ "eval_samples_per_second": 5.671,
+ "eval_steps_per_second": 2.835,
+ "step": 6
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.15078029036521912,
+ "learning_rate": 0.00014,
+ "loss": 1.3008,
+ "step": 7
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.13603582978248596,
+ "learning_rate": 0.00016,
+ "loss": 1.3333,
+ "step": 8
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.1104956567287445,
+ "learning_rate": 0.00018,
+ "loss": 1.267,
+ "step": 9
+ },
+ {
+ "epoch": 0.72,
+ "eval_loss": 1.3381670713424683,
+ "eval_runtime": 17.2986,
+ "eval_samples_per_second": 5.781,
+ "eval_steps_per_second": 2.89,
+ "step": 9
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.09913735836744308,
+ "learning_rate": 0.0002,
+ "loss": 1.2946,
+ "step": 10
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.11903145164251328,
+ "learning_rate": 0.000199658449300667,
+ "loss": 1.2921,
+ "step": 11
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.11169299483299255,
+ "learning_rate": 0.00019863613034027224,
+ "loss": 1.2276,
+ "step": 12
+ },
+ {
+ "epoch": 0.96,
+ "eval_loss": 1.2940881252288818,
+ "eval_runtime": 17.4061,
+ "eval_samples_per_second": 5.745,
+ "eval_steps_per_second": 2.873,
+ "step": 12
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.1135605201125145,
+ "learning_rate": 0.00019694002659393305,
+ "loss": 1.2666,
+ "step": 13
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.1120605319738388,
+ "learning_rate": 0.00019458172417006347,
+ "loss": 1.2589,
+ "step": 14
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.10806083679199219,
+ "learning_rate": 0.00019157733266550575,
+ "loss": 1.2515,
+ "step": 15
+ },
+ {
+ "epoch": 1.16,
+ "eval_loss": 1.2792645692825317,
+ "eval_runtime": 17.2617,
+ "eval_samples_per_second": 5.793,
+ "eval_steps_per_second": 2.897,
+ "step": 15
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.09928147494792938,
+ "learning_rate": 0.0001879473751206489,
+ "loss": 1.1636,
+ "step": 16
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.09012632817029953,
+ "learning_rate": 0.00018371664782625287,
+ "loss": 1.1601,
+ "step": 17
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.09199394285678864,
+ "learning_rate": 0.00017891405093963938,
+ "loss": 1.2275,
+ "step": 18
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.2555986642837524,
+ "eval_runtime": 17.3997,
+ "eval_samples_per_second": 5.747,
+ "eval_steps_per_second": 2.874,
+ "step": 18
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.096685491502285,
+ "learning_rate": 0.00017357239106731317,
+ "loss": 1.228,
+ "step": 19
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.08053378760814667,
+ "learning_rate": 0.00016772815716257412,
+ "loss": 1.192,
+ "step": 20
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.07304864376783371,
+ "learning_rate": 0.0001614212712689668,
+ "loss": 1.1351,
+ "step": 21
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "eval_loss": 1.2347127199172974,
+ "eval_runtime": 17.3396,
+ "eval_samples_per_second": 5.767,
+ "eval_steps_per_second": 2.884,
+ "step": 21
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.0821811631321907,
+ "learning_rate": 0.00015469481581224272,
+ "loss": 1.2031,
+ "step": 22
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.07574562728404999,
+ "learning_rate": 0.00014759473930370736,
+ "loss": 1.2476,
+ "step": 23
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.07235240191221237,
+ "learning_rate": 0.00014016954246529696,
+ "loss": 1.2701,
+ "step": 24
+ },
+ {
+ "epoch": 1.88,
+ "eval_loss": 1.2253003120422363,
+ "eval_runtime": 17.292,
+ "eval_samples_per_second": 5.783,
+ "eval_steps_per_second": 2.892,
+ "step": 24
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 48,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 12,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5000498352488448.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
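
Since `log_history` accumulates across checkpoints, any of these state files can be used to visualize the eval-loss curve so far. A sketch, assuming the file is local and matplotlib is installed:

```python
import json
import matplotlib.pyplot as plt

with open("checkpoint-24/trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries logged every eval_steps=3 optimizer steps.
pts = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
steps, losses = zip(*pts)

plt.plot(steps, losses, marker="o")
plt.xlabel("optimizer step")
plt.ylabel("eval loss")
plt.title("TinyLlama LoRA fine-tune: eval loss")
plt.savefig("eval_loss.png")
```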
checkpoint-24/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b6f97a9990d98ad2e97e692ffff5d13e2f2644982eea955df1c8a971b386400
+ size 6008
checkpoint-36/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-36/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj",
+ "up_proj",
+ "gate_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-36/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40bc41fd8180f2ca0127e4626c91276aed9de4159efd34dc831e0ddb9a905b7e
+ size 100966336
checkpoint-36/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bcab64fb2240d5c9e39264c8e13b177843cb543b2340833b1b74c3f3441a6cc
+ size 50916644
checkpoint-36/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:097d1ea8f5f9af9824d41438adbb887f58098ad9ba06156cee3883694e2e2736
+ size 14244
checkpoint-36/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5918ddd95097cd0d9acd73ea2bf14c23b23f8d6e0bb73e5c46156ea038bd743
+ size 1064
checkpoint-36/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-36/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-36/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+ }
checkpoint-36/trainer_state.json ADDED
@@ -0,0 +1,389 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.8,
+ "eval_steps": 3,
+ "global_step": 36,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17093713581562042,
+ "learning_rate": 2e-05,
+ "loss": 1.4615,
+ "step": 1
+ },
+ {
+ "epoch": 0.08,
+ "eval_loss": 1.4899382591247559,
+ "eval_runtime": 17.3107,
+ "eval_samples_per_second": 5.777,
+ "eval_steps_per_second": 2.888,
+ "step": 1
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.19339510798454285,
+ "learning_rate": 4e-05,
+ "loss": 1.4241,
+ "step": 2
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1669788658618927,
+ "learning_rate": 6e-05,
+ "loss": 1.3846,
+ "step": 3
+ },
+ {
+ "epoch": 0.24,
+ "eval_loss": 1.4858685731887817,
+ "eval_runtime": 17.4215,
+ "eval_samples_per_second": 5.74,
+ "eval_steps_per_second": 2.87,
+ "step": 3
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.14142441749572754,
+ "learning_rate": 8e-05,
+ "loss": 1.2219,
+ "step": 4
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.15717843174934387,
+ "learning_rate": 0.0001,
+ "loss": 1.3617,
+ "step": 5
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.16309261322021484,
+ "learning_rate": 0.00012,
+ "loss": 1.3667,
+ "step": 6
+ },
+ {
+ "epoch": 0.48,
+ "eval_loss": 1.439871072769165,
+ "eval_runtime": 17.6339,
+ "eval_samples_per_second": 5.671,
+ "eval_steps_per_second": 2.835,
+ "step": 6
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.15078029036521912,
+ "learning_rate": 0.00014,
+ "loss": 1.3008,
+ "step": 7
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.13603582978248596,
+ "learning_rate": 0.00016,
+ "loss": 1.3333,
+ "step": 8
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.1104956567287445,
+ "learning_rate": 0.00018,
+ "loss": 1.267,
+ "step": 9
+ },
+ {
+ "epoch": 0.72,
+ "eval_loss": 1.3381670713424683,
+ "eval_runtime": 17.2986,
+ "eval_samples_per_second": 5.781,
+ "eval_steps_per_second": 2.89,
+ "step": 9
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.09913735836744308,
+ "learning_rate": 0.0002,
+ "loss": 1.2946,
+ "step": 10
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.11903145164251328,
+ "learning_rate": 0.000199658449300667,
+ "loss": 1.2921,
+ "step": 11
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.11169299483299255,
+ "learning_rate": 0.00019863613034027224,
+ "loss": 1.2276,
+ "step": 12
+ },
+ {
+ "epoch": 0.96,
+ "eval_loss": 1.2940881252288818,
+ "eval_runtime": 17.4061,
+ "eval_samples_per_second": 5.745,
+ "eval_steps_per_second": 2.873,
+ "step": 12
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.1135605201125145,
+ "learning_rate": 0.00019694002659393305,
+ "loss": 1.2666,
+ "step": 13
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.1120605319738388,
+ "learning_rate": 0.00019458172417006347,
+ "loss": 1.2589,
+ "step": 14
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.10806083679199219,
+ "learning_rate": 0.00019157733266550575,
+ "loss": 1.2515,
+ "step": 15
+ },
+ {
+ "epoch": 1.16,
+ "eval_loss": 1.2792645692825317,
+ "eval_runtime": 17.2617,
+ "eval_samples_per_second": 5.793,
+ "eval_steps_per_second": 2.897,
+ "step": 15
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.09928147494792938,
+ "learning_rate": 0.0001879473751206489,
+ "loss": 1.1636,
+ "step": 16
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.09012632817029953,
+ "learning_rate": 0.00018371664782625287,
+ "loss": 1.1601,
+ "step": 17
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.09199394285678864,
+ "learning_rate": 0.00017891405093963938,
+ "loss": 1.2275,
+ "step": 18
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.2555986642837524,
+ "eval_runtime": 17.3997,
+ "eval_samples_per_second": 5.747,
+ "eval_steps_per_second": 2.874,
+ "step": 18
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.096685491502285,
+ "learning_rate": 0.00017357239106731317,
+ "loss": 1.228,
+ "step": 19
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.08053378760814667,
+ "learning_rate": 0.00016772815716257412,
+ "loss": 1.192,
+ "step": 20
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.07304864376783371,
+ "learning_rate": 0.0001614212712689668,
+ "loss": 1.1351,
+ "step": 21
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "eval_loss": 1.2347127199172974,
+ "eval_runtime": 17.3396,
+ "eval_samples_per_second": 5.767,
+ "eval_steps_per_second": 2.884,
+ "step": 21
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.0821811631321907,
+ "learning_rate": 0.00015469481581224272,
+ "loss": 1.2031,
+ "step": 22
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.07574562728404999,
+ "learning_rate": 0.00014759473930370736,
+ "loss": 1.2476,
+ "step": 23
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.07235240191221237,
+ "learning_rate": 0.00014016954246529696,
+ "loss": 1.2701,
+ "step": 24
+ },
+ {
+ "epoch": 1.88,
+ "eval_loss": 1.2253003120422363,
+ "eval_runtime": 17.292,
+ "eval_samples_per_second": 5.783,
+ "eval_steps_per_second": 2.892,
+ "step": 24
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.07071871310472488,
+ "learning_rate": 0.00013246994692046836,
+ "loss": 1.2051,
+ "step": 25
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.07875131815671921,
+ "learning_rate": 0.00012454854871407994,
+ "loss": 1.192,
+ "step": 26
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.06468148529529572,
+ "learning_rate": 0.00011645945902807341,
+ "loss": 1.1487,
+ "step": 27
+ },
+ {
+ "epoch": 2.08,
+ "eval_loss": 1.2213425636291504,
+ "eval_runtime": 17.7102,
+ "eval_samples_per_second": 5.646,
+ "eval_steps_per_second": 2.823,
+ "step": 27
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 0.0741707906126976,
+ "learning_rate": 0.00010825793454723325,
+ "loss": 1.1673,
+ "step": 28
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.06802140176296234,
+ "learning_rate": 0.0001,
+ "loss": 1.2054,
+ "step": 29
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.06834083795547485,
+ "learning_rate": 9.174206545276677e-05,
+ "loss": 1.1518,
+ "step": 30
+ },
+ {
+ "epoch": 2.32,
+ "eval_loss": 1.220943808555603,
+ "eval_runtime": 17.4872,
+ "eval_samples_per_second": 5.718,
+ "eval_steps_per_second": 2.859,
+ "step": 30
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.06714992970228195,
+ "learning_rate": 8.35405409719266e-05,
+ "loss": 1.183,
+ "step": 31
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.06744072586297989,
+ "learning_rate": 7.54514512859201e-05,
+ "loss": 1.2098,
+ "step": 32
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.06815183162689209,
+ "learning_rate": 6.753005307953167e-05,
+ "loss": 1.1942,
+ "step": 33
+ },
+ {
+ "epoch": 2.56,
+ "eval_loss": 1.217085599899292,
+ "eval_runtime": 17.2384,
+ "eval_samples_per_second": 5.801,
+ "eval_steps_per_second": 2.901,
+ "step": 33
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.07002735882997513,
+ "learning_rate": 5.983045753470308e-05,
+ "loss": 1.1896,
+ "step": 34
+ },
+ {
+ "epoch": 2.7199999999999998,
+ "grad_norm": 0.07221011817455292,
+ "learning_rate": 5.240526069629265e-05,
+ "loss": 1.1795,
+ "step": 35
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 0.07009800523519516,
+ "learning_rate": 4.530518418775733e-05,
+ "loss": 1.1122,
+ "step": 36
+ },
+ {
+ "epoch": 2.8,
+ "eval_loss": 1.2147068977355957,
+ "eval_runtime": 17.3366,
+ "eval_samples_per_second": 5.768,
+ "eval_steps_per_second": 2.884,
+ "step": 36
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 48,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 12,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 7474703266480128.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-36/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b6f97a9990d98ad2e97e692ffff5d13e2f2644982eea955df1c8a971b386400
+ size 6008
checkpoint-48/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-48/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj",
+ "up_proj",
+ "gate_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
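
The config above applies LoRA with rank 32, alpha 16, and dropout 0.05 to every attention and MLP projection (`q_proj`, `k_proj`, `v_proj`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`). A minimal loading sketch, assuming the `checkpoint-48/` directory has been downloaded locally and `peft`/`transformers` are installed:

```python
# Minimal sketch: attach the checkpoint-48 LoRA adapter to the base model
# named in adapter_config.json. The local path "checkpoint-48" is an assumption.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
)
# PeftModel reads adapter_config.json and adapter_model.safetensors from the
# checkpoint directory; "inference_mode": true keeps the adapter frozen.
model = PeftModel.from_pretrained(base, "checkpoint-48")
model.eval()
```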
checkpoint-48/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23d730c3d6976448e84ec766f96c2ab0cb6e0a2244da82904d5f04555910ebcf
+ size 100966336
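
As with the other binary files in this commit, only a Git LFS pointer (version, oid, size) is versioned here; the ~101 MB safetensors payload lives in LFS storage. A sketch for checking a downloaded copy against its pointer, with the local path as an assumption:

```python
# Sketch: verify a downloaded LFS object against the oid/size in its pointer.
import hashlib
from pathlib import Path

path = Path("checkpoint-48/adapter_model.safetensors")  # assumed local path
expected_oid = "23d730c3d6976448e84ec766f96c2ab0cb6e0a2244da82904d5f04555910ebcf"
expected_size = 100966336

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("payload matches its LFS pointer")
```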
checkpoint-48/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767d44130f9670629778470c8a976df6fc095672da1beb3259e9efa426ad6df8
+ size 50916644
checkpoint-48/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f72c0a030693438895e7bd5e11e6e5913e59736aaef065212621cb21e6209c
+ size 14244
checkpoint-48/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b832e0373d616c3d50894a908dda7ef6c28f6cb2f8a92b6d36348dbf67fd1715
+ size 1064
checkpoint-48/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-48/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-48/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+ }
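
Per this config, the tokenizer is a `LlamaTokenizer` that adds a BOS token, reuses `</s>` as the pad token, and pads on the right. A quick sketch, again assuming a local `checkpoint-48/` directory:

```python
# Sketch: load the tokenizer saved alongside the adapter and exercise the
# padding setup recorded in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-48")  # assumed local path
print(tokenizer.pad_token, tokenizer.padding_side)  # "</s>" "right"

batch = tokenizer(["short", "a longer example"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # both rows padded to the longer length
```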
checkpoint-48/trainer_state.json ADDED
@@ -0,0 +1,505 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.7199999999999998,
+ "eval_steps": 3,
+ "global_step": 48,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17093713581562042,
+ "learning_rate": 2e-05,
+ "loss": 1.4615,
+ "step": 1
+ },
+ {
+ "epoch": 0.08,
+ "eval_loss": 1.4899382591247559,
+ "eval_runtime": 17.3107,
+ "eval_samples_per_second": 5.777,
+ "eval_steps_per_second": 2.888,
+ "step": 1
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.19339510798454285,
+ "learning_rate": 4e-05,
+ "loss": 1.4241,
+ "step": 2
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1669788658618927,
+ "learning_rate": 6e-05,
+ "loss": 1.3846,
+ "step": 3
+ },
+ {
+ "epoch": 0.24,
+ "eval_loss": 1.4858685731887817,
+ "eval_runtime": 17.4215,
+ "eval_samples_per_second": 5.74,
+ "eval_steps_per_second": 2.87,
+ "step": 3
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.14142441749572754,
+ "learning_rate": 8e-05,
+ "loss": 1.2219,
+ "step": 4
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.15717843174934387,
+ "learning_rate": 0.0001,
+ "loss": 1.3617,
+ "step": 5
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.16309261322021484,
+ "learning_rate": 0.00012,
+ "loss": 1.3667,
+ "step": 6
+ },
+ {
+ "epoch": 0.48,
+ "eval_loss": 1.439871072769165,
+ "eval_runtime": 17.6339,
+ "eval_samples_per_second": 5.671,
+ "eval_steps_per_second": 2.835,
+ "step": 6
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.15078029036521912,
+ "learning_rate": 0.00014,
+ "loss": 1.3008,
+ "step": 7
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.13603582978248596,
+ "learning_rate": 0.00016,
+ "loss": 1.3333,
+ "step": 8
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.1104956567287445,
+ "learning_rate": 0.00018,
+ "loss": 1.267,
+ "step": 9
+ },
+ {
+ "epoch": 0.72,
+ "eval_loss": 1.3381670713424683,
+ "eval_runtime": 17.2986,
+ "eval_samples_per_second": 5.781,
+ "eval_steps_per_second": 2.89,
+ "step": 9
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.09913735836744308,
+ "learning_rate": 0.0002,
+ "loss": 1.2946,
+ "step": 10
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.11903145164251328,
+ "learning_rate": 0.000199658449300667,
+ "loss": 1.2921,
+ "step": 11
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.11169299483299255,
+ "learning_rate": 0.00019863613034027224,
+ "loss": 1.2276,
+ "step": 12
+ },
+ {
+ "epoch": 0.96,
+ "eval_loss": 1.2940881252288818,
+ "eval_runtime": 17.4061,
+ "eval_samples_per_second": 5.745,
+ "eval_steps_per_second": 2.873,
+ "step": 12
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.1135605201125145,
+ "learning_rate": 0.00019694002659393305,
+ "loss": 1.2666,
+ "step": 13
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.1120605319738388,
+ "learning_rate": 0.00019458172417006347,
+ "loss": 1.2589,
+ "step": 14
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.10806083679199219,
+ "learning_rate": 0.00019157733266550575,
+ "loss": 1.2515,
+ "step": 15
+ },
+ {
+ "epoch": 1.16,
+ "eval_loss": 1.2792645692825317,
+ "eval_runtime": 17.2617,
+ "eval_samples_per_second": 5.793,
+ "eval_steps_per_second": 2.897,
+ "step": 15
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.09928147494792938,
+ "learning_rate": 0.0001879473751206489,
+ "loss": 1.1636,
+ "step": 16
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.09012632817029953,
+ "learning_rate": 0.00018371664782625287,
+ "loss": 1.1601,
+ "step": 17
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.09199394285678864,
+ "learning_rate": 0.00017891405093963938,
+ "loss": 1.2275,
+ "step": 18
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.2555986642837524,
+ "eval_runtime": 17.3997,
+ "eval_samples_per_second": 5.747,
+ "eval_steps_per_second": 2.874,
+ "step": 18
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.096685491502285,
+ "learning_rate": 0.00017357239106731317,
+ "loss": 1.228,
+ "step": 19
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.08053378760814667,
+ "learning_rate": 0.00016772815716257412,
+ "loss": 1.192,
+ "step": 20
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.07304864376783371,
+ "learning_rate": 0.0001614212712689668,
+ "loss": 1.1351,
+ "step": 21
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "eval_loss": 1.2347127199172974,
+ "eval_runtime": 17.3396,
+ "eval_samples_per_second": 5.767,
+ "eval_steps_per_second": 2.884,
+ "step": 21
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.0821811631321907,
+ "learning_rate": 0.00015469481581224272,
+ "loss": 1.2031,
+ "step": 22
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.07574562728404999,
+ "learning_rate": 0.00014759473930370736,
+ "loss": 1.2476,
+ "step": 23
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.07235240191221237,
+ "learning_rate": 0.00014016954246529696,
+ "loss": 1.2701,
+ "step": 24
+ },
+ {
+ "epoch": 1.88,
+ "eval_loss": 1.2253003120422363,
+ "eval_runtime": 17.292,
+ "eval_samples_per_second": 5.783,
+ "eval_steps_per_second": 2.892,
+ "step": 24
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.07071871310472488,
+ "learning_rate": 0.00013246994692046836,
+ "loss": 1.2051,
+ "step": 25
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.07875131815671921,
+ "learning_rate": 0.00012454854871407994,
+ "loss": 1.192,
+ "step": 26
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.06468148529529572,
+ "learning_rate": 0.00011645945902807341,
+ "loss": 1.1487,
+ "step": 27
+ },
+ {
+ "epoch": 2.08,
+ "eval_loss": 1.2213425636291504,
+ "eval_runtime": 17.7102,
+ "eval_samples_per_second": 5.646,
+ "eval_steps_per_second": 2.823,
+ "step": 27
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 0.0741707906126976,
+ "learning_rate": 0.00010825793454723325,
+ "loss": 1.1673,
+ "step": 28
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.06802140176296234,
+ "learning_rate": 0.0001,
+ "loss": 1.2054,
+ "step": 29
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.06834083795547485,
+ "learning_rate": 9.174206545276677e-05,
+ "loss": 1.1518,
+ "step": 30
+ },
+ {
+ "epoch": 2.32,
+ "eval_loss": 1.220943808555603,
+ "eval_runtime": 17.4872,
+ "eval_samples_per_second": 5.718,
+ "eval_steps_per_second": 2.859,
+ "step": 30
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.06714992970228195,
+ "learning_rate": 8.35405409719266e-05,
+ "loss": 1.183,
+ "step": 31
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.06744072586297989,
+ "learning_rate": 7.54514512859201e-05,
+ "loss": 1.2098,
+ "step": 32
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.06815183162689209,
+ "learning_rate": 6.753005307953167e-05,
+ "loss": 1.1942,
+ "step": 33
+ },
+ {
+ "epoch": 2.56,
+ "eval_loss": 1.217085599899292,
+ "eval_runtime": 17.2384,
+ "eval_samples_per_second": 5.801,
+ "eval_steps_per_second": 2.901,
+ "step": 33
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.07002735882997513,
+ "learning_rate": 5.983045753470308e-05,
+ "loss": 1.1896,
+ "step": 34
+ },
+ {
+ "epoch": 2.7199999999999998,
+ "grad_norm": 0.07221011817455292,
+ "learning_rate": 5.240526069629265e-05,
+ "loss": 1.1795,
+ "step": 35
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 0.07009800523519516,
+ "learning_rate": 4.530518418775733e-05,
+ "loss": 1.1122,
+ "step": 36
+ },
+ {
+ "epoch": 2.8,
+ "eval_loss": 1.2147068977355957,
+ "eval_runtime": 17.3366,
+ "eval_samples_per_second": 5.768,
+ "eval_steps_per_second": 2.884,
+ "step": 36
+ },
+ {
+ "epoch": 2.88,
+ "grad_norm": 0.06943687796592712,
+ "learning_rate": 3.857872873103322e-05,
+ "loss": 1.1838,
+ "step": 37
+ },
+ {
+ "epoch": 2.96,
+ "grad_norm": 0.06811714172363281,
+ "learning_rate": 3.227184283742591e-05,
+ "loss": 1.166,
+ "step": 38
+ },
+ {
+ "epoch": 3.04,
+ "grad_norm": 0.06823349744081497,
+ "learning_rate": 2.6427608932686843e-05,
+ "loss": 1.1513,
+ "step": 39
+ },
+ {
+ "epoch": 3.04,
+ "eval_loss": 1.213902473449707,
+ "eval_runtime": 17.4349,
+ "eval_samples_per_second": 5.736,
+ "eval_steps_per_second": 2.868,
+ "step": 39
+ },
+ {
+ "epoch": 3.08,
+ "grad_norm": 0.06420764327049255,
+ "learning_rate": 2.1085949060360654e-05,
+ "loss": 1.155,
+ "step": 40
+ },
+ {
+ "epoch": 3.16,
+ "grad_norm": 0.06513047218322754,
+ "learning_rate": 1.6283352173747145e-05,
+ "loss": 1.1463,
+ "step": 41
+ },
+ {
+ "epoch": 3.24,
+ "grad_norm": 0.06813672184944153,
+ "learning_rate": 1.2052624879351104e-05,
+ "loss": 1.1887,
+ "step": 42
+ },
+ {
+ "epoch": 3.24,
+ "eval_loss": 1.2127918004989624,
+ "eval_runtime": 17.5485,
+ "eval_samples_per_second": 5.699,
+ "eval_steps_per_second": 2.849,
+ "step": 42
+ },
+ {
+ "epoch": 3.32,
+ "grad_norm": 0.06876266747713089,
+ "learning_rate": 8.422667334494249e-06,
+ "loss": 1.1745,
+ "step": 43
+ },
+ {
+ "epoch": 3.4,
+ "grad_norm": 0.06679647415876389,
+ "learning_rate": 5.418275829936537e-06,
+ "loss": 1.0975,
+ "step": 44
+ },
+ {
+ "epoch": 3.48,
+ "grad_norm": 0.06702674925327301,
+ "learning_rate": 3.059973406066963e-06,
+ "loss": 1.1011,
+ "step": 45
+ },
+ {
+ "epoch": 3.48,
+ "eval_loss": 1.2114481925964355,
+ "eval_runtime": 17.6099,
+ "eval_samples_per_second": 5.679,
+ "eval_steps_per_second": 2.839,
+ "step": 45
+ },
+ {
+ "epoch": 3.56,
+ "grad_norm": 0.06805083155632019,
+ "learning_rate": 1.3638696597277679e-06,
+ "loss": 1.1265,
+ "step": 46
+ },
+ {
+ "epoch": 3.64,
+ "grad_norm": 0.06833863258361816,
+ "learning_rate": 3.415506993330153e-07,
+ "loss": 1.1453,
+ "step": 47
+ },
+ {
+ "epoch": 3.7199999999999998,
+ "grad_norm": 0.06925249099731445,
+ "learning_rate": 0.0,
+ "loss": 1.1887,
+ "step": 48
+ },
+ {
+ "epoch": 3.7199999999999998,
+ "eval_loss": 1.2115026712417603,
+ "eval_runtime": 17.3158,
+ "eval_samples_per_second": 5.775,
+ "eval_steps_per_second": 2.888,
+ "step": 48
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 48,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 12,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9974952442724352.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
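
This final `trainer_state.json` records the full run: 48 steps over 4 epochs with evaluation every 3 steps, and eval loss falling from roughly 1.49 at step 1 to roughly 1.21 at step 48. A sketch for pulling that curve out with the standard library, assuming a local copy of the file:

```python
# Sketch: extract the eval-loss curve from a checkpoint's trainer_state.json.
import json

with open("checkpoint-48/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

# Eval entries are the log_history records that carry an "eval_loss" key.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]:3d}  eval_loss {entry["eval_loss"]:.4f}')
```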
checkpoint-48/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b6f97a9990d98ad2e97e692ffff5d13e2f2644982eea955df1c8a971b386400
+ size 6008
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 22,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 1,
+ "quantization_config": {
+ "_load_in_4bit": false,
+ "_load_in_8bit": true,
+ "bnb_4bit_compute_dtype": "float32",
+ "bnb_4bit_quant_storage": "uint8",
+ "bnb_4bit_quant_type": "fp4",
+ "bnb_4bit_use_double_quant": false,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": false,
+ "load_in_8bit": true,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.3",
+ "use_cache": false,
+ "vocab_size": 32000
+ }
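
The embedded `quantization_config` records that the base weights were loaded in 8-bit through bitsandbytes (LLM.int8 threshold 6.0) for memory-efficient LoRA training. A sketch reproducing that load, assuming a CUDA GPU with the `bitsandbytes` package installed:

```python
# Sketch: load the base model in 8-bit, matching the quantization_config above.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
    quantization_config=bnb_config,
    device_map="auto",  # places the int8 weights on the available GPU(s)
)
```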
merged/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 22,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.42.3",
+ "use_cache": false,
+ "vocab_size": 32000
+ }
merged/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "max_length": 2048,
+ "pad_token_id": 0,
+ "transformers_version": "4.42.3"
+ }
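
`merged/` holds the base model with the LoRA deltas folded in, saved in bfloat16; the generation defaults above enable sampling up to a total length of 2048 tokens. A generation sketch, with the local directory layout as an assumption:

```python
# Sketch: generate from the merged (adapter-folded) model. "merged" matches
# this repo's folder name; running locally from the repo root is assumed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("merged", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("checkpoint-48")  # tokenizer files live here

inputs = tokenizer("The quick brown fox", return_tensors="pt")
# do_sample=True and max_length=2048 come from merged/generation_config.json.
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```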