martimfasantos committed (verified)
Commit ce21d1d · 1 Parent(s): ea5aa08

End of training

README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ license: apache-2.0
+ base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ tags:
+ - xcomet_xl_xxl
+ - generated_from_trainer
+ model-index:
+ - name: sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny
+
+ This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the Unbabel/TowerAligned-v0.1 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.7027
+ - Nll Loss: 0.7027
+ - Logps/best: -69.8057
+ - Rewards/chosen: 3.3548
+ - Rewards/rejected: 2.9021
+ - Rewards/accuracies: 0.6820
+ - Rewards/margins: 0.4527
+ - Logps/rejected: -68.4018
+ - Logps/chosen: -69.8057
+ - Logits/rejected: -1.7405
+ - Logits/chosen: -1.8685
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.95) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Nll Loss | Logps/best | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:----------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.8021 | 0.1063 | 100 | 0.7701 | 0.7701 | -76.4054 | 2.6949 | 2.3664 | 0.6740 | 0.3284 | -73.7585 | -76.4054 | -1.7763 | -1.9055 |
+ | 0.7255 | 0.2127 | 200 | 0.7367 | 0.7367 | -73.1546 | 3.0200 | 2.6460 | 0.6820 | 0.3740 | -70.9634 | -73.1546 | -1.7637 | -1.8923 |
+ | 0.6979 | 0.3190 | 300 | 0.7232 | 0.7232 | -71.8372 | 3.1517 | 2.7499 | 0.6660 | 0.4018 | -69.9242 | -71.8372 | -1.7452 | -1.8727 |
+ | 0.7072 | 0.4254 | 400 | 0.7137 | 0.7137 | -70.8879 | 3.2466 | 2.8103 | 0.6960 | 0.4363 | -69.3198 | -70.8879 | -1.7467 | -1.8743 |
+ | 0.6958 | 0.5317 | 500 | 0.7085 | 0.7085 | -70.3945 | 3.2960 | 2.8412 | 0.6920 | 0.4548 | -69.0110 | -70.3945 | -1.7476 | -1.8756 |
+ | 0.7216 | 0.6381 | 600 | 0.7055 | 0.7055 | -70.0888 | 3.3265 | 2.8702 | 0.6900 | 0.4564 | -68.7212 | -70.0888 | -1.7377 | -1.8651 |
+ | 0.7531 | 0.7444 | 700 | 0.7038 | 0.7038 | -69.9193 | 3.3435 | 2.8863 | 0.6860 | 0.4572 | -68.5603 | -69.9193 | -1.7392 | -1.8670 |
+ | 0.6531 | 0.8508 | 800 | 0.7028 | 0.7028 | -69.8163 | 3.3538 | 2.9020 | 0.6800 | 0.4518 | -68.4026 | -69.8163 | -1.7410 | -1.8690 |
+ | 0.6801 | 0.9571 | 900 | 0.7027 | 0.7027 | -69.8057 | 3.3548 | 2.9021 | 0.6820 | 0.4527 | -68.4018 | -69.8057 | -1.7405 | -1.8685 |
+
+
+ ### Framework versions
+
+ - Transformers 4.41.2
+ - Pytorch 2.1.2
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
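
For reference, the hyperparameters listed in the card map onto the standard `transformers` `TrainingArguments` fields roughly as follows. This is a sketch, not the training script from this commit; `output_dir` and the surrounding trainer wiring are assumptions.

```python
# Sketch: the hyperparameters above expressed as TrainingArguments.
# output_dir is a placeholder; the other values mirror the list in the card.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny",  # placeholder
    learning_rate=1e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=4,
    seed=42,
    gradient_accumulation_steps=16,  # 1 per device x 16 steps = total batch 16
    adam_beta1=0.9,
    adam_beta2=0.95,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=1,
)
```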
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.2",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
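
Note that `num_key_value_heads` (4) is smaller than `num_attention_heads` (32), i.e. the model uses grouped-query attention with 8 query heads per KV head. A small sketch of inspecting this config with `AutoConfig` (the repo id is inferred from the commit author and model name, so treat it as an assumption):

```python
# Sketch: load the architecture config and inspect a few fields.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "martimfasantos/sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny"  # assumed repo id
)
print(config.model_type)           # "llama"
print(config.hidden_size)          # 2048
print(config.num_attention_heads)  # 32
print(config.num_key_value_heads)  # 4 -> grouped-query attention
```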
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 2048,
+   "pad_token_id": 0,
+   "transformers_version": "4.41.2"
+ }
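
These defaults (BOS 1, EOS 2, pad 0, `max_length` 2048) are picked up automatically by `model.generate`. A hedged end-to-end sketch, again assuming the inferred repo id:

```python
# Sketch: generate with the defaults from generation_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "martimfasantos/sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello, world!", return_tensors="pt")
# generate() reads bos/eos/pad ids and max_length from generation_config.json
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```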
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5287e3487293b88a1ef59bb75f20c287e4a94eb2fd8cdabfeaacb1614d15a18
+ size 2200119864
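
The three lines above are a Git LFS pointer, not the weights themselves: the actual ~2.2 GB safetensors blob is stored by its SHA-256 digest. A quick way to verify a downloaded file against the `oid` (the local path is a placeholder):

```python
# Sketch: verify a downloaded model.safetensors against the LFS oid above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("model.safetensors"))  # placeholder path
# expected: d5287e3487293b88a1ef59bb75f20c287e4a94eb2fd8cdabfeaacb1614d15a18
```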
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
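
In practice these settings mean a BOS token is prepended but no EOS appended (`add_bos_token` true, `add_eos_token` false), and batches are padded on the left. A sketch of observing this behavior, with the same assumed repo id as above:

```python
# Sketch: observe the tokenizer settings from tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "martimfasantos/sft-xcomet_xl_xxl-chosen-10lp-shuff-full-tiny"  # assumed repo id
)

ids = tokenizer("hello")["input_ids"]
print(ids[0] == tokenizer.bos_token_id)   # True: <s> is prepended
print(ids[-1] == tokenizer.eos_token_id)  # False: no </s> appended

batch = tokenizer(["a", "a longer sentence"], padding=True, return_tensors="pt")
print(batch["input_ids"][0])  # padding ids appear on the left (padding_side="left")
```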
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b792b7de2a40119330cae1921206769323cf6c529e535b3cdb8b1e97002aa3c7
+ size 5240
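
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside a run. A hedged sketch of inspecting it locally (unpickling requires `transformers` installed; newer PyTorch versions default to `weights_only=True`, so the flag is set explicitly):

```python
# Sketch: inspect the saved TrainingArguments (a pickled Python object).
import torch

args = torch.load("training_args.bin", weights_only=False)  # local path placeholder
print(args.learning_rate)      # 1e-05
print(args.num_train_epochs)   # 1
print(args.lr_scheduler_type)  # linear
```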