Casper0508 committed
Commit 40dad40
1 parent: 3a1e068

End of training

README.md ADDED
@@ -0,0 +1,100 @@
---
license: llama2
base_model: meta-llama/Llama-2-7b-chat-hf
tags:
- generated_from_trainer
model-index:
- name: MSc_llama2_finetuned_model_secondData10
  results: []
library_name: peft
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# MSc_llama2_finetuned_model_secondData10

This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7118

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

The following `bitsandbytes` quantization config was used during training (a hedged reconstruction in code follows the list):
- quant_method: bitsandbytes
- _load_in_8bit: False
- _load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: True
- bnb_4bit_compute_dtype: bfloat16
- load_in_4bit: True
- load_in_8bit: False
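
As a rough guide, this maps onto `transformers`' `BitsAndBytesConfig` as sketched below. The underscore-prefixed `_load_in_*` entries are internal state rather than constructor arguments; this is a reconstruction, not the author's original script.

```python
# Hedged reconstruction of the quantization config above; assumes
# transformers >= 4.38 with bitsandbytes installed.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # 4-bit weights
    bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,  # matmuls computed in bfloat16
    llm_int8_threshold=6.0,                 # int8 outlier threshold (library default)
)
```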

### Training hyperparameters

The following hyperparameters were used during training (see the sketch after this list):
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- training_steps: 250
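
Expressed as `transformers.TrainingArguments`, these settings look roughly like the sketch below; the `output_dir` is assumed, and the Adam betas and epsilon listed above are the library defaults, so they are omitted.

```python
# Hedged reconstruction of the hyperparameters above (transformers 4.38 API);
# not the author's exact training script.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="MSc_llama2_finetuned_model_secondData10",  # assumed name
    learning_rate=3e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,  # total train batch size: 32 * 2 = 64
    max_steps=250,                  # "training_steps" above
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    seed=42,
)
```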

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.8835        | 1.33  | 10   | 3.4352          |
| 2.9529        | 2.67  | 20   | 2.3780          |
| 1.991         | 4.0   | 30   | 1.6911          |
| 1.5061        | 5.33  | 40   | 1.2670          |
| 1.0666        | 6.67  | 50   | 0.8670          |
| 0.8464        | 8.0   | 60   | 0.8088          |
| 0.7622        | 9.33  | 70   | 0.7478          |
| 0.6869        | 10.67 | 80   | 0.7055          |
| 0.6336        | 12.0  | 90   | 0.6840          |
| 0.5789        | 13.33 | 100  | 0.6749          |
| 0.5518        | 14.67 | 110  | 0.6685          |
| 0.5159        | 16.0  | 120  | 0.6657          |
| 0.4894        | 17.33 | 130  | 0.6743          |
| 0.4674        | 18.67 | 140  | 0.6720          |
| 0.4496        | 20.0  | 150  | 0.6806          |
| 0.4292        | 21.33 | 160  | 0.6883          |
| 0.421         | 22.67 | 170  | 0.6910          |
| 0.4088        | 24.0  | 180  | 0.6956          |
| 0.3988        | 25.33 | 190  | 0.7014          |
| 0.3898        | 26.67 | 200  | 0.7065          |
| 0.3827        | 28.0  | 210  | 0.7091          |
| 0.3819        | 29.33 | 220  | 0.7104          |
| 0.3778        | 30.67 | 230  | 0.7117          |
| 0.3803        | 32.0  | 240  | 0.7126          |
| 0.3804        | 33.33 | 250  | 0.7118          |

### Framework versions

- PEFT 0.4.0
- Transformers 4.38.2
- PyTorch 2.4.0+cu121
- Datasets 2.13.1
- Tokenizers 0.15.2
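
A minimal inference sketch for this adapter follows. The repository id `Casper0508/MSc_llama2_finetuned_model_secondData10` is inferred from the committer name and model name, and access to the gated Llama-2 base weights is assumed.

```python
# Minimal loading sketch; repo id and 4-bit settings are assumptions
# based on this card, not verified against the author's code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

adapter_id = "Casper0508/MSc_llama2_finetuned_model_secondData10"  # assumed repo id
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter
```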
adapter_config.json ADDED
@@ -0,0 +1,23 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 128,
  "lora_dropout": 0.1,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "revision": null,
  "target_modules": [
    "q_proj",
    "k_proj",
    "v_proj",
    "out_proj"
  ],
  "task_type": "CAUSAL_LM"
}
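
This file corresponds to a `peft.LoraConfig` roughly like the sketch below (peft 0.4.0 API). One caveat worth flagging: Llama-2's attention output projection is normally named `o_proj`, so the `out_proj` entry, kept verbatim here, may not match any module in the base model.

```python
# Hedged reconstruction of adapter_config.json as a peft LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,             # LoRA rank
    lora_alpha=128,   # scaling factor = lora_alpha / r = 2.0
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],  # verbatim from the JSON
    task_type="CAUSAL_LM",
)
```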
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d730df41800d07ef359aae131abc72c2b60cd1b742dff6f2d4a86f6075c178ed
size 201352664
emissions.csv ADDED
@@ -0,0 +1,2 @@
timestamp,experiment_id,project_name,duration,emissions,energy_consumed,country_name,country_iso_code,region,on_cloud,cloud_provider,cloud_region
2024-07-24T22:31:56,200f4ccd-4b13-4dae-ad39-fa215487bc86,codecarbon,2113.7502529621124,0.13254571399834814,0.1972086417122769,United Kingdom,GBR,scotland,N,,
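
The column layout matches `codecarbon`'s default CSV output (duration in seconds, emissions in kg CO2eq, energy in kWh). A sketch of how such a file is typically produced; the wrapped `trainer.train()` call is hypothetical:

```python
# Hedged sketch of codecarbon tracking around a training run; not taken
# from the author's code.
from codecarbon import EmissionsTracker

tracker = EmissionsTracker(project_name="codecarbon", output_file="emissions.csv")
tracker.start()
try:
    trainer.train()  # hypothetical training call
finally:
    tracker.stop()  # writes a row like the one above and returns kg CO2eq
```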
runs/Jul24_21-56-39_msc-modeltrain-pod/events.out.tfevents.1721858202.msc-modeltrain-pod.197.0 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d5aa652df549400add7e1e047b5d1cb17ad43518b4e31d9c5f2884bde3449929
size 17465
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
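
The `chat_template` above is the standard Llama-2 chat format. A small usage sketch, assuming `tokenizer` was loaded from this repository (the message contents are illustrative):

```python
# Hedged usage sketch for the chat template above (transformers >= 4.34,
# satisfied by the 4.38.2 listed in the card).
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
# -> "<s>[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nHello! [/INST]"
```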
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69e5ba45a6fc39c463f300d263891490ef6f06cc8aff58b2ff480f3abe183cde
size 4984