aisuko committed on
Commit 686c44d · verified · 1 Parent(s): 6737ce8
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
+base_model: microsoft/Phi-3.5-mini-instruct
 library_name: peft
 license: mit
-base_model: microsoft/Phi-3.5-mini-instruct
 tags:
 - generated_from_trainer
 model-index:
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [microsoft/Phi-3.5-mini-instruct](https://huggingface.co/microsoft/Phi-3.5-mini-instruct) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.9570
+- Loss: 0.7631
 - Exact Match Ratio: 0.0
 - Sequence Accuracy: 0.0
 
@@ -38,10 +38,10 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 2e-05
-- train_batch_size: 16
-- eval_batch_size: 16
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
-- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - num_epochs: 3
 
@@ -49,15 +49,15 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Exact Match Ratio | Sequence Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:-----------------:|:-----------------:|
-| 11.7855 | 1.0 | 25 | 8.9031 | 0.0 | 0.0 |
-| 6.224 | 2.0 | 50 | 3.5512 | 0.0 | 0.0 |
-| 2.4789 | 3.0 | 75 | 1.9570 | 0.0 | 0.0 |
+| 8.1148 | 1.0 | 50 | 1.6942 | 0.0 | 0.0 |
+| 1.2769 | 2.0 | 100 | 0.8926 | 0.0 | 0.0 |
+| 0.8832 | 3.0 | 150 | 0.7631 | 0.0 | 0.0 |
 
 
 ### Framework versions
 
-- PEFT 0.13.2
-- Transformers 4.46.3
-- Pytorch 2.4.0
-- Datasets 3.1.0
-- Tokenizers 0.20.3
+- PEFT 0.14.0
+- Transformers 4.47.0
+- Pytorch 2.3.1.post300
+- Datasets 2.2.1
+- Tokenizers 0.21.0
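The updated hyperparameters above map directly onto `transformers.TrainingArguments`. A minimal sketch of the post-commit setup, assuming standard `transformers.Trainer` usage; the `output_dir` is a placeholder, not part of this commit:

```python
# Sketch of the training configuration described in the README after this
# commit; output_dir is a placeholder.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs",            # placeholder path
    learning_rate=2e-5,
    per_device_train_batch_size=8,   # 16 before this commit
    per_device_eval_batch_size=8,    # 16 before this commit
    seed=42,
    optim="adamw_torch",             # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```

Halving the batch size from 16 to 8 doubles the steps per epoch (50/100/150 instead of 25/50/75), which matches the new training-log table.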
adapter_config.json CHANGED
@@ -3,6 +3,8 @@
   "auto_mapping": null,
   "base_model_name_or_path": "microsoft/Phi-3.5-mini-instruct",
   "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -11,6 +13,7 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 16,
+  "lora_bias": false,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -23,10 +26,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "qkv_proj",
-    "gate_up_proj",
     "down_proj",
-    "o_proj"
+    "qkv_proj",
+    "o_proj",
+    "gate_up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27a7cbcb66e6e504dbc358a729696bdab2dae4d22de840c2832cfdbc1c55e746
+oid sha256:f15bd442fe3e4d1b515b51f3d64667e033497c8f4e4a635f2a227d6d8078cec8
 size 888703384
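The adapter weights change (new LFS oid) but stay the same size, as expected for retrained weights with an identical shape. A minimal sketch of loading them on top of the base model; the Hub repository id below is a placeholder for this model's actual id:

```python
# Sketch of loading the updated adapter; repo id is a placeholder.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-mini-instruct", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "your-username/your-adapter-repo")
```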
runs/Dec06_21-54-03_default/events.out.tfevents.1733522044.default.1672.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1c4344e7c26553c5fd19bb007e2d0979beb5abc3a2e541dc386440cee72e924
+size 8620
runs/Dec06_22-01-43_default/events.out.tfevents.1733522503.default.1672.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bebf8de3902aafecf7fcdeeb25e7f517d0c9ca810950d4467191faf9013b897
+size 8407
runs/Dec06_22-02-36_default/events.out.tfevents.1733522557.default.1812.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f78ab36464de3c3c080e47d5d98735bb6e2764a9ba94d1f29badb6a5c52b23c5
+size 8407
runs/Dec06_22-03-48_default/events.out.tfevents.1733522628.default.1977.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75754a447b08f5ada39fb6ae4e18a10c48be2bd011da25946472b520aa01b9cb
+size 8407
runs/Dec06_22-05-18_default/events.out.tfevents.1733522718.default.2104.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e12ce4ad6722e80f88e9ae15680926035df7d9052cd9e40e7e9eba8d7a98e77
+size 10551
runs/Dec06_22-05-18_default/events.out.tfevents.1733522920.default.2104.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aec63876df9534d0b085749e3fc5074b79f4fb53ec11a428e5f6fe1b1c7d394a
+size 481
tokenizer_config.json CHANGED
@@ -120,6 +120,7 @@
   "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "legacy": false,
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c9f41011fcf7ec3fe9753744e688c1d2abc7dbdbb48b0e9ea7b284cfb56b5f4
-size 5240
+oid sha256:fb9a6430bbb63abb7745d79404d46877785336574cf79a89fd40d253e0990ea9
+size 5304
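`training_args.bin` is a pickled `TrainingArguments`, so the new oid and slightly larger size reflect the hyperparameter changes above. A minimal sketch of inspecting it locally; since it is a pickle, only load files you trust:

```python
# Sketch of inspecting the pickled TrainingArguments from this commit.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size)  # expected: 8 after this commit
```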