yerkekz committed
Commit a04fec8
1 Parent(s): 67187e2

mistral-instruct-2-finetuned-clinical
README.md CHANGED
@@ -5,7 +5,7 @@ tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: mistralai/Mistral-7B-v0.1
+base_model: mistralai/Mistral-7B-Instruct-v0.2
 model-index:
 - name: results
   results: []
@@ -16,9 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unknown dataset.
+This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.9724
+- Loss: 0.9955
 
 ## Model description
 
@@ -46,52 +46,24 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 0.03
-- num_epochs: 1
+- num_epochs: 2
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.0248        | 0.03  | 50   | 1.0145          |
-| 1.0168        | 0.06  | 100  | 1.0078          |
-| 1.008         | 0.09  | 150  | 1.0058          |
-| 1.0082        | 0.12  | 200  | 1.0030          |
-| 0.9846        | 0.14  | 250  | 1.0005          |
-| 0.9807        | 0.17  | 300  | 0.9998          |
-| 0.9968        | 0.2   | 350  | 0.9992          |
-| 0.9834        | 0.23  | 400  | 0.9967          |
-| 1.0267        | 0.26  | 450  | 0.9953          |
-| 1.0119        | 0.29  | 500  | 0.9937          |
-| 0.9759        | 0.32  | 550  | 0.9939          |
-| 0.9978        | 0.35  | 600  | 0.9921          |
-| 1.0145        | 0.38  | 650  | 0.9901          |
-| 1.0064        | 0.4   | 700  | 0.9897          |
-| 0.9949        | 0.43  | 750  | 0.9890          |
-| 0.9936        | 0.46  | 800  | 0.9865          |
-| 0.9944        | 0.49  | 850  | 0.9852          |
-| 0.9819        | 0.52  | 900  | 0.9845          |
-| 0.9991        | 0.55  | 950  | 0.9826          |
-| 0.9874        | 0.58  | 1000 | 0.9812          |
-| 0.981         | 0.61  | 1050 | 0.9798          |
-| 0.9807        | 0.64  | 1100 | 0.9789          |
-| 0.9639        | 0.67  | 1150 | 0.9776          |
-| 0.9645        | 0.69  | 1200 | 0.9767          |
-| 0.9788        | 0.72  | 1250 | 0.9758          |
-| 0.9823        | 0.75  | 1300 | 0.9751          |
-| 0.9906        | 0.78  | 1350 | 0.9745          |
-| 0.9536        | 0.81  | 1400 | 0.9738          |
-| 0.9635        | 0.84  | 1450 | 0.9732          |
-| 0.9754        | 0.87  | 1500 | 0.9729          |
-| 0.9785        | 0.9   | 1550 | 0.9727          |
-| 0.9828        | 0.93  | 1600 | 0.9725          |
-| 0.9951        | 0.95  | 1650 | 0.9724          |
-| 0.983         | 0.98  | 1700 | 0.9724          |
+| 1.0284        | 0.29  | 500  | 1.0100          |
+| 1.0079        | 0.58  | 1000 | 1.0023          |
+| 0.9938        | 0.87  | 1500 | 0.9934          |
+| 0.8683        | 1.16  | 2000 | 1.0001          |
+| 0.8724        | 1.45  | 2500 | 0.9977          |
+| 0.8777        | 1.74  | 3000 | 0.9955          |
 
 
 ### Framework versions
 
-- PEFT 0.9.0
-- Transformers 4.38.2
+- PEFT 0.10.0
+- Transformers 4.39.1
 - Pytorch 2.2.1+cu121
 - Datasets 2.18.0
 - Tokenizers 0.15.2
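This commit retargets the adapter from the base `mistralai/Mistral-7B-v0.1` to the instruction-tuned `mistralai/Mistral-7B-Instruct-v0.2` and retrains for 2 epochs. Below is a minimal inference sketch for the updated adapter; the adapter repo id is assumed from the commit message (`yerkekz/mistral-instruct-2-finetuned-clinical`) and may differ, and the prompt is hypothetical.

```python
# Minimal sketch: load the new base model and attach the fine-tuned LoRA adapter.
# The adapter repo id is an assumption taken from the commit message.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-Instruct-v0.2"                # new base per this commit
adapter_id = "yerkekz/mistral-instruct-2-finetuned-clinical"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)  # attaches the LoRA weights

# Hypothetical clinical-style prompt in Mistral's [INST] format.
prompt = "[INST] Summarize the patient's chief complaint. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```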
adapter_config.json CHANGED
@@ -1,11 +1,12 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
@@ -19,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "q_proj",
     "gate_proj",
+    "q_proj",
+    "up_proj",
     "down_proj",
     "o_proj",
-    "up_proj",
-    "k_proj"
+    "k_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5a2351fb8d9722413f891c353d531fbddecc049984d48bf64c4c063c144c1a7
+oid sha256:659e580c931fb02b0220472c5b34093687e55dfe0ea630fa5ee88cc0595cb3ba
 size 671149168
tokenizer_config.json CHANGED
@@ -29,6 +29,7 @@
   },
   "additional_special_tokens": [],
   "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e5f0012504e5862af840860bf7a91b6c215c14b0ad783b9adef9fe6669bb8300
-size 4856
+oid sha256:986c5cabedb6dd6f7a8edad563b7a9d396a1e1e440c167b6475c92cd8bf0a2b5
+size 4920