andrian-kr committed
Commit e697be5 · verified · 1 Parent(s): 5d19e20

End of training

README.md CHANGED
@@ -7,14 +7,14 @@ tags:
 - generated_from_trainer
 base_model: mistralai/Mistral-7B-Instruct-v0.2
 model-index:
-- name: mistral-7b-it-ua-gec
+- name: mistral-7b-ua-gec
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# mistral-7b-it-ua-gec
+# mistral-7b-ua-gec
 
 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on an unknown dataset.
 
@@ -36,14 +36,13 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size: 8
+- train_batch_size: 4
 - eval_batch_size: 8
 - seed: 42
-- gradient_accumulation_steps: 4
-- total_train_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
-- lr_scheduler_type: linear
-- num_epochs: 5
+- lr_scheduler_type: constant
+- lr_scheduler_warmup_ratio: 0.03
+- num_epochs: 1
 
 ### Training results
 
@@ -51,8 +50,8 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.9.1.dev0
-- Transformers 4.39.0.dev0
+- PEFT 0.10.1.dev0
+- Transformers 4.40.0.dev0
 - Pytorch 2.1.2
 - Datasets 2.16.0
 - Tokenizers 0.15.0
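The updated hyperparameters above map directly onto the `transformers` `TrainingArguments` API. A minimal sketch (not the author's actual training script; `output_dir` is a hypothetical name, not taken from the repo):

```python
from transformers import TrainingArguments

# Sketch of the README's updated hyperparameters as TrainingArguments.
# output_dir is hypothetical, not taken from the repo.
args = TrainingArguments(
    output_dir="mistral-7b-ua-gec",
    learning_rate=2e-4,              # learning_rate: 0.0002
    per_device_train_batch_size=4,   # train_batch_size: 4
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,                         # seed: 42
    lr_scheduler_type="constant",    # lr_scheduler_type: constant
    warmup_ratio=0.03,               # lr_scheduler_warmup_ratio: 0.03
    num_train_epochs=1,              # num_epochs: 1
    adam_beta1=0.9,                  # optimizer: Adam with betas=(0.9,0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,               # epsilon=1e-08
)
```

Note that with `lr_scheduler_type="constant"` the warmup ratio is effectively unused; in `transformers` it is `"constant_with_warmup"` that applies it, so the combination recorded in the card is worth double-checking.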
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "/kaggle/input/mistral/pytorch/7b-instruct-v0.1-hf/1",
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ee7a79115e4fdf1a35765bc1744b1adf5f787484866d9d50378d3db9a7951eb
+oid sha256:4c1737928126f90d8d42507dee56bfa0df9d6a672314c94308398fab39c0e509
 size 23111032
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,7 +1,6 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -36,7 +35,7 @@
   "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "padding_side": "left",
+  "padding_side": "right",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59a3d64117e197a8c1c9cd77fea4f22973966c91427e868cc435ef925c99f659
+oid sha256:ab118c0a55bb33d5f1ab9aed5bbc01cbd2e7d141d301cdb9e5a3b002800c92c1
 size 4920