acrobatlm committed (verified)
Commit 9ed40f3 · Parent: 9f1eb6f

acrobatlm/mistral-7binstruct-summary-100s

README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.4760
+- Loss: 1.4779
 
 ## Model description
 
@@ -52,8 +52,8 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.6937        | 0.22  | 25   | 1.5235          |
-| 1.5335        | 0.43  | 50   | 1.4760          |
+| 1.6991        | 0.22  | 25   | 1.5629          |
+| 1.5556        | 0.43  | 50   | 1.4779          |
 
 
 ### Framework versions
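The README describes a PEFT adapter fine-tuned on top of Mistral-7B-Instruct-v0.2. A minimal loading sketch, assuming the repo id shown on this page and that the adapter weights in the repo are intact; `PeftModel` is the standard peft entry point, and dtype/device options are left out:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model named in the README diff above.
base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")

# Adapter repo id taken from this commit page.
model = PeftModel.from_pretrained(base, "acrobatlm/mistral-7binstruct-summary-100s")
```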
adapter_config.json CHANGED
@@ -10,18 +10,18 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 64,
+  "lora_alpha": 32,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 32,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "v_proj"
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa21a3ce54e4891cb95361184a37afb7ab33fd7e3b71e71eb669f3666b229924
-size 54543184
+oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
+size 48
runs/Apr18_13-07-05_02c2438b5883/events.out.tfevents.1713445639.02c2438b5883.334.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59b7c5a0030a347df0c54e25959c9aaaba0aa072e694677c6c927fd8ccdc582d
+size 7037
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:caf24f7bcef5ec932455dbcbcac88f366e846ea83c657bb969c450a1f8785289
+oid sha256:1db73b3e4c5624152527ba9989b5aa678eb274c40f26ac111b55a0540e66b8d9
 size 4920
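The binary files in this commit (adapter_model.safetensors, the tfevents log, training_args.bin) are stored as Git LFS pointers in the three-line format shown: a spec version, a sha256 oid, and a byte size. A small sketch of how such a pointer could be parsed and a downloaded blob checked against it; the helper names are hypothetical, and only the pointer layout comes from the diffs above:

```python
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>": version, oid, size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    assert algo == "sha256"  # the only algorithm used in the pointers above
    return {"oid": digest, "size": int(fields["size"])}

def blob_matches_pointer(path: str, pointer: dict) -> bool:
    # Stream the blob so large weight files never sit fully in memory.
    h, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return size == pointer["size"] and h.hexdigest() == pointer["oid"]
```

For example, per the last diff above, the new training_args.bin blob should be exactly 4920 bytes and hash to the sha256 oid beginning 1db73b3e.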