Commit 5d6fdcd (verified) by mtzig · Parent: 36134be

Model save

Files changed (3):
  1. README.md +113 -0
  2. adapter_config.json +29 -0
  3. adapter_model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,113 @@
+ ---
+ library_name: peft
+ base_model: peiyi9979/math-shepherd-mistral-7b-prm
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: v3c_mistral_lora
+   results: []
+ ---
+
+ # v3c_mistral_lora
+
+ This model is a LoRA (PEFT) fine-tuned version of [peiyi9979/math-shepherd-mistral-7b-prm](https://huggingface.co/peiyi9979/math-shepherd-mistral-7b-prm) on an unknown dataset.
+ It achieves the following results on the evaluation set (a hypothetical `compute_metrics` sketch for these numbers follows the list):
+ - Loss: 0.2939
+ - Accuracy: 0.8636
+ - Precision: 0.8421
+ - Recall: 0.6324
+ - F1: 0.7223
+
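+ The four classification metrics suggest a binary step-label evaluation. The evaluation code is not part of this commit, so the following `compute_metrics` is only a minimal sketch of how such numbers are typically produced; the logits-to-prediction step is an assumption:
+
+ ```python
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ def compute_metrics(eval_pred):
+     # eval_pred is the (predictions, labels) pair the HF Trainer passes in.
+     predictions, labels = eval_pred
+     preds = predictions.argmax(axis=-1)  # assumed: one logit vector per example
+     precision, recall, f1, _ = precision_recall_fscore_support(
+         labels, preds, average="binary"
+     )
+     return {
+         "accuracy": accuracy_score(labels, preds),
+         "precision": precision,
+         "recall": recall,
+         "f1": f1,
+     }
+ ```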
+ ## Model description
+
+ This is a LoRA adapter (r=16, lora_alpha=32, dropout 0.05, applied to the `q_proj` and `v_proj` attention projections; see `adapter_config.json` in this commit) trained on top of the Math-Shepherd Mistral-7B process reward model. The equivalent `peft.LoraConfig` is sketched below.
+
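+ For readability, here is the adapter configuration reconstructed as a `peft.LoraConfig`; it mirrors the values in `adapter_config.json` and is not the original training code:
+
+ ```python
+ from peft import LoraConfig
+
+ # Mirrors the values in adapter_config.json from this commit.
+ lora_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     target_modules=["q_proj", "v_proj"],  # attention query/value projections
+     task_type="CAUSAL_LM",
+ )
+ ```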
+ ## Intended uses & limitations
+
+ More information needed
+
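+ A minimal loading sketch (the adapter repo id is a placeholder; replace it with wherever this adapter is published):
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ base_id = "peiyi9979/math-shepherd-mistral-7b-prm"
+ adapter_id = "mtzig/v3c_mistral_lora"  # placeholder repo id
+
+ tokenizer = AutoTokenizer.from_pretrained(base_id)
+ base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
+ model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter
+ model.eval()
+ ```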
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a `TrainingArguments` sketch follows the list):
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 765837
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64 (8 per device × 4 GPUs × 2 accumulation steps)
+ - total_eval_batch_size: 32 (8 per device × 4 GPUs)
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
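+ A `transformers.TrainingArguments` sketch reconstructed from the list above (not the original training script; `output_dir` is a placeholder):
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Reconstructed from the hyperparameter list above.
+ args = TrainingArguments(
+     output_dir="v3c_mistral_lora",   # placeholder
+     learning_rate=2e-5,
+     per_device_train_batch_size=8,   # x 4 GPUs x 2 accumulation steps = 64 effective
+     per_device_eval_batch_size=8,
+     gradient_accumulation_steps=2,
+     seed=765837,
+     optim="adamw_torch",
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     num_train_epochs=1,
+ )
+ ```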
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | No log        | 0      | 0    | 0.6026          | 0.7339   | 0.6       | 0.1542 | 0.2453 |
+ | 0.6391        | 0.0248 | 20   | 0.5954          | 0.7361   | 0.6119    | 0.1621 | 0.2562 |
+ | 0.5891        | 0.0495 | 40   | 0.5570          | 0.7550   | 0.6176    | 0.3320 | 0.4319 |
+ | 0.4606        | 0.0743 | 60   | 0.4962          | 0.7794   | 0.6667    | 0.4269 | 0.5205 |
+ | 0.4229        | 0.0990 | 80   | 0.4433          | 0.7905   | 0.6649    | 0.5099 | 0.5772 |
+ | 0.3836        | 0.1238 | 100  | 0.4297          | 0.8160   | 0.7605    | 0.5020 | 0.6048 |
+ | 0.3363        | 0.1485 | 120  | 0.3676          | 0.8381   | 0.7892    | 0.5771 | 0.6667 |
+ | 0.2483        | 0.1733 | 140  | 0.3537          | 0.8404   | 0.8225    | 0.5494 | 0.6588 |
+ | 0.2803        | 0.1980 | 160  | 0.3468          | 0.8415   | 0.8481    | 0.5296 | 0.6521 |
+ | 0.2782        | 0.2228 | 180  | 0.3493          | 0.8237   | 0.8310    | 0.4664 | 0.5975 |
+ | 0.2174        | 0.2475 | 200  | 0.3329          | 0.8492   | 0.8232    | 0.5889 | 0.6866 |
+ | 0.2965        | 0.2723 | 220  | 0.3314          | 0.8448   | 0.8343    | 0.5573 | 0.6682 |
+ | 0.2379        | 0.2970 | 240  | 0.3736          | 0.8149   | 0.8468    | 0.4150 | 0.5570 |
+ | 0.1587        | 0.3218 | 260  | 0.3315          | 0.8404   | 0.8609    | 0.5138 | 0.6436 |
+ | 0.1769        | 0.3465 | 280  | 0.3329          | 0.8370   | 0.8313    | 0.5257 | 0.6441 |
+ | 0.1984        | 0.3713 | 300  | 0.3211          | 0.8537   | 0.8712    | 0.5613 | 0.6827 |
+ | 0.2109        | 0.3960 | 320  | 0.3064          | 0.8570   | 0.8333    | 0.6126 | 0.7062 |
+ | 0.1961        | 0.4208 | 340  | 0.3035          | 0.8625   | 0.8413    | 0.6285 | 0.7195 |
+ | 0.2369        | 0.4455 | 360  | 0.2959          | 0.8747   | 0.8365    | 0.6877 | 0.7549 |
+ | 0.2355        | 0.4703 | 380  | 0.3176          | 0.8537   | 0.8380    | 0.5929 | 0.6944 |
+ | 0.1538        | 0.4950 | 400  | 0.3098          | 0.8503   | 0.8554    | 0.5613 | 0.6778 |
+ | 0.2261        | 0.5198 | 420  | 0.2964          | 0.8659   | 0.8235    | 0.6640 | 0.7352 |
+ | 0.1894        | 0.5446 | 440  | 0.3085          | 0.8625   | 0.8772    | 0.5929 | 0.7075 |
+ | 0.2089        | 0.5693 | 460  | 0.3103          | 0.8592   | 0.8621    | 0.5929 | 0.7026 |
+ | 0.225         | 0.5941 | 480  | 0.2933          | 0.8670   | 0.8519    | 0.6364 | 0.7285 |
+ | 0.2837        | 0.6188 | 500  | 0.2955          | 0.8636   | 0.8283    | 0.6482 | 0.7273 |
+ | 0.2046        | 0.6436 | 520  | 0.2943          | 0.8647   | 0.8429    | 0.6364 | 0.7252 |
+ | 0.1548        | 0.6683 | 540  | 0.3003          | 0.8636   | 0.8421    | 0.6324 | 0.7223 |
+ | 0.1626        | 0.6931 | 560  | 0.2982          | 0.8625   | 0.8603    | 0.6087 | 0.7130 |
+ | 0.2065        | 0.7178 | 580  | 0.2877          | 0.8636   | 0.8186    | 0.6601 | 0.7309 |
+ | 0.1423        | 0.7426 | 600  | 0.3031          | 0.8603   | 0.8757    | 0.5850 | 0.7014 |
+ | 0.1743        | 0.7673 | 620  | 0.2920          | 0.8659   | 0.8511    | 0.6324 | 0.7256 |
+ | 0.1281        | 0.7921 | 640  | 0.2912          | 0.8659   | 0.8474    | 0.6364 | 0.7269 |
+ | 0.1879        | 0.8168 | 660  | 0.2938          | 0.8625   | 0.8449    | 0.6245 | 0.7182 |
+ | 0.1741        | 0.8416 | 680  | 0.2965          | 0.8625   | 0.8486    | 0.6206 | 0.7169 |
+ | 0.1429        | 0.8663 | 700  | 0.2911          | 0.8647   | 0.8359    | 0.6443 | 0.7277 |
+ | 0.2218        | 0.8911 | 720  | 0.2950          | 0.8625   | 0.8449    | 0.6245 | 0.7182 |
+ | 0.1608        | 0.9158 | 740  | 0.2995          | 0.8603   | 0.8508    | 0.6087 | 0.7097 |
+ | 0.2056        | 0.9406 | 760  | 0.2967          | 0.8592   | 0.8424    | 0.6126 | 0.7094 |
+ | 0.2127        | 0.9653 | 780  | 0.2944          | 0.8625   | 0.8413    | 0.6285 | 0.7195 |
+ | 0.2252        | 0.9901 | 800  | 0.2939          | 0.8636   | 0.8421    | 0.6324 | 0.7223 |
+
+
+ ### Framework versions
+
+ - PEFT 0.13.2
+ - Transformers 4.46.0
+ - PyTorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "peiyi9979/math-shepherd-mistral-7b-prm",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15b33c1cef97883822b6cc845f3ac13a56f0a3834e7818aa3fe275a9c54d2b27
+ size 27280152