morriszms committed
Commit d41256a · verified · 1 Parent(s): 393460d

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ magnum-v4-27b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,190 @@
---
language:
- en
license: gemma
library_name: transformers
tags:
- chat
- TensorBlock
- GGUF
pipeline_tag: text-generation
datasets:
- anthracite-org/c2_logs_16k_llama_v1.1
- NewEden/Claude-Instruct-5K
- anthracite-org/kalo-opus-instruct-22k-no-refusal
- Epiculous/SynthRP-Gens-v1.1-Filtered-n-Cleaned
- lodrick-the-lafted/kalo-opus-instruct-3k-filtered
- anthracite-org/nopm_claude_writing_fixed
- Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
- anthracite-org/kalo_opus_misc_240827
- anthracite-org/kalo_misc_part2
base_model: anthracite-org/magnum-v4-27b
model-index:
- name: magnum-v4-27b
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 34.54
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 40.96
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 16.16
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 16.0
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 12.82
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 37.51
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=anthracite-org/magnum-v4-27b
      name: Open LLM Leaderboard
---

<div style="width: auto; margin-left: auto; margin-right: auto">
  <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
  <div style="display: flex; flex-direction: column; align-items: flex-start;">
    <p style="margin-top: 0.5em; margin-bottom: 0em;">
      Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
    </p>
  </div>
</div>

## anthracite-org/magnum-v4-27b - GGUF

This repo contains GGUF format model files for [anthracite-org/magnum-v4-27b](https://huggingface.co/anthracite-org/magnum-v4-27b).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

<div style="text-align: left; margin: 20px 0;">
  <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
    Run them on the TensorBlock client using your local machine ↗
  </a>
</div>
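
If you build llama.cpp yourself, checking out the repository at or after the referenced commit is sufficient for these files. The following is a minimal sketch, not the only supported procedure; the CMake flags are illustrative (drop `-DGGML_CUDA=ON` for a CPU-only build):

```shell
# Build llama.cpp from the commit referenced above (tagged b4011)
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
git checkout a6744e43e80f4be6398fc7733a01642c846dce1d
cmake -B build -DGGML_CUDA=ON
cmake --build build --config Release -j
```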

## Prompt template

```
<|im_start|>system
{system_prompt}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```
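
For a quick local test, the template above can be passed directly to llama.cpp. This is a minimal sketch, assuming a build at or newer than the referenced commit with the `llama-cli` binary on your PATH and a quantized file already downloaded (see the instructions below); the file name and generation settings are examples only:

```shell
# Single-turn ChatML-style prompt matching the template above; -e expands the \n escapes
llama-cli -m ./magnum-v4-27b-Q4_K_M.gguf \
  -e -p "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWrite a short haiku about autumn.<|im_end|>\n<|im_start|>assistant\n" \
  -n 256 --temp 0.8 -ngl 99   # -ngl offloads layers to the GPU if one is available
```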

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [magnum-v4-27b-Q2_K.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q2_K.gguf) | Q2_K | 10.450 GB | smallest, significant quality loss - not recommended for most purposes |
| [magnum-v4-27b-Q3_K_S.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q3_K_S.gguf) | Q3_K_S | 12.169 GB | very small, high quality loss |
| [magnum-v4-27b-Q3_K_M.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q3_K_M.gguf) | Q3_K_M | 13.425 GB | very small, high quality loss |
| [magnum-v4-27b-Q3_K_L.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q3_K_L.gguf) | Q3_K_L | 14.519 GB | small, substantial quality loss |
| [magnum-v4-27b-Q4_0.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q4_0.gguf) | Q4_0 | 15.628 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [magnum-v4-27b-Q4_K_S.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q4_K_S.gguf) | Q4_K_S | 15.739 GB | small, greater quality loss |
| [magnum-v4-27b-Q4_K_M.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q4_K_M.gguf) | Q4_K_M | 16.645 GB | medium, balanced quality - recommended |
| [magnum-v4-27b-Q5_0.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q5_0.gguf) | Q5_0 | 18.884 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [magnum-v4-27b-Q5_K_S.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q5_K_S.gguf) | Q5_K_S | 18.884 GB | large, low quality loss - recommended |
| [magnum-v4-27b-Q5_K_M.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q5_K_M.gguf) | Q5_K_M | 19.408 GB | large, very low quality loss - recommended |
| [magnum-v4-27b-Q6_K.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q6_K.gguf) | Q6_K | 22.344 GB | very large, extremely low quality loss |
| [magnum-v4-27b-Q8_0.gguf](https://huggingface.co/tensorblock/magnum-v4-27b-GGUF/blob/main/magnum-v4-27b-Q8_0.gguf) | Q8_0 | 28.937 GB | very large, extremely low quality loss - not recommended |


## Downloading instructions

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/magnum-v4-27b-GGUF --include "magnum-v4-27b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```
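
To confirm the file arrived intact, you can compare its SHA-256 digest with the `oid sha256:` value in the corresponding LFS pointer shown further down this page; for example, on Linux (the path is illustrative):

```shell
# Digest should match the oid sha256 listed for this file in the LFS pointer
sha256sum MY_LOCAL_DIR/magnum-v4-27b-Q2_K.gguf
```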

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/magnum-v4-27b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
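
Once a file is downloaded, one convenient way to run it is llama.cpp's built-in server, which exposes an OpenAI-compatible HTTP API. This is a minimal sketch, assuming a recent llama.cpp build with the `llama-server` binary available; the context size, port, and GPU-offload settings are examples, not requirements:

```shell
# Start an OpenAI-compatible server on port 8080 with the Q4_K_M quant
llama-server -m MY_LOCAL_DIR/magnum-v4-27b-Q4_K_M.gguf -c 4096 -ngl 99 --port 8080

# Query it from another terminal; the server applies the model's chat template to the messages
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello!"}]}'
```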
magnum-v4-27b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d345e3a43b25217be5190503329a2a945363f4496a751c1314deb99d448f77b
size 10449575744
magnum-v4-27b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07a279b803d9769759fb5efcf747a59771cafe8c920eb8bc6d1f4ff3514e4383
size 14519361344
magnum-v4-27b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26de41c256428ebba41213f76062137510231e1c3b7d4633c15223d4437dcfba
size 13424648000
magnum-v4-27b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57e866f74f55696e93a7d0ab4620de3f59de4e7d121fe9c8ddc2bb9dac9592ce
size 12169060160
magnum-v4-27b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7c7e71836526da7c4aaae07516b668fafd9aff13573b39727b325be9a837348
size 15628377920
magnum-v4-27b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1181696b249d0e6db56de8ad72bcfe3871c185af27e00b9c79d1c420c94018ee
size 16645381952
magnum-v4-27b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6682f82397973f40b02a5b8cd4d71a64ce07762f78de267b5dfce675eb317cfe
size 15739264832
magnum-v4-27b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1d8dd3de9f11ca2a9c5872bdf067b6e60820f0454b7af47bbb06ac84d4bd4a2
size 18884206400
magnum-v4-27b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05eb663b11125514d3d04ef25212f783c5145dc02f21e51356ceaefc897c439a
size 19408117568
magnum-v4-27b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd861faafafd8f56aace98805b05e6edfaaf2eed1075d71b97c9e4d981d2fa30
size 18884206400
magnum-v4-27b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e89a08f9184ba973730d842b1f7cd0ac8dcd4fc919dd45757077ca6fa90aab6
size 22343524160
magnum-v4-27b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf6935c08b1f09508fce196e94216c072d81266ad8e0ed1a96305819b836af4b
size 28937387840