morriszms committed
Commit 31bdcd9
1 Parent(s): 30be3c2

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.1-qwen2-7b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,190 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ library_name: transformers
+ tags:
+ - chat
+ - qwen
+ - qwen2
+ - finetune
+ - chatml
+ - OpenHermes-2.5
+ - HelpSteer2
+ - Orca
+ - SlimOrca
+ - TensorBlock
+ - GGUF
+ base_model: MaziyarPanahi/calme-2.1-qwen2-7b
+ datasets:
+ - nvidia/HelpSteer2
+ - teknium/OpenHermes-2.5
+ - microsoft/orca-math-word-problems-200k
+ - Open-Orca/SlimOrca
+ pipeline_tag: text-generation
+ inference: false
+ model_creator: MaziyarPanahi
+ quantized_by: MaziyarPanahi
+ model-index:
+ - name: calme-2.1-qwen2-7b
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 38.16
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 31.01
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 21.07
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 5.26
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 13.8
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 29.92
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.1-qwen2-7b
+       name: Open LLM Leaderboard
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## MaziyarPanahi/calme-2.1-qwen2-7b - GGUF
+
+ This repo contains GGUF format model files for [MaziyarPanahi/calme-2.1-qwen2-7b](https://huggingface.co/MaziyarPanahi/calme-2.1-qwen2-7b).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
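+ If you want to build llama.cpp yourself at (or after) that commit, a minimal sketch of a standard CMake build is shown below; backend-specific flags (CUDA, Metal, etc.) are omitted and may be needed on your hardware.
+
+ ```shell
+ # Build llama.cpp at the commit referenced above (b4011).
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ git checkout a6744e43e80f4be6398fc7733a01642c846dce1d
+ cmake -B build
+ cmake --build build --config Release -j
+ # Binaries such as llama-cli end up under build/bin/
+ ```
+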
+ ## Prompt template
+
+ ```
+ <|im_start|>system
+ {system_prompt}<|im_end|>
+ <|im_start|>user
+ {prompt}<|im_end|>
+ <|im_start|>assistant
+ ```
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [calme-2.1-qwen2-7b-Q2_K.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q2_K.gguf) | Q2_K | 2.809 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [calme-2.1-qwen2-7b-Q3_K_S.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q3_K_S.gguf) | Q3_K_S | 3.253 GB | very small, high quality loss |
+ | [calme-2.1-qwen2-7b-Q3_K_M.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q3_K_M.gguf) | Q3_K_M | 3.547 GB | very small, high quality loss |
+ | [calme-2.1-qwen2-7b-Q3_K_L.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q3_K_L.gguf) | Q3_K_L | 3.808 GB | small, substantial quality loss |
+ | [calme-2.1-qwen2-7b-Q4_0.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q4_0.gguf) | Q4_0 | 4.127 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [calme-2.1-qwen2-7b-Q4_K_S.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q4_K_S.gguf) | Q4_K_S | 4.152 GB | small, greater quality loss |
+ | [calme-2.1-qwen2-7b-Q4_K_M.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q4_K_M.gguf) | Q4_K_M | 4.361 GB | medium, balanced quality - recommended |
+ | [calme-2.1-qwen2-7b-Q5_0.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q5_0.gguf) | Q5_0 | 4.950 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [calme-2.1-qwen2-7b-Q5_K_S.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q5_K_S.gguf) | Q5_K_S | 4.950 GB | large, low quality loss - recommended |
+ | [calme-2.1-qwen2-7b-Q5_K_M.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q5_K_M.gguf) | Q5_K_M | 5.071 GB | large, very low quality loss - recommended |
+ | [calme-2.1-qwen2-7b-Q6_K.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q6_K.gguf) | Q6_K | 5.825 GB | very large, extremely low quality loss |
+ | [calme-2.1-qwen2-7b-Q8_0.gguf](https://huggingface.co/tensorblock/calme-2.1-qwen2-7b-GGUF/tree/main/calme-2.1-qwen2-7b-Q8_0.gguf) | Q8_0 | 7.542 GB | very large, extremely low quality loss - not recommended |
+
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/calme-2.1-qwen2-7b-GGUF --include "calme-2.1-qwen2-7b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/calme-2.1-qwen2-7b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
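+
+ After downloading, you can optionally check a file's integrity against the `oid sha256:...` value in the matching LFS pointer entry shown further down this commit. A minimal sketch (on macOS, use `shasum -a 256` instead of `sha256sum`):
+
+ ```shell
+ # The printed hash should match the LFS pointer for that file, e.g. for Q2_K:
+ # db84dc7c6936f886f21fabc17c1d2c20cd75a62012eb9011a6eea3d213067ad0
+ sha256sum MY_LOCAL_DIR/calme-2.1-qwen2-7b-Q2_K.gguf
+ ```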
calme-2.1-qwen2-7b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db84dc7c6936f886f21fabc17c1d2c20cd75a62012eb9011a6eea3d213067ad0
+ size 3015938368
calme-2.1-qwen2-7b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33872796d11ee0afb0a5c5d0ddbc90382224df06a27a14eb7b3281094ffb51db
+ size 4088457536
calme-2.1-qwen2-7b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2bdea7566536af1827d7d34ea584eb18b7ffbe9624dad6b60d8907fd737f9ef
+ size 3808389440
calme-2.1-qwen2-7b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:472f3b205d4c61f3a5e8fc116ebf777a56e05b7aebe8d1906d3566bbb0da2e70
+ size 3492366656
calme-2.1-qwen2-7b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0e6ee9b3714b01c1372d94eae92e934c427f22a9bad5221957eb93a5225c863
+ size 4431388992
calme-2.1-qwen2-7b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34d4e08561efca75339aa5f9d58567711adb8a864ce542cf61cdc7f0bc42125f
+ size 4683071808
calme-2.1-qwen2-7b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:357d29552a9e244f67e90d9160f7d0cd77f2e17f520305152fbc61c3bbefaeca
+ size 4457767232
calme-2.1-qwen2-7b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:754b8a2b1d04522f6e02834ccd1e6eeaa26a9a64826ce283b0b5dca114e1c1ae
+ size 5315174720
calme-2.1-qwen2-7b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f162d1d890994d73aa6bd86cd8ede9463565202d552e84a05de5e951002079b3
+ size 5444829504
calme-2.1-qwen2-7b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:755920f9ccef3c52f0e9332bd6da3d0b92d1ce0ead462838ee135a0709e33793
+ size 5315174720
calme-2.1-qwen2-7b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d012a80bad4853a93d771c2f93ac5a1697dd43c820733e48cf575f5e4f84b590
+ size 6254197056
calme-2.1-qwen2-7b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a7cd280f5d976d0d1ddf2889541d120c28f7c90c0dfe2b7e8669884a73afa58
+ size 8098523456