morriszms committed
Commit 25df371 · verified · 1 Parent(s): 411559c

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeMate-v0.1-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
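For reference, `filter=lfs` attribute lines like the ones added above are normally produced by `git lfs track` rather than edited by hand. A minimal sketch, assuming Git LFS is installed and the commands are run from the repository root:

```shell
# Illustrative sketch only: how the .gitattributes entries above are
# typically generated. Run from the repository root with Git LFS installed.
git lfs install                # set up the LFS filters for this user
git lfs track "*.gguf"         # appends a matching filter=lfs line to .gitattributes
git add .gitattributes         # stage the updated attributes file
```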
CodeMate-v0.1-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58f3b00a6fa32de34cd4b73acb02262a0646a21f83f5b9c7aa7a8e2e0adf3b13
+ size 12505690848
CodeMate-v0.1-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30c9d503beebabeee698bf077106aa0eef963fa637b9e137156bb29e0dab52a3
+ size 17771524832
CodeMate-v0.1-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dae5bfb581d3a6f3bb0fae5cd4b5cdd8d6bef4bca005f52795e0e0a8f0192a95
+ size 16306139872
CodeMate-v0.1-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c761f9c2460786f8d510cfb199689e93a090b1aa57d214908e0565e2df162240
+ size 14605349600
CodeMate-v0.1-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6afa17c1f28b862f67ac91368ac8e278aeba810da3aca0bc9813f14a97e570a6
+ size 19052049120
CodeMate-v0.1-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28b41009a67dba0c90d62ebc49e40a8bc4e8d1ace67cfe2dc04ad75c3689553a
+ size 20219900640
CodeMate-v0.1-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:556fed42a27913d851ac7fe2830ffd28725a55bedc37f81285f88af86acf7fe6
+ size 19191509728
CodeMate-v0.1-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0754c02d35d242e2e46a4077af3c008e034792081b06f50140629665e7a89ef
+ size 23237178080
CodeMate-v0.1-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4546ef78a5bd14306cdf932ecb22073a0963b47406fc739c7ed55d5d0a6d5b1
+ size 23838798560
CodeMate-v0.1-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0b2d5cced7721d71a52e62555deafa6dbe4c68ced71ae9b95265cda8b685f8f
+ size 23237178080
CodeMate-v0.1-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ad7ef00ebac25b0767fab7855e9bb66c40340834a952fbc22595cc2c5ee083c
+ size 27683877600
CodeMate-v0.1-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5003177f1e8ff8a86d9f35fefda80e80d2fc562edaede41a99083b6e8113173
+ size 35856052960
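Each block above is a Git LFS pointer: the repository stores only the `version`, `oid`, and `size` fields, while the actual weights live in LFS storage. As an illustrative check (the filename below is the Q2_K file from above; adjust the path to wherever you downloaded it), the SHA-256 digest of a downloaded file should match the recorded `oid`:

```shell
# Sketch: verify a downloaded file against the oid recorded in its LFS pointer.
# Use `shasum -a 256` instead of `sha256sum` on macOS.
sha256sum CodeMate-v0.1-Q2_K.gguf
# Expected digest, taken from the Q2_K pointer above:
# 58f3b00a6fa32de34cd4b73acb02262a0646a21f83f5b9c7aa7a8e2e0adf3b13
```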
README.md ADDED
@@ -0,0 +1,196 @@
+ ---
+ language:
+ - en
+ license: llama2
+ library_name: transformers
+ tags:
+ - CodeMate
+ - Code
+ - CodeLLaMa
+ - TensorBlock
+ - GGUF
+ pipeline_tag: text-generation
+ base_model: codemateai/CodeMate-v0.1
+ model-index:
+ - name: CodeMate-v0.1
+   results:
+   - task:
+       type: text-generation
+     dataset:
+       name: HumanEval
+       type: openai_humaneval
+     metrics:
+     - type: pass@1
+       value: 74.9%
+       name: pass@1
+       verified: false
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: AI2 Reasoning Challenge (25-Shot)
+       type: ai2_arc
+       config: ARC-Challenge
+       split: test
+       args:
+         num_few_shot: 25
+     metrics:
+     - type: acc_norm
+       value: 55.55
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: HellaSwag (10-Shot)
+       type: hellaswag
+       split: validation
+       args:
+         num_few_shot: 10
+     metrics:
+     - type: acc_norm
+       value: 78.03
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU (5-Shot)
+       type: cais/mmlu
+       config: all
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 55.31
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: TruthfulQA (0-shot)
+       type: truthful_qa
+       config: multiple_choice
+       split: validation
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: mc2
+       value: 48.64
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: Winogrande (5-shot)
+       type: winogrande
+       config: winogrande_xl
+       split: validation
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 72.61
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GSM8k (5-shot)
+       type: gsm8k
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 40.18
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=codemateai/CodeMate-v0.1
+       name: Open LLM Leaderboard
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## codemateai/CodeMate-v0.1 - GGUF
+
+ This repo contains GGUF format model files for [codemateai/CodeMate-v0.1](https://huggingface.co/codemateai/CodeMate-v0.1).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
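If you want to pin a local build to the referenced revision, the following sketch clones and builds llama.cpp at that commit (the CMake invocation shown is just one reasonable choice):

```shell
# Sketch: build llama.cpp at the commit referenced above (b4242).
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
git checkout a6744e43e80f4be6398fc7733a01642c846dce1d   # commit b4242
cmake -B build
cmake --build build --config Release                     # binaries land in build/bin
```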
+ <div style="text-align: left; margin: 20px 0;">
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
+ Run them on the TensorBlock client using your local machine ↗
+ </a>
+ </div>
+
+ ## Prompt template
+
+ ```
+
+ ```
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [CodeMate-v0.1-Q2_K.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q2_K.gguf) | Q2_K | 12.506 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [CodeMate-v0.1-Q3_K_S.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q3_K_S.gguf) | Q3_K_S | 14.605 GB | very small, high quality loss |
+ | [CodeMate-v0.1-Q3_K_M.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q3_K_M.gguf) | Q3_K_M | 16.306 GB | very small, high quality loss |
+ | [CodeMate-v0.1-Q3_K_L.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q3_K_L.gguf) | Q3_K_L | 17.772 GB | small, substantial quality loss |
+ | [CodeMate-v0.1-Q4_0.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q4_0.gguf) | Q4_0 | 19.052 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [CodeMate-v0.1-Q4_K_S.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q4_K_S.gguf) | Q4_K_S | 19.192 GB | small, greater quality loss |
+ | [CodeMate-v0.1-Q4_K_M.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q4_K_M.gguf) | Q4_K_M | 20.220 GB | medium, balanced quality - recommended |
+ | [CodeMate-v0.1-Q5_0.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q5_0.gguf) | Q5_0 | 23.237 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [CodeMate-v0.1-Q5_K_S.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q5_K_S.gguf) | Q5_K_S | 23.237 GB | large, low quality loss - recommended |
+ | [CodeMate-v0.1-Q5_K_M.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q5_K_M.gguf) | Q5_K_M | 23.839 GB | large, very low quality loss - recommended |
+ | [CodeMate-v0.1-Q6_K.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q6_K.gguf) | Q6_K | 27.684 GB | very large, extremely low quality loss |
+ | [CodeMate-v0.1-Q8_0.gguf](https://huggingface.co/tensorblock/CodeMate-v0.1-GGUF/blob/main/CodeMate-v0.1-Q8_0.gguf) | Q8_0 | 35.856 GB | very large, extremely low quality loss - not recommended |
+
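Once one of the files above has been downloaded (see the instructions below), it can be run directly with llama.cpp's command-line tool. A minimal sketch, assuming a local llama.cpp build (as in the earlier build sketch) and the Q4_K_M file in the current directory; the prompt, context size, and token count are placeholders to adjust:

```shell
# Sketch: one-shot generation with llama.cpp's CLI against a downloaded file.
./build/bin/llama-cli \
  -m CodeMate-v0.1-Q4_K_M.gguf \
  -p "Write a Python function that checks whether a number is prime." \
  -n 256 \
  -c 4096
```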
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/CodeMate-v0.1-GGUF --include "CodeMate-v0.1-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/CodeMate-v0.1-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
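The same files can also be served over an OpenAI-compatible HTTP endpoint using llama.cpp's bundled server. A rough sketch, assuming the build from earlier; the model path, context size, and port are placeholders to adjust:

```shell
# Sketch: serve a downloaded GGUF file via llama.cpp's built-in server.
./build/bin/llama-server \
  -m MY_LOCAL_DIR/CodeMate-v0.1-Q4_K_M.gguf \
  -c 4096 \
  --host 127.0.0.1 \
  --port 8080

# The server then accepts OpenAI-style requests, for example:
curl http://127.0.0.1:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "Write a bubble sort in C."}]}'
```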