morriszms committed
Commit e8930bf
1 Parent(s): 70bf446

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-8b-code-base-4k-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
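
These rules route each quantized `.gguf` file through Git LFS, so only a small pointer is stored in the repository itself. For reference, rules of this form are normally generated with `git lfs track` rather than written by hand; a minimal sketch (the wildcard invocation below is illustrative and not part of this commit, which lists each file individually):

```shell
# Append a "filter=lfs diff=lfs merge=lfs -text" rule for GGUF files
# to .gitattributes in the current repository.
git lfs track "*.gguf"
```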
README.md ADDED
@@ -0,0 +1,163 @@
---
pipeline_tag: text-generation
inference: false
license: apache-2.0
datasets:
- codeparrot/github-code-clean
- bigcode/starcoderdata
- open-web-math/open-web-math
- math-ai/StackMathQA
metrics:
- code_eval
library_name: transformers
tags:
- code
- granite
- TensorBlock
- GGUF
base_model: ibm-granite/granite-8b-code-base-4k
model-index:
- name: granite-8b-code-base-4k
  results:
  - task:
      type: text-generation
    dataset:
      name: MBPP
      type: mbpp
    metrics:
    - type: pass@1
      value: 42.2
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: MBPP+
      type: evalplus/mbppplus
    metrics:
    - type: pass@1
      value: 49.6
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: HumanEvalSynthesis(Python)
      type: bigcode/humanevalpack
    metrics:
    - type: pass@1
      value: 43.9
      name: pass@1
    - type: pass@1
      value: 52.4
      name: pass@1
    - type: pass@1
      value: 56.1
      name: pass@1
    - type: pass@1
      value: 31.7
      name: pass@1
    - type: pass@1
      value: 43.9
      name: pass@1
    - type: pass@1
      value: 32.9
      name: pass@1
    - type: pass@1
      value: 23.5
      name: pass@1
    - type: pass@1
      value: 32.3
      name: pass@1
    - type: pass@1
      value: 25.0
      name: pass@1
    - type: pass@1
      value: 23.2
      name: pass@1
    - type: pass@1
      value: 28.0
      name: pass@1
    - type: pass@1
      value: 19.5
      name: pass@1
    - type: pass@1
      value: 22.6
      name: pass@1
    - type: pass@1
      value: 35.4
      name: pass@1
    - type: pass@1
      value: 38.4
      name: pass@1
    - type: pass@1
      value: 37.2
      name: pass@1
    - type: pass@1
      value: 28.7
      name: pass@1
    - type: pass@1
      value: 15.2
      name: pass@1
---

<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
  <div style="display: flex; flex-direction: column; align-items: flex-start;">
    <p style="margin-top: 0.5em; margin-bottom: 0em;">
      Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
    </p>
  </div>
</div>

## ibm-granite/granite-8b-code-base-4k - GGUF

This repo contains GGUF format model files for [ibm-granite/granite-8b-code-base-4k](https://huggingface.co/ibm-granite/granite-8b-code-base-4k).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

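Once a file is downloaded (see the instructions below), it can be run with any llama.cpp build at or after the pinned commit. A minimal sketch using llama.cpp's `llama-cli` binary; the model path, prompt, and token count here are illustrative:

```shell
# Plain-text completion with one of the quantized files.
# -m: path to the downloaded .gguf; -n: number of tokens to generate.
./llama-cli -m ./granite-8b-code-base-4k-Q4_K_M.gguf \
  -p "def fibonacci(n):" \
  -n 128
```

As a base code model, it continues the prompt rather than following chat-style instructions.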
## Prompt template

No prompt template is defined for this model: it is a base (pre-trained, non-instruct) checkpoint, so input is treated as plain text to be completed rather than as chat turns.

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [granite-8b-code-base-4k-Q2_K.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q2_K.gguf) | Q2_K | 2.852 GB | smallest, significant quality loss - not recommended for most purposes |
| [granite-8b-code-base-4k-Q3_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q3_K_S.gguf) | Q3_K_S | 3.304 GB | very small, high quality loss |
| [granite-8b-code-base-4k-Q3_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q3_K_M.gguf) | Q3_K_M | 3.674 GB | very small, high quality loss |
| [granite-8b-code-base-4k-Q3_K_L.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q3_K_L.gguf) | Q3_K_L | 3.993 GB | small, substantial quality loss |
| [granite-8b-code-base-4k-Q4_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q4_0.gguf) | Q4_0 | 4.276 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [granite-8b-code-base-4k-Q4_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q4_K_S.gguf) | Q4_K_S | 4.305 GB | small, greater quality loss |
| [granite-8b-code-base-4k-Q4_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q4_K_M.gguf) | Q4_K_M | 4.548 GB | medium, balanced quality - recommended |
| [granite-8b-code-base-4k-Q5_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q5_0.gguf) | Q5_0 | 5.190 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [granite-8b-code-base-4k-Q5_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q5_K_S.gguf) | Q5_K_S | 5.190 GB | large, low quality loss - recommended |
| [granite-8b-code-base-4k-Q5_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q5_K_M.gguf) | Q5_K_M | 5.330 GB | large, very low quality loss - recommended |
| [granite-8b-code-base-4k-Q6_K.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q6_K.gguf) | Q6_K | 6.161 GB | very large, extremely low quality loss |
| [granite-8b-code-base-4k-Q8_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-base-4k-GGUF/tree/main/granite-8b-code-base-4k-Q8_0.gguf) | Q8_0 | 7.977 GB | very large, extremely low quality loss - not recommended |

## Downloading instruction

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/granite-8b-code-base-4k-GGUF --include "granite-8b-code-base-4k-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/granite-8b-code-base-4k-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
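
A downloaded file can also be checked against the sha256 recorded in its Git LFS pointer; the pointers for all quants are listed below. A sketch using the Q2_K file as the example:

```shell
# Compare the local digest with the oid from the file's LFS pointer.
sha256sum MY_LOCAL_DIR/granite-8b-code-base-4k-Q2_K.gguf
# expected: b66fece8ebd5097944cfe01258476403c096603cde19a4e7e2d9449682bcde36
```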
granite-8b-code-base-4k-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b66fece8ebd5097944cfe01258476403c096603cde19a4e7e2d9449682bcde36
size 3062070560

granite-8b-code-base-4k-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0882bb368e8dd54c988fd668c82a2a11da39733467d01cf03dbc4eabcf01ca70
size 4287724832

granite-8b-code-base-4k-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:639146bb4f84e26ff91e583007e7d8bbb53184e94d2182416819a41ded0d1200
size 3944840480

granite-8b-code-base-4k-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7569da2522ef51b9bb2fad9c8e160c9525e5c5f4ccc1dddf56dd0642abc35ca0
size 3548085536

granite-8b-code-base-4k-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d70d6959e0b8b8bf15cd7f9ed81ca6bd8dbbd32f9926efaf12999b7db0b0311
size 4590894368

granite-8b-code-base-4k-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94bca484c67e8e2379fcc6aeb8d714036c3f40af593c8c0e39f1d43b8b813ccc
size 4882857248

granite-8b-code-base-4k-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59559852699a88dcd4655d8a18e9522927e6dcbbb36fe8043ea2b29e71c68b53
size 4622351648

granite-8b-code-base-4k-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc7ecb09d8925efb2a2f209f89f3719a8d338b04a107799142256ef8fc79a34b
size 5572361504

granite-8b-code-base-4k-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c110d4cfe0a0dbaa5331857f74834654e77a8ad2b38b75c482dcb9939792fed
size 5722766624

granite-8b-code-base-4k-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0c07ccecc94fc68b0a3a0fbbadf8eb72bba9913cec6f77f815b6dcdcd3452b1
size 5572361504

granite-8b-code-base-4k-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef6a558a2d3818a3db1c3119cea6a1a3ea17fe82b6fcfc32118c56f51e81e0e8
size 6615170336

granite-8b-code-base-4k-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72f61de4c1c5d463d77102953e937365df5c7cb2484bb7da15d46ad03949c139
size 8565521696