morriszms committed on
Commit
51d585c
1 Parent(s): e720f23

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ stablelm-3b-4e1t-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,182 @@
+ ---
+ language:
+ - en
+ license: cc-by-sa-4.0
+ tags:
+ - causal-lm
+ - TensorBlock
+ - GGUF
+ datasets:
+ - tiiuae/falcon-refinedweb
+ - togethercomputer/RedPajama-Data-1T
+ - CarperAI/pilev2-dev
+ - bigcode/starcoderdata
+ - allenai/peS2o
+ base_model: stabilityai/stablelm-3b-4e1t
+ model-index:
+ - name: stablelm-3b-4e1t
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: AI2 Reasoning Challenge (25-Shot)
+       type: ai2_arc
+       config: ARC-Challenge
+       split: test
+       args:
+         num_few_shot: 25
+     metrics:
+     - type: acc_norm
+       value: 46.59
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: HellaSwag (10-Shot)
+       type: hellaswag
+       split: validation
+       args:
+         num_few_shot: 10
+     metrics:
+     - type: acc_norm
+       value: 75.94
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU (5-Shot)
+       type: cais/mmlu
+       config: all
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 45.23
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: TruthfulQA (0-shot)
+       type: truthful_qa
+       config: multiple_choice
+       split: validation
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: mc2
+       value: 37.2
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: Winogrande (5-shot)
+       type: winogrande
+       config: winogrande_xl
+       split: validation
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 71.19
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GSM8k (5-shot)
+       type: gsm8k
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 3.34
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=stabilityai/stablelm-3b-4e1t
+       name: Open LLM Leaderboard
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+   <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+   <div style="display: flex; flex-direction: column; align-items: flex-start;">
+     <p style="margin-top: 0.5em; margin-bottom: 0em;">
+       Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+     </p>
+   </div>
+ </div>
+
+ ## stabilityai/stablelm-3b-4e1t - GGUF
+
+ This repo contains GGUF format model files for [stabilityai/stablelm-3b-4e1t](https://huggingface.co/stabilityai/stablelm-3b-4e1t).
+
+ The files were quantized on machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
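+
+ If you build llama.cpp yourself, any checkout at or after that commit should be able to load these files. A minimal sketch, assuming a standard CMake toolchain:
+
+ ```shell
+ # Build llama.cpp at the referenced commit (b4011) and confirm the CLI runs
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ git checkout a6744e43e80f4be6398fc7733a01642c846dce1d
+ cmake -B build && cmake --build build --config Release -j
+ ./build/bin/llama-cli --version
+ ```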
+
+ ## Prompt template
+
+ This is a base (non-chat) model, so no chat prompt template is defined; prompt it with plain text.
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [stablelm-3b-4e1t-Q2_K.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q2_K.gguf) | Q2_K | 1.009 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [stablelm-3b-4e1t-Q3_K_S.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q3_K_S.gguf) | Q3_K_S | 1.168 GB | very small, high quality loss |
+ | [stablelm-3b-4e1t-Q3_K_M.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q3_K_M.gguf) | Q3_K_M | 1.296 GB | very small, high quality loss |
+ | [stablelm-3b-4e1t-Q3_K_L.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q3_K_L.gguf) | Q3_K_L | 1.405 GB | small, substantial quality loss |
+ | [stablelm-3b-4e1t-Q4_0.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q4_0.gguf) | Q4_0 | 1.498 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [stablelm-3b-4e1t-Q4_K_S.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q4_K_S.gguf) | Q4_K_S | 1.509 GB | small, greater quality loss |
+ | [stablelm-3b-4e1t-Q4_K_M.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q4_K_M.gguf) | Q4_K_M | 1.591 GB | medium, balanced quality - recommended |
+ | [stablelm-3b-4e1t-Q5_0.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q5_0.gguf) | Q5_0 | 1.809 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [stablelm-3b-4e1t-Q5_K_S.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q5_K_S.gguf) | Q5_K_S | 1.809 GB | large, low quality loss - recommended |
+ | [stablelm-3b-4e1t-Q5_K_M.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q5_K_M.gguf) | Q5_K_M | 1.856 GB | large, very low quality loss - recommended |
+ | [stablelm-3b-4e1t-Q6_K.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q6_K.gguf) | Q6_K | 2.138 GB | very large, extremely low quality loss |
+ | [stablelm-3b-4e1t-Q8_0.gguf](https://huggingface.co/tensorblock/stablelm-3b-4e1t-GGUF/tree/main/stablelm-3b-4e1t-Q8_0.gguf) | Q8_0 | 2.769 GB | very large, extremely low quality loss - not recommended |
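+
+ The table above is a snapshot. If you want to list the files currently published in the repo programmatically, one option is the public Hub model-info API; a sketch, assuming `curl` and `jq` are installed:
+
+ ```shell
+ # List the filenames in this repo via the Hugging Face Hub API
+ curl -s https://huggingface.co/api/models/tensorblock/stablelm-3b-4e1t-GGUF \
+   | jq -r '.siblings[].rfilename'
+ ```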
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face Hub CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/stablelm-3b-4e1t-GGUF --include "stablelm-3b-4e1t-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
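+
+ Optionally, you can check the download against the SHA-256 recorded in this repo's Git LFS pointers (shown further down this page). A minimal sketch, assuming the Q2_K file above and the `sha256sum` tool:
+
+ ```shell
+ # Compare the local file's hash with the `oid sha256:...` value from the LFS pointer
+ sha256sum MY_LOCAL_DIR/stablelm-3b-4e1t-Q2_K.gguf
+ # expected: 504c428bf3f0e32bc3521e7335835d5ef9712e1f5694586313eaf2f7e0068a6b (from the Q2_K pointer below)
+ ```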
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/stablelm-3b-4e1t-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
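+
+ Once a quant is downloaded, a quick smoke test is to generate a few tokens with llama.cpp's CLI. This is only a sketch: the binary path and the chosen quant are placeholders, and any llama.cpp build at or after the commit referenced above should work.
+
+ ```shell
+ # Generate 64 tokens from the Q4_K_M quant (plain-text prompt; this is a base model)
+ ./llama-cli -m MY_LOCAL_DIR/stablelm-3b-4e1t-Q4_K_M.gguf \
+   -p "The three primary colors are" \
+   -n 64
+ ```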
stablelm-3b-4e1t-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:504c428bf3f0e32bc3521e7335835d5ef9712e1f5694586313eaf2f7e0068a6b
+ size 1083756096
stablelm-3b-4e1t-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d450ed5a25944cf1245460dbb406dde62ecf27a755b889a155f1be9d4b9fe7a0
+ size 1508565056
stablelm-3b-4e1t-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fa761f81e0f43f82fb39876281b0ee10cae25f42fb9ea03715864df2fbbc261
+ size 1391419456
stablelm-3b-4e1t-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cca8f40fa2fdc2465c6b4a608f499081f5e82219d493ba22cce32b4c41cf660f
+ size 1254449216
stablelm-3b-4e1t-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93c759b52ed02fc449776bfc59a1ef5701817d0fbe506f5ee9f26d4b6cff358e
+ size 1608571456
stablelm-3b-4e1t-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dba9690565cdb7fbeba24e985a2cd6a548ef35f24c9745d3a633289d1c8358a
+ size 1708595776
stablelm-3b-4e1t-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9394ae76be31b205693f5d318720966bb62cb9fe476d8ecb766dd93a18f82a9
+ size 1620695616
stablelm-3b-4e1t-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ad0bfed0379ff3cae98c24f358c12814a114cd58c4f6858d234456bdc829f5d
+ size 1941862976
stablelm-3b-4e1t-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79c0ac937f29d528d3d25669482fb8675e8bf5ed99c57e1d0670c23ffa8c4de2
+ size 1993390656
stablelm-3b-4e1t-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:114aeb4368b585ffd60793219c2252a454574c71eae92c0d0907539ae0cb0d71
+ size 1941862976
stablelm-3b-4e1t-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c14bcb78dca0e147a988c68ccd6b5235ad48f6bb673c4476a1a6e860625710b
+ size 2295985216
stablelm-3b-4e1t-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a7151ab2cfaada4accb3744246181412e72f36420ccca03a3044486e327f539
+ size 2972926016