morriszms committed on
Commit 6fdca70 · verified · 1 Parent(s): 38cb5ca

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ FinMatcha-3B-Instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
FinMatcha-3B-Instruct-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55bc8769bd77d9d3335595c1c677f2f5aa11b4cabd4c8c4c2c20ec7662239542
+ size 1363935872
FinMatcha-3B-Instruct-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:affb75eed0ea19e495e6941d25eba5b37d29f4b0107e6e7673ae62fcde8f3c57
+ size 1815347840
FinMatcha-3B-Instruct-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cbc434380f98a545600652c04f8555ca340d3474166361a4cc25dc8e550fb9c
+ size 1687159424
FinMatcha-3B-Instruct-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d237501f01460fd4c1134dc3bd431e938571063611e40600cfdceeeac55c0348
+ size 1542849152
FinMatcha-3B-Instruct-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f79f483717d7614fe65d39970e3ed26458bb5460979fec59d4eab63cc3de5236
+ size 1917190784
FinMatcha-3B-Instruct-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96599b01769e46939509e08ba9b83d6bec67e8a6e0d65f870cee7e8b98f4d891
+ size 2019377792
FinMatcha-3B-Instruct-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b9cbdc7f31b015219708e0aa5dc1656dbf3c7d9472fe8fdf4937a293c068f06
+ size 1928200832
FinMatcha-3B-Instruct-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cc79e9507b5bee394e10c4a1f88eafc3a497a1346db06733d92b4535acba27b
+ size 2269512320
FinMatcha-3B-Instruct-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b5d31cc1ccfa2dd1d0872478c8482f7117bfcb5ee0321d9bd8d494f75fa9134
+ size 2322154112
FinMatcha-3B-Instruct-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2db25cb3ccaaf60377556641e1b125984ee5b459daaa8a438645e2721227c026
+ size 2269512320
FinMatcha-3B-Instruct-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3a04445c7e5ac91c8a3200aea02a44bee3c4cf5d9b64620ed69149173b45c30
+ size 2643853952
FinMatcha-3B-Instruct-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9bea0650d3bd786b97be14a96c4b84683d465cdc1e07b858e6abaca80c76e2b
+ size 3421899392
README.md ADDED
@@ -0,0 +1,186 @@
+ ---
+ language:
+ - id
+ license: apache-2.0
+ tags:
+ - Indonesian
+ - Chat
+ - Instruct
+ - TensorBlock
+ - GGUF
+ base_model: xMaulana/FinMatcha-3B-Instruct
+ datasets:
+ - NekoFi/alpaca-gpt4-indonesia-cleaned
+ pipeline_tag: text-generation
+ model-index:
+ - name: FinMatcha-3B-Instruct
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 75.48
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 23.19
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 12.39
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 2.57
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 5.02
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 24.24
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=xMaulana/FinMatcha-3B-Instruct
+       name: Open LLM Leaderboard
+ ---
+ 
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+   <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+   <div style="display: flex; flex-direction: column; align-items: flex-start;">
+     <p style="margin-top: 0.5em; margin-bottom: 0em;">
+       Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram group</a>, and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+     </p>
+   </div>
+ </div>
+ 
+ ## xMaulana/FinMatcha-3B-Instruct - GGUF
+ 
+ This repo contains GGUF format model files for [xMaulana/FinMatcha-3B-Instruct](https://huggingface.co/xMaulana/FinMatcha-3B-Instruct).
+ 
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+ 
+ <div style="text-align: left; margin: 20px 0;">
+   <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
+     Run them on the TensorBlock client using your local machine ↗
+   </a>
+ </div>
+ 
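+ If you want to run the files with llama.cpp itself, a minimal build sketch is shown below (assuming a standard CMake toolchain; the CUDA flag in the comment is only an example and should be adjusted for your backend):
+ 
+ ```shell
+ # Clone llama.cpp and build its CLI tools (CPU-only by default;
+ # add e.g. -DGGML_CUDA=ON to the first cmake call for NVIDIA GPUs)
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ cmake -B build
+ cmake --build build --config Release
+ ```
+ 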
+ ## Prompt template
+ 
+ ```
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
+ 
+ Cutting Knowledge Date: December 2023
+ Today Date: 27 Nov 2024
+ 
+ {system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
+ 
+ {prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+ ```
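+ 
+ With a llama.cpp build available, the sketch below runs one of the quants interactively (the binary path assumes the CMake build above; the system prompt and temperature are placeholder values; `-cnv` enables conversation mode, which applies the model's chat template for you):
+ 
+ ```shell
+ # Interactive chat with the Q4_K_M quant
+ ./build/bin/llama-cli \
+   -m FinMatcha-3B-Instruct-Q4_K_M.gguf \
+   -cnv \
+   -p "Kamu adalah asisten yang membantu." \
+   --temp 0.7
+ ```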
+ 
+ ## Model file specification
+ 
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [FinMatcha-3B-Instruct-Q2_K.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q2_K.gguf) | Q2_K | 1.364 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [FinMatcha-3B-Instruct-Q3_K_S.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q3_K_S.gguf) | Q3_K_S | 1.543 GB | very small, high quality loss |
+ | [FinMatcha-3B-Instruct-Q3_K_M.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q3_K_M.gguf) | Q3_K_M | 1.687 GB | very small, high quality loss |
+ | [FinMatcha-3B-Instruct-Q3_K_L.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q3_K_L.gguf) | Q3_K_L | 1.815 GB | small, substantial quality loss |
+ | [FinMatcha-3B-Instruct-Q4_0.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q4_0.gguf) | Q4_0 | 1.917 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [FinMatcha-3B-Instruct-Q4_K_S.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q4_K_S.gguf) | Q4_K_S | 1.928 GB | small, greater quality loss |
+ | [FinMatcha-3B-Instruct-Q4_K_M.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q4_K_M.gguf) | Q4_K_M | 2.019 GB | medium, balanced quality - recommended |
+ | [FinMatcha-3B-Instruct-Q5_0.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q5_0.gguf) | Q5_0 | 2.270 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [FinMatcha-3B-Instruct-Q5_K_S.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q5_K_S.gguf) | Q5_K_S | 2.270 GB | large, low quality loss - recommended |
+ | [FinMatcha-3B-Instruct-Q5_K_M.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q5_K_M.gguf) | Q5_K_M | 2.322 GB | large, very low quality loss - recommended |
+ | [FinMatcha-3B-Instruct-Q6_K.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q6_K.gguf) | Q6_K | 2.644 GB | very large, extremely low quality loss |
+ | [FinMatcha-3B-Instruct-Q8_0.gguf](https://huggingface.co/tensorblock/FinMatcha-3B-Instruct-GGUF/blob/main/FinMatcha-3B-Instruct-Q8_0.gguf) | Q8_0 | 3.422 GB | very large, extremely low quality loss - not recommended |
+ 
+ 
+ ## Downloading instructions
+ 
+ ### Command line
+ 
+ First, install the Hugging Face Hub command-line client:
+ 
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+ 
+ Then download an individual model file to a local directory:
+ 
+ ```shell
+ huggingface-cli download tensorblock/FinMatcha-3B-Instruct-GGUF --include "FinMatcha-3B-Instruct-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
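+ 
+ After a download finishes, you can optionally verify the file against the SHA-256 digest recorded in its Git LFS pointer earlier in this commit (the `oid` field). For example, for the Q2_K file:
+ 
+ ```shell
+ # Expected digest, from the Q2_K LFS pointer:
+ # 55bc8769bd77d9d3335595c1c677f2f5aa11b4cabd4c8c4c2c20ec7662239542
+ sha256sum MY_LOCAL_DIR/FinMatcha-3B-Instruct-Q2_K.gguf
+ ```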
+ 
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+ 
+ ```shell
+ huggingface-cli download tensorblock/FinMatcha-3B-Instruct-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```