Upload llama-2-7b-chat-catala_q8_0.gguf with huggingface_hub
- .gitattributes +1 -0
- llama-2-7b-chat-catala_q8_0.gguf +3 -0
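The commit title states the file was pushed with the huggingface_hub client. Purely as an illustration, a minimal upload sketch could look like the following; the repo_id and local path are placeholders, not values taken from this commit:

```python
# Minimal sketch of uploading a GGUF file with huggingface_hub.
# The repo_id and local path below are placeholder assumptions,
# not the actual repository or path behind this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="llama-2-7b-chat-catala_q8_0.gguf",   # local file to push
    path_in_repo="llama-2-7b-chat-catala_q8_0.gguf",      # destination name in the repo
    repo_id="your-username/llama-2-7b-chat-catala-gguf",  # placeholder repo id
    repo_type="model",
    commit_message="Upload llama-2-7b-chat-catala_q8_0.gguf with huggingface_hub",
)
```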
.gitattributes
CHANGED
@@ -39,3 +39,4 @@ llama-2-7b-chat-catala_q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q4_1.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q2_k.gguf filter=lfs diff=lfs merge=lfs -text
+llama-2-7b-chat-catala_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
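The added rule routes the new q8_0 GGUF through Git LFS instead of storing it directly in git history. As an illustration only (not part of the commit), a small check assuming a local clone of the repository:

```python
# Illustration only: confirm the new LFS rule is present in a local clone.
# Assumes the current working directory is the repository root.
from pathlib import Path

rule = "llama-2-7b-chat-catala_q8_0.gguf filter=lfs diff=lfs merge=lfs -text"
lines = Path(".gitattributes").read_text().splitlines()
print("q8_0 LFS rule present:", rule in lines)
```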
llama-2-7b-chat-catala_q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1dd87135c5a732e24dd161c25e88f6c3bb5f02017cb136979fd1ce11d49c3b
+size 7161089824
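What lands in git is the Git LFS pointer rather than the weights themselves: it records the LFS spec version, the SHA-256 of the real file (oid), and its size in bytes (7,161,089,824, about 7.16 GB). As a hedged sketch, a downloaded copy of the GGUF could be checked against this pointer as follows; the local filename is an assumption:

```python
# Sketch: verify a locally downloaded GGUF against the LFS pointer above.
# The local path is an assumption; oid and size are copied from the pointer file.
import hashlib
import os

path = "llama-2-7b-chat-catala_q8_0.gguf"  # assumed local download
expected_oid = "1c1dd87135c5a732e24dd161c25e88f6c3bb5f02017cb136979fd1ce11d49c3b"
expected_size = 7161089824

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

print("size matches:", os.path.getsize(path) == expected_size)
print("sha256 matches:", sha256.hexdigest() == expected_oid)
```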