Spaces:
Sleeping
Sleeping
Using Q8_0 instead.
Browse files- .gitattributes +1 -0
- app/main.py +1 -1
- models/final-gemma2b_SA-Q8_0.gguf +3 -0
.gitattributes
CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
models/final-gemma2b_SA-Q5_K.gguf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
models/final-gemma2b_SA-Q5_K.gguf filter=lfs diff=lfs merge=lfs -text
|
37 |
+
models/final-gemma2b_SA-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
|
app/main.py
CHANGED
@@ -8,7 +8,7 @@ from llama_cpp import Llama
|
|
8 |
|
9 |
print("Loading model...")
|
10 |
llm = Llama(
|
11 |
-
model_path="/models/final-gemma2b_SA-Q5_K.gguf",
|
12 |
# n_gpu_layers=28, # Uncomment to use GPU acceleration
|
13 |
# seed=1337, # Uncomment to set a specific seed
|
14 |
# n_ctx=2048, # Uncomment to increase the context window
|
|
|
8 |
|
9 |
print("Loading model...")
|
10 |
llm = Llama(
|
11 |
+
model_path="/models/final-gemma2b_SA-Q8_0.gguf",
|
12 |
# n_gpu_layers=28, # Uncomment to use GPU acceleration
|
13 |
# seed=1337, # Uncomment to set a specific seed
|
14 |
# n_ctx=2048, # Uncomment to increase the context window
|
models/final-gemma2b_SA-Q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8770ad0c0a61bdda3c6e2c07b2db9381300b7e9699affa1419bd4ebd8256edd5
|
3 |
+
size 2669351840
|