Runtime error
Update app.py
app.py CHANGED
@@ -12,14 +12,14 @@ llm = Llama(
     # repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
     # filename="Phi-3-mini-4k-instruct-q4.gguf",
     # ),
-    # model_path=hf_hub_download(
-    #     repo_id="Ankitajadhav/Phi-3-mini-4k-instruct-q4.gguf",
-    #     filename="Phi-3-mini-4k-instruct-q4.gguf",
-    # ),
     model_path=hf_hub_download(
-        repo_id="
-        filename="
+        repo_id="Ankitajadhav/Phi-3-mini-4k-instruct-q4.gguf",
+        filename="Phi-3-mini-4k-instruct-q4.gguf",
     ),
+    # model_path=hf_hub_download(
+    #     repo_id="TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF",
+    #     filename="capybarahermes-2.5-mistral-7b.Q2_K.gguf",
+    # ),
     n_ctx=2048,
     n_gpu_layers=50,  # Adjust based on your VRAM
 )
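After this change, app.py downloads the Ankitajadhav GGUF build of Phi-3-mini-4k-instruct from the Hub and hands the local path to llama.cpp. A minimal sketch of the resulting initialization, assuming the file uses llama-cpp-python and huggingface_hub with the imports shown below (the import lines fall outside the hunk and are assumptions):

# Sketch only: imports are assumed, not part of the shown diff.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

llm = Llama(
    # hf_hub_download fetches the GGUF file (cached after the first run)
    # and returns its local path, which llama.cpp loads directly.
    model_path=hf_hub_download(
        repo_id="Ankitajadhav/Phi-3-mini-4k-instruct-q4.gguf",
        filename="Phi-3-mini-4k-instruct-q4.gguf",
    ),
    n_ctx=2048,       # context window size in tokens
    n_gpu_layers=50,  # Adjust based on your VRAM
)

If the Space runs on CPU-only hardware, n_gpu_layers can be set to 0 so llama.cpp keeps all layers on the CPU.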