Update app/llm.py
app/llm.py  +1 -1
@@ -34,7 +34,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
     filename="healthcare-ai-q8_0-unsloth.Q8_0.gguf",
     #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
     verbose=False,
-    n_ctx=
+    n_ctx=256,
     n_gpu_layers=0,
     #chat_format="llama-2"
 )
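For reference, a minimal sketch of how the surrounding llm_chat initialization in app/llm.py might read after this change, plus an illustrative call. The repo_id value is an assumption inferred from the commented-out tokenizer line, and the chat-completion usage at the end is not part of this commit; only the keyword arguments shown in the diff are confirmed.

import llama_cpp

# Load the quantized GGUF model from the Hugging Face Hub.
# repo_id is assumed from the commented-out tokenizer line above.
llm_chat = llama_cpp.Llama.from_pretrained(
    repo_id="moriire/healthcare-ai-q8_0",
    filename="healthcare-ai-q8_0-unsloth.Q8_0.gguf",
    verbose=False,
    n_ctx=256,        # context window set by this commit (was previously left incomplete)
    n_gpu_layers=0,   # CPU-only inference
)

# Illustrative usage (not in the commit): llama-cpp-python exposes an
# OpenAI-style chat completion API on the loaded model.
if __name__ == "__main__":
    response = llm_chat.create_chat_completion(
        messages=[{"role": "user", "content": "What are common symptoms of dehydration?"}],
        max_tokens=128,
    )
    print(response["choices"][0]["message"]["content"])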