Update app/llm.py
app/llm.py (+3 -3)
@@ -32,16 +32,16 @@ class ChatModel(BaseModel):
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-ai-q8_0",
     filename="healthcare-ai-q8_0-unsloth.Q8_0.gguf",
-
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
     verbose=False,
     n_ctx=1024,
     n_gpu_layers=0,
-    chat_format="llama-2"
+    #chat_format="llama-2"
 )
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-ai-q8_0",
     filename="healthcare-ai-q8_0-unsloth.Q8_0.gguf",
-    #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-
+    #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
     verbose=False,
     n_ctx=4096,
     n_gpu_layers=0,
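The net effect of this commit: llm_chat now tokenizes with the model's Hugging Face tokenizer rather than the GGUF's built-in one, and no longer forces chat_format="llama-2" (when chat_format is left unset, llama-cpp-python falls back to the chat template embedded in the GGUF metadata, if one is present). Below is a minimal usage sketch of the two objects, assuming app/llm.py exposes them at module level as shown in the diff; the prompts and generation parameters are illustrative, not part of the commit.

from app.llm import llm_chat, llm_generate

# Chat-style call: messages are rendered by the active chat template
reply = llm_chat.create_chat_completion(
    messages=[{"role": "user", "content": "What are common symptoms of influenza?"}],
    max_tokens=256,
)
print(reply["choices"][0]["message"]["content"])

# Raw text completion against the 4096-context instance
out = llm_generate("Patient presents with fever and a dry cough.", max_tokens=128)
print(out["choices"][0]["text"])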