moriire committed
Commit
df24c3c
1 Parent(s): e06db31

Update app/llm.py

Files changed (1):
  app/llm.py (+2 -2)
app/llm.py CHANGED
@@ -32,7 +32,7 @@ class ChatModel(BaseModel):
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-ai-q2_k",
     filename="*.gguf",
-    #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q4_k_m"),
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q2_k"),
     verbose=False,
     n_ctx=256,
     n_gpu_layers=0,
@@ -41,7 +41,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="moriire/healthcare-ai-q2_k",
     filename="*.gguf",
-    #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q2_k"),
     verbose=False,
     n_ctx=4096,
     n_gpu_layers=0,
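
The change replaces the commented-out HF tokenizer references (q4_k_m and q8_0) with an active tokenizer loaded from the same q2_k repo, so both loaders tokenize with the model they actually load. Below is a minimal usage sketch of the resulting chat object with llama-cpp-python's chat completion API; the prompt text and sampling parameters are illustrative assumptions, not taken from the commit.

# Minimal sketch, assuming llama-cpp-python (with transformers for the HF
# tokenizer) is installed and the q2_k GGUF repo is reachable.
import llama_cpp

llm_chat = llama_cpp.Llama.from_pretrained(
    repo_id="moriire/healthcare-ai-q2_k",
    filename="*.gguf",
    # Tokenize with the HF tokenizer from the same q2_k repo, as introduced
    # by this commit.
    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained(
        "moriire/healthcare-ai-q2_k"
    ),
    verbose=False,
    n_ctx=256,
    n_gpu_layers=0,
)

# create_chat_completion follows the OpenAI-style message format.
response = llm_chat.create_chat_completion(
    messages=[{"role": "user", "content": "What are common symptoms of flu?"}],
    max_tokens=128,   # illustrative value, not from the commit
    temperature=0.7,  # illustrative value, not from the commit
)
print(response["choices"][0]["message"]["content"])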