Update app/llm.py

app/llm.py (+12 -0)
@@ -25,6 +25,17 @@ from langchain_core.runnables import RunnablePassthrough, RunnablePick
 rag_prompt_llama = hub.pull("rlm/rag-prompt-llama")
 rag_prompt.messages
 
+llm = llama_cpp.Llama.from_pretrained(
+    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
+    filename="*q4_0.gguf",
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat"),
+    verbose=False,
+    n_ctx=512,
+    n_gpu_layers=0,
+    #chat_format="llama-2"
+)
+
+
 class RagChat:
     def agent(self):
         loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
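The hunk above adds a module-level llama-cpp-python handle built with Llama.from_pretrained, which downloads the GGUF weights from the Hugging Face Hub and pairs them with the matching Qwen HF tokenizer. A minimal sketch of how that handle could be exercised; the prompt and generation parameters below are illustrative, not part of this commit:

import llama_cpp

# Mirrors the construction added in the hunk above (HF tokenizer omitted for brevity).
llm = llama_cpp.Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q4_0.gguf",
    verbose=False,
    n_ctx=512,
    n_gpu_layers=0,   # CPU-only inference
)

response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is retrieval-augmented generation?"}],
    max_tokens=128,   # stay well inside the 512-token context set above
)
print(response["choices"][0]["message"]["content"])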
@@ -69,6 +80,7 @@ class ChatModel(BaseModel):
     mirostat_mode: int=2
     mirostat_tau: float=4.0
     mirostat_eta: float=1.1
+
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
     filename="*q4_0.gguf",
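The second hunk only inserts a blank line between the ChatModel sampling fields and the llm_chat handle. For context, those fields configure Mirostat sampling, and llama-cpp-python's create_chat_completion accepts them directly as keyword arguments (the library's stock defaults are mirostat_mode=0, mirostat_tau=5.0, mirostat_eta=0.1, so the values here enable Mirostat v2 with a higher-than-default learning rate). A sketch of the assumed wiring; the full ChatModel class is not shown in this diff:

from pydantic import BaseModel

class ChatModel(BaseModel):
    # Trimmed to the fields visible in the hunk above.
    mirostat_mode: int = 2      # 2 selects Mirostat v2 sampling
    mirostat_tau: float = 4.0   # target entropy (lower = more focused output)
    mirostat_eta: float = 1.1   # Mirostat learning rate

params = ChatModel()
response = llm_chat.create_chat_completion(   # llm_chat from the hunk above
    messages=[{"role": "user", "content": "Hello!"}],
    mirostat_mode=params.mirostat_mode,
    mirostat_tau=params.mirostat_tau,
    mirostat_eta=params.mirostat_eta,
)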