Spaces:
Running
Running
Bugfix in cli
Browse files- search_agent.py +1 -1
- web_rag.py +2 -2
search_agent.py
CHANGED
@@ -101,7 +101,7 @@ if __name__ == '__main__':
 101          vector_store = wc.vectorize(contents)
 102
 103          with console.status("[bold green]Querying LLM relevant context", spinner='dots8Bit'):
 104 -            respomse = wr.query_rag(chat, query, optimize_search_query, vector_store, callbacks=callbacks)
 104 +            respomse = wr.query_rag(chat, query, optimize_search_query, vector_store, top_k = 5, callbacks=callbacks)
 105
 106          console.rule(f"[bold green]Response from {provider}")
 107          if output == "text":
web_rag.py
CHANGED
@@ -219,7 +219,7 @@ def build_rag_prompt(question, search_query, vectorstore, top_k = 10, callbacks
 219      prompt = get_rag_prompt_template().format(query=question, context=context)
 220      return prompt
 221
 222 -def query_rag(chat_llm, question, search_query, vectorstore, callbacks = []):
 223 -    prompt = build_rag_prompt(question, search_query, vectorstore, callbacks)
 222 +def query_rag(chat_llm, question, search_query, vectorstore, top_k = 10, callbacks = []):
 223 +    prompt = build_rag_prompt(question, search_query, vectorstore, top_k= top_k, callbacks = callbacks)
 224      response = chat_llm.invoke(prompt, config={"callbacks": callbacks})
 225      return response.content