gerasdf
committed on
Commit
·
20528a7
1
Parent(s):
f5924e7
Revert "History with doc_ids. This version doesn't work. To get the _ids for documents I'd have to implement either a new retriever that answers with documents and their _ids or manually implement the vector search in the pipeline. I'm dropping it for now; I'll just re-do the vector search when loading a history"
Browse files
query.py
CHANGED
@@ -13,7 +13,7 @@ from elevenlabs import VoiceSettings
|
|
13 |
from elevenlabs.client import ElevenLabs
|
14 |
from openai import OpenAI
|
15 |
|
16 |
-
from json import loads as json_loads
|
17 |
import itertools
|
18 |
import time
|
19 |
import os
|
@@ -49,9 +49,7 @@ def ai_setup():
|
|
49 |
retriever = vstore.as_retriever(search_kwargs={'k': 10})
|
50 |
|
51 |
prompt_template = os.environ.get("PROMPT_TEMPLATE")
|
52 |
-
prompt = ChatPromptTemplate.from_messages([
|
53 |
-
('system', "{doc_ids}"),
|
54 |
-
('system', prompt_template)])
|
55 |
prompt_chain = (
|
56 |
{"context": retriever, "question": RunnablePassthrough()}
|
57 |
| RunnableLambda(format_context)
|
@@ -101,9 +99,7 @@ def format_context(pipeline_state):
|
|
101 |
context += text
|
102 |
context += '\n\n---\n'
|
103 |
|
104 |
-
doc_ids = [1,2,3,4,5]
|
105 |
pipeline_state["context"] = context
|
106 |
-
pipeline_state["doc_ids"] = json_dumps(doc_ids)
|
107 |
return pipeline_state
|
108 |
|
109 |
def just_read(pipeline_state):
|
@@ -278,13 +274,12 @@ def chat(message, history, state, request:gr.Request):
|
|
278 |
else:
|
279 |
if AI:
|
280 |
if not history:
|
281 |
-
|
282 |
-
system_prompt =
|
283 |
state["system"] = system_prompt
|
284 |
|
285 |
# Next is commented out because astra has a limit on document size
|
286 |
-
|
287 |
-
add_history(state, request, "system", doc_ids, name=message)
|
288 |
else:
|
289 |
system_prompt = state["system"]
|
290 |
|
|
|
13 |
from elevenlabs.client import ElevenLabs
|
14 |
from openai import OpenAI
|
15 |
|
16 |
+
from json import loads as json_loads
|
17 |
import itertools
|
18 |
import time
|
19 |
import os
|
|
|
49 |
retriever = vstore.as_retriever(search_kwargs={'k': 10})
|
50 |
|
51 |
prompt_template = os.environ.get("PROMPT_TEMPLATE")
|
52 |
+
prompt = ChatPromptTemplate.from_messages([('system', prompt_template)])
|
|
|
|
|
53 |
prompt_chain = (
|
54 |
{"context": retriever, "question": RunnablePassthrough()}
|
55 |
| RunnableLambda(format_context)
|
|
|
99 |
context += text
|
100 |
context += '\n\n---\n'
|
101 |
|
|
|
102 |
pipeline_state["context"] = context
|
|
|
103 |
return pipeline_state
|
104 |
|
105 |
def just_read(pipeline_state):
|
|
|
274 |
else:
|
275 |
if AI:
|
276 |
if not history:
|
277 |
+
system_prompt = prompt_chain.invoke(message)
|
278 |
+
system_prompt = system_prompt.messages[0]
|
279 |
state["system"] = system_prompt
|
280 |
|
281 |
# Next is commented out because astra has a limit on document size
|
282 |
+
# add_history(state, request, "system", system_prompt, name=message)
|
|
|
283 |
else:
|
284 |
system_prompt = state["system"]
|
285 |
|