# my_memory_logic.py
import os
# Import the "run_with_chain_context" function from pipeline.py
# This function must accept a dict with { "input": ..., "chat_history": ... }
# and return a dict with { "answer": ... }.
from pipeline import run_with_chain_context
# For session-based chat history
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
# RunnableLambda wraps a plain Python function so it exposes the Runnable
# interface that RunnableWithMessageHistory expects
from langchain_core.runnables import RunnableLambda
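# For reference, the contract this module assumes of run_with_chain_context
# (a hypothetical sketch only; the real implementation lives in pipeline.py):
#
#   def run_with_chain_context(inputs: dict) -> dict:
#       # inputs == {"input": "<user question>", "chat_history": [<BaseMessage>, ...]}
#       ...
#       return {"answer": "<final answer string>"}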
###############################################################################
# 1) In-Memory Store: session_id -> ChatMessageHistory
###############################################################################
store = {} # e.g., { "abc123": ChatMessageHistory(...) }
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """
    Retrieve (or create) a ChatMessageHistory for the given session_id.
    This ensures each session_id has its own conversation transcript.
    """
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]
###############################################################################
# 2) Build a RunnableWithMessageHistory that wraps "run_with_chain_context"
###############################################################################
# "run_with_chain_context" must be a function returning a dict,
# e.g. { "answer": "... final string ..." }
# input_messages_key -> "input"
# history_messages_key -> "chat_history"
# output_messages_key -> "answer"
conversational_rag_chain = RunnableWithMessageHistory(
    # Wrap the plain function in RunnableLambda so it can be used as a Runnable
    RunnableLambda(run_with_chain_context),  # from pipeline.py
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)
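# At invoke time, RunnableWithMessageHistory:
#   1) looks up the ChatMessageHistory for config["configurable"]["session_id"]
#      via get_session_history,
#   2) injects its messages into the input dict under "chat_history",
#   3) calls the wrapped runnable,
#   4) appends the new human message and the returned "answer" to the history.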
###############################################################################
# 3) A convenience function that calls our chain with session-based memory
###############################################################################
def run_with_session_memory(user_query: str, session_id: str) -> str:
    """
    Calls the 'conversational_rag_chain' with a given session_id and user_query.
    This returns the final 'answer' from run_with_chain_context.
    """
    response = conversational_rag_chain.invoke(
        {"input": user_query},
        config={
            "configurable": {
                "session_id": session_id
            }
        },
    )
    return response["answer"]
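###############################################################################
# 4) Example usage (a minimal sketch; assumes pipeline.py is importable with a
#    configured retriever/LLM, and the questions below are placeholders)
###############################################################################
if __name__ == "__main__":
    # Two turns in the same session: the second question can rely on the
    # chat history recorded during the first turn.
    print(run_with_session_memory("What is our refund policy?", session_id="demo-session"))
    print(run_with_session_memory("Can you summarize that in one sentence?", session_id="demo-session"))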