# chain_setup.py
from langchain.chains import ConversationalRetrievalChain
from langchain_community.chat_models import ChatOllama
from langchain.memory import ConversationBufferMemory
def build_conversational_chain(vectorstore):
    """
    Build a ConversationalRetrievalChain for multi-turn Q&A over *vectorstore*.

    Parameters
    ----------
    vectorstore
        A LangChain vector store; must expose ``as_retriever()``.

    Returns
    -------
    ConversationalRetrievalChain
        Chain wired with a ChatOllama LLM and a ConversationBufferMemory,
        so follow-up questions retain conversational context.
    """
    llm = ChatOllama(model="qwen2.5:7b")
    # return_messages=True keeps history as message objects rather than one
    # concatenated string — the format chat models expect.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        # Top-5 similarity search; tune k to trade recall vs. prompt size.
        retriever=vectorstore.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 5},
        ),
        memory=memory,
        verbose=True,  # optional debug logs
    )
    return qa_chain