from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_models import ChatOllama
def build_conversational_chain(vectorstore, model="qwen2.5:7b", k=5):
    """Create a ConversationalRetrievalChain for multi-turn Q&A over a vectorstore.

    Wires a ChatOllama LLM and a ConversationBufferMemory into a
    ConversationalRetrievalChain so follow-up questions are answered
    with chat history taken into account.

    Args:
        vectorstore: A LangChain vectorstore exposing ``as_retriever()``.
        model: Name of the Ollama model to use (default ``"qwen2.5:7b"``).
        k: Number of similar documents to retrieve per query (default 5).

    Returns:
        A configured ``ConversationalRetrievalChain`` instance.
    """
    llm = ChatOllama(model=model)

    # output_key="answer" pins which chain output is stored in memory.
    # Without it, ConversationBufferMemory raises a ValueError as soon as
    # the chain returns more than one key (e.g. return_source_documents=True).
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        output_key="answer",
    )

    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(
            search_type="similarity",
            search_kwargs={"k": k},
        ),
        memory=memory,
        verbose=True,
    )

    return qa_chain