import os
from huggingface_hub import hf_hub_download
from langchain.llms import LlamaCpp  # on LangChain >= 0.2: from langchain_community.llms import LlamaCpp
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

def load_llm():
    """
    Downloads a Qwen2.5 GGUF model and loads it via llama-cpp.
    """
    # 1) Download the GGUF model from Hugging Face
    model_file = hf_hub_download(
        repo_id="bartowski/Qwen2.5-7B-Instruct-GGUF",  # Non-math version
        filename="Qwen2.5-7B-Instruct-Q4_K_M.gguf",    # Example file
        local_dir="./models",
        local_dir_use_symlinks=False
    )

    # 2) Load the model with llama-cpp via LangChain’s LlamaCpp
    llm = LlamaCpp(
        model_path=model_file,
        flash_attn=False,      # enable only if llama-cpp was built with flash attention support
        n_ctx=2048,            # context window; raise to 4096 if memory allows
        n_batch=512,           # prompt-processing batch size; drop to 256 to save memory
        chat_format="chatml",  # Qwen2.5 instruct models use the ChatML prompt template
    )

    return llm

def build_conversational_chain(vectorstore):
    """
    Creates a ConversationalRetrievalChain using the local llama-cpp-based LLM 
    and a ConversationBufferMemory for multi-turn Q&A.
    """
    llm = load_llm()

    # We'll store chat history in memory so the chain can handle multi-turn conversations
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )

    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 5}),
        memory=memory,
        verbose=True
    )

    return qa_chain
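
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): wires the
# chain to a tiny FAISS index so it can be exercised end to end. The embedding
# model name ("sentence-transformers/all-MiniLM-L6-v2") and the sample texts
# are assumptions for the demo; any LangChain vectorstore works with
# build_conversational_chain().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import FAISS

    # Build a toy vectorstore; in practice this would be built from your documents.
    texts = [
        "LangChain is a framework for building LLM applications.",
        "llama-cpp runs GGUF-quantized models on CPU or GPU.",
    ]
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectorstore = FAISS.from_texts(texts, embeddings)

    chain = build_conversational_chain(vectorstore)

    # ConversationalRetrievalChain takes a dict with "question"; with the
    # buffer memory attached, the reply is returned under "answer".
    result = chain({"question": "What does llama-cpp do?"})
    print(result["answer"])

    # Follow-up turn: the memory supplies the prior exchange as chat_history.
    result = chain({"question": "And what is LangChain for?"})
    print(result["answer"])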