File size: 1,054 Bytes
d0fbfa7
 
 
 
 
 
d5648ee
d0fbfa7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import os
from dotenv import load_dotenv

# Load environment variables from a .env file found relative to the CWD.
# Previously this was a commented-out call with a hard-coded absolute path,
# so the .env file was never actually loaded. Calling load_dotenv() with no
# argument is portable: it searches upward for a .env file, silently no-ops
# if none exists, and does not override variables already set in the shell.
load_dotenv()

# API key for OpenAI; None if unset — the OpenAI client will raise a clear
# auth error at call time in that case.
openai_api_key = os.environ.get('OPENAI_API_KEY')
def load_qa_chain(collection_name):
    """Build a RetrievalQA chain over a persisted Chroma collection.

    Args:
        collection_name: Name of the Chroma collection to load from disk.

    Returns:
        A RetrievalQA chain (map_reduce) that answers queries using the
        top-2 most similar documents from the collection.
    """
    # Load the vector store from disk, embedding queries with OpenAI.
    vector_store = Chroma(collection_name=collection_name, embedding_function=OpenAIEmbeddings())

    # Create an instance of the OpenAI language model.
    llm = OpenAI(openai_api_key=openai_api_key)

    # Retrieve the 2 most similar chunks per query.
    retriever = vector_store.as_retriever(search_kwargs={"k": 2})

    # BUG FIX: the original built `retriever` with k=2 but then passed a
    # fresh default `vector_store.as_retriever()` to the chain, so the
    # k=2 setting was silently ignored. Use the configured retriever.
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="map_reduce",
        retriever=retriever
    )

    return qa_chain

def process_query(query, qa_chain):
    """Answer *query* by delegating to the given RetrievalQA chain.

    Args:
        query: The user's question as a string.
        qa_chain: A chain object exposing ``run(query) -> str``.

    Returns:
        The chain's answer string.
    """
    # Thin wrapper: the chain handles retrieval and generation end to end.
    return qa_chain.run(query)