Update app.py
app.py
CHANGED
@@ -41,11 +41,11 @@ def invoke(openai_api_key, use_rag, prompt):
         splits = text_splitter.split_documents(docs)
         vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
         rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
-        result = rag_chain({"
+        result = rag_chain({"query": prompt})
         result = result["result"]
     else:
         chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
-        result = chain.run({"context": "", "
+        result = chain.run({"context": "", "query": prompt})
     #print(result)
     return result
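For context, here is a minimal, self-contained sketch of the RAG branch this hunk patches, written against the legacy (pre-0.1) LangChain API the code implies. Only the chain wiring and the {"query": prompt} input key come from the diff; the sample document, prompt template, splitter settings, and model choice are assumptions.

# Sketch only; assumes OPENAI_API_KEY is set in the environment.
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

CHROMA_DIR = "./chroma_db"  # assumption: the real value is defined outside this hunk

# Assumption: a stuff-chain prompt. RetrievalQA maps its "query" input onto the
# "question" prompt variable internally, so the template uses {question}.
CHAIN_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template="Answer from this context only:\n{context}\n\nQuestion: {question}\nAnswer:",
)

docs = [Document(page_content="RetrievalQA is LangChain's retrieval QA chain.")]  # placeholder corpus
splits = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
vector_db = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings(), persist_directory=CHROMA_DIR)

rag_chain = RetrievalQA.from_chain_type(
    ChatOpenAI(temperature=0),  # assumption: llm is built elsewhere in app.py
    retriever=vector_db.as_retriever(search_kwargs={"k": 3}),
    return_source_documents=True,
    chain_type_kwargs={"prompt": CHAIN_PROMPT},
)

# The fix on line 44: RetrievalQA's input key is "query", and because
# return_source_documents=True the call returns a dict, so the answer text
# is read back out of result["result"] (line 45).
result = rag_chain({"query": "What is RetrievalQA?"})
print(result["result"])
print(result["source_documents"])  # the k=3 retrieved chunks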
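The non-RAG branch on line 48 relies on a detail of the same legacy API: LLMChain.run called with a single dict must supply every variable the prompt declares, which is why an empty "context" is passed alongside the user "query". A sketch under the same assumptions (the template body is invented):

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

# Assumption: in the non-RAG path the prompt exposes "context" and "query",
# matching the keys passed on line 48; the template text is a placeholder.
CHAIN_PROMPT = PromptTemplate(
    input_variables=["context", "query"],
    template="Context: {context}\nQuestion: {query}\nAnswer:",
)

chain = LLMChain(llm=ChatOpenAI(temperature=0), prompt=CHAIN_PROMPT)

# run() with one positional dict routes each key to its prompt variable and
# returns the bare output string, so no ["result"] indexing here (cf. line 45).
result = chain.run({"context": "", "query": "What is RAG?"})
print(result)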