Update app.py
app.py CHANGED

```diff
@@ -42,11 +42,12 @@ def invoke(openai_api_key, use_rag, prompt):
         vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
         rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
         result = rag_chain({"question": prompt})
+        result = result["result"]
     else:
         chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
         result = chain.run({"context": "", "question": prompt})
     #print(result)
-    return result
+    return result
 
 description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data
                  (in this case a YouTube video, but it could be PDFs, URLs, or other structured/unstructured private/public
```
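For readers skimming the diff: the one functional change is the added `result = result["result"]` lookup. With `return_source_documents = True`, calling the `RetrievalQA` chain returns a dict rather than a bare string, while `LLMChain.run()` already returns the answer text, so only the RAG branch needed unwrapping. A minimal sketch of the two shapes being reconciled (the literal values are illustrative, not the app's actual output):

```python
# Sketch of the two return shapes this commit reconciles; values are made up.

# A RetrievalQA chain built with return_source_documents = True returns a
# dict holding the answer under "result" and the retrieved chunks under
# "source_documents" (alongside its echoed input).
rag_output = {
    "result": "RAG grounds the LLM's answer in retrieved transcript chunks.",
    "source_documents": ["<Document objects from the k=3 retriever>"],
}
answer = rag_output["result"]  # the lookup the new line performs

# LLMChain.run() returns the answer text directly, so the else branch was
# already a plain string and needs no change.
llm_output = "RAG grounds the LLM's answer in retrieved transcript chunks."

# After the commit, both branches of invoke() return the same type.
assert isinstance(answer, str) and isinstance(llm_output, str)
```

Without the new line, the RAG branch would have handed the whole dict back to the caller instead of just the answer text.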