Update app.py
app.py
CHANGED
@@ -20,7 +20,7 @@ template = """Use the following pieces of context to answer the question at the
 an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer.
 {context} Question: {question} Helpful Answer: """

-
+CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

 CHROMA_DIR = "docs/chroma"
 YOUTUBE_DIR = "docs/youtube"
@@ -40,13 +40,11 @@ def invoke(openai_api_key, use_rag, prompt):
         text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
         splits = text_splitter.split_documents(docs)
         vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
-        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt":
+        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
         result = rag_chain({"query": prompt})
     else:
-
-
-        chain = LLMChain(llm = llm)
-        result = chain({"query": prompt})
+        chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
+        result = chain.run(prompt)
     #print(result)
     return result["result"]

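For context, the change defines one PromptTemplate (CHAIN_PROMPT) and wires it into both execution paths: the RetrievalQA chain used when RAG is enabled, and the plain LLMChain used otherwise. The sketch below is a minimal, self-contained approximation of that pattern against the legacy LangChain API, not the commit itself; the model name, the abbreviated template text, how the documents arrive in docs, and the empty-context handling in the non-RAG branch are all assumptions.

# Minimal sketch of the pattern introduced above (legacy LangChain API).
# Assumed: gpt-3.5-turbo as the model, documents already loaded into `docs`,
# and an empty context string for the non-RAG branch.
from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

CHROMA_DIR = "docs/chroma"

# Abbreviated template; the real one ends with the thank-you line shown in the diff.
template = """Use the following pieces of context to answer the question at the end.
{context} Question: {question} Helpful Answer: """

# One prompt object shared by the RAG chain and the plain LLM chain.
CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

def invoke(openai_api_key, use_rag, prompt, docs = None):
    llm = ChatOpenAI(model_name = "gpt-3.5-turbo", openai_api_key = openai_api_key, temperature = 0)
    if use_rag:
        # Chunk the loaded documents, embed them into Chroma, and retrieve the top 3 chunks.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
        splits = text_splitter.split_documents(docs)
        vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(openai_api_key = openai_api_key), persist_directory = CHROMA_DIR)
        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
        return rag_chain({"query": prompt})["result"]  # RetrievalQA returns a dict
    # Non-RAG path: same prompt, but with no retrieved context to fill in.
    chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
    return chain.run(context = "", question = prompt)  # LLMChain.run returns a plain string

In this sketch the non-RAG branch supplies an empty context explicitly, since CHAIN_PROMPT declares both "context" and "question" as input variables and LLMChain needs a value for each of them.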